Columns: repo (string, 7–54 chars) | path (string, 4–192 chars) | url (string, 87–284 chars) | code (string, 78–104k chars) | code_tokens (sequence) | docstring (string, 1–46.9k chars) | docstring_tokens (sequence) | language (1 class) | partition (3 classes)
oanda/v20-python | src/v20/order.py | https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/order.py#L4051-L4237 | python | train

```python
def replace(
    self,
    accountID,
    orderSpecifier,
    **kwargs
):
    """
    Replace an Order in an Account by simultaneously cancelling it and
    creating a replacement Order

    Args:
        accountID:
            Account Identifier
        orderSpecifier:
            The Order Specifier
        order:
            Specification of the replacing Order

    Returns:
        v20.response.Response containing the results from submitting the
        request
    """
    request = Request(
        'PUT',
        '/v3/accounts/{accountID}/orders/{orderSpecifier}'
    )

    request.set_path_param(
        'accountID',
        accountID
    )

    request.set_path_param(
        'orderSpecifier',
        orderSpecifier
    )

    body = EntityDict()

    if 'order' in kwargs:
        body.set('order', kwargs['order'])

    request.set_body_dict(body.dict)

    response = self.ctx.request(request)

    if response.content_type is None:
        return response

    if not response.content_type.startswith("application/json"):
        return response

    jbody = json.loads(response.raw_body)

    parsed_body = {}

    #
    # Parse responses as defined by the API specification
    #
    if str(response.status) == "201":
        if jbody.get('orderCancelTransaction') is not None:
            parsed_body['orderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['orderCancelTransaction'],
                    self.ctx
                )

        if jbody.get('orderCreateTransaction') is not None:
            parsed_body['orderCreateTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderCreateTransaction'],
                    self.ctx
                )

        if jbody.get('orderFillTransaction') is not None:
            parsed_body['orderFillTransaction'] = \
                self.ctx.transaction.OrderFillTransaction.from_dict(
                    jbody['orderFillTransaction'],
                    self.ctx
                )

        if jbody.get('orderReissueTransaction') is not None:
            parsed_body['orderReissueTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderReissueTransaction'],
                    self.ctx
                )

        if jbody.get('orderReissueRejectTransaction') is not None:
            parsed_body['orderReissueRejectTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderReissueRejectTransaction'],
                    self.ctx
                )

        if jbody.get('replacingOrderCancelTransaction') is not None:
            parsed_body['replacingOrderCancelTransaction'] = \
                self.ctx.transaction.OrderCancelTransaction.from_dict(
                    jbody['replacingOrderCancelTransaction'],
                    self.ctx
                )

        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = \
                jbody.get('relatedTransactionIDs')

        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = \
                jbody.get('lastTransactionID')

    elif str(response.status) == "400":
        if jbody.get('orderRejectTransaction') is not None:
            parsed_body['orderRejectTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderRejectTransaction'],
                    self.ctx
                )

        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = \
                jbody.get('relatedTransactionIDs')

        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = \
                jbody.get('lastTransactionID')

        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    elif str(response.status) == "401":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    elif str(response.status) == "404":
        if jbody.get('orderCancelRejectTransaction') is not None:
            parsed_body['orderCancelRejectTransaction'] = \
                self.ctx.transaction.Transaction.from_dict(
                    jbody['orderCancelRejectTransaction'],
                    self.ctx
                )

        if jbody.get('relatedTransactionIDs') is not None:
            parsed_body['relatedTransactionIDs'] = \
                jbody.get('relatedTransactionIDs')

        if jbody.get('lastTransactionID') is not None:
            parsed_body['lastTransactionID'] = \
                jbody.get('lastTransactionID')

        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    elif str(response.status) == "405":
        if jbody.get('errorCode') is not None:
            parsed_body['errorCode'] = \
                jbody.get('errorCode')

        if jbody.get('errorMessage') is not None:
            parsed_body['errorMessage'] = \
                jbody.get('errorMessage')

    #
    # Unexpected response status
    #
    else:
        parsed_body = jbody

    response.body = parsed_body
    return response
```
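A minimal usage sketch; the account ID, order specifier, and `replacement_order` below are placeholders, and `api` is assumed to be a configured `v20.Context`:

```python
# Hypothetical identifiers -- none of these values come from the source above.
response = api.order.replace(
    "101-001-1234567-001",    # accountID
    "6372",                   # orderSpecifier (an order ID)
    order=replacement_order,  # specification of the replacing Order
)
if str(response.status) == "201":
    print(response.body["orderCancelTransaction"])  # the cancelled original
```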
onelogin/python-saml | src/onelogin/saml2/settings.py | https://github.com/onelogin/python-saml/blob/9fe7a72da5b4caa1529c1640b52d2649447ce49b/src/onelogin/saml2/settings.py#L250-L307 | python | train

```python
def __add_default_values(self):
    """
    Add default values if the settings info is not complete
    """
    self.__sp.setdefault('assertionConsumerService', {})
    self.__sp['assertionConsumerService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_POST)

    self.__sp.setdefault('attributeConsumingService', {})

    self.__sp.setdefault('singleLogoutService', {})
    self.__sp['singleLogoutService'].setdefault('binding', OneLogin_Saml2_Constants.BINDING_HTTP_REDIRECT)

    # Related to nameID
    self.__sp.setdefault('NameIDFormat', OneLogin_Saml2_Constants.NAMEID_UNSPECIFIED)
    self.__security.setdefault('nameIdEncrypted', False)

    # Metadata format
    self.__security.setdefault('metadataValidUntil', None)  # None means use default
    self.__security.setdefault('metadataCacheDuration', None)  # None means use default

    # Sign provided
    self.__security.setdefault('authnRequestsSigned', False)
    self.__security.setdefault('logoutRequestSigned', False)
    self.__security.setdefault('logoutResponseSigned', False)
    self.__security.setdefault('signMetadata', False)

    # Sign expected
    self.__security.setdefault('wantMessagesSigned', False)
    self.__security.setdefault('wantAssertionsSigned', False)

    # NameID element expected
    self.__security.setdefault('wantNameId', True)

    # SAML responses with a InResponseTo attribute not rejected when requestId not passed
    self.__security.setdefault('rejectUnsolicitedResponsesWithInResponseTo', False)

    # Encrypt expected
    self.__security.setdefault('wantAssertionsEncrypted', False)
    self.__security.setdefault('wantNameIdEncrypted', False)

    # Signature Algorithm
    self.__security.setdefault('signatureAlgorithm', OneLogin_Saml2_Constants.RSA_SHA1)

    # Digest Algorithm
    self.__security.setdefault('digestAlgorithm', OneLogin_Saml2_Constants.SHA1)

    # AttributeStatement required by default
    self.__security.setdefault('wantAttributeStatement', True)

    self.__idp.setdefault('x509cert', '')
    self.__idp.setdefault('certFingerprint', '')
    self.__idp.setdefault('certFingerprintAlgorithm', 'sha1')

    self.__sp.setdefault('x509cert', '')
    self.__sp.setdefault('privateKey', '')

    self.__security.setdefault('requestedAuthnContext', True)
    self.__security.setdefault('failOnAuthnContextMismatch', False)
```
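Every default above goes through `dict.setdefault`, so values already present in the user-supplied settings always win; a standalone illustration of that behavior:

```python
security = {"authnRequestsSigned": True}           # user-provided value
security.setdefault("authnRequestsSigned", False)  # ignored: key already set
security.setdefault("wantMessagesSigned", False)   # applied: key was missing
print(security)
# {'authnRequestsSigned': True, 'wantMessagesSigned': False}
```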
spotify/luigi | luigi/worker.py | https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/worker.py#L561-L583 | python | train

```python
def _add_task(self, *args, **kwargs):
    """
    Call ``self._scheduler.add_task``, but store the values too so we can
    implement :py:func:`luigi.execution_summary.summary`.
    """
    task_id = kwargs['task_id']
    status = kwargs['status']
    runnable = kwargs['runnable']
    task = self._scheduled_tasks.get(task_id)

    if task:
        self._add_task_history.append((task, status, runnable))
        kwargs['owners'] = task._owner_list()

    if task_id in self._batch_running_tasks:
        for batch_task in self._batch_running_tasks.pop(task_id):
            self._add_task_history.append((batch_task, status, True))

    if task and kwargs.get('params'):
        kwargs['param_visibilities'] = task._get_param_visibilities()

    self._scheduler.add_task(*args, **kwargs)
    logger.info('Informed scheduler that task %s has status %s', task_id, status)
```
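A stripped-down sketch of the same record-then-forward pattern, independent of luigi's internals (the names here are illustrative):

```python
class MiniWorker:
    def __init__(self, scheduler):
        self._scheduler = scheduler
        self._add_task_history = []  # later consumed to build a run summary

    def _add_task(self, **kwargs):
        # Remember what we told the scheduler, then forward the call.
        self._add_task_history.append(
            (kwargs["task_id"], kwargs["status"], kwargs["runnable"])
        )
        self._scheduler.add_task(**kwargs)
```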
quantopian/zipline | zipline/pipeline/factors/factor.py | https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/pipeline/factors/factor.py#L402-L524 | python | train

```python
def demean(self, mask=NotSpecified, groupby=NotSpecified):
    """
    Construct a Factor that computes ``self`` and subtracts the mean from
    each row of the result.

    If ``mask`` is supplied, ignore values where ``mask`` returns False
    when computing row means, and output NaN anywhere the mask is False.

    If ``groupby`` is supplied, compute by partitioning each row based on
    the values produced by ``groupby``, de-meaning the partitioned arrays,
    and stitching the sub-results back together.

    Parameters
    ----------
    mask : zipline.pipeline.Filter, optional
        A Filter defining values to ignore when computing means.
    groupby : zipline.pipeline.Classifier, optional
        A classifier defining partitions over which to compute means.

    Examples
    --------
    Let ``f`` be a Factor which would produce the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13    1.0    2.0    3.0    4.0
        2017-03-14    1.5    2.5    3.5    1.0
        2017-03-15    2.0    3.0    4.0    1.5
        2017-03-16    2.5    3.5    1.0    2.0

    Let ``c`` be a Classifier producing the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13      1      1      2      2
        2017-03-14      1      1      2      2
        2017-03-15      1      1      2      2
        2017-03-16      1      1      2      2

    Let ``m`` be a Filter producing the following output::

                     AAPL   MSFT    MCD     BK
        2017-03-13  False   True   True   True
        2017-03-14   True  False   True   True
        2017-03-15   True   True  False   True
        2017-03-16   True   True   True  False

    Then ``f.demean()`` will subtract the mean from each row produced by
    ``f``.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13 -1.500 -0.500  0.500  1.500
        2017-03-14 -0.625  0.375  1.375 -1.125
        2017-03-15 -0.625  0.375  1.375 -1.125
        2017-03-16  0.250  1.250 -1.250 -0.250

    ``f.demean(mask=m)`` will subtract the mean from each row, but means
    will be calculated ignoring values on the diagonal, and NaNs will be
    written to the diagonal in the output. Diagonal values are ignored
    because they are the locations where the mask ``m`` produced False.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13    NaN -1.000  0.000  1.000
        2017-03-14 -0.500    NaN  1.500 -1.000
        2017-03-15 -0.166  0.833    NaN -0.666
        2017-03-16  0.166  1.166 -1.333    NaN

    ``f.demean(groupby=c)`` will subtract the group-mean of AAPL/MSFT and
    MCD/BK from their respective entries. AAPL/MSFT are grouped together
    because both assets always produce 1 in the output of the classifier
    ``c``. Similarly, MCD/BK are grouped together because they always
    produce 2.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13 -0.500  0.500 -0.500  0.500
        2017-03-14 -0.500  0.500  1.250 -1.250
        2017-03-15 -0.500  0.500  1.250 -1.250
        2017-03-16 -0.500  0.500 -0.500  0.500

    ``f.demean(mask=m, groupby=c)`` will also subtract the group-mean of
    AAPL/MSFT and MCD/BK, but means will be calculated ignoring values on
    the diagonal, and NaNs will be written to the diagonal in the output.

    ::

                     AAPL   MSFT    MCD     BK
        2017-03-13    NaN  0.000 -0.500  0.500
        2017-03-14  0.000    NaN  1.250 -1.250
        2017-03-15 -0.500  0.500    NaN  0.000
        2017-03-16 -0.500  0.500  0.000    NaN

    Notes
    -----
    Mean is sensitive to the magnitudes of outliers. When working with a
    factor that can potentially produce large outliers, it is often useful
    to use the ``mask`` parameter to discard values at the extremes of the
    distribution::

        >>> base = MyFactor(...)  # doctest: +SKIP
        >>> normalized = base.demean(
        ...     mask=base.percentile_between(1, 99),
        ... )  # doctest: +SKIP

    ``demean()`` is only supported on Factors of dtype float64.

    See Also
    --------
    :meth:`pandas.DataFrame.groupby`
    """
    return GroupedRowTransform(
        transform=demean,
        transform_args=(),
        factor=self,
        groupby=groupby,
        dtype=self.dtype,
        missing_value=self.missing_value,
        window_safe=self.window_safe,
        mask=mask,
    )
```
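A standalone numpy sketch of the group-wise row demeaning that the `f.demean(groupby=c)` table illustrates (first row of the example):

```python
import numpy as np

row = np.array([1.0, 2.0, 3.0, 4.0])  # AAPL, MSFT, MCD, BK on 2017-03-13
groups = np.array([1, 1, 2, 2])       # output of the classifier ``c``

out = np.empty_like(row)
for g in np.unique(groups):
    member = groups == g
    out[member] = row[member] - row[member].mean()

print(out)  # [-0.5  0.5 -0.5  0.5] -- matches the docstring table
```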
google/prettytensor | prettytensor/layers.py | https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/layers.py#L84-L116 | python | train

```python
def he_init(n_inputs, n_outputs, activation_fn, uniform=True):
    """Sets the parameter initialization using the method described.

    This method is designed to keep the scale of the gradients roughly the same
    in all layers with ReLU activations.

    He et al. (2015):
        Delving deep into rectifiers: surpassing human-level performance on
        imageNet classification. International Conference on Computer Vision.

    For activations other than ReLU and ReLU6, this method uses Xavier
    initialization as in xavier_init().

    Args:
        n_inputs: The number of input nodes into each output.
        n_outputs: The number of output nodes for each input.
        activation_fn: Activation function used in this layer.
        uniform: If uniform distribution will be used for Xavier initialization.
            Normal distribution will be used if False.

    Returns:
        An initializer.
    """
    def in_relu_family(activation_fn):
        if isinstance(activation_fn, collections.Sequence):
            activation_fn = activation_fn[0]
        return activation_fn in (tf.nn.relu, tf.nn.relu6)

    if in_relu_family(activation_fn):
        stddev = math.sqrt(2.0 / n_inputs)
        # TODO(): Evaluates truncated_normal_initializer.
        return tf.random_normal_initializer(stddev=stddev)
    else:
        return xavier_init(n_inputs, n_outputs, uniform)
```
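The He standard deviation reduces to `sqrt(2 / n_inputs)`; a quick standalone check with an assumed fan-in:

```python
import math

n_inputs = 512                      # hypothetical layer fan-in
stddev = math.sqrt(2.0 / n_inputs)  # He et al. (2015) scale for ReLU layers
print(stddev)                       # 0.0625
```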
Miserlou/Zappa | zappa/core.py | https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/core.py#L2633-L2771 | python | train

```python
def schedule_events(self, lambda_arn, lambda_name, events, default=True):
    """
    Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.

    'events' is a list of dictionaries, where the dict must contain the string
    of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.

    Expressions can be in rate or cron format:
        http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
    """
    # The stream sources - DynamoDB, Kinesis and SQS - are working differently than the other services (pull vs push)
    # and do not require event permissions. They do require additional permissions on the Lambda roles though.
    # http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
    pull_services = ['dynamodb', 'kinesis', 'sqs']

    # XXX: Not available in Lambda yet.
    # We probably want to execute the latest code.
    # if default:
    #     lambda_arn = lambda_arn + ":$LATEST"

    self.unschedule_events(lambda_name=lambda_name, lambda_arn=lambda_arn, events=events,
                           excluded_source_services=pull_services)
    for event in events:
        function = event['function']
        expression = event.get('expression', None)  # single expression
        expressions = event.get('expressions', None)  # multiple expression
        kwargs = event.get('kwargs', {})  # optional dict of keyword arguments for the event
        event_source = event.get('event_source', None)
        description = event.get('description', function)

        # - If 'cron' or 'rate' in expression, use ScheduleExpression
        # - Else, use EventPattern
        #   - ex https://github.com/awslabs/aws-lambda-ddns-function
        if not self.credentials_arn:
            self.get_credentials_arn()

        if expression:
            expressions = [expression]  # same code for single and multiple expression

        if expressions:
            for index, expression in enumerate(expressions):
                name = self.get_scheduled_event_name(event, function, lambda_name, index)
                # if it's possible that we truncated name, generate a unique, shortened name
                # https://github.com/Miserlou/Zappa/issues/970
                if len(name) >= 64:
                    rule_name = self.get_hashed_rule_name(event, function, lambda_name)
                else:
                    rule_name = name

                rule_response = self.events_client.put_rule(
                    Name=rule_name,
                    ScheduleExpression=expression,
                    State='ENABLED',
                    Description=description,
                    RoleArn=self.credentials_arn
                )

                if 'RuleArn' in rule_response:
                    logger.debug('Rule created. ARN {}'.format(rule_response['RuleArn']))

                # Specific permissions are necessary for any trigger to work.
                self.create_event_permission(lambda_name, 'events.amazonaws.com', rule_response['RuleArn'])

                # Overwriting the input, supply the original values and add kwargs
                input_template = '{"time": <time>, ' \
                                 '"detail-type": <detail-type>, ' \
                                 '"source": <source>,' \
                                 '"account": <account>, ' \
                                 '"region": <region>,' \
                                 '"detail": <detail>, ' \
                                 '"version": <version>,' \
                                 '"resources": <resources>,' \
                                 '"id": <id>,' \
                                 '"kwargs": %s' \
                                 '}' % json.dumps(kwargs)

                # Create the CloudWatch event ARN for this function.
                # https://github.com/Miserlou/Zappa/issues/359
                target_response = self.events_client.put_targets(
                    Rule=rule_name,
                    Targets=[
                        {
                            'Id': 'Id' + ''.join(random.choice(string.digits) for _ in range(12)),
                            'Arn': lambda_arn,
                            'InputTransformer': {
                                'InputPathsMap': {
                                    'time': '$.time',
                                    'detail-type': '$.detail-type',
                                    'source': '$.source',
                                    'account': '$.account',
                                    'region': '$.region',
                                    'detail': '$.detail',
                                    'version': '$.version',
                                    'resources': '$.resources',
                                    'id': '$.id'
                                },
                                'InputTemplate': input_template
                            }
                        }
                    ]
                )

                if target_response['ResponseMetadata']['HTTPStatusCode'] == 200:
                    print("Scheduled {} with expression {}!".format(rule_name, expression))
                else:
                    print("Problem scheduling {} with expression {}.".format(rule_name, expression))

        elif event_source:
            service = self.service_from_arn(event_source['arn'])

            if service not in pull_services:
                svc = ','.join(event['event_source']['events'])
                self.create_event_permission(
                    lambda_name,
                    service + '.amazonaws.com',
                    event['event_source']['arn']
                )
            else:
                svc = service

            rule_response = add_event_source(
                event_source,
                lambda_arn,
                function,
                self.boto_session
            )

            if rule_response == 'successful':
                print("Created {} event schedule for {}!".format(svc, function))
            elif rule_response == 'failed':
                print("Problem creating {} event schedule for {}!".format(svc, function))
            elif rule_response == 'exists':
                print("{} event schedule for {} already exists - Nothing to do here.".format(svc, function))
            elif rule_response == 'dryrun':
                print("Dryrun for creating {} event schedule for {}!!".format(svc, function))

        else:
            print("Could not create event {} - Please define either an expression or an event source".format(name))
```
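The shape of the `events` argument, inferred from the keys the method reads (`function`, `expression`/`expressions`, optional `kwargs`, `event_source`); every concrete value below is illustrative:

```python
events = [
    {   # schedule-driven event
        "function": "mymodule.send_digest",   # hypothetical handler path
        "expression": "rate(1 hour)",
        "kwargs": {"batch_size": 100},
    },
    {   # push-style event source
        "function": "mymodule.on_upload",
        "event_source": {
            "arn": "arn:aws:s3:::my-bucket",  # assumed bucket ARN
            "events": ["s3:ObjectCreated:*"],
        },
    },
]
```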
sanger-pathogens/Fastaq | pyfastaq/intervals.py | https://github.com/sanger-pathogens/Fastaq/blob/2c775c846d2491678a9637daa320592e02c26c72/pyfastaq/intervals.py#L60-L65 | python | valid

```python
def intersection(self, i):
    '''If intervals intersect, returns their intersection, otherwise returns None'''
    if self.intersects(i):
        return Interval(max(self.start, i.start), min(self.end, i.end))
    else:
        return None
```
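A quick usage sketch, assuming the `Interval` constructor and `intersects` behave as in the rest of the pyfastaq intervals module:

```python
from pyfastaq.intervals import Interval

a = Interval(1, 5)
print(a.intersection(Interval(3, 8)))  # the overlap, i.e. Interval(3, 5)
print(a.intersection(Interval(7, 9)))  # None -- the intervals are disjoint
```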
IRC-SPHERE/HyperStream | hyperstream/utils/time_utils.py | https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/utils/time_utils.py#L90-L98 | python | train

```python
def reconstruct_interval(experiment_id):
    """
    Reverse the construct_experiment_id operation

    :param experiment_id: The experiment id
    :return: time interval
    """
    start, end = map(lambda x: udatetime.utcfromtimestamp(x / 1000.0), map(float, experiment_id.split("-")))
    from ..time_interval import TimeInterval
    return TimeInterval(start, end)
```
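The id is two epoch-millisecond timestamps joined by a dash, so the round trip looks like this (sketch; assumes HyperStream is importable):

```python
# "0-86400000" encodes 1970-01-01T00:00:00Z .. 1970-01-02T00:00:00Z
interval = reconstruct_interval("0-86400000")
```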
SavinaRoja/OpenAccess_EPUB | src/openaccess_epub/utils/__init__.py | https://github.com/SavinaRoja/OpenAccess_EPUB/blob/6b77ba30b7394fd003920e7a7957bca963a90656/src/openaccess_epub/utils/__init__.py#L167-L176 | python | train

```python
def get_absolute_path(some_path):
    """
    This function will return an appropriate absolute path for the path it is
    given. If the input is absolute, it will be returned unmodified; if the
    input is relative, it will be rendered as relative to the current working
    directory.
    """
    if os.path.isabs(some_path):
        return some_path
    else:
        return evaluate_relative_path(os.getcwd(), some_path)
```
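Expected behavior, sketched with assumed inputs (`evaluate_relative_path` is defined elsewhere in the same module):

```python
get_absolute_path('/usr/share/data')  # absolute input -> returned unchanged
get_absolute_path('images/fig1.png')  # relative input -> resolved against os.getcwd()
```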
treycucco/bidon | bidon/util/__init__.py | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/util/__init__.py#L67-L75 | python | train

```python
def json_default(obj):
    """Convert an object to JSON, via the defaults set with register_json_default.

    :obj: the object to convert
    """
    for default in _JSON_DEFAULTS:
        if default[0](obj):
            return default[1](obj)
    raise TypeError(repr(obj) + " is not JSON serializable")
```
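The registered pairs are read as `(predicate, converter)` tuples, so a plausible hookup looks like this; the `register_json_default` argument order is inferred from how `_JSON_DEFAULTS` entries are indexed above, not confirmed:

```python
import json
from datetime import date

# Assumed registration API: predicate first, converter second.
register_json_default(lambda o: isinstance(o, date), lambda o: o.isoformat())

json.dumps({"when": date(2020, 1, 1)}, default=json_default)
# -> '{"when": "2020-01-01"}'
```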
alvarogzp/telegram-bot-framework | bot/multithreading/worker/pool/workers/limited_lifespan.py | https://github.com/alvarogzp/telegram-bot-framework/blob/7b597a415c1901901c677976cb13100fc3083107/bot/multithreading/worker/pool/workers/limited_lifespan.py#L21-L34 | python | train

```python
def _get_and_execute(self):
    """
    :return: True if it should continue running, False if it should end its execution.
    """
    try:
        work = self.queue.get(timeout=self.max_seconds_idle)
    except queue.Empty:
        # max_seconds_idle has been exhausted, exiting
        self.end_notify()
        return False
    else:
        self._work(work)
        self.queue.task_done()
        return True
```
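The boolean return value is built for a driver loop of roughly this shape (sketch; the real worker's run loop lives elsewhere in the package):

```python
def run(self):
    # Keep pulling work until the idle timeout tells this worker to retire.
    while self._get_and_execute():
        pass
```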
peerplays-network/python-peerplays | peerplaysapi/websocket.py | https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplaysapi/websocket.py#L216-L263 | python | train

```python
def on_message(self, ws, reply, *args):
    """ This method is called by the websocket connection on every
        message that is received. If we receive a ``notice``, we
        hand over post-processing and signalling of events to
        ``process_notice``.
    """
    log.debug("Received message: %s" % str(reply))
    data = {}
    try:
        data = json.loads(reply, strict=False)
    except ValueError:
        raise ValueError("API node returned invalid format. Expected JSON!")

    if data.get("method") == "notice":
        id = data["params"][0]

        if id >= len(self.__events__):
            log.critical("Received an id that is out of range\n\n" + str(data))
            return

        # This is a "general" object change notification
        if id == self.__events__.index("on_object"):
            # Let's see if a specific object has changed
            for notice in data["params"][1]:
                try:
                    if "id" in notice:
                        self.process_notice(notice)
                    else:
                        for obj in notice:
                            if "id" in obj:
                                self.process_notice(obj)
                except Exception as e:
                    log.critical(
                        "Error in process_notice: {}\n\n{}".format(
                            str(e), traceback.format_exc()
                        )
                    )
        else:
            try:
                callbackname = self.__events__[id]
                log.info("Patching through to call %s" % callbackname)
                [getattr(self.events, callbackname)(x) for x in data["params"][1]]
            except Exception as e:
                log.critical(
                    "Error in {}: {}\n\n{}".format(
                        callbackname, str(e), traceback.format_exc()
                    )
                )
```
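The notice frame this handler expects, reconstructed from the fields it reads (`method`, `params[0]` as the subscription id, `params[1]` as the changed objects); the payload values are illustrative:

```python
reply = (
    '{"method": "notice",'
    ' "params": [0, [[{"id": "2.1.0", "head_block_number": 1000000}]]]}'
)
# params[0] selects the registered callback; each element of params[1]
# is either an object with an "id" or a list of such objects.
```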
chaoss/grimoirelab-elk | grimoire_elk/enriched/git.py | https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/grimoire_elk/enriched/git.py#L809-L842 | python | train

```python
def remove_commits(self, items, index, attribute, origin):
    """Delete documents that correspond to commits deleted in the Git repository

    :param items: target items to be deleted
    :param index: target index
    :param attribute: name of the term attribute to search items
    :param origin: name of the origin from where the items must be deleted
    """
    es_query = '''
        {
          "query": {
            "bool": {
              "must": {
                "term": {
                  "origin": "%s"
                }
              },
              "filter": {
                "terms": {
                  "%s": [%s]
                }
              }
            }
          }
        }
        ''' % (origin, attribute, ",".join(['"%s"' % i for i in items]))

    r = self.requests.post(index + "/_delete_by_query?refresh", data=es_query, headers=HEADER_JSON, verify=False)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as ex:
        logger.error("Error updating deleted commits for %s.", self.elastic.anonymize_url(index))
        logger.error(r.text)
        return
```
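What the rendered delete-by-query body looks like for a sample call (illustrative hashes and origin):

```python
# remove_commits(items=["a1b2c3", "d4e5f6"], index="git_enriched",
#                attribute="hash", origin="https://github.com/org/repo.git")
# renders es_query as:
{
    "query": {
        "bool": {
            "must": {"term": {"origin": "https://github.com/org/repo.git"}},
            "filter": {"terms": {"hash": ["a1b2c3", "d4e5f6"]}},
        }
    }
}
```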
praekelt/django-analytics | analytics/geckoboard_views.py | https://github.com/praekelt/django-analytics/blob/29c22d03374ccc0ec451650e2c2886d324f6e5c6/analytics/geckoboard_views.py#L199-L215 | python | test

```python
def geckoboard_funnel(request, frequency=settings.STATISTIC_FREQUENCY_DAILY):
    """
    Returns a funnel chart for the metrics specified in the GET variables.
    """
    # get all the parameters for this function
    params = get_gecko_params(request, cumulative=True)
    metrics = Metric.objects.filter(uid__in=params['uids'])
    items = [(metric.latest_count(frequency=params['frequency'], count=not params['cumulative'],
                                  cumulative=params['cumulative']), metric.title) for metric in metrics]
    return {
        'items': items,
        'type': params['type'],
        'percentage': params['percentage'],
        'sort': params['sort'],
    }
```
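For two metrics the returned payload would look roughly like this; the `type`, `percentage`, and `sort` values depend on `get_gecko_params`, so they are assumptions:

```python
{
    "items": [(1500, "Sign-ups"), (640, "Activated")],  # (count, title) pairs
    "type": "standard",    # assumed value from the ?type= GET parameter
    "percentage": "show",  # assumed
    "sort": False,         # assumed
}
```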
graphql-python/graphql-core-next | graphql/validation/validate.py | https://github.com/graphql-python/graphql-core-next/blob/073dce3f002f897d40f9348ffd8f107815160540/graphql/validation/validate.py#L15-L54 | python

```python
def validate(
    schema: GraphQLSchema,
    document_ast: DocumentNode,
    rules: Sequence[RuleType] = None,
    type_info: TypeInfo = None,
) -> List[GraphQLError]:
    """Implements the "Validation" section of the spec.

    Validation runs synchronously, returning a list of encountered errors, or an empty
    list if no errors were encountered and the document is valid.

    A list of specific validation rules may be provided. If not provided, the default
    list of rules defined by the GraphQL specification will be used.

    Each validation rule is a ValidationRule object which is a visitor object that holds
    a ValidationContext (see the language/visitor API). Visitor methods are expected to
    return GraphQLErrors, or lists of GraphQLErrors when invalid.

    Optionally a custom TypeInfo instance may be provided. If not provided, one will be
    created from the provided schema.
    """
    if not document_ast or not isinstance(document_ast, DocumentNode):
        raise TypeError("You must provide a document node.")
    # If the schema used for validation is invalid, throw an error.
    assert_valid_schema(schema)
    if type_info is None:
        type_info = TypeInfo(schema)
    elif not isinstance(type_info, TypeInfo):
        raise TypeError(f"Not a TypeInfo object: {inspect(type_info)}")
    if rules is None:
        rules = specified_rules
    elif not isinstance(rules, (list, tuple)):
        raise TypeError("Rules must be passed as a list/tuple.")
    context = ValidationContext(schema, document_ast, type_info)
    # This uses a specialized visitor which runs multiple visitors in parallel,
    # while maintaining the visitor skip and break API.
    visitors = [rule(context) for rule in rules]
    # Visit the whole document with each instance of all provided rules.
    visit(document_ast, TypeInfoVisitor(type_info, ParallelVisitor(visitors)))
return context.errors | [
"def",
"validate",
"(",
"schema",
":",
"GraphQLSchema",
",",
"document_ast",
":",
"DocumentNode",
",",
"rules",
":",
"Sequence",
"[",
"RuleType",
"]",
"=",
"None",
",",
"type_info",
":",
"TypeInfo",
"=",
"None",
",",
")",
"->",
"List",
"[",
"GraphQLError",
"]",
":",
"if",
"not",
"document_ast",
"or",
"not",
"isinstance",
"(",
"document_ast",
",",
"DocumentNode",
")",
":",
"raise",
"TypeError",
"(",
"\"You must provide a document node.\"",
")",
"# If the schema used for validation is invalid, throw an error.",
"assert_valid_schema",
"(",
"schema",
")",
"if",
"type_info",
"is",
"None",
":",
"type_info",
"=",
"TypeInfo",
"(",
"schema",
")",
"elif",
"not",
"isinstance",
"(",
"type_info",
",",
"TypeInfo",
")",
":",
"raise",
"TypeError",
"(",
"f\"Not a TypeInfo object: {inspect(type_info)}\"",
")",
"if",
"rules",
"is",
"None",
":",
"rules",
"=",
"specified_rules",
"elif",
"not",
"isinstance",
"(",
"rules",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Rules must be passed as a list/tuple.\"",
")",
"context",
"=",
"ValidationContext",
"(",
"schema",
",",
"document_ast",
",",
"type_info",
")",
"# This uses a specialized visitor which runs multiple visitors in parallel,",
"# while maintaining the visitor skip and break API.",
"visitors",
"=",
"[",
"rule",
"(",
"context",
")",
"for",
"rule",
"in",
"rules",
"]",
"# Visit the whole document with each instance of all provided rules.",
"visit",
"(",
"document_ast",
",",
"TypeInfoVisitor",
"(",
"type_info",
",",
"ParallelVisitor",
"(",
"visitors",
")",
")",
")",
"return",
"context",
".",
"errors"
] | Implements the "Validation" section of the spec.
Validation runs synchronously, returning a list of encountered errors, or an empty
list if no errors were encountered and the document is valid.
A list of specific validation rules may be provided. If not provided, the default
list of rules defined by the GraphQL specification will be used.
Each validation rule is a ValidationRule object which is a visitor object that holds
a ValidationContext (see the language/visitor API). Visitor methods are expected to
return GraphQLErrors, or lists of GraphQLErrors when invalid.
Optionally a custom TypeInfo instance may be provided. If not provided, one will be
created from the provided schema. | [
"Implements",
"the",
"Validation",
"section",
"of",
"the",
"spec",
"."
] | python | train |
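A quick usage sketch for the validator above, assuming the package re-exports `build_schema`, `parse` and `validate` at the top level (as graphql-core 3 does):

```python
from graphql import build_schema, parse, validate

schema = build_schema("""
type Query {
  hello: String
}
""")

# A valid query yields an empty error list; an unknown field yields one error.
print(validate(schema, parse("{ hello }")))    # []
print(validate(schema, parse("{ goodbye }")))  # [GraphQLError("Cannot query field ...")]
```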
rigetti/pyquil | pyquil/api/_quantum_computer.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_quantum_computer.py#L122-L132 | def get_isa(self, oneq_type: str = 'Xhalves',
twoq_type: str = 'CZ') -> ISA:
"""
Return a target ISA for this QuantumComputer's device.
See :py:func:`AbstractDevice.get_isa` for more.
:param oneq_type: The family of one-qubit gates to target
:param twoq_type: The family of two-qubit gates to target
"""
return self.device.get_isa(oneq_type=oneq_type, twoq_type=twoq_type) | [
"def",
"get_isa",
"(",
"self",
",",
"oneq_type",
":",
"str",
"=",
"'Xhalves'",
",",
"twoq_type",
":",
"str",
"=",
"'CZ'",
")",
"->",
"ISA",
":",
"return",
"self",
".",
"device",
".",
"get_isa",
"(",
"oneq_type",
"=",
"oneq_type",
",",
"twoq_type",
"=",
"twoq_type",
")"
] | Return a target ISA for this QuantumComputer's device.
See :py:func:`AbstractDevice.get_isa` for more.
:param oneq_type: The family of one-qubit gates to target
:param twoq_type: The family of two-qubit gates to target | [
"Return",
"a",
"target",
"ISA",
"for",
"this",
"QuantumComputer",
"s",
"device",
"."
] | python | train |
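A hedged usage sketch for `get_isa`, assuming local QVM/quilc servers are running and that `9q-square-qvm` is available as a stock simulated device name:

```python
from pyquil import get_qc

# Sketch only: needs the QVM and quilc servers running locally.
qc = get_qc("9q-square-qvm")
isa = qc.get_isa(oneq_type="Xhalves", twoq_type="CZ")
print(isa)
```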
bitesofcode/projexui | projexui/widgets/xmultitagedit.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xmultitagedit.py#L622-L633 | def setEditable(self, state):
"""
Sets whether or not the user can edit the items in the list by
typing.
:param state | <bool>
"""
self._editable = state
if state:
self.setEditTriggers(self.AllEditTriggers)
else:
self.setEditTriggers(self.NoEditTriggers) | [
"def",
"setEditable",
"(",
"self",
",",
"state",
")",
":",
"self",
".",
"_editable",
"=",
"state",
"if",
"state",
":",
"self",
".",
"setEditTriggers",
"(",
"self",
".",
"AllEditTriggers",
")",
"else",
":",
"self",
".",
"setEditTriggers",
"(",
"self",
".",
"NoEditTriggers",
")"
] | Sets whether or not the user can edit the items in the list by
typing.
:param state | <bool> | [
"Sets",
"whether",
"or",
"not",
"the",
"user",
"can",
"edit",
"the",
"items",
"in",
"the",
"list",
"by",
"typing",
".",
":",
"param",
"state",
"|",
"<bool",
">"
] | python | train |
assamite/creamas | creamas/vote.py | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/vote.py#L335-L366 | def validate_candidates(self):
"""Validate current candidates.
This method validates the current candidate list in all the agents
in the environment (or underlying slave environments) and replaces
the current :attr:`candidates` with the list of validated candidates.
The artifact candidates must be hashable and have a :meth:`__eq__`
implemented for validation to work on multi-environments and
distributed environments.
"""
async def slave_task(addr, candidates):
r_manager = await self.env.connect(addr)
return await r_manager.validate_candidates(candidates)
self._log(logging.DEBUG, "Validating {} candidates"
.format(len(self.candidates)))
candidates = self.candidates
if self._single_env:
self._candidates = self.env.validate_candidates(candidates)
else:
mgrs = self.get_managers()
tasks = create_tasks(slave_task, mgrs, candidates, flatten=False)
rets = run(tasks)
valid_candidates = set(self.candidates)
for r in rets:
valid_candidates = valid_candidates.intersection(set(r))
self._candidates = list(valid_candidates)
self._log(logging.DEBUG, "{} candidates after validation"
.format(len(self.candidates))) | [
"def",
"validate_candidates",
"(",
"self",
")",
":",
"async",
"def",
"slave_task",
"(",
"addr",
",",
"candidates",
")",
":",
"r_manager",
"=",
"await",
"self",
".",
"env",
".",
"connect",
"(",
"addr",
")",
"return",
"await",
"r_manager",
".",
"validate_candidates",
"(",
"candidates",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"DEBUG",
",",
"\"Validating {} candidates\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"candidates",
")",
")",
")",
"candidates",
"=",
"self",
".",
"candidates",
"if",
"self",
".",
"_single_env",
":",
"self",
".",
"_candidates",
"=",
"self",
".",
"env",
".",
"validate_candidates",
"(",
"candidates",
")",
"else",
":",
"mgrs",
"=",
"self",
".",
"get_managers",
"(",
")",
"tasks",
"=",
"create_tasks",
"(",
"slave_task",
",",
"mgrs",
",",
"candidates",
",",
"flatten",
"=",
"False",
")",
"rets",
"=",
"run",
"(",
"tasks",
")",
"valid_candidates",
"=",
"set",
"(",
"self",
".",
"candidates",
")",
"for",
"r",
"in",
"rets",
":",
"valid_candidates",
"=",
"valid_candidates",
".",
"intersection",
"(",
"set",
"(",
"r",
")",
")",
"self",
".",
"_candidates",
"=",
"list",
"(",
"valid_candidates",
")",
"self",
".",
"_log",
"(",
"logging",
".",
"DEBUG",
",",
"\"{} candidates after validation\"",
".",
"format",
"(",
"len",
"(",
"self",
".",
"candidates",
")",
")",
")"
] | Validate current candidates.
This method validates the current candidate list in all the agents
in the environment (or underlying slave environments) and replaces
the current :attr:`candidates` with the list of validated candidates.
The artifact candidates must be hashable and have a :meth:`__eq__`
implemented for validation to work on multi-environments and
distributed environments. | [
"Validate",
"current",
"candidates",
"."
] | python | train |
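The distributed branch above reduces to a set intersection over the slave replies; a self-contained sketch of that step with hypothetical candidate IDs:

```python
# Each slave environment returns the subset of candidates it accepts;
# a candidate survives only if every slave accepted it.
candidates = ["a1", "a2", "a3", "a4"]
slave_replies = [
    ["a1", "a2", "a3"],  # slave 1 rejects a4
    ["a2", "a3", "a4"],  # slave 2 rejects a1
]

valid = set(candidates)
for reply in slave_replies:
    valid = valid.intersection(reply)
print(sorted(valid))  # ['a2', 'a3']
```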
sprockets/sprockets.mixins.metrics | sprockets/mixins/metrics/statsd.py | https://github.com/sprockets/sprockets.mixins.metrics/blob/0b17d5f0c09a2be9db779e17e6789d3d5ff9a0d0/sprockets/mixins/metrics/statsd.py#L135-L140 | async def _tcp_on_closed(self):
"""Invoked when the socket is closed."""
LOGGER.warning('Not connected to statsd, connecting in %s seconds',
self._tcp_reconnect_sleep)
await asyncio.sleep(self._tcp_reconnect_sleep)
self._sock = self._tcp_socket() | [
"async",
"def",
"_tcp_on_closed",
"(",
"self",
")",
":",
"LOGGER",
".",
"warning",
"(",
"'Not connected to statsd, connecting in %s seconds'",
",",
"self",
".",
"_tcp_reconnect_sleep",
")",
"await",
"asyncio",
".",
"sleep",
"(",
"self",
".",
"_tcp_reconnect_sleep",
")",
"self",
".",
"_sock",
"=",
"self",
".",
"_tcp_socket",
"(",
")"
] | Invoked when the socket is closed. | [
"Invoked",
"when",
"the",
"socket",
"is",
"closed",
"."
] | python | train |
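A self-contained sketch of the same reconnect-on-close pattern, with the socket factory faked and the backoff shortened so it runs anywhere:

```python
import asyncio

RECONNECT_SLEEP = 0.1  # hypothetical backoff; the mixin reads it from config

async def on_closed(make_socket):
    # Wait out the backoff, then rebuild the connection.
    print("Not connected, reconnecting in %s seconds" % RECONNECT_SLEEP)
    await asyncio.sleep(RECONNECT_SLEEP)
    return make_socket()

print(asyncio.run(on_closed(lambda: "new-socket")))
```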
LEMS/pylems | lems/sim/build.py | https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/sim/build.py#L816-L844 | def build_on_event(self, runnable, regime, on_event):
"""
Build OnEvent event handler code.
@param on_event: OnEvent event handler object
@type on_event: lems.model.dynamics.OnEvent
@return: Generated OnEvent code
@rtype: list(string)
"""
on_event_code = []
if self.debug: on_event_code += ['print("Maybe handling something for %s ("+str(id(self))+")")'%(runnable.id),
'print("EICs ("+str(id(self))+"): "+str(self.event_in_counters))']
on_event_code += ['count = self.event_in_counters[\'{0}\']'.\
format(on_event.port),
'while count > 0:',
' print(" Handling event")' if self.debug else '',
' count -= 1']
for action in on_event.actions:
code = self.build_action(runnable, regime, action)
for line in code:
on_event_code += [' ' + line]
on_event_code += ['self.event_in_counters[\'{0}\'] = 0'.\
format(on_event.port),]
return on_event_code | [
"def",
"build_on_event",
"(",
"self",
",",
"runnable",
",",
"regime",
",",
"on_event",
")",
":",
"on_event_code",
"=",
"[",
"]",
"if",
"self",
".",
"debug",
":",
"on_event_code",
"+=",
"[",
"'print(\"Maybe handling something for %s (\"+str(id(self))+\")\")'",
"%",
"(",
"runnable",
".",
"id",
")",
",",
"'print(\"EICs (\"+str(id(self))+\"): \"+str(self.event_in_counters))'",
"]",
"on_event_code",
"+=",
"[",
"'count = self.event_in_counters[\\'{0}\\']'",
".",
"format",
"(",
"on_event",
".",
"port",
")",
",",
"'while count > 0:'",
",",
"' print(\" Handling event\")'",
"if",
"self",
".",
"debug",
"else",
"''",
",",
"' count -= 1'",
"]",
"for",
"action",
"in",
"on_event",
".",
"actions",
":",
"code",
"=",
"self",
".",
"build_action",
"(",
"runnable",
",",
"regime",
",",
"action",
")",
"for",
"line",
"in",
"code",
":",
"on_event_code",
"+=",
"[",
"' '",
"+",
"line",
"]",
"on_event_code",
"+=",
"[",
"'self.event_in_counters[\\'{0}\\'] = 0'",
".",
"format",
"(",
"on_event",
".",
"port",
")",
",",
"]",
"return",
"on_event_code"
] | Build OnEvent event handler code.
@param on_event: OnEvent event handler object
@type on_event: lems.model.dynamics.OnEvent
@return: Generated OnEvent code
@rtype: list(string) | [
"Build",
"OnEvent",
"event",
"handler",
"code",
"."
] | python | train |
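To make the generator concrete: for a hypothetical OnEvent on port `spike` whose single action compiles to `self.v = 0`, with debug disabled, the method above emits roughly these lines (the empty string is the skipped debug print; exact indentation widths are collapsed in this dump):

```python
# Reconstructed from build_on_event above for port 'spike' and one
# action line 'self.v = 0' (self.debug is False).
generated = [
    "count = self.event_in_counters['spike']",
    "while count > 0:",
    "",                # debug print slot, empty when self.debug is False
    "    count -= 1",
    "    self.v = 0",
    "self.event_in_counters['spike'] = 0",
]
print("\n".join(generated))
```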
spookylukey/django-paypal | paypal/pro/creditcard.py | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/creditcard.py#L61-L66 | def get_type(self):
"""Return the type if it matches one of the cards."""
for card, pattern in CARDS.items():
if pattern.match(self.number):
return card
return None | [
"def",
"get_type",
"(",
"self",
")",
":",
"for",
"card",
",",
"pattern",
"in",
"CARDS",
".",
"items",
"(",
")",
":",
"if",
"pattern",
".",
"match",
"(",
"self",
".",
"number",
")",
":",
"return",
"card",
"return",
"None"
] | Return the type if it matches one of the cards. | [
"Return",
"the",
"type",
"if",
"it",
"matches",
"one",
"of",
"the",
"cards",
"."
] | python | train |
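A self-contained mimic of `get_type`; the two-entry `CARDS` table here is hypothetical, not django-paypal's actual mapping:

```python
import re

CARDS = {
    "visa": re.compile(r"^4\d{12}(\d{3})?$"),
    "mastercard": re.compile(r"^5[1-5]\d{14}$"),
}

def get_type(number):
    """Return the matching card type, or None."""
    for card, pattern in CARDS.items():
        if pattern.match(number):
            return card
    return None

print(get_type("4111111111111111"))  # visa
print(get_type("1234"))              # None
```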
iotile/coretools | iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py | https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/docbook/__init__.py#L198-L213 | def _detect(env):
"""
Detect all the command line tools that we might need for creating
the requested output formats.
"""
global prefer_xsltproc
if env.get('DOCBOOK_PREFER_XSLTPROC',''):
prefer_xsltproc = True
if ((not has_libxml2 and not has_lxml) or (prefer_xsltproc)):
# Try to find the XSLT processors
__detect_cl_tool(env, 'DOCBOOK_XSLTPROC', xsltproc_com, xsltproc_com_priority)
__detect_cl_tool(env, 'DOCBOOK_XMLLINT', xmllint_com)
__detect_cl_tool(env, 'DOCBOOK_FOP', fop_com, ['fop','xep','jw']) | [
"def",
"_detect",
"(",
"env",
")",
":",
"global",
"prefer_xsltproc",
"if",
"env",
".",
"get",
"(",
"'DOCBOOK_PREFER_XSLTPROC'",
",",
"''",
")",
":",
"prefer_xsltproc",
"=",
"True",
"if",
"(",
"(",
"not",
"has_libxml2",
"and",
"not",
"has_lxml",
")",
"or",
"(",
"prefer_xsltproc",
")",
")",
":",
"# Try to find the XSLT processors",
"__detect_cl_tool",
"(",
"env",
",",
"'DOCBOOK_XSLTPROC'",
",",
"xsltproc_com",
",",
"xsltproc_com_priority",
")",
"__detect_cl_tool",
"(",
"env",
",",
"'DOCBOOK_XMLLINT'",
",",
"xmllint_com",
")",
"__detect_cl_tool",
"(",
"env",
",",
"'DOCBOOK_FOP'",
",",
"fop_com",
",",
"[",
"'fop'",
",",
"'xep'",
",",
"'jw'",
"]",
")"
] | Detect all the command line tools that we might need for creating
the requested output formats. | [
"Detect",
"all",
"the",
"command",
"line",
"tools",
"that",
"we",
"might",
"need",
"for",
"creating",
"the",
"requested",
"output",
"formats",
"."
] | python | train |
marcinmiklitz/pywindow | pywindow/trajectory.py | https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/trajectory.py#L68-L95 | def make_supercell(system, matrix, supercell=[1, 1, 1]):
"""
Return a supercell.
This functions takes the input unitcell and creates a supercell of it that
is returned as a new :class:`pywindow.molecular.MolecularSystem`.
Parameters
----------
system : :attr:`pywindow.molecular.MolecularSystem.system`
The unit cell for creation of the supercell
matrix : :class:`numpy.array`
The unit cell parameters in form of a lattice.
supercell : :class:`list`, optional
A list that specifies the size of the supercell in the a, b and c
direction. (default=[1, 1, 1])
Returns
-------
:class:`pywindow.molecular.MolecularSystem`
Returns the created supercell as a new :class:`MolecularSystem`.
"""
user_supercell = [[1, supercell[0]], [1, supercell[1]], [1, supercell[2]]]
system = create_supercell(system, matrix, supercell=user_supercell)
return MolecularSystem.load_system(system) | [
"def",
"make_supercell",
"(",
"system",
",",
"matrix",
",",
"supercell",
"=",
"[",
"1",
",",
"1",
",",
"1",
"]",
")",
":",
"user_supercell",
"=",
"[",
"[",
"1",
",",
"supercell",
"[",
"0",
"]",
"]",
",",
"[",
"1",
",",
"supercell",
"[",
"1",
"]",
"]",
",",
"[",
"1",
",",
"supercell",
"[",
"1",
"]",
"]",
"]",
"system",
"=",
"create_supercell",
"(",
"system",
",",
"matrix",
",",
"supercell",
"=",
"user_supercell",
")",
"return",
"MolecularSystem",
".",
"load_system",
"(",
"system",
")"
] | Return a supercell.
This function takes the input unit cell and creates a supercell of it that
is returned as a new :class:`pywindow.molecular.MolecularSystem`.
Parameters
----------
system : :attr:`pywindow.molecular.MolecularSystem.system`
The unit cell for creation of the supercell
matrix : :class:`numpy.array`
The unit cell parameters in form of a lattice.
supercell : :class:`list`, optional
A list that specifies the size of the supercell in the a, b and c
direction. (default=[1, 1, 1])
Returns
-------
:class:`pywindow.molecular.MolecularSystem`
Returns the created supercell as a new :class:`MolecularSystem`. | [
"Return",
"a",
"supercell",
"."
] | python | train |
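Note the indexing fix above: the c axis must read `supercell[2]` (the original source reused index 1, silently ignoring the third element). A standalone check of the corrected mapping:

```python
# Corrected mapping from supercell=[a, b, c] to per-axis repeat ranges.
supercell = [2, 3, 4]
user_supercell = [[1, supercell[0]], [1, supercell[1]], [1, supercell[2]]]
print(user_supercell)  # [[1, 2], [1, 3], [1, 4]]
```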
offu/WeRoBot | werobot/pay.py | https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/pay.py#L60-L86 | def create_js_pay_params(self, **package):
"""
签名 js 需要的参数
详情请参考 支付开发文档
::
wxclient.create_js_pay_params(
body=标题, out_trade_no=本地订单号, total_fee=价格单位分,
notify_url=通知url,
spbill_create_ip=建议为支付人ip,
)
:param package: 需要签名的的参数
:return: 支付需要的对象
"""
pay_param, sign, sign_type = self._pay_sign_dict(
package=self.create_js_pay_package(**package)
)
pay_param['paySign'] = sign
pay_param['signType'] = sign_type
# Tencent's JS side only accepts these keys when capitalized
for key in ['appId', 'timeStamp', 'nonceStr']:
pay_param[key] = str(pay_param.pop(key.lower()))
return pay_param | [
"def",
"create_js_pay_params",
"(",
"self",
",",
"*",
"*",
"package",
")",
":",
"pay_param",
",",
"sign",
",",
"sign_type",
"=",
"self",
".",
"_pay_sign_dict",
"(",
"package",
"=",
"self",
".",
"create_js_pay_package",
"(",
"*",
"*",
"package",
")",
")",
"pay_param",
"[",
"'paySign'",
"]",
"=",
"sign",
"pay_param",
"[",
"'signType'",
"]",
"=",
"sign_type",
"# 腾讯这个还得转成大写 JS 才认",
"for",
"key",
"in",
"[",
"'appId'",
",",
"'timeStamp'",
",",
"'nonceStr'",
"]",
":",
"pay_param",
"[",
"key",
"]",
"=",
"str",
"(",
"pay_param",
".",
"pop",
"(",
"key",
".",
"lower",
"(",
")",
")",
")",
"return",
"pay_param"
] | Sign the parameters needed by the JS payment API.
For details, see the WeChat payment development documentation.
::
wxclient.create_js_pay_params(
body=title, out_trade_no=local_order_no, total_fee=amount_in_fen,
notify_url=notify_url,
spbill_create_ip=payer_ip,
)
:param package: the parameters that need to be signed
:return: the object needed for the payment | [
"签名",
"js",
"需要的参数",
"详情请参考",
"支付开发文档"
] | python | train |
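A hedged usage sketch; `client` stands for a configured WeRoBot pay client, and every argument value is a placeholder:

```python
pay_params = client.create_js_pay_params(
    body="Test order",
    out_trade_no="20240101120000001",
    total_fee=1,                     # amount in fen (0.01 CNY)
    notify_url="https://example.com/wx/notify",
    spbill_create_ip="203.0.113.7",  # payer's IP is recommended
)
# The result carries paySign/signType plus capitalized appId, timeStamp
# and nonceStr keys, ready to hand to the WeChat JS bridge.
print(sorted(pay_params))
```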
klahnakoski/pyLibrary | jx_elasticsearch/es52/painless.py | https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/jx_elasticsearch/es52/painless.py#L118-L130 | def box(script):
"""
:param es_script:
:return: TEXT EXPRESSION WITH NON OBJECTS BOXED
"""
if script.type is BOOLEAN:
return "Boolean.valueOf(" + text_type(script.expr) + ")"
elif script.type is INTEGER:
return "Integer.valueOf(" + text_type(script.expr) + ")"
elif script.type is NUMBER:
return "Double.valueOf(" + text_type(script.expr) + ")"
else:
return script.expr | [
"def",
"box",
"(",
"script",
")",
":",
"if",
"script",
".",
"type",
"is",
"BOOLEAN",
":",
"return",
"\"Boolean.valueOf(\"",
"+",
"text_type",
"(",
"script",
".",
"expr",
")",
"+",
"\")\"",
"elif",
"script",
".",
"type",
"is",
"INTEGER",
":",
"return",
"\"Integer.valueOf(\"",
"+",
"text_type",
"(",
"script",
".",
"expr",
")",
"+",
"\")\"",
"elif",
"script",
".",
"type",
"is",
"NUMBER",
":",
"return",
"\"Double.valueOf(\"",
"+",
"text_type",
"(",
"script",
".",
"expr",
")",
"+",
"\")\"",
"else",
":",
"return",
"script",
".",
"expr"
] | :param es_script:
:return: TEXT EXPRESSION WITH NON OBJECTS BOXED | [
":",
"param",
"es_script",
":",
":",
"return",
":",
"TEXT",
"EXPRESSION",
"WITH",
"NON",
"OBJECTS",
"BOXED"
] | python | train |
troeger/opensubmit | web/opensubmit/cmdline.py | https://github.com/troeger/opensubmit/blob/384a95b7c6fa41e3f949a129d25dafd9a1c54859/web/opensubmit/cmdline.py#L97-L108 | def django_admin(args):
'''
Run something like it would be done through Django's manage.py.
'''
from django.core.management import execute_from_command_line
from django.core.exceptions import ImproperlyConfigured
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opensubmit.settings")
try:
execute_from_command_line([sys.argv[0]] + args)
except ImproperlyConfigured as e:
print(str(e))
exit(-1) | [
"def",
"django_admin",
"(",
"args",
")",
":",
"from",
"django",
".",
"core",
".",
"management",
"import",
"execute_from_command_line",
"from",
"django",
".",
"core",
".",
"exceptions",
"import",
"ImproperlyConfigured",
"os",
".",
"environ",
".",
"setdefault",
"(",
"\"DJANGO_SETTINGS_MODULE\"",
",",
"\"opensubmit.settings\"",
")",
"try",
":",
"execute_from_command_line",
"(",
"[",
"sys",
".",
"argv",
"[",
"0",
"]",
"]",
"+",
"args",
")",
"except",
"ImproperlyConfigured",
"as",
"e",
":",
"print",
"(",
"str",
"(",
"e",
")",
")",
"exit",
"(",
"-",
"1",
")"
] | Run something like it would be done through Django's manage.py. | [
"Run",
"something",
"like",
"it",
"would",
"be",
"done",
"through",
"Django",
"s",
"manage",
".",
"py",
"."
] | python | train |
wecatch/app-turbo | turbo/app.py | https://github.com/wecatch/app-turbo/blob/75faf97371a9a138c53f92168d0a486636cb8a9c/turbo/app.py#L307-L314 | def wo_resp(self, resp):
"""
can override for other style
"""
if self._data is not None:
resp['res'] = self.to_str(self._data)
return self.wo_json(resp) | [
"def",
"wo_resp",
"(",
"self",
",",
"resp",
")",
":",
"if",
"self",
".",
"_data",
"is",
"not",
"None",
":",
"resp",
"[",
"'res'",
"]",
"=",
"self",
".",
"to_str",
"(",
"self",
".",
"_data",
")",
"return",
"self",
".",
"wo_json",
"(",
"resp",
")"
] | can override for other style | [
"can",
"override",
"for",
"other",
"style"
] | python | train |
andychase/reparse | reparse/expression.py | https://github.com/andychase/reparse/blob/5f46cdd0fc4e239c0ddeca4b542e48a5ae95c508/reparse/expression.py#L47-L55 | def findall(self, string):
""" Parse string, returning all outputs as parsed by functions
"""
output = []
for match in self.pattern.findall(string):
if hasattr(match, 'strip'):
match = [match]
self._list_add(output, self.run(match))
return output | [
"def",
"findall",
"(",
"self",
",",
"string",
")",
":",
"output",
"=",
"[",
"]",
"for",
"match",
"in",
"self",
".",
"pattern",
".",
"findall",
"(",
"string",
")",
":",
"if",
"hasattr",
"(",
"match",
",",
"'strip'",
")",
":",
"match",
"=",
"[",
"match",
"]",
"self",
".",
"_list_add",
"(",
"output",
",",
"self",
".",
"run",
"(",
"match",
")",
")",
"return",
"output"
] | Parse string, returning all outputs as parsed by functions | [
"Parse",
"string",
"returning",
"all",
"outputs",
"as",
"parsed",
"by",
"functions"
] | python | train |
oceanprotocol/oceandb-bigchaindb-driver | oceandb_bigchaindb_driver/plugin.py | https://github.com/oceanprotocol/oceandb-bigchaindb-driver/blob/82315bcc9f7ba8b01beb08014bdeb541546c6671/oceandb_bigchaindb_driver/plugin.py#L204-L238 | def _delete(self, tx_id):
"""Delete a transaction. Read documentation about CRAB model in https://blog.bigchaindb.com/crab-create-retrieve-append-burn-b9f6d111f460.
:param tx_id: transaction id
:return:
"""
txs = self.driver.instance.transactions.get(asset_id=self.get_asset_id(tx_id))
unspent = txs[-1]
output_index = 0
output = unspent['outputs'][output_index]
transfer_input = {
'fulfillment': output['condition']['details'],
'fulfills': {
'output_index': output_index,
'transaction_id': unspent['id']
},
'owners_before': output['public_keys']
}
prepared_transfer_tx = self.driver.instance.transactions.prepare(
operation='TRANSFER',
asset=unspent['asset'] if 'id' in unspent['asset'] else {'id': unspent['id']},
inputs=transfer_input,
recipients=self.BURN_ADDRESS,
metadata={
'namespace': 'burned',
}
)
signed_tx = self.driver.instance.transactions.fulfill(
prepared_transfer_tx,
private_keys=self.user.private_key,
)
self.driver.instance.transactions.send_commit(signed_tx) | [
"def",
"_delete",
"(",
"self",
",",
"tx_id",
")",
":",
"txs",
"=",
"self",
".",
"driver",
".",
"instance",
".",
"transactions",
".",
"get",
"(",
"asset_id",
"=",
"self",
".",
"get_asset_id",
"(",
"tx_id",
")",
")",
"unspent",
"=",
"txs",
"[",
"-",
"1",
"]",
"output_index",
"=",
"0",
"output",
"=",
"unspent",
"[",
"'outputs'",
"]",
"[",
"output_index",
"]",
"transfer_input",
"=",
"{",
"'fulfillment'",
":",
"output",
"[",
"'condition'",
"]",
"[",
"'details'",
"]",
",",
"'fulfills'",
":",
"{",
"'output_index'",
":",
"output_index",
",",
"'transaction_id'",
":",
"unspent",
"[",
"'id'",
"]",
"}",
",",
"'owners_before'",
":",
"output",
"[",
"'public_keys'",
"]",
"}",
"prepared_transfer_tx",
"=",
"self",
".",
"driver",
".",
"instance",
".",
"transactions",
".",
"prepare",
"(",
"operation",
"=",
"'TRANSFER'",
",",
"asset",
"=",
"unspent",
"[",
"'asset'",
"]",
"if",
"'id'",
"in",
"unspent",
"[",
"'asset'",
"]",
"else",
"{",
"'id'",
":",
"unspent",
"[",
"'id'",
"]",
"}",
",",
"inputs",
"=",
"transfer_input",
",",
"recipients",
"=",
"self",
".",
"BURN_ADDRESS",
",",
"metadata",
"=",
"{",
"'namespace'",
":",
"'burned'",
",",
"}",
")",
"signed_tx",
"=",
"self",
".",
"driver",
".",
"instance",
".",
"transactions",
".",
"fulfill",
"(",
"prepared_transfer_tx",
",",
"private_keys",
"=",
"self",
".",
"user",
".",
"private_key",
",",
")",
"self",
".",
"driver",
".",
"instance",
".",
"transactions",
".",
"send_commit",
"(",
"signed_tx",
")"
] | Delete a transaction. Read documentation about CRAB model in https://blog.bigchaindb.com/crab-create-retrieve-append-burn-b9f6d111f460.
:param tx_id: transaction id
:return: | [
"Delete",
"a",
"transaction",
".",
"Read",
"documentation",
"about",
"CRAB",
"model",
"in",
"https",
":",
"//",
"blog",
".",
"bigchaindb",
".",
"com",
"/",
"crab",
"-",
"create",
"-",
"retrieve",
"-",
"append",
"-",
"burn",
"-",
"b9f6d111f460",
"."
] | python | train |
pandas-dev/pandas | pandas/core/strings.py | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/strings.py#L1240-L1313 | def str_pad(arr, width, side='left', fillchar=' '):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with minimum number of char in object.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
Series.str.center : Fills both sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if not is_integer(width):
msg = 'width must be of integer type, not {0}'
raise TypeError(msg.format(type(width).__name__))
if side == 'left':
f = lambda x: x.rjust(width, fillchar)
elif side == 'right':
f = lambda x: x.ljust(width, fillchar)
elif side == 'both':
f = lambda x: x.center(width, fillchar)
else: # pragma: no cover
raise ValueError('Invalid side')
return _na_map(f, arr) | [
"def",
"str_pad",
"(",
"arr",
",",
"width",
",",
"side",
"=",
"'left'",
",",
"fillchar",
"=",
"' '",
")",
":",
"if",
"not",
"isinstance",
"(",
"fillchar",
",",
"str",
")",
":",
"msg",
"=",
"'fillchar must be a character, not {0}'",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"fillchar",
")",
".",
"__name__",
")",
")",
"if",
"len",
"(",
"fillchar",
")",
"!=",
"1",
":",
"raise",
"TypeError",
"(",
"'fillchar must be a character, not str'",
")",
"if",
"not",
"is_integer",
"(",
"width",
")",
":",
"msg",
"=",
"'width must be of integer type, not {0}'",
"raise",
"TypeError",
"(",
"msg",
".",
"format",
"(",
"type",
"(",
"width",
")",
".",
"__name__",
")",
")",
"if",
"side",
"==",
"'left'",
":",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"rjust",
"(",
"width",
",",
"fillchar",
")",
"elif",
"side",
"==",
"'right'",
":",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"ljust",
"(",
"width",
",",
"fillchar",
")",
"elif",
"side",
"==",
"'both'",
":",
"f",
"=",
"lambda",
"x",
":",
"x",
".",
"center",
"(",
"width",
",",
"fillchar",
")",
"else",
":",
"# pragma: no cover",
"raise",
"ValueError",
"(",
"'Invalid side'",
")",
"return",
"_na_map",
"(",
"f",
",",
"arr",
")"
] | Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with character defined in `fillchar`.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' '
Additional character for filling, default is whitespace.
Returns
-------
Series or Index of object
Returns Series or Index with minimum number of char in object.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='left')``.
Series.str.ljust : Fills the right side of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='right')``.
Series.str.center : Fills both sides of strings with an arbitrary
character. Equivalent to ``Series.str.pad(side='both')``.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> s = pd.Series(["caribou", "tiger"])
>>> s
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object | [
"Pad",
"strings",
"in",
"the",
"Series",
"/",
"Index",
"up",
"to",
"width",
"."
] | python | train |
marshmallow-code/marshmallow-jsonapi | marshmallow_jsonapi/utils.py | https://github.com/marshmallow-code/marshmallow-jsonapi/blob/7183c9bb5cdeace4143e6678bab48d433ac439a1/marshmallow_jsonapi/utils.py#L38-L56 | def resolve_params(obj, params, default=missing):
"""Given a dictionary of keyword arguments, return the same dictionary except with
values enclosed in `< >` resolved to attributes on `obj`.
"""
param_values = {}
for name, attr_tpl in iteritems(params):
attr_name = tpl(str(attr_tpl))
if attr_name:
attribute_value = get_value(obj, attr_name, default=default)
if attribute_value is not missing:
param_values[name] = attribute_value
else:
raise AttributeError(
'{attr_name!r} is not a valid '
'attribute of {obj!r}'.format(attr_name=attr_name, obj=obj),
)
else:
param_values[name] = attr_tpl
return param_values | [
"def",
"resolve_params",
"(",
"obj",
",",
"params",
",",
"default",
"=",
"missing",
")",
":",
"param_values",
"=",
"{",
"}",
"for",
"name",
",",
"attr_tpl",
"in",
"iteritems",
"(",
"params",
")",
":",
"attr_name",
"=",
"tpl",
"(",
"str",
"(",
"attr_tpl",
")",
")",
"if",
"attr_name",
":",
"attribute_value",
"=",
"get_value",
"(",
"obj",
",",
"attr_name",
",",
"default",
"=",
"default",
")",
"if",
"attribute_value",
"is",
"not",
"missing",
":",
"param_values",
"[",
"name",
"]",
"=",
"attribute_value",
"else",
":",
"raise",
"AttributeError",
"(",
"'{attr_name!r} is not a valid '",
"'attribute of {obj!r}'",
".",
"format",
"(",
"attr_name",
"=",
"attr_name",
",",
"obj",
"=",
"obj",
")",
",",
")",
"else",
":",
"param_values",
"[",
"name",
"]",
"=",
"attr_tpl",
"return",
"param_values"
] | Given a dictionary of keyword arguments, return the same dictionary except with
values enclosed in `< >` resolved to attributes on `obj`. | [
"Given",
"a",
"dictionary",
"of",
"keyword",
"arguments",
"return",
"the",
"same",
"dictionary",
"except",
"with",
"values",
"enclosed",
"in",
"<",
">",
"resolved",
"to",
"attributes",
"on",
"obj",
"."
] | python | train |
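A small sketch of the `< >` resolution, using a bare namespace in place of a real model instance; this assumes `tpl` treats `<id>`-style strings as attribute templates, as the code above implies:

```python
from types import SimpleNamespace
from marshmallow_jsonapi.utils import resolve_params

post = SimpleNamespace(id=42, author="ada")
params = {"post_id": "<id>", "writer": "<author>", "version": "v1"}

# Expected: {'post_id': 42, 'writer': 'ada', 'version': 'v1'}
print(resolve_params(post, params))
```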
twilio/twilio-python | twilio/rest/pricing/v1/__init__.py | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/pricing/v1/__init__.py#L31-L37 | def messaging(self):
"""
:rtype: twilio.rest.pricing.v1.messaging.MessagingList
"""
if self._messaging is None:
self._messaging = MessagingList(self)
return self._messaging | [
"def",
"messaging",
"(",
"self",
")",
":",
"if",
"self",
".",
"_messaging",
"is",
"None",
":",
"self",
".",
"_messaging",
"=",
"MessagingList",
"(",
"self",
")",
"return",
"self",
".",
"_messaging"
] | :rtype: twilio.rest.pricing.v1.messaging.MessagingList | [
":",
"rtype",
":",
"twilio",
".",
"rest",
".",
"pricing",
".",
"v1",
".",
"messaging",
".",
"MessagingList"
] | python | train |
commonsense/metanl | metanl/token_utils.py | https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/token_utils.py#L28-L44 | def untokenize(words):
"""
Untokenizing a text undoes the tokenizing operation, restoring
punctuation and spaces to the places that people expect them to be.
Ideally, `untokenize(tokenize(text))` should be identical to `text`,
except for line breaks.
"""
text = ' '.join(words)
step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...')
step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
step5 = step4.replace(" '", "'").replace(" n't", "n't").replace(
"can not", "cannot")
step6 = step5.replace(" ` ", " '")
return step6.strip() | [
"def",
"untokenize",
"(",
"words",
")",
":",
"text",
"=",
"' '",
".",
"join",
"(",
"words",
")",
"step1",
"=",
"text",
".",
"replace",
"(",
"\"`` \"",
",",
"'\"'",
")",
".",
"replace",
"(",
"\" ''\"",
",",
"'\"'",
")",
".",
"replace",
"(",
"'. . .'",
",",
"'...'",
")",
"step2",
"=",
"step1",
".",
"replace",
"(",
"\" ( \"",
",",
"\" (\"",
")",
".",
"replace",
"(",
"\" ) \"",
",",
"\") \"",
")",
"step3",
"=",
"re",
".",
"sub",
"(",
"r' ([.,:;?!%]+)([ \\'\"`])'",
",",
"r\"\\1\\2\"",
",",
"step2",
")",
"step4",
"=",
"re",
".",
"sub",
"(",
"r' ([.,:;?!%]+)$'",
",",
"r\"\\1\"",
",",
"step3",
")",
"step5",
"=",
"step4",
".",
"replace",
"(",
"\" '\"",
",",
"\"'\"",
")",
".",
"replace",
"(",
"\" n't\"",
",",
"\"n't\"",
")",
".",
"replace",
"(",
"\"can not\"",
",",
"\"cannot\"",
")",
"step6",
"=",
"step5",
".",
"replace",
"(",
"\" ` \"",
",",
"\" '\"",
")",
"return",
"step6",
".",
"strip",
"(",
")"
] | Untokenizing a text undoes the tokenizing operation, restoring
punctuation and spaces to the places that people expect them to be.
Ideally, `untokenize(tokenize(text))` should be identical to `text`,
except for line breaks. | [
"Untokenizing",
"a",
"text",
"undoes",
"the",
"tokenizing",
"operation",
"restoring",
"punctuation",
"and",
"spaces",
"to",
"the",
"places",
"that",
"people",
"expect",
"them",
"to",
"be",
"."
] | python | train |
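A worked round-trip example, assuming the `untokenize` above is in scope:

```python
tokens = ["She", "said", ",", "``", "hello", "''",
          "and", "did", "n't", "wait", "."]
print(untokenize(tokens))
# She said, "hello" and didn't wait.
```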
CellProfiler/centrosome | centrosome/cpmorphology.py | https://github.com/CellProfiler/centrosome/blob/7bd9350a2d4ae1b215b81eabcecfe560bbb1f32a/centrosome/cpmorphology.py#L3260-L3276 | def hbreak(image, mask=None, iterations=1):
'''Remove horizontal breaks
1 1 1 1 1 1
0 1 0 -> 0 0 0 (this case only)
1 1 1 1 1 1
'''
global hbreak_table
if mask is None:
masked_image = image
else:
masked_image = image.astype(bool).copy()
masked_image[~mask] = False
result = table_lookup(masked_image, hbreak_table, False)
if not mask is None:
result[~mask] = image[~mask]
return result | [
"def",
"hbreak",
"(",
"image",
",",
"mask",
"=",
"None",
",",
"iterations",
"=",
"1",
")",
":",
"global",
"hbreak_table",
"if",
"mask",
"is",
"None",
":",
"masked_image",
"=",
"image",
"else",
":",
"masked_image",
"=",
"image",
".",
"astype",
"(",
"bool",
")",
".",
"copy",
"(",
")",
"masked_image",
"[",
"~",
"mask",
"]",
"=",
"False",
"result",
"=",
"table_lookup",
"(",
"masked_image",
",",
"hbreak_table",
",",
"False",
")",
"if",
"not",
"mask",
"is",
"None",
":",
"result",
"[",
"~",
"mask",
"]",
"=",
"image",
"[",
"~",
"mask",
"]",
"return",
"result"
] | Remove horizontal breaks
1 1 1 1 1 1
0 1 0 -> 0 0 0 (this case only)
1 1 1 1 1 1 | [
"Remove",
"horizontal",
"breaks",
"1",
"1",
"1",
"1",
"1",
"1",
"0",
"1",
"0",
"-",
">",
"0",
"0",
"0",
"(",
"this",
"case",
"only",
")",
"1",
"1",
"1",
"1",
"1",
"1"
] | python | train |
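The docstring's canonical case, shown end to end; this assumes centrosome is installed and that the lookup pads borders as the docstring implies:

```python
import numpy as np
from centrosome.cpmorphology import hbreak

img = np.array([[1, 1, 1],
                [0, 1, 0],
                [1, 1, 1]], bool)
print(hbreak(img).astype(int))
# [[1 1 1]
#  [0 0 0]
#  [1 1 1]]
```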
LuminosoInsight/python-ftfy | ftfy/formatting.py | https://github.com/LuminosoInsight/python-ftfy/blob/476acc6ad270bffe07f97d4f7cf2139acdc69633/ftfy/formatting.py#L67-L98 | def display_ljust(text, width, fillchar=' '):
"""
Return `text` left-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
"Left" here means toward the beginning of the string, which may actually
appear on the right in an RTL context. This is similar to the use of the
word "left" in "left parenthesis".
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_ljust(line, 20, '▒'))
Table flip▒▒▒▒▒▒▒▒▒▒
(╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
ちゃぶ台返し▒▒▒▒▒▒▒▒
This example, and the similar ones that follow, should come out justified
correctly when viewed in a monospaced terminal. It will probably not look
correct if you're viewing this code or documentation in a Web browser.
"""
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
# There's a control character here, so just don't add padding
return text
padding = max(0, width - text_width)
return text + fillchar * padding | [
"def",
"display_ljust",
"(",
"text",
",",
"width",
",",
"fillchar",
"=",
"' '",
")",
":",
"if",
"character_width",
"(",
"fillchar",
")",
"!=",
"1",
":",
"raise",
"ValueError",
"(",
"\"The padding character must have display width 1\"",
")",
"text_width",
"=",
"monospaced_width",
"(",
"text",
")",
"if",
"text_width",
"==",
"-",
"1",
":",
"# There's a control character here, so just don't add padding",
"return",
"text",
"padding",
"=",
"max",
"(",
"0",
",",
"width",
"-",
"text_width",
")",
"return",
"text",
"+",
"fillchar",
"*",
"padding"
] | Return `text` left-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
"Left" here means toward the beginning of the string, which may actually
appear on the right in an RTL context. This is similar to the use of the
word "left" in "left parenthesis".
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_ljust(line, 20, '▒'))
Table flip▒▒▒▒▒▒▒▒▒▒
(╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
ちゃぶ台返し▒▒▒▒▒▒▒▒
This example, and the similar ones that follow, should come out justified
correctly when viewed in a monospaced terminal. It will probably not look
correct if you're viewing this code or documentation in a Web browser. | [
"Return",
"text",
"left",
"-",
"justified",
"in",
"a",
"Unicode",
"string",
"whose",
"display",
"width",
"in",
"a",
"monospaced",
"terminal",
"should",
"be",
"at",
"least",
"width",
"character",
"cells",
".",
"The",
"rest",
"of",
"the",
"string",
"will",
"be",
"padded",
"with",
"fillchar",
"which",
"must",
"be",
"a",
"width",
"-",
"1",
"character",
"."
] | python | train |
limodou/uliweb | uliweb/orm/__init__.py | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L2728-L2739 | def distinct(self, field=None):
"""
If field is None, then it means that it'll create:
select distinct *
and if field is not None, for example: 'name', it'll create:
select distinct(name)
"""
if field is None:
self.funcs.append(('distinct', (), {}))
else:
self.distinct_field = field
return self | [
"def",
"distinct",
"(",
"self",
",",
"field",
"=",
"None",
")",
":",
"if",
"field",
"is",
"None",
":",
"self",
".",
"funcs",
".",
"append",
"(",
"(",
"'distinct'",
",",
"(",
")",
",",
"{",
"}",
")",
")",
"else",
":",
"self",
".",
"distinct_field",
"=",
"field",
"return",
"self"
] | If field is None, then it means that it'll create:
select distinct *
and if field is not None, for example: 'name', it'll create:
select distinct(name) | [
"If",
"field",
"is",
"None",
"then",
"it",
"means",
"that",
"it",
"ll",
"create",
":",
"select",
"distinct",
"*",
"and",
"if",
"field",
"is",
"not",
"None",
"for",
"example",
":",
"name",
"it",
"ll",
"create",
":",
"select",
"distinc",
"(",
"name",
")"
] | python | train |
ceph/ceph-deploy | ceph_deploy/hosts/remotes.py | https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L178-L189 | def write_keyring(path, key, uid=-1, gid=-1):
""" create a keyring file """
# Note that we *must* avoid deletion of the temp file
# otherwise we risk not being able to copy the contents from
# one file system to the other, hence the `delete=False`
tmp_file = tempfile.NamedTemporaryFile('wb', delete=False)
tmp_file.write(key)
tmp_file.close()
keyring_dir = os.path.dirname(path)
if not path_exists(keyring_dir):
makedir(keyring_dir, uid, gid)
shutil.move(tmp_file.name, path) | [
"def",
"write_keyring",
"(",
"path",
",",
"key",
",",
"uid",
"=",
"-",
"1",
",",
"gid",
"=",
"-",
"1",
")",
":",
"# Note that we *require* to avoid deletion of the temp file",
"# otherwise we risk not being able to copy the contents from",
"# one file system to the other, hence the `delete=False`",
"tmp_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"'wb'",
",",
"delete",
"=",
"False",
")",
"tmp_file",
".",
"write",
"(",
"key",
")",
"tmp_file",
".",
"close",
"(",
")",
"keyring_dir",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"path",
")",
"if",
"not",
"path_exists",
"(",
"keyring_dir",
")",
":",
"makedir",
"(",
"keyring_dir",
",",
"uid",
",",
"gid",
")",
"shutil",
".",
"move",
"(",
"tmp_file",
".",
"name",
",",
"path",
")"
] | create a keyring file | [
"create",
"a",
"keyring",
"file"
] | python | train |
HewlettPackard/python-hpOneView | hpOneView/resources/networking/logical_interconnects.py | https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/networking/logical_interconnects.py#L449-L464 | def update_qos_aggregated_configuration(self, qos_configuration, timeout=-1):
"""
Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect.
"""
uri = "{}{}".format(self.data["uri"], self.QOS_AGGREGATED_CONFIGURATION)
return self._helper.update(qos_configuration, uri=uri, timeout=timeout) | [
"def",
"update_qos_aggregated_configuration",
"(",
"self",
",",
"qos_configuration",
",",
"timeout",
"=",
"-",
"1",
")",
":",
"uri",
"=",
"\"{}{}\"",
".",
"format",
"(",
"self",
".",
"data",
"[",
"\"uri\"",
"]",
",",
"self",
".",
"QOS_AGGREGATED_CONFIGURATION",
")",
"return",
"self",
".",
"_helper",
".",
"update",
"(",
"qos_configuration",
",",
"uri",
"=",
"uri",
",",
"timeout",
"=",
"timeout",
")"
] | Updates the QoS aggregated configuration for the logical interconnect.
Args:
qos_configuration:
QOS configuration.
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in
OneView, just stops waiting for its completion.
Returns:
dict: Logical Interconnect. | [
"Updates",
"the",
"QoS",
"aggregated",
"configuration",
"for",
"the",
"logical",
"interconnect",
"."
] | python | train |
stitchfix/pyxley | pyxley/charts/mg/barchart.py | https://github.com/stitchfix/pyxley/blob/2dab00022d977d986169cd8a629b3a2f91be893f/pyxley/charts/mg/barchart.py#L58-L70 | def to_json(df, x, y):
"""Format output for json response."""
values = []
for i, row in df.iterrows():
values.append({
"x": row[x],
"y": row[y]
})
if df.empty:
return {"result": [{"x": 0, "y": 0}], "date": False}
return {"result": values, "date": False} | [
"def",
"to_json",
"(",
"df",
",",
"x",
",",
"y",
")",
":",
"values",
"=",
"[",
"]",
"for",
"i",
",",
"row",
"in",
"df",
".",
"iterrows",
"(",
")",
":",
"values",
".",
"append",
"(",
"{",
"\"x\"",
":",
"row",
"[",
"x",
"]",
",",
"\"y\"",
":",
"row",
"[",
"y",
"]",
"}",
")",
"if",
"df",
".",
"empty",
":",
"return",
"{",
"\"result\"",
":",
"[",
"{",
"\"x\"",
":",
"0",
",",
"\"y\"",
":",
"0",
"}",
"]",
",",
"\"date\"",
":",
"False",
"}",
"return",
"{",
"\"result\"",
":",
"values",
",",
"\"date\"",
":",
"False",
"}"
] | Format output for json response. | [
"Format",
"output",
"for",
"json",
"response",
"."
] | python | train |
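A grounded example of the point-dict output, assuming the `to_json` above is in scope (numpy scalars print like plain ints):

```python
import pandas as pd

df = pd.DataFrame({"day": [1, 2, 3], "clicks": [10, 40, 25]})
print(to_json(df, x="day", y="clicks"))
# {'result': [{'x': 1, 'y': 10}, {'x': 2, 'y': 40}, {'x': 3, 'y': 25}],
#  'date': False}
```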
treycucco/bidon | bidon/db/access/model_access.py | https://github.com/treycucco/bidon/blob/d9f24596841d0e69e8ac70a1d1a1deecea95e340/bidon/db/access/model_access.py#L208-L212 | def get_model_id_constraints(model):
"""Returns constraints to target a specific model."""
pkname = model.primary_key_name
pkey = model.primary_key
return get_id_constraints(pkname, pkey) | [
"def",
"get_model_id_constraints",
"(",
"model",
")",
":",
"pkname",
"=",
"model",
".",
"primary_key_name",
"pkey",
"=",
"model",
".",
"primary_key",
"return",
"get_id_constraints",
"(",
"pkname",
",",
"pkey",
")"
] | Returns constraints to target a specific model. | [
"Returns",
"constraints",
"to",
"target",
"a",
"specific",
"model",
"."
] | python | train |
RedKrieg/pysparklines | sparkline/sparkline.py | https://github.com/RedKrieg/pysparklines/blob/7efdc98f841a0003e138a93c4e27cd71a64e7062/sparkline/sparkline.py#L41-L55 | def guess_series(input_string):
u"""Tries to convert <input_string> into a list of floats.
Example:
>>> guess_series("0.5 1.2 3.5 7.3 8 12.5, 13.2,"
... "15.0, 14.2, 11.8, 6.1, 1.9")
[0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1, 1.9]
"""
float_finder = re.compile("([-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?)")
return ([
i for i in [
_convert_to_float(j) for j in float_finder.findall(input_string)
# Remove entries we couldn't convert to a sensible value.
] if i is not None and not math.isnan(i) and not math.isinf(i)
]) | [
"def",
"guess_series",
"(",
"input_string",
")",
":",
"float_finder",
"=",
"re",
".",
"compile",
"(",
"\"([-+]?[0-9]*\\.?[0-9]+(?:[eE][-+]?[0-9]+)?)\"",
")",
"return",
"(",
"[",
"i",
"for",
"i",
"in",
"[",
"_convert_to_float",
"(",
"j",
")",
"for",
"j",
"in",
"float_finder",
".",
"findall",
"(",
"input_string",
")",
"# Remove entires we couldn't convert to a sensible value.",
"]",
"if",
"i",
"is",
"not",
"None",
"and",
"not",
"math",
".",
"isnan",
"(",
"i",
")",
"and",
"not",
"math",
".",
"isinf",
"(",
"i",
")",
"]",
")"
] | u"""Tries to convert <input_string> into a list of floats.
Example:
>>> guess_series("0.5 1.2 3.5 7.3 8 12.5, 13.2,"
... "15.0, 14.2, 11.8, 6.1, 1.9")
[0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1, 1.9] | [
"u",
"Tries",
"to",
"convert",
"<input_string",
">",
"into",
"a",
"list",
"of",
"floats",
"."
] | python | train |
NetEaseGame/ATX | atx/drivers/ios_webdriveragent.py | https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/drivers/ios_webdriveragent.py#L165-L175 | def _take_screenshot(self):
"""Take a screenshot, also called by Mixin
Args:
- filename(string): file name to save
Returns:
PIL Image object
"""
raw_png = self._wda.screenshot()
img = Image.open(BytesIO(raw_png))
return img | [
"def",
"_take_screenshot",
"(",
"self",
")",
":",
"raw_png",
"=",
"self",
".",
"_wda",
".",
"screenshot",
"(",
")",
"img",
"=",
"Image",
".",
"open",
"(",
"BytesIO",
"(",
"raw_png",
")",
")",
"return",
"img"
] | Take a screenshot, also called by Mixin
Args:
- filename(string): file name to save
Returns:
PIL Image object | [
"Take",
"a",
"screenshot",
"also",
"called",
"by",
"Mixin",
"Args",
":",
"-",
"filename",
"(",
"string",
")",
":",
"file",
"name",
"to",
"save"
] | python | train |
StackStorm/pybind | pybind/nos/v6_0_2f/rbridge_id/snmp_server/__init__.py | https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/snmp_server/__init__.py#L100-L121 | def _set_engineID(self, v, load=False):
"""
Setter method for engineID, mapped from YANG variable /rbridge_id/snmp_server/engineID (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_engineID is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_engineID() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=engineID.engineID, is_container='container', presence=False, yang_name="engineID", rest_name="engineID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Holds local Agents's Engine ID. Reboot is required to make changes to be effective in snmp", u'callpoint': u'snmplocalengineid', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """engineID must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=engineID.engineID, is_container='container', presence=False, yang_name="engineID", rest_name="engineID", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u"Holds local Agents's Engine ID. Reboot is required to make changes to be effective in snmp", u'callpoint': u'snmplocalengineid', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='container', is_config=True)""",
})
self.__engineID = t
if hasattr(self, '_set'):
self._set() | [
"def",
"_set_engineID",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
"base",
"=",
"engineID",
".",
"engineID",
",",
"is_container",
"=",
"'container'",
",",
"presence",
"=",
"False",
",",
"yang_name",
"=",
"\"engineID\"",
",",
"rest_name",
"=",
"\"engineID\"",
",",
"parent",
"=",
"self",
",",
"path_helper",
"=",
"self",
".",
"_path_helper",
",",
"extmethods",
"=",
"self",
".",
"_extmethods",
",",
"register_paths",
"=",
"True",
",",
"extensions",
"=",
"{",
"u'tailf-common'",
":",
"{",
"u'info'",
":",
"u\"Holds local Agents's Engine ID. Reboot is required to make changes to be effective in snmp\"",
",",
"u'callpoint'",
":",
"u'snmplocalengineid'",
",",
"u'cli-incomplete-no'",
":",
"None",
"}",
"}",
",",
"namespace",
"=",
"'urn:brocade.com:mgmt:brocade-snmp'",
",",
"defining_module",
"=",
"'brocade-snmp'",
",",
"yang_type",
"=",
"'container'",
",",
"is_config",
"=",
"True",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
")",
":",
"raise",
"ValueError",
"(",
"{",
"'error-string'",
":",
"\"\"\"engineID must be of a type compatible with container\"\"\"",
",",
"'defined-type'",
":",
"\"container\"",
",",
"'generated-type'",
":",
"\"\"\"YANGDynClass(base=engineID.engineID, is_container='container', presence=False, yang_name=\"engineID\", rest_name=\"engineID\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u\"Holds local Agents's Engine ID. Reboot is required to make changes to be effective in snmp\", u'callpoint': u'snmplocalengineid', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='container', is_config=True)\"\"\"",
",",
"}",
")",
"self",
".",
"__engineID",
"=",
"t",
"if",
"hasattr",
"(",
"self",
",",
"'_set'",
")",
":",
"self",
".",
"_set",
"(",
")"
] | Setter method for engineID, mapped from YANG variable /rbridge_id/snmp_server/engineID (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_engineID is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_engineID() directly. | [
"Setter",
"method",
"for",
"engineID",
"mapped",
"from",
"YANG",
"variable",
"/",
"rbridge_id",
"/",
"snmp_server",
"/",
"engineID",
"(",
"container",
")",
"If",
"this",
"variable",
"is",
"read",
"-",
"only",
"(",
"config",
":",
"false",
")",
"in",
"the",
"source",
"YANG",
"file",
"then",
"_set_engineID",
"is",
"considered",
"as",
"a",
"private",
"method",
".",
"Backends",
"looking",
"to",
"populate",
"this",
"variable",
"should",
"do",
"so",
"via",
"calling",
"thisObj",
".",
"_set_engineID",
"()",
"directly",
"."
] | python | train |
KelSolaar/Umbra | umbra/managers/actions_manager.py | https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/managers/actions_manager.py#L374-L392 | def get_category(self, name, vivify=False):
"""
Returns requested category.
:param name: Category to retrieve.
:type name: unicode
:param vivify: Vivify missing parents in the chain to the requested category.
:type vivify: bool
:return: Category.
:rtype: dict
"""
category = self.__get_category(self.__categories, name, vivify)
if isinstance(category, dict):
LOGGER.debug("> Category '{0}': '{1}'.".format(name, category))
return category
else:
raise umbra.exceptions.CategoryExistsError("{0} | '{1}' category doesn't exist!".format
(self.__class__.__name__, name)) | [
"def",
"get_category",
"(",
"self",
",",
"name",
",",
"vivify",
"=",
"False",
")",
":",
"category",
"=",
"self",
".",
"__get_category",
"(",
"self",
".",
"__categories",
",",
"name",
",",
"vivify",
")",
"if",
"isinstance",
"(",
"category",
",",
"dict",
")",
":",
"LOGGER",
".",
"debug",
"(",
"\"> Category '{0}': '{1}'.\"",
".",
"format",
"(",
"name",
",",
"category",
")",
")",
"return",
"category",
"else",
":",
"raise",
"umbra",
".",
"exceptions",
".",
"CategoryExistsError",
"(",
"\"{0} | '{1}' category doesn't exists!\"",
".",
"format",
"(",
"self",
".",
"__class__",
".",
"__name__",
",",
"name",
")",
")"
] | Returns requested category.
:param name: Category to retrieve.
:type name: unicode
:param vivify: Vivify missing parents in the chain to the requested category.
:type vivify: bool
:return: Category.
:rtype: dict | [
"Returns",
"requested",
"category",
"."
] | python | train |
saltstack/salt | salt/modules/aptly.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/aptly.py#L462-L485 | def list_mirrors(config_path=_DEFAULT_CONFIG_PATH):
'''
Get a list of all the mirrored remote repositories.
:param str config_path: The path to the configuration file for the aptly instance.
:return: A list of the mirror names.
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' aptly.list_mirrors
'''
_validate_config(config_path)
cmd = ['mirror', 'list', '-config={}'.format(config_path), '-raw=true']
cmd_ret = _cmd_run(cmd)
ret = [line.strip() for line in cmd_ret.splitlines()]
log.debug('Found mirrors: %s', len(ret))
return ret | [
"def",
"list_mirrors",
"(",
"config_path",
"=",
"_DEFAULT_CONFIG_PATH",
")",
":",
"_validate_config",
"(",
"config_path",
")",
"cmd",
"=",
"[",
"'mirror'",
",",
"'list'",
",",
"'-config={}'",
".",
"format",
"(",
"config_path",
")",
",",
"'-raw=true'",
"]",
"cmd_ret",
"=",
"_cmd_run",
"(",
"cmd",
")",
"ret",
"=",
"[",
"line",
".",
"strip",
"(",
")",
"for",
"line",
"in",
"cmd_ret",
".",
"splitlines",
"(",
")",
"]",
"log",
".",
"debug",
"(",
"'Found mirrors: %s'",
",",
"len",
"(",
"ret",
")",
")",
"return",
"ret"
] | Get a list of all the mirrored remote repositories.
:param str config_path: The path to the configuration file for the aptly instance.
:return: A list of the mirror names.
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' aptly.list_mirrors | [
"Get",
"a",
"list",
"of",
"all",
"the",
"mirrored",
"remote",
"repositories",
"."
] | python | train |
angr/angr | angr/analyses/ddg.py | https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/ddg.py#L1581-L1605 | def find_killers(self, var_def, simplified_graph=True):
"""
Find all killers to the specified variable definition.
:param ProgramVariable var_def: The variable definition.
:param bool simplified_graph: True if we want to search in the simplified graph, False otherwise.
:return: A collection of all killers to the specified variable definition.
:rtype: list
"""
if simplified_graph:
graph = self.simplified_data_graph
else:
graph = self.data_graph
if var_def not in graph:
return []
killers = []
out_edges = graph.out_edges(var_def, data=True)
for _, dst, data in out_edges:
if 'type' in data and data['type'] == 'kill':
killers.append(dst)
return killers | [
"def",
"find_killers",
"(",
"self",
",",
"var_def",
",",
"simplified_graph",
"=",
"True",
")",
":",
"if",
"simplified_graph",
":",
"graph",
"=",
"self",
".",
"simplified_data_graph",
"else",
":",
"graph",
"=",
"self",
".",
"data_graph",
"if",
"var_def",
"not",
"in",
"graph",
":",
"return",
"[",
"]",
"killers",
"=",
"[",
"]",
"out_edges",
"=",
"graph",
".",
"out_edges",
"(",
"var_def",
",",
"data",
"=",
"True",
")",
"for",
"_",
",",
"dst",
",",
"data",
"in",
"out_edges",
":",
"if",
"'type'",
"in",
"data",
"and",
"data",
"[",
"'type'",
"]",
"==",
"'kill'",
":",
"killers",
".",
"append",
"(",
"dst",
")",
"return",
"killers"
] | Find all killers to the specified variable definition.
:param ProgramVariable var_def: The variable definition.
:param bool simplified_graph: True if we want to search in the simplified graph, False otherwise.
:return: A collection of all killers to the specified variable definition.
:rtype: list | [
"Find",
"all",
"killers",
"to",
"the",
"specified",
"variable",
"definition",
"."
] | python | train |
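The kill-edge scan above is ordinary graph traversal; a self-contained networkx sketch (the node labels are made up) reproduces the same out-edge filter:

import networkx as nx

g = nx.DiGraph()  # stand-in for the (simplified) data-dependence graph
g.add_edge("def@0x4000", "def@0x4010", type="kill")
g.add_edge("def@0x4000", "use@0x4020", type="data")

killers = [dst for _, dst, data in g.out_edges("def@0x4000", data=True)
           if data.get("type") == "kill"]
assert killers == ["def@0x4010"]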
Nic30/hwt | hwt/pyUtils/fileHelpers.py | https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/pyUtils/fileHelpers.py#L5-L26 | def find_files(directory, pattern, recursive=True):
"""
Find files by pattern in directory
"""
if not os.path.isdir(directory):
if os.path.exists(directory):
raise IOError(directory + ' is not a directory')
else:
raise IOError(directory + " does not exists")
if recursive:
for root, _, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
else:
root = directory
for basename in os.listdir(root):
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
if os.path.isfile(filename):
yield filename | [
"def",
"find_files",
"(",
"directory",
",",
"pattern",
",",
"recursive",
"=",
"True",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"directory",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"directory",
")",
":",
"raise",
"IOError",
"(",
"directory",
"+",
"' is not directory'",
")",
"else",
":",
"raise",
"IOError",
"(",
"directory",
"+",
"\" does not exists\"",
")",
"if",
"recursive",
":",
"for",
"root",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"directory",
")",
":",
"for",
"basename",
"in",
"files",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"basename",
",",
"pattern",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"basename",
")",
"yield",
"filename",
"else",
":",
"root",
"=",
"directory",
"for",
"basename",
"in",
"os",
".",
"listdir",
"(",
"root",
")",
":",
"if",
"fnmatch",
".",
"fnmatch",
"(",
"basename",
",",
"pattern",
")",
":",
"filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root",
",",
"basename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"yield",
"filename"
] | Find files by pattern in directory | [
"Find",
"files",
"by",
"pattern",
"in",
"directory"
] | python | test |
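A usage sketch for the generator, importing from the module path given in the record; the temporary files are illustrative:

import os, tempfile
from hwt.pyUtils.fileHelpers import find_files

root = tempfile.mkdtemp()
open(os.path.join(root, "top.vhd"), "w").close()
os.mkdir(os.path.join(root, "sub"))
open(os.path.join(root, "sub", "core.vhd"), "w").close()

assert len(list(find_files(root, "*.vhd"))) == 2              # recursive walk
assert len(list(find_files(root, "*.vhd", recursive=False))) == 1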
revarbat/pymuv | pymuv/lvalue.py | https://github.com/revarbat/pymuv/blob/cefa2f2d35fc32054b9595da5f3393f6cceee5e0/pymuv/lvalue.py#L300-L339 | def del_expr(self, ctx):
"""
Returns the MUF needed to delete a given lvalue. (ie: array item)
Returned MUF will set a bare variable to 0, and will remove the
given indexed item for an indexed array or dictionary.
"""
if self.readonly:
raise MuvError(
"Cannot assign value to constant '%s'." % self.varname,
position=self.position
)
varname = ctx.lookup_variable(self.varname)
if varname is None:
raise MuvError(
"Undeclared identifier '%s'." % self.varname,
position=self.position
)
if len(self.indexing) == 0:
return "0 {var} !".format(var=varname)
if len(self.indexing) == 1:
if ctx.target in ['fb7']:
return "{var} @ {idx} array_delitem".format(
var=varname,
idx=self.indexing[0].generate_code(ctx),
)
else:
return "{var} @ {idx} array_delitem dup {var} !".format(
var=varname,
idx=self.indexing[0].generate_code(ctx),
)
if ctx.target in ['fb7']:
return "{var} @ {{ {idx} }}list array_nested_del".format(
var=varname,
idx=" ".join(x.generate_code(ctx) for x in self.indexing),
)
else:
return "{var} @ {{ {idx} }}list array_nested_del dup {var} !".format(
var=varname,
idx=" ".join(x.generate_code(ctx) for x in self.indexing),
) | [
"def",
"del_expr",
"(",
"self",
",",
"ctx",
")",
":",
"if",
"self",
".",
"readonly",
":",
"raise",
"MuvError",
"(",
"\"Cannot assign value to constant '%s'.\"",
"%",
"self",
".",
"varname",
",",
"position",
"=",
"self",
".",
"position",
")",
"varname",
"=",
"ctx",
".",
"lookup_variable",
"(",
"self",
".",
"varname",
")",
"if",
"varname",
"is",
"None",
":",
"raise",
"MuvError",
"(",
"\"Undeclared identifier '%s'.\"",
"%",
"self",
".",
"varname",
",",
"position",
"=",
"self",
".",
"position",
")",
"if",
"len",
"(",
"self",
".",
"indexing",
")",
"==",
"0",
":",
"return",
"\"0 {var} !\"",
".",
"format",
"(",
"var",
"=",
"varname",
")",
"if",
"len",
"(",
"self",
".",
"indexing",
")",
"==",
"1",
":",
"if",
"ctx",
".",
"target",
"in",
"[",
"'fb7'",
"]",
":",
"return",
"\"{var} @ {idx} array_delitem\"",
".",
"format",
"(",
"var",
"=",
"varname",
",",
"idx",
"=",
"self",
".",
"indexing",
"[",
"0",
"]",
".",
"generate_code",
"(",
"ctx",
")",
",",
")",
"else",
":",
"return",
"\"{var} @ {idx} array_delitem dup {var} !\"",
".",
"format",
"(",
"var",
"=",
"varname",
",",
"idx",
"=",
"self",
".",
"indexing",
"[",
"0",
"]",
".",
"generate_code",
"(",
"ctx",
")",
",",
")",
"if",
"ctx",
".",
"target",
"in",
"[",
"'fb7'",
"]",
":",
"return",
"\"{var} @ {{ {idx} }}list array_nested_del\"",
".",
"format",
"(",
"var",
"=",
"varname",
",",
"idx",
"=",
"\" \"",
".",
"join",
"(",
"x",
".",
"generate_code",
"(",
"ctx",
")",
"for",
"x",
"in",
"self",
".",
"indexing",
")",
",",
")",
"else",
":",
"return",
"\"{var} @ {{ {idx} }}list array_nested_del dup {var} !\"",
".",
"format",
"(",
"var",
"=",
"varname",
",",
"idx",
"=",
"\" \"",
".",
"join",
"(",
"x",
".",
"generate_code",
"(",
"ctx",
")",
"for",
"x",
"in",
"self",
".",
"indexing",
")",
",",
")"
] | Returns the MUF needed to delete a given lvalue. (ie: array item)
Returned MUF will set a bare variable to 0, and will remove the
given indexed item for an indexed array or dictionary. | [
"Returns",
"the",
"MUF",
"needed",
"to",
"delete",
"a",
"given",
"lvalue",
".",
"(",
"ie",
":",
"array",
"item",
")",
"Returned",
"MUF",
"will",
"set",
"a",
"bare",
"variable",
"to",
"0",
"and",
"will",
"remove",
"the",
"given",
"indexed",
"item",
"for",
"an",
"indexed",
"array",
"or",
"dictionary",
"."
] | python | train |
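Because the emitted MUF comes from plain string templates, the common non-fb7 single-index case can be previewed without constructing pymuv AST objects (the variable and index names below are invented):

template = "{var} @ {idx} array_delitem dup {var} !"    # copied from the record
print(template.format(var="myarr", idx="3"))
# -> myarr @ 3 array_delitem dup myarr !
# On the 'fb7' target the trailing "dup myarr !" store-back is omitted.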
cokelaer/spectrum | src/spectrum/toeplitz.py | https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/toeplitz.py#L21-L81 | def TOEPLITZ(T0, TC, TR, Z):
"""solve the general toeplitz linear equations
Solve TX=Z
:param T0: zero lag value
:param TC: r1 to rN
:param TR: r1 to rN
returns X
requires 3M^2+M operations instead of M^3 with gaussian elimination
.. warning:: not used right now
"""
assert len(TC)>0
assert len(TC)==len(TR)
M = len(TC)
X = numpy.zeros(M+1,dtype=complex)
A = numpy.zeros(M,dtype=complex)
B = numpy.zeros(M,dtype=complex)
P = T0
if P == 0: raise ValueError("P must be different from zero")
X[0] = Z[0]/T0
for k in range(0, M):
save1 = TC[k]
save2 = TR[k]
beta = X[0]*TC[k]
if k == 0:
temp1 = -save1 / P
temp2 = -save2 / P
else:
for j in range(0, k):
save1 = save1 + A[j] * TC[k-j-1]
save2 = save2 + B[j] * TR[k-j-1]
beta = beta + X[j+1] * TC[k-j-1]
temp1 = -save1 / P
temp2 = -save2/P
P = P * (1. - (temp1*temp2))
if P <= 0:
raise ValueError("singular matrix")
A[k] = temp1
B[k] = temp2
alpha = (Z[k+1]-beta)/P
if k == 0:
X[k+1] = alpha
for j in range(0,k+1):
X[j] = X[j] + alpha * B[k-j]
continue
for j in range(0, k):
kj = k-j-1
save1 = A[j]
A[j] = save1 + temp1 * B[kj]
B[kj] = B[kj] + temp2*save1
X[k+1] = alpha
for j in range(0,k+1):
X[j] = X[j] + alpha*B[k-j]
return X | [
"def",
"TOEPLITZ",
"(",
"T0",
",",
"TC",
",",
"TR",
",",
"Z",
")",
":",
"assert",
"len",
"(",
"TC",
")",
">",
"0",
"assert",
"len",
"(",
"TC",
")",
"==",
"len",
"(",
"TR",
")",
"M",
"=",
"len",
"(",
"TC",
")",
"X",
"=",
"numpy",
".",
"zeros",
"(",
"M",
"+",
"1",
",",
"dtype",
"=",
"complex",
")",
"A",
"=",
"numpy",
".",
"zeros",
"(",
"M",
",",
"dtype",
"=",
"complex",
")",
"B",
"=",
"numpy",
".",
"zeros",
"(",
"M",
",",
"dtype",
"=",
"complex",
")",
"P",
"=",
"T0",
"if",
"P",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"P must be different from zero\"",
")",
"if",
"P",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"\"P must be different from zero\"",
")",
"X",
"[",
"0",
"]",
"=",
"Z",
"[",
"0",
"]",
"/",
"T0",
"for",
"k",
"in",
"range",
"(",
"0",
",",
"M",
")",
":",
"save1",
"=",
"TC",
"[",
"k",
"]",
"save2",
"=",
"TR",
"[",
"k",
"]",
"beta",
"=",
"X",
"[",
"0",
"]",
"*",
"TC",
"[",
"k",
"]",
"if",
"k",
"==",
"0",
":",
"temp1",
"=",
"-",
"save1",
"/",
"P",
"temp2",
"=",
"-",
"save2",
"/",
"P",
"else",
":",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"k",
")",
":",
"save1",
"=",
"save1",
"+",
"A",
"[",
"j",
"]",
"*",
"TC",
"[",
"k",
"-",
"j",
"-",
"1",
"]",
"save2",
"=",
"save2",
"+",
"B",
"[",
"j",
"]",
"*",
"TR",
"[",
"k",
"-",
"j",
"-",
"1",
"]",
"beta",
"=",
"beta",
"+",
"X",
"[",
"j",
"+",
"1",
"]",
"*",
"TC",
"[",
"k",
"-",
"j",
"-",
"1",
"]",
"temp1",
"=",
"-",
"save1",
"/",
"P",
"temp2",
"=",
"-",
"save2",
"/",
"P",
"P",
"=",
"P",
"*",
"(",
"1.",
"-",
"(",
"temp1",
"*",
"temp2",
")",
")",
"if",
"P",
"<=",
"0",
":",
"raise",
"ValueError",
"(",
"\"singular matrix\"",
")",
"A",
"[",
"k",
"]",
"=",
"temp1",
"B",
"[",
"k",
"]",
"=",
"temp2",
"alpha",
"=",
"(",
"Z",
"[",
"k",
"+",
"1",
"]",
"-",
"beta",
")",
"/",
"P",
"if",
"k",
"==",
"0",
":",
"X",
"[",
"k",
"+",
"1",
"]",
"=",
"alpha",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"k",
"+",
"1",
")",
":",
"X",
"[",
"j",
"]",
"=",
"X",
"[",
"j",
"]",
"+",
"alpha",
"*",
"B",
"[",
"k",
"-",
"j",
"]",
"continue",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"k",
")",
":",
"kj",
"=",
"k",
"-",
"j",
"-",
"1",
"save1",
"=",
"A",
"[",
"j",
"]",
"A",
"[",
"j",
"]",
"=",
"save1",
"+",
"temp1",
"*",
"B",
"[",
"kj",
"]",
"B",
"[",
"kj",
"]",
"=",
"B",
"[",
"kj",
"]",
"+",
"temp2",
"*",
"save1",
"X",
"[",
"k",
"+",
"1",
"]",
"=",
"alpha",
"for",
"j",
"in",
"range",
"(",
"0",
",",
"k",
"+",
"1",
")",
":",
"X",
"[",
"j",
"]",
"=",
"X",
"[",
"j",
"]",
"+",
"alpha",
"*",
"B",
"[",
"k",
"-",
"j",
"]",
"return",
"X"
] | solve the general toeplitz linear equations
Solve TX=Z
:param T0: zero lag value
:param TC: r1 to rN
:param TR: r1 to rN
returns X
requires 3M^2+M operations instead of M^3 with gaussian elimination
.. warning:: not used right now | [
"solve",
"the",
"general",
"toeplitz",
"linear",
"equations"
] | python | valid |
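A quick correctness check against a dense solve. The import path is inferred from the record's file location, and the sketch assumes TC holds the sub-diagonal lags (the first column below T0) and TR the super-diagonal lags (the first row right of T0), which the terse docstring leaves implicit:

import numpy as np
from scipy.linalg import toeplitz
from spectrum.toeplitz import TOEPLITZ   # path inferred from the record

T0 = 4.0
TC = np.array([1.0, 0.5, 0.25])    # assumed: first column below the diagonal
TR = np.array([0.9, 0.4, 0.1])     # assumed: first row right of the diagonal
Z = np.array([1.0, 2.0, 3.0, 4.0])

T = toeplitz(np.r_[T0, TC], np.r_[T0, TR])   # full (M+1) x (M+1) matrix
X = TOEPLITZ(T0, TC, TR, Z)                  # O(M^2) Levinson-style solve
assert np.allclose(T @ X, Z)                 # agrees with the direct solution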
bcbio/bcbio-nextgen | bcbio/rnaseq/gtf.py | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/rnaseq/gtf.py#L263-L282 | def _biotype_lookup_fn(gtf):
"""
return a function that will look up the biotype of a feature
this checks for either gene_biotype or biotype being set or for the source
column to have biotype information
"""
db = get_gtf_db(gtf)
sources = set([feature.source for feature in db.all_features()])
gene_biotypes = set([feature.attributes.get("gene_biotype", [None])[0]
for feature in db.all_features()])
biotypes = set([feature.attributes.get("biotype", [None])[0]
for feature in db.all_features()])
if "protein_coding" in sources:
return lambda feature: feature.source
elif "protein_coding" in biotypes:
return lambda feature: feature.attributes.get("biotype", [None])[0]
elif "protein_coding" in gene_biotypes:
return lambda feature: feature.attributes.get("gene_biotype", [None])[0]
else:
return None | [
"def",
"_biotype_lookup_fn",
"(",
"gtf",
")",
":",
"db",
"=",
"get_gtf_db",
"(",
"gtf",
")",
"sources",
"=",
"set",
"(",
"[",
"feature",
".",
"source",
"for",
"feature",
"in",
"db",
".",
"all_features",
"(",
")",
"]",
")",
"gene_biotypes",
"=",
"set",
"(",
"[",
"feature",
".",
"attributes",
".",
"get",
"(",
"\"gene_biotype\"",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"for",
"feature",
"in",
"db",
".",
"all_features",
"(",
")",
"]",
")",
"biotypes",
"=",
"set",
"(",
"[",
"feature",
".",
"attributes",
".",
"get",
"(",
"\"biotype\"",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"for",
"feature",
"in",
"db",
".",
"all_features",
"(",
")",
"]",
")",
"if",
"\"protein_coding\"",
"in",
"sources",
":",
"return",
"lambda",
"feature",
":",
"feature",
".",
"source",
"elif",
"\"protein_coding\"",
"in",
"biotypes",
":",
"return",
"lambda",
"feature",
":",
"feature",
".",
"attributes",
".",
"get",
"(",
"\"biotype\"",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"elif",
"\"protein_coding\"",
"in",
"gene_biotypes",
":",
"return",
"lambda",
"feature",
":",
"feature",
".",
"attributes",
".",
"get",
"(",
"\"gene_biotype\"",
",",
"[",
"None",
"]",
")",
"[",
"0",
"]",
"else",
":",
"return",
"None"
] | return a function that will look up the biotype of a feature
this checks for either gene_biotype or biotype being set or for the source
column to have biotype information | [
"return",
"a",
"function",
"that",
"will",
"look",
"up",
"the",
"biotype",
"of",
"a",
"feature",
"this",
"checks",
"for",
"either",
"gene_biotype",
"or",
"biotype",
"being",
"set",
"or",
"for",
"the",
"source",
"column",
"to",
"have",
"biotype",
"information"
] | python | train |
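The returned lambdas rely on the gffutils convention that attribute values are lists, hence the .get(key, [None])[0] unwrapping; a mock feature (not a real gffutils type) makes that concrete:

class FakeFeature:                 # mock of a gffutils feature
    def __init__(self, source, attributes):
        self.source = source
        self.attributes = attributes

lookup = lambda feature: feature.attributes.get("gene_biotype", [None])[0]
assert lookup(FakeFeature("havana", {"gene_biotype": ["protein_coding"]})) == "protein_coding"
assert lookup(FakeFeature("havana", {})) is None   # missing key -> None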
10gen/mongo-orchestration | mongo_orchestration/servers.py | https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/servers.py#L283-L313 | def info(self):
"""return info about server as dict object"""
proc_info = {"name": self.name,
"params": self.cfg,
"alive": self.is_alive,
"optfile": self.config_path}
if self.is_alive:
proc_info['pid'] = self.proc.pid
logger.debug("proc_info: {proc_info}".format(**locals()))
mongodb_uri = ''
server_info = {}
status_info = {}
if self.hostname and self.cfg.get('port', None):
try:
c = self.connection
server_info = c.server_info()
logger.debug("server_info: {server_info}".format(**locals()))
mongodb_uri = 'mongodb://' + self.hostname
status_info = {"primary": c.is_primary, "mongos": c.is_mongos}
logger.debug("status_info: {status_info}".format(**locals()))
except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure, pymongo.errors.ConnectionFailure):
server_info = {}
status_info = {}
result = {"mongodb_uri": mongodb_uri, "statuses": status_info,
"serverInfo": server_info, "procInfo": proc_info,
"orchestration": 'servers'}
if self.login:
result['mongodb_auth_uri'] = self.mongodb_auth_uri(self.hostname)
logger.debug("return {result}".format(result=result))
return result | [
"def",
"info",
"(",
"self",
")",
":",
"proc_info",
"=",
"{",
"\"name\"",
":",
"self",
".",
"name",
",",
"\"params\"",
":",
"self",
".",
"cfg",
",",
"\"alive\"",
":",
"self",
".",
"is_alive",
",",
"\"optfile\"",
":",
"self",
".",
"config_path",
"}",
"if",
"self",
".",
"is_alive",
":",
"proc_info",
"[",
"'pid'",
"]",
"=",
"self",
".",
"proc",
".",
"pid",
"logger",
".",
"debug",
"(",
"\"proc_info: {proc_info}\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")",
"mongodb_uri",
"=",
"''",
"server_info",
"=",
"{",
"}",
"status_info",
"=",
"{",
"}",
"if",
"self",
".",
"hostname",
"and",
"self",
".",
"cfg",
".",
"get",
"(",
"'port'",
",",
"None",
")",
":",
"try",
":",
"c",
"=",
"self",
".",
"connection",
"server_info",
"=",
"c",
".",
"server_info",
"(",
")",
"logger",
".",
"debug",
"(",
"\"server_info: {server_info}\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")",
"mongodb_uri",
"=",
"'mongodb://'",
"+",
"self",
".",
"hostname",
"status_info",
"=",
"{",
"\"primary\"",
":",
"c",
".",
"is_primary",
",",
"\"mongos\"",
":",
"c",
".",
"is_mongos",
"}",
"logger",
".",
"debug",
"(",
"\"status_info: {status_info}\"",
".",
"format",
"(",
"*",
"*",
"locals",
"(",
")",
")",
")",
"except",
"(",
"pymongo",
".",
"errors",
".",
"AutoReconnect",
",",
"pymongo",
".",
"errors",
".",
"OperationFailure",
",",
"pymongo",
".",
"errors",
".",
"ConnectionFailure",
")",
":",
"server_info",
"=",
"{",
"}",
"status_info",
"=",
"{",
"}",
"result",
"=",
"{",
"\"mongodb_uri\"",
":",
"mongodb_uri",
",",
"\"statuses\"",
":",
"status_info",
",",
"\"serverInfo\"",
":",
"server_info",
",",
"\"procInfo\"",
":",
"proc_info",
",",
"\"orchestration\"",
":",
"'servers'",
"}",
"if",
"self",
".",
"login",
":",
"result",
"[",
"'mongodb_auth_uri'",
"]",
"=",
"self",
".",
"mongodb_auth_uri",
"(",
"self",
".",
"hostname",
")",
"logger",
".",
"debug",
"(",
"\"return {result}\"",
".",
"format",
"(",
"result",
"=",
"result",
")",
")",
"return",
"result"
] | return info about server as dict object | [
"return",
"info",
"about",
"server",
"as",
"dict",
"object"
] | python | train |
Microsoft/malmo | MalmoEnv/malmoenv/bootstrap.py | https://github.com/Microsoft/malmo/blob/4139cd6f3e52f6e893a931a1d4b70d35f8e70e5a/MalmoEnv/malmoenv/bootstrap.py#L68-L88 | def launch_minecraft(port, installdir="MalmoPlatform", replaceable=False):
"""Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false).
"""
launch_script = './launchClient.sh'
if os.name == 'nt':
launch_script = 'launchClient.bat'
cwd = os.getcwd()
os.chdir(installdir)
os.chdir("Minecraft")
try:
cmd = [launch_script, '-port', str(port), '-env']
if replaceable:
cmd.append('-replaceable')
subprocess.check_call(cmd)
finally:
os.chdir(cwd) | [
"def",
"launch_minecraft",
"(",
"port",
",",
"installdir",
"=",
"\"MalmoPlatform\"",
",",
"replaceable",
"=",
"False",
")",
":",
"launch_script",
"=",
"'./launchClient.sh'",
"if",
"os",
".",
"name",
"==",
"'nt'",
":",
"launch_script",
"=",
"'launchClient.bat'",
"cwd",
"=",
"os",
".",
"getcwd",
"(",
")",
"os",
".",
"chdir",
"(",
"installdir",
")",
"os",
".",
"chdir",
"(",
"\"Minecraft\"",
")",
"try",
":",
"cmd",
"=",
"[",
"launch_script",
",",
"'-port'",
",",
"str",
"(",
"port",
")",
",",
"'-env'",
"]",
"if",
"replaceable",
":",
"cmd",
".",
"append",
"(",
"'-replaceable'",
")",
"subprocess",
".",
"check_call",
"(",
"cmd",
")",
"finally",
":",
"os",
".",
"chdir",
"(",
"cwd",
")"
] | Launch Minecraft listening for malmoenv connections.
Args:
port: the TCP port to listen on.
installdir: the install dir name. Defaults to MalmoPlatform.
Must be same as given (or defaulted) in download call if used.
replaceable: whether or not to automatically restart Minecraft (default is false). | [
"Launch",
"Minecraft",
"listening",
"for",
"malmoenv",
"connections",
".",
"Args",
":",
"port",
":",
"the",
"TCP",
"port",
"to",
"listen",
"on",
".",
"installdir",
":",
"the",
"install",
"dir",
"name",
".",
"Defaults",
"to",
"MalmoPlatform",
".",
"Must",
"be",
"same",
"as",
"given",
"(",
"or",
"defaulted",
")",
"in",
"download",
"call",
"if",
"used",
".",
"replaceable",
":",
"whether",
"or",
"not",
"to",
"automatically",
"restart",
"Minecraft",
"(",
"default",
"is",
"false",
")",
"."
] | python | train |
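A usage sketch; the port is arbitrary, and the install directory must match the one used when Malmo was downloaded. Note the call blocks while Minecraft runs, since it wraps subprocess.check_call:

from malmoenv import bootstrap

# Listens for malmoenv connections on port 9000 and auto-restarts on exit.
bootstrap.launch_minecraft(9000, installdir="MalmoPlatform", replaceable=True)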
JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L12729-L12739 | def gps_rtcm_data_send(self, flags, len, data, force_mavlink1=False):
'''
WORK IN PROGRESS! RTCM message for injecting into the onboard GPS
(used for DGPS)
flags : LSB: 1 means message is fragmented (uint8_t)
len : data length (uint8_t)
data : RTCM message (may be fragmented) (uint8_t)
'''
return self.send(self.gps_rtcm_data_encode(flags, len, data), force_mavlink1=force_mavlink1) | [
"def",
"gps_rtcm_data_send",
"(",
"self",
",",
"flags",
",",
"len",
",",
"data",
",",
"force_mavlink1",
"=",
"False",
")",
":",
"return",
"self",
".",
"send",
"(",
"self",
".",
"gps_rtcm_data_encode",
"(",
"flags",
",",
"len",
",",
"data",
")",
",",
"force_mavlink1",
"=",
"force_mavlink1",
")"
] | WORK IN PROGRESS! RTCM message for injecting into the onboard GPS
(used for DGPS)
flags : LSB: 1 means message is fragmented (uint8_t)
len : data length (uint8_t)
data : RTCM message (may be fragmented) (uint8_t) | [
"WORK",
"IN",
"PROGRESS!",
"RTCM",
"message",
"for",
"injecting",
"into",
"the",
"onboard",
"GPS",
"(",
"used",
"for",
"DGPS",
")"
] | python | train |
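A hedged usage sketch; the endpoint is made up, and in this MAVLink 1.0 dialect the data field is a fixed 180-byte uint8 array, so short RTCM fragments are zero-padded:

from pymavlink import mavutil

master = mavutil.mavlink_connection('udpout:127.0.0.1:14550')  # illustrative endpoint
fragment = b'\xd3\x00\x13' + b'\x00' * 177    # RTCM3 fragment padded to 180 bytes
master.mav.gps_rtcm_data_send(0,              # flags: LSB 0 -> not fragmented
                              3,              # len: meaningful bytes only
                              fragment)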
datajoint/datajoint-python | datajoint/connection.py | https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/connection.py#L20-L44 | def conn(host=None, user=None, password=None, init_fun=None, reset=False):
"""
Returns a persistent connection object to be shared by multiple modules.
If the connection is not yet established or reset=True, a new connection is set up.
If connection information is not provided, it is taken from config which takes the
information from dj_local_conf.json. If the password is not specified in that file
datajoint prompts for the password.
:param host: hostname
:param user: mysql user
:param password: mysql password
:param init_fun: initialization function
:param reset: whether the connection should be reset or not
"""
if not hasattr(conn, 'connection') or reset:
host = host if host is not None else config['database.host']
user = user if user is not None else config['database.user']
password = password if password is not None else config['database.password']
if user is None: # pragma: no cover
user = input("Please enter DataJoint username: ")
if password is None: # pragma: no cover
password = getpass(prompt="Please enter DataJoint password: ")
init_fun = init_fun if init_fun is not None else config['connection.init_function']
conn.connection = Connection(host, user, password, init_fun)
return conn.connection | [
"def",
"conn",
"(",
"host",
"=",
"None",
",",
"user",
"=",
"None",
",",
"password",
"=",
"None",
",",
"init_fun",
"=",
"None",
",",
"reset",
"=",
"False",
")",
":",
"if",
"not",
"hasattr",
"(",
"conn",
",",
"'connection'",
")",
"or",
"reset",
":",
"host",
"=",
"host",
"if",
"host",
"is",
"not",
"None",
"else",
"config",
"[",
"'database.host'",
"]",
"user",
"=",
"user",
"if",
"user",
"is",
"not",
"None",
"else",
"config",
"[",
"'database.user'",
"]",
"password",
"=",
"password",
"if",
"password",
"is",
"not",
"None",
"else",
"config",
"[",
"'database.password'",
"]",
"if",
"user",
"is",
"None",
":",
"# pragma: no cover",
"user",
"=",
"input",
"(",
"\"Please enter DataJoint username: \"",
")",
"if",
"password",
"is",
"None",
":",
"# pragma: no cover",
"password",
"=",
"getpass",
"(",
"prompt",
"=",
"\"Please enter DataJoint password: \"",
")",
"init_fun",
"=",
"init_fun",
"if",
"init_fun",
"is",
"not",
"None",
"else",
"config",
"[",
"'connection.init_function'",
"]",
"conn",
".",
"connection",
"=",
"Connection",
"(",
"host",
",",
"user",
",",
"password",
",",
"init_fun",
")",
"return",
"conn",
".",
"connection"
] | Returns a persistent connection object to be shared by multiple modules.
If the connection is not yet established or reset=True, a new connection is set up.
If connection information is not provided, it is taken from config which takes the
information from dj_local_conf.json. If the password is not specified in that file
datajoint prompts for the password.
:param host: hostname
:param user: mysql user
:param password: mysql password
:param init_fun: initialization function
:param reset: whether the connection should be reset or not | [
"Returns",
"a",
"persistent",
"connection",
"object",
"to",
"be",
"shared",
"by",
"multiple",
"modules",
".",
"If",
"the",
"connection",
"is",
"not",
"yet",
"established",
"or",
"reset",
"=",
"True",
"a",
"new",
"connection",
"is",
"set",
"up",
".",
"If",
"connection",
"information",
"is",
"not",
"provided",
"it",
"is",
"taken",
"from",
"config",
"which",
"takes",
"the",
"information",
"from",
"dj_local_conf",
".",
"json",
".",
"If",
"the",
"password",
"is",
"not",
"specified",
"in",
"that",
"file",
"datajoint",
"prompts",
"for",
"the",
"password",
"."
] | python | train |
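Typical use goes through the package-level helper; the credentials below are placeholders:

import datajoint as dj

dj.config['database.host'] = 'localhost'     # placeholder values
dj.config['database.user'] = 'alice'
dj.config['database.password'] = 'secret'

c1 = dj.conn()              # creates and caches conn.connection
c2 = dj.conn()              # reuses the same persistent object
assert c1 is c2
c3 = dj.conn(reset=True)    # forces a fresh Connection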
mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L452-L465 | def get_connected_devices():
"""
Return an array of all mbed boards connected
"""
all_daplinks = []
all_interfaces = _get_interfaces()
for interface in all_interfaces:
try:
new_daplink = DAPAccessCMSISDAP(None, interface=interface)
all_daplinks.append(new_daplink)
except DAPAccessIntf.TransferError:
logger = logging.getLogger(__name__)
logger.error('Failed to get unique id', exc_info=session.Session.get_current().log_tracebacks)
return all_daplinks | [
"def",
"get_connected_devices",
"(",
")",
":",
"all_daplinks",
"=",
"[",
"]",
"all_interfaces",
"=",
"_get_interfaces",
"(",
")",
"for",
"interface",
"in",
"all_interfaces",
":",
"try",
":",
"new_daplink",
"=",
"DAPAccessCMSISDAP",
"(",
"None",
",",
"interface",
"=",
"interface",
")",
"all_daplinks",
".",
"append",
"(",
"new_daplink",
")",
"except",
"DAPAccessIntf",
".",
"TransferError",
":",
"logger",
"=",
"logging",
".",
"getLogger",
"(",
"__name__",
")",
"logger",
".",
"error",
"(",
"'Failed to get unique id'",
",",
"exc_info",
"=",
"session",
".",
"Session",
".",
"get_current",
"(",
")",
".",
"log_tracebacks",
")",
"return",
"all_daplinks"
] | Return an array of all mbed boards connected | [
"Return",
"an",
"array",
"of",
"all",
"mbed",
"boards",
"connected"
] | python | train |
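An enumeration sketch through the interface class; the import location and the get_unique_id accessor are assumptions based on pyOCD's probe API, not confirmed by this record:

from pyocd.probe.pydapaccess import DAPAccess

for probe in DAPAccess.get_connected_devices():
    print(probe.get_unique_id())   # assumed accessor for the board's unique ID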
pgjones/quart | quart/ctx.py | https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/ctx.py#L368-L373 | def pop(self, name: str, default: Any=_sentinel) -> Any:
"""Pop, get and remove the named attribute of this instance."""
if default is _sentinel:
return self.__dict__.pop(name)
else:
return self.__dict__.pop(name, default) | [
"def",
"pop",
"(",
"self",
",",
"name",
":",
"str",
",",
"default",
":",
"Any",
"=",
"_sentinel",
")",
"->",
"Any",
":",
"if",
"default",
"is",
"_sentinel",
":",
"return",
"self",
".",
"__dict__",
".",
"pop",
"(",
"name",
")",
"else",
":",
"return",
"self",
".",
"__dict__",
".",
"pop",
"(",
"name",
",",
"default",
")"
] | Pop, get and remove the named attribute of this instance. | [
"Pop",
"get",
"and",
"remove",
"the",
"named",
"attribute",
"of",
"this",
"instance",
"."
] | python | train |
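In application code this is the pop you call on Quart's g object inside a request; a minimal sketch:

from quart import Quart, g

app = Quart(__name__)

@app.route("/")
async def index():
    g.user = "alice"
    user = g.pop("user")            # returns "alice", attribute removed
    again = g.pop("user", None)     # default supplied -> None, no KeyError
    return f"{user} {again}"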
CalebBell/thermo | thermo/chemical.py | https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/chemical.py#L1604-L1625 | def Cps(self):
r'''Solid-phase heat capacity of the chemical at its current temperature,
in units of [J/kg/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacitySolid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('palladium', T=400).Cps
241.63563239992484
>>> Pd = Chemical('palladium', T=400)
>>> Cpsms = [Pd.HeatCapacitySolid.T_dependent_property(T) for T in np.linspace(300,500, 5)]
>>> [property_molar_to_mass(Cps, Pd.MW) for Cps in Cpsms]
[234.40150347679008, 238.01856793835751, 241.63563239992484, 245.25269686149224, 248.86976132305958]
'''
Cpsm = self.HeatCapacitySolid(self.T)
if Cpsm:
return property_molar_to_mass(Cpsm, self.MW)
return None | [
"def",
"Cps",
"(",
"self",
")",
":",
"Cpsm",
"=",
"self",
".",
"HeatCapacitySolid",
"(",
"self",
".",
"T",
")",
"if",
"Cpsm",
":",
"return",
"property_molar_to_mass",
"(",
"Cpsm",
",",
"self",
".",
"MW",
")",
"return",
"None"
] | r'''Solid-phase heat capacity of the chemical at its current temperature,
in units of [J/kg/K]. For calculation of this property at other
temperatures, or specifying manually the method used to calculate it,
and more - see the object oriented interface
:obj:`thermo.heat_capacity.HeatCapacitySolid`; each Chemical instance
creates one to actually perform the calculations. Note that that
interface provides output in molar units.
Examples
--------
>>> Chemical('palladium', T=400).Cps
241.63563239992484
>>> Pd = Chemical('palladium', T=400)
>>> Cpsms = [Pd.HeatCapacitySolid.T_dependent_property(T) for T in np.linspace(300,500, 5)]
>>> [property_molar_to_mass(Cps, Pd.MW) for Cps in Cpsms]
[234.40150347679008, 238.01856793835751, 241.63563239992484, 245.25269686149224, 248.86976132305958] | [
"r",
"Solid",
"-",
"phase",
"heat",
"capacity",
"of",
"the",
"chemical",
"at",
"its",
"current",
"temperature",
"in",
"units",
"of",
"[",
"J",
"/",
"kg",
"/",
"K",
"]",
".",
"For",
"calculation",
"of",
"this",
"property",
"at",
"other",
"temperatures",
"or",
"specifying",
"manually",
"the",
"method",
"used",
"to",
"calculate",
"it",
"and",
"more",
"-",
"see",
"the",
"object",
"oriented",
"interface",
":",
"obj",
":",
"thermo",
".",
"heat_capacity",
".",
"HeatCapacitySolid",
";",
"each",
"Chemical",
"instance",
"creates",
"one",
"to",
"actually",
"perform",
"the",
"calculations",
".",
"Note",
"that",
"that",
"interface",
"provides",
"output",
"in",
"molar",
"units",
"."
] | python | valid |
openstack/proliantutils | proliantutils/ilo/ris.py | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L752-L764 | def reset_secure_boot_keys(self):
"""Reset secure boot keys to manufacturing defaults.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('ResetToDefaultKeys', True)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg) | [
"def",
"reset_secure_boot_keys",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_boot_mode_uefi",
"(",
")",
":",
"self",
".",
"_change_secure_boot_settings",
"(",
"'ResetToDefaultKeys'",
",",
"True",
")",
"else",
":",
"msg",
"=",
"(",
"'System is not in UEFI boot mode. \"SecureBoot\" related '",
"'resources cannot be changed.'",
")",
"raise",
"exception",
".",
"IloCommandNotSupportedInBiosError",
"(",
"msg",
")"
] | Reset secure boot keys to manufacturing defaults.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Reset",
"secure",
"boot",
"keys",
"to",
"manufacturing",
"defaults",
"."
] | python | train |
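A usage sketch; the iLO address and credentials are placeholders:

from proliantutils.ilo import ris

client = ris.RISOperations('10.0.0.5', 'Administrator', 'password')
# Valid only when the server boots in UEFI mode; otherwise
# IloCommandNotSupportedInBiosError is raised.
client.reset_secure_boot_keys()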
stevepeak/inquiry | inquiry/helpers.py | https://github.com/stevepeak/inquiry/blob/f6ea435c302560ba19985b5d4ce2c97e2f321508/inquiry/helpers.py#L18-L33 | def _merge_fix(d):
"""Fixes keys that start with "&" and "-"
d = {
"&steve": 10,
"-gary": 4
}
result = {
"steve": 10,
"gary": 4
}
"""
if type(d) is dict:
for key in d.keys():
if key[0] in ('&', '-'):
d[key[1:]] = _merge_fix(d.pop(key))
return d | [
"def",
"_merge_fix",
"(",
"d",
")",
":",
"if",
"type",
"(",
"d",
")",
"is",
"dict",
":",
"for",
"key",
"in",
"d",
".",
"keys",
"(",
")",
":",
"if",
"key",
"[",
"0",
"]",
"in",
"(",
"'&'",
",",
"'-'",
")",
":",
"d",
"[",
"key",
"[",
"1",
":",
"]",
"]",
"=",
"_merge_fix",
"(",
"d",
".",
"pop",
"(",
"key",
")",
")",
"return",
"d"
] | Fixes keys that start with "&" and "-"
d = {
"&steve": 10,
"-gary": 4
}
result = {
"steve": 10,
"gary": 4
} | [
"Fixes",
"keys",
"that",
"start",
"with",
"&",
"and",
"-",
"d",
"=",
"{",
"&steve",
":",
"10",
"-",
"gary",
":",
"4",
"}",
"result",
"=",
"{",
"steve",
":",
"10",
"gary",
":",
"4",
"}"
] | python | train |
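One subtlety: the record iterates d.keys() while popping and re-inserting, which was safe on Python 2's list-returning keys() but is fragile against Python 3's live view. A snapshot-based variant with the same behavior:

def merge_fix(d):
    # Same key-fixing logic, but iterate over a snapshot so the dict
    # can be mutated safely under Python 3.
    if isinstance(d, dict):
        for key in list(d.keys()):
            if key[0] in ('&', '-'):
                d[key[1:]] = merge_fix(d.pop(key))
    return d

assert merge_fix({"&steve": 10, "-gary": 4}) == {"steve": 10, "gary": 4}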
bitesofcode/projexui | projexui/widgets/xoverlaywizard.py | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xoverlaywizard.py#L558-L618 | def next(self):
"""
Goes to the next page for this wizard.
"""
curr_page = self.currentPage()
if not curr_page:
return
elif not curr_page.validatePage():
return
pageId = curr_page.nextId()
try:
next_page = self._pages[pageId]
except KeyError:
return
self._currentId = pageId
self._navigation.append(pageId)
y = curr_page.y()
next_page.move(self.width(), y)
# animate the current page out
anim_in = QtCore.QPropertyAnimation(self)
anim_in.setTargetObject(curr_page)
anim_in.setPropertyName('pos')
anim_in.setStartValue(curr_page.pos())
anim_in.setEndValue(QtCore.QPoint(-curr_page.width(), y))
anim_in.setDuration(self.animationSpeed())
anim_in.setEasingCurve(QtCore.QEasingCurve.Linear)
# animate the next page in
anim_out = QtCore.QPropertyAnimation(self)
anim_out.setTargetObject(next_page)
anim_out.setPropertyName('pos')
anim_out.setStartValue(next_page.pos())
anim_out.setEndValue(curr_page.pos())
anim_out.setDuration(self.animationSpeed())
anim_out.setEasingCurve(QtCore.QEasingCurve.Linear)
# create the anim group
anim_grp = QtCore.QParallelAnimationGroup(self)
anim_grp.addAnimation(anim_in)
anim_grp.addAnimation(anim_out)
anim_grp.finished.connect(curr_page.hide)
anim_grp.finished.connect(anim_grp.deleteLater)
next_page.show()
# update the button states
self._buttons[self.WizardButton.BackButton].setVisible(True)
self._buttons[self.WizardButton.NextButton].setVisible(self.canGoForward())
self._buttons[self.WizardButton.RetryButton].setVisible(self.canRetry())
self._buttons[self.WizardButton.CommitButton].setVisible(next_page.isCommitPage())
self._buttons[self.WizardButton.FinishButton].setVisible(next_page.isFinalPage())
self.adjustSize()
# initialize the new page
self.currentIdChanged.emit(pageId)
next_page.initializePage()
anim_grp.start() | [
"def",
"next",
"(",
"self",
")",
":",
"curr_page",
"=",
"self",
".",
"currentPage",
"(",
")",
"if",
"not",
"curr_page",
":",
"return",
"elif",
"not",
"curr_page",
".",
"validatePage",
"(",
")",
":",
"return",
"pageId",
"=",
"curr_page",
".",
"nextId",
"(",
")",
"try",
":",
"next_page",
"=",
"self",
".",
"_pages",
"[",
"pageId",
"]",
"except",
"KeyError",
":",
"return",
"self",
".",
"_currentId",
"=",
"pageId",
"self",
".",
"_navigation",
".",
"append",
"(",
"pageId",
")",
"y",
"=",
"curr_page",
".",
"y",
"(",
")",
"next_page",
".",
"move",
"(",
"self",
".",
"width",
"(",
")",
",",
"y",
")",
"# animate the last page in",
"anim_in",
"=",
"QtCore",
".",
"QPropertyAnimation",
"(",
"self",
")",
"anim_in",
".",
"setTargetObject",
"(",
"curr_page",
")",
"anim_in",
".",
"setPropertyName",
"(",
"'pos'",
")",
"anim_in",
".",
"setStartValue",
"(",
"curr_page",
".",
"pos",
"(",
")",
")",
"anim_in",
".",
"setEndValue",
"(",
"QtCore",
".",
"QPoint",
"(",
"-",
"curr_page",
".",
"width",
"(",
")",
",",
"y",
")",
")",
"anim_in",
".",
"setDuration",
"(",
"self",
".",
"animationSpeed",
"(",
")",
")",
"anim_in",
".",
"setEasingCurve",
"(",
"QtCore",
".",
"QEasingCurve",
".",
"Linear",
")",
"# animate the current page out",
"anim_out",
"=",
"QtCore",
".",
"QPropertyAnimation",
"(",
"self",
")",
"anim_out",
".",
"setTargetObject",
"(",
"next_page",
")",
"anim_out",
".",
"setPropertyName",
"(",
"'pos'",
")",
"anim_out",
".",
"setStartValue",
"(",
"next_page",
".",
"pos",
"(",
")",
")",
"anim_out",
".",
"setEndValue",
"(",
"curr_page",
".",
"pos",
"(",
")",
")",
"anim_out",
".",
"setDuration",
"(",
"self",
".",
"animationSpeed",
"(",
")",
")",
"anim_out",
".",
"setEasingCurve",
"(",
"QtCore",
".",
"QEasingCurve",
".",
"Linear",
")",
"# create the anim group",
"anim_grp",
"=",
"QtCore",
".",
"QParallelAnimationGroup",
"(",
"self",
")",
"anim_grp",
".",
"addAnimation",
"(",
"anim_in",
")",
"anim_grp",
".",
"addAnimation",
"(",
"anim_out",
")",
"anim_grp",
".",
"finished",
".",
"connect",
"(",
"curr_page",
".",
"hide",
")",
"anim_grp",
".",
"finished",
".",
"connect",
"(",
"anim_grp",
".",
"deleteLater",
")",
"next_page",
".",
"show",
"(",
")",
"# update the button states",
"self",
".",
"_buttons",
"[",
"self",
".",
"WizardButton",
".",
"BackButton",
"]",
".",
"setVisible",
"(",
"True",
")",
"self",
".",
"_buttons",
"[",
"self",
".",
"WizardButton",
".",
"NextButton",
"]",
".",
"setVisible",
"(",
"self",
".",
"canGoForward",
"(",
")",
")",
"self",
".",
"_buttons",
"[",
"self",
".",
"WizardButton",
".",
"RetryButton",
"]",
".",
"setVisible",
"(",
"self",
".",
"canRetry",
"(",
")",
")",
"self",
".",
"_buttons",
"[",
"self",
".",
"WizardButton",
".",
"CommitButton",
"]",
".",
"setVisible",
"(",
"next_page",
".",
"isCommitPage",
"(",
")",
")",
"self",
".",
"_buttons",
"[",
"self",
".",
"WizardButton",
".",
"FinishButton",
"]",
".",
"setVisible",
"(",
"next_page",
".",
"isFinalPage",
"(",
")",
")",
"self",
".",
"adjustSize",
"(",
")",
"# initialize the new page",
"self",
".",
"currentIdChanged",
".",
"emit",
"(",
"pageId",
")",
"next_page",
".",
"initializePage",
"(",
")",
"anim_grp",
".",
"start",
"(",
")"
] | Goes to the next page for this wizard. | [
"Goes",
"to",
"the",
"previous",
"page",
"for",
"this",
"wizard",
"."
] | python | train |
HDI-Project/ballet | ballet/util/io.py | https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/util/io.py#L20-L33 | def write_tabular(obj, filepath):
"""Write tabular object in HDF5 or pickle format
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl'
"""
_, fn, ext = splitext2(filepath)
if ext == '.h5':
_write_tabular_h5(obj, filepath)
elif ext == '.pkl':
_write_tabular_pickle(obj, filepath)
else:
raise NotImplementedError | [
"def",
"write_tabular",
"(",
"obj",
",",
"filepath",
")",
":",
"_",
",",
"fn",
",",
"ext",
"=",
"splitext2",
"(",
"filepath",
")",
"if",
"ext",
"==",
"'.h5'",
":",
"_write_tabular_h5",
"(",
"obj",
",",
"filepath",
")",
"elif",
"ext",
"==",
"'.pkl'",
":",
"_write_tabular_pickle",
"(",
"obj",
",",
"filepath",
")",
"else",
":",
"raise",
"NotImplementedError"
] | Write tabular object in HDF5 or pickle format
Args:
obj (array or DataFrame): tabular object to write
filepath (path-like): path to write to; must end in '.h5' or '.pkl' | [
"Write",
"tabular",
"object",
"in",
"HDF5",
"or",
"pickle",
"format"
] | python | train |
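A usage sketch of the extension dispatch; paths are illustrative and the HDF5 branch additionally needs a working pytables install:

import pandas as pd
from ballet.util.io import write_tabular

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
write_tabular(df, "/tmp/table.pkl")    # pickle branch
write_tabular(df, "/tmp/table.h5")     # HDF5 branch
try:
    write_tabular(df, "/tmp/table.csv")
except NotImplementedError:
    pass                               # any other extension is rejected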
QuantEcon/QuantEcon.py | quantecon/markov/ddp.py | https://github.com/QuantEcon/QuantEcon.py/blob/26a66c552f2a73967d7efb6e1f4b4c4985a12643/quantecon/markov/ddp.py#L455-L485 | def to_sa_pair_form(self, sparse=True):
"""
Convert this instance of `DiscreteDP` to SA-pair form
Parameters
----------
sparse : bool, optional(default=True)
Should the `Q` matrix be stored as a sparse matrix?
If true the CSR format is used
Returns
-------
ddp_sa : DiscreteDP
The corresponding DiscreteDP instance in SA-pair form
Notes
-----
If this instance is already in SA-pair form then it is returned
un-modified
"""
if self._sa_pair:
return self
else:
s_ind, a_ind = np.where(self.R > - np.inf)
RL = self.R[s_ind, a_ind]
if sparse:
QL = sp.csr_matrix(self.Q[s_ind, a_ind])
else:
QL = self.Q[s_ind, a_ind]
return DiscreteDP(RL, QL, self.beta, s_ind, a_ind) | [
"def",
"to_sa_pair_form",
"(",
"self",
",",
"sparse",
"=",
"True",
")",
":",
"if",
"self",
".",
"_sa_pair",
":",
"return",
"self",
"else",
":",
"s_ind",
",",
"a_ind",
"=",
"np",
".",
"where",
"(",
"self",
".",
"R",
">",
"-",
"np",
".",
"inf",
")",
"RL",
"=",
"self",
".",
"R",
"[",
"s_ind",
",",
"a_ind",
"]",
"if",
"sparse",
":",
"QL",
"=",
"sp",
".",
"csr_matrix",
"(",
"self",
".",
"Q",
"[",
"s_ind",
",",
"a_ind",
"]",
")",
"else",
":",
"QL",
"=",
"self",
".",
"Q",
"[",
"s_ind",
",",
"a_ind",
"]",
"return",
"DiscreteDP",
"(",
"RL",
",",
"QL",
",",
"self",
".",
"beta",
",",
"s_ind",
",",
"a_ind",
")"
] | Convert this instance of `DiscreteDP` to SA-pair form
Parameters
----------
sparse : bool, optional(default=True)
Should the `Q` matrix be stored as a sparse matrix?
If true the CSR format is used
Returns
-------
ddp_sa : DiscreteDP
The corresponding DiscreteDP instance in SA-pair form
Notes
-----
If this instance is already in SA-pair form then it is returned
un-modified | [
"Convert",
"this",
"instance",
"of",
"DiscreteDP",
"to",
"SA",
"-",
"pair",
"form"
] | python | train |
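A small dense-form problem converted to SA-pair form; the -inf entry marks an infeasible pair, which is exactly what the R > -np.inf mask filters out (attribute shapes are read off the method body above):

import numpy as np
from quantecon.markov import DiscreteDP

R = np.array([[5.0, 10.0],
              [-1.0, -np.inf]])        # (s=1, a=1) is infeasible
Q = np.empty((2, 2, 2))
Q[0, 0] = [0.5, 0.5]; Q[0, 1] = [0.0, 1.0]
Q[1, 0] = [0.0, 1.0]; Q[1, 1] = [0.5, 0.5]   # row for the infeasible pair is ignored
ddp = DiscreteDP(R, Q, 0.95)

ddp_sa = ddp.to_sa_pair_form()         # sparse CSR Q by default
assert ddp_sa.R.shape == (3,)          # one reward per feasible (s, a) pair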
uralbash/pyramid_pages | pyramid_pages/resources.py | https://github.com/uralbash/pyramid_pages/blob/545b1ecb2e5dee5742135ba2a689b9635dd4efa1/pyramid_pages/resources.py#L156-L166 | def models_of_config(config):
""" Return list of models from all resources in config.
"""
resources = resources_of_config(config)
models = []
for resource in resources:
if not hasattr(resource, '__table__') and hasattr(resource, 'model'):
models.append(resource.model)
else:
models.append(resource)
return models | [
"def",
"models_of_config",
"(",
"config",
")",
":",
"resources",
"=",
"resources_of_config",
"(",
"config",
")",
"models",
"=",
"[",
"]",
"for",
"resource",
"in",
"resources",
":",
"if",
"not",
"hasattr",
"(",
"resource",
",",
"'__table__'",
")",
"and",
"hasattr",
"(",
"resource",
",",
"'model'",
")",
":",
"models",
".",
"append",
"(",
"resource",
".",
"model",
")",
"else",
":",
"models",
".",
"append",
"(",
"resource",
")",
"return",
"models"
] | Return list of models from all resources in config. | [
"Return",
"list",
"of",
"models",
"from",
"all",
"resources",
"in",
"config",
"."
] | python | train |
rigetti/pyquil | pyquil/paulis.py | https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/paulis.py#L159-L173 | def copy(self):
"""
Properly creates a new PauliTerm, with a completely new dictionary
of operators
"""
new_term = PauliTerm("I", 0, 1.0) # create new object
# manually copy all attributes over
for key in self.__dict__.keys():
val = self.__dict__[key]
if isinstance(val, (dict, list, set)): # mutable types
new_term.__dict__[key] = copy.copy(val)
else: # immutable types
new_term.__dict__[key] = val
return new_term | [
"def",
"copy",
"(",
"self",
")",
":",
"new_term",
"=",
"PauliTerm",
"(",
"\"I\"",
",",
"0",
",",
"1.0",
")",
"# create new object",
"# manually copy all attributes over",
"for",
"key",
"in",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
":",
"val",
"=",
"self",
".",
"__dict__",
"[",
"key",
"]",
"if",
"isinstance",
"(",
"val",
",",
"(",
"dict",
",",
"list",
",",
"set",
")",
")",
":",
"# mutable types",
"new_term",
".",
"__dict__",
"[",
"key",
"]",
"=",
"copy",
".",
"copy",
"(",
"val",
")",
"else",
":",
"# immutable types",
"new_term",
".",
"__dict__",
"[",
"key",
"]",
"=",
"val",
"return",
"new_term"
] | Properly creates a new PauliTerm, with a completely new dictionary
of operators | [
"Properly",
"creates",
"a",
"new",
"PauliTerm",
"with",
"a",
"completely",
"new",
"dictionary",
"of",
"operators"
] | python | train |
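A usage sketch; the copy is independent because every mutable attribute (the operator dictionary in particular) is re-created rather than shared:

from pyquil.paulis import PauliTerm

term = PauliTerm("Z", 0, 0.5) * PauliTerm("X", 1)
dup = term.copy()
assert dup is not term
assert str(dup) == str(term)     # same operators and coefficient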
googleapis/oauth2client | oauth2client/contrib/appengine.py | https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/contrib/appengine.py#L892-L910 | def oauth2decorator_from_clientsecrets(filename, scope,
message=None, cache=None):
"""Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may
contain HTML and will be presented on the web interface for
any method that uses the decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator
"""
return OAuth2DecoratorFromClientSecrets(filename, scope,
message=message, cache=cache) | [
"def",
"oauth2decorator_from_clientsecrets",
"(",
"filename",
",",
"scope",
",",
"message",
"=",
"None",
",",
"cache",
"=",
"None",
")",
":",
"return",
"OAuth2DecoratorFromClientSecrets",
"(",
"filename",
",",
"scope",
",",
"message",
"=",
"message",
",",
"cache",
"=",
"cache",
")"
] | Creates an OAuth2Decorator populated from a clientsecrets file.
Args:
filename: string, File name of client secrets.
scope: string or list of strings, scope(s) of the credentials being
requested.
message: string, A friendly string to display to the user if the
clientsecrets file is missing or invalid. The message may
contain HTML and will be presented on the web interface for
any method that uses the decorator.
cache: An optional cache service client that implements get() and set()
methods. See clientsecrets.loadfile() for details.
Returns: An OAuth2Decorator | [
"Creates",
"an",
"OAuth2Decorator",
"populated",
"from",
"a",
"clientsecrets",
"file",
"."
] | python | valid |
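Typical App Engine usage; the secrets filename, scope, and handler are illustrative:

import webapp2
from oauth2client.contrib.appengine import oauth2decorator_from_clientsecrets

decorator = oauth2decorator_from_clientsecrets(
    'client_secrets.json',
    scope='https://www.googleapis.com/auth/drive.readonly',
    message='client_secrets.json is missing or invalid.')

class MainHandler(webapp2.RequestHandler):
    @decorator.oauth_required          # sends the user through the OAuth2 flow
    def get(self):
        self.response.write('authorized')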
mikedh/trimesh | trimesh/primitives.py | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/primitives.py#L624-L636 | def volume(self):
"""
The volume of the primitive extrusion.
Calculated from polygon and height to avoid mesh creation.
Returns
----------
volume: float, volume of 3D extrusion
"""
volume = abs(self.primitive.polygon.area *
self.primitive.height)
return volume | [
"def",
"volume",
"(",
"self",
")",
":",
"volume",
"=",
"abs",
"(",
"self",
".",
"primitive",
".",
"polygon",
".",
"area",
"*",
"self",
".",
"primitive",
".",
"height",
")",
"return",
"volume"
] | The volume of the primitive extrusion.
Calculated from polygon and height to avoid mesh creation.
Returns
----------
volume: float, volume of 3D extrusion | [
"The",
"volume",
"of",
"the",
"primitive",
"extrusion",
"."
] | python | train |
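A quick check of the closed-form volume; the polygon coordinates are arbitrary:

import trimesh
from shapely.geometry import Polygon

rect = Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])          # area = 2
ext = trimesh.primitives.Extrusion(polygon=rect, height=3.0)
assert abs(ext.volume - 6.0) < 1e-9                       # area * height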
OSSOS/MOP | src/jjk/preproc/findTriplets.py | https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/jjk/preproc/findTriplets.py#L257-L312 | def setDiscoveryTriples(win,table="discovery"):
"""Provide user with a list of triples that could be discovery triples"""
win.help("Getting a list of pointings with triples from the CFEPS db")
pointings=getPointingsWithTriples()
win.help("Select the "+table+" triple form the list...")
import time
for pointing in pointings:
header="%10s %10s %8s %10s %8s" % (pointing[1],'mjdate','Elongation','Filter', 'IQ')
triples=getTriples(pointing=pointing[0])
choices=[]
triplist=[]
no_type=0
previous_list=[]
for triple in triples:
#win.help(str(triple))
tripinfo=getTripInfo(triple[0])
if not tripinfo[table]==None:
previous_list.append(triple[0])
#if not abs(180-tripinfo['elongation'])< 20:
# continue
triplist.append(triple)
if str(tripinfo['iq'])=='None':
tripinfo['iq']=-1.0
obs_type=' '
if tripinfo['discovery']:
obs_type='D'
elif tripinfo['checkup']:
obs_type='C'
elif tripinfo['recovery']:
obs_type='R'
if obs_type==' ':
no_type+=1
line=(obs_type,tripinfo['mjdate'], tripinfo['elongation'],
tripinfo['filter'], tripinfo['iq'], tripinfo['block'] )
choices.append('%10s %10s %8.2f %10s %8.2f %8s' % line)
if len(choices)==0 or no_type==0:
continue
#if len(previous_list)==1:
# continue
win.help("Choose a "+table+" triple (space) [no choice means skip] then press enter\n (q) to exit")
choice=win.list(header,choices)
if choice==None:
win.help("Loading next triple")
break
### Record which triplet is a discovery triplet
if len(choice)!=1:
win.help("Loading next triple\n")
continue
discovery_triple=triplist[choice[0]]
for triple in previous_list:
sql="DELETE FROM "+table+" WHERE triple=%s "
cfeps.execute(sql,triple)
sql="INSERT INTO "+table+" ( triple ) VALUES ( %s ) "
cfeps.execute(sql,discovery_triple) | [
"def",
"setDiscoveryTriples",
"(",
"win",
",",
"table",
"=",
"\"discovery\"",
")",
":",
"win",
".",
"help",
"(",
"\"Getting a list of pointings with triples from the CFEPS db\"",
")",
"pointings",
"=",
"getPointingsWithTriples",
"(",
")",
"win",
".",
"help",
"(",
"\"Select the \"",
"+",
"table",
"+",
"\" triple form the list...\"",
")",
"import",
"time",
"for",
"pointing",
"in",
"pointings",
":",
"header",
"=",
"\"%10s %10s %8s %10s %8s\"",
"%",
"(",
"pointing",
"[",
"1",
"]",
",",
"'mjdate'",
",",
"'Elongation'",
",",
"'Filter'",
",",
"'IQ'",
")",
"triples",
"=",
"getTriples",
"(",
"pointing",
"=",
"pointing",
"[",
"0",
"]",
")",
"choices",
"=",
"[",
"]",
"triplist",
"=",
"[",
"]",
"no_type",
"=",
"0",
"previous_list",
"=",
"[",
"]",
"for",
"triple",
"in",
"triples",
":",
"#win.help(str(triple))",
"tripinfo",
"=",
"getTripInfo",
"(",
"triple",
"[",
"0",
"]",
")",
"if",
"not",
"tripinfo",
"[",
"table",
"]",
"==",
"None",
":",
"previous_list",
".",
"append",
"(",
"triple",
"[",
"0",
"]",
")",
"#if not abs(180-tripinfo['elongation'])< 20:",
"#\tcontinue",
"triplist",
".",
"append",
"(",
"triple",
")",
"if",
"str",
"(",
"tripinfo",
"[",
"'iq'",
"]",
")",
"==",
"'None'",
":",
"tripinfo",
"[",
"'iq'",
"]",
"=",
"-",
"1.0",
"obs_type",
"=",
"' '",
"if",
"tripinfo",
"[",
"'discovery'",
"]",
":",
"obs_type",
"=",
"'D'",
"elif",
"tripinfo",
"[",
"'checkup'",
"]",
":",
"obs_type",
"=",
"'C'",
"elif",
"tripinfo",
"[",
"'recovery'",
"]",
":",
"obs_type",
"=",
"'R'",
"if",
"obs_type",
"==",
"' '",
":",
"no_type",
"+=",
"1",
"line",
"=",
"(",
"obs_type",
",",
"tripinfo",
"[",
"'mjdate'",
"]",
",",
"tripinfo",
"[",
"'elongation'",
"]",
",",
"tripinfo",
"[",
"'filter'",
"]",
",",
"tripinfo",
"[",
"'iq'",
"]",
",",
"tripinfo",
"[",
"'block'",
"]",
")",
"choices",
".",
"append",
"(",
"'%10s %10s %8.2f %10s %8.2f %8s'",
"%",
"line",
")",
"if",
"len",
"(",
"choices",
")",
"==",
"0",
"or",
"no_type",
"==",
"0",
":",
"continue",
"#if len(previous_list)==1:",
"# continue",
"win",
".",
"help",
"(",
"\"Choose a \"",
"+",
"table",
"+",
"\" triple (space) [no choice means skip] then press enter\\n (q) to exit\"",
")",
"choice",
"=",
"win",
".",
"list",
"(",
"header",
",",
"choices",
")",
"if",
"choice",
"==",
"None",
":",
"win",
".",
"help",
"(",
"\"Loading next triple\"",
")",
"break",
"### Record which triplet is a discovery triplet",
"if",
"len",
"(",
"choice",
")",
"!=",
"1",
":",
"win",
".",
"help",
"(",
"\"Loading next triple\\n\"",
")",
"continue",
"discovery_triple",
"=",
"triplist",
"[",
"choice",
"[",
"0",
"]",
"]",
"for",
"triple",
"in",
"previous_list",
":",
"sql",
"=",
"\"DELETE FROM \"",
"+",
"table",
"+",
"\" WHERE triple=%s \"",
"cfeps",
".",
"execute",
"(",
"sql",
",",
"triple",
")",
"sql",
"=",
"\"INSERT INTO \"",
"+",
"table",
"+",
"\" ( triple ) VALUES ( %s ) \"",
"cfeps",
".",
"execute",
"(",
"sql",
",",
"discovery_triple",
")"
] | Provide user with a list of triples that could be discovery triples | [
"Provide",
"user",
"with",
"a",
"list",
"of",
"triples",
"that",
"could",
"be",
"discovery",
"triples"
] | python | train |
spacetelescope/drizzlepac | drizzlepac/buildwcs.py | https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/buildwcs.py#L75-L219 | def build(outname, wcsname, refimage, undistort=False,
applycoeffs=False, coeffsfile=None, **wcspars):
""" Core functionality to create a WCS instance from a reference image WCS,
user supplied parameters or user adjusted reference WCS.
The distortion information can either be read in as part of the reference
image WCS or given in 'coeffsfile'.
Parameters
----------
outname : string
filename of output WCS
wcsname : string
WCSNAME ID for generated WCS
refimage : string
filename of image with source WCS used as basis for output WCS
undistort : bool
Create an undistorted WCS?
applycoeffs : bool
Apply coefficients from refimage to generate undistorted WCS?
coeffsfile : string
If specified, read distortion coeffs from separate file
"""
# Insure that the User WCS parameters have values for all the parameters,
# even if that value is 'None'
user_wcs_pars = convert_user_pars(wcspars)
userwcs = wcspars['userwcs']
"""
Use cases to document the logic required to interpret the parameters
WCS generation based on refimage/userwcs parameters
-------------------------------------------------------------
refimage == None, userwcs == False:
*NO WCS specified*
=> print a WARNING message and return without doing anything
refimage == None, userwcs == True:
=> Create WCS without a distortion model entirely from user parameters*
refimage != None, userwcs == False:
=> No user WCS parameters specified
=> Simply use refimage WCS as specified
refimage != None, userwcs == True:
=> Update refimage WCS with user specified values*
Apply distortion and generate final headerlet using processed WCS
-----------------------------------------------------------------
refimage == None, userwcs == True:
*Output WCS generated entirely from user supplied parameters*
Case 1: applycoeffs == False, undistort == True/False (ignored)
=> no distortion model to interpret
=> generate undistorted headerlet with no distortion model
Case 2: applycoeffs == True/False, undistort == True
=> ignore any user specified distortion model
=> generate undistorted headerlet with no distortion model
Case 3: applycoeffs == True, undistort == False
=> WCS from scratch combined with distortion model from another image
=> generate headerlet with distortion model
refimage != None, userwcs == True/False:
*Output WCS generated from reference image possibly modified by user parameters*
Case 4: applycoeffs == False, undistort == True
=> If refimage has distortion, remove it
=> generate undistorted headerlet with no distortion model
Case 5: applycoeffs == False, undistort == False
=> Leave refimage distortion model (if any) unmodified
=> generate a headerlet using same distortion model (if any) as refimage
Case 6: applycoeffs == True, undistort == False
=> Update refimage with distortion model with user-specified model
=> generate a headerlet with a distortion model
Case 7: applycoeffs == True, undistort == True
=> ignore user specified distortion model and undistort WCS
=> generate a headerlet without a distortion model
"""
### Build WCS from refimage and/or user pars
if util.is_blank(refimage) and not userwcs:
print('WARNING: No WCS specified... No WCS created!')
return
customwcs = None
if util.is_blank(refimage) and userwcs:
# create HSTWCS object from user parameters
complete_wcs = True
for key in user_wcs_pars:
if util.is_blank(user_wcs_pars[key]):
complete_wcs = False
break
if complete_wcs:
customwcs = wcs_functions.build_hstwcs(user_wcs_pars['crval1'],user_wcs_pars['crval2'],
user_wcs_pars['crpix1'],user_wcs_pars['crpix2'],
user_wcs_pars['naxis1'],user_wcs_pars['naxis2'],
user_wcs_pars['pscale'],user_wcs_pars['orientat'])
else:
print('WARNING: Not enough WCS information provided by user!')
raise ValueError
if not util.is_blank(refimage):
refwcs = stwcs.wcsutil.HSTWCS(refimage)
else:
refwcs = customwcs
### Apply distortion model (if any) to update WCS
if applycoeffs and not util.is_blank(coeffsfile):
if not util.is_blank(refimage):
replace_model(refwcs, coeffsfile)
else:
if not undistort:
add_model(refwcs,coeffsfile)
# Only working with custom WCS from user, no distortion
# so apply model to WCS, including modifying the CD matrix
apply_model(refwcs)
### Create undistorted WCS, if requested
if undistort:
outwcs = undistortWCS(refwcs)
else:
outwcs = refwcs
if userwcs:
# replace (some/all?) WCS values from refimage with user WCS values
# by running 'updatewcs' functions on input WCS
outwcs = mergewcs(outwcs,customwcs,user_wcs_pars)
### Create the final headerlet and write it out, if specified
if not util.is_blank(refimage):
template = refimage
elif not util.is_blank(coeffsfile):
template = coeffsfile
else:
template = None
# create default WCSNAME if None was given
wcsname = create_WCSname(wcsname)
print('Creating final headerlet with name ',wcsname,' using template ',template)
outhdr = generate_headerlet(outwcs,template,wcsname,outname=outname)
# synchronize this new WCS with the rest of the chips in the image
for ext in outhdr:
if 'extname' in ext.header and ext.header['extname'] == 'SIPWCS':
ext_wcs = wcsutil.HSTWCS(ext)
stwcs.updatewcs.makewcs.MakeWCS.updateWCS(ext_wcs,outwcs)
return outwcs | [
"def",
"build",
"(",
"outname",
",",
"wcsname",
",",
"refimage",
",",
"undistort",
"=",
"False",
",",
"applycoeffs",
"=",
"False",
",",
"coeffsfile",
"=",
"None",
",",
"*",
"*",
"wcspars",
")",
":",
"# Insure that the User WCS parameters have values for all the parameters,",
"# even if that value is 'None'",
"user_wcs_pars",
"=",
"convert_user_pars",
"(",
"wcspars",
")",
"userwcs",
"=",
"wcspars",
"[",
"'userwcs'",
"]",
"\"\"\"\n Use cases to document the logic required to interpret the parameters\n\n WCS generation based on refimage/userwcs parameters\n -------------------------------------------------------------\n refimage == None, userwcs == False:\n *NO WCS specified*\n => print a WARNING message and return without doing anything\n refimage == None, userwcs == True:\n => Create WCS without a distortion model entirely from user parameters*\n refimage != None, userwcs == False:\n => No user WCS parameters specified\n => Simply use refimage WCS as specified\n refimage != None, userwcs == True:\n => Update refimage WCS with user specified values*\n\n Apply distortion and generate final headerlet using processed WCS\n -----------------------------------------------------------------\n refimage == None, userwcs == True:\n *Output WCS generated entirely from user supplied parameters*\n Case 1: applycoeffs == False, undistort == True/False (ignored)\n => no distortion model to interpret\n => generate undistorted headerlet with no distortion model\n Case 2: applycoeffs == True/False, undistort == True\n => ignore any user specified distortion model\n => generate undistorted headerlet with no distortion model\n Case 3: applycoeffs == True, undistort == False\n => WCS from scratch combined with distortion model from another image\n => generate headerlet with distortion model\n\n refimage != None, userwcs == True/False:\n *Output WCS generated from reference image possibly modified by user parameters*\n Case 4: applycoeffs == False, undistort == True\n => If refimage has distortion, remove it\n => generate undistorted headerlet with no distortion model\n Case 5: applycoeffs == False, undistort == False\n => Leave refimage distortion model (if any) unmodified\n => generate a headerlet using same distortion model (if any) as refimage\n Case 6: applycoeffs == True, undistort == False\n => Update refimage with distortion model with user-specified model\n => generate a headerlet with a distortion model\n Case 7: applycoeffs == True, undistort == True\n => ignore user specified distortion model and undistort WCS\n => generate a headerlet without a distortion model\n \"\"\"",
"### Build WCS from refimage and/or user pars",
"if",
"util",
".",
"is_blank",
"(",
"refimage",
")",
"and",
"not",
"userwcs",
":",
"print",
"(",
"'WARNING: No WCS specified... No WCS created!'",
")",
"return",
"customwcs",
"=",
"None",
"if",
"util",
".",
"is_blank",
"(",
"refimage",
")",
"and",
"userwcs",
":",
"# create HSTWCS object from user parameters",
"complete_wcs",
"=",
"True",
"for",
"key",
"in",
"user_wcs_pars",
":",
"if",
"util",
".",
"is_blank",
"(",
"user_wcs_pars",
"[",
"key",
"]",
")",
":",
"complete_wcs",
"=",
"False",
"break",
"if",
"complete_wcs",
":",
"customwcs",
"=",
"wcs_functions",
".",
"build_hstwcs",
"(",
"user_wcs_pars",
"[",
"'crval1'",
"]",
",",
"user_wcs_pars",
"[",
"'crval2'",
"]",
",",
"user_wcs_pars",
"[",
"'crpix1'",
"]",
",",
"user_wcs_pars",
"[",
"'crpix2'",
"]",
",",
"user_wcs_pars",
"[",
"'naxis1'",
"]",
",",
"user_wcs_pars",
"[",
"'naxis2'",
"]",
",",
"user_wcs_pars",
"[",
"'pscale'",
"]",
",",
"user_wcs_pars",
"[",
"'orientat'",
"]",
")",
"else",
":",
"print",
"(",
"'WARNING: Not enough WCS information provided by user!'",
")",
"raise",
"ValueError",
"if",
"not",
"util",
".",
"is_blank",
"(",
"refimage",
")",
":",
"refwcs",
"=",
"stwcs",
".",
"wcsutil",
".",
"HSTWCS",
"(",
"refimage",
")",
"else",
":",
"refwcs",
"=",
"customwcs",
"### Apply distortion model (if any) to update WCS",
"if",
"applycoeffs",
"and",
"not",
"util",
".",
"is_blank",
"(",
"coeffsfile",
")",
":",
"if",
"not",
"util",
".",
"is_blank",
"(",
"refimage",
")",
":",
"replace_model",
"(",
"refwcs",
",",
"coeffsfile",
")",
"else",
":",
"if",
"not",
"undistort",
":",
"add_model",
"(",
"refwcs",
",",
"coeffsfile",
")",
"# Only working with custom WCS from user, no distortion",
"# so apply model to WCS, including modifying the CD matrix",
"apply_model",
"(",
"refwcs",
")",
"### Create undistorted WCS, if requested",
"if",
"undistort",
":",
"outwcs",
"=",
"undistortWCS",
"(",
"refwcs",
")",
"else",
":",
"outwcs",
"=",
"refwcs",
"if",
"userwcs",
":",
"# replace (some/all?) WCS values from refimage with user WCS values",
"# by running 'updatewcs' functions on input WCS",
"outwcs",
"=",
"mergewcs",
"(",
"outwcs",
",",
"customwcs",
",",
"user_wcs_pars",
")",
"### Create the final headerlet and write it out, if specified",
"if",
"not",
"util",
".",
"is_blank",
"(",
"refimage",
")",
":",
"template",
"=",
"refimage",
"elif",
"not",
"util",
".",
"is_blank",
"(",
"coeffsfile",
")",
":",
"template",
"=",
"coeffsfile",
"else",
":",
"template",
"=",
"None",
"# create default WCSNAME if None was given",
"wcsname",
"=",
"create_WCSname",
"(",
"wcsname",
")",
"print",
"(",
"'Creating final headerlet with name '",
",",
"wcsname",
",",
"' using template '",
",",
"template",
")",
"outhdr",
"=",
"generate_headerlet",
"(",
"outwcs",
",",
"template",
",",
"wcsname",
",",
"outname",
"=",
"outname",
")",
"# synchronize this new WCS with the rest of the chips in the image",
"for",
"ext",
"in",
"outhdr",
":",
"if",
"'extname'",
"in",
"ext",
".",
"header",
"and",
"ext",
".",
"header",
"[",
"'extname'",
"]",
"==",
"'SIPWCS'",
":",
"ext_wcs",
"=",
"wcsutil",
".",
"HSTWCS",
"(",
"ext",
")",
"stwcs",
".",
"updatewcs",
".",
"makewcs",
".",
"MakeWCS",
".",
"updateWCS",
"(",
"ext_wcs",
",",
"outwcs",
")",
"return",
"outwcs"
] | Core functionality to create a WCS instance from a reference image WCS,
user supplied parameters or user adjusted reference WCS.
The distortion information can either be read in as part of the reference
image WCS or given in 'coeffsfile'.
Parameters
----------
outname : string
filename of output WCS
wcsname : string
WCSNAME ID for generated WCS
refimage : string
filename of image with source WCS used as basis for output WCS
undistort : bool
Create an undistorted WCS?
applycoeffs : bool
Apply coefficients from refimage to generate undistorted WCS?
coeffsfile : string
If specified, read distortion coeffs from separate file | [
"Core",
"functionality",
"to",
"create",
"a",
"WCS",
"instance",
"from",
"a",
"reference",
"image",
"WCS",
"user",
"supplied",
"parameters",
"or",
"user",
"adjusted",
"reference",
"WCS",
".",
"The",
"distortion",
"information",
"can",
"either",
"be",
"read",
"in",
"as",
"part",
"of",
"the",
"reference",
"image",
"WCS",
"or",
"given",
"in",
"coeffsfile",
"."
] | python | train |
GoogleCloudPlatform/appengine-mapreduce | python/src/mapreduce/handlers.py | https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/src/mapreduce/handlers.py#L1008-L1023 | def _processing_limit(self, spec):
"""Get the limit on the number of map calls allowed by this slice.
Args:
spec: a Mapreduce spec.
Returns:
The limit as a positive int if specified by user. -1 otherwise.
"""
processing_rate = float(spec.mapper.params.get("processing_rate", 0))
slice_processing_limit = -1
if processing_rate > 0:
slice_processing_limit = int(math.ceil(
parameters.config._SLICE_DURATION_SEC*processing_rate/
int(spec.mapper.shard_count)))
return slice_processing_limit | [
"def",
"_processing_limit",
"(",
"self",
",",
"spec",
")",
":",
"processing_rate",
"=",
"float",
"(",
"spec",
".",
"mapper",
".",
"params",
".",
"get",
"(",
"\"processing_rate\"",
",",
"0",
")",
")",
"slice_processing_limit",
"=",
"-",
"1",
"if",
"processing_rate",
">",
"0",
":",
"slice_processing_limit",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"parameters",
".",
"config",
".",
"_SLICE_DURATION_SEC",
"*",
"processing_rate",
"/",
"int",
"(",
"spec",
".",
"mapper",
".",
"shard_count",
")",
")",
")",
"return",
"slice_processing_limit"
] | Get the limit on the number of map calls allowed by this slice.
Args:
spec: a Mapreduce spec.
Returns:
The limit as a positive int if specified by user. -1 otherwise. | [
"Get",
"the",
"limit",
"on",
"the",
"number",
"of",
"map",
"calls",
"allowed",
"by",
"this",
"slice",
"."
] | python | train |
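A worked example of the limit arithmetic above, using made-up stand-in values (the real ones come from the mapreduce spec and parameters.config, which are assumptions here):

import math

slice_duration_sec = 15      # stand-in for parameters.config._SLICE_DURATION_SEC
processing_rate = 1000.0     # stand-in for spec.mapper.params["processing_rate"]
shard_count = 8              # stand-in for spec.mapper.shard_count

# Same computation as _processing_limit: per-shard map calls allowed per slice.
limit = int(math.ceil(slice_duration_sec * processing_rate / shard_count))
print(limit)  # 1875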
chemlab/chemlab | chemlab/io/handlers/gamess.py | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/io/handlers/gamess.py#L107-L139 | def _parse_tddft(self):
"""Parse the output resulted from a tddft calculation.
"""
text = self.text
energies = sections("SUMMARY OF TDDFT RESULTS",
"DONE WITH TD-DFT EXCITATION ENERGIES",
text)
lines = energies[0].splitlines()
regex = re.compile("""
\s+(\d+) # State Number
\s+([ ^]+) # State sym
\s+([+-]?\d+\.\d+) # Tot Energy
\s+([+-]?\d+\.\d+) # Exc Energy
(\s+([+-]?\d+\.\d+) #
\s+([+-]?\d+\.\d+) # Dipole moment
\s+([+-]?\d+\.\d+) #
\s+([+-]?\d+\.\d+))? # Oscillator strength
""", flags=re.VERBOSE)
states = []
for line in lines:
match = regex.match(line)
if match:
# Check for strange behaviour of symmetry
if not re.match("\w+",match.group(4)):
raise ValueError("Strange symmetry string: %s"%match.group(4))
osc_strength = float(match.group(9)) if match.group(9) else 0.000
states.append({"num": int(match.group(1)), "sym": match.group(4),
"strength": osc_strength})
return {"states": states} | [
"def",
"_parse_tddft",
"(",
"self",
")",
":",
"text",
"=",
"self",
".",
"text",
"energies",
"=",
"sections",
"(",
"\"SUMMARY OF TDDFT RESULTS\"",
",",
"\"DONE WITH TD-DFT EXCITATION ENERGIES\"",
",",
"text",
")",
"lines",
"=",
"energies",
"[",
"0",
"]",
".",
"splitlines",
"(",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"\"\"\"\n \\s+(\\d+) # State Number\n \\s+([ ^]+) # State sym\n \\s+([+-]?\\d+\\.\\d+) # Tot Energy \n \\s+([+-]?\\d+\\.\\d+) # Exc Energy\n (\\s+([+-]?\\d+\\.\\d+) # \n \\s+([+-]?\\d+\\.\\d+) # Dipole moment\n \\s+([+-]?\\d+\\.\\d+) # \n \\s+([+-]?\\d+\\.\\d+))? # Oscillator strength\n \"\"\"",
",",
"flags",
"=",
"re",
".",
"VERBOSE",
")",
"states",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"match",
"=",
"regex",
".",
"match",
"(",
"line",
")",
"if",
"match",
":",
"# Check for strange behaviour of symmetry",
"if",
"not",
"re",
".",
"match",
"(",
"\"\\w+\"",
",",
"match",
".",
"group",
"(",
"4",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"Strange symmetry string: %s\"",
"%",
"match",
".",
"group",
"(",
"4",
")",
")",
"osc_strength",
"=",
"float",
"(",
"match",
".",
"group",
"(",
"9",
")",
")",
"if",
"match",
".",
"group",
"(",
"9",
")",
"else",
"0.000",
"states",
".",
"append",
"(",
"{",
"\"num\"",
":",
"int",
"(",
"match",
".",
"group",
"(",
"1",
")",
")",
",",
"\"sym\"",
":",
"match",
".",
"group",
"(",
"4",
")",
",",
"\"strength\"",
":",
"osc_strength",
"}",
")",
"return",
"{",
"\"states\"",
":",
"states",
"}"
] | Parse the output resulting from a tddft calculation. | [
"Parse",
"the",
"output",
"resulted",
"from",
"a",
"tddft",
"calculation",
"."
] | python | train |
theonion/djes | djes/utils/query.py | https://github.com/theonion/djes/blob/8f7347382c74172e82e959e3dfbc12b18fbb523f/djes/utils/query.py#L4-L29 | def batched_queryset(queryset, chunksize=1000):
'''
Iterate over a Django Queryset ordered by the primary key
This method loads a maximum of chunksize (default: 1000) rows in its
memory at the same time while Django normally would load all rows in its
memory. Using the iterator() method only causes it to not preload all the
classes.
Note that the implementation of the iterator does not support ordered query sets.
Source: https://djangosnippets.org/snippets/1949/
'''
try:
last_pk = queryset.order_by('-pk')[0].pk
except IndexError:
# Support empty querysets
return
queryset = queryset.order_by('pk')
pk = 0
while pk < last_pk:
for row in queryset.filter(pk__gt=pk)[:chunksize]:
pk = row.pk
yield row
gc.collect() | [
"def",
"batched_queryset",
"(",
"queryset",
",",
"chunksize",
"=",
"1000",
")",
":",
"try",
":",
"last_pk",
"=",
"queryset",
".",
"order_by",
"(",
"'-pk'",
")",
"[",
"0",
"]",
".",
"pk",
"except",
"IndexError",
":",
"# Support empty querysets",
"return",
"queryset",
"=",
"queryset",
".",
"order_by",
"(",
"'pk'",
")",
"pk",
"=",
"0",
"while",
"pk",
"<",
"last_pk",
":",
"for",
"row",
"in",
"queryset",
".",
"filter",
"(",
"pk__gt",
"=",
"pk",
")",
"[",
":",
"chunksize",
"]",
":",
"pk",
"=",
"row",
".",
"pk",
"yield",
"row",
"gc",
".",
"collect",
"(",
")"
] | Iterate over a Django Queryset ordered by the primary key
This method loads a maximum of chunksize (default: 1000) rows in its
memory at the same time while Django normally would load all rows in its
memory. Using the iterator() method only causes it to not preload all the
classes.
Note that the implementation of the iterator does not support ordered query sets.
Source: https://djangosnippets.org/snippets/1949/ | [
"Iterate",
"over",
"a",
"Django",
"Queryset",
"ordered",
"by",
"the",
"primary",
"key"
] | python | train |
pypa/pipenv | pipenv/vendor/pathlib2/__init__.py | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pathlib2/__init__.py#L1544-L1554 | def exists(self):
"""
Whether this path exists.
"""
try:
self.stat()
except OSError as e:
if e.errno not in (ENOENT, ENOTDIR):
raise
return False
return True | [
"def",
"exists",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"stat",
"(",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"not",
"in",
"(",
"ENOENT",
",",
"ENOTDIR",
")",
":",
"raise",
"return",
"False",
"return",
"True"
] | Whether this path exists. | [
"Whether",
"this",
"path",
"exists",
"."
] | python | train |
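A minimal usage sketch, assuming the pathlib2 backport is installed; note that a missing path returns False rather than raising, because ENOENT and ENOTDIR are swallowed above:

from pathlib2 import Path

print(Path('/tmp').exists())           # True on most Unix systems
print(Path('/no/such/path').exists())  # False (ENOENT is swallowed)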
theislab/scanpy | scanpy/utils.py | https://github.com/theislab/scanpy/blob/9e4e5ee02e04cf618872d9b098e24f0542e8b227/scanpy/utils.py#L591-L608 | def moving_average(a, n):
"""Moving average over one-dimensional array.
Parameters
----------
a : np.ndarray
One-dimensional array.
n : int
Number of entries to average over. n=2 means averaging over the currrent
the previous entry.
Returns
-------
An array view storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n | [
"def",
"moving_average",
"(",
"a",
",",
"n",
")",
":",
"ret",
"=",
"np",
".",
"cumsum",
"(",
"a",
",",
"dtype",
"=",
"float",
")",
"ret",
"[",
"n",
":",
"]",
"=",
"ret",
"[",
"n",
":",
"]",
"-",
"ret",
"[",
":",
"-",
"n",
"]",
"return",
"ret",
"[",
"n",
"-",
"1",
":",
"]",
"/",
"n"
] | Moving average over one-dimensional array.
Parameters
----------
a : np.ndarray
One-dimensional array.
n : int
Number of entries to average over. n=2 means averaging over the current and
the previous entry.
Returns
-------
An array view storing the moving average. | [
"Moving",
"average",
"over",
"one",
"-",
"dimensional",
"array",
"."
] | python | train |
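A quick check of the cumulative-sum trick above, restated standalone so it runs without scanpy (the import path shown in this record may differ in newer releases):

import numpy as np

def moving_average(a, n):
    # cumsum-based running mean, identical to the helper above
    ret = np.cumsum(a, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n

print(moving_average(np.array([1, 2, 3, 4]), 2))  # [1.5 2.5 3.5]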
Dispersive-Hydrodynamics-Lab/PACE | PACE/PACE.py | https://github.com/Dispersive-Hydrodynamics-Lab/PACE/blob/4ce27d5fc9b02cc2ce55f6fea7fc8d6015317e1f/PACE/PACE.py#L191-L203 | def plot_traindata(self, name: str='dataplot') -> None:
"""
Plots traindata.... choo choo...
"""
traindata = self.get_traindata()
plt.figure(figsize=(16, 16))
plt.scatter(traindata[:, 1], traindata[:, 2],
c=traindata[:, 5], marker='o', label='Datastore Points')
plt.xlabel(r'$\log_{10}$ Noise')
plt.ylabel(r'$\log_{10}$ Curvature')
plt.legend(loc=2, fontsize='xx-large')
plt.savefig('./img/{}.png'.format(name)) | [
"def",
"plot_traindata",
"(",
"self",
",",
"name",
":",
"str",
"=",
"'dataplot'",
")",
"->",
"None",
":",
"traindata",
"=",
"self",
".",
"get_traindata",
"(",
")",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"(",
"16",
",",
"16",
")",
")",
"plt",
".",
"scatter",
"(",
"traindata",
"[",
":",
",",
"1",
"]",
",",
"traindata",
"[",
":",
",",
"2",
"]",
",",
"c",
"=",
"traindata",
"[",
":",
",",
"5",
"]",
",",
"marker",
"=",
"'o'",
",",
"label",
"=",
"'Datastore Points'",
")",
"plt",
".",
"xlabel",
"(",
"r'$\\log_{10}$ Noise'",
")",
"plt",
".",
"ylabel",
"(",
"r'$\\log_{10}$ Curvature'",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"2",
",",
"fontsize",
"=",
"'xx-large'",
")",
"plt",
".",
"savefig",
"(",
"'./img/{}.png'",
".",
"format",
"(",
"name",
")",
")"
] | Plots traindata.... choo choo... | [
"Plots",
"traindata",
"....",
"choo",
"choo",
"..."
] | python | train |
tanghaibao/goatools | goatools/go_enrichment.py | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/go_enrichment.py#L482-L486 | def prt_tsv(self, prt, goea_results, **kws):
"""Write tab-separated table data"""
prt_flds = kws.get('prt_flds', self.get_prtflds_default(goea_results))
tsv_data = MgrNtGOEAs(goea_results).get_goea_nts_prt(prt_flds, **kws)
RPT.prt_tsv(prt, tsv_data, **kws) | [
"def",
"prt_tsv",
"(",
"self",
",",
"prt",
",",
"goea_results",
",",
"*",
"*",
"kws",
")",
":",
"prt_flds",
"=",
"kws",
".",
"get",
"(",
"'prt_flds'",
",",
"self",
".",
"get_prtflds_default",
"(",
"goea_results",
")",
")",
"tsv_data",
"=",
"MgrNtGOEAs",
"(",
"goea_results",
")",
".",
"get_goea_nts_prt",
"(",
"prt_flds",
",",
"*",
"*",
"kws",
")",
"RPT",
".",
"prt_tsv",
"(",
"prt",
",",
"tsv_data",
",",
"*",
"*",
"kws",
")"
] | Write tab-separated table data | [
"Write",
"tab",
"-",
"separated",
"table",
"data"
] | python | train |
project-rig/rig | rig/routing_table/ordered_covering.py | https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/ordered_covering.py#L553-L598 | def _refine_upcheck(merge, min_goodness):
"""Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all.
"""
# Remove any entries which would be covered by entries above the merge
# position.
changed = False
for i in sorted(merge.entries, reverse=True):
# Get all the entries that are between the entry we're looking at the
# insertion index of the proposed merged index. If this entry would be
# covered up by any of them then we remove it from the merge.
entry = merge.routing_table[i]
key, mask = entry.key, entry.mask
if any(intersect(key, mask, other.key, other.mask) for other in
merge.routing_table[i+1:merge.insertion_index]):
# The entry would be partially or wholly covered by another entry,
# remove it from the merge and return a new merge.
merge = _Merge(merge.routing_table, merge.entries - {i})
changed = True
# Check if the merge is sufficiently good
if merge.goodness <= min_goodness:
merge = _Merge(merge.routing_table) # Replace with empty merge
break
# Return the final merge
return merge, changed | [
"def",
"_refine_upcheck",
"(",
"merge",
",",
"min_goodness",
")",
":",
"# Remove any entries which would be covered by entries above the merge",
"# position.",
"changed",
"=",
"False",
"for",
"i",
"in",
"sorted",
"(",
"merge",
".",
"entries",
",",
"reverse",
"=",
"True",
")",
":",
"# Get all the entries that are between the entry we're looking at the",
"# insertion index of the proposed merged index. If this entry would be",
"# covered up by any of them then we remove it from the merge.",
"entry",
"=",
"merge",
".",
"routing_table",
"[",
"i",
"]",
"key",
",",
"mask",
"=",
"entry",
".",
"key",
",",
"entry",
".",
"mask",
"if",
"any",
"(",
"intersect",
"(",
"key",
",",
"mask",
",",
"other",
".",
"key",
",",
"other",
".",
"mask",
")",
"for",
"other",
"in",
"merge",
".",
"routing_table",
"[",
"i",
"+",
"1",
":",
"merge",
".",
"insertion_index",
"]",
")",
":",
"# The entry would be partially or wholly covered by another entry,",
"# remove it from the merge and return a new merge.",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
",",
"merge",
".",
"entries",
"-",
"{",
"i",
"}",
")",
"changed",
"=",
"True",
"# Check if the merge is sufficiently good",
"if",
"merge",
".",
"goodness",
"<=",
"min_goodness",
":",
"merge",
"=",
"_Merge",
"(",
"merge",
".",
"routing_table",
")",
"# Replace with empty merge",
"break",
"# Return the final merge",
"return",
"merge",
",",
"changed"
] | Remove from the merge any entries which would be covered by entries
between their current position and the merge insertion position.
For example, the third entry of::
0011 -> N
0100 -> N
1000 -> N
X000 -> NE
Cannot be merged with the first two entries because that would generate the
new entry ``XXXX`` which would move ``1000`` below the entry with the
key-mask pair of ``X000``, which would cover it.
Returns
-------
:py:class:`~.Merge`
New merge with entries possibly removed. If the goodness of the merge
ever drops below `min_goodness` then an empty merge will be returned.
bool
If the merge has been changed at all. | [
"Remove",
"from",
"the",
"merge",
"any",
"entries",
"which",
"would",
"be",
"covered",
"by",
"entries",
"between",
"their",
"current",
"position",
"and",
"the",
"merge",
"insertion",
"position",
"."
] | python | train |
tnkteja/myhelp | virtualEnvironment/lib/python2.7/site-packages/pip/utils/__init__.py | https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/pip/utils/__init__.py#L102-L126 | def find_command(cmd, paths=None, pathext=None):
"""Searches the PATH for the given command and returns its path"""
if paths is None:
paths = os.environ.get('PATH', '').split(os.pathsep)
if isinstance(paths, six.string_types):
paths = [paths]
# check if there are funny path extensions for executables, e.g. Windows
if pathext is None:
pathext = get_pathext()
pathext = [ext for ext in pathext.lower().split(os.pathsep) if len(ext)]
# don't use extensions if the command ends with one of them
if os.path.splitext(cmd)[1].lower() in pathext:
pathext = ['']
# check if we find the command on PATH
for path in paths:
# try without extension first
cmd_path = os.path.join(path, cmd)
for ext in pathext:
# then including the extension
cmd_path_ext = cmd_path + ext
if os.path.isfile(cmd_path_ext):
return cmd_path_ext
if os.path.isfile(cmd_path):
return cmd_path
raise BadCommand('Cannot find command %r' % cmd) | [
"def",
"find_command",
"(",
"cmd",
",",
"paths",
"=",
"None",
",",
"pathext",
"=",
"None",
")",
":",
"if",
"paths",
"is",
"None",
":",
"paths",
"=",
"os",
".",
"environ",
".",
"get",
"(",
"'PATH'",
",",
"''",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"if",
"isinstance",
"(",
"paths",
",",
"six",
".",
"string_types",
")",
":",
"paths",
"=",
"[",
"paths",
"]",
"# check if there are funny path extensions for executables, e.g. Windows",
"if",
"pathext",
"is",
"None",
":",
"pathext",
"=",
"get_pathext",
"(",
")",
"pathext",
"=",
"[",
"ext",
"for",
"ext",
"in",
"pathext",
".",
"lower",
"(",
")",
".",
"split",
"(",
"os",
".",
"pathsep",
")",
"if",
"len",
"(",
"ext",
")",
"]",
"# don't use extensions if the command ends with one of them",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"cmd",
")",
"[",
"1",
"]",
".",
"lower",
"(",
")",
"in",
"pathext",
":",
"pathext",
"=",
"[",
"''",
"]",
"# check if we find the command on PATH",
"for",
"path",
"in",
"paths",
":",
"# try without extension first",
"cmd_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"cmd",
")",
"for",
"ext",
"in",
"pathext",
":",
"# then including the extension",
"cmd_path_ext",
"=",
"cmd_path",
"+",
"ext",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cmd_path_ext",
")",
":",
"return",
"cmd_path_ext",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"cmd_path",
")",
":",
"return",
"cmd_path",
"raise",
"BadCommand",
"(",
"'Cannot find command %r'",
"%",
"cmd",
")"
] | Searches the PATH for the given command and returns its path | [
"Searches",
"the",
"PATH",
"for",
"the",
"given",
"command",
"and",
"returns",
"its",
"path"
] | python | test |
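The same PATH scan can be sketched standalone for the POSIX case with no executable extensions; `which` here is an illustrative name, not pip's actual API:

import os

def which(cmd):
    # walk PATH entries in order and return the first existing file
    for path in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(path, cmd)
        if os.path.isfile(candidate):
            return candidate
    raise RuntimeError('Cannot find command %r' % cmd)

print(which('ls'))  # e.g. /bin/ls on most Unix systems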
amcat/nlpipe | nlpipe/backend.py | https://github.com/amcat/nlpipe/blob/e9dcf0214d5dc6ba3900b8d7359909e1e33f1ce7/nlpipe/backend.py#L58-L66 | def get_input_ids(query, limit=None):
"""Get the ids of existing input documents that match a query"""
docs = scan(_es, index=esconfig.ES_INPUT_INDEX,
doc_type=esconfig.ES_INPUT_DOCTYPE,
query=query, size=(limit or 1000), fields="")
for i, a in enumerate(docs):
if limit and i >= limit:
return
yield a['_id'] | [
"def",
"get_input_ids",
"(",
"query",
",",
"limit",
"=",
"None",
")",
":",
"docs",
"=",
"scan",
"(",
"_es",
",",
"index",
"=",
"esconfig",
".",
"ES_INPUT_INDEX",
",",
"doc_type",
"=",
"esconfig",
".",
"ES_INPUT_DOCTYPE",
",",
"query",
"=",
"query",
",",
"size",
"=",
"(",
"limit",
"or",
"1000",
")",
",",
"fields",
"=",
"\"\"",
")",
"for",
"i",
",",
"a",
"in",
"enumerate",
"(",
"docs",
")",
":",
"if",
"limit",
"and",
"i",
">=",
"limit",
":",
"return",
"yield",
"a",
"[",
"'_id'",
"]"
] | Get the ids of existing input documents that match a query | [
"Get",
"the",
"ids",
"of",
"existing",
"input",
"documents",
"that",
"match",
"a",
"query"
] | python | train |
tonyseek/html5lib-truncation | html5lib_truncation/utils.py | https://github.com/tonyseek/html5lib-truncation/blob/b5551e345e583d04dbdf6b97dc2a43a266eec8d6/html5lib_truncation/utils.py#L4-L29 | def truncate_sentence(text, max_chars, break_words=False, padding=0):
"""Truncates a sentence.
    :param max_chars: The maximum number of characters of the truncated sentence.
    :param break_words: If you wish to truncate the given sentence strictly even
                        if it breaks a word, set it to ``True``. It defaults
                        to ``False`` which means truncating the given sentence
                        shorter but never breaking words.
:param padding: The padding size for truncating. It is usually used to
keep spaces for some ending characters such as ``"..."``.
:return: The truncated sentence.
"""
if break_words:
return text[:-abs(max_chars - len(text)) - padding]
words = []
for word in text.split():
predicted_len = (
sum(map(len, words)) + # length of words
len(word) + # length of next word
len(words) - 1 + # length of spaces
padding)
if predicted_len >= max_chars:
break
words.append(word)
return ' '.join(words) | [
"def",
"truncate_sentence",
"(",
"text",
",",
"max_chars",
",",
"break_words",
"=",
"False",
",",
"padding",
"=",
"0",
")",
":",
"if",
"break_words",
":",
"return",
"text",
"[",
":",
"-",
"abs",
"(",
"max_chars",
"-",
"len",
"(",
"text",
")",
")",
"-",
"padding",
"]",
"words",
"=",
"[",
"]",
"for",
"word",
"in",
"text",
".",
"split",
"(",
")",
":",
"predicted_len",
"=",
"(",
"sum",
"(",
"map",
"(",
"len",
",",
"words",
")",
")",
"+",
"# length of words",
"len",
"(",
"word",
")",
"+",
"# length of next word",
"len",
"(",
"words",
")",
"-",
"1",
"+",
"# length of spaces",
"padding",
")",
"if",
"predicted_len",
">=",
"max_chars",
":",
"break",
"words",
".",
"append",
"(",
"word",
")",
"return",
"' '",
".",
"join",
"(",
"words",
")"
] | Truncates a sentence.
:param max_chars: The maximum number of characters of the truncated sentence.
:param break_words: If you wish to truncate the given sentence strictly even
if it breaks a word, set it to ``True``. It defaults
to ``False`` which means truncating the given sentence
shorter but never breaking words.
:param padding: The padding size for truncating. It is usually used to
keep spaces for some ending characters such as ``"..."``.
:return: The truncated sentence. | [
"Truncates",
"a",
"sentence",
"."
] | python | train |
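An example of the word-boundary behaviour, assuming the module is importable under the path shown in this record:

from html5lib_truncation.utils import truncate_sentence

# 'world' would push the predicted length past 16, so it is dropped
print(truncate_sentence('hello beautiful world', 16))  # 'hello beautiful'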
LonamiWebs/Telethon | telethon_examples/interactive_telegram_client.py | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_examples/interactive_telegram_client.py#L26-L31 | def print_title(title):
"""Helper function to print titles to the console more nicely"""
sprint('\n')
sprint('=={}=='.format('=' * len(title)))
sprint('= {} ='.format(title))
sprint('=={}=='.format('=' * len(title))) | [
"def",
"print_title",
"(",
"title",
")",
":",
"sprint",
"(",
"'\\n'",
")",
"sprint",
"(",
"'=={}=='",
".",
"format",
"(",
"'='",
"*",
"len",
"(",
"title",
")",
")",
")",
"sprint",
"(",
"'= {} ='",
".",
"format",
"(",
"title",
")",
")",
"sprint",
"(",
"'=={}=='",
".",
"format",
"(",
"'='",
"*",
"len",
"(",
"title",
")",
")",
")"
] | Helper function to print titles to the console more nicely | [
"Helper",
"function",
"to",
"print",
"titles",
"to",
"the",
"console",
"more",
"nicely"
] | python | train |
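The banner layout is easy to see with a standalone restatement (`sprint` in the original is the example script's safe-print wrapper; plain `print` suffices here):

title = 'Dialogs'
print('=={}=='.format('=' * len(title)))
print('= {} ='.format(title))
print('=={}=='.format('=' * len(title)))
# ===========
# = Dialogs =
# ===========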
tensorflow/mesh | mesh_tensorflow/ops.py | https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4431-L4453 | def pretty_print_counters(counters):
"""print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string
"""
totals = collections.defaultdict(int)
for (name, val) in counters:
prefixes = [name[:i] for i in xrange(len(name)) if name[i] == "/"] + [name]
for p in prefixes:
totals[p] += val
parts = []
for name, val in sorted(six.iteritems(totals)):
parts.append(" " * name.count("/") + "%s: %.3g" % (name, val))
return "\n".join(parts) | [
"def",
"pretty_print_counters",
"(",
"counters",
")",
":",
"totals",
"=",
"collections",
".",
"defaultdict",
"(",
"int",
")",
"for",
"(",
"name",
",",
"val",
")",
"in",
"counters",
":",
"prefixes",
"=",
"[",
"name",
"[",
":",
"i",
"]",
"for",
"i",
"in",
"xrange",
"(",
"len",
"(",
"name",
")",
")",
"if",
"name",
"[",
"i",
"]",
"==",
"\"/\"",
"]",
"+",
"[",
"name",
"]",
"for",
"p",
"in",
"prefixes",
":",
"totals",
"[",
"p",
"]",
"+=",
"val",
"parts",
"=",
"[",
"]",
"for",
"name",
",",
"val",
"in",
"sorted",
"(",
"six",
".",
"iteritems",
"(",
"totals",
")",
")",
":",
"parts",
".",
"append",
"(",
"\" \"",
"*",
"name",
".",
"count",
"(",
"\"/\"",
")",
"+",
"\"%s: %.3g\"",
"%",
"(",
"name",
",",
"val",
")",
")",
"return",
"\"\\n\"",
".",
"join",
"(",
"parts",
")"
] | print counters hierarchically.
Each counter is a pair of a string and a number.
The string can have slashes, meaning that the number also counts towards
each prefix. e.g. "parameters/trainable" counts towards both "parameters"
and "parameters/trainable".
Args:
counters: a list of (string, number) pairs
Returns:
a string | [
"print",
"counters",
"hierarchically",
"."
] | python | train |
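An illustration of the prefix accumulation, assuming the function is importable from mesh_tensorflow.ops as in this record:

from mesh_tensorflow.ops import pretty_print_counters

counters = [("parameters/trainable", 10), ("parameters/frozen", 2)]
print(pretty_print_counters(counters))
# parameters: 12
#  parameters/frozen: 2
#  parameters/trainable: 10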
dantezhu/melon | melon/melon.py | https://github.com/dantezhu/melon/blob/44d859fa85fbfb2d77479e01eade925a0d26e4f7/melon/melon.py#L113-L126 | def make_proc_name(self, subtitle):
"""
获取进程名称
:param subtitle:
:return:
"""
proc_name = '[%s:%s %s] %s' % (
constants.NAME,
subtitle,
self.name,
' '.join([sys.executable] + sys.argv)
)
return proc_name | [
"def",
"make_proc_name",
"(",
"self",
",",
"subtitle",
")",
":",
"proc_name",
"=",
"'[%s:%s %s] %s'",
"%",
"(",
"constants",
".",
"NAME",
",",
"subtitle",
",",
"self",
".",
"name",
",",
"' '",
".",
"join",
"(",
"[",
"sys",
".",
"executable",
"]",
"+",
"sys",
".",
"argv",
")",
")",
"return",
"proc_name"
] | Get the process name.
:param subtitle:
:return: | [
"获取进程名称",
":",
"param",
"subtitle",
":",
":",
"return",
":"
] | python | train |
tensorflow/tensorboard | tensorboard/backend/event_processing/directory_watcher.py | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/backend/event_processing/directory_watcher.py#L172-L192 | def _SetPath(self, path):
"""Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch.
"""
old_path = self._path
if old_path and not io_wrapper.IsCloudPath(old_path):
try:
# We're done with the path, so store its size.
size = tf.io.gfile.stat(old_path).length
logger.debug('Setting latest size of %s to %d', old_path, size)
self._finalized_sizes[old_path] = size
except tf.errors.OpError as e:
logger.error('Unable to get size of %s: %s', old_path, e)
self._path = path
self._loader = self._loader_factory(path) | [
"def",
"_SetPath",
"(",
"self",
",",
"path",
")",
":",
"old_path",
"=",
"self",
".",
"_path",
"if",
"old_path",
"and",
"not",
"io_wrapper",
".",
"IsCloudPath",
"(",
"old_path",
")",
":",
"try",
":",
"# We're done with the path, so store its size.",
"size",
"=",
"tf",
".",
"io",
".",
"gfile",
".",
"stat",
"(",
"old_path",
")",
".",
"length",
"logger",
".",
"debug",
"(",
"'Setting latest size of %s to %d'",
",",
"old_path",
",",
"size",
")",
"self",
".",
"_finalized_sizes",
"[",
"old_path",
"]",
"=",
"size",
"except",
"tf",
".",
"errors",
".",
"OpError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Unable to get size of %s: %s'",
",",
"old_path",
",",
"e",
")",
"self",
".",
"_path",
"=",
"path",
"self",
".",
"_loader",
"=",
"self",
".",
"_loader_factory",
"(",
"path",
")"
] | Sets the current path to watch for new events.
This also records the size of the old path, if any. If the size can't be
found, an error is logged.
Args:
path: The full path of the file to watch. | [
"Sets",
"the",
"current",
"path",
"to",
"watch",
"for",
"new",
"events",
"."
] | python | train |
xflr6/graphviz | graphviz/backend.py | https://github.com/xflr6/graphviz/blob/7376095ef1e47abad7e0b0361b6c9720b706e7a0/graphviz/backend.py#L139-L161 | def run(cmd, input=None, capture_output=False, check=False, quiet=False, **kwargs):
"""Run the command described by cmd and return its (stdout, stderr) tuple."""
if input is not None:
kwargs['stdin'] = subprocess.PIPE
if capture_output:
kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE
try:
proc = subprocess.Popen(cmd, startupinfo=get_startupinfo(), **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
raise ExecutableNotFound(cmd)
else:
raise
out, err = proc.communicate(input)
if not quiet and err:
stderr_write_bytes(err, flush=True)
if check and proc.returncode:
raise CalledProcessError(proc.returncode, cmd, output=out, stderr=err)
return out, err | [
"def",
"run",
"(",
"cmd",
",",
"input",
"=",
"None",
",",
"capture_output",
"=",
"False",
",",
"check",
"=",
"False",
",",
"quiet",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"input",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'stdin'",
"]",
"=",
"subprocess",
".",
"PIPE",
"if",
"capture_output",
":",
"kwargs",
"[",
"'stdout'",
"]",
"=",
"kwargs",
"[",
"'stderr'",
"]",
"=",
"subprocess",
".",
"PIPE",
"try",
":",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
",",
"startupinfo",
"=",
"get_startupinfo",
"(",
")",
",",
"*",
"*",
"kwargs",
")",
"except",
"OSError",
"as",
"e",
":",
"if",
"e",
".",
"errno",
"==",
"errno",
".",
"ENOENT",
":",
"raise",
"ExecutableNotFound",
"(",
"cmd",
")",
"else",
":",
"raise",
"out",
",",
"err",
"=",
"proc",
".",
"communicate",
"(",
"input",
")",
"if",
"not",
"quiet",
"and",
"err",
":",
"stderr_write_bytes",
"(",
"err",
",",
"flush",
"=",
"True",
")",
"if",
"check",
"and",
"proc",
".",
"returncode",
":",
"raise",
"CalledProcessError",
"(",
"proc",
".",
"returncode",
",",
"cmd",
",",
"output",
"=",
"out",
",",
"stderr",
"=",
"err",
")",
"return",
"out",
",",
"err"
] | Run the command described by cmd and return its (stdout, stderr) tuple. | [
"Run",
"the",
"command",
"described",
"by",
"cmd",
"and",
"return",
"its",
"(",
"stdout",
"stderr",
")",
"tuple",
"."
] | python | train |
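A usage sketch, assuming a graphviz release where this private helper still exists and the `dot` binary is on PATH (Graphviz writes its version banner to stderr):

from graphviz.backend import run

out, err = run(['dot', '-V'], capture_output=True)
print(err)  # e.g. b'dot - graphviz version ...'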
tkf/rash | rash/database.py | https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/database.py#L129-L160 | def connection(self, commit=False):
"""
Context manager to keep around DB connection.
:rtype: sqlite3.Connection
SOMEDAY: Get rid of this function. Keeping connection around as
an argument to the method using this context manager is
probably better as it is more explicit.
Also, holding "global state" as instance attribute is bad for
supporting threaded search, which is required for more fluent
percol integration.
"""
if commit:
self._need_commit = True
if self._db:
yield self._db
else:
try:
with self._get_db() as db:
self._db = db
db.create_function("REGEXP", 2, sql_regexp_func)
db.create_function("PROGRAM_NAME", 1,
sql_program_name_func)
db.create_function("PATHDIST", 2, sql_pathdist_func)
yield self._db
if self._need_commit:
db.commit()
finally:
self._db = None
self._need_commit = False | [
"def",
"connection",
"(",
"self",
",",
"commit",
"=",
"False",
")",
":",
"if",
"commit",
":",
"self",
".",
"_need_commit",
"=",
"True",
"if",
"self",
".",
"_db",
":",
"yield",
"self",
".",
"_db",
"else",
":",
"try",
":",
"with",
"self",
".",
"_get_db",
"(",
")",
"as",
"db",
":",
"self",
".",
"_db",
"=",
"db",
"db",
".",
"create_function",
"(",
"\"REGEXP\"",
",",
"2",
",",
"sql_regexp_func",
")",
"db",
".",
"create_function",
"(",
"\"PROGRAM_NAME\"",
",",
"1",
",",
"sql_program_name_func",
")",
"db",
".",
"create_function",
"(",
"\"PATHDIST\"",
",",
"2",
",",
"sql_pathdist_func",
")",
"yield",
"self",
".",
"_db",
"if",
"self",
".",
"_need_commit",
":",
"db",
".",
"commit",
"(",
")",
"finally",
":",
"self",
".",
"_db",
"=",
"None",
"self",
".",
"_need_commit",
"=",
"False"
] | Context manager to keep around DB connection.
:rtype: sqlite3.Connection
SOMEDAY: Get rid of this function. Keeping connection around as
an argument to the method using this context manager is
probably better as it is more explicit.
Also, holding "global state" as instance attribute is bad for
supporting threaded search, which is required for more fluent
percol integration. | [
"Context",
"manager",
"to",
"keep",
"around",
"DB",
"connection",
"."
] | python | train |
flowersteam/explauto | explauto/sensorimotor_model/inverse/cma.py | https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/cma.py#L1137-L1167 | def repair(self, x, copy_if_changed=True, copy_always=False):
"""sets out-of-bounds components of ``x`` on the bounds.
"""
# TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound
# remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))
copy = copy_if_changed
if copy_always:
x = array(x, copy=True)
bounds = self.bounds
if bounds not in (None, [None, None], (None, None)): # solely for efficiency
x = array(x, copy=True) if copy and not copy_always else x
if bounds[0] is not None:
if isscalar(bounds[0]):
for i in rglen(x):
x[i] = max((bounds[0], x[i]))
else:
for i in rglen(x):
j = min([i, len(bounds[0]) - 1])
if bounds[0][j] is not None:
x[i] = max((bounds[0][j], x[i]))
if bounds[1] is not None:
if isscalar(bounds[1]):
for i in rglen(x):
x[i] = min((bounds[1], x[i]))
else:
for i in rglen(x):
j = min((i, len(bounds[1]) - 1))
if bounds[1][j] is not None:
x[i] = min((bounds[1][j], x[i]))
return x | [
"def",
"repair",
"(",
"self",
",",
"x",
",",
"copy_if_changed",
"=",
"True",
",",
"copy_always",
"=",
"False",
")",
":",
"# TODO (old data): CPU(N,lam,iter=20,200,100): 3.3s of 8s for two bounds, 1.8s of 6.5s for one bound",
"# remark: np.max([bounds[0], x]) is about 40 times slower than max((bounds[0], x))",
"copy",
"=",
"copy_if_changed",
"if",
"copy_always",
":",
"x",
"=",
"array",
"(",
"x",
",",
"copy",
"=",
"True",
")",
"bounds",
"=",
"self",
".",
"bounds",
"if",
"bounds",
"not",
"in",
"(",
"None",
",",
"[",
"None",
",",
"None",
"]",
",",
"(",
"None",
",",
"None",
")",
")",
":",
"# solely for effiency",
"x",
"=",
"array",
"(",
"x",
",",
"copy",
"=",
"True",
")",
"if",
"copy",
"and",
"not",
"copy_always",
"else",
"x",
"if",
"bounds",
"[",
"0",
"]",
"is",
"not",
"None",
":",
"if",
"isscalar",
"(",
"bounds",
"[",
"0",
"]",
")",
":",
"for",
"i",
"in",
"rglen",
"(",
"x",
")",
":",
"x",
"[",
"i",
"]",
"=",
"max",
"(",
"(",
"bounds",
"[",
"0",
"]",
",",
"x",
"[",
"i",
"]",
")",
")",
"else",
":",
"for",
"i",
"in",
"rglen",
"(",
"x",
")",
":",
"j",
"=",
"min",
"(",
"[",
"i",
",",
"len",
"(",
"bounds",
"[",
"0",
"]",
")",
"-",
"1",
"]",
")",
"if",
"bounds",
"[",
"0",
"]",
"[",
"j",
"]",
"is",
"not",
"None",
":",
"x",
"[",
"i",
"]",
"=",
"max",
"(",
"(",
"bounds",
"[",
"0",
"]",
"[",
"j",
"]",
",",
"x",
"[",
"i",
"]",
")",
")",
"if",
"bounds",
"[",
"1",
"]",
"is",
"not",
"None",
":",
"if",
"isscalar",
"(",
"bounds",
"[",
"1",
"]",
")",
":",
"for",
"i",
"in",
"rglen",
"(",
"x",
")",
":",
"x",
"[",
"i",
"]",
"=",
"min",
"(",
"(",
"bounds",
"[",
"1",
"]",
",",
"x",
"[",
"i",
"]",
")",
")",
"else",
":",
"for",
"i",
"in",
"rglen",
"(",
"x",
")",
":",
"j",
"=",
"min",
"(",
"(",
"i",
",",
"len",
"(",
"bounds",
"[",
"1",
"]",
")",
"-",
"1",
")",
")",
"if",
"bounds",
"[",
"1",
"]",
"[",
"j",
"]",
"is",
"not",
"None",
":",
"x",
"[",
"i",
"]",
"=",
"min",
"(",
"(",
"bounds",
"[",
"1",
"]",
"[",
"j",
"]",
",",
"x",
"[",
"i",
"]",
")",
")",
"return",
"x"
] | sets out-of-bounds components of ``x`` on the bounds. | [
"sets",
"out",
"-",
"of",
"-",
"bounds",
"components",
"of",
"x",
"on",
"the",
"bounds",
"."
] | python | train |
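For the simple case of scalar lower/upper bounds, the clamping above has the same effect as numpy.clip; the hand-rolled loops additionally handle per-coordinate list bounds and the copy semantics:

import numpy as np

x = np.array([-2.0, 0.5, 3.0])
print(np.clip(x, 0.0, 1.0))  # [0.  0.5 1. ], same result as repair(x) with bounds (0, 1)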
a1ezzz/wasp-general | wasp_general/network/primitives.py | https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/primitives.py#L584-L597 | def parse_socket_info(cls, info):
""" Parse string that is formed like '[address]<:port>' and return corresponding
:class:`.WIPV4ScketInfo` object
:param info: string to parse
:return: WIPV4ScketInfo
"""
info = info.split(':')
if len(info) > 2:
raise ValueError('Incorrect socket info specified')
address = info[0].strip()
port = int(info[1].strip()) if len(info) == 2 else None
return WIPV4SocketInfo(address=address, port=port) | [
"def",
"parse_socket_info",
"(",
"cls",
",",
"info",
")",
":",
"info",
"=",
"info",
".",
"split",
"(",
"':'",
")",
"if",
"len",
"(",
"info",
")",
">",
"2",
":",
"raise",
"ValueError",
"(",
"'Incorrect socket info specified'",
")",
"address",
"=",
"info",
"[",
"0",
"]",
".",
"strip",
"(",
")",
"port",
"=",
"int",
"(",
"info",
"[",
"1",
"]",
".",
"strip",
"(",
")",
")",
"if",
"len",
"(",
"info",
")",
"==",
"2",
"else",
"None",
"return",
"WIPV4SocketInfo",
"(",
"address",
"=",
"address",
",",
"port",
"=",
"port",
")"
] | Parse string that is formed like '[address]<:port>' and return corresponding
:class:`.WIPV4SocketInfo` object
:param info: string to parse
:return: WIPV4SocketInfo | [
"Parse",
"string",
"that",
"is",
"formed",
"like",
"[",
"address",
"]",
"<",
":",
"port",
">",
"and",
"return",
"corresponding",
":",
"class",
":",
".",
"WIPV4ScketInfo",
"object"
] | python | train |
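A usage sketch, assuming wasp_general is installed with the module path shown above; the returned object carries the parsed address and optional port:

from wasp_general.network.primitives import WIPV4SocketInfo

info = WIPV4SocketInfo.parse_socket_info('192.168.0.1:22')
# the address-only form is also accepted; the port is then left unset
info_no_port = WIPV4SocketInfo.parse_socket_info('192.168.0.1')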
chrisrink10/basilisp | src/basilisp/lang/compiler/generator.py | https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/generator.py#L1928-L1937 | def _maybe_class_to_py_ast(_: GeneratorContext, node: MaybeClass) -> GeneratedPyAST:
"""Generate a Python AST node for accessing a potential Python module
variable name."""
assert node.op == NodeOp.MAYBE_CLASS
return GeneratedPyAST(
node=ast.Name(
id=Maybe(_MODULE_ALIASES.get(node.class_)).or_else_get(node.class_),
ctx=ast.Load(),
)
) | [
"def",
"_maybe_class_to_py_ast",
"(",
"_",
":",
"GeneratorContext",
",",
"node",
":",
"MaybeClass",
")",
"->",
"GeneratedPyAST",
":",
"assert",
"node",
".",
"op",
"==",
"NodeOp",
".",
"MAYBE_CLASS",
"return",
"GeneratedPyAST",
"(",
"node",
"=",
"ast",
".",
"Name",
"(",
"id",
"=",
"Maybe",
"(",
"_MODULE_ALIASES",
".",
"get",
"(",
"node",
".",
"class_",
")",
")",
".",
"or_else_get",
"(",
"node",
".",
"class_",
")",
",",
"ctx",
"=",
"ast",
".",
"Load",
"(",
")",
",",
")",
")"
] | Generate a Python AST node for accessing a potential Python module
variable name. | [
"Generate",
"a",
"Python",
"AST",
"node",
"for",
"accessing",
"a",
"potential",
"Python",
"module",
"variable",
"name",
"."
] | python | test |
mbakker7/timml | timml/linesink.py | https://github.com/mbakker7/timml/blob/91e99ad573cb8a9ad8ac1fa041c3ca44520c2390/timml/linesink.py#L715-L736 | def potinf(self, x, y, aq=None):
'''
linesink 0, order 0, layer[0]
order 0, layer[1]
...
order 1, layer[0]
order 1, layer[1]
...
linesink 1, order 0, layer[0]
order 0, layer[1]
...
order 1, layer[0]
order 1, layer[1]
...
'''
if aq is None: aq = self.model.aq.find_aquifer_data(x, y)
rv = np.zeros((self.nls, self.lslist[0].nparam, aq.naq))
if aq in self.aq:
for i, ls in enumerate(self.lslist):
rv[i] = ls.potinf(x, y, aq)
rv.shape = (self.nparam, aq.naq)
return rv | [
"def",
"potinf",
"(",
"self",
",",
"x",
",",
"y",
",",
"aq",
"=",
"None",
")",
":",
"if",
"aq",
"is",
"None",
":",
"aq",
"=",
"self",
".",
"model",
".",
"aq",
".",
"find_aquifer_data",
"(",
"x",
",",
"y",
")",
"rv",
"=",
"np",
".",
"zeros",
"(",
"(",
"self",
".",
"nls",
",",
"self",
".",
"lslist",
"[",
"0",
"]",
".",
"nparam",
",",
"aq",
".",
"naq",
")",
")",
"if",
"aq",
"in",
"self",
".",
"aq",
":",
"for",
"i",
",",
"ls",
"in",
"enumerate",
"(",
"self",
".",
"lslist",
")",
":",
"rv",
"[",
"i",
"]",
"=",
"ls",
".",
"potinf",
"(",
"x",
",",
"y",
",",
"aq",
")",
"rv",
".",
"shape",
"=",
"(",
"self",
".",
"nparam",
",",
"aq",
".",
"naq",
")",
"return",
"rv"
] | linesink 0, order 0, layer[0]
order 0, layer[1]
...
order 1, layer[0]
order 1, layer[1]
...
linesink 1, order 0, layer[0]
order 0, layer[1]
...
order 1, layer[0]
order 1, layer[1]
... | [
"linesink",
"0",
"order",
"0",
"layer",
"[",
"0",
"]",
"order",
"0",
"layer",
"[",
"1",
"]",
"...",
"order",
"1",
"layer",
"[",
"0",
"]",
"order",
"1",
"layer",
"[",
"1",
"]",
"...",
"linesink",
"1",
"order",
"0",
"layer",
"[",
"0",
"]",
"order",
"0",
"layer",
"[",
"1",
"]",
"...",
"order",
"1",
"layer",
"[",
"0",
"]",
"order",
"1",
"layer",
"[",
"1",
"]",
"..."
] | python | train |
Spinmob/spinmob | _data.py | https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_data.py#L948-L1018 | def is_same_as(self, other_databox, headers=True, columns=True, header_order=True, column_order=True, ckeys=True):
"""
Tests that the important (i.e. savable) information in this databox
is the same as that of the other_databox.
Parameters
----------
other_databox
Databox with which to compare.
headers=True
Make sure all header elements match.
columns=True
Make sure every element of every column matches.
header_order=True
Whether the order of the header elements must match.
column_order=True
Whether the order of the columns must match. This is only a sensible
concern if ckeys=True.
ckeys=True
Whether the actual ckeys matter, or just the ordered columns of data.
Note the == symbol runs this function with everything True.
"""
d = other_databox
if not hasattr(other_databox, '_is_spinmob_databox'): return False
# Proceed by testing things one at a time, returning false if one fails
if headers:
# Same number of elements
if not len(self.hkeys) == len(d.hkeys): return False
# Elements
if header_order and not self.hkeys == d.hkeys: return False
# Each value
for k in self.hkeys:
# Make sure the key exists
if not k in d.hkeys: return False
# Make sure it's the same.
if not self.h(k) == d.h(k): return False
if columns:
# Same number of columns
if not len(self.ckeys) == len(d.ckeys): return False
# If we're checking columns by ckeys
if ckeys:
# Columns
if column_order and not self.ckeys == d.ckeys: return False
# Each value of each array
for k in self.ckeys:
# Make sure the key exists
if not k in d.ckeys: return False
# Check the values
if not (_n.array(self[k]) == _n.array(d[k])).all(): return False
# Otherwise we're ignoring ckeys
else:
for n in range(len(self.ckeys)):
if not (_n.array(self[n]) == _n.array(d[n])).all(): return False
# Passes all tests
return True | [
"def",
"is_same_as",
"(",
"self",
",",
"other_databox",
",",
"headers",
"=",
"True",
",",
"columns",
"=",
"True",
",",
"header_order",
"=",
"True",
",",
"column_order",
"=",
"True",
",",
"ckeys",
"=",
"True",
")",
":",
"d",
"=",
"other_databox",
"if",
"not",
"hasattr",
"(",
"other_databox",
",",
"'_is_spinmob_databox'",
")",
":",
"return",
"False",
"# Proceed by testing things one at a time, returning false if one fails",
"if",
"headers",
":",
"# Same number of elements",
"if",
"not",
"len",
"(",
"self",
".",
"hkeys",
")",
"==",
"len",
"(",
"d",
".",
"hkeys",
")",
":",
"return",
"False",
"# Elements",
"if",
"header_order",
"and",
"not",
"self",
".",
"hkeys",
"==",
"d",
".",
"hkeys",
":",
"return",
"False",
"# Each value",
"for",
"k",
"in",
"self",
".",
"hkeys",
":",
"# Make sure the key exists",
"if",
"not",
"k",
"in",
"d",
".",
"hkeys",
":",
"return",
"False",
"# Make sure it's the same.",
"if",
"not",
"self",
".",
"h",
"(",
"k",
")",
"==",
"d",
".",
"h",
"(",
"k",
")",
":",
"return",
"False",
"if",
"columns",
":",
"# Same number of columns",
"if",
"not",
"len",
"(",
"self",
".",
"ckeys",
")",
"==",
"len",
"(",
"d",
".",
"ckeys",
")",
":",
"return",
"False",
"# If we're checking columns by ckeys",
"if",
"ckeys",
":",
"# Columns",
"if",
"column_order",
"and",
"not",
"self",
".",
"ckeys",
"==",
"d",
".",
"ckeys",
":",
"return",
"False",
"# Each value of each array",
"for",
"k",
"in",
"self",
".",
"ckeys",
":",
"# Make sure the key exists",
"if",
"not",
"k",
"in",
"d",
".",
"ckeys",
":",
"return",
"False",
"# Check the values",
"if",
"not",
"(",
"_n",
".",
"array",
"(",
"self",
"[",
"k",
"]",
")",
"==",
"_n",
".",
"array",
"(",
"d",
"[",
"k",
"]",
")",
")",
".",
"all",
"(",
")",
":",
"return",
"False",
"# Otherwise we're ignoring ckeys",
"else",
":",
"for",
"n",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"ckeys",
")",
")",
":",
"if",
"not",
"(",
"_n",
".",
"array",
"(",
"self",
"[",
"n",
"]",
")",
"==",
"_n",
".",
"array",
"(",
"d",
"[",
"n",
"]",
")",
")",
".",
"all",
"(",
")",
":",
"return",
"False",
"# Passes all tests",
"return",
"True"
] | Tests that the important (i.e. savable) information in this databox
is the same as that of the other_databox.
Parameters
----------
other_databox
Databox with which to compare.
headers=True
Make sure all header elements match.
columns=True
Make sure every element of every column matches.
header_order=True
Whether the order of the header elements must match.
column_order=True
Whether the order of the columns must match. This is only a sensible
concern if ckeys=True.
ckeys=True
Whether the actual ckeys matter, or just the ordered columns of data.
Note the == symbol runs this function with everything True. | [
"Tests",
"that",
"the",
"important",
"(",
"i",
".",
"e",
".",
"savable",
")",
"information",
"in",
"this",
"databox",
"is",
"the",
"same",
"as",
"that",
"of",
"the",
"other_databox",
".",
"Parameters",
"----------",
"other_databox",
"Databox",
"with",
"which",
"to",
"compare",
".",
"headers",
"=",
"True",
"Make",
"sure",
"all",
"header",
"elements",
"match",
".",
"columns",
"=",
"True",
"Make",
"sure",
"every",
"element",
"of",
"every",
"column",
"matches",
".",
"header_order",
"=",
"True",
"Whether",
"the",
"order",
"of",
"the",
"header",
"elements",
"must",
"match",
".",
"column_order",
"=",
"True",
"Whether",
"the",
"order",
"of",
"the",
"columns",
"must",
"match",
".",
"This",
"is",
"only",
"a",
"sensible",
"concern",
"if",
"ckeys",
"=",
"True",
".",
"ckeys",
"=",
"True",
"Whether",
"the",
"actual",
"ckeys",
"matter",
"or",
"just",
"the",
"ordered",
"columns",
"of",
"data",
".",
"Note",
"the",
"==",
"symbol",
"runs",
"this",
"function",
"with",
"everything",
"True",
"."
] | python | train |
saltstack/salt | salt/config/__init__.py | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/config/__init__.py#L4224-L4251 | def apply_spm_config(overrides, defaults):
'''
Returns the spm configurations dict.
.. versionadded:: 2015.8.1
'''
opts = defaults.copy()
_adjust_log_file_override(overrides, defaults['log_file'])
if overrides:
opts.update(overrides)
# Prepend root_dir to other paths
prepend_root_dirs = [
'formula_path', 'pillar_path', 'reactor_path',
'spm_cache_dir', 'spm_build_dir'
]
# These can be set to syslog, so, not actual paths on the system
for config_key in ('spm_logfile',):
log_setting = opts.get(config_key, '')
if log_setting is None:
continue
if urlparse(log_setting).scheme == '':
prepend_root_dirs.append(config_key)
prepend_root_dir(opts, prepend_root_dirs)
return opts | [
"def",
"apply_spm_config",
"(",
"overrides",
",",
"defaults",
")",
":",
"opts",
"=",
"defaults",
".",
"copy",
"(",
")",
"_adjust_log_file_override",
"(",
"overrides",
",",
"defaults",
"[",
"'log_file'",
"]",
")",
"if",
"overrides",
":",
"opts",
".",
"update",
"(",
"overrides",
")",
"# Prepend root_dir to other paths",
"prepend_root_dirs",
"=",
"[",
"'formula_path'",
",",
"'pillar_path'",
",",
"'reactor_path'",
",",
"'spm_cache_dir'",
",",
"'spm_build_dir'",
"]",
"# These can be set to syslog, so, not actual paths on the system",
"for",
"config_key",
"in",
"(",
"'spm_logfile'",
",",
")",
":",
"log_setting",
"=",
"opts",
".",
"get",
"(",
"config_key",
",",
"''",
")",
"if",
"log_setting",
"is",
"None",
":",
"continue",
"if",
"urlparse",
"(",
"log_setting",
")",
".",
"scheme",
"==",
"''",
":",
"prepend_root_dirs",
".",
"append",
"(",
"config_key",
")",
"prepend_root_dir",
"(",
"opts",
",",
"prepend_root_dirs",
")",
"return",
"opts"
] | Returns the spm configurations dict.
.. versionadded:: 2015.8.1 | [
"Returns",
"the",
"spm",
"configurations",
"dict",
"."
] | python | train |
guaix-ucm/pyemir | emirdrp/processing/wavecal/slitlet2d.py | https://github.com/guaix-ucm/pyemir/blob/fef6bbabcb13f80123cafd1800a0f508a3c21702/emirdrp/processing/wavecal/slitlet2d.py#L406-L431 | def ximshow_unrectified(self, slitlet2d):
"""Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image.
"""
title = "Slitlet#" + str(self.islitlet)
ax = ximshow(slitlet2d, title=title,
first_pixel=(self.bb_nc1_orig, self.bb_ns1_orig),
show=False)
xdum = np.linspace(1, EMIR_NAXIS1, num=EMIR_NAXIS1)
ylower = self.list_spectrails[0](xdum)
ax.plot(xdum, ylower, 'b-')
ymiddle = self.list_spectrails[1](xdum)
ax.plot(xdum, ymiddle, 'b--')
yupper = self.list_spectrails[2](xdum)
ax.plot(xdum, yupper, 'b-')
ylower_frontier = self.list_frontiers[0](xdum)
ax.plot(xdum, ylower_frontier, 'b:')
yupper_frontier = self.list_frontiers[1](xdum)
ax.plot(xdum, yupper_frontier, 'b:')
pause_debugplot(debugplot=self.debugplot, pltshow=True) | [
"def",
"ximshow_unrectified",
"(",
"self",
",",
"slitlet2d",
")",
":",
"title",
"=",
"\"Slitlet#\"",
"+",
"str",
"(",
"self",
".",
"islitlet",
")",
"ax",
"=",
"ximshow",
"(",
"slitlet2d",
",",
"title",
"=",
"title",
",",
"first_pixel",
"=",
"(",
"self",
".",
"bb_nc1_orig",
",",
"self",
".",
"bb_ns1_orig",
")",
",",
"show",
"=",
"False",
")",
"xdum",
"=",
"np",
".",
"linspace",
"(",
"1",
",",
"EMIR_NAXIS1",
",",
"num",
"=",
"EMIR_NAXIS1",
")",
"ylower",
"=",
"self",
".",
"list_spectrails",
"[",
"0",
"]",
"(",
"xdum",
")",
"ax",
".",
"plot",
"(",
"xdum",
",",
"ylower",
",",
"'b-'",
")",
"ymiddle",
"=",
"self",
".",
"list_spectrails",
"[",
"1",
"]",
"(",
"xdum",
")",
"ax",
".",
"plot",
"(",
"xdum",
",",
"ymiddle",
",",
"'b--'",
")",
"yupper",
"=",
"self",
".",
"list_spectrails",
"[",
"2",
"]",
"(",
"xdum",
")",
"ax",
".",
"plot",
"(",
"xdum",
",",
"yupper",
",",
"'b-'",
")",
"ylower_frontier",
"=",
"self",
".",
"list_frontiers",
"[",
"0",
"]",
"(",
"xdum",
")",
"ax",
".",
"plot",
"(",
"xdum",
",",
"ylower_frontier",
",",
"'b:'",
")",
"yupper_frontier",
"=",
"self",
".",
"list_frontiers",
"[",
"1",
"]",
"(",
"xdum",
")",
"ax",
".",
"plot",
"(",
"xdum",
",",
"yupper_frontier",
",",
"'b:'",
")",
"pause_debugplot",
"(",
"debugplot",
"=",
"self",
".",
"debugplot",
",",
"pltshow",
"=",
"True",
")"
] | Display unrectified image with spectrails and frontiers.
Parameters
----------
slitlet2d : numpy array
Array containing the unrectified slitlet image. | [
"Display",
"unrectified",
"image",
"with",
"spectrails",
"and",
"frontiers",
"."
] | python | train |
AtteqCom/zsl | src/zsl/interface/task_queue.py | https://github.com/AtteqCom/zsl/blob/ab51a96da1780ff642912396d4b85bdcb72560c1/src/zsl/interface/task_queue.py#L28-L52 | def execute_job(job, app=Injected, task_router=Injected):
# type: (Job, Zsl, TaskRouter) -> dict
"""Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict
"""
app.logger.info("Job fetched, preparing the task '{0}'.".format(job.path))
task, task_callable = task_router.route(job.path)
jc = JobContext(job, task, task_callable)
app.logger.info("Executing task.")
result = jc.task_callable(jc.task_data)
app.logger.info("Task {0} executed successfully.".format(job.path))
return {'task_name': job.path, 'data': result} | [
"def",
"execute_job",
"(",
"job",
",",
"app",
"=",
"Injected",
",",
"task_router",
"=",
"Injected",
")",
":",
"# type: (Job, Zsl, TaskRouter) -> dict",
"app",
".",
"logger",
".",
"info",
"(",
"\"Job fetched, preparing the task '{0}'.\"",
".",
"format",
"(",
"job",
".",
"path",
")",
")",
"task",
",",
"task_callable",
"=",
"task_router",
".",
"route",
"(",
"job",
".",
"path",
")",
"jc",
"=",
"JobContext",
"(",
"job",
",",
"task",
",",
"task_callable",
")",
"app",
".",
"logger",
".",
"info",
"(",
"\"Executing task.\"",
")",
"result",
"=",
"jc",
".",
"task_callable",
"(",
"jc",
".",
"task_data",
")",
"app",
".",
"logger",
".",
"info",
"(",
"\"Task {0} executed successfully.\"",
".",
"format",
"(",
"job",
".",
"path",
")",
")",
"return",
"{",
"'task_name'",
":",
"job",
".",
"path",
",",
"'data'",
":",
"result",
"}"
] | Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict | [
"Execute",
"a",
"job",
"."
] | python | train |
jquast/wcwidth | bin/wcwidth-libc-comparator.py | https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/bin/wcwidth-libc-comparator.py#L44-L63 | def report_ucs_msg(ucs, wcwidth_libc, wcwidth_local):
"""
Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type wcwidth_local: int
:rtype: unicode
"""
ucp = (ucs.encode('unicode_escape')[2:]
.decode('ascii')
.upper()
.lstrip('0'))
url = "http://codepoints.net/U+{}".format(ucp)
name = unicodedata.name(ucs)
return (u"libc,ours={},{} [--o{}o--] name={} val={} {}"
" ".format(wcwidth_libc, wcwidth_local, ucs, name, ord(ucs), url)) | [
"def",
"report_ucs_msg",
"(",
"ucs",
",",
"wcwidth_libc",
",",
"wcwidth_local",
")",
":",
"ucp",
"=",
"(",
"ucs",
".",
"encode",
"(",
"'unicode_escape'",
")",
"[",
"2",
":",
"]",
".",
"decode",
"(",
"'ascii'",
")",
".",
"upper",
"(",
")",
".",
"lstrip",
"(",
"'0'",
")",
")",
"url",
"=",
"\"http://codepoints.net/U+{}\"",
".",
"format",
"(",
"ucp",
")",
"name",
"=",
"unicodedata",
".",
"name",
"(",
"ucs",
")",
"return",
"(",
"u\"libc,ours={},{} [--o{}o--] name={} val={} {}\"",
"\" \"",
".",
"format",
"(",
"wcwidth_libc",
",",
"wcwidth_local",
",",
"ucs",
",",
"name",
",",
"ord",
"(",
"ucs",
")",
",",
"url",
")",
")"
] | Return string report of combining character differences.
:param ucs: unicode point.
:type ucs: unicode
:param wcwidth_libc: libc-wcwidth's reported character length.
:type wcwidth_libc: int
:param wcwidth_local: wcwidth's reported character length.
:type comb_wc: int
:rtype: unicode | [
"Return",
"string",
"report",
"of",
"combining",
"character",
"differences",
"."
] | python | train |
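The same report, traced standalone for one combining character; the width values 1 and 0 are made up for illustration, and unicodedata is from the standard library:

    import unicodedata

    ucs = u'\u0301'  # COMBINING ACUTE ACCENT
    ucp = (ucs.encode('unicode_escape')[2:]
           .decode('ascii').upper().lstrip('0'))
    print(u"libc,ours={},{} [--o{}o--] name={} val={} {} ".format(
        1, 0, ucs, unicodedata.name(ucs), ord(ucs),
        "http://codepoints.net/U+{}".format(ucp)))
    # -> libc,ours=1,0 [--o...o--] name=COMBINING ACUTE ACCENT val=769
    #    http://codepoints.net/U+301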
jgillick/LendingClub | lendingclub/session.py | https://github.com/jgillick/LendingClub/blob/4495f99fd869810f39c00e02b0f4112c6b210384/lendingclub/session.py#L73-L85 | def __continue_session(self):
"""
Check if the time since the last HTTP request is under the
session timeout limit. If it's been too long since the last request,
attempt to authenticate again.
"""
now = time.time()
diff = abs(now - self.last_request_time)
timeout_sec = self.session_timeout * 60 # convert minutes to seconds
if diff >= timeout_sec:
self.__log('Session timed out, attempting to authenticate')
self.authenticate() | [
"def",
"__continue_session",
"(",
"self",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"diff",
"=",
"abs",
"(",
"now",
"-",
"self",
".",
"last_request_time",
")",
"timeout_sec",
"=",
"self",
".",
"session_timeout",
"*",
"60",
"# convert minutes to seconds",
"if",
"diff",
">=",
"timeout_sec",
":",
"self",
".",
"__log",
"(",
"'Session timed out, attempting to authenticate'",
")",
"self",
".",
"authenticate",
"(",
")"
] | Check if the time since the last HTTP request is under the
session timeout limit. If it's been too long since the last request,
attempt to authenticate again. | [
"Check",
"if",
"the",
"time",
"since",
"the",
"last",
"HTTP",
"request",
"is",
"under",
"the",
"session",
"timeout",
"limit",
".",
"If",
"it",
"s",
"been",
"too",
"long",
"since",
"the",
"last",
"request",
"attempt",
"to",
"authenticate",
"again",
"."
] | python | train |
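A minimal sketch of the same keep-alive check in isolation; the authenticate callback stands in for LendingClub's real re-login, and the zero-minute timeout is only there to force the branch:

    import time

    class KeepAlive:
        def __init__(self, session_timeout_min):
            self.session_timeout = session_timeout_min
            self.last_request_time = time.time()

        def continue_session(self, authenticate):
            # Re-authenticate once the idle gap exceeds the timeout.
            timeout_sec = self.session_timeout * 60
            if abs(time.time() - self.last_request_time) >= timeout_sec:
                authenticate()

    KeepAlive(0).continue_session(lambda: print('re-authenticating'))
    # -> re-authenticating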
dariusbakunas/rawdisk | rawdisk/util/rawstruct.py | https://github.com/dariusbakunas/rawdisk/blob/1dc9d0b377fe5da3c406ccec4abc238c54167403/rawdisk/util/rawstruct.py#L195-L204 | def get_string(self, offset, length):
"""Returns string (length bytes)
Args:
offset (int): string offset in byte array
length (int): string length
"""
return struct.unpack(str(length) + "s", self.data[
offset:offset + length
])[0] | [
"def",
"get_string",
"(",
"self",
",",
"offset",
",",
"length",
")",
":",
"return",
"struct",
".",
"unpack",
"(",
"str",
"(",
"length",
")",
"+",
"\"s\"",
",",
"self",
".",
"data",
"[",
"offset",
":",
"offset",
"+",
"length",
"]",
")",
"[",
"0",
"]"
] | Returns string (length bytes)
Args:
offset (int): string offset in byte array
length (int): string length | [
"Returns",
"string",
"(",
"length",
"bytes",
")"
] | python | train |
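The same fixed-length unpack shown standalone on a made-up byte buffer (the NTFS OEM label at offset 2 is just an example, not rawdisk's real layout):

    import struct

    data = b'\x00\x00NTFS    rest-of-sector'
    offset, length = 2, 8
    label = struct.unpack(str(length) + 's', data[offset:offset + length])[0]
    print(label)  # b'NTFS    ' (bytes in Python 3)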
NASA-AMMOS/AIT-Core | ait/core/server/client.py | https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/server/client.py#L34-L39 | def publish(self, msg):
"""
Publishes input message with client name as topic.
"""
self.pub.send("{} {}".format(self.name, msg))
log.debug('Published message from {}'.format(self)) | [
"def",
"publish",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"pub",
".",
"send",
"(",
"\"{} {}\"",
".",
"format",
"(",
"self",
".",
"name",
",",
"msg",
")",
")",
"log",
".",
"debug",
"(",
"'Published message from {}'",
".",
"format",
"(",
"self",
")",
")"
] | Publishes input message with client name as topic. | [
"Publishes",
"input",
"message",
"with",
"client",
"name",
"as",
"topic",
"."
] | python | train |
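The topic-prefix convention above, sketched directly with pyzmq (assumed installed); the endpoint and client name are made up, and a subscriber would filter with setsockopt_string(zmq.SUBSCRIBE, name):

    import zmq

    ctx = zmq.Context()
    pub = ctx.socket(zmq.PUB)
    pub.bind('tcp://127.0.0.1:5561')
    name, msg = 'telemetry_client', 'frame received'
    # The space-joined "<topic> <payload>" frame lets SUB sockets
    # prefix-match on the client name.
    pub.send_string('{} {}'.format(name, msg))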
fracpete/python-weka-wrapper3 | python/weka/core/classes.py | https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/classes.py#L843-L852 | def component_type(self):
"""
Returns the classname of the elements.
:return: the class of the elements
:rtype: str
"""
cls = javabridge.call(self.jobject, "getClass", "()Ljava/lang/Class;")
comptype = javabridge.call(cls, "getComponentType", "()Ljava/lang/Class;")
return javabridge.call(comptype, "getName", "()Ljava/lang/String;") | [
"def",
"component_type",
"(",
"self",
")",
":",
"cls",
"=",
"javabridge",
".",
"call",
"(",
"self",
".",
"jobject",
",",
"\"getClass\"",
",",
"\"()Ljava/lang/Class;\"",
")",
"comptype",
"=",
"javabridge",
".",
"call",
"(",
"cls",
",",
"\"getComponentType\"",
",",
"\"()Ljava/lang/Class;\"",
")",
"return",
"javabridge",
".",
"call",
"(",
"comptype",
",",
"\"getName\"",
",",
"\"()Ljava/lang/String;\"",
")"
] | Returns the classname of the elements.
:return: the class of the elements
:rtype: str | [
"Returns",
"the",
"classname",
"of",
"the",
"elements",
"."
] | python | train |
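The same reflective call chain, sketched with bare javabridge against a plain java.lang.String (assumes a local JVM and javabridge install; getComponentType would return null here because a String is not an array, so the sketch stops at getName):

    import javabridge

    javabridge.start_vm(run_headless=True)
    try:
        jstr = javabridge.make_instance(
            'java/lang/String', '(Ljava/lang/String;)V', 'x')
        cls = javabridge.call(jstr, 'getClass', '()Ljava/lang/Class;')
        # Same pattern as component_type, minus the getComponentType hop.
        print(javabridge.call(cls, 'getName', '()Ljava/lang/String;'))
        # -> java.lang.String
    finally:
        javabridge.kill_vm()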
lago-project/lago | lago/plugins/vm.py | https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/plugins/vm.py#L476-L492 | def export_disks(
self,
standalone=True,
dst_dir=None,
compress=False,
collect_only=False,
with_threads=True,
*args,
**kwargs
):
"""
Thin method that just uses the provider
"""
return self.provider.export_disks(
standalone, dst_dir, compress, collect_only, with_threads, *args,
**kwargs
) | [
"def",
"export_disks",
"(",
"self",
",",
"standalone",
"=",
"True",
",",
"dst_dir",
"=",
"None",
",",
"compress",
"=",
"False",
",",
"collect_only",
"=",
"False",
",",
"with_threads",
"=",
"True",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"provider",
".",
"export_disks",
"(",
"standalone",
",",
"dst_dir",
",",
"compress",
",",
"collect_only",
",",
"with_threads",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Thin method that just uses the provider | [
"Thin",
"method",
"that",
"just",
"uses",
"the",
"provider"
] | python | train |
pyviz/holoviews | holoviews/core/dimension.py | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/dimension.py#L1370-L1385 | def _deduplicate_items(cls, items):
"Deduplicates assigned paths by incrementing numbering"
counter = Counter([path[:i] for path, _ in items for i in range(1, len(path)+1)])
if sum(counter.values()) == len(counter):
return items
new_items = []
counts = defaultdict(lambda: 0)
for i, (path, item) in enumerate(items):
if counter[path] > 1:
path = path + (util.int_to_roman(counts[path]+1),)
elif counts[path]:
path = path[:-1] + (util.int_to_roman(counts[path]+1),)
new_items.append((path, item))
counts[path] += 1
return new_items | [
"def",
"_deduplicate_items",
"(",
"cls",
",",
"items",
")",
":",
"counter",
"=",
"Counter",
"(",
"[",
"path",
"[",
":",
"i",
"]",
"for",
"path",
",",
"_",
"in",
"items",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"path",
")",
"+",
"1",
")",
"]",
")",
"if",
"sum",
"(",
"counter",
".",
"values",
"(",
")",
")",
"==",
"len",
"(",
"counter",
")",
":",
"return",
"items",
"new_items",
"=",
"[",
"]",
"counts",
"=",
"defaultdict",
"(",
"lambda",
":",
"0",
")",
"for",
"i",
",",
"(",
"path",
",",
"item",
")",
"in",
"enumerate",
"(",
"items",
")",
":",
"if",
"counter",
"[",
"path",
"]",
">",
"1",
":",
"path",
"=",
"path",
"+",
"(",
"util",
".",
"int_to_roman",
"(",
"counts",
"[",
"path",
"]",
"+",
"1",
")",
",",
")",
"elif",
"counts",
"[",
"path",
"]",
":",
"path",
"=",
"path",
"[",
":",
"-",
"1",
"]",
"+",
"(",
"util",
".",
"int_to_roman",
"(",
"counts",
"[",
"path",
"]",
"+",
"1",
")",
",",
")",
"new_items",
".",
"append",
"(",
"(",
"path",
",",
"item",
")",
")",
"counts",
"[",
"path",
"]",
"+=",
"1",
"return",
"new_items"
] | Deduplicates assigned paths by incrementing numbering | [
"Deduplicates",
"assigned",
"paths",
"by",
"incrementing",
"numbering"
] | python | train |
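A worked trace of the dedup loop on a two-item clash; int_to_roman is stubbed with a small lookup in place of HoloViews' util helper:

    from collections import Counter, defaultdict

    def int_to_roman(n):
        return {1: 'I', 2: 'II', 3: 'III'}[n]  # enough for this sketch

    items = [(('Curve',), 'a'), (('Curve', 'I'), 'b')]
    counter = Counter(p[:i] for p, _ in items
                      for i in range(1, len(p) + 1))
    counts, new_items = defaultdict(int), []
    for path, item in items:
        if counter[path] > 1:      # path is a shared prefix: extend it
            path = path + (int_to_roman(counts[path] + 1),)
        elif counts[path]:         # path already taken: bump the numeral
            path = path[:-1] + (int_to_roman(counts[path] + 1),)
        new_items.append((path, item))
        counts[path] += 1
    print(new_items)
    # -> [(('Curve', 'I'), 'a'), (('Curve', 'II'), 'b')]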