Unnamed: 0 (int64, 0-10k) | repository_name (stringlengths 7-54) | func_path_in_repository (stringlengths 5-223) | func_name (stringlengths 1-134) | whole_func_string (stringlengths 100-30.3k) | language (stringclasses 1: python) | func_code_string (stringlengths 100-30.3k) | func_code_tokens (stringlengths 138-33.2k) | func_documentation_string (stringlengths 1-15k) | func_documentation_tokens (stringlengths 5-5.14k) | split_name (stringclasses 1: train) | func_code_url (stringlengths 91-315)
---|---|---|---|---|---|---|---|---|---|---|---|
1,600 | vertexproject/synapse | synapse/cortex.py | Cortex._initLayerCtors | def _initLayerCtors(self):
'''
Registration for built-in Layer ctors
'''
ctors = {
'lmdb': s_lmdblayer.LmdbLayer,
'remote': s_remotelayer.RemoteLayer,
}
self.layrctors.update(**ctors) | python | def _initLayerCtors(self):
'''
Registration for built-in Layer ctors
'''
ctors = {
'lmdb': s_lmdblayer.LmdbLayer,
'remote': s_remotelayer.RemoteLayer,
}
self.layrctors.update(**ctors) | ['def', '_initLayerCtors', '(', 'self', ')', ':', 'ctors', '=', '{', "'lmdb'", ':', 's_lmdblayer', '.', 'LmdbLayer', ',', "'remote'", ':', 's_remotelayer', '.', 'RemoteLayer', ',', '}', 'self', '.', 'layrctors', '.', 'update', '(', '*', '*', 'ctors', ')'] | Registration for built-in Layer ctors | ['Registration', 'for', 'built', '-', 'in', 'Layer', 'ctors'] | train | https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/cortex.py#L732-L740 |
1,601 | ml4ai/delphi | delphi/translators/for2py/arrays.py | Array._mk_uninit_array | def _mk_uninit_array(self, bounds):
""" given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None."""
if len(bounds) == 0:
raise For2PyError("Zero-length arrays current not handled!.")
this_dim = bounds[0]
lo,hi = this_dim[0],this_dim[1]
sz = hi-lo+1
if len(bounds) == 1:
return [None] * sz
sub_array = self._mk_uninit_array(bounds[1:])
this_array = [copy.deepcopy(sub_array) for i in range(sz)]
return this_array | python | def _mk_uninit_array(self, bounds):
""" given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None."""
if len(bounds) == 0:
raise For2PyError("Zero-length arrays current not handled!.")
this_dim = bounds[0]
lo,hi = this_dim[0],this_dim[1]
sz = hi-lo+1
if len(bounds) == 1:
return [None] * sz
sub_array = self._mk_uninit_array(bounds[1:])
this_array = [copy.deepcopy(sub_array) for i in range(sz)]
return this_array | ['def', '_mk_uninit_array', '(', 'self', ',', 'bounds', ')', ':', 'if', 'len', '(', 'bounds', ')', '==', '0', ':', 'raise', 'For2PyError', '(', '"Zero-length arrays current not handled!."', ')', 'this_dim', '=', 'bounds', '[', '0', ']', 'lo', ',', 'hi', '=', 'this_dim', '[', '0', ']', ',', 'this_dim', '[', '1', ']', 'sz', '=', 'hi', '-', 'lo', '+', '1', 'if', 'len', '(', 'bounds', ')', '==', '1', ':', 'return', '[', 'None', ']', '*', 'sz', 'sub_array', '=', 'self', '.', '_mk_uninit_array', '(', 'bounds', '[', '1', ':', ']', ')', 'this_array', '=', '[', 'copy', '.', 'deepcopy', '(', 'sub_array', ')', 'for', 'i', 'in', 'range', '(', 'sz', ')', ']', 'return', 'this_array'] | given a list of bounds for the N dimensions of an array,
_mk_uninit_array() creates and returns an N-dimensional array of
the size specified by the bounds with each element set to the value
None. | ['given', 'a', 'list', 'of', 'bounds', 'for', 'the', 'N', 'dimensions', 'of', 'an', 'array', '_mk_uninit_array', '()', 'creates', 'and', 'returns', 'an', 'N', '-', 'dimensional', 'array', 'of', 'the', 'size', 'specified', 'by', 'the', 'bounds', 'with', 'each', 'element', 'set', 'to', 'the', 'value', 'None', '.'] | train | https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/arrays.py#L34-L52 |
1,602 | juanifioren/django-oidc-provider | oidc_provider/lib/utils/token.py | create_id_token | def create_id_token(token, user, aud, nonce='', at_hash='', request=None, scope=None):
"""
Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic.
"""
if scope is None:
scope = []
sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)
expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')
# Convert datetimes into timestamps.
now = int(time.time())
iat_time = now
exp_time = int(now + expires_in)
user_auth_time = user.last_login or user.date_joined
auth_time = int(dateformat.format(user_auth_time, 'U'))
dic = {
'iss': get_issuer(request=request),
'sub': sub,
'aud': str(aud),
'exp': exp_time,
'iat': iat_time,
'auth_time': auth_time,
}
if nonce:
dic['nonce'] = str(nonce)
if at_hash:
dic['at_hash'] = at_hash
# Include (or not) user standard claims in the id_token.
if settings.get('OIDC_IDTOKEN_INCLUDE_CLAIMS'):
if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
custom_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)
claims = custom_claims.create_response_dic()
else:
claims = StandardScopeClaims(token).create_response_dic()
dic.update(claims)
dic = run_processing_hook(
dic, 'OIDC_IDTOKEN_PROCESSING_HOOK',
user=user, token=token, request=request)
return dic | python | def create_id_token(token, user, aud, nonce='', at_hash='', request=None, scope=None):
"""
Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic.
"""
if scope is None:
scope = []
sub = settings.get('OIDC_IDTOKEN_SUB_GENERATOR', import_str=True)(user=user)
expires_in = settings.get('OIDC_IDTOKEN_EXPIRE')
# Convert datetimes into timestamps.
now = int(time.time())
iat_time = now
exp_time = int(now + expires_in)
user_auth_time = user.last_login or user.date_joined
auth_time = int(dateformat.format(user_auth_time, 'U'))
dic = {
'iss': get_issuer(request=request),
'sub': sub,
'aud': str(aud),
'exp': exp_time,
'iat': iat_time,
'auth_time': auth_time,
}
if nonce:
dic['nonce'] = str(nonce)
if at_hash:
dic['at_hash'] = at_hash
# Include (or not) user standard claims in the id_token.
if settings.get('OIDC_IDTOKEN_INCLUDE_CLAIMS'):
if settings.get('OIDC_EXTRA_SCOPE_CLAIMS'):
custom_claims = settings.get('OIDC_EXTRA_SCOPE_CLAIMS', import_str=True)(token)
claims = custom_claims.create_response_dic()
else:
claims = StandardScopeClaims(token).create_response_dic()
dic.update(claims)
dic = run_processing_hook(
dic, 'OIDC_IDTOKEN_PROCESSING_HOOK',
user=user, token=token, request=request)
return dic | ['def', 'create_id_token', '(', 'token', ',', 'user', ',', 'aud', ',', 'nonce', '=', "''", ',', 'at_hash', '=', "''", ',', 'request', '=', 'None', ',', 'scope', '=', 'None', ')', ':', 'if', 'scope', 'is', 'None', ':', 'scope', '=', '[', ']', 'sub', '=', 'settings', '.', 'get', '(', "'OIDC_IDTOKEN_SUB_GENERATOR'", ',', 'import_str', '=', 'True', ')', '(', 'user', '=', 'user', ')', 'expires_in', '=', 'settings', '.', 'get', '(', "'OIDC_IDTOKEN_EXPIRE'", ')', '# Convert datetimes into timestamps.', 'now', '=', 'int', '(', 'time', '.', 'time', '(', ')', ')', 'iat_time', '=', 'now', 'exp_time', '=', 'int', '(', 'now', '+', 'expires_in', ')', 'user_auth_time', '=', 'user', '.', 'last_login', 'or', 'user', '.', 'date_joined', 'auth_time', '=', 'int', '(', 'dateformat', '.', 'format', '(', 'user_auth_time', ',', "'U'", ')', ')', 'dic', '=', '{', "'iss'", ':', 'get_issuer', '(', 'request', '=', 'request', ')', ',', "'sub'", ':', 'sub', ',', "'aud'", ':', 'str', '(', 'aud', ')', ',', "'exp'", ':', 'exp_time', ',', "'iat'", ':', 'iat_time', ',', "'auth_time'", ':', 'auth_time', ',', '}', 'if', 'nonce', ':', 'dic', '[', "'nonce'", ']', '=', 'str', '(', 'nonce', ')', 'if', 'at_hash', ':', 'dic', '[', "'at_hash'", ']', '=', 'at_hash', '# Inlude (or not) user standard claims in the id_token.', 'if', 'settings', '.', 'get', '(', "'OIDC_IDTOKEN_INCLUDE_CLAIMS'", ')', ':', 'if', 'settings', '.', 'get', '(', "'OIDC_EXTRA_SCOPE_CLAIMS'", ')', ':', 'custom_claims', '=', 'settings', '.', 'get', '(', "'OIDC_EXTRA_SCOPE_CLAIMS'", ',', 'import_str', '=', 'True', ')', '(', 'token', ')', 'claims', '=', 'custom_claims', '.', 'create_response_dic', '(', ')', 'else', ':', 'claims', '=', 'StandardScopeClaims', '(', 'token', ')', '.', 'create_response_dic', '(', ')', 'dic', '.', 'update', '(', 'claims', ')', 'dic', '=', 'run_processing_hook', '(', 'dic', ',', "'OIDC_IDTOKEN_PROCESSING_HOOK'", ',', 'user', '=', 'user', ',', 'token', '=', 'token', ',', 'request', '=', 'request', ')', 'return', 'dic'] | Creates the id_token dictionary.
See: http://openid.net/specs/openid-connect-core-1_0.html#IDToken
Return a dic. | ['Creates', 'the', 'id_token', 'dictionary', '.', 'See', ':', 'http', ':', '//', 'openid', '.', 'net', '/', 'specs', '/', 'openid', '-', 'connect', '-', 'core', '-', '1_0', '.', 'html#IDToken', 'Return', 'a', 'dic', '.'] | train | https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/utils/token.py#L22-L69 |
1,603 | eddieantonio/perfection | perfection/getty.py | hash_parameters | def hash_parameters(keys, minimize=True, to_int=None):
"""
Calculates the parameters for a perfect hash. The result is returned
as a HashInfo tuple which has the following fields:
t
The "table parameter". This is the minimum side length of the
table used to create the hash. In practice, t**2 is the maximum
size of the output hash.
slots
The original inputs mapped to a vector. This is the hash
function.
r
The displacement vector. This is the displacement of the given
row in the result vector. To find a given value, use
``x + r[y]``.
offset
The amount by which to offset all values (once converted to ints)
to_int
A function that converts the input to an int (if given).
Keyword parameters:
``minimize``
Whether or not to offset all integer keys internally by the minimum
value. This typically results in smaller output.
``to_int``
A callable that converts the input keys to ints. If not
specified, all keys should be given as ints.
>>> hash_parameters([1, 5, 7], minimize=False)
HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
>>> hash_parameters([1, 5, 7])
HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> phash = hash_parameters(l)
>>> phash.slots
(18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)
For some values, the displacement vector will be rather empty:
>>> hash_parameters('Andrea', to_int=ord).r
(1, None, None, None, 0, -3, 4, None)
"""
# If to_int is not assigned, simply use the identity function.
if to_int is None:
to_int = __identity
key_to_original = {to_int(original): original for original in keys}
# Create a set of all items to be hashed.
items = list(key_to_original.keys())
if minimize:
offset = 0 - min(items)
items = frozenset(x + offset for x in items)
else:
offset = 0
# 1. Start with a square array (not stored) that is t units on each side.
# Choose a t such that t * t >= max(S)
t = choose_best_t(items)
assert t * t > max(items) and t * t >= len(items)
# 2. Place each key K in the square at location (x,y), where
# x = K mod t, y = K / t.
row_queue = place_items_in_square(items, t)
# 3. Arrange rows so that they'll fit into one row and generate a
# displacement vector.
final_row, displacement_vector = arrange_rows(row_queue, t)
# Translate the internal keys to their original items.
slots = tuple(key_to_original[item - offset] if item is not None else None
for item in final_row)
# Return the parameters
return HashInfo(
t=t,
slots=slots,
r=displacement_vector,
offset=offset,
to_int=to_int if to_int is not __identity else None
) | python | def hash_parameters(keys, minimize=True, to_int=None):
"""
Calculates the parameters for a perfect hash. The result is returned
as a HashInfo tuple which has the following fields:
t
The "table parameter". This is the minimum side length of the
table used to create the hash. In practice, t**2 is the maximum
size of the output hash.
slots
The original inputs mapped to a vector. This is the hash
function.
r
The displacement vector. This is the displacement of the given
row in the result vector. To find a given value, use
``x + r[y]``.
offset
The amount by which to offset all values (once converted to ints)
to_int
A function that converts the input to an int (if given).
Keyword parameters:
``minimize``
Whether or not to offset all integer keys internally by the minimum
value. This typically results in smaller output.
``to_int``
A callable that converts the input keys to ints. If not
specified, all keys should be given as ints.
>>> hash_parameters([1, 5, 7], minimize=False)
HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
>>> hash_parameters([1, 5, 7])
HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> phash = hash_parameters(l)
>>> phash.slots
(18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)
For some values, the displacement vector will be rather empty:
>>> hash_parameters('Andrea', to_int=ord).r
(1, None, None, None, 0, -3, 4, None)
"""
# If to_int is not assigned, simply use the identity function.
if to_int is None:
to_int = __identity
key_to_original = {to_int(original): original for original in keys}
# Create a set of all items to be hashed.
items = list(key_to_original.keys())
if minimize:
offset = 0 - min(items)
items = frozenset(x + offset for x in items)
else:
offset = 0
# 1. Start with a square array (not stored) that is t units on each side.
# Choose a t such that t * t >= max(S)
t = choose_best_t(items)
assert t * t > max(items) and t * t >= len(items)
# 2. Place each key K in the square at location (x,y), where
# x = K mod t, y = K / t.
row_queue = place_items_in_square(items, t)
# 3. Arrange rows so that they'll fit into one row and generate a
# displacement vector.
final_row, displacement_vector = arrange_rows(row_queue, t)
# Translate the internal keys to their original items.
slots = tuple(key_to_original[item - offset] if item is not None else None
for item in final_row)
# Return the parameters
return HashInfo(
t=t,
slots=slots,
r=displacement_vector,
offset=offset,
to_int=to_int if to_int is not __identity else None
) | ['def', 'hash_parameters', '(', 'keys', ',', 'minimize', '=', 'True', ',', 'to_int', '=', 'None', ')', ':', '# If to_int is not assigned, simply use the identity function.', 'if', 'to_int', 'is', 'None', ':', 'to_int', '=', '__identity', 'key_to_original', '=', '{', 'to_int', '(', 'original', ')', ':', 'original', 'for', 'original', 'in', 'keys', '}', '# Create a set of all items to be hashed.', 'items', '=', 'list', '(', 'key_to_original', '.', 'keys', '(', ')', ')', 'if', 'minimize', ':', 'offset', '=', '0', '-', 'min', '(', 'items', ')', 'items', '=', 'frozenset', '(', 'x', '+', 'offset', 'for', 'x', 'in', 'items', ')', 'else', ':', 'offset', '=', '0', '# 1. Start with a square array (not stored) that is t units on each side.', '# Choose a t such that t * t >= max(S)', 't', '=', 'choose_best_t', '(', 'items', ')', 'assert', 't', '*', 't', '>', 'max', '(', 'items', ')', 'and', 't', '*', 't', '>=', 'len', '(', 'items', ')', '# 2. Place each key K in the square at location (x,y), where', '# x = K mod t, y = K / t.', 'row_queue', '=', 'place_items_in_square', '(', 'items', ',', 't', ')', "# 3. Arrange rows so that they'll fit into one row and generate a", '# displacement vector.', 'final_row', ',', 'displacement_vector', '=', 'arrange_rows', '(', 'row_queue', ',', 't', ')', '# Translate the internal keys to their original items.', 'slots', '=', 'tuple', '(', 'key_to_original', '[', 'item', '-', 'offset', ']', 'if', 'item', 'is', 'not', 'None', 'else', 'None', 'for', 'item', 'in', 'final_row', ')', '# Return the parameters', 'return', 'HashInfo', '(', 't', '=', 't', ',', 'slots', '=', 'slots', ',', 'r', '=', 'displacement_vector', ',', 'offset', '=', 'offset', ',', 'to_int', '=', 'to_int', 'if', 'to_int', 'is', 'not', '__identity', 'else', 'None', ')'] | Calculates the parameters for a perfect hash. The result is returned
as a HashInfo tuple which has the following fields:
t
The "table parameter". This is the minimum side length of the
table used to create the hash. In practice, t**2 is the maximum
size of the output hash.
slots
The original inputs mapped to a vector. This is the hash
function.
r
The displacement vector. This is the displacement of the given
row in the result vector. To find a given value, use
``x + r[y]``.
offset
The amount by which to offset all values (once converted to ints)
to_int
A function that converts the input to an int (if given).
Keyword parameters:
``minimize``
Whether or not to offset all integer keys internally by the minimum
value. This typically results in smaller output.
``to_int``
A callable that converts the input keys to ints. If not
specified, all keys should be given as ints.
>>> hash_parameters([1, 5, 7], minimize=False)
HashInfo(t=3, slots=(1, 5, 7), r=(-1, -1, 1), offset=0, to_int=None)
>>> hash_parameters([1, 5, 7])
HashInfo(t=3, slots=(1, 5, 7), r=(0, 0, 2), offset=-1, to_int=None)
>>> l = (0, 3, 4, 7 ,10, 13, 15, 18, 19, 21, 22, 24, 26, 29, 30, 34)
>>> phash = hash_parameters(l)
>>> phash.slots
(18, 19, 0, 21, 22, 3, 4, 24, 7, 26, 30, 10, 29, 13, 34, 15)
For some values, the displacement vector will be rather empty:
>>> hash_parameters('Andrea', to_int=ord).r
(1, None, None, None, 0, -3, 4, None) | ['Calculates', 'the', 'parameters', 'for', 'a', 'perfect', 'hash', '.', 'The', 'result', 'is', 'returned', 'as', 'a', 'HashInfo', 'tuple', 'which', 'has', 'the', 'following', 'fields', ':'] | train | https://github.com/eddieantonio/perfection/blob/69b7a06b31a15bd9534c69d4bdcc2e48e8ddfc43/perfection/getty.py#L20-L107 |
1,604 | shapiromatron/bmds | bmds/datasets.py | ContinuousDataset.to_dict | def to_dict(self):
"""
Return a dictionary representation of the dataset.
"""
d = dict(doses=self.doses, ns=self.ns, means=self.means, stdevs=self.stdevs)
d.update(self.kwargs)
return d | python | def to_dict(self):
"""
Return a dictionary representation of the dataset.
"""
d = dict(doses=self.doses, ns=self.ns, means=self.means, stdevs=self.stdevs)
d.update(self.kwargs)
return d | ['def', 'to_dict', '(', 'self', ')', ':', 'd', '=', 'dict', '(', 'doses', '=', 'self', '.', 'doses', ',', 'ns', '=', 'self', '.', 'ns', ',', 'means', '=', 'self', '.', 'means', ',', 'stdevs', '=', 'self', '.', 'stdevs', ')', 'd', '.', 'update', '(', 'self', '.', 'kwargs', ')', 'return', 'd'] | Return a dictionary representation of the dataset. | ['Return', 'a', 'dictionary', 'representation', 'of', 'the', 'dataset', '.'] | train | https://github.com/shapiromatron/bmds/blob/395c6ce84ad82876fd9fa4a89a3497fb61616de0/bmds/datasets.py#L354-L360 |
1,605 | sander76/aio-powerview-api | aiopvapi/helpers/powerview_util.py | PowerViewUtil.activate_scene | async def activate_scene(self, scene_id: int):
"""Activate a scene
:param scene_id: Scene id.
:return:
"""
_scene = await self.get_scene(scene_id)
await _scene.activate() | python | async def activate_scene(self, scene_id: int):
"""Activate a scene
:param scene_id: Scene id.
:return:
"""
_scene = await self.get_scene(scene_id)
await _scene.activate() | ['async', 'def', 'activate_scene', '(', 'self', ',', 'scene_id', ':', 'int', ')', ':', '_scene', '=', 'await', 'self', '.', 'get_scene', '(', 'scene_id', ')', 'await', '_scene', '.', 'activate', '(', ')'] | Activate a scene
:param scene_id: Scene id.
:return: | ['Activate', 'a', 'scene'] | train | https://github.com/sander76/aio-powerview-api/blob/08b6ac747aba9de19842359a981a7ff1292f5a6c/aiopvapi/helpers/powerview_util.py#L95-L103 |
1,606 | Legobot/Legobot | Legobot/Connectors/Slack.py | RtmBot.run | def run(self):
'''Extends the run() method of threading.Thread
'''
self.connect()
while True:
for event in self.slack_client.rtm_read():
logger.debug(event)
if 'type' in event and event['type'] in self.supported_events:
event_type = event['type']
dispatcher = self.supported_events[event_type]
message = dispatcher(event)
logger.debug(message)
self.baseplate.tell(message)
self.keepalive()
time.sleep(0.1)
return | python | def run(self):
'''Extends the run() method of threading.Thread
'''
self.connect()
while True:
for event in self.slack_client.rtm_read():
logger.debug(event)
if 'type' in event and event['type'] in self.supported_events:
event_type = event['type']
dispatcher = self.supported_events[event_type]
message = dispatcher(event)
logger.debug(message)
self.baseplate.tell(message)
self.keepalive()
time.sleep(0.1)
return | ['def', 'run', '(', 'self', ')', ':', 'self', '.', 'connect', '(', ')', 'while', 'True', ':', 'for', 'event', 'in', 'self', '.', 'slack_client', '.', 'rtm_read', '(', ')', ':', 'logger', '.', 'debug', '(', 'event', ')', 'if', "'type'", 'in', 'event', 'and', 'event', '[', "'type'", ']', 'in', 'self', '.', 'supported_events', ':', 'event_type', '=', 'event', '[', "'type'", ']', 'dispatcher', '=', 'self', '.', 'supported_events', '[', 'event_type', ']', 'message', '=', 'dispatcher', '(', 'event', ')', 'logger', '.', 'debug', '(', 'message', ')', 'self', '.', 'baseplate', '.', 'tell', '(', 'message', ')', 'self', '.', 'keepalive', '(', ')', 'time', '.', 'sleep', '(', '0.1', ')', 'return'] | Extends the run() method of threading.Thread | ['Extends', 'the', 'run', '()', 'method', 'of', 'threading', '.', 'Thread'] | train | https://github.com/Legobot/Legobot/blob/d13da172960a149681cb5151ce34b2f3a58ad32b/Legobot/Connectors/Slack.py#L95-L111 |
1,607 | lambdamusic/Ontospy | ontospy/extras/shell_lib.py | Shell._print_entity_intro | def _print_entity_intro(self, g=None, entity=None, first_time=True):
"""after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity
"""
if entity:
self._clear_screen()
obj = entity['object']
self._print("Loaded %s: <%s>" % (entity['type'].capitalize(), str(obj.uri)), "TIP")
self._print("----------------", "TIP")
# self._print(obj.bestDescription(), "TEXT")
if first_time:
self.prompt = _get_prompt(self.current['file'], self.currentEntity)
elif g:
self._printDescription(False)
if first_time:
self.prompt = _get_prompt(self.current['file']) | python | def _print_entity_intro(self, g=None, entity=None, first_time=True):
"""after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity
"""
if entity:
self._clear_screen()
obj = entity['object']
self._print("Loaded %s: <%s>" % (entity['type'].capitalize(), str(obj.uri)), "TIP")
self._print("----------------", "TIP")
# self._print(obj.bestDescription(), "TEXT")
if first_time:
self.prompt = _get_prompt(self.current['file'], self.currentEntity)
elif g:
self._printDescription(False)
if first_time:
self.prompt = _get_prompt(self.current['file']) | ['def', '_print_entity_intro', '(', 'self', ',', 'g', '=', 'None', ',', 'entity', '=', 'None', ',', 'first_time', '=', 'True', ')', ':', 'if', 'entity', ':', 'self', '.', '_clear_screen', '(', ')', 'obj', '=', 'entity', '[', "'object'", ']', 'self', '.', '_print', '(', '"Loaded %s: <%s>"', '%', '(', 'entity', '[', "'type'", ']', '.', 'capitalize', '(', ')', ',', 'str', '(', 'obj', '.', 'uri', ')', ')', ',', '"TIP"', ')', 'self', '.', '_print', '(', '"----------------"', ',', '"TIP"', ')', '# self._print(obj.bestDescription(), "TEXT")', 'if', 'first_time', ':', 'self', '.', 'prompt', '=', '_get_prompt', '(', 'self', '.', 'current', '[', "'file'", ']', ',', 'self', '.', 'currentEntity', ')', 'elif', 'g', ':', 'self', '.', '_printDescription', '(', 'False', ')', 'if', 'first_time', ':', 'self', '.', 'prompt', '=', '_get_prompt', '(', 'self', '.', 'current', '[', "'file'", ']', ')'] | after a selection, prints on screen basic info about onto or entity, plus change prompt
2015-10-18: removed the sound
2016-01-18: entity is the shell wrapper around the ontospy entity | ['after', 'a', 'selection', 'prints', 'on', 'screen', 'basic', 'info', 'about', 'onto', 'or', 'entity', 'plus', 'change', 'prompt', '2015', '-', '10', '-', '18', ':', 'removed', 'the', 'sound', '2016', '-', '01', '-', '18', ':', 'entity', 'is', 'the', 'shell', 'wrapper', 'around', 'the', 'ontospy', 'entity'] | train | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/extras/shell_lib.py#L207-L223 |
1,608 | FNNDSC/med2image | med2image/systemMisc.py | cdf | def cdf(arr, **kwargs):
"""
ARGS
arr array to calculate cumulative distribution function
**kwargs
Passed directly to numpy.histogram. Typical options include:
bins = <num_bins>
normed = True|False
DESC
Determines the cumulative distribution function.
"""
counts, bin_edges = histogram(arr, **kwargs)
cdf = cumsum(counts)
return cdf | python | def cdf(arr, **kwargs):
"""
ARGS
arr array to calculate cumulative distribution function
**kwargs
Passed directly to numpy.histogram. Typical options include:
bins = <num_bins>
normed = True|False
DESC
Determines the cumulative distribution function.
"""
counts, bin_edges = histogram(arr, **kwargs)
cdf = cumsum(counts)
return cdf | ['def', 'cdf', '(', 'arr', ',', '*', '*', 'kwargs', ')', ':', 'counts', ',', 'bin_edges', '=', 'histogram', '(', 'arr', ',', '*', '*', 'kwargs', ')', 'cdf', '=', 'cumsum', '(', 'counts', ')', 'return', 'cdf'] | ARGS
arr array to calculate cumulative distribution function
**kwargs
Passed directly to numpy.histogram. Typical options include:
bins = <num_bins>
normed = True|False
DESC
Determines the cumulative distribution function. | ['ARGS', 'arr', 'array', 'to', 'calculate', 'cumulative', 'distribution', 'function'] | train | https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/systemMisc.py#L105-L120 |
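The `cdf` helper above just wraps a histogram plus a cumulative sum; a minimal self-contained sketch of the same idea using `numpy` directly (the sample array is illustrative, not taken from the dataset row):

```python
import numpy as np

# Illustrative sample, not taken from the dataset row above.
arr = np.array([1.0, 2.0, 2.0, 3.0, 5.0, 8.0])

# Same two steps as the cdf() helper: histogram counts, then a running sum.
counts, bin_edges = np.histogram(arr, bins=4)
cdf = np.cumsum(counts)

print(counts)  # e.g. [3 1 1 1]
print(cdf)     # e.g. [3 4 5 6] -- monotonically non-decreasing
```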
1,609 | baruwa-enterprise/BaruwaAPI | BaruwaAPI/resource.py | BaruwaAPIClient.delete_alias | def delete_alias(self, addressid, data):
"""Delete alias address"""
return self.api_call(
ENDPOINTS['aliases']['delete'],
dict(addressid=addressid),
body=data) | python | def delete_alias(self, addressid, data):
"""Delete alias address"""
return self.api_call(
ENDPOINTS['aliases']['delete'],
dict(addressid=addressid),
body=data) | ['def', 'delete_alias', '(', 'self', ',', 'addressid', ',', 'data', ')', ':', 'return', 'self', '.', 'api_call', '(', 'ENDPOINTS', '[', "'aliases'", ']', '[', "'delete'", ']', ',', 'dict', '(', 'addressid', '=', 'addressid', ')', ',', 'body', '=', 'data', ')'] | Delete alias address | ['Delete', 'alias', 'address'] | train | https://github.com/baruwa-enterprise/BaruwaAPI/blob/53335b377ccfd388e42f4f240f181eed72f51180/BaruwaAPI/resource.py#L121-L126 |
1,610 | supercoderz/pyflightdata | pyflightdata/flightdata.py | FlightData.login | def login(self, email, password):
"""Login to the flightradar24 session
The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.
For users who have signed up for a plan, this method allows to login with the credentials from flightradar24. The API obtains
a token that will be passed on all the requests; this obtains the data as per the plan limits.
Args:
email (str): The email ID which is used to login to flightradar24
password (str): The password for the user ID
Example::
from pyflightdata import FlightData
f=FlightData()
f.login(myemail,mypassword)
"""
response = FlightData.session.post(
url=LOGIN_URL,
data={
'email': email,
'password': password,
'remember': 'true',
'type': 'web'
},
headers={
'Origin': 'https://www.flightradar24.com',
'Referer': 'https://www.flightradar24.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'
}
)
response = self._fr24.json_loads_byteified(
response.content) if response.status_code == 200 else None
if response:
token = response['userData']['subscriptionKey']
self.AUTH_TOKEN = token | python | def login(self, email, password):
"""Login to the flightradar24 session
The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.
For users who have signed up for a plan, this method allows to login with the credentials from flightradar24. The API obtains
a token that will be passed on all the requests; this obtains the data as per the plan limits.
Args:
email (str): The email ID which is used to login to flightradar24
password (str): The password for the user ID
Example::
from pyflightdata import FlightData
f=FlightData()
f.login(myemail,mypassword)
"""
response = FlightData.session.post(
url=LOGIN_URL,
data={
'email': email,
'password': password,
'remember': 'true',
'type': 'web'
},
headers={
'Origin': 'https://www.flightradar24.com',
'Referer': 'https://www.flightradar24.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'
}
)
response = self._fr24.json_loads_byteified(
response.content) if response.status_code == 200 else None
if response:
token = response['userData']['subscriptionKey']
self.AUTH_TOKEN = token | ['def', 'login', '(', 'self', ',', 'email', ',', 'password', ')', ':', 'response', '=', 'FlightData', '.', 'session', '.', 'post', '(', 'url', '=', 'LOGIN_URL', ',', 'data', '=', '{', "'email'", ':', 'email', ',', "'password'", ':', 'password', ',', "'remember'", ':', "'true'", ',', "'type'", ':', "'web'", '}', ',', 'headers', '=', '{', "'Origin'", ':', "'https://www.flightradar24.com'", ',', "'Referer'", ':', "'https://www.flightradar24.com'", ',', "'User-Agent'", ':', "'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0'", '}', ')', 'response', '=', 'self', '.', '_fr24', '.', 'json_loads_byteified', '(', 'response', '.', 'content', ')', 'if', 'response', '.', 'status_code', '==', '200', 'else', 'None', 'if', 'response', ':', 'token', '=', 'response', '[', "'userData'", ']', '[', "'subscriptionKey'", ']', 'self', '.', 'AUTH_TOKEN', '=', 'token'] | Login to the flightradar24 session
The API currently uses flightradar24 as the primary data source. The site provides different levels of data based on user plans.
For users who have signed up for a plan, this method allows to login with the credentials from flightradar24. The API obtains
a token that will be passed on all the requests; this obtains the data as per the plan limits.
Args:
email (str): The email ID which is used to login to flightradar24
password (str): The password for the user ID
Example::
from pyflightdata import FlightData
f=FlightData()
f.login(myemail,mypassword) | ['Login', 'to', 'the', 'flightradar24', 'session'] | train | https://github.com/supercoderz/pyflightdata/blob/2caf9f429288f9a171893d1b8377d0c6244541cc/pyflightdata/flightdata.py#L507-L543 |
1,611 | portfors-lab/sparkle | sparkle/gui/stim/tuning_curve.py | TuningCurveEditor.setStimReps | def setStimReps(self):
"""Sets the reps of the StimulusModel from values pulled from
this widget"""
reps = self.ui.nrepsSpnbx.value()
self.stimModel.setRepCount(reps) | python | def setStimReps(self):
"""Sets the reps of the StimulusModel from values pulled from
this widget"""
reps = self.ui.nrepsSpnbx.value()
self.stimModel.setRepCount(reps) | ['def', 'setStimReps', '(', 'self', ')', ':', 'reps', '=', 'self', '.', 'ui', '.', 'nrepsSpnbx', '.', 'value', '(', ')', 'self', '.', 'stimModel', '.', 'setRepCount', '(', 'reps', ')'] | Sets the reps of the StimulusModel from values pulled from
this widget | ['Sets', 'the', 'reps', 'of', 'the', 'StimulusModel', 'from', 'values', 'pulled', 'from', 'this', 'widget'] | train | https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/tuning_curve.py#L90-L94 |
1,612 | obriencj/python-javatools | javatools/ziputils.py | compare_zips | def compare_zips(left, right):
"""
yields EVENT,ENTRY pairs describing the differences between left
and right ZipFile instances
"""
ll = set(left.namelist())
rl = set(right.namelist())
for f in ll:
if f in rl:
rl.remove(f)
if f[-1] == '/':
# it's a directory entry
pass
elif _different(left, right, f):
yield DIFF, f
else:
yield SAME, f
else:
yield LEFT, f
for f in rl:
yield RIGHT, f | python | def compare_zips(left, right):
"""
yields EVENT,ENTRY pairs describing the differences between left
and right ZipFile instances
"""
ll = set(left.namelist())
rl = set(right.namelist())
for f in ll:
if f in rl:
rl.remove(f)
if f[-1] == '/':
# it's a directory entry
pass
elif _different(left, right, f):
yield DIFF, f
else:
yield SAME, f
else:
yield LEFT, f
for f in rl:
yield RIGHT, f | ['def', 'compare_zips', '(', 'left', ',', 'right', ')', ':', 'll', '=', 'set', '(', 'left', '.', 'namelist', '(', ')', ')', 'rl', '=', 'set', '(', 'right', '.', 'namelist', '(', ')', ')', 'for', 'f', 'in', 'll', ':', 'if', 'f', 'in', 'rl', ':', 'rl', '.', 'remove', '(', 'f', ')', 'if', 'f', '[', '-', '1', ']', '==', "'/'", ':', "# it's a directory entry", 'pass', 'elif', '_different', '(', 'left', ',', 'right', ',', 'f', ')', ':', 'yield', 'DIFF', ',', 'f', 'else', ':', 'yield', 'SAME', ',', 'f', 'else', ':', 'yield', 'LEFT', ',', 'f', 'for', 'f', 'in', 'rl', ':', 'yield', 'RIGHT', ',', 'f'] | yields EVENT,ENTRY pairs describing the differences between left
and right ZipFile instances | ['yields', 'EVENT', 'ENTRY', 'pairs', 'describing', 'the', 'differences', 'between', 'left', 'and', 'right', 'ZipFile', 'instances'] | train | https://github.com/obriencj/python-javatools/blob/9e2332b452ddc508bed0615937dddcb2cf051557/javatools/ziputils.py#L55-L82 |
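A short sketch of how a generator like `compare_zips` would typically be consumed; the archive names are placeholders, and the import of the event constants alongside `compare_zips` is assumed from the module shown above:

```python
from zipfile import ZipFile

# Import path and event constants assumed to match the module in the row above.
from javatools.ziputils import compare_zips, SAME

# Placeholder archive names, used only for illustration.
with ZipFile("left.jar") as left, ZipFile("right.jar") as right:
    for event, entry in compare_zips(left, right):
        if event != SAME:
            print(event, entry)
```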
1,613 | assamite/creamas | creamas/util.py | wait_tasks | async def wait_tasks(tasks, flatten=True):
'''Gather a list of asynchronous tasks and wait their completion.
:param list tasks:
A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`.
:param bool flatten:
If ``True`` the returned results are flattened into one list if the
tasks return iterable objects. The parameter does nothing if all the
results are not iterable.
:returns:
The results of tasks as a list or as a flattened list
'''
rets = await asyncio.gather(*tasks)
if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)):
rets = list(itertools.chain(*rets))
return rets | python | async def wait_tasks(tasks, flatten=True):
'''Gather a list of asynchronous tasks and wait their completion.
:param list tasks:
A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`.
:param bool flatten:
If ``True`` the returned results are flattened into one list if the
tasks return iterable objects. The parameter does nothing if all the
results are not iterable.
:returns:
The results of tasks as a list or as a flattened list
'''
rets = await asyncio.gather(*tasks)
if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)):
rets = list(itertools.chain(*rets))
return rets | ['async', 'def', 'wait_tasks', '(', 'tasks', ',', 'flatten', '=', 'True', ')', ':', 'rets', '=', 'await', 'asyncio', '.', 'gather', '(', '*', 'tasks', ')', 'if', 'flatten', 'and', 'all', '(', 'map', '(', 'lambda', 'x', ':', 'hasattr', '(', 'x', ',', "'__iter__'", ')', ',', 'rets', ')', ')', ':', 'rets', '=', 'list', '(', 'itertools', '.', 'chain', '(', '*', 'rets', ')', ')', 'return', 'rets'] | Gather a list of asynchronous tasks and wait their completion.
:param list tasks:
A list of *asyncio* tasks wrapped in :func:`asyncio.ensure_future`.
:param bool flatten:
If ``True`` the returned results are flattened into one list if the
tasks return iterable objects. The parameter does nothing if all the
results are not iterable.
:returns:
The results of tasks as a list or as a flattened list | ['Gather', 'a', 'list', 'of', 'asynchronous', 'tasks', 'and', 'wait', 'their', 'completion', '.'] | train | https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/util.py#L51-L66 |
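A minimal driver for an awaitable helper like `wait_tasks`; the worker coroutine below is hypothetical, only `asyncio` and `itertools` are assumed, and the helper itself is reproduced from the row above:

```python
import asyncio
import itertools

async def wait_tasks(tasks, flatten=True):
    # Reproduced from the row above: gather, then optionally flatten.
    rets = await asyncio.gather(*tasks)
    if flatten and all(map(lambda x: hasattr(x, '__iter__'), rets)):
        rets = list(itertools.chain(*rets))
    return rets

async def fetch_chunk(n):
    # Hypothetical worker coroutine, for illustration only.
    await asyncio.sleep(0)
    return [n, n + 1]

async def main():
    tasks = [asyncio.ensure_future(fetch_chunk(i)) for i in (0, 10)]
    print(await wait_tasks(tasks))  # flattened: [0, 1, 10, 11]

asyncio.run(main())
```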
1,614 | spyder-ide/conda-manager | conda_manager/models/dependencies.py | CondaDependenciesModel._timer_update | def _timer_update(self):
"""Add some moving points to the dependency resolution text."""
self._timer_counter += 1
dot = self._timer_dots.pop(0)
self._timer_dots = self._timer_dots + [dot]
self._rows = [[_(u'Resolving dependencies') + dot, u'', u'', u'']]
index = self.createIndex(0, 0)
self.dataChanged.emit(index, index)
if self._timer_counter > 150:
self._timer.stop()
self._timer_counter = 0 | python | def _timer_update(self):
"""Add some moving points to the dependency resolution text."""
self._timer_counter += 1
dot = self._timer_dots.pop(0)
self._timer_dots = self._timer_dots + [dot]
self._rows = [[_(u'Resolving dependencies') + dot, u'', u'', u'']]
index = self.createIndex(0, 0)
self.dataChanged.emit(index, index)
if self._timer_counter > 150:
self._timer.stop()
self._timer_counter = 0 | ['def', '_timer_update', '(', 'self', ')', ':', 'self', '.', '_timer_counter', '+=', '1', 'dot', '=', 'self', '.', '_timer_dots', '.', 'pop', '(', '0', ')', 'self', '.', '_timer_dots', '=', 'self', '.', '_timer_dots', '+', '[', 'dot', ']', 'self', '.', '_rows', '=', '[', '[', '_', '(', "u'Resolving dependencies'", ')', '+', 'dot', ',', "u''", ',', "u''", ',', "u''", ']', ']', 'index', '=', 'self', '.', 'createIndex', '(', '0', ',', '0', ')', 'self', '.', 'dataChanged', '.', 'emit', '(', 'index', ',', 'index', ')', 'if', 'self', '.', '_timer_counter', '>', '150', ':', 'self', '.', '_timer', '.', 'stop', '(', ')', 'self', '.', '_timer_counter', '=', '0'] | Add some moving points to the dependency resolution text. | ['Add', 'some', 'moving', 'points', 'to', 'the', 'dependency', 'resolution', 'text', '.'] | train | https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/models/dependencies.py#L72-L83 |
1,615 | santosjorge/cufflinks | cufflinks/colors.py | rgba_to_rgb | def rgba_to_rgb(color, bg='rgb(255,255,255)'):
"""
Converts from rgba to rgb
Parameters:
-----------
color : string
Color representation in rgba
bg : string
Color representation in rgb
Example:
rgba_to_rgb('rgba(23,25,24,.4)')
"""
def c_tup(c):
return eval(c[c.find('('):])
color = c_tup(color)
bg = hex_to_rgb(normalize(bg))
bg = c_tup(bg)
a = color[3]
r = [int((1 - a) * bg[i] + a * color[i]) for i in range(3)]
return 'rgb' + str(tuple(r)) | python | def rgba_to_rgb(color, bg='rgb(255,255,255)'):
"""
Converts from rgba to rgb
Parameters:
-----------
color : string
Color representation in rgba
bg : string
Color representation in rgb
Example:
rgba_to_rgb('rgba(23,25,24,.4)')
"""
def c_tup(c):
return eval(c[c.find('('):])
color = c_tup(color)
bg = hex_to_rgb(normalize(bg))
bg = c_tup(bg)
a = color[3]
r = [int((1 - a) * bg[i] + a * color[i]) for i in range(3)]
return 'rgb' + str(tuple(r)) | ['def', 'rgba_to_rgb', '(', 'color', ',', 'bg', '=', "'rgb(255,255,255)'", ')', ':', 'def', 'c_tup', '(', 'c', ')', ':', 'return', 'eval', '(', 'c', '[', 'c', '.', 'find', '(', "'('", ')', ':', ']', ')', 'color', '=', 'c_tup', '(', 'color', ')', 'bg', '=', 'hex_to_rgb', '(', 'normalize', '(', 'bg', ')', ')', 'bg', '=', 'c_tup', '(', 'bg', ')', 'a', '=', 'color', '[', '3', ']', 'r', '=', '[', 'int', '(', '(', '1', '-', 'a', ')', '*', 'bg', '[', 'i', ']', '+', 'a', '*', 'color', '[', 'i', ']', ')', 'for', 'i', 'in', 'range', '(', '3', ')', ']', 'return', "'rgb'", '+', 'str', '(', 'tuple', '(', 'r', ')', ')'] | Converts from rgba to rgb
Parameters:
-----------
color : string
Color representation in rgba
bg : string
Color representation in rgb
Example:
rgba_to_rgb('rgba(23,25,24,.4)') | ['Converts', 'from', 'rgba', 'to', 'rgb'] | train | https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/colors.py#L122-L143 |
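The blend performed by `rgba_to_rgb` above is plain per-channel alpha compositing against the background; a minimal standalone sketch of that arithmetic (not using the cufflinks helpers):

```python
def blend_over(fg_rgb, alpha, bg_rgb=(255, 255, 255)):
    # Per channel: out = (1 - alpha) * background + alpha * foreground.
    return tuple(int((1 - alpha) * b + alpha * f) for f, b in zip(fg_rgb, bg_rgb))

# rgba(23, 25, 24, 0.4) composited over a white background:
print(blend_over((23, 25, 24), 0.4))  # (162, 163, 162)
```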
1,616 | materialsproject/pymatgen | pymatgen/analysis/structure_analyzer.py | solid_angle | def solid_angle(center, coords):
"""
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle.
"""
o = np.array(center)
r = [np.array(c) - o for c in coords]
r.append(r[0])
n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
n.append(np.cross(r[1], r[0]))
vals = []
for i in range(len(n) - 1):
v = -np.dot(n[i], n[i + 1]) \
/ (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
vals.append(acos(abs_cap(v)))
phi = sum(vals)
return phi + (3 - len(r)) * pi | python | def solid_angle(center, coords):
"""
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle.
"""
o = np.array(center)
r = [np.array(c) - o for c in coords]
r.append(r[0])
n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
n.append(np.cross(r[1], r[0]))
vals = []
for i in range(len(n) - 1):
v = -np.dot(n[i], n[i + 1]) \
/ (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
vals.append(acos(abs_cap(v)))
phi = sum(vals)
return phi + (3 - len(r)) * pi | ['def', 'solid_angle', '(', 'center', ',', 'coords', ')', ':', 'o', '=', 'np', '.', 'array', '(', 'center', ')', 'r', '=', '[', 'np', '.', 'array', '(', 'c', ')', '-', 'o', 'for', 'c', 'in', 'coords', ']', 'r', '.', 'append', '(', 'r', '[', '0', ']', ')', 'n', '=', '[', 'np', '.', 'cross', '(', 'r', '[', 'i', '+', '1', ']', ',', 'r', '[', 'i', ']', ')', 'for', 'i', 'in', 'range', '(', 'len', '(', 'r', ')', '-', '1', ')', ']', 'n', '.', 'append', '(', 'np', '.', 'cross', '(', 'r', '[', '1', ']', ',', 'r', '[', '0', ']', ')', ')', 'vals', '=', '[', ']', 'for', 'i', 'in', 'range', '(', 'len', '(', 'n', ')', '-', '1', ')', ':', 'v', '=', '-', 'np', '.', 'dot', '(', 'n', '[', 'i', ']', ',', 'n', '[', 'i', '+', '1', ']', ')', '/', '(', 'np', '.', 'linalg', '.', 'norm', '(', 'n', '[', 'i', ']', ')', '*', 'np', '.', 'linalg', '.', 'norm', '(', 'n', '[', 'i', '+', '1', ']', ')', ')', 'vals', '.', 'append', '(', 'acos', '(', 'abs_cap', '(', 'v', ')', ')', ')', 'phi', '=', 'sum', '(', 'vals', ')', 'return', 'phi', '+', '(', '3', '-', 'len', '(', 'r', ')', ')', '*', 'pi'] | Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle. | ['Helper', 'method', 'to', 'calculate', 'the', 'solid', 'angle', 'of', 'a', 'set', 'of', 'coords', 'from', 'the', 'center', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/structure_analyzer.py#L366-L389 |
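As a sanity check of the `solid_angle` helper above: three unit points on the coordinate axes, viewed from the origin, bound one octant of the sphere, i.e. 4*pi/8 = pi/2 (about 1.571 steradians). A self-contained sketch reusing the same math, with `abs_cap` replaced by an explicit clamp:

```python
from math import acos, pi
import numpy as np

def solid_angle(center, coords):
    # Same algorithm as the pymatgen helper shown in the row above.
    o = np.array(center)
    r = [np.array(c) - o for c in coords]
    r.append(r[0])
    n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
    n.append(np.cross(r[1], r[0]))
    vals = []
    for i in range(len(n) - 1):
        v = -np.dot(n[i], n[i + 1]) / (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
        vals.append(acos(max(-1.0, min(1.0, v))))  # clamp stands in for abs_cap
    phi = sum(vals)
    return phi + (3 - len(r)) * pi

# One octant of the unit sphere: expect 4*pi/8 = pi/2 ~ 1.5708 steradians.
print(solid_angle([0, 0, 0], [[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
```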
1,617 | calmjs/calmjs | src/calmjs/dist.py | validate_json_field | def validate_json_field(dist, attr, value):
"""
Check for json validity.
"""
try:
is_json_compat(value)
except ValueError as e:
raise DistutilsSetupError("%r %s" % (attr, e))
return True | python | def validate_json_field(dist, attr, value):
"""
Check for json validity.
"""
try:
is_json_compat(value)
except ValueError as e:
raise DistutilsSetupError("%r %s" % (attr, e))
return True | ['def', 'validate_json_field', '(', 'dist', ',', 'attr', ',', 'value', ')', ':', 'try', ':', 'is_json_compat', '(', 'value', ')', 'except', 'ValueError', 'as', 'e', ':', 'raise', 'DistutilsSetupError', '(', '"%r %s"', '%', '(', 'attr', ',', 'e', ')', ')', 'return', 'True'] | Check for json validity. | ['Check', 'for', 'json', 'validity', '.'] | train | https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/dist.py#L66-L76 |
1,618 | zimeon/iiif | iiif/info.py | IIIFInfo.validate | def validate(self):
"""Validate this object as Image API data.
Raise IIIFInfoError with helpful message if not valid.
"""
errors = []
for param in self.required_params:
if (not hasattr(self, param) or getattr(self, param) is None):
errors.append("missing %s parameter" % (param))
if (len(errors) > 0):
raise IIIFInfoError("Bad data for info.json: " + ", ".join(errors))
return True | python | def validate(self):
"""Validate this object as Image API data.
Raise IIIFInfoError with helpful message if not valid.
"""
errors = []
for param in self.required_params:
if (not hasattr(self, param) or getattr(self, param) is None):
errors.append("missing %s parameter" % (param))
if (len(errors) > 0):
raise IIIFInfoError("Bad data for info.json: " + ", ".join(errors))
return True | ['def', 'validate', '(', 'self', ')', ':', 'errors', '=', '[', ']', 'for', 'param', 'in', 'self', '.', 'required_params', ':', 'if', '(', 'not', 'hasattr', '(', 'self', ',', 'param', ')', 'or', 'getattr', '(', 'self', ',', 'param', ')', 'is', 'None', ')', ':', 'errors', '.', 'append', '(', '"missing %s parameter"', '%', '(', 'param', ')', ')', 'if', '(', 'len', '(', 'errors', ')', '>', '0', ')', ':', 'raise', 'IIIFInfoError', '(', '"Bad data for info.json: "', '+', '", "', '.', 'join', '(', 'errors', ')', ')', 'return', 'True'] | Validate this object as Image API data.
Raise IIIFInfoError with helpful message if not valid. | ['Validate', 'this', 'object', 'as', 'Image', 'API', 'data', '.'] | train | https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/info.py#L417-L428 |
1,619 | jasonlaska/spherecluster | spherecluster/von_mises_fisher_mixture.py | movMF | def movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
):
"""Wrapper for parallelization of _movMF and running n_init times.
"""
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# defaults
best_centers = None
best_labels = None
best_weights = None
best_concentrations = None
best_posterior = None
best_inertia = None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# cluster on the sphere
(centers, weights, concentrations, posterior, labels, inertia) = _movMF(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_centers = centers.copy()
best_labels = labels.copy()
best_weights = weights.copy()
best_concentrations = concentrations.copy()
best_posterior = posterior.copy()
best_inertia = inertia
else:
# parallelisation of movMF runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_movMF)(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
for seed in seeds
)
# Get results with the lowest inertia
centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_concentrations = concentrations[best]
best_posterior = posteriors[best]
best_weights = weights[best]
return (
best_centers,
best_labels,
best_inertia,
best_weights,
best_concentrations,
best_posterior,
) | python | def movMF(
X,
n_clusters,
posterior_type="soft",
force_weights=None,
n_init=10,
n_jobs=1,
max_iter=300,
verbose=False,
init="random-class",
random_state=None,
tol=1e-6,
copy_x=True,
):
"""Wrapper for parallelization of _movMF and running n_init times.
"""
if n_init <= 0:
raise ValueError(
"Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init
)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError(
"Number of iterations should be a positive number,"
" got %d instead" % max_iter
)
best_inertia = np.infty
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
if hasattr(init, "__array__"):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
"Explicit initial center position passed: "
"performing only one init in k-means instead of n_init=%d" % n_init,
RuntimeWarning,
stacklevel=2,
)
n_init = 1
# defaults
best_centers = None
best_labels = None
best_weights = None
best_concentrations = None
best_posterior = None
best_inertia = None
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# cluster on the sphere
(centers, weights, concentrations, posterior, labels, inertia) = _movMF(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_centers = centers.copy()
best_labels = labels.copy()
best_weights = weights.copy()
best_concentrations = concentrations.copy()
best_posterior = posterior.copy()
best_inertia = inertia
else:
# parallelisation of movMF runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(_movMF)(
X,
n_clusters,
posterior_type=posterior_type,
force_weights=force_weights,
max_iter=max_iter,
verbose=verbose,
init=init,
random_state=random_state,
tol=tol,
)
for seed in seeds
)
# Get results with the lowest inertia
centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_concentrations = concentrations[best]
best_posterior = posteriors[best]
best_weights = weights[best]
return (
best_centers,
best_labels,
best_inertia,
best_weights,
best_concentrations,
best_posterior,
) | ['def', 'movMF', '(', 'X', ',', 'n_clusters', ',', 'posterior_type', '=', '"soft"', ',', 'force_weights', '=', 'None', ',', 'n_init', '=', '10', ',', 'n_jobs', '=', '1', ',', 'max_iter', '=', '300', ',', 'verbose', '=', 'False', ',', 'init', '=', '"random-class"', ',', 'random_state', '=', 'None', ',', 'tol', '=', '1e-6', ',', 'copy_x', '=', 'True', ',', ')', ':', 'if', 'n_init', '<=', '0', ':', 'raise', 'ValueError', '(', '"Invalid number of initializations."', '" n_init=%d must be bigger than zero."', '%', 'n_init', ')', 'random_state', '=', 'check_random_state', '(', 'random_state', ')', 'if', 'max_iter', '<=', '0', ':', 'raise', 'ValueError', '(', '"Number of iterations should be a positive number,"', '" got %d instead"', '%', 'max_iter', ')', 'best_inertia', '=', 'np', '.', 'infty', 'X', '=', 'as_float_array', '(', 'X', ',', 'copy', '=', 'copy_x', ')', 'tol', '=', '_tolerance', '(', 'X', ',', 'tol', ')', 'if', 'hasattr', '(', 'init', ',', '"__array__"', ')', ':', 'init', '=', 'check_array', '(', 'init', ',', 'dtype', '=', 'X', '.', 'dtype', '.', 'type', ',', 'copy', '=', 'True', ')', '_validate_center_shape', '(', 'X', ',', 'n_clusters', ',', 'init', ')', 'if', 'n_init', '!=', '1', ':', 'warnings', '.', 'warn', '(', '"Explicit initial center position passed: "', '"performing only one init in k-means instead of n_init=%d"', '%', 'n_init', ',', 'RuntimeWarning', ',', 'stacklevel', '=', '2', ',', ')', 'n_init', '=', '1', '# defaults', 'best_centers', '=', 'None', 'best_labels', '=', 'None', 'best_weights', '=', 'None', 'best_concentrations', '=', 'None', 'best_posterior', '=', 'None', 'best_inertia', '=', 'None', 'if', 'n_jobs', '==', '1', ':', '# For a single thread, less memory is needed if we just store one set', '# of the best results (as opposed to one set per run per thread).', 'for', 'it', 'in', 'range', '(', 'n_init', ')', ':', '# cluster on the sphere', '(', 'centers', ',', 'weights', ',', 'concentrations', ',', 'posterior', ',', 'labels', ',', 'inertia', ')', '=', '_movMF', '(', 'X', ',', 'n_clusters', ',', 'posterior_type', '=', 'posterior_type', ',', 'force_weights', '=', 'force_weights', ',', 'max_iter', '=', 'max_iter', ',', 'verbose', '=', 'verbose', ',', 'init', '=', 'init', ',', 'random_state', '=', 'random_state', ',', 'tol', '=', 'tol', ',', ')', '# determine if these results are the best so far', 'if', 'best_inertia', 'is', 'None', 'or', 'inertia', '<', 'best_inertia', ':', 'best_centers', '=', 'centers', '.', 'copy', '(', ')', 'best_labels', '=', 'labels', '.', 'copy', '(', ')', 'best_weights', '=', 'weights', '.', 'copy', '(', ')', 'best_concentrations', '=', 'concentrations', '.', 'copy', '(', ')', 'best_posterior', '=', 'posterior', '.', 'copy', '(', ')', 'best_inertia', '=', 'inertia', 'else', ':', '# parallelisation of movMF runs', 'seeds', '=', 'random_state', '.', 'randint', '(', 'np', '.', 'iinfo', '(', 'np', '.', 'int32', ')', '.', 'max', ',', 'size', '=', 'n_init', ')', 'results', '=', 'Parallel', '(', 'n_jobs', '=', 'n_jobs', ',', 'verbose', '=', '0', ')', '(', 'delayed', '(', '_movMF', ')', '(', 'X', ',', 'n_clusters', ',', 'posterior_type', '=', 'posterior_type', ',', 'force_weights', '=', 'force_weights', ',', 'max_iter', '=', 'max_iter', ',', 'verbose', '=', 'verbose', ',', 'init', '=', 'init', ',', 'random_state', '=', 'random_state', ',', 'tol', '=', 'tol', ',', ')', 'for', 'seed', 'in', 'seeds', ')', '# Get results with the lowest inertia', 'centers', ',', 'weights', ',', 'concentrations', ',', 'posteriors', ',', 'labels', ',', 'inertia', '=', 
'zip', '(', '*', 'results', ')', 'best', '=', 'np', '.', 'argmin', '(', 'inertia', ')', 'best_labels', '=', 'labels', '[', 'best', ']', 'best_inertia', '=', 'inertia', '[', 'best', ']', 'best_centers', '=', 'centers', '[', 'best', ']', 'best_concentrations', '=', 'concentrations', '[', 'best', ']', 'best_posterior', '=', 'posteriors', '[', 'best', ']', 'best_weights', '=', 'weights', '[', 'best', ']', 'return', '(', 'best_centers', ',', 'best_labels', ',', 'best_inertia', ',', 'best_weights', ',', 'best_concentrations', ',', 'best_posterior', ',', ')'] | Wrapper for parallelization of _movMF and running n_init times. | ['Wrapper', 'for', 'parallelization', 'of', '_movMF', 'and', 'running', 'n_init', 'times', '.'] | train | https://github.com/jasonlaska/spherecluster/blob/701b0b1909088a56e353b363b2672580d4fe9d93/spherecluster/von_mises_fisher_mixture.py#L500-L614 |
1,620 | saltstack/salt | salt/modules/rh_service.py | available | def available(name, limit=''):
'''
Return True if the named service is available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.available sshd
salt '*' service.available sshd limit=upstart
salt '*' service.available sshd limit=sysvinit
'''
if limit == 'upstart':
return _service_is_upstart(name)
elif limit == 'sysvinit':
return _service_is_sysv(name)
else:
return _service_is_upstart(name) or _service_is_sysv(name) or _service_is_chkconfig(name) | python | def available(name, limit=''):
'''
Return True if the named service is available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.available sshd
salt '*' service.available sshd limit=upstart
salt '*' service.available sshd limit=sysvinit
'''
if limit == 'upstart':
return _service_is_upstart(name)
elif limit == 'sysvinit':
return _service_is_sysv(name)
else:
return _service_is_upstart(name) or _service_is_sysv(name) or _service_is_chkconfig(name) | ['def', 'available', '(', 'name', ',', 'limit', '=', "''", ')', ':', 'if', 'limit', '==', "'upstart'", ':', 'return', '_service_is_upstart', '(', 'name', ')', 'elif', 'limit', '==', "'sysvinit'", ':', 'return', '_service_is_sysv', '(', 'name', ')', 'else', ':', 'return', '_service_is_upstart', '(', 'name', ')', 'or', '_service_is_sysv', '(', 'name', ')', 'or', '_service_is_chkconfig', '(', 'name', ')'] | Return True if the named service is available. Use the ``limit`` param to
restrict results to services of that type.
CLI Examples:
.. code-block:: bash
salt '*' service.available sshd
salt '*' service.available sshd limit=upstart
salt '*' service.available sshd limit=sysvinit | ['Return', 'True', 'if', 'the', 'named', 'service', 'is', 'available', '.', 'Use', 'the', 'limit', 'param', 'to', 'restrict', 'results', 'to', 'services', 'of', 'that', 'type', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_service.py#L364-L382 |
1,621 | bharadwaj-raju/libdesktop | libdesktop/system.py | is_running | def is_running(process):
'''
Check if process is running.
Check if the given process name is running or not.
Note:
On a Linux system, kernel threads (like ``kthreadd`` etc.)
are excluded.
Args:
process (str): The name of the process.
Returns:
bool: Is the process running?
'''
if os.name == 'nt':
process_list = get_cmd_out(['tasklist', '/v'])
return process in process_list
else:
process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
for i in process_list.split('\n'):
# 'COMMAND' is the column heading
# [*] indicates kernel-level processes like \
# kthreadd, which manages threads in the Linux kernel
if not i == 'COMMAND' or i.startswith('['):
if i == process:
return True
elif os.path.basename(i) == process:
# check i without executable path
# for example, if the 'process' argument is 'sshd'
# and '/usr/bin/sshd' is listed in ps, return True
return True
return False | python | def is_running(process):
'''
Check if process is running.
Check if the given process name is running or not.
Note:
On a Linux system, kernel threads (like ``kthreadd`` etc.)
are excluded.
Args:
process (str): The name of the process.
Returns:
bool: Is the process running?
'''
if os.name == 'nt':
process_list = get_cmd_out(['tasklist', '/v'])
return process in process_list
else:
process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
for i in process_list.split('\n'):
# 'COMMAND' is the column heading
# [*] indicates kernel-level processes like \
# kthreadd, which manages threads in the Linux kernel
if not i == 'COMMAND' or i.startswith('['):
if i == process:
return True
elif os.path.basename(i) == process:
# check i without executable path
# for example, if the 'process' argument is 'sshd'
# and '/usr/bin/sshd' is listed in ps, return True
return True
return False | ['def', 'is_running', '(', 'process', ')', ':', 'if', 'os', '.', 'name', '==', "'nt'", ':', 'process_list', '=', 'get_cmd_out', '(', '[', "'tasklist'", ',', "'/v'", ']', ')', 'return', 'process', 'in', 'process_list', 'else', ':', 'process_list', '=', 'get_cmd_out', '(', "'ps axw | awk \\'{print $5}\\''", ')', 'for', 'i', 'in', 'process_list', '.', 'split', '(', "'\\n'", ')', ':', "# 'COMMAND' is the column heading", '# [*] indicates kernel-level processes like \\', '# kthreadd, which manages threads in the Linux kernel', 'if', 'not', 'i', '==', "'COMMAND'", 'or', 'i', '.', 'startswith', '(', "'['", ')', ':', 'if', 'i', '==', 'process', ':', 'return', 'True', 'elif', 'os', '.', 'path', '.', 'basename', '(', 'i', ')', '==', 'process', ':', '# check i without executable path', "# for example, if 'process' arguments is 'sshd'", "# and '/usr/bin/sshd' is listed in ps, return True", 'return', 'True', 'return', 'False'] | Check if process is running.
Check if the given process name is running or not.
Note:
On a Linux system, kernel threads (like ``kthreadd`` etc.)
are excluded.
Args:
process (str): The name of the process.
Returns:
bool: Is the process running? | ['Check', 'if', 'process', 'is', 'running', '.'] | train | https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/system.py#L205-L243 |
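A short usage sketch for is_running(); the import path follows the repository layout in the record, and the process name is only an example.
# Minimal sketch, assuming libdesktop is importable as packaged in the repository above.
from libdesktop import system
if system.is_running('sshd'):
    print('sshd is running')
else:
    print('sshd is not running')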
1,622 | fake-name/ChromeController | ChromeController/Generator/Generated.py | ChromeRemoteDebugInterface.Debugger_setScriptSource | def Debugger_setScriptSource(self, scriptId, scriptSource, **kwargs):
"""
Function path: Debugger.setScriptSource
Domain: Debugger
Method name: setScriptSource
Parameters:
Required arguments:
'scriptId' (type: Runtime.ScriptId) -> Id of the script to edit.
'scriptSource' (type: string) -> New content of the script.
Optional arguments:
'dryRun' (type: boolean) -> If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code.
Returns:
'callFrames' (type: array) -> New stack trace in case editing has happened while VM was stopped.
'stackChanged' (type: boolean) -> Whether current call stack was modified after applying the changes.
'asyncStackTrace' (type: Runtime.StackTrace) -> Async stack trace, if any.
'exceptionDetails' (type: Runtime.ExceptionDetails) -> Exception details if any.
Description: Edits JavaScript source live.
"""
assert isinstance(scriptSource, (str,)
), "Argument 'scriptSource' must be of type '['str']'. Received type: '%s'" % type(
scriptSource)
if 'dryRun' in kwargs:
assert isinstance(kwargs['dryRun'], (bool,)
), "Optional argument 'dryRun' must be of type '['bool']'. Received type: '%s'" % type(
kwargs['dryRun'])
expected = ['dryRun']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['dryRun']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Debugger.setScriptSource',
scriptId=scriptId, scriptSource=scriptSource, **kwargs)
return subdom_funcs | python | def Debugger_setScriptSource(self, scriptId, scriptSource, **kwargs):
"""
Function path: Debugger.setScriptSource
Domain: Debugger
Method name: setScriptSource
Parameters:
Required arguments:
'scriptId' (type: Runtime.ScriptId) -> Id of the script to edit.
'scriptSource' (type: string) -> New content of the script.
Optional arguments:
'dryRun' (type: boolean) -> If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code.
Returns:
'callFrames' (type: array) -> New stack trace in case editing has happened while VM was stopped.
'stackChanged' (type: boolean) -> Whether current call stack was modified after applying the changes.
'asyncStackTrace' (type: Runtime.StackTrace) -> Async stack trace, if any.
'exceptionDetails' (type: Runtime.ExceptionDetails) -> Exception details if any.
Description: Edits JavaScript source live.
"""
assert isinstance(scriptSource, (str,)
), "Argument 'scriptSource' must be of type '['str']'. Received type: '%s'" % type(
scriptSource)
if 'dryRun' in kwargs:
assert isinstance(kwargs['dryRun'], (bool,)
), "Optional argument 'dryRun' must be of type '['bool']'. Received type: '%s'" % type(
kwargs['dryRun'])
expected = ['dryRun']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['dryRun']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Debugger.setScriptSource',
scriptId=scriptId, scriptSource=scriptSource, **kwargs)
return subdom_funcs | ['def', 'Debugger_setScriptSource', '(', 'self', ',', 'scriptId', ',', 'scriptSource', ',', '*', '*', 'kwargs', ')', ':', 'assert', 'isinstance', '(', 'scriptSource', ',', '(', 'str', ',', ')', ')', ',', '"Argument \'scriptSource\' must be of type \'[\'str\']\'. Received type: \'%s\'"', '%', 'type', '(', 'scriptSource', ')', 'if', "'dryRun'", 'in', 'kwargs', ':', 'assert', 'isinstance', '(', 'kwargs', '[', "'dryRun'", ']', ',', '(', 'bool', ',', ')', ')', ',', '"Optional argument \'dryRun\' must be of type \'[\'bool\']\'. Received type: \'%s\'"', '%', 'type', '(', 'kwargs', '[', "'dryRun'", ']', ')', 'expected', '=', '[', "'dryRun'", ']', 'passed_keys', '=', 'list', '(', 'kwargs', '.', 'keys', '(', ')', ')', 'assert', 'all', '(', '[', '(', 'key', 'in', 'expected', ')', 'for', 'key', 'in', 'passed_keys', ']', ')', ',', '"Allowed kwargs are [\'dryRun\']. Passed kwargs: %s"', '%', 'passed_keys', 'subdom_funcs', '=', 'self', '.', 'synchronous_command', '(', "'Debugger.setScriptSource'", ',', 'scriptId', '=', 'scriptId', ',', 'scriptSource', '=', 'scriptSource', ',', '*', '*', 'kwargs', ')', 'return', 'subdom_funcs'] | Function path: Debugger.setScriptSource
Domain: Debugger
Method name: setScriptSource
Parameters:
Required arguments:
'scriptId' (type: Runtime.ScriptId) -> Id of the script to edit.
'scriptSource' (type: string) -> New content of the script.
Optional arguments:
'dryRun' (type: boolean) -> If true the change will not actually be applied. Dry run may be used to get result description without actually modifying the code.
Returns:
'callFrames' (type: array) -> New stack trace in case editing has happened while VM was stopped.
'stackChanged' (type: boolean) -> Whether current call stack was modified after applying the changes.
'asyncStackTrace' (type: Runtime.StackTrace) -> Async stack trace, if any.
'exceptionDetails' (type: Runtime.ExceptionDetails) -> Exception details if any.
Description: Edits JavaScript source live. | ['Function', 'path', ':', 'Debugger', '.', 'setScriptSource', 'Domain', ':', 'Debugger', 'Method', 'name', ':', 'setScriptSource', 'Parameters', ':', 'Required', 'arguments', ':', 'scriptId', '(', 'type', ':', 'Runtime', '.', 'ScriptId', ')', '-', '>', 'Id', 'of', 'the', 'script', 'to', 'edit', '.', 'scriptSource', '(', 'type', ':', 'string', ')', '-', '>', 'New', 'content', 'of', 'the', 'script', '.', 'Optional', 'arguments', ':', 'dryRun', '(', 'type', ':', 'boolean', ')', '-', '>', 'If', 'true', 'the', 'change', 'will', 'not', 'actually', 'be', 'applied', '.', 'Dry', 'run', 'may', 'be', 'used', 'to', 'get', 'result', 'description', 'without', 'actually', 'modifying', 'the', 'code', '.', 'Returns', ':', 'callFrames', '(', 'type', ':', 'array', ')', '-', '>', 'New', 'stack', 'trace', 'in', 'case', 'editing', 'has', 'happened', 'while', 'VM', 'was', 'stopped', '.', 'stackChanged', '(', 'type', ':', 'boolean', ')', '-', '>', 'Whether', 'current', 'call', 'stack', 'was', 'modified', 'after', 'applying', 'the', 'changes', '.', 'asyncStackTrace', '(', 'type', ':', 'Runtime', '.', 'StackTrace', ')', '-', '>', 'Async', 'stack', 'trace', 'if', 'any', '.', 'exceptionDetails', '(', 'type', ':', 'Runtime', '.', 'ExceptionDetails', ')', '-', '>', 'Exception', 'details', 'if', 'any', '.', 'Description', ':', 'Edits', 'JavaScript', 'source', 'live', '.'] | train | https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L7280-L7313 |
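An illustrative call of the wrapper method above. How the ChromeRemoteDebugInterface instance is obtained, and the script id value, are assumptions; only the parameter names and the dryRun kwarg come from the record.
# 'interface' is a hypothetical, already-connected ChromeRemoteDebugInterface instance.
new_source = "console.log('patched');"
result = interface.Debugger_setScriptSource(
    scriptId='42',             # a Runtime.ScriptId previously reported by the debugger (assumed value)
    scriptSource=new_source,
    dryRun=True,               # validate the edit without actually applying it
)
print(result)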
1,623 | saltstack/salt | salt/modules/timezone.py | _timedatectl | def _timedatectl():
'''
get the output of timedatectl
'''
ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
if ret['retcode'] != 0:
msg = 'timedatectl failed: {0}'.format(ret['stderr'])
raise CommandExecutionError(msg)
return ret | python | def _timedatectl():
'''
get the output of timedatectl
'''
ret = __salt__['cmd.run_all'](['timedatectl'], python_shell=False)
if ret['retcode'] != 0:
msg = 'timedatectl failed: {0}'.format(ret['stderr'])
raise CommandExecutionError(msg)
return ret | ['def', '_timedatectl', '(', ')', ':', 'ret', '=', '__salt__', '[', "'cmd.run_all'", ']', '(', '[', "'timedatectl'", ']', ',', 'python_shell', '=', 'False', ')', 'if', 'ret', '[', "'retcode'", ']', '!=', '0', ':', 'msg', '=', "'timedatectl failed: {0}'", '.', 'format', '(', 'ret', '[', "'stderr'", ']', ')', 'raise', 'CommandExecutionError', '(', 'msg', ')', 'return', 'ret'] | get the output of timedatectl | ['get', 'the', 'output', 'of', 'timedatectl'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/timezone.py#L46-L56 |
1,624 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/_backends/enrollment/apis/public_api_api.py | PublicAPIApi.delete_bulk_device_enrollment | def delete_bulk_device_enrollment(self, enrollment_identities, **kwargs): # noqa: E501
"""Bulk delete # noqa: E501
With bulk delete, you can upload a `CSV` file containing a number of enrollment IDs to be deleted. **Example usage:** ``` curl -X POST \\ -H 'Authorization: Bearer <valid access token>' \\ -F 'enrollment_identities=@/path/to/enrollments/enrollments.csv' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments-bulk-deletes ``` **An example `CSV` file:** 1. The first line is assumed to be the header. The content of the header is not validated. 2. Each line can contain comma-separated values, where the first value is always assumed to be the Enrollment ID. 3. Only one enrollment ID is expected per line. 4. Valid enrollments begin with A followed by a - and 95 characters in the format as below. 5. Valid enrollment identities may be enclosed within quotes. 6. UTF-8 encoding is expected. ``` \"enrollment_identity\" \"A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:44:71:93:23:22:15:43:23:12\", \"A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:25:48:44:71:22:15:43:23:12\", ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_bulk_device_enrollment(enrollment_identities, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
:return: BulkResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs) # noqa: E501
else:
(data) = self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs) # noqa: E501
return data | python | def delete_bulk_device_enrollment(self, enrollment_identities, **kwargs): # noqa: E501
"""Bulk delete # noqa: E501
With bulk delete, you can upload a `CSV` file containing a number of enrollment IDs to be deleted. **Example usage:** ``` curl -X POST \\ -H 'Authorization: Bearer <valid access token>' \\ -F 'enrollment_identities=@/path/to/enrollments/enrollments.csv' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments-bulk-deletes ``` **An example `CSV` file:** 1. The first line is assumed to be the header. The content of the header is not validated. 2. Each line can contain comma-separated values, where the first value is always assumed to be the Enrollment ID. 3. Only one enrollment ID is expected per line. 4. Valid enrollments begin with A followed by a - and 95 characters in the format as below. 5. Valid enrollment identities may be enclosed within quotes. 6. UTF-8 encoding is expected. ``` \"enrollment_identity\" \"A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:44:71:93:23:22:15:43:23:12\", \"A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:25:48:44:71:22:15:43:23:12\", ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_bulk_device_enrollment(enrollment_identities, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
:return: BulkResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('asynchronous'):
return self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs) # noqa: E501
else:
(data) = self.delete_bulk_device_enrollment_with_http_info(enrollment_identities, **kwargs) # noqa: E501
return data | ['def', 'delete_bulk_device_enrollment', '(', 'self', ',', 'enrollment_identities', ',', '*', '*', 'kwargs', ')', ':', '# noqa: E501', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'asynchronous'", ')', ':', 'return', 'self', '.', 'delete_bulk_device_enrollment_with_http_info', '(', 'enrollment_identities', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'delete_bulk_device_enrollment_with_http_info', '(', 'enrollment_identities', ',', '*', '*', 'kwargs', ')', '# noqa: E501', 'return', 'data'] | Bulk delete # noqa: E501
With bulk delete, you can upload a `CSV` file containing a number of enrollment IDs to be deleted. **Example usage:** ``` curl -X POST \\ -H 'Authorization: Bearer <valid access token>' \\ -F 'enrollment_identities=@/path/to/enrollments/enrollments.csv' \\ https://api.us-east-1.mbedcloud.com/v3/device-enrollments-bulk-deletes ``` **An example `CSV` file:** 1. The first line is assumed to be the header. The content of the header is not validated. 2. Each line can contain comma-separated values, where the first value is always assumed to be the Enrollment ID. 3. Only one enrollment ID is expected per line. 4. Valid enrollments begin with A followed by a - and 95 characters in the format as below. 5. Valid enrollment identities may be enclosed within quotes. 6. UTF-8 encoding is expected. ``` \"enrollment_identity\" \"A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:44:71:93:23:22:15:43:23:12\", \"A-4E:63:2D:AE:14:BC:D1:09:77:21:95:44:ED:34:06:57:1E:03:B1:EF:0E:F2:59:25:48:44:71:22:15:43:23:12\", ``` # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass asynchronous=True
>>> thread = api.delete_bulk_device_enrollment(enrollment_identities, asynchronous=True)
>>> result = thread.get()
:param asynchronous bool
:param file enrollment_identities: The `CSV` file containing the enrollment IDs. The maximum file size is 10MB. (required)
:return: BulkResponse
If the method is called asynchronously,
returns the request thread. | ['Bulk', 'delete', '#', 'noqa', ':', 'E501'] | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/enrollment/apis/public_api_api.py#L234-L254 |
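A hedged sketch of driving this bulk-delete wrapper; the client construction, authentication, and CSV path are assumptions (generated Swagger-style clients usually accept a file path for file parameters), while the method name and the asynchronous kwarg come from the record.
# Hypothetical wiring; authentication/configuration of the generated client is omitted.
from mbed_cloud._backends.enrollment.apis.public_api_api import PublicAPIApi
api = PublicAPIApi()
# Synchronous call returning a BulkResponse
bulk = api.delete_bulk_device_enrollment('/path/to/enrollments.csv')
# Asynchronous variant returning a thread-like object
thread = api.delete_bulk_device_enrollment('/path/to/enrollments.csv', asynchronous=True)
bulk = thread.get()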
1,625 | google/mobly | mobly/controllers/android_device.py | AndroidDevice.is_adb_root | def is_adb_root(self):
"""True if adb is running as root for this device.
"""
try:
return '0' == self.adb.shell('id -u').decode('utf-8').strip()
except adb.AdbError:
# Wait a bit and retry to work around adb flakiness for this cmd.
time.sleep(0.2)
return '0' == self.adb.shell('id -u').decode('utf-8').strip() | python | def is_adb_root(self):
"""True if adb is running as root for this device.
"""
try:
return '0' == self.adb.shell('id -u').decode('utf-8').strip()
except adb.AdbError:
# Wait a bit and retry to work around adb flakiness for this cmd.
time.sleep(0.2)
return '0' == self.adb.shell('id -u').decode('utf-8').strip() | ['def', 'is_adb_root', '(', 'self', ')', ':', 'try', ':', 'return', "'0'", '==', 'self', '.', 'adb', '.', 'shell', '(', "'id -u'", ')', '.', 'decode', '(', "'utf-8'", ')', '.', 'strip', '(', ')', 'except', 'adb', '.', 'AdbError', ':', '# Wait a bit and retry to work around adb flakiness for this cmd.', 'time', '.', 'sleep', '(', '0.2', ')', 'return', "'0'", '==', 'self', '.', 'adb', '.', 'shell', '(', "'id -u'", ')', '.', 'decode', '(', "'utf-8'", ')', '.', 'strip', '(', ')'] | True if adb is running as root for this device. | ['True', 'if', 'adb', 'is', 'running', 'as', 'root', 'for', 'this', 'device', '.'] | train | https://github.com/google/mobly/blob/38ba2cf7d29a20e6a2fca1718eecb337df38db26/mobly/controllers/android_device.py#L724-L732 |
1,626 | twilio/twilio-python | twilio/rest/ip_messaging/v2/service/channel/message.py | MessageList.create | def create(self, from_=values.unset, attributes=values.unset,
date_created=values.unset, date_updated=values.unset,
last_updated_by=values.unset, body=values.unset,
media_sid=values.unset):
"""
Create a new MessageInstance
:param unicode from_: The identity of the new message's author
:param unicode attributes: A valid JSON string that contains application-specific data
:param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
:param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
:param unicode last_updated_by: The Identity of the User who last updated the Message
:param unicode body: The message to send to the channel
:param unicode media_sid: The Media Sid to be attached to the new Message
:returns: Newly created MessageInstance
:rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
"""
data = values.of({
'From': from_,
'Attributes': attributes,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
'LastUpdatedBy': last_updated_by,
'Body': body,
'MediaSid': media_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return MessageInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
) | python | def create(self, from_=values.unset, attributes=values.unset,
date_created=values.unset, date_updated=values.unset,
last_updated_by=values.unset, body=values.unset,
media_sid=values.unset):
"""
Create a new MessageInstance
:param unicode from_: The identity of the new message's author
:param unicode attributes: A valid JSON string that contains application-specific data
:param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
:param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
:param unicode last_updated_by: The Identity of the User who last updated the Message
:param unicode body: The message to send to the channel
:param unicode media_sid: The Media Sid to be attached to the new Message
:returns: Newly created MessageInstance
:rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
"""
data = values.of({
'From': from_,
'Attributes': attributes,
'DateCreated': serialize.iso8601_datetime(date_created),
'DateUpdated': serialize.iso8601_datetime(date_updated),
'LastUpdatedBy': last_updated_by,
'Body': body,
'MediaSid': media_sid,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return MessageInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
channel_sid=self._solution['channel_sid'],
) | ['def', 'create', '(', 'self', ',', 'from_', '=', 'values', '.', 'unset', ',', 'attributes', '=', 'values', '.', 'unset', ',', 'date_created', '=', 'values', '.', 'unset', ',', 'date_updated', '=', 'values', '.', 'unset', ',', 'last_updated_by', '=', 'values', '.', 'unset', ',', 'body', '=', 'values', '.', 'unset', ',', 'media_sid', '=', 'values', '.', 'unset', ')', ':', 'data', '=', 'values', '.', 'of', '(', '{', "'From'", ':', 'from_', ',', "'Attributes'", ':', 'attributes', ',', "'DateCreated'", ':', 'serialize', '.', 'iso8601_datetime', '(', 'date_created', ')', ',', "'DateUpdated'", ':', 'serialize', '.', 'iso8601_datetime', '(', 'date_updated', ')', ',', "'LastUpdatedBy'", ':', 'last_updated_by', ',', "'Body'", ':', 'body', ',', "'MediaSid'", ':', 'media_sid', ',', '}', ')', 'payload', '=', 'self', '.', '_version', '.', 'create', '(', "'POST'", ',', 'self', '.', '_uri', ',', 'data', '=', 'data', ',', ')', 'return', 'MessageInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'service_sid', '=', 'self', '.', '_solution', '[', "'service_sid'", ']', ',', 'channel_sid', '=', 'self', '.', '_solution', '[', "'channel_sid'", ']', ',', ')'] | Create a new MessageInstance
:param unicode from_: The identity of the new message's author
:param unicode attributes: A valid JSON string that contains application-specific data
:param datetime date_created: The ISO 8601 date and time in GMT when the resource was created
:param datetime date_updated: The ISO 8601 date and time in GMT when the resource was updated
:param unicode last_updated_by: The Identity of the User who last updated the Message
:param unicode body: The message to send to the channel
:param unicode media_sid: The Media Sid to be attached to the new Message
:returns: Newly created MessageInstance
:rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance | ['Create', 'a', 'new', 'MessageInstance'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/channel/message.py#L38-L77 |
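A hedged example of reaching this create() through the public Twilio client; the account credentials, service/channel SIDs, and the chat.v2 accessor path are placeholders or assumptions, while the from_ and body keyword names come from the record.
# All SIDs and tokens below are placeholders.
from twilio.rest import Client
client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
message = (client.chat.v2
           .services('ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
           .channels('CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')
           .messages
           .create(from_='alice', body='Hello, channel!'))
print(message.sid)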
1,627 | hyperledger/indy-sdk | wrappers/python/indy/wallet.py | generate_wallet_key | async def generate_wallet_key(config: Optional[str]) -> str:
"""
Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows to avoid expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("generate_wallet_key: >>> config: %r",
config)
if not hasattr(generate_wallet_key, "cb"):
logger.debug("generate_wallet_key: Creating callback")
generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_config = c_char_p(config.encode('utf-8')) if config is not None else None
key = await do_call('indy_generate_wallet_key',
c_config,
generate_wallet_key.cb)
res = key.decode()
logger.debug("generate_wallet_key: <<< res: %r", res)
return res | python | async def generate_wallet_key(config: Optional[str]) -> str:
"""
Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows to avoid expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("generate_wallet_key: >>> config: %r",
config)
if not hasattr(generate_wallet_key, "cb"):
logger.debug("generate_wallet_key: Creating callback")
generate_wallet_key.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))
c_config = c_char_p(config.encode('utf-8')) if config is not None else None
key = await do_call('indy_generate_wallet_key',
c_config,
generate_wallet_key.cb)
res = key.decode()
logger.debug("generate_wallet_key: <<< res: %r", res)
return res | ['async', 'def', 'generate_wallet_key', '(', 'config', ':', 'Optional', '[', 'str', ']', ')', '->', 'str', ':', 'logger', '=', 'logging', '.', 'getLogger', '(', '__name__', ')', 'logger', '.', 'debug', '(', '"generate_wallet_key: >>> config: %r"', ',', 'config', ')', 'if', 'not', 'hasattr', '(', 'generate_wallet_key', ',', '"cb"', ')', ':', 'logger', '.', 'debug', '(', '"generate_wallet_key: Creating callback"', ')', 'generate_wallet_key', '.', 'cb', '=', 'create_cb', '(', 'CFUNCTYPE', '(', 'None', ',', 'c_int32', ',', 'c_int32', ',', 'c_char_p', ')', ')', 'c_config', '=', 'c_char_p', '(', 'config', '.', 'encode', '(', "'utf-8'", ')', ')', 'if', 'config', 'is', 'not', 'None', 'else', 'None', 'key', '=', 'await', 'do_call', '(', "'indy_generate_wallet_key'", ',', 'c_config', ',', 'generate_wallet_key', '.', 'cb', ')', 'res', '=', 'key', '.', 'decode', '(', ')', 'logger', '.', 'debug', '(', '"generate_wallet_key: <<< res: %r"', ',', 'res', ')', 'return', 'res'] | Generate wallet master key.
Returned key is compatible with "RAW" key derivation method.
It allows to avoid expensive key derivation for use cases when wallet keys can be stored in a secure enclave.
:param config: (optional) key configuration json.
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
}
:return: Error code | ['Generate', 'wallet', 'master', 'key', '.', 'Returned', 'key', 'is', 'compatible', 'with', 'RAW', 'key', 'derivation', 'method', '.', 'It', 'allows', 'to', 'avoid', 'expensive', 'key', 'derivation', 'for', 'use', 'cases', 'when', 'wallet', 'keys', 'can', 'be', 'stored', 'in', 'a', 'secure', 'enclave', '.'] | train | https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/wallet.py#L320-L351 |
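Since the wrapper above is a coroutine, it has to be awaited; a minimal asyncio driver is sketched below. The json config and the 32-character seed are arbitrary example values.
# Minimal sketch; the seed shown is an arbitrary 32-character example.
import asyncio
import json
from indy import wallet
async def demo():
    config = json.dumps({'seed': '00000000000000000000000000000My1'})
    key = await wallet.generate_wallet_key(config)
    print('generated RAW wallet key:', key)
asyncio.get_event_loop().run_until_complete(demo())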
1,628 | cherrypy/cheroot | cheroot/wsgi.py | Gateway.write | def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError('WSGI write called before start_response.')
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response(
'500 Internal Server Error',
'The requested resource returned more bytes than the '
'declared Content-Length.',
)
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
self.req.ensure_headers_sent()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
'Response body exceeds the declared Content-Length.',
) | python | def write(self, chunk):
"""WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).
"""
if not self.started_response:
raise AssertionError('WSGI write called before start_response.')
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if rbo is not None and chunklen > rbo:
if not self.req.sent_headers:
# Whew. We can send a 500 to the client.
self.req.simple_response(
'500 Internal Server Error',
'The requested resource returned more bytes than the '
'declared Content-Length.',
)
else:
# Dang. We have probably already sent data. Truncate the chunk
# to fit (so the client doesn't hang) and raise an error later.
chunk = chunk[:rbo]
self.req.ensure_headers_sent()
self.req.write(chunk)
if rbo is not None:
rbo -= chunklen
if rbo < 0:
raise ValueError(
'Response body exceeds the declared Content-Length.',
) | ['def', 'write', '(', 'self', ',', 'chunk', ')', ':', 'if', 'not', 'self', '.', 'started_response', ':', 'raise', 'AssertionError', '(', "'WSGI write called before start_response.'", ')', 'chunklen', '=', 'len', '(', 'chunk', ')', 'rbo', '=', 'self', '.', 'remaining_bytes_out', 'if', 'rbo', 'is', 'not', 'None', 'and', 'chunklen', '>', 'rbo', ':', 'if', 'not', 'self', '.', 'req', '.', 'sent_headers', ':', '# Whew. We can send a 500 to the client.', 'self', '.', 'req', '.', 'simple_response', '(', "'500 Internal Server Error'", ',', "'The requested resource returned more bytes than the '", "'declared Content-Length.'", ',', ')', 'else', ':', '# Dang. We have probably already sent data. Truncate the chunk', "# to fit (so the client doesn't hang) and raise an error later.", 'chunk', '=', 'chunk', '[', ':', 'rbo', ']', 'self', '.', 'req', '.', 'ensure_headers_sent', '(', ')', 'self', '.', 'req', '.', 'write', '(', 'chunk', ')', 'if', 'rbo', 'is', 'not', 'None', ':', 'rbo', '-=', 'chunklen', 'if', 'rbo', '<', '0', ':', 'raise', 'ValueError', '(', "'Response body exceeds the declared Content-Length.'", ',', ')'] | WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application). | ['WSGI', 'callable', 'to', 'write', 'unbuffered', 'data', 'to', 'the', 'client', '.'] | train | https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/wsgi.py#L208-L241 |
1,629 | pandas-dev/pandas | pandas/core/panel.py | Panel._extract_axes_for_slice | def _extract_axes_for_slice(self, axes):
"""
Return the slice dictionary for these axes.
"""
return {self._AXIS_SLICEMAP[i]: a for i, a in
zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)} | python | def _extract_axes_for_slice(self, axes):
"""
Return the slice dictionary for these axes.
"""
return {self._AXIS_SLICEMAP[i]: a for i, a in
zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)} | ['def', '_extract_axes_for_slice', '(', 'self', ',', 'axes', ')', ':', 'return', '{', 'self', '.', '_AXIS_SLICEMAP', '[', 'i', ']', ':', 'a', 'for', 'i', ',', 'a', 'in', 'zip', '(', 'self', '.', '_AXIS_ORDERS', '[', 'self', '.', '_AXIS_LEN', '-', 'len', '(', 'axes', ')', ':', ']', ',', 'axes', ')', '}'] | Return the slice dictionary for these axes. | ['Return', 'the', 'slice', 'dictionary', 'for', 'these', 'axes', '.'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/panel.py#L1451-L1456 |
1,630 | codenerix/django-codenerix | codenerix/helpers.py | InMemoryZip.append | def append(self, filename_in_zip, file_contents):
'''
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
'''
# Set the file pointer to the end of the file
self.in_memory_zip.seek(-1, io.SEEK_END)
# Get a handle to the in-memory zip in append mode
zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
# Write the file to the in-memory zip
zf.writestr(filename_in_zip, file_contents)
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in zf.filelist:
zfile.create_system = 0
# Close the ZipFile
zf.close()
# Rewind the file
self.in_memory_zip.seek(0)
return self | python | def append(self, filename_in_zip, file_contents):
'''
Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip.
'''
# Set the file pointer to the end of the file
self.in_memory_zip.seek(-1, io.SEEK_END)
# Get a handle to the in-memory zip in append mode
zf = zipfile.ZipFile(self.in_memory_zip, "a", zipfile.ZIP_DEFLATED, False)
# Write the file to the in-memory zip
zf.writestr(filename_in_zip, file_contents)
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in zf.filelist:
zfile.create_system = 0
# Close the ZipFile
zf.close()
# Rewind the file
self.in_memory_zip.seek(0)
return self | ['def', 'append', '(', 'self', ',', 'filename_in_zip', ',', 'file_contents', ')', ':', '# Set the file pointer to the end of the file', 'self', '.', 'in_memory_zip', '.', 'seek', '(', '-', '1', ',', 'io', '.', 'SEEK_END', ')', '# Get a handle to the in-memory zip in append mode', 'zf', '=', 'zipfile', '.', 'ZipFile', '(', 'self', '.', 'in_memory_zip', ',', '"a"', ',', 'zipfile', '.', 'ZIP_DEFLATED', ',', 'False', ')', '# Write the file to the in-memory zip', 'zf', '.', 'writestr', '(', 'filename_in_zip', ',', 'file_contents', ')', '# Mark the files as having been created on Windows so that', '# Unix permissions are not inferred as 0000', 'for', 'zfile', 'in', 'zf', '.', 'filelist', ':', 'zfile', '.', 'create_system', '=', '0', '# Close the ZipFile', 'zf', '.', 'close', '(', ')', '# Rewind the file', 'self', '.', 'in_memory_zip', '.', 'seek', '(', '0', ')', 'return', 'self'] | Appends a file with name filename_in_zip and contents of
file_contents to the in-memory zip. | ['Appends', 'a', 'file', 'with', 'name', 'filename_in_zip', 'and', 'contents', 'of', 'file_contents', 'to', 'the', 'in', '-', 'memory', 'zip', '.'] | train | https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/helpers.py#L491-L516 |
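A brief usage sketch; the no-argument InMemoryZip() constructor is an assumption beyond what the record shows, while the chained append() calls rely on append() returning self as above.
# Assumes InMemoryZip() can be constructed without arguments; file names and contents are examples.
from codenerix.helpers import InMemoryZip
imz = InMemoryZip()
imz.append('hello.txt', 'Hello, world!').append('data/nums.csv', '1,2,3\n')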
1,631 | PMEAL/porespy | porespy/tools/__funcs__.py | randomize_colors | def randomize_colors(im, keep_vals=[0]):
r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument.
'''
im_flat = im.flatten()
keep_vals = sp.array(keep_vals)
swap_vals = ~sp.in1d(im_flat, keep_vals)
im_vals = sp.unique(im_flat[swap_vals])
new_vals = sp.random.permutation(im_vals)
im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
im_map[im_vals] = new_vals
im_new = im_map[im_flat]
im_new = sp.reshape(im_new, newshape=sp.shape(im))
return im_new | python | def randomize_colors(im, keep_vals=[0]):
r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument.
'''
im_flat = im.flatten()
keep_vals = sp.array(keep_vals)
swap_vals = ~sp.in1d(im_flat, keep_vals)
im_vals = sp.unique(im_flat[swap_vals])
new_vals = sp.random.permutation(im_vals)
im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
im_map[im_vals] = new_vals
im_new = im_map[im_flat]
im_new = sp.reshape(im_new, newshape=sp.shape(im))
return im_new | ['def', 'randomize_colors', '(', 'im', ',', 'keep_vals', '=', '[', '0', ']', ')', ':', 'im_flat', '=', 'im', '.', 'flatten', '(', ')', 'keep_vals', '=', 'sp', '.', 'array', '(', 'keep_vals', ')', 'swap_vals', '=', '~', 'sp', '.', 'in1d', '(', 'im_flat', ',', 'keep_vals', ')', 'im_vals', '=', 'sp', '.', 'unique', '(', 'im_flat', '[', 'swap_vals', ']', ')', 'new_vals', '=', 'sp', '.', 'random', '.', 'permutation', '(', 'im_vals', ')', 'im_map', '=', 'sp', '.', 'zeros', '(', 'shape', '=', '[', 'sp', '.', 'amax', '(', 'im_vals', ')', '+', '1', ',', ']', ',', 'dtype', '=', 'int', ')', 'im_map', '[', 'im_vals', ']', '=', 'new_vals', 'im_new', '=', 'im_map', '[', 'im_flat', ']', 'im_new', '=', 'sp', '.', 'reshape', '(', 'im_new', ',', 'newshape', '=', 'sp', '.', 'shape', '(', 'im', ')', ')', 'return', 'im_new'] | r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument. | ['r', 'Takes', 'a', 'greyscale', 'image', 'and', 'randomly', 'shuffles', 'the', 'greyscale', 'values', 'so', 'that', 'all', 'voxels', 'labeled', 'X', 'will', 'be', 'labelled', 'Y', 'and', 'all', 'voxels', 'labeled', 'Y', 'will', 'be', 'labeled', 'Z', 'where', 'X', 'Y', 'Z', 'and', 'so', 'on', 'are', 'randomly', 'selected', 'from', 'the', 'values', 'in', 'the', 'input', 'image', '.'] | train | https://github.com/PMEAL/porespy/blob/1e13875b56787d8f5b7ffdabce8c4342c33ba9f8/porespy/tools/__funcs__.py#L510-L575 |
1,632 | bitesofcode/projex | projex/envmanager.py | EnvManager.current | def current():
"""
Returns the current environment manager for the projex system.
:return <EnvManager>
"""
if not EnvManager._current:
path = os.environ.get('PROJEX_ENVMGR_PATH')
module = os.environ.get('PROJEX_ENVMGR_MODULE')
clsname = os.environ.get('PROJEX_ENVMGR_CLASS')
cls = EnvManager
if module and clsname:
# check if the user specified an import path
if path:
logger.info('Adding env manager path: %s' % path)
sys.path.insert(0, path)
logger.info('Loading env manager: %s.%s' % (module, clsname))
try:
__import__(module)
mod = sys.modules[module]
cls = getattr(mod, clsname)
except ImportError:
logger.error('Could not import env manager %s', module)
except KeyError:
logger.error('Could not import env manager %s', module)
except AttributeError:
msg = '%s is not a valid class of %s' % (clsname, module)
logger.error(msg)
EnvManager._current = cls()
return EnvManager._current | python | def current():
"""
Returns the current environment manager for the projex system.
:return <EnvManager>
"""
if not EnvManager._current:
path = os.environ.get('PROJEX_ENVMGR_PATH')
module = os.environ.get('PROJEX_ENVMGR_MODULE')
clsname = os.environ.get('PROJEX_ENVMGR_CLASS')
cls = EnvManager
if module and clsname:
# check if the user specified an import path
if path:
logger.info('Adding env manager path: %s' % path)
sys.path.insert(0, path)
logger.info('Loading env manager: %s.%s' % (module, clsname))
try:
__import__(module)
mod = sys.modules[module]
cls = getattr(mod, clsname)
except ImportError:
logger.error('Could not import env manager %s', module)
except KeyError:
logger.error('Could not import env manager %s', module)
except AttributeError:
msg = '%s is not a valid class of %s' % (clsname, module)
logger.error(msg)
EnvManager._current = cls()
return EnvManager._current | ['def', 'current', '(', ')', ':', 'if', 'not', 'EnvManager', '.', '_current', ':', 'path', '=', 'os', '.', 'environ', '.', 'get', '(', "'PROJEX_ENVMGR_PATH'", ')', 'module', '=', 'os', '.', 'environ', '.', 'get', '(', "'PROJEX_ENVMGR_MODULE'", ')', 'clsname', '=', 'os', '.', 'environ', '.', 'get', '(', "'PROJEX_ENVMGR_CLASS'", ')', 'cls', '=', 'EnvManager', 'if', 'module', 'and', 'clsname', ':', '# check if the user specified an import path', 'if', 'path', ':', 'logger', '.', 'info', '(', "'Adding env manager path: %s'", '%', 'path', ')', 'sys', '.', 'path', '.', 'insert', '(', '0', ',', 'path', ')', 'logger', '.', 'info', '(', "'Loading env manager: %s.%s'", '%', '(', 'module', ',', 'clsname', ')', ')', 'try', ':', '__import__', '(', 'module', ')', 'mod', '=', 'sys', '.', 'modules', '[', 'module', ']', 'cls', '=', 'getattr', '(', 'mod', ',', 'clsname', ')', 'except', 'ImportError', ':', 'logger', '.', 'error', '(', "'Could not import env manager %s'", ',', 'module', ')', 'except', 'KeyError', ':', 'logger', '.', 'error', '(', "'Could not import env manager %s'", ',', 'module', ')', 'except', 'AttributeError', ':', 'msg', '=', "'%s is not a valid class of %s'", '%', '(', 'clsname', ',', 'module', ')', 'logger', '.', 'error', '(', 'msg', ')', 'EnvManager', '.', '_current', '=', 'cls', '(', ')', 'return', 'EnvManager', '.', '_current'] | Returns the current environment manager for the projex system.
:return <EnvManager> | ['Returns', 'the', 'current', 'environment', 'manager', 'for', 'the', 'projex', 'system', '.', ':', 'return', '<EnvManager', '>'] | train | https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/envmanager.py#L263-L299 |
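The lookup above is driven entirely by the three PROJEX_ENVMGR_* environment variables; a hedged sketch of pointing it at a custom manager follows, where the module and class names are invented for illustration.
# Only the environment variable names come from the record; all values are illustrative.
import os
os.environ['PROJEX_ENVMGR_PATH'] = '/opt/myproject/lib'      # optional extra sys.path entry
os.environ['PROJEX_ENVMGR_MODULE'] = 'myproject.envmanager'  # module to import
os.environ['PROJEX_ENVMGR_CLASS'] = 'MyEnvManager'           # class inside that module
from projex.envmanager import EnvManager
manager = EnvManager.current()   # falls back to EnvManager itself if the import fails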
1,633 | pandas-dev/pandas | pandas/core/groupby/generic.py | SeriesGroupBy.count | def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64') | python | def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = ensure_platform_int(ids)
minlength = ngroups or 0
out = np.bincount(ids[mask], minlength=minlength)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64') | ['def', 'count', '(', 'self', ')', ':', 'ids', ',', '_', ',', 'ngroups', '=', 'self', '.', 'grouper', '.', 'group_info', 'val', '=', 'self', '.', 'obj', '.', 'get_values', '(', ')', 'mask', '=', '(', 'ids', '!=', '-', '1', ')', '&', '~', 'isna', '(', 'val', ')', 'ids', '=', 'ensure_platform_int', '(', 'ids', ')', 'minlength', '=', 'ngroups', 'or', '0', 'out', '=', 'np', '.', 'bincount', '(', 'ids', '[', 'mask', ']', ',', 'minlength', '=', 'minlength', ')', 'return', 'Series', '(', 'out', ',', 'index', '=', 'self', '.', 'grouper', '.', 'result_index', ',', 'name', '=', 'self', '.', '_selection_name', ',', 'dtype', '=', "'int64'", ')'] | Compute count of group, excluding missing values | ['Compute', 'count', 'of', 'group', 'excluding', 'missing', 'values'] | train | https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/groupby/generic.py#L1183-L1196 |
1,634 | fabaff/python-mystrom | pymystrom/bulb.py | MyStromBulb.get_firmware | def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware | python | def get_firmware(self):
"""Get the current firmware version."""
self.get_status()
try:
self.firmware = self.data['fw_version']
except TypeError:
self.firmware = 'Unknown'
return self.firmware | ['def', 'get_firmware', '(', 'self', ')', ':', 'self', '.', 'get_status', '(', ')', 'try', ':', 'self', '.', 'firmware', '=', 'self', '.', 'data', '[', "'fw_version'", ']', 'except', 'TypeError', ':', 'self', '.', 'firmware', '=', "'Unknown'", 'return', 'self', '.', 'firmware'] | Get the current firmware version. | ['Get', 'the', 'current', 'firmware', 'version', '.'] | train | https://github.com/fabaff/python-mystrom/blob/86410f8952104651ef76ad37c84c29740c50551e/pymystrom/bulb.py#L64-L72 |
1,635 | gc3-uzh-ch/elasticluster | elasticluster/utils.py | get_num_processors | def get_num_processors():
"""
Return number of online processor cores.
"""
# try different strategies and use the first one that succeeds
try:
return os.cpu_count() # Py3 only
except AttributeError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError: # no multiprocessing?
pass
except NotImplementedError:
# multiprocessing cannot determine CPU count
pass
try:
from subprocess32 import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess32?
pass
try:
from subprocess import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess.check_call (Py 2.6)
pass
raise RuntimeError("Cannot determine number of processors") | python | def get_num_processors():
"""
Return number of online processor cores.
"""
# try different strategies and use the first one that succeeds
try:
return os.cpu_count() # Py3 only
except AttributeError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError: # no multiprocessing?
pass
except NotImplementedError:
# multiprocessing cannot determine CPU count
pass
try:
from subprocess32 import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess32?
pass
try:
from subprocess import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess.check_call (Py 2.6)
pass
raise RuntimeError("Cannot determine number of processors") | ['def', 'get_num_processors', '(', ')', ':', '# try different strategies and use first one that succeeeds', 'try', ':', 'return', 'os', '.', 'cpu_count', '(', ')', '# Py3 only', 'except', 'AttributeError', ':', 'pass', 'try', ':', 'import', 'multiprocessing', 'return', 'multiprocessing', '.', 'cpu_count', '(', ')', 'except', 'ImportError', ':', '# no multiprocessing?', 'pass', 'except', 'NotImplementedError', ':', '# multiprocessing cannot determine CPU count', 'pass', 'try', ':', 'from', 'subprocess32', 'import', 'check_output', 'ncpus', '=', 'check_output', '(', "'nproc'", ')', 'return', 'int', '(', 'ncpus', ')', 'except', 'CalledProcessError', ':', '# no `/usr/bin/nproc`', 'pass', 'except', '(', 'ValueError', ',', 'TypeError', ')', ':', '# unexpected output from `nproc`', 'pass', 'except', 'ImportError', ':', '# no subprocess32?', 'pass', 'try', ':', 'from', 'subprocess', 'import', 'check_output', 'ncpus', '=', 'check_output', '(', "'nproc'", ')', 'return', 'int', '(', 'ncpus', ')', 'except', 'CalledProcessError', ':', '# no `/usr/bin/nproc`', 'pass', 'except', '(', 'ValueError', ',', 'TypeError', ')', ':', '# unexpected output from `nproc`', 'pass', 'except', 'ImportError', ':', '# no subprocess.check_call (Py 2.6)', 'pass', 'raise', 'RuntimeError', '(', '"Cannot determine number of processors"', ')'] | Return number of online processor cores. | ['Return', 'number', 'of', 'online', 'processor', 'cores', '.'] | train | https://github.com/gc3-uzh-ch/elasticluster/blob/e6345633308c76de13b889417df572815aabe744/elasticluster/utils.py#L137-L176 |
1,636 | tableau/document-api-python | tableaudocumentapi/datasource.py | base36encode | def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36 | python | def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36 | ['def', 'base36encode', '(', 'number', ')', ':', 'ALPHABET', '=', '"0123456789abcdefghijklmnopqrstuvwxyz"', 'base36', '=', "''", 'sign', '=', "''", 'if', 'number', '<', '0', ':', 'sign', '=', "'-'", 'number', '=', '-', 'number', 'if', '0', '<=', 'number', '<', 'len', '(', 'ALPHABET', ')', ':', 'return', 'sign', '+', 'ALPHABET', '[', 'number', ']', 'while', 'number', '!=', '0', ':', 'number', ',', 'i', '=', 'divmod', '(', 'number', ',', 'len', '(', 'ALPHABET', ')', ')', 'base36', '=', 'ALPHABET', '[', 'i', ']', '+', 'base36', 'return', 'sign', '+', 'base36'] | Converts an integer into a base36 string. | ['Converts', 'an', 'integer', 'into', 'a', 'base36', 'string', '.'] | train | https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L63-L82 |
1,637 | Opentrons/opentrons | api/src/opentrons/legacy_api/instruments/pipette.py | Pipette.return_tip | def return_tip(self, home_after=True):
"""
Drop the pipette's current tip to its originating tip rack
Notes
-----
This method requires one or more tip-rack :any:`Container`
to be in this Pipette's `tip_racks` list (see :any:`Pipette`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left',
... tip_racks=[tiprack, tiprack2]) # doctest: +SKIP
>>> p300.pick_up_tip() # doctest: +SKIP
>>> p300.aspirate(50, plate[0]) # doctest: +SKIP
>>> p300.dispense(plate[1]) # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot return tip without tip attached.")
if not self.current_tip():
self.robot.add_warning(
'Pipette has no tip to return, dropping in place')
self.drop_tip(self.current_tip(), home_after=home_after)
return self | python | def return_tip(self, home_after=True):
"""
Drop the pipette's current tip to its originating tip rack
Notes
-----
This method requires one or more tip-rack :any:`Container`
to be in this Pipette's `tip_racks` list (see :any:`Pipette`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left',
... tip_racks=[tiprack, tiprack2]) # doctest: +SKIP
>>> p300.pick_up_tip() # doctest: +SKIP
>>> p300.aspirate(50, plate[0]) # doctest: +SKIP
>>> p300.dispense(plate[1]) # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP
"""
if not self.tip_attached:
log.warning("Cannot return tip without tip attached.")
if not self.current_tip():
self.robot.add_warning(
'Pipette has no tip to return, dropping in place')
self.drop_tip(self.current_tip(), home_after=home_after)
return self | ['def', 'return_tip', '(', 'self', ',', 'home_after', '=', 'True', ')', ':', 'if', 'not', 'self', '.', 'tip_attached', ':', 'log', '.', 'warning', '(', '"Cannot return tip without tip attached."', ')', 'if', 'not', 'self', '.', 'current_tip', '(', ')', ':', 'self', '.', 'robot', '.', 'add_warning', '(', "'Pipette has no tip to return, dropping in place'", ')', 'self', '.', 'drop_tip', '(', 'self', '.', 'current_tip', '(', ')', ',', 'home_after', '=', 'home_after', ')', 'return', 'self'] | Drop the pipette's current tip to it's originating tip rack
Notes
-----
This method requires one or more tip-rack :any:`Container`
to be in this Pipette's `tip_racks` list (see :any:`Pipette`)
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left',
... tip_racks=[tiprack, tiprack2]) # doctest: +SKIP
>>> p300.pick_up_tip() # doctest: +SKIP
>>> p300.aspirate(50, plate[0]) # doctest: +SKIP
>>> p300.dispense(plate[1]) # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP | ['Drop', 'the', 'pipette', 's', 'current', 'tip', 'to', 'it', 's', 'originating', 'tip', 'rack'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/legacy_api/instruments/pipette.py#L876-L911 |
1,638 | LordDarkula/chess_py | chess_py/core/board.py | Board.advantage_as_result | def advantage_as_result(self, move, val_scheme):
"""
Calculates advantage after move is played
:type: move: Move
:type: val_scheme: PieceValues
:rtype: double
"""
test_board = cp(self)
test_board.update(move)
return test_board.material_advantage(move.color, val_scheme) | python | def advantage_as_result(self, move, val_scheme):
"""
Calculates advantage after move is played
:type: move: Move
:type: val_scheme: PieceValues
:rtype: double
"""
test_board = cp(self)
test_board.update(move)
return test_board.material_advantage(move.color, val_scheme) | ['def', 'advantage_as_result', '(', 'self', ',', 'move', ',', 'val_scheme', ')', ':', 'test_board', '=', 'cp', '(', 'self', ')', 'test_board', '.', 'update', '(', 'move', ')', 'return', 'test_board', '.', 'material_advantage', '(', 'move', '.', 'color', ',', 'val_scheme', ')'] | Calculates advantage after move is played
:type: move: Move
:type: val_scheme: PieceValues
:rtype: double | ['Calculates', 'advantage', 'after', 'move', 'is', 'played'] | train | https://github.com/LordDarkula/chess_py/blob/14bebc2f8c49ae25c59375cc83d0b38d8ff7281d/chess_py/core/board.py#L202-L212 |
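A minimal sketch of the copy-then-evaluate pattern the record above relies on; `board`, `legal_moves` and `values` are placeholders standing in for chess_py objects, not guaranteed API.
from copy import deepcopy

def best_move_by_material(board, legal_moves, values):
    # Score each candidate on a throwaway copy so the live position is never mutated.
    def score(move):
        trial = deepcopy(board)
        trial.update(move)
        return trial.material_advantage(move.color, values)
    return max(legal_moves, key=score)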
1,639 | buildbot/buildbot | master/buildbot/www/change_hook.py | ChangeHookResource.render_POST | def render_POST(self, request):
"""
Responds to events and starts the build process
different implementations can decide on what methods they will accept
:arguments:
request
the http request object
"""
try:
d = self.getAndSubmitChanges(request)
except Exception:
d = defer.fail()
def ok(_):
request.setResponseCode(202)
request.finish()
def err(why):
code = 500
if why.check(ValueError):
code = 400
msg = unicode2bytes(why.getErrorMessage())
else:
log.err(why, "adding changes from web hook")
msg = b'Error processing changes.'
request.setResponseCode(code, msg)
request.write(msg)
request.finish()
d.addCallbacks(ok, err)
return server.NOT_DONE_YET | python | def render_POST(self, request):
"""
Responds to events and starts the build process
different implementations can decide on what methods they will accept
:arguments:
request
the http request object
"""
try:
d = self.getAndSubmitChanges(request)
except Exception:
d = defer.fail()
def ok(_):
request.setResponseCode(202)
request.finish()
def err(why):
code = 500
if why.check(ValueError):
code = 400
msg = unicode2bytes(why.getErrorMessage())
else:
log.err(why, "adding changes from web hook")
msg = b'Error processing changes.'
request.setResponseCode(code, msg)
request.write(msg)
request.finish()
d.addCallbacks(ok, err)
return server.NOT_DONE_YET | ['def', 'render_POST', '(', 'self', ',', 'request', ')', ':', 'try', ':', 'd', '=', 'self', '.', 'getAndSubmitChanges', '(', 'request', ')', 'except', 'Exception', ':', 'd', '=', 'defer', '.', 'fail', '(', ')', 'def', 'ok', '(', '_', ')', ':', 'request', '.', 'setResponseCode', '(', '202', ')', 'request', '.', 'finish', '(', ')', 'def', 'err', '(', 'why', ')', ':', 'code', '=', '500', 'if', 'why', '.', 'check', '(', 'ValueError', ')', ':', 'code', '=', '400', 'msg', '=', 'unicode2bytes', '(', 'why', '.', 'getErrorMessage', '(', ')', ')', 'else', ':', 'log', '.', 'err', '(', 'why', ',', '"adding changes from web hook"', ')', 'msg', '=', "b'Error processing changes.'", 'request', '.', 'setResponseCode', '(', 'code', ',', 'msg', ')', 'request', '.', 'write', '(', 'msg', ')', 'request', '.', 'finish', '(', ')', 'd', '.', 'addCallbacks', '(', 'ok', ',', 'err', ')', 'return', 'server', '.', 'NOT_DONE_YET'] | Responds to events and starts the build process
different implementations can decide on what methods they will accept
:arguments:
request
the http request object | ['Responds', 'to', 'events', 'and', 'starts', 'the', 'build', 'process', 'different', 'implementations', 'can', 'decide', 'on', 'what', 'methods', 'they', 'will', 'accept'] | train | https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/www/change_hook.py#L70-L102 |
1,640 | saltstack/salt | salt/modules/cloud.py | _get_client | def _get_client():
'''
Return a cloud client
'''
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
pillars=copy.deepcopy(__pillar__.get('cloud', {}))
)
return client | python | def _get_client():
'''
Return a cloud client
'''
client = salt.cloud.CloudClient(
os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
pillars=copy.deepcopy(__pillar__.get('cloud', {}))
)
return client | ['def', '_get_client', '(', ')', ':', 'client', '=', 'salt', '.', 'cloud', '.', 'CloudClient', '(', 'os', '.', 'path', '.', 'join', '(', 'os', '.', 'path', '.', 'dirname', '(', '__opts__', '[', "'conf_file'", ']', ')', ',', "'cloud'", ')', ',', 'pillars', '=', 'copy', '.', 'deepcopy', '(', '__pillar__', '.', 'get', '(', "'cloud'", ',', '{', '}', ')', ')', ')', 'return', 'client'] | Return a cloud client | ['Return', 'a', 'cloud', 'client'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/cloud.py#L41-L49 |
1,641 | silver-castle/mach9 | mach9/request.py | parse_multipart_form | def parse_multipart_form(body, boundary):
'''Parse a request body and return fields and files
:param body: bytes request body
:param boundary: bytes multipart boundary
:return: fields (RequestParameters), files (RequestParameters)
'''
files = RequestParameters()
fields = RequestParameters()
form_parts = body.split(boundary)
for form_part in form_parts[1:-1]:
file_name = None
file_type = None
field_name = None
line_index = 2
line_end_index = 0
while not line_end_index == -1:
line_end_index = form_part.find(b'\r\n', line_index)
form_line = form_part[line_index:line_end_index].decode('utf-8')
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(':')
form_header_field = form_line[0:colon_index].lower()
form_header_value, form_parameters = parse_header(
form_line[colon_index + 2:])
if form_header_field == 'content-disposition':
if 'filename' in form_parameters:
file_name = form_parameters['filename']
field_name = form_parameters.get('name')
elif form_header_field == 'content-type':
file_type = form_header_value
post_data = form_part[line_index:-4]
if file_name or file_type:
file = File(type=file_type, name=file_name, body=post_data)
if field_name in files:
files[field_name].append(file)
else:
files[field_name] = [file]
else:
value = post_data.decode('utf-8')
if field_name in fields:
fields[field_name].append(value)
else:
fields[field_name] = [value]
return fields, files | python | def parse_multipart_form(body, boundary):
'''Parse a request body and return fields and files
:param body: bytes request body
:param boundary: bytes multipart boundary
:return: fields (RequestParameters), files (RequestParameters)
'''
files = RequestParameters()
fields = RequestParameters()
form_parts = body.split(boundary)
for form_part in form_parts[1:-1]:
file_name = None
file_type = None
field_name = None
line_index = 2
line_end_index = 0
while not line_end_index == -1:
line_end_index = form_part.find(b'\r\n', line_index)
form_line = form_part[line_index:line_end_index].decode('utf-8')
line_index = line_end_index + 2
if not form_line:
break
colon_index = form_line.index(':')
form_header_field = form_line[0:colon_index].lower()
form_header_value, form_parameters = parse_header(
form_line[colon_index + 2:])
if form_header_field == 'content-disposition':
if 'filename' in form_parameters:
file_name = form_parameters['filename']
field_name = form_parameters.get('name')
elif form_header_field == 'content-type':
file_type = form_header_value
post_data = form_part[line_index:-4]
if file_name or file_type:
file = File(type=file_type, name=file_name, body=post_data)
if field_name in files:
files[field_name].append(file)
else:
files[field_name] = [file]
else:
value = post_data.decode('utf-8')
if field_name in fields:
fields[field_name].append(value)
else:
fields[field_name] = [value]
return fields, files | ['def', 'parse_multipart_form', '(', 'body', ',', 'boundary', ')', ':', 'files', '=', 'RequestParameters', '(', ')', 'fields', '=', 'RequestParameters', '(', ')', 'form_parts', '=', 'body', '.', 'split', '(', 'boundary', ')', 'for', 'form_part', 'in', 'form_parts', '[', '1', ':', '-', '1', ']', ':', 'file_name', '=', 'None', 'file_type', '=', 'None', 'field_name', '=', 'None', 'line_index', '=', '2', 'line_end_index', '=', '0', 'while', 'not', 'line_end_index', '==', '-', '1', ':', 'line_end_index', '=', 'form_part', '.', 'find', '(', "b'\\r\\n'", ',', 'line_index', ')', 'form_line', '=', 'form_part', '[', 'line_index', ':', 'line_end_index', ']', '.', 'decode', '(', "'utf-8'", ')', 'line_index', '=', 'line_end_index', '+', '2', 'if', 'not', 'form_line', ':', 'break', 'colon_index', '=', 'form_line', '.', 'index', '(', "':'", ')', 'form_header_field', '=', 'form_line', '[', '0', ':', 'colon_index', ']', '.', 'lower', '(', ')', 'form_header_value', ',', 'form_parameters', '=', 'parse_header', '(', 'form_line', '[', 'colon_index', '+', '2', ':', ']', ')', 'if', 'form_header_field', '==', "'content-disposition'", ':', 'if', "'filename'", 'in', 'form_parameters', ':', 'file_name', '=', 'form_parameters', '[', "'filename'", ']', 'field_name', '=', 'form_parameters', '.', 'get', '(', "'name'", ')', 'elif', 'form_header_field', '==', "'content-type'", ':', 'file_type', '=', 'form_header_value', 'post_data', '=', 'form_part', '[', 'line_index', ':', '-', '4', ']', 'if', 'file_name', 'or', 'file_type', ':', 'file', '=', 'File', '(', 'type', '=', 'file_type', ',', 'name', '=', 'file_name', ',', 'body', '=', 'post_data', ')', 'if', 'field_name', 'in', 'files', ':', 'files', '[', 'field_name', ']', '.', 'append', '(', 'file', ')', 'else', ':', 'files', '[', 'field_name', ']', '=', '[', 'file', ']', 'else', ':', 'value', '=', 'post_data', '.', 'decode', '(', "'utf-8'", ')', 'if', 'field_name', 'in', 'fields', ':', 'fields', '[', 'field_name', ']', '.', 'append', '(', 'value', ')', 'else', ':', 'fields', '[', 'field_name', ']', '=', '[', 'value', ']', 'return', 'fields', ',', 'files'] | Parse a request body and returns fields and files
:param body: bytes request body
:param boundary: bytes multipart boundary
:return: fields (RequestParameters), files (RequestParameters) | ['Parse', 'a', 'request', 'body', 'and', 'returns', 'fields', 'and', 'files'] | train | https://github.com/silver-castle/mach9/blob/7a623aab3c70d89d36ade6901b6307e115400c5e/mach9/request.py#L183-L234 |
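A rough usage sketch for the parser above; the boundary and form fields are invented, the import path simply mirrors the file in this record, and the boundary is passed without its leading dashes to match the body.split(boundary) logic.
from mach9.request import parse_multipart_form  # assumed import path

boundary = b'abc123'  # raw boundary value from the Content-Type header
body = (b'--abc123\r\n'
        b'Content-Disposition: form-data; name="username"\r\n'
        b'\r\n'
        b'alice\r\n'
        b'--abc123\r\n'
        b'Content-Disposition: form-data; name="avatar"; filename="a.png"\r\n'
        b'Content-Type: image/png\r\n'
        b'\r\n'
        b'fakepngbytes\r\n'
        b'--abc123--\r\n')

fields, files = parse_multipart_form(body, boundary)
# fields['username'] == ['alice']; files['avatar'][0] carries the name, type and body.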
1,642 | LonamiWebs/Telethon | telethon_generator/parsers/errors.py | _get_class_name | def _get_class_name(error_code):
"""
Gets the corresponding class name for the given error code,
this either being an integer (thus base error name) or str.
"""
if isinstance(error_code, int):
return KNOWN_BASE_CLASSES.get(
error_code, 'RPCError' + str(error_code).replace('-', 'Neg')
)
return snake_to_camel_case(
error_code.replace('FIRSTNAME', 'FIRST_NAME').lower(), suffix='Error') | python | def _get_class_name(error_code):
"""
Gets the corresponding class name for the given error code,
this either being an integer (thus base error name) or str.
"""
if isinstance(error_code, int):
return KNOWN_BASE_CLASSES.get(
error_code, 'RPCError' + str(error_code).replace('-', 'Neg')
)
return snake_to_camel_case(
error_code.replace('FIRSTNAME', 'FIRST_NAME').lower(), suffix='Error') | ['def', '_get_class_name', '(', 'error_code', ')', ':', 'if', 'isinstance', '(', 'error_code', ',', 'int', ')', ':', 'return', 'KNOWN_BASE_CLASSES', '.', 'get', '(', 'error_code', ',', "'RPCError'", '+', 'str', '(', 'error_code', ')', '.', 'replace', '(', "'-'", ',', "'Neg'", ')', ')', 'return', 'snake_to_camel_case', '(', 'error_code', '.', 'replace', '(', "'FIRSTNAME'", ',', "'FIRST_NAME'", ')', '.', 'lower', '(', ')', ',', 'suffix', '=', "'Error'", ')'] | Gets the corresponding class name for the given error code,
this either being an integer (thus base error name) or str. | ['Gets', 'the', 'corresponding', 'class', 'name', 'for', 'the', 'given', 'error', 'code', 'this', 'either', 'being', 'an', 'integer', '(', 'thus', 'base', 'error', 'name', ')', 'or', 'str', '.'] | train | https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon_generator/parsers/errors.py#L19-L30 |
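An illustrative standalone approximation of the naming rule above; snake_to_camel_case and KNOWN_BASE_CLASSES live elsewhere in telethon_generator, so this sketch only imitates the behaviour visible in the record.
def error_class_name(error_code, known_base_classes=None):
    known_base_classes = known_base_classes or {}
    if isinstance(error_code, int):
        # Known integer codes map to base classes; unknown ones fall back to
        # 'RPCError<code>' with '-' spelled as 'Neg'.
        return known_base_classes.get(
            error_code, 'RPCError' + str(error_code).replace('-', 'Neg'))
    parts = error_code.replace('FIRSTNAME', 'FIRST_NAME').lower().split('_')
    return ''.join(part.title() for part in parts) + 'Error'

assert error_class_name('FLOOD_WAIT_X') == 'FloodWaitXError'
assert error_class_name(-503) == 'RPCErrorNeg503'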
1,643 | log2timeline/dfvfs | dfvfs/resolver/context.py | Context.GetFileSystem | def GetFileSystem(self, path_spec):
"""Retrieves a file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileSystem: a file system object or None if not cached.
"""
identifier = self._GetFileSystemCacheIdentifier(path_spec)
return self._file_system_cache.GetObject(identifier) | python | def GetFileSystem(self, path_spec):
"""Retrieves a file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileSystem: a file system object or None if not cached.
"""
identifier = self._GetFileSystemCacheIdentifier(path_spec)
return self._file_system_cache.GetObject(identifier) | ['def', 'GetFileSystem', '(', 'self', ',', 'path_spec', ')', ':', 'identifier', '=', 'self', '.', '_GetFileSystemCacheIdentifier', '(', 'path_spec', ')', 'return', 'self', '.', '_file_system_cache', '.', 'GetObject', '(', 'identifier', ')'] | Retrieves a file system object defined by path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
FileSystem: a file system object or None if not cached. | ['Retrieves', 'a', 'file', 'system', 'object', 'defined', 'by', 'path', 'specification', '.'] | train | https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/resolver/context.py#L114-L124 |
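A hedged usage sketch; it assumes the standard dfvfs path-spec factory, and that a file system was cached on this context earlier, otherwise GetFileSystem simply returns None.
from dfvfs.lib import definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import context as dfvfs_context

resolver_context = dfvfs_context.Context()
os_path_spec = path_spec_factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp')

# The cached FileSystem for this path specification, or None when nothing
# has been opened and cached for it yet.
file_system = resolver_context.GetFileSystem(os_path_spec)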
1,644 | fracpete/python-weka-wrapper | python/weka/classifiers.py | Classifier.distributions_for_instances | def distributions_for_instances(self, data):
"""
Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray
"""
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None | python | def distributions_for_instances(self, data):
"""
Performs predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray
"""
if self.is_batchpredictor:
return arrays.double_matrix_to_ndarray(self.__distributions(data.jobject))
else:
return None | ['def', 'distributions_for_instances', '(', 'self', ',', 'data', ')', ':', 'if', 'self', '.', 'is_batchpredictor', ':', 'return', 'arrays', '.', 'double_matrix_to_ndarray', '(', 'self', '.', '__distributions', '(', 'data', '.', 'jobject', ')', ')', 'else', ':', 'return', 'None'] | Peforms predictions, returning the class distributions.
:param data: the Instances to get the class distributions for
:type data: Instances
:return: the class distribution matrix, None if not a batch predictor
:rtype: ndarray | ['Peforms', 'predictions', 'returning', 'the', 'class', 'distributions', '.'] | train | https://github.com/fracpete/python-weka-wrapper/blob/e865915146faf40d3bbfedb440328d1360541633/python/weka/classifiers.py#L120-L132 |
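A rough end-to-end sketch assuming the usual python-weka-wrapper workflow (a running JVM and an ARFF file on disk); J48 supports batch prediction, so the call returns an (n_instances, n_classes) ndarray rather than None.
import weka.core.jvm as jvm
from weka.core.converters import Loader
from weka.classifiers import Classifier

jvm.start()
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file("iris.arff")   # path is illustrative
data.class_is_last()

cls = Classifier(classname="weka.classifiers.trees.J48")
cls.build_classifier(data)
dists = cls.distributions_for_instances(data)  # one probability row per instance
jvm.stop()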
1,645 | saltstack/salt | salt/modules/ebuildpkg.py | purge | def purge(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Portage does not have a purge, this function calls remove followed
by depclean to emulate a purge process
name
The name of the package to be deleted.
slot
Restrict the remove to a specific slot. Ignored if name is None.
fromrepo
Restrict the remove to a specific repository. Ignored if ``name`` is None.
Multiple Package Options:
pkgs
Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are
ignored if this argument is present. Must be passed as a python list.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package name> slot=4.4
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
ret = remove(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs)
ret.update(depclean(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs))
return ret | python | def purge(name=None, slot=None, fromrepo=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Portage does not have a purge, this function calls remove followed
by depclean to emulate a purge process
name
The name of the package to be deleted.
slot
Restrict the remove to a specific slot. Ignored if name is None.
fromrepo
Restrict the remove to a specific repository. Ignored if ``name`` is None.
Multiple Package Options:
pkgs
Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are
ignored if this argument is present. Must be passed as a python list.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package name> slot=4.4
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
ret = remove(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs)
ret.update(depclean(name=name, slot=slot, fromrepo=fromrepo, pkgs=pkgs))
return ret | ['def', 'purge', '(', 'name', '=', 'None', ',', 'slot', '=', 'None', ',', 'fromrepo', '=', 'None', ',', 'pkgs', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'ret', '=', 'remove', '(', 'name', '=', 'name', ',', 'slot', '=', 'slot', ',', 'fromrepo', '=', 'fromrepo', ',', 'pkgs', '=', 'pkgs', ')', 'ret', '.', 'update', '(', 'depclean', '(', 'name', '=', 'name', ',', 'slot', '=', 'slot', ',', 'fromrepo', '=', 'fromrepo', ',', 'pkgs', '=', 'pkgs', ')', ')', 'return', 'ret'] | .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any emerge commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Portage does not have a purge, this function calls remove followed
by depclean to emulate a purge process
name
The name of the package to be deleted.
slot
Restrict the remove to a specific slot. Ignored if name is None.
fromrepo
Restrict the remove to a specific repository. Ignored if ``name`` is None.
Multiple Package Options:
pkgs
Uninstall multiple packages. ``slot`` and ``fromrepo`` arguments are
ignored if this argument is present. Must be passed as a python list.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package name> slot=4.4
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]' | ['..', 'versionchanged', '::', '2015', '.', '8', '.', '12', '2016', '.', '3', '.', '3', '2016', '.', '11', '.', '0', 'On', 'minions', 'running', 'systemd', '>', '=', '205', 'systemd', '-', 'run', '(', '1', ')', '_', 'is', 'now', 'used', 'to', 'isolate', 'commands', 'which', 'modify', 'installed', 'packages', 'from', 'the', 'salt', '-', 'minion', 'daemon', 's', 'control', 'group', '.', 'This', 'is', 'done', 'to', 'keep', 'systemd', 'from', 'killing', 'any', 'emerge', 'commands', 'spawned', 'by', 'Salt', 'when', 'the', 'salt', '-', 'minion', 'service', 'is', 'restarted', '.', '(', 'see', 'KillMode', 'in', 'the', 'systemd', '.', 'kill', '(', '5', ')', '_', 'manpage', 'for', 'more', 'information', ')', '.', 'If', 'desired', 'usage', 'of', 'systemd', '-', 'run', '(', '1', ')', '_', 'can', 'be', 'suppressed', 'by', 'setting', 'a', ':', 'mod', ':', 'config', 'option', '<salt', '.', 'modules', '.', 'config', '.', 'get', '>', 'called', 'systemd', '.', 'scope', 'with', 'a', 'value', 'of', 'False', '(', 'no', 'quotes', ')', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ebuildpkg.py#L1052-L1102 |
1,646 | bukun/TorCMS | torcms/handlers/post_handler.py | PostHandler._change_kind | def _change_kind(self, post_uid):
'''
To modify the category of the post, and kind.
'''
post_data = self.get_post_data()
logger.info('admin post update: {0}'.format(post_data))
MPost.update_misc(post_uid, kind=post_data['kcat'])
# self.update_category(post_uid)
update_category(post_uid, post_data)
self.redirect('/{0}/{1}'.format(router_post[post_data['kcat']], post_uid)) | python | def _change_kind(self, post_uid):
'''
To modify the category of the post, and kind.
'''
post_data = self.get_post_data()
logger.info('admin post update: {0}'.format(post_data))
MPost.update_misc(post_uid, kind=post_data['kcat'])
# self.update_category(post_uid)
update_category(post_uid, post_data)
self.redirect('/{0}/{1}'.format(router_post[post_data['kcat']], post_uid)) | ['def', '_change_kind', '(', 'self', ',', 'post_uid', ')', ':', 'post_data', '=', 'self', '.', 'get_post_data', '(', ')', 'logger', '.', 'info', '(', "'admin post update: {0}'", '.', 'format', '(', 'post_data', ')', ')', 'MPost', '.', 'update_misc', '(', 'post_uid', ',', 'kind', '=', 'post_data', '[', "'kcat'", ']', ')', '# self.update_category(post_uid)', 'update_category', '(', 'post_uid', ',', 'post_data', ')', 'self', '.', 'redirect', '(', "'/{0}/{1}'", '.', 'format', '(', 'router_post', '[', 'post_data', '[', "'kcat'", ']', ']', ',', 'post_uid', ')', ')'] | To modify the category of the post, and kind. | ['To', 'modify', 'the', 'category', 'of', 'the', 'post', 'and', 'kind', '.'] | train | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_handler.py#L711-L724 |
1,647 | axialmarket/fsq | fsq/done.py | success | def success(item):
'''Successful finish'''
try:
# mv to done
trg_queue = item.queue
os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
os.path.join(fsq_path.done(trg_queue, host=item.host),
item.id))
except AttributeError, e:
# DuckType TypeError'ing
raise TypeError(u'item must be an FSQWorkItem, not:'\
u' {0}'.format(item.__class__.__name__))
except (OSError, IOError, ), e:
raise FSQDoneError(e.errno, u'cannot mv item to done: {0}:'\
u' {1}'.format(item.id, wrap_io_os_err(e))) | python | def success(item):
'''Successful finish'''
try:
# mv to done
trg_queue = item.queue
os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
os.path.join(fsq_path.done(trg_queue, host=item.host),
item.id))
except AttributeError, e:
# DuckType TypeError'ing
raise TypeError(u'item must be an FSQWorkItem, not:'\
u' {0}'.format(item.__class__.__name__))
except (OSError, IOError, ), e:
raise FSQDoneError(e.errno, u'cannot mv item to done: {0}:'\
u' {1}'.format(item.id, wrap_io_os_err(e))) | ['def', 'success', '(', 'item', ')', ':', 'try', ':', '# mv to done', 'trg_queue', '=', 'item', '.', 'queue', 'os', '.', 'rename', '(', 'fsq_path', '.', 'item', '(', 'trg_queue', ',', 'item', '.', 'id', ',', 'host', '=', 'item', '.', 'host', ')', ',', 'os', '.', 'path', '.', 'join', '(', 'fsq_path', '.', 'done', '(', 'trg_queue', ',', 'host', '=', 'item', '.', 'host', ')', ',', 'item', '.', 'id', ')', ')', 'except', 'AttributeError', ',', 'e', ':', "# DuckType TypeError'ing", 'raise', 'TypeError', '(', "u'item must be an FSQWorkItem, not:'", "u' {0}'", '.', 'format', '(', 'item', '.', '__class__', '.', '__name__', ')', ')', 'except', '(', 'OSError', ',', 'IOError', ',', ')', ',', 'e', ':', 'raise', 'FSQDoneError', '(', 'e', '.', 'errno', ',', "u'cannot mv item to done: {0}:'", "u' {1}'", '.', 'format', '(', 'item', '.', 'id', ',', 'wrap_io_os_err', '(', 'e', ')', ')', ')'] | Successful finish | ['Successful', 'finish'] | train | https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/done.py#L74-L88 |
1,648 | rameshg87/pyremotevbox | pyremotevbox/ZSI/TCtimes.py | _localtimezone.tzname | def tzname(self, dt):
"""datetime -> string name of time zone."""
tt = _localtime(_mktime((dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)))
return _time.tzname[tt.tm_isdst > 0] | python | def tzname(self, dt):
"""datetime -> string name of time zone."""
tt = _localtime(_mktime((dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.weekday(), 0, -1)))
return _time.tzname[tt.tm_isdst > 0] | ['def', 'tzname', '(', 'self', ',', 'dt', ')', ':', 'tt', '=', '_localtime', '(', '_mktime', '(', '(', 'dt', '.', 'year', ',', 'dt', '.', 'month', ',', 'dt', '.', 'day', ',', 'dt', '.', 'hour', ',', 'dt', '.', 'minute', ',', 'dt', '.', 'second', ',', 'dt', '.', 'weekday', '(', ')', ',', '0', ',', '-', '1', ')', ')', ')', 'return', '_time', '.', 'tzname', '[', 'tt', '.', 'tm_isdst', '>', '0', ']'] | datetime -> string name of time zone. | ['datetime', '-', '>', 'string', 'name', 'of', 'time', 'zone', '.'] | train | https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TCtimes.py#L40-L44 |
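A standalone stdlib illustration of the same lookup outside the tzinfo subclass; the only assumption is that the platform exposes both DST and non-DST names in time.tzname.
import datetime
import time

dt = datetime.datetime.now()
tt = time.localtime(time.mktime((dt.year, dt.month, dt.day, dt.hour,
                                 dt.minute, dt.second, dt.weekday(), 0, -1)))
print(time.tzname[tt.tm_isdst > 0])  # e.g. 'CEST' while DST is in effect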
1,649 | vimalloc/flask-jwt-extended | flask_jwt_extended/tokens.py | encode_refresh_token | def encode_refresh_token(identity, secret, algorithm, expires_delta, user_claims,
csrf, identity_claim_key, user_claims_key,
json_encoder=None):
"""
Creates a new encoded (utf-8) refresh token.
:param identity: Some identifier used to identify the owner of this token
:param secret: Secret key to encode the JWT with
:param algorithm: Which algorithm to use for the token
:param expires_delta: How far in the future this token should expire
(set to False to disable expiration)
:type expires_delta: datetime.timedelta or False
:param user_claims: Custom claims to include in this token. This data must
be json serializable
:param csrf: Whether to include a csrf double submit claim in this token
(boolean)
:param identity_claim_key: Which key should be used to store the identity
:param user_claims_key: Which key should be used to store the user claims
:return: Encoded refresh token
"""
token_data = {
identity_claim_key: identity,
'type': 'refresh',
}
# Don't add extra data to the token if user_claims is empty.
if user_claims:
token_data[user_claims_key] = user_claims
if csrf:
token_data['csrf'] = _create_csrf_token()
return _encode_jwt(token_data, expires_delta, secret, algorithm,
json_encoder=json_encoder) | python | def encode_refresh_token(identity, secret, algorithm, expires_delta, user_claims,
csrf, identity_claim_key, user_claims_key,
json_encoder=None):
"""
Creates a new encoded (utf-8) refresh token.
:param identity: Some identifier used to identify the owner of this token
:param secret: Secret key to encode the JWT with
:param algorithm: Which algorithm to use for the token
:param expires_delta: How far in the future this token should expire
(set to False to disable expiration)
:type expires_delta: datetime.timedelta or False
:param user_claims: Custom claims to include in this token. This data must
be json serializable
:param csrf: Whether to include a csrf double submit claim in this token
(boolean)
:param identity_claim_key: Which key should be used to store the identity
:param user_claims_key: Which key should be used to store the user claims
:return: Encoded refresh token
"""
token_data = {
identity_claim_key: identity,
'type': 'refresh',
}
# Don't add extra data to the token if user_claims is empty.
if user_claims:
token_data[user_claims_key] = user_claims
if csrf:
token_data['csrf'] = _create_csrf_token()
return _encode_jwt(token_data, expires_delta, secret, algorithm,
json_encoder=json_encoder) | ['def', 'encode_refresh_token', '(', 'identity', ',', 'secret', ',', 'algorithm', ',', 'expires_delta', ',', 'user_claims', ',', 'csrf', ',', 'identity_claim_key', ',', 'user_claims_key', ',', 'json_encoder', '=', 'None', ')', ':', 'token_data', '=', '{', 'identity_claim_key', ':', 'identity', ',', "'type'", ':', "'refresh'", ',', '}', "# Don't add extra data to the token if user_claims is empty.", 'if', 'user_claims', ':', 'token_data', '[', 'user_claims_key', ']', '=', 'user_claims', 'if', 'csrf', ':', 'token_data', '[', "'csrf'", ']', '=', '_create_csrf_token', '(', ')', 'return', '_encode_jwt', '(', 'token_data', ',', 'expires_delta', ',', 'secret', ',', 'algorithm', ',', 'json_encoder', '=', 'json_encoder', ')'] | Creates a new encoded (utf-8) refresh token.
:param identity: Some identifier used to identify the owner of this token
:param secret: Secret key to encode the JWT with
:param algorithm: Which algorithm to use for the token
:param expires_delta: How far in the future this token should expire
(set to False to disable expiration)
:type expires_delta: datetime.timedelta or False
:param user_claims: Custom claims to include in this token. This data must
be json serializable
:param csrf: Whether to include a csrf double submit claim in this token
(boolean)
:param identity_claim_key: Which key should be used to store the identity
:param user_claims_key: Which key should be used to store the user claims
:return: Encoded refresh token | ['Creates', 'a', 'new', 'encoded', '(', 'utf', '-', '8', ')', 'refresh', 'token', '.'] | train | https://github.com/vimalloc/flask-jwt-extended/blob/569d3b89eb5d2586d0cff4581a346229c623cefc/flask_jwt_extended/tokens.py#L80-L112 |
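A hedged illustration of the claim layout the function assembles, encoded here directly with PyJWT; that the real _encode_jwt helper also adds standard claims such as iat/nbf/exp and a jti is an assumption about flask-jwt-extended internals, not something shown in this record.
import datetime
import jwt  # PyJWT

claims = {
    'identity': 42,                        # stored under identity_claim_key
    'type': 'refresh',
    'user_claims': {'role': 'admin'},      # only present when non-empty
    'csrf': 'random-double-submit-value',  # only present when csrf=True
    'exp': datetime.datetime.utcnow() + datetime.timedelta(days=30),
}
refresh_token = jwt.encode(claims, 'change-me-secret', algorithm='HS256')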
1,650 | djgagne/hagelslag | hagelslag/processing/STObject.py | STObject.calc_timestep_statistic | def calc_timestep_statistic(self, statistic, time):
"""
Calculate statistics from the primary attribute of the StObject.
Args:
statistic: statistic being calculated
time: Timestep being investigated
Returns:
Value of the statistic
"""
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
elif statistic == 'median':
stat_val = np.median(self.timesteps[ti].ravel()[ma])
elif 'percentile' in statistic:
per = int(statistic.split("_")[1])
stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)
elif 'dt' in statistic:
stat_name = statistic[:-3]
if ti == 0:
stat_val = 0
else:
stat_val = self.calc_timestep_statistic(stat_name, time) -\
self.calc_timestep_statistic(stat_name, time - 1)
else:
stat_val = np.nan
return stat_val | python | def calc_timestep_statistic(self, statistic, time):
"""
Calculate statistics from the primary attribute of the StObject.
Args:
statistic: statistic being calculated
time: Timestep being investigated
Returns:
Value of the statistic
"""
ti = np.where(self.times == time)[0][0]
ma = np.where(self.masks[ti].ravel() == 1)
if statistic in ['mean', 'max', 'min', 'std', 'ptp']:
stat_val = getattr(self.timesteps[ti].ravel()[ma], statistic)()
elif statistic == 'median':
stat_val = np.median(self.timesteps[ti].ravel()[ma])
elif 'percentile' in statistic:
per = int(statistic.split("_")[1])
stat_val = np.percentile(self.timesteps[ti].ravel()[ma], per)
elif 'dt' in statistic:
stat_name = statistic[:-3]
if ti == 0:
stat_val = 0
else:
stat_val = self.calc_timestep_statistic(stat_name, time) -\
self.calc_timestep_statistic(stat_name, time - 1)
else:
stat_val = np.nan
return stat_val | ['def', 'calc_timestep_statistic', '(', 'self', ',', 'statistic', ',', 'time', ')', ':', 'ti', '=', 'np', '.', 'where', '(', 'self', '.', 'times', '==', 'time', ')', '[', '0', ']', '[', '0', ']', 'ma', '=', 'np', '.', 'where', '(', 'self', '.', 'masks', '[', 'ti', ']', '.', 'ravel', '(', ')', '==', '1', ')', 'if', 'statistic', 'in', '[', "'mean'", ',', "'max'", ',', "'min'", ',', "'std'", ',', "'ptp'", ']', ':', 'stat_val', '=', 'getattr', '(', 'self', '.', 'timesteps', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ',', 'statistic', ')', '(', ')', 'elif', 'statistic', '==', "'median'", ':', 'stat_val', '=', 'np', '.', 'median', '(', 'self', '.', 'timesteps', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ')', 'elif', "'percentile'", 'in', 'statistic', ':', 'per', '=', 'int', '(', 'statistic', '.', 'split', '(', '"_"', ')', '[', '1', ']', ')', 'stat_val', '=', 'np', '.', 'percentile', '(', 'self', '.', 'timesteps', '[', 'ti', ']', '.', 'ravel', '(', ')', '[', 'ma', ']', ',', 'per', ')', 'elif', "'dt'", 'in', 'statistic', ':', 'stat_name', '=', 'statistic', '[', ':', '-', '3', ']', 'if', 'ti', '==', '0', ':', 'stat_val', '=', '0', 'else', ':', 'stat_val', '=', 'self', '.', 'calc_timestep_statistic', '(', 'stat_name', ',', 'time', ')', '-', 'self', '.', 'calc_timestep_statistic', '(', 'stat_name', ',', 'time', '-', '1', ')', 'else', ':', 'stat_val', '=', 'np', '.', 'nan', 'return', 'stat_val'] | Calculate statistics from the primary attribute of the StObject.
Args:
statistic: statistic being calculated
time: Timestep being investigated
Returns:
Value of the statistic | ['Calculate', 'statistics', 'from', 'the', 'primary', 'attribute', 'of', 'the', 'StObject', '.'] | train | https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/STObject.py#L421-L450 |
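A numpy-only sketch of the masked-statistic idea for a single timestep; the field and mask arrays are made up, and only the 'percentile_NN' parsing from the method above is reproduced.
import numpy as np

field = np.random.rand(10, 10)      # one timestep of the tracked variable
mask = np.zeros((10, 10), dtype=int)
mask[3:7, 3:7] = 1                  # 1 marks pixels inside the object
inside = field.ravel()[mask.ravel() == 1]

statistic = "percentile_90"
if statistic.startswith("percentile"):
    value = np.percentile(inside, int(statistic.split("_")[1]))
else:
    value = getattr(inside, statistic)()  # e.g. 'mean', 'max', 'std', 'ptp'
print(value)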
1,651 | senaite/senaite.core | bika/lims/subscribers/auditlog.py | ObjectTransitionedEventHandler | def ObjectTransitionedEventHandler(obj, event):
"""Object has been transitioned to a new state
"""
# only snapshot supported objects
if not supports_snapshots(obj):
return
# default transition entry
entry = {
"modified": DateTime().ISO(),
"action": event.action,
}
# get the last history item
history = api.get_review_history(obj, rev=True)
if history:
entry = history[0]
# make transitions also a modification entry
timestamp = entry.pop("time", DateTime())
entry["modified"] = timestamp.ISO()
entry["action"] = event.action
# take a new snapshot
take_snapshot(obj, **entry)
# reindex the object in the auditlog catalog
reindex_object(obj) | python | def ObjectTransitionedEventHandler(obj, event):
"""Object has been transitioned to a new state
"""
# only snapshot supported objects
if not supports_snapshots(obj):
return
# default transition entry
entry = {
"modified": DateTime().ISO(),
"action": event.action,
}
# get the last history item
history = api.get_review_history(obj, rev=True)
if history:
entry = history[0]
# make transitions also a modification entry
timestamp = entry.pop("time", DateTime())
entry["modified"] = timestamp.ISO()
entry["action"] = event.action
# take a new snapshot
take_snapshot(obj, **entry)
# reindex the object in the auditlog catalog
reindex_object(obj) | ['def', 'ObjectTransitionedEventHandler', '(', 'obj', ',', 'event', ')', ':', '# only snapshot supported objects', 'if', 'not', 'supports_snapshots', '(', 'obj', ')', ':', 'return', '# default transition entry', 'entry', '=', '{', '"modified"', ':', 'DateTime', '(', ')', '.', 'ISO', '(', ')', ',', '"action"', ':', 'event', '.', 'action', ',', '}', '# get the last history item', 'history', '=', 'api', '.', 'get_review_history', '(', 'obj', ',', 'rev', '=', 'True', ')', 'if', 'history', ':', 'entry', '=', 'history', '[', '0', ']', '# make transitions also a modification entry', 'timestamp', '=', 'entry', '.', 'pop', '(', '"time"', ',', 'DateTime', '(', ')', ')', 'entry', '[', '"modified"', ']', '=', 'timestamp', '.', 'ISO', '(', ')', 'entry', '[', '"action"', ']', '=', 'event', '.', 'action', '# take a new snapshot', 'take_snapshot', '(', 'obj', ',', '*', '*', 'entry', ')', '# reindex the object in the auditlog catalog', 'reindex_object', '(', 'obj', ')'] | Object has been transitioned to an new state | ['Object', 'has', 'been', 'transitioned', 'to', 'an', 'new', 'state'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/subscribers/auditlog.py#L27-L54 |
1,652 | google/transitfeed | transitfeed/loader.py | Loader._GetUtf8Contents | def _GetUtf8Contents(self, file_name):
"""Check for errors in file_name and return a string for csv reader."""
contents = self._FileContents(file_name)
if not contents: # Missing file
return
# Check for errors that will prevent csv.reader from working
if len(contents) >= 2 and contents[0:2] in (codecs.BOM_UTF16_BE,
codecs.BOM_UTF16_LE):
self._problems.FileFormat("appears to be encoded in utf-16", (file_name, ))
# Convert and continue, so we can find more errors
contents = codecs.getdecoder('utf-16')(contents)[0].encode('utf-8')
null_index = contents.find('\0')
if null_index != -1:
# It is easier to get some surrounding text than calculate the exact
# row_num
m = re.search(r'.{,20}\0.{,20}', contents, re.DOTALL)
self._problems.FileFormat(
"contains a null in text \"%s\" at byte %d" %
(codecs.getencoder('string_escape')(m.group()), null_index + 1),
(file_name, ))
return
# strip out any UTF-8 Byte Order Marker (otherwise it'll be
# treated as part of the first column name, causing a mis-parse)
contents = contents.lstrip(codecs.BOM_UTF8)
return contents | python | def _GetUtf8Contents(self, file_name):
"""Check for errors in file_name and return a string for csv reader."""
contents = self._FileContents(file_name)
if not contents: # Missing file
return
# Check for errors that will prevent csv.reader from working
if len(contents) >= 2 and contents[0:2] in (codecs.BOM_UTF16_BE,
codecs.BOM_UTF16_LE):
self._problems.FileFormat("appears to be encoded in utf-16", (file_name, ))
# Convert and continue, so we can find more errors
contents = codecs.getdecoder('utf-16')(contents)[0].encode('utf-8')
null_index = contents.find('\0')
if null_index != -1:
# It is easier to get some surrounding text than calculate the exact
# row_num
m = re.search(r'.{,20}\0.{,20}', contents, re.DOTALL)
self._problems.FileFormat(
"contains a null in text \"%s\" at byte %d" %
(codecs.getencoder('string_escape')(m.group()), null_index + 1),
(file_name, ))
return
# strip out any UTF-8 Byte Order Marker (otherwise it'll be
# treated as part of the first column name, causing a mis-parse)
contents = contents.lstrip(codecs.BOM_UTF8)
return contents | ['def', '_GetUtf8Contents', '(', 'self', ',', 'file_name', ')', ':', 'contents', '=', 'self', '.', '_FileContents', '(', 'file_name', ')', 'if', 'not', 'contents', ':', '# Missing file', 'return', '# Check for errors that will prevent csv.reader from working', 'if', 'len', '(', 'contents', ')', '>=', '2', 'and', 'contents', '[', '0', ':', '2', ']', 'in', '(', 'codecs', '.', 'BOM_UTF16_BE', ',', 'codecs', '.', 'BOM_UTF16_LE', ')', ':', 'self', '.', '_problems', '.', 'FileFormat', '(', '"appears to be encoded in utf-16"', ',', '(', 'file_name', ',', ')', ')', '# Convert and continue, so we can find more errors', 'contents', '=', 'codecs', '.', 'getdecoder', '(', "'utf-16'", ')', '(', 'contents', ')', '[', '0', ']', '.', 'encode', '(', "'utf-8'", ')', 'null_index', '=', 'contents', '.', 'find', '(', "'\\0'", ')', 'if', 'null_index', '!=', '-', '1', ':', '# It is easier to get some surrounding text than calculate the exact', '# row_num', 'm', '=', 're', '.', 'search', '(', "r'.{,20}\\0.{,20}'", ',', 'contents', ',', 're', '.', 'DOTALL', ')', 'self', '.', '_problems', '.', 'FileFormat', '(', '"contains a null in text \\"%s\\" at byte %d"', '%', '(', 'codecs', '.', 'getencoder', '(', "'string_escape'", ')', '(', 'm', '.', 'group', '(', ')', ')', ',', 'null_index', '+', '1', ')', ',', '(', 'file_name', ',', ')', ')', 'return', "# strip out any UTF-8 Byte Order Marker (otherwise it'll be", '# treated as part of the first column name, causing a mis-parse)', 'contents', '=', 'contents', '.', 'lstrip', '(', 'codecs', '.', 'BOM_UTF8', ')', 'return', 'contents'] | Check for errors in file_name and return a string for csv reader. | ['Check', 'for', 'errors', 'in', 'file_name', 'and', 'return', 'a', 'string', 'for', 'csv', 'reader', '.'] | train | https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/loader.py#L118-L145 |
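A self-contained sketch of the same encoding checks on raw bytes (Python 3 flavour; the original is Python 2 and reports problems through self._problems instead of raising).
import codecs

def sniff_feed_text(raw: bytes) -> bytes:
    # UTF-16 input is detected by its BOM and re-encoded as UTF-8.
    if raw[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE):
        raw = raw.decode('utf-16').encode('utf-8')
    if b'\0' in raw:
        raise ValueError('feed file contains a NUL byte')
    # Drop a UTF-8 BOM so it cannot leak into the first column name.
    if raw.startswith(codecs.BOM_UTF8):
        raw = raw[len(codecs.BOM_UTF8):]
    return raw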
1,653 | lambdamusic/Ontospy | ontospy/core/ontospy.py | Ontospy.ontologyClassTree | def ontologyClassTree(self):
"""
Returns a dict representing the ontology tree
Top level = {0:[top classes]}
Multi inheritance is represented explicitly
"""
treedict = {}
if self.all_classes:
treedict[0] = self.toplayer_classes
for element in self.all_classes:
if element.children():
treedict[element] = element.children()
return treedict
return treedict | python | def ontologyClassTree(self):
"""
Returns a dict representing the ontology tree
Top level = {0:[top classes]}
Multi inheritance is represented explicitly
"""
treedict = {}
if self.all_classes:
treedict[0] = self.toplayer_classes
for element in self.all_classes:
if element.children():
treedict[element] = element.children()
return treedict
return treedict | ['def', 'ontologyClassTree', '(', 'self', ')', ':', 'treedict', '=', '{', '}', 'if', 'self', '.', 'all_classes', ':', 'treedict', '[', '0', ']', '=', 'self', '.', 'toplayer_classes', 'for', 'element', 'in', 'self', '.', 'all_classes', ':', 'if', 'element', '.', 'children', '(', ')', ':', 'treedict', '[', 'element', ']', '=', 'element', '.', 'children', '(', ')', 'return', 'treedict', 'return', 'treedict'] | Returns a dict representing the ontology tree
Top level = {0:[top classes]}
Multi inheritance is represented explicitly | ['Returns', 'a', 'dict', 'representing', 'the', 'ontology', 'tree', 'Top', 'level', '=', '{', '0', ':', '[', 'top', 'classes', ']', '}', 'Multi', 'inheritance', 'is', 'represented', 'explicitly'] | train | https://github.com/lambdamusic/Ontospy/blob/eb46cb13792b2b87f21babdf976996318eec7571/ontospy/core/ontospy.py#L953-L966 |
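A small sketch of how the returned dict can be walked; key 0 holds the top layer and every other key maps a class to its direct children, so classes with several parents are visited once per parent. The `ontology` instance below is assumed, not part of the record.
def print_class_tree(treedict, node=0, depth=0):
    for child in treedict.get(node, []):
        print('    ' * depth + str(child))
        print_class_tree(treedict, child, depth + 1)

print_class_tree(ontology.ontologyClassTree())  # `ontology`: a loaded Ontospy model (assumed)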
1,654 | spookylukey/django-paypal | paypal/pro/models.py | PayPalNVP.set_flag | def set_flag(self, info, code=None):
"""Flag this instance for investigation."""
self.flag = True
self.flag_info += info
if code is not None:
self.flag_code = code | python | def set_flag(self, info, code=None):
"""Flag this instance for investigation."""
self.flag = True
self.flag_info += info
if code is not None:
self.flag_code = code | ['def', 'set_flag', '(', 'self', ',', 'info', ',', 'code', '=', 'None', ')', ':', 'self', '.', 'flag', '=', 'True', 'self', '.', 'flag_info', '+=', 'info', 'if', 'code', 'is', 'not', 'None', ':', 'self', '.', 'flag_code', '=', 'code'] | Flag this instance for investigation. | ['Flag', 'this', 'instance', 'for', 'investigation', '.'] | train | https://github.com/spookylukey/django-paypal/blob/b07d0a3ad91b5c5fe7bb27be3e5d70aabcdef76f/paypal/pro/models.py#L123-L128 |
1,655 | gccxml/pygccxml | pygccxml/parser/project_reader.py | project_reader_t.get_os_file_names | def get_os_file_names(files):
"""
returns file names
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list
"""
fnames = []
for f in files:
if utils.is_str(f):
fnames.append(f)
elif isinstance(f, file_configuration_t):
if f.content_type in (
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE,
file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE):
fnames.append(f.data)
else:
pass
return fnames | python | def get_os_file_names(files):
"""
returns file names
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list
"""
fnames = []
for f in files:
if utils.is_str(f):
fnames.append(f)
elif isinstance(f, file_configuration_t):
if f.content_type in (
file_configuration_t.CONTENT_TYPE.STANDARD_SOURCE_FILE,
file_configuration_t.CONTENT_TYPE.CACHED_SOURCE_FILE):
fnames.append(f.data)
else:
pass
return fnames | ['def', 'get_os_file_names', '(', 'files', ')', ':', 'fnames', '=', '[', ']', 'for', 'f', 'in', 'files', ':', 'if', 'utils', '.', 'is_str', '(', 'f', ')', ':', 'fnames', '.', 'append', '(', 'f', ')', 'elif', 'isinstance', '(', 'f', ',', 'file_configuration_t', ')', ':', 'if', 'f', '.', 'content_type', 'in', '(', 'file_configuration_t', '.', 'CONTENT_TYPE', '.', 'STANDARD_SOURCE_FILE', ',', 'file_configuration_t', '.', 'CONTENT_TYPE', '.', 'CACHED_SOURCE_FILE', ')', ':', 'fnames', '.', 'append', '(', 'f', '.', 'data', ')', 'else', ':', 'pass', 'return', 'fnames'] | returns file names
:param files: list of strings and\\or :class:`file_configuration_t`
instances.
:type files: list | ['returns', 'file', 'names'] | train | https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/project_reader.py#L213-L234 |
1,656 | thespacedoctor/crowdedText | crowdedText/crowdedText.py | adjust_text | def adjust_text(x, y, texts, ax=None, expand_text=(1.2, 1.2),
expand_points=(1.2, 1.2), autoalign=True, va='center',
ha='center', force_text=1., force_points=1.,
lim=100, precision=0, only_move={}, text_from_text=True,
text_from_points=True, save_steps=False, save_prefix='',
save_format='png', add_step_numbers=True, draggable=True, repel_from_axes=False, min_arrow_sep=0.0,
*args, **kwargs):
"""
Iteratively adjusts the locations of texts. First moves all texts that are
outside the axes limits inside. Then in each iteration moves all texts away
from each other and from points. In the end hides texts and substitutes
them with annotations to link them to the respective points.
Args:
x (seq): x-coordinates of labelled points
y (seq): y-coordinates of labelled points
texts (list): a list of text.Text objects to adjust
ax (obj): axes object with the plot; if not provided is determined by
plt.gca()
expand_text (seq): a tuple/list/... with 2 numbers (x, y) to expand
texts when repelling them from each other; default (1.2, 1.2)
expand_points (seq): a tuple/list/... with 2 numbers (x, y) to expand
texts when repelling them from points; default (1.2, 1.2)
autoalign (bool): If True, the best alignment of all texts will be
determined automatically before running the iterative adjustment;
if 'x' will only align horizontally, if 'y' - vertically; overrides
va and ha
va (str): vertical alignment of texts
ha (str): horizontal alignment of texts
force_text (float): the repel force from texts is multiplied by this
value; default 0.5
force_points (float): the repel force from points is multiplied by this
value; default 0.5
lim (int): limit of number of iterations
precision (float): up to which sum of all overlaps along both x and y
to iterate; may need to increase for complicated situations;
default 0, so no overlaps with anything.
only_move (dict): a dict to restrict movement of texts to only certain
axis. Valid keys are 'points' and 'text', for each of them valid
values are 'x', 'y' and 'xy'. This way you can forbid moving texts
along either of the axes due to overlaps with points, but let it
happen if there is an overlap with texts: only_move={'points':'y',
'text':'xy'}. Default: None, so everything is allowed.
text_from_text (bool): whether to repel texts from each other; default
True
text_from_points (bool): whether to repel texts from points; default
True; can be helpful to switch off in extremely crowded plots
save_steps (bool): whether to save intermediate steps as images;
default False
save_prefix (str): a path and/or prefix to the saved steps; default ''
save_format (str): a format to save the steps into; default 'png'
*args and **kwargs: any arguments will be fed into plt.annotate after
all the optimization is done just for plotting
add_step_numbers (bool): whether to add step numbers as titles to the
images of saving steps
draggable (bool): whether to make the annotations draggable; default
True
"""
if ax is None:
ax = plt.gca()
r = ax.get_figure().canvas.get_renderer()
orig_xy = [text.get_position() for text in texts]
orig_x = [xy[0] for xy in orig_xy]
orig_y = [xy[1] for xy in orig_xy]
for text in texts:
text.set_va(va)
text.set_ha(ha)
if save_steps:
if add_step_numbers:
plt.title('0a')
plt.savefig(save_prefix + '0a.' + save_format, format=save_format)
if autoalign:
if autoalign is not True:
texts = optimally_align_text(x, y, texts,
direction=autoalign,
expand=expand_points, renderer=r,
ax=ax)
else:
texts = optimally_align_text(orig_x, orig_y, texts,
expand=expand_points, renderer=r,
ax=ax)
if save_steps:
if add_step_numbers:
plt.title('0b')
plt.savefig(save_prefix + '0b.' + save_format, format=save_format)
if repel_from_axes is True:
texts = repel_text_from_axes(
texts, ax, renderer=r, expand=expand_points)
history = [np.inf] * 5
for i in xrange(lim):
q1, q2 = np.inf, np.inf
if text_from_text:
d_x_text, d_y_text, q1 = repel_text(texts, renderer=r, ax=ax,
expand=expand_text)
else:
d_x_text, d_y_text, q1 = [0] * len(texts), [0] * len(texts), 0
if text_from_points:
d_x_points, d_y_points, q2 = repel_text_from_points(x, y, texts,
ax=ax, renderer=r,
expand=expand_points)
else:
d_x_points, d_y_points, q1 = [0] * len(texts), [0] * len(texts), 0
if only_move:
if 'text' in only_move:
if 'x' not in only_move['text']:
d_x_text = np.zeros_like(d_x_text)
if 'y' not in only_move['text']:
d_y_text = np.zeros_like(d_y_text)
if 'points' in only_move:
if 'x' not in only_move['points']:
d_x_points = np.zeros_like(d_x_points)
if 'y' not in only_move['points']:
d_y_points = np.zeros_like(d_y_points)
dx = np.array(d_x_text) + np.array(d_x_points)
dy = np.array(d_y_text) + np.array(d_y_points)
q = round(np.sum(np.array([q1, q2])[np.array([q1, q2]) < np.inf]), 5)
if q > precision and q < np.max(history):
history.pop(0)
history.append(q)
move_texts(texts, dx * force_text, dy * force_points,
bboxes=get_bboxes(texts, r, (1, 1)), ax=ax)
if save_steps:
if add_step_numbers:
plt.title(i + 1)
plt.savefig(save_prefix + str(i + 1) + '.' + save_format,
format=save_format)
else:
break
bboxes = get_bboxes(texts, r, (1, 1))
originLW = kwargs["arrowprops"]["lw"]
for j, text in enumerate(texts):
cx, cy = get_midpoint(bboxes[j])
one = (orig_xy[j][0] - cx)**2
two = (orig_xy[j][1] - cy)**2
sep = (one + two)**0.5
print text.get_text(), sep
try:
if sep < min_arrow_sep:
kwargs["arrowprops"]["lw"] = 0.
else:
kwargs["arrowprops"]["lw"] = originLW
except Exception, e:
print e
a = ax.annotate(text.get_text(), xy=(orig_xy[j]),
xytext=text.get_position(), *args, **kwargs)
a.__dict__.update(text.__dict__)
if draggable:
a.draggable()
texts[j].remove()
if save_steps:
if add_step_numbers:
plt.title(i + 1)
plt.savefig(save_prefix + str(i + 1) + '.' +
save_format, format=save_format) | python | def adjust_text(x, y, texts, ax=None, expand_text=(1.2, 1.2),
expand_points=(1.2, 1.2), autoalign=True, va='center',
ha='center', force_text=1., force_points=1.,
lim=100, precision=0, only_move={}, text_from_text=True,
text_from_points=True, save_steps=False, save_prefix='',
save_format='png', add_step_numbers=True, draggable=True, repel_from_axes=False, min_arrow_sep=0.0,
*args, **kwargs):
"""
Iteratively adjusts the locations of texts. First moves all texts that are
outside the axes limits inside. Then in each iteration moves all texts away
from each other and from points. In the end hides texts and substitutes
them with annotations to link them to the respective points.
Args:
x (seq): x-coordinates of labelled points
y (seq): y-coordinates of labelled points
texts (list): a list of text.Text objects to adjust
ax (obj): axes object with the plot; if not provided is determined by
plt.gca()
expand_text (seq): a tuple/list/... with 2 numbers (x, y) to expand
texts when repelling them from each other; default (1.2, 1.2)
expand_points (seq): a tuple/list/... with 2 numbers (x, y) to expand
texts when repelling them from points; default (1.2, 1.2)
autoalign (bool): If True, the best alignment of all texts will be
determined automatically before running the iterative adjustment;
if 'x' will only align horizontally, if 'y' - vertically; overrides
va and ha
va (str): vertical alignment of texts
ha (str): horizontal alignment of texts
force_text (float): the repel force from texts is multiplied by this
value; default 0.5
force_points (float): the repel force from points is multiplied by this
value; default 0.5
lim (int): limit of number of iterations
precision (float): up to which sum of all overlaps along both x and y
to iterate; may need to increase for complicated situations;
default 0, so no overlaps with anything.
only_move (dict): a dict to restrict movement of texts to only certain
axis. Valid keys are 'points' and 'text', for each of them valid
values are 'x', 'y' and 'xy'. This way you can forbid moving texts
along either of the axes due to overlaps with points, but let it
happen if there is an overlap with texts: only_move={'points':'y',
'text':'xy'}. Default: None, so everything is allowed.
text_from_text (bool): whether to repel texts from each other; default
True
text_from_points (bool): whether to repel texts from points; default
True; can be helpful to switch off in extremely crowded plots
save_steps (bool): whether to save intermediate steps as images;
default False
save_prefix (str): a path and/or prefix to the saved steps; default ''
save_format (str): a format to save the steps into; default 'png'
*args and **kwargs: any arguments will be fed into plt.annotate after
all the optimization is done just for plotting
add_step_numbers (bool): whether to add step numbers as titles to the
images of saving steps
draggable (bool): whether to make the annotations draggable; default
True
"""
if ax is None:
ax = plt.gca()
r = ax.get_figure().canvas.get_renderer()
orig_xy = [text.get_position() for text in texts]
orig_x = [xy[0] for xy in orig_xy]
orig_y = [xy[1] for xy in orig_xy]
for text in texts:
text.set_va(va)
text.set_ha(ha)
if save_steps:
if add_step_numbers:
plt.title('0a')
plt.savefig(save_prefix + '0a.' + save_format, format=save_format)
if autoalign:
if autoalign is not True:
texts = optimally_align_text(x, y, texts,
direction=autoalign,
expand=expand_points, renderer=r,
ax=ax)
else:
texts = optimally_align_text(orig_x, orig_y, texts,
expand=expand_points, renderer=r,
ax=ax)
if save_steps:
if add_step_numbers:
plt.title('0b')
plt.savefig(save_prefix + '0b.' + save_format, format=save_format)
if repel_from_axes is True:
texts = repel_text_from_axes(
texts, ax, renderer=r, expand=expand_points)
history = [np.inf] * 5
for i in xrange(lim):
q1, q2 = np.inf, np.inf
if text_from_text:
d_x_text, d_y_text, q1 = repel_text(texts, renderer=r, ax=ax,
expand=expand_text)
else:
d_x_text, d_y_text, q1 = [0] * len(texts), [0] * len(texts), 0
if text_from_points:
d_x_points, d_y_points, q2 = repel_text_from_points(x, y, texts,
ax=ax, renderer=r,
expand=expand_points)
else:
d_x_points, d_y_points, q1 = [0] * len(texts), [0] * len(texts), 0
if only_move:
if 'text' in only_move:
if 'x' not in only_move['text']:
d_x_text = np.zeros_like(d_x_text)
if 'y' not in only_move['text']:
d_y_text = np.zeros_like(d_y_text)
if 'points' in only_move:
if 'x' not in only_move['points']:
d_x_points = np.zeros_like(d_x_points)
if 'y' not in only_move['points']:
d_y_points = np.zeros_like(d_y_points)
dx = np.array(d_x_text) + np.array(d_x_points)
dy = np.array(d_y_text) + np.array(d_y_points)
q = round(np.sum(np.array([q1, q2])[np.array([q1, q2]) < np.inf]), 5)
if q > precision and q < np.max(history):
history.pop(0)
history.append(q)
move_texts(texts, dx * force_text, dy * force_points,
bboxes=get_bboxes(texts, r, (1, 1)), ax=ax)
if save_steps:
if add_step_numbers:
plt.title(i + 1)
plt.savefig(save_prefix + str(i + 1) + '.' + save_format,
format=save_format)
else:
break
bboxes = get_bboxes(texts, r, (1, 1))
originLW = kwargs["arrowprops"]["lw"]
for j, text in enumerate(texts):
cx, cy = get_midpoint(bboxes[j])
one = (orig_xy[j][0] - cx)**2
two = (orig_xy[j][1] - cy)**2
sep = (one + two)**0.5
print text.get_text(), sep
try:
if sep < min_arrow_sep:
kwargs["arrowprops"]["lw"] = 0.
else:
kwargs["arrowprops"]["lw"] = originLW
except Exception, e:
print e
a = ax.annotate(text.get_text(), xy=(orig_xy[j]),
xytext=text.get_position(), *args, **kwargs)
a.__dict__.update(text.__dict__)
if draggable:
a.draggable()
texts[j].remove()
if save_steps:
if add_step_numbers:
plt.title(i + 1)
plt.savefig(save_prefix + str(i + 1) + '.' +
save_format, format=save_format) | ['def', 'adjust_text', '(', 'x', ',', 'y', ',', 'texts', ',', 'ax', '=', 'None', ',', 'expand_text', '=', '(', '1.2', ',', '1.2', ')', ',', 'expand_points', '=', '(', '1.2', ',', '1.2', ')', ',', 'autoalign', '=', 'True', ',', 'va', '=', "'center'", ',', 'ha', '=', "'center'", ',', 'force_text', '=', '1.', ',', 'force_points', '=', '1.', ',', 'lim', '=', '100', ',', 'precision', '=', '0', ',', 'only_move', '=', '{', '}', ',', 'text_from_text', '=', 'True', ',', 'text_from_points', '=', 'True', ',', 'save_steps', '=', 'False', ',', 'save_prefix', '=', "''", ',', 'save_format', '=', "'png'", ',', 'add_step_numbers', '=', 'True', ',', 'draggable', '=', 'True', ',', 'repel_from_axes', '=', 'False', ',', 'min_arrow_sep', '=', '0.0', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'if', 'ax', 'is', 'None', ':', 'ax', '=', 'plt', '.', 'gca', '(', ')', 'r', '=', 'ax', '.', 'get_figure', '(', ')', '.', 'canvas', '.', 'get_renderer', '(', ')', 'orig_xy', '=', '[', 'text', '.', 'get_position', '(', ')', 'for', 'text', 'in', 'texts', ']', 'orig_x', '=', '[', 'xy', '[', '0', ']', 'for', 'xy', 'in', 'orig_xy', ']', 'orig_y', '=', '[', 'xy', '[', '1', ']', 'for', 'xy', 'in', 'orig_xy', ']', 'for', 'text', 'in', 'texts', ':', 'text', '.', 'set_va', '(', 'va', ')', 'text', '.', 'set_ha', '(', 'ha', ')', 'if', 'save_steps', ':', 'if', 'add_step_numbers', ':', 'plt', '.', 'title', '(', "'0a'", ')', 'plt', '.', 'savefig', '(', 'save_prefix', '+', "'0a.'", '+', 'save_format', ',', 'format', '=', 'save_format', ')', 'if', 'autoalign', ':', 'if', 'autoalign', 'is', 'not', 'True', ':', 'texts', '=', 'optimally_align_text', '(', 'x', ',', 'y', ',', 'texts', ',', 'direction', '=', 'autoalign', ',', 'expand', '=', 'expand_points', ',', 'renderer', '=', 'r', ',', 'ax', '=', 'ax', ')', 'else', ':', 'texts', '=', 'optimally_align_text', '(', 'orig_x', ',', 'orig_y', ',', 'texts', ',', 'expand', '=', 'expand_points', ',', 'renderer', '=', 'r', ',', 'ax', '=', 'ax', ')', 'if', 'save_steps', ':', 'if', 'add_step_numbers', ':', 'plt', '.', 'title', '(', "'0b'", ')', 'plt', '.', 'savefig', '(', 'save_prefix', '+', "'0b.'", '+', 'save_format', ',', 'format', '=', 'save_format', ')', 'if', 'repel_from_axes', 'is', 'True', ':', 'texts', '=', 'repel_text_from_axes', '(', 'texts', ',', 'ax', ',', 'renderer', '=', 'r', ',', 'expand', '=', 'expand_points', ')', 'history', '=', '[', 'np', '.', 'inf', ']', '*', '5', 'for', 'i', 'in', 'xrange', '(', 'lim', ')', ':', 'q1', ',', 'q2', '=', 'np', '.', 'inf', ',', 'np', '.', 'inf', 'if', 'text_from_text', ':', 'd_x_text', ',', 'd_y_text', ',', 'q1', '=', 'repel_text', '(', 'texts', ',', 'renderer', '=', 'r', ',', 'ax', '=', 'ax', ',', 'expand', '=', 'expand_text', ')', 'else', ':', 'd_x_text', ',', 'd_y_text', ',', 'q1', '=', '[', '0', ']', '*', 'len', '(', 'texts', ')', ',', '[', '0', ']', '*', 'len', '(', 'texts', ')', ',', '0', 'if', 'text_from_points', ':', 'd_x_points', ',', 'd_y_points', ',', 'q2', '=', 'repel_text_from_points', '(', 'x', ',', 'y', ',', 'texts', ',', 'ax', '=', 'ax', ',', 'renderer', '=', 'r', ',', 'expand', '=', 'expand_points', ')', 'else', ':', 'd_x_points', ',', 'd_y_points', ',', 'q1', '=', '[', '0', ']', '*', 'len', '(', 'texts', ')', ',', '[', '0', ']', '*', 'len', '(', 'texts', ')', ',', '0', 'if', 'only_move', ':', 'if', "'text'", 'in', 'only_move', ':', 'if', "'x'", 'not', 'in', 'only_move', '[', "'text'", ']', ':', 'd_x_text', '=', 'np', '.', 'zeros_like', '(', 'd_x_text', ')', 'if', "'y'", 'not', 'in', 
'only_move', '[', "'text'", ']', ':', 'd_y_text', '=', 'np', '.', 'zeros_like', '(', 'd_y_text', ')', 'if', "'points'", 'in', 'only_move', ':', 'if', "'x'", 'not', 'in', 'only_move', '[', "'points'", ']', ':', 'd_x_points', '=', 'np', '.', 'zeros_like', '(', 'd_x_points', ')', 'if', "'y'", 'not', 'in', 'only_move', '[', "'points'", ']', ':', 'd_y_points', '=', 'np', '.', 'zeros_like', '(', 'd_y_points', ')', 'dx', '=', 'np', '.', 'array', '(', 'd_x_text', ')', '+', 'np', '.', 'array', '(', 'd_x_points', ')', 'dy', '=', 'np', '.', 'array', '(', 'd_y_text', ')', '+', 'np', '.', 'array', '(', 'd_y_points', ')', 'q', '=', 'round', '(', 'np', '.', 'sum', '(', 'np', '.', 'array', '(', '[', 'q1', ',', 'q2', ']', ')', '[', 'np', '.', 'array', '(', '[', 'q1', ',', 'q2', ']', ')', '<', 'np', '.', 'inf', ']', ')', ',', '5', ')', 'if', 'q', '>', 'precision', 'and', 'q', '<', 'np', '.', 'max', '(', 'history', ')', ':', 'history', '.', 'pop', '(', '0', ')', 'history', '.', 'append', '(', 'q', ')', 'move_texts', '(', 'texts', ',', 'dx', '*', 'force_text', ',', 'dy', '*', 'force_points', ',', 'bboxes', '=', 'get_bboxes', '(', 'texts', ',', 'r', ',', '(', '1', ',', '1', ')', ')', ',', 'ax', '=', 'ax', ')', 'if', 'save_steps', ':', 'if', 'add_step_numbers', ':', 'plt', '.', 'title', '(', 'i', '+', '1', ')', 'plt', '.', 'savefig', '(', 'save_prefix', '+', 'str', '(', 'i', '+', '1', ')', '+', "'.'", '+', 'save_format', ',', 'format', '=', 'save_format', ')', 'else', ':', 'break', 'bboxes', '=', 'get_bboxes', '(', 'texts', ',', 'r', ',', '(', '1', ',', '1', ')', ')', 'originLW', '=', 'kwargs', '[', '"arrowprops"', ']', '[', '"lw"', ']', 'for', 'j', ',', 'text', 'in', 'enumerate', '(', 'texts', ')', ':', 'cx', ',', 'cy', '=', 'get_midpoint', '(', 'bboxes', '[', 'j', ']', ')', 'one', '=', '(', 'orig_xy', '[', 'j', ']', '[', '0', ']', '-', 'cx', ')', '**', '2', 'two', '=', '(', 'orig_xy', '[', 'j', ']', '[', '1', ']', '-', 'cy', ')', '**', '2', 'sep', '=', '(', 'one', '+', 'two', ')', '**', '0.5', 'print', 'text', '.', 'get_text', '(', ')', ',', 'sep', 'try', ':', 'if', 'sep', '<', 'min_arrow_sep', ':', 'kwargs', '[', '"arrowprops"', ']', '[', '"lw"', ']', '=', '0.', 'else', ':', 'kwargs', '[', '"arrowprops"', ']', '[', '"lw"', ']', '=', 'originLW', 'except', 'Exception', ',', 'e', ':', 'print', 'e', 'a', '=', 'ax', '.', 'annotate', '(', 'text', '.', 'get_text', '(', ')', ',', 'xy', '=', '(', 'orig_xy', '[', 'j', ']', ')', ',', 'xytext', '=', 'text', '.', 'get_position', '(', ')', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', 'a', '.', '__dict__', '.', 'update', '(', 'text', '.', '__dict__', ')', 'if', 'draggable', ':', 'a', '.', 'draggable', '(', ')', 'texts', '[', 'j', ']', '.', 'remove', '(', ')', 'if', 'save_steps', ':', 'if', 'add_step_numbers', ':', 'plt', '.', 'title', '(', 'i', '+', '1', ')', 'plt', '.', 'savefig', '(', 'save_prefix', '+', 'str', '(', 'i', '+', '1', ')', '+', "'.'", '+', 'save_format', ',', 'format', '=', 'save_format', ')'] | Iteratively adjusts the locations of texts. First moves all texts that are
outside the axes limits inside. Then in each iteration moves all texts away
from each other and from points. In the end hides texts and substitutes
them with annotations to link them to the respective points.
Args:
x (seq): x-coordinates of labelled points
y (seq): y-coordinates of labelled points
texts (list): a list of text.Text objects to adjust
ax (obj): axes object with the plot; if not provided is determined by
plt.gca()
expand_text (seq): a tuple/list/... with 2 numbers (x, y) to expand
texts when repelling them from each other; default (1.2, 1.2)
expand_points (seq): a tuple/list/... with 2 numbers (x, y) to expand
texts when repelling them from points; default (1.2, 1.2)
autoalign (bool): If True, the best alignment of all texts will be
determined automatically before running the iterative adjustment;
if 'x' will only align horizontally, if 'y' - vertically; overrides
va and ha
va (str): vertical alignment of texts
ha (str): horizontal alignment of texts
force_text (float): the repel force from texts is multiplied by this
value; default 0.5
force_points (float): the repel force from points is multiplied by this
value; default 0.5
lim (int): limit of number of iterations
precision (float): up to which sum of all overlaps along both x and y
to iterate; may need to increase for complicated situations;
default 0, so no overlaps with anything.
only_move (dict): a dict to restrict movement of texts to only certain
axis. Valid keys are 'points' and 'text', for each of them valid
values are 'x', 'y' and 'xy'. This way you can forbid moving texts
along either of the axes due to overlaps with points, but let it
happen if there is an overlap with texts: only_move={'points':'y',
'text':'xy'}. Default: None, so everything is allowed.
text_from_text (bool): whether to repel texts from each other; default
True
text_from_points (bool): whether to repel texts from points; default
True; can be helpful to switch off in extremely crowded plots
save_steps (bool): whether to save intermediate steps as images;
default False
save_prefix (str): a path and/or prefix to the saved steps; default ''
save_format (str): a format to save the steps into; default 'png'
*args and **kwargs: any arguments will be fed into plt.annotate after
all the optimization is done just for plotting
add_step_numbers (bool): whether to add step numbers as titles to the
images of saving steps
draggable (bool): whether to make the annotations draggable; default
True | ['Iteratively', 'adjusts', 'the', 'locations', 'of', 'texts', '.', 'First', 'moves', 'all', 'texts', 'that', 'are', 'outside', 'the', 'axes', 'limits', 'inside', '.', 'Then', 'in', 'each', 'iteration', 'moves', 'all', 'texts', 'away', 'from', 'each', 'other', 'and', 'from', 'points', '.', 'In', 'the', 'end', 'hides', 'texts', 'and', 'substitutes', 'them', 'with', 'annotations', 'to', 'link', 'them', 'to', 'the', 'rescpective', 'points', '.'] | train | https://github.com/thespacedoctor/crowdedText/blob/9a2e756b165aab7b48b2fad975b6a17d1ef48965/crowdedText/crowdedText.py#L243-L399 |
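A minimal usage sketch for the adjust_text function above (the data and label strings are hypothetical; an arrowprops dict with an "lw" key appears to be required because the function reads kwargs["arrowprops"]["lw"]; the implementation uses Python 2 syntax such as xrange and print statements, so the sketch assumes a Python 2 environment):

import numpy as np
import matplotlib.pyplot as plt

np.random.seed(0)
x, y = np.random.rand(15), np.random.rand(15)
fig, ax = plt.subplots()
ax.scatter(x, y)
# one label per point; adjust_text repels them from the points and from each other
texts = [ax.text(x[i], y[i], 'pt%d' % i) for i in range(len(x))]
adjust_text(x, y, texts, ax=ax, arrowprops=dict(arrowstyle='-', lw=0.5))
plt.show()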
1,657 | nicolargo/sxxexx | sxxexx/sxxexx.py | series_t411.buildlist | def buildlist(self, category=tpb.CATEGORIES.VIDEO.TV_SHOWS, limit=1000):
"""
Build the torrent list
Return a list of lists sorted by seeders count
Id can be used to retrieve the torrent associated with this id
[[<title>, <Seeders>, <id>] ...]
"""
try:
s = self.source.search(self.title.lower(), limit)
except Exception as e:
logging.error("Can not send search request to the t411 server")
logging.error(e.message)
sys.exit(1)
try:
for t in s.items():
pass
except:
logging.error("t411 server returned an invalid result")
sys.exit(1)
torrentlist = []
for torrent in s['torrents']:
if isinstance(torrent, dict):
#logging.debug("Compare regex to: %s" % t.title.lower())
if (re.search(self.regexp, torrent['name'].lower()) and (int(torrent['seeders']) >= self.seeders_min)):
# logging.debug("Matched")
torrentlist.append((torrent['name'], torrent['seeders'], torrent['id']))
logging.debug("Found %d matching items " % (len(torrentlist)))
# Return the list
return torrentlist | python | def buildlist(self, category=tpb.CATEGORIES.VIDEO.TV_SHOWS, limit=1000):
"""
Build the torrent list
Return a list of lists sorted by seeders count
Id can be used to retrieve the torrent associated with this id
[[<title>, <Seeders>, <id>] ...]
"""
try:
s = self.source.search(self.title.lower(), limit)
except Exception as e:
logging.error("Can not send search request to the t411 server")
logging.error(e.message)
sys.exit(1)
try:
for t in s.items():
pass
except:
logging.error("t411 server returned an invalid result")
sys.exit(1)
torrentlist = []
for torrent in s['torrents']:
if isinstance(torrent, dict):
#logging.debug("Compare regex to: %s" % t.title.lower())
if (re.search(self.regexp, torrent['name'].lower()) and (int(torrent['seeders']) >= self.seeders_min)):
# logging.debug("Matched")
torrentlist.append((torrent['name'], torrent['seeders'], torrent['id']))
logging.debug("Found %d matching items " % (len(torrentlist)))
# Return the list
return torrentlist | ['def', 'buildlist', '(', 'self', ',', 'category', '=', 'tpb', '.', 'CATEGORIES', '.', 'VIDEO', '.', 'TV_SHOWS', ',', 'limit', '=', '1000', ')', ':', 'try', ':', 's', '=', 'self', '.', 'source', '.', 'search', '(', 'self', '.', 'title', '.', 'lower', '(', ')', ',', 'limit', ')', 'except', 'Exception', 'as', 'e', ':', 'logging', '.', 'error', '(', '"Can not send search request to the t411 server"', ')', 'logging', '.', 'error', '(', 'e', '.', 'message', ')', 'sys', '.', 'exit', '(', '1', ')', 'try', ':', 'for', 't', 'in', 's', '.', 'items', '(', ')', ':', 'pass', 'except', ':', 'logging', '.', 'error', '(', '"t411 server returned an invalid result"', ')', 'sys', '.', 'exit', '(', '1', ')', 'torrentlist', '=', '[', ']', 'for', 'torrent', 'in', 's', '[', "'torrents'", ']', ':', 'if', 'isinstance', '(', 'torrent', ',', 'dict', ')', ':', '#logging.debug("Compare regex to: %s" % t.title.lower())', 'if', '(', 're', '.', 'search', '(', 'self', '.', 'regexp', ',', 'torrent', '[', "'name'", ']', '.', 'lower', '(', ')', ')', 'and', '(', 'int', '(', 'torrent', '[', "'seeders'", ']', ')', '>=', 'self', '.', 'seeders_min', ')', ')', ':', '# logging.debug("Matched")', 'torrentlist', '.', 'append', '(', '(', 'torrent', '[', "'name'", ']', ',', 'torrent', '[', "'seeders'", ']', ',', 'torrent', '[', "'id'", ']', ')', ')', 'logging', '.', 'debug', '(', '"Found %d matching items "', '%', '(', 'len', '(', 'torrentlist', ')', ')', ')', '# Return the list', 'return', 'torrentlist'] | Build the torrent list
Return a list of lists sorted by seeders count
Id can be used to retrieve the torrent associated with this id
[[<title>, <Seeders>, <id>] ...] | ['Build', 'the', 'torrent', 'list', 'Return', 'list', 'of', 'list', 'sorted', 'by', 'seeders', 'count', 'Id', 'can', 'be', 'used', 'to', 'retrieve', 'torrent', 'associate', 'with', 'this', 'id', '[[', '<title', '>', '<Seeders', '>', '<id', '>', ']', '...', ']'] | train | https://github.com/nicolargo/sxxexx/blob/88a896658609f4d56620bfe919284fa74ef560ac/sxxexx/sxxexx.py#L220-L254 |
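A hedged usage sketch; the constructor of series_t411 is not shown in this row, so the finder object below is hypothetical:

# Hypothetical: 'finder' is an already constructed series_t411 instance.
torrents = finder.buildlist(limit=500)
for name, seeders, torrent_id in torrents:
    print("%s (%s seeders, id=%s)" % (name, seeders, torrent_id))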
1,658 | ThreatConnect-Inc/tcex | tcex/tcex_ti/tcex_ti.py | TcExTi.entities | def entities(self, tc_data, resource_type):
"""
Yields an entity. Takes either a list of indicators/groups or an individual
indicator/group response.
example formats:
{
"status":"Success",
"data":{
"resultCount":984240,
"address":[
{
"id":4222035,
"ownerName":"System",
"dateAdded":"2019-03-28T10:32:05-04:00",
"lastModified":"2019-03-28T11:02:46-04:00",
"rating":4,
"confidence":90,
"threatAssessRating":4,
"threatAssessConfidence":90,
"webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
"ip":"221.123.32.14"
},
{
"id":4221517,
"ownerName":"System",
"dateAdded":"2018-11-05T14:24:54-05:00",
"lastModified":"2019-03-07T12:38:36-05:00",
"threatAssessRating":0,
"threatAssessConfidence":0,
"webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.12",
"ip":"221.123.32.12"
}
]
}
}
or:
{
"status": "Success",
"data": {
"address": {
"id": 4222035,
"owner": {
"id": 1,
"name": "System",
"type": "Organization"
},
"dateAdded": "2019-03-28T10:32:05-04:00",
"lastModified": "2019-03-28T11:02:46-04:00",
"rating": 4,
"confidence": 90,
"threatAssessRating": 4,
"threatAssessConfidence": 90,
"webLink": "{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
"ip": "221.123.32.14"
}
}
}
Args:
json_response:
Yields:
"""
if not isinstance(tc_data, list):
tc_data = [tc_data]
for d in tc_data:
entity = {'id': d.get('id'), 'webLink': d.get('webLink')}
values = []
value = None
if resource_type in self.tcex.group_types:
r = self.tcex.ti.group(group_type=resource_type, name=d.get('name'))
value = d.get('name')
elif resource_type in self.tcex.indicator_types:
r = self.tcex.ti.indicator(indicator_type=resource_type)
r._set_unique_id(d)
value = r.unique_id
elif resource_type.lower() in ['victim']:
r = self.tcex.ti.victim(d.get('name'))
value = d.get('name')
else:
self.tcex.handle_error(925, ['type', 'entities', 'type', 'type', resource_type])
if 'summary' in d:
values.append(d.get('summary'))
else:
values.append(value)
entity['value'] = ' : '.join(values)
if r.is_group() or r.is_indicator():
if 'owner' in d:
entity['ownerName'] = d['owner']['name']
else:
entity['ownerName'] = d.get('ownerName')
entity['dateAdded'] = d.get('dateAdded')
if r.is_victim():
entity['ownerName'] = d.get('org')
if r.is_indicator():
entity['confidence'] = d.get('confidence')
entity['rating'] = d.get('rating')
entity['threatAssessConfidence'] = d.get('threatAssessConfidence')
entity['threatAssessRating'] = d.get('threatAssessRating')
entity['dateLastModified'] = d.get('lastModified')
# type
if d.get('type') is not None:
entity['type'] = d.get('type')
else:
entity['type'] = resource_type
yield entity | python | def entities(self, tc_data, resource_type):
"""
Yields an entity. Takes either a list of indicators/groups or an individual
indicator/group response.
example formats:
{
"status":"Success",
"data":{
"resultCount":984240,
"address":[
{
"id":4222035,
"ownerName":"System",
"dateAdded":"2019-03-28T10:32:05-04:00",
"lastModified":"2019-03-28T11:02:46-04:00",
"rating":4,
"confidence":90,
"threatAssessRating":4,
"threatAssessConfidence":90,
"webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
"ip":"221.123.32.14"
},
{
"id":4221517,
"ownerName":"System",
"dateAdded":"2018-11-05T14:24:54-05:00",
"lastModified":"2019-03-07T12:38:36-05:00",
"threatAssessRating":0,
"threatAssessConfidence":0,
"webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.12",
"ip":"221.123.32.12"
}
]
}
}
or:
{
"status": "Success",
"data": {
"address": {
"id": 4222035,
"owner": {
"id": 1,
"name": "System",
"type": "Organization"
},
"dateAdded": "2019-03-28T10:32:05-04:00",
"lastModified": "2019-03-28T11:02:46-04:00",
"rating": 4,
"confidence": 90,
"threatAssessRating": 4,
"threatAssessConfidence": 90,
"webLink": "{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
"ip": "221.123.32.14"
}
}
}
Args:
json_response:
Yields:
"""
if not isinstance(tc_data, list):
tc_data = [tc_data]
for d in tc_data:
entity = {'id': d.get('id'), 'webLink': d.get('webLink')}
values = []
value = None
if resource_type in self.tcex.group_types:
r = self.tcex.ti.group(group_type=resource_type, name=d.get('name'))
value = d.get('name')
elif resource_type in self.tcex.indicator_types:
r = self.tcex.ti.indicator(indicator_type=resource_type)
r._set_unique_id(d)
value = r.unique_id
elif resource_type.lower() in ['victim']:
r = self.tcex.ti.victim(d.get('name'))
value = d.get('name')
else:
self.tcex.handle_error(925, ['type', 'entities', 'type', 'type', resource_type])
if 'summary' in d:
values.append(d.get('summary'))
else:
values.append(value)
entity['value'] = ' : '.join(values)
if r.is_group() or r.is_indicator():
if 'owner' in d:
entity['ownerName'] = d['owner']['name']
else:
entity['ownerName'] = d.get('ownerName')
entity['dateAdded'] = d.get('dateAdded')
if r.is_victim():
entity['ownerName'] = d.get('org')
if r.is_indicator():
entity['confidence'] = d.get('confidence')
entity['rating'] = d.get('rating')
entity['threatAssessConfidence'] = d.get('threatAssessConfidence')
entity['threatAssessRating'] = d.get('threatAssessRating')
entity['dateLastModified'] = d.get('lastModified')
# type
if d.get('type') is not None:
entity['type'] = d.get('type')
else:
entity['type'] = resource_type
yield entity | ['def', 'entities', '(', 'self', ',', 'tc_data', ',', 'resource_type', ')', ':', 'if', 'not', 'isinstance', '(', 'tc_data', ',', 'list', ')', ':', 'tc_data', '=', '[', 'tc_data', ']', 'for', 'd', 'in', 'tc_data', ':', 'entity', '=', '{', "'id'", ':', 'd', '.', 'get', '(', "'id'", ')', ',', "'webLink'", ':', 'd', '.', 'get', '(', "'webLink'", ')', '}', 'values', '=', '[', ']', 'value', '=', 'None', 'if', 'resource_type', 'in', 'self', '.', 'tcex', '.', 'group_types', ':', 'r', '=', 'self', '.', 'tcex', '.', 'ti', '.', 'group', '(', 'group_type', '=', 'resource_type', ',', 'name', '=', 'd', '.', 'get', '(', "'name'", ')', ')', 'value', '=', 'd', '.', 'get', '(', "'name'", ')', 'elif', 'resource_type', 'in', 'self', '.', 'tcex', '.', 'indicator_types', ':', 'r', '=', 'self', '.', 'tcex', '.', 'ti', '.', 'indicator', '(', 'indicator_type', '=', 'resource_type', ')', 'r', '.', '_set_unique_id', '(', 'd', ')', 'value', '=', 'r', '.', 'unique_id', 'elif', 'resource_type', '.', 'lower', '(', ')', 'in', '[', "'victim'", ']', ':', 'r', '=', 'self', '.', 'tcex', '.', 'ti', '.', 'victim', '(', 'd', '.', 'get', '(', "'name'", ')', ')', 'value', '=', 'd', '.', 'get', '(', "'name'", ')', 'else', ':', 'self', '.', 'tcex', '.', 'handle_error', '(', '925', ',', '[', "'type'", ',', "'entities'", ',', "'type'", ',', "'type'", ',', 'resource_type', ']', ')', 'if', "'summary'", 'in', 'd', ':', 'values', '.', 'append', '(', 'd', '.', 'get', '(', "'summary'", ')', ')', 'else', ':', 'values', '.', 'append', '(', 'value', ')', 'entity', '[', "'value'", ']', '=', "' : '", '.', 'join', '(', 'values', ')', 'if', 'r', '.', 'is_group', '(', ')', 'or', 'r', '.', 'is_indicator', '(', ')', ':', 'if', "'owner'", 'in', 'd', ':', 'entity', '[', "'ownerName'", ']', '=', 'd', '[', "'owner'", ']', '[', "'name'", ']', 'else', ':', 'entity', '[', "'ownerName'", ']', '=', 'd', '.', 'get', '(', "'ownerName'", ')', 'entity', '[', "'dateAdded'", ']', '=', 'd', '.', 'get', '(', "'dateAdded'", ')', 'if', 'r', '.', 'is_victim', '(', ')', ':', 'entity', '[', "'ownerName'", ']', '=', 'd', '.', 'get', '(', "'org'", ')', 'if', 'r', '.', 'is_indicator', '(', ')', ':', 'entity', '[', "'confidence'", ']', '=', 'd', '.', 'get', '(', "'confidence'", ')', 'entity', '[', "'rating'", ']', '=', 'd', '.', 'get', '(', "'rating'", ')', 'entity', '[', "'threatAssessConfidence'", ']', '=', 'd', '.', 'get', '(', "'threatAssessConfidence'", ')', 'entity', '[', "'threatAssessRating'", ']', '=', 'd', '.', 'get', '(', "'threatAssessRating'", ')', 'entity', '[', "'dateLastModified'", ']', '=', 'd', '.', 'get', '(', "'lastModified'", ')', '# type', 'if', 'd', '.', 'get', '(', "'type'", ')', 'is', 'not', 'None', ':', 'entity', '[', "'type'", ']', '=', 'd', '.', 'get', '(', "'type'", ')', 'else', ':', 'entity', '[', "'type'", ']', '=', 'resource_type', 'yield', 'entity'] | Yields a entity. Takes both a list of indicators/groups or a individual
indicator/group response.
example formats:
{
"status":"Success",
"data":{
"resultCount":984240,
"address":[
{
"id":4222035,
"ownerName":"System",
"dateAdded":"2019-03-28T10:32:05-04:00",
"lastModified":"2019-03-28T11:02:46-04:00",
"rating":4,
"confidence":90,
"threatAssessRating":4,
"threatAssessConfidence":90,
"webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
"ip":"221.123.32.14"
},
{
"id":4221517,
"ownerName":"System",
"dateAdded":"2018-11-05T14:24:54-05:00",
"lastModified":"2019-03-07T12:38:36-05:00",
"threatAssessRating":0,
"threatAssessConfidence":0,
"webLink":"{host}/auth/indicators/details/address.xhtml?address=221.123.32.12",
"ip":"221.123.32.12"
}
]
}
}
or:
{
"status": "Success",
"data": {
"address": {
"id": 4222035,
"owner": {
"id": 1,
"name": "System",
"type": "Organization"
},
"dateAdded": "2019-03-28T10:32:05-04:00",
"lastModified": "2019-03-28T11:02:46-04:00",
"rating": 4,
"confidence": 90,
"threatAssessRating": 4,
"threatAssessConfidence": 90,
"webLink": "{host}/auth/indicators/details/address.xhtml?address=221.123.32.14",
"ip": "221.123.32.14"
}
}
}
Args:
json_response:
Yields: | ['Yields', 'a', 'entity', '.', 'Takes', 'both', 'a', 'list', 'of', 'indicators', '/', 'groups', 'or', 'a', 'individual', 'indicator', '/', 'group', 'response', '.'] | train | https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/tcex_ti.py#L422-L535 |
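A hedged sketch of consuming the generator with the address payload from the docstring example, assuming a configured TcEx instance that exposes this class as tcex.ti:

# 'response' stands in for the first JSON body shown in the docstring above.
addresses = response['data']['address']
for entity in tcex.ti.entities(addresses, 'Address'):
    print(entity['type'], entity['value'], entity.get('rating'))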
1,659 | spyder-ide/spyder | spyder/plugins/explorer/widgets.py | DirView.open_outside_spyder | def open_outside_spyder(self, fnames):
"""Open file outside Spyder with the appropriate application
If this does not work, fall back to opening the unknown file in Spyder as a text file"""
for path in sorted(fnames):
path = file_uri(path)
ok = programs.start_file(path)
if not ok:
self.sig_edit.emit(path) | python | def open_outside_spyder(self, fnames):
"""Open file outside Spyder with the appropriate application
If this does not work, fall back to opening the unknown file in Spyder as a text file"""
for path in sorted(fnames):
path = file_uri(path)
ok = programs.start_file(path)
if not ok:
self.sig_edit.emit(path) | ['def', 'open_outside_spyder', '(', 'self', ',', 'fnames', ')', ':', 'for', 'path', 'in', 'sorted', '(', 'fnames', ')', ':', 'path', '=', 'file_uri', '(', 'path', ')', 'ok', '=', 'programs', '.', 'start_file', '(', 'path', ')', 'if', 'not', 'ok', ':', 'self', '.', 'sig_edit', '.', 'emit', '(', 'path', ')'] | Open file outside Spyder with the appropriate application
If this does not work, opening unknown file in Spyder, as text file | ['Open', 'file', 'outside', 'Spyder', 'with', 'the', 'appropriate', 'application', 'If', 'this', 'does', 'not', 'work', 'opening', 'unknown', 'file', 'in', 'Spyder', 'as', 'text', 'file'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/explorer/widgets.py#L569-L576 |
1,660 | AndrewAnnex/SpiceyPy | spiceypy/spiceypy.py | bodn2c | def bodn2c(name):
"""
Translate the name of a body or object to the corresponding SPICE
integer ID code.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodn2c_c.html
:param name: Body name to be translated into a SPICE ID code.
:type name: str
:return: SPICE integer ID code for the named body.
:rtype: int
"""
name = stypes.stringToCharP(name)
code = ctypes.c_int(0)
found = ctypes.c_int(0)
libspice.bodn2c_c(name, ctypes.byref(code), ctypes.byref(found))
return code.value, bool(found.value) | python | def bodn2c(name):
"""
Translate the name of a body or object to the corresponding SPICE
integer ID code.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodn2c_c.html
:param name: Body name to be translated into a SPICE ID code.
:type name: str
:return: SPICE integer ID code for the named body.
:rtype: int
"""
name = stypes.stringToCharP(name)
code = ctypes.c_int(0)
found = ctypes.c_int(0)
libspice.bodn2c_c(name, ctypes.byref(code), ctypes.byref(found))
return code.value, bool(found.value) | ['def', 'bodn2c', '(', 'name', ')', ':', 'name', '=', 'stypes', '.', 'stringToCharP', '(', 'name', ')', 'code', '=', 'ctypes', '.', 'c_int', '(', '0', ')', 'found', '=', 'ctypes', '.', 'c_int', '(', '0', ')', 'libspice', '.', 'bodn2c_c', '(', 'name', ',', 'ctypes', '.', 'byref', '(', 'code', ')', ',', 'ctypes', '.', 'byref', '(', 'found', ')', ')', 'return', 'code', '.', 'value', ',', 'bool', '(', 'found', '.', 'value', ')'] | Translate the name of a body or object to the corresponding SPICE
integer ID code.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodn2c_c.html
:param name: Body name to be translated into a SPICE ID code.
:type name: str
:return: SPICE integer ID code for the named body.
:rtype: int | ['Translate', 'the', 'name', 'of', 'a', 'body', 'or', 'object', 'to', 'the', 'corresponding', 'SPICE', 'integer', 'ID', 'code', '.'] | train | https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L471-L487 |
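A short usage sketch; with the signature shown here the function returns a (code, found) pair (newer SpiceyPy releases raise an exception instead of returning the found flag):

import spiceypy

code, found = spiceypy.bodn2c("EARTH")
if found:
    print(code)  # 399, the NAIF ID for Earth
else:
    print("no ID code associated with that name")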
1,661 | observermedia/django-wordpress-rest | wordpress/loading.py | WPAPILoader.process_post_author | def process_post_author(self, bulk_mode, api_author):
"""
Create or update an Author related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_author: the data in the api for the Author
:return: the up-to-date Author object
"""
# get from the ref data map if in bulk mode, else look it up from the db
if bulk_mode:
author = self.ref_data_map["authors"].get(api_author["ID"])
if author:
self.update_existing_author(author, api_author)
else:
# if the author wasn't found (likely because it's a Byline or guest author, not a user),
# go ahead and create the author now
author = Author.objects.create(site_id=self.site_id,
wp_id=api_author["ID"],
**self.api_object_data("author", api_author))
else:
# do a direct db lookup if we're not in bulk mode
author, created = self.get_or_create_author(api_author)
if author and not created:
self.update_existing_author(author, api_author)
# add to the ref data map so we don't try to create it again
if author:
self.ref_data_map["authors"][api_author["ID"]] = author
return author | python | def process_post_author(self, bulk_mode, api_author):
"""
Create or update an Author related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_author: the data in the api for the Author
:return: the up-to-date Author object
"""
# get from the ref data map if in bulk mode, else look it up from the db
if bulk_mode:
author = self.ref_data_map["authors"].get(api_author["ID"])
if author:
self.update_existing_author(author, api_author)
else:
# if the author wasn't found (likely because it's a Byline or guest author, not a user),
# go ahead and create the author now
author = Author.objects.create(site_id=self.site_id,
wp_id=api_author["ID"],
**self.api_object_data("author", api_author))
else:
# do a direct db lookup if we're not in bulk mode
author, created = self.get_or_create_author(api_author)
if author and not created:
self.update_existing_author(author, api_author)
# add to the ref data map so we don't try to create it again
if author:
self.ref_data_map["authors"][api_author["ID"]] = author
return author | ['def', 'process_post_author', '(', 'self', ',', 'bulk_mode', ',', 'api_author', ')', ':', '# get from the ref data map if in bulk mode, else look it up from the db', 'if', 'bulk_mode', ':', 'author', '=', 'self', '.', 'ref_data_map', '[', '"authors"', ']', '.', 'get', '(', 'api_author', '[', '"ID"', ']', ')', 'if', 'author', ':', 'self', '.', 'update_existing_author', '(', 'author', ',', 'api_author', ')', 'else', ':', "# if the author wasn't found (likely because it's a Byline or guest author, not a user),", '# go ahead and create the author now', 'author', '=', 'Author', '.', 'objects', '.', 'create', '(', 'site_id', '=', 'self', '.', 'site_id', ',', 'wp_id', '=', 'api_author', '[', '"ID"', ']', ',', '*', '*', 'self', '.', 'api_object_data', '(', '"author"', ',', 'api_author', ')', ')', 'else', ':', "# do a direct db lookup if we're not in bulk mode", 'author', ',', 'created', '=', 'self', '.', 'get_or_create_author', '(', 'api_author', ')', 'if', 'author', 'and', 'not', 'created', ':', 'self', '.', 'update_existing_author', '(', 'author', ',', 'api_author', ')', "# add to the ref data map so we don't try to create it again", 'if', 'author', ':', 'self', '.', 'ref_data_map', '[', '"authors"', ']', '[', 'api_author', '[', '"ID"', ']', ']', '=', 'author', 'return', 'author'] | Create or update an Author related to a post.
:param bulk_mode: If True, minimize db operations by bulk creating post objects
:param api_author: the data in the api for the Author
:return: the up-to-date Author object | ['Create', 'or', 'update', 'an', 'Author', 'related', 'to', 'a', 'post', '.'] | train | https://github.com/observermedia/django-wordpress-rest/blob/f0d96891d8ac5a69c8ba90e044876e756fad1bfe/wordpress/loading.py#L635-L664 |
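A hedged sketch of calling this while syncing a single post; the loader and api_post names are assumptions, and the author payload layout follows the WordPress.com REST API:

# Hypothetical: 'loader' is a WPAPILoader bound to a site and 'api_post' is one
# post dict returned by the WordPress.com REST API.
author = loader.process_post_author(bulk_mode=False, api_author=api_post["author"])
print(author.wp_id if author else "no author resolved")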
1,662 | glue-viz/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/gloo/context.py | forget_canvas | def forget_canvas(canvas):
""" Forget about the given canvas. Used by the canvas when closed.
"""
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
canvasses[:] = [weakref.ref(c) for c in cc] | python | def forget_canvas(canvas):
""" Forget about the given canvas. Used by the canvas when closed.
"""
cc = [c() for c in canvasses if c() is not None]
while canvas in cc:
cc.remove(canvas)
canvasses[:] = [weakref.ref(c) for c in cc] | ['def', 'forget_canvas', '(', 'canvas', ')', ':', 'cc', '=', '[', 'c', '(', ')', 'for', 'c', 'in', 'canvasses', 'if', 'c', '(', ')', 'is', 'not', 'None', ']', 'while', 'canvas', 'in', 'cc', ':', 'cc', '.', 'remove', '(', 'canvas', ')', 'canvasses', '[', ':', ']', '=', '[', 'weakref', '.', 'ref', '(', 'c', ')', 'for', 'c', 'in', 'cc', ']'] | Forget about the given canvas. Used by the canvas when closed. | ['Forget', 'about', 'the', 'given', 'canvas', '.', 'Used', 'by', 'the', 'canvas', 'when', 'closed', '.'] | train | https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/context.py#L79-L85 |
1,663 | pypa/pipenv | pipenv/vendor/requests/sessions.py | Session.prepare_request | def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p | python | def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p | ['def', 'prepare_request', '(', 'self', ',', 'request', ')', ':', 'cookies', '=', 'request', '.', 'cookies', 'or', '{', '}', '# Bootstrap CookieJar.', 'if', 'not', 'isinstance', '(', 'cookies', ',', 'cookielib', '.', 'CookieJar', ')', ':', 'cookies', '=', 'cookiejar_from_dict', '(', 'cookies', ')', '# Merge with session cookies', 'merged_cookies', '=', 'merge_cookies', '(', 'merge_cookies', '(', 'RequestsCookieJar', '(', ')', ',', 'self', '.', 'cookies', ')', ',', 'cookies', ')', "# Set environment's basic authentication if not explicitly set.", 'auth', '=', 'request', '.', 'auth', 'if', 'self', '.', 'trust_env', 'and', 'not', 'auth', 'and', 'not', 'self', '.', 'auth', ':', 'auth', '=', 'get_netrc_auth', '(', 'request', '.', 'url', ')', 'p', '=', 'PreparedRequest', '(', ')', 'p', '.', 'prepare', '(', 'method', '=', 'request', '.', 'method', '.', 'upper', '(', ')', ',', 'url', '=', 'request', '.', 'url', ',', 'files', '=', 'request', '.', 'files', ',', 'data', '=', 'request', '.', 'data', ',', 'json', '=', 'request', '.', 'json', ',', 'headers', '=', 'merge_setting', '(', 'request', '.', 'headers', ',', 'self', '.', 'headers', ',', 'dict_class', '=', 'CaseInsensitiveDict', ')', ',', 'params', '=', 'merge_setting', '(', 'request', '.', 'params', ',', 'self', '.', 'params', ')', ',', 'auth', '=', 'merge_setting', '(', 'auth', ',', 'self', '.', 'auth', ')', ',', 'cookies', '=', 'merged_cookies', ',', 'hooks', '=', 'merge_hooks', '(', 'request', '.', 'hooks', ',', 'self', '.', 'hooks', ')', ',', ')', 'return', 'p'] | Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest | ['Constructs', 'a', ':', 'class', ':', 'PreparedRequest', '<PreparedRequest', '>', 'for', 'transmission', 'and', 'returns', 'it', '.', 'The', ':', 'class', ':', 'PreparedRequest', 'has', 'settings', 'merged', 'from', 'the', ':', 'class', ':', 'Request', '<Request', '>', 'instance', 'and', 'those', 'of', 'the', ':', 'class', ':', 'Session', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/sessions.py#L426-L464 |
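A usage sketch following the standard prepared-request flow from the requests documentation:

import requests

s = requests.Session()
req = requests.Request("GET", "https://httpbin.org/get", params={"q": "demo"})
prepped = s.prepare_request(req)   # merges session headers, cookies and auth
resp = s.send(prepped, timeout=10)
print(resp.status_code)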
1,664 | bwohlberg/sporco | sporco/plot.py | attach_zoom | def attach_zoom(ax, scaling=2.0):
"""
Attach an event handler that supports zooming within a plot using
the mouse scroll wheel.
Parameters
----------
ax : :class:`matplotlib.axes.Axes` object
Axes to which event handling is to be attached
scaling : float, optional (default 2.0)
Scaling factor for zooming in and out
Returns
-------
zoom : function
Mouse scroll wheel event handler function
"""
# See https://stackoverflow.com/questions/11551049
def zoom(event):
# Get the current x and y limits
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
# Get event location
xdata = event.xdata
ydata = event.ydata
# Return if cursor is not over valid region of plot
if xdata is None or ydata is None:
return
if event.button == 'up':
# Deal with zoom in
scale_factor = 1.0 / scaling
elif event.button == 'down':
# Deal with zoom out
scale_factor = scaling
# Get distance from the cursor to the edge of the figure frame
x_left = xdata - cur_xlim[0]
x_right = cur_xlim[1] - xdata
y_top = ydata - cur_ylim[0]
y_bottom = cur_ylim[1] - ydata
# Calculate new x and y limits
new_xlim = (xdata - x_left * scale_factor,
xdata + x_right * scale_factor)
new_ylim = (ydata - y_top * scale_factor,
ydata + y_bottom * scale_factor)
# Ensure that x limit range is no larger than that of the reference
if np.diff(new_xlim) > np.diff(zoom.xlim_ref):
new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim)
# Ensure that lower x limit is not less than that of the reference
if new_xlim[0] < zoom.xlim_ref[0]:
new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0])
# Ensure that upper x limit is not greater than that of the reference
if new_xlim[1] > zoom.xlim_ref[1]:
new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1])
# Ensure that ylim tuple has the smallest value first
if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
ylim_ref = zoom.ylim_ref[::-1]
new_ylim = new_ylim[::-1]
else:
ylim_ref = zoom.ylim_ref
# Ensure that y limit range is no larger than that of the reference
if np.diff(new_ylim) > np.diff(ylim_ref):
new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim)
# Ensure that lower y limit is not less than that of the reference
if new_ylim[0] < ylim_ref[0]:
new_ylim += np.array(ylim_ref[0] - new_ylim[0])
# Ensure that upper y limit is not greater than that of the reference
if new_ylim[1] > ylim_ref[1]:
new_ylim -= np.array(new_ylim[1] - ylim_ref[1])
# Return the ylim tuple to its original order
if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
new_ylim = new_ylim[::-1]
# Set new x and y limits
ax.set_xlim(new_xlim)
ax.set_ylim(new_ylim)
# Force redraw
ax.figure.canvas.draw()
# Record reference x and y limits prior to any zooming
zoom.xlim_ref = ax.get_xlim()
zoom.ylim_ref = ax.get_ylim()
# Get figure for specified axes and attach the event handler
fig = ax.get_figure()
fig.canvas.mpl_connect('scroll_event', zoom)
return zoom | python | def attach_zoom(ax, scaling=2.0):
"""
Attach an event handler that supports zooming within a plot using
the mouse scroll wheel.
Parameters
----------
ax : :class:`matplotlib.axes.Axes` object
Axes to which event handling is to be attached
scaling : float, optional (default 2.0)
Scaling factor for zooming in and out
Returns
-------
zoom : function
Mouse scroll wheel event handler function
"""
# See https://stackoverflow.com/questions/11551049
def zoom(event):
# Get the current x and y limits
cur_xlim = ax.get_xlim()
cur_ylim = ax.get_ylim()
# Get event location
xdata = event.xdata
ydata = event.ydata
# Return if cursor is not over valid region of plot
if xdata is None or ydata is None:
return
if event.button == 'up':
# Deal with zoom in
scale_factor = 1.0 / scaling
elif event.button == 'down':
# Deal with zoom out
scale_factor = scaling
# Get distance from the cursor to the edge of the figure frame
x_left = xdata - cur_xlim[0]
x_right = cur_xlim[1] - xdata
y_top = ydata - cur_ylim[0]
y_bottom = cur_ylim[1] - ydata
# Calculate new x and y limits
new_xlim = (xdata - x_left * scale_factor,
xdata + x_right * scale_factor)
new_ylim = (ydata - y_top * scale_factor,
ydata + y_bottom * scale_factor)
# Ensure that x limit range is no larger than that of the reference
if np.diff(new_xlim) > np.diff(zoom.xlim_ref):
new_xlim *= np.diff(zoom.xlim_ref) / np.diff(new_xlim)
# Ensure that lower x limit is not less than that of the reference
if new_xlim[0] < zoom.xlim_ref[0]:
new_xlim += np.array(zoom.xlim_ref[0] - new_xlim[0])
# Ensure that upper x limit is not greater than that of the reference
if new_xlim[1] > zoom.xlim_ref[1]:
new_xlim -= np.array(new_xlim[1] - zoom.xlim_ref[1])
# Ensure that ylim tuple has the smallest value first
if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
ylim_ref = zoom.ylim_ref[::-1]
new_ylim = new_ylim[::-1]
else:
ylim_ref = zoom.ylim_ref
# Ensure that y limit range is no larger than that of the reference
if np.diff(new_ylim) > np.diff(ylim_ref):
new_ylim *= np.diff(ylim_ref) / np.diff(new_ylim)
# Ensure that lower y limit is not less than that of the reference
if new_ylim[0] < ylim_ref[0]:
new_ylim += np.array(ylim_ref[0] - new_ylim[0])
# Ensure that upper y limit is not greater than that of the reference
if new_ylim[1] > ylim_ref[1]:
new_ylim -= np.array(new_ylim[1] - ylim_ref[1])
# Return the ylim tuple to its original order
if zoom.ylim_ref[1] < zoom.ylim_ref[0]:
new_ylim = new_ylim[::-1]
# Set new x and y limits
ax.set_xlim(new_xlim)
ax.set_ylim(new_ylim)
# Force redraw
ax.figure.canvas.draw()
# Record reference x and y limits prior to any zooming
zoom.xlim_ref = ax.get_xlim()
zoom.ylim_ref = ax.get_ylim()
# Get figure for specified axes and attach the event handler
fig = ax.get_figure()
fig.canvas.mpl_connect('scroll_event', zoom)
return zoom | ['def', 'attach_zoom', '(', 'ax', ',', 'scaling', '=', '2.0', ')', ':', '# See https://stackoverflow.com/questions/11551049', 'def', 'zoom', '(', 'event', ')', ':', '# Get the current x and y limits', 'cur_xlim', '=', 'ax', '.', 'get_xlim', '(', ')', 'cur_ylim', '=', 'ax', '.', 'get_ylim', '(', ')', '# Get event location', 'xdata', '=', 'event', '.', 'xdata', 'ydata', '=', 'event', '.', 'ydata', '# Return if cursor is not over valid region of plot', 'if', 'xdata', 'is', 'None', 'or', 'ydata', 'is', 'None', ':', 'return', 'if', 'event', '.', 'button', '==', "'up'", ':', '# Deal with zoom in', 'scale_factor', '=', '1.0', '/', 'scaling', 'elif', 'event', '.', 'button', '==', "'down'", ':', '# Deal with zoom out', 'scale_factor', '=', 'scaling', '# Get distance from the cursor to the edge of the figure frame', 'x_left', '=', 'xdata', '-', 'cur_xlim', '[', '0', ']', 'x_right', '=', 'cur_xlim', '[', '1', ']', '-', 'xdata', 'y_top', '=', 'ydata', '-', 'cur_ylim', '[', '0', ']', 'y_bottom', '=', 'cur_ylim', '[', '1', ']', '-', 'ydata', '# Calculate new x and y limits', 'new_xlim', '=', '(', 'xdata', '-', 'x_left', '*', 'scale_factor', ',', 'xdata', '+', 'x_right', '*', 'scale_factor', ')', 'new_ylim', '=', '(', 'ydata', '-', 'y_top', '*', 'scale_factor', ',', 'ydata', '+', 'y_bottom', '*', 'scale_factor', ')', '# Ensure that x limit range is no larger than that of the reference', 'if', 'np', '.', 'diff', '(', 'new_xlim', ')', '>', 'np', '.', 'diff', '(', 'zoom', '.', 'xlim_ref', ')', ':', 'new_xlim', '*=', 'np', '.', 'diff', '(', 'zoom', '.', 'xlim_ref', ')', '/', 'np', '.', 'diff', '(', 'new_xlim', ')', '# Ensure that lower x limit is not less than that of the reference', 'if', 'new_xlim', '[', '0', ']', '<', 'zoom', '.', 'xlim_ref', '[', '0', ']', ':', 'new_xlim', '+=', 'np', '.', 'array', '(', 'zoom', '.', 'xlim_ref', '[', '0', ']', '-', 'new_xlim', '[', '0', ']', ')', '# Ensure that upper x limit is not greater than that of the reference', 'if', 'new_xlim', '[', '1', ']', '>', 'zoom', '.', 'xlim_ref', '[', '1', ']', ':', 'new_xlim', '-=', 'np', '.', 'array', '(', 'new_xlim', '[', '1', ']', '-', 'zoom', '.', 'xlim_ref', '[', '1', ']', ')', '# Ensure that ylim tuple has the smallest value first', 'if', 'zoom', '.', 'ylim_ref', '[', '1', ']', '<', 'zoom', '.', 'ylim_ref', '[', '0', ']', ':', 'ylim_ref', '=', 'zoom', '.', 'ylim_ref', '[', ':', ':', '-', '1', ']', 'new_ylim', '=', 'new_ylim', '[', ':', ':', '-', '1', ']', 'else', ':', 'ylim_ref', '=', 'zoom', '.', 'ylim_ref', '# Ensure that y limit range is no larger than that of the reference', 'if', 'np', '.', 'diff', '(', 'new_ylim', ')', '>', 'np', '.', 'diff', '(', 'ylim_ref', ')', ':', 'new_ylim', '*=', 'np', '.', 'diff', '(', 'ylim_ref', ')', '/', 'np', '.', 'diff', '(', 'new_ylim', ')', '# Ensure that lower y limit is not less than that of the reference', 'if', 'new_ylim', '[', '0', ']', '<', 'ylim_ref', '[', '0', ']', ':', 'new_ylim', '+=', 'np', '.', 'array', '(', 'ylim_ref', '[', '0', ']', '-', 'new_ylim', '[', '0', ']', ')', '# Ensure that upper y limit is not greater than that of the reference', 'if', 'new_ylim', '[', '1', ']', '>', 'ylim_ref', '[', '1', ']', ':', 'new_ylim', '-=', 'np', '.', 'array', '(', 'new_ylim', '[', '1', ']', '-', 'ylim_ref', '[', '1', ']', ')', '# Return the ylim tuple to its original order', 'if', 'zoom', '.', 'ylim_ref', '[', '1', ']', '<', 'zoom', '.', 'ylim_ref', '[', '0', ']', ':', 'new_ylim', '=', 'new_ylim', '[', ':', ':', '-', '1', ']', '# Set new x and y limits', 'ax', '.', 'set_xlim', 
'(', 'new_xlim', ')', 'ax', '.', 'set_ylim', '(', 'new_ylim', ')', '# Force redraw', 'ax', '.', 'figure', '.', 'canvas', '.', 'draw', '(', ')', '# Record reference x and y limits prior to any zooming', 'zoom', '.', 'xlim_ref', '=', 'ax', '.', 'get_xlim', '(', ')', 'zoom', '.', 'ylim_ref', '=', 'ax', '.', 'get_ylim', '(', ')', '# Get figure for specified axes and attach the event handler', 'fig', '=', 'ax', '.', 'get_figure', '(', ')', 'fig', '.', 'canvas', '.', 'mpl_connect', '(', "'scroll_event'", ',', 'zoom', ')', 'return', 'zoom'] | Attach an event handler that supports zooming within a plot using
the mouse scroll wheel.
Parameters
----------
ax : :class:`matplotlib.axes.Axes` object
Axes to which event handling is to be attached
scaling : float, optional (default 2.0)
Scaling factor for zooming in and out
Returns
-------
zoom : function
Mouse scroll wheel event handler function | ['Attach', 'an', 'event', 'handler', 'that', 'supports', 'zooming', 'within', 'a', 'plot', 'using', 'the', 'mouse', 'scroll', 'wheel', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/plot.py#L76-L171 |
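A minimal sketch wiring the handler to an ordinary Matplotlib axes, assuming attach_zoom is importable from sporco.plot as the path above suggests:

import numpy as np
import matplotlib.pyplot as plt
from sporco.plot import attach_zoom

x = np.linspace(0, 10, 200)
fig, ax = plt.subplots()
ax.plot(x, np.sin(x))
zoom = attach_zoom(ax, scaling=1.5)  # the scroll wheel now zooms about the cursor
plt.show()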
1,665 | saltstack/salt | salt/states/file.py | _check_user | def _check_user(user, group):
'''
Checks if the named user and group are present on the minion
'''
err = ''
if user:
uid = __salt__['file.user_to_uid'](user)
if uid == '':
err += 'User {0} is not available '.format(user)
if group:
gid = __salt__['file.group_to_gid'](group)
if gid == '':
err += 'Group {0} is not available'.format(group)
return err | python | def _check_user(user, group):
'''
Checks if the named user and group are present on the minion
'''
err = ''
if user:
uid = __salt__['file.user_to_uid'](user)
if uid == '':
err += 'User {0} is not available '.format(user)
if group:
gid = __salt__['file.group_to_gid'](group)
if gid == '':
err += 'Group {0} is not available'.format(group)
return err | ['def', '_check_user', '(', 'user', ',', 'group', ')', ':', 'err', '=', "''", 'if', 'user', ':', 'uid', '=', '__salt__', '[', "'file.user_to_uid'", ']', '(', 'user', ')', 'if', 'uid', '==', "''", ':', 'err', '+=', "'User {0} is not available '", '.', 'format', '(', 'user', ')', 'if', 'group', ':', 'gid', '=', '__salt__', '[', "'file.group_to_gid'", ']', '(', 'group', ')', 'if', 'gid', '==', "''", ':', 'err', '+=', "'Group {0} is not available'", '.', 'format', '(', 'group', ')', 'return', 'err'] | Checks if the named user and group are present on the minion | ['Checks', 'if', 'the', 'named', 'user', 'and', 'group', 'are', 'present', 'on', 'the', 'minion'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L380-L393 |
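A hedged sketch of how a state function in the same module typically uses this helper; ret is the usual state return dict and user/group come from the state arguments:

u_check = _check_user(user, group)
if u_check:
    # the named user or group does not exist on the minion, so fail early
    ret['result'] = False
    ret['comment'] = u_check
    return ret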
1,666 | jmbeach/KEP.py | src/keppy/simulator_device.py | SimulatorDevice.process_boolean | def process_boolean(self, tag):
"""Process Boolean type tags"""
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address() | python | def process_boolean(self, tag):
"""Process Boolean type tags"""
tag.set_address(self.normal_register.current_bit_address)
self.normal_register.move_to_next_bit_address() | ['def', 'process_boolean', '(', 'self', ',', 'tag', ')', ':', 'tag', '.', 'set_address', '(', 'self', '.', 'normal_register', '.', 'current_bit_address', ')', 'self', '.', 'normal_register', '.', 'move_to_next_bit_address', '(', ')'] | Process Boolean type tags | ['Process', 'Boolean', 'type', 'tags'] | train | https://github.com/jmbeach/KEP.py/blob/68cda64ab649640a486534867c81274c41e39446/src/keppy/simulator_device.py#L61-L64 |
1,667 | jbittel/django-mama-cas | mama_cas/models.py | TicketManager.create_ticket | def create_ticket(self, ticket=None, **kwargs):
"""
Create a new ``Ticket``. Additional arguments are passed to the
``create()`` function. Return the newly created ``Ticket``.
"""
if not ticket:
ticket = self.create_ticket_str()
if 'service' in kwargs:
kwargs['service'] = clean_service_url(kwargs['service'])
if 'expires' not in kwargs:
expires = now() + timedelta(seconds=self.model.TICKET_EXPIRE)
kwargs['expires'] = expires
t = self.create(ticket=ticket, **kwargs)
logger.debug("Created %s %s" % (t.name, t.ticket))
return t | python | def create_ticket(self, ticket=None, **kwargs):
"""
Create a new ``Ticket``. Additional arguments are passed to the
``create()`` function. Return the newly created ``Ticket``.
"""
if not ticket:
ticket = self.create_ticket_str()
if 'service' in kwargs:
kwargs['service'] = clean_service_url(kwargs['service'])
if 'expires' not in kwargs:
expires = now() + timedelta(seconds=self.model.TICKET_EXPIRE)
kwargs['expires'] = expires
t = self.create(ticket=ticket, **kwargs)
logger.debug("Created %s %s" % (t.name, t.ticket))
return t | ['def', 'create_ticket', '(', 'self', ',', 'ticket', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'not', 'ticket', ':', 'ticket', '=', 'self', '.', 'create_ticket_str', '(', ')', 'if', "'service'", 'in', 'kwargs', ':', 'kwargs', '[', "'service'", ']', '=', 'clean_service_url', '(', 'kwargs', '[', "'service'", ']', ')', 'if', "'expires'", 'not', 'in', 'kwargs', ':', 'expires', '=', 'now', '(', ')', '+', 'timedelta', '(', 'seconds', '=', 'self', '.', 'model', '.', 'TICKET_EXPIRE', ')', 'kwargs', '[', "'expires'", ']', '=', 'expires', 't', '=', 'self', '.', 'create', '(', 'ticket', '=', 'ticket', ',', '*', '*', 'kwargs', ')', 'logger', '.', 'debug', '(', '"Created %s %s"', '%', '(', 't', '.', 'name', ',', 't', '.', 'ticket', ')', ')', 'return', 't'] | Create a new ``Ticket``. Additional arguments are passed to the
``create()`` function. Return the newly created ``Ticket``. | ['Create', 'a', 'new', 'Ticket', '.', 'Additional', 'arguments', 'are', 'passed', 'to', 'the', 'create', '()', 'function', '.', 'Return', 'the', 'newly', 'created', 'Ticket', '.'] | train | https://github.com/jbittel/django-mama-cas/blob/03935d97442b46d8127ab9e1cd8deb96953fe156/mama_cas/models.py#L42-L56 |
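A hedged usage sketch assuming the concrete ServiceTicket model whose default manager derives from TicketManager:

# Hypothetical view code; request.user is an authenticated Django user.
st = ServiceTicket.objects.create_ticket(
    service='https://www.example.com/',
    user=request.user,
)
print(st.ticket, st.expires)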
1,668 | simonw/datasette | datasette/app.py | Datasette.expand_foreign_keys | async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks | python | async def expand_foreign_keys(self, database, table, column, values):
"Returns dict mapping (column, value) -> label"
labeled_fks = {}
foreign_keys = await self.foreign_keys_for_table(database, table)
# Find the foreign_key for this column
try:
fk = [
foreign_key for foreign_key in foreign_keys
if foreign_key["column"] == column
][0]
except IndexError:
return {}
label_column = await self.label_column_for_table(database, fk["other_table"])
if not label_column:
return {
(fk["column"], value): str(value)
for value in values
}
labeled_fks = {}
sql = '''
select {other_column}, {label_column}
from {other_table}
where {other_column} in ({placeholders})
'''.format(
other_column=escape_sqlite(fk["other_column"]),
label_column=escape_sqlite(label_column),
other_table=escape_sqlite(fk["other_table"]),
placeholders=", ".join(["?"] * len(set(values))),
)
try:
results = await self.execute(
database, sql, list(set(values))
)
except InterruptedError:
pass
else:
for id, value in results:
labeled_fks[(fk["column"], id)] = value
return labeled_fks | ['async', 'def', 'expand_foreign_keys', '(', 'self', ',', 'database', ',', 'table', ',', 'column', ',', 'values', ')', ':', 'labeled_fks', '=', '{', '}', 'foreign_keys', '=', 'await', 'self', '.', 'foreign_keys_for_table', '(', 'database', ',', 'table', ')', '# Find the foreign_key for this column', 'try', ':', 'fk', '=', '[', 'foreign_key', 'for', 'foreign_key', 'in', 'foreign_keys', 'if', 'foreign_key', '[', '"column"', ']', '==', 'column', ']', '[', '0', ']', 'except', 'IndexError', ':', 'return', '{', '}', 'label_column', '=', 'await', 'self', '.', 'label_column_for_table', '(', 'database', ',', 'fk', '[', '"other_table"', ']', ')', 'if', 'not', 'label_column', ':', 'return', '{', '(', 'fk', '[', '"column"', ']', ',', 'value', ')', ':', 'str', '(', 'value', ')', 'for', 'value', 'in', 'values', '}', 'labeled_fks', '=', '{', '}', 'sql', '=', "'''\n select {other_column}, {label_column}\n from {other_table}\n where {other_column} in ({placeholders})\n '''", '.', 'format', '(', 'other_column', '=', 'escape_sqlite', '(', 'fk', '[', '"other_column"', ']', ')', ',', 'label_column', '=', 'escape_sqlite', '(', 'label_column', ')', ',', 'other_table', '=', 'escape_sqlite', '(', 'fk', '[', '"other_table"', ']', ')', ',', 'placeholders', '=', '", "', '.', 'join', '(', '[', '"?"', ']', '*', 'len', '(', 'set', '(', 'values', ')', ')', ')', ',', ')', 'try', ':', 'results', '=', 'await', 'self', '.', 'execute', '(', 'database', ',', 'sql', ',', 'list', '(', 'set', '(', 'values', ')', ')', ')', 'except', 'InterruptedError', ':', 'pass', 'else', ':', 'for', 'id', ',', 'value', 'in', 'results', ':', 'labeled_fks', '[', '(', 'fk', '[', '"column"', ']', ',', 'id', ')', ']', '=', 'value', 'return', 'labeled_fks'] | Returns dict mapping (column, value) -> label | ['Returns', 'dict', 'mapping', '(', 'column', 'value', ')', '-', '>', 'label'] | train | https://github.com/simonw/datasette/blob/11b352b4d52fd02a422776edebb14f12e4994d3b/datasette/app.py#L368-L406 |
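A hedged sketch of awaiting the coroutine; the database, table and column names are made up, 'datasette' refers to the Datasette application instance, and the call must run inside an async context such as a custom view or plugin:

labels = await datasette.expand_foreign_keys(
    "mydb", "orders", "customer_id", [1, 2, 3])
# e.g. {("customer_id", 1): "Acme Corp", ("customer_id", 2): "Widgets Ltd", ...}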
1,669 | Opentrons/opentrons | api/src/opentrons/deck_calibration/__init__.py | position | def position(axis, hardware, cp=None):
"""
Read position from driver into a tuple and map 3-rd value
to the axis of a pipette currently used
"""
if not ff.use_protocol_api_v2():
p = hardware._driver.position
return (p['X'], p['Y'], p[axis])
else:
p = hardware.gantry_position(axis, critical_point=cp)
return (p.x, p.y, p.z) | python | def position(axis, hardware, cp=None):
"""
Read position from driver into a tuple and map 3-rd value
to the axis of a pipette currently used
"""
if not ff.use_protocol_api_v2():
p = hardware._driver.position
return (p['X'], p['Y'], p[axis])
else:
p = hardware.gantry_position(axis, critical_point=cp)
return (p.x, p.y, p.z) | ['def', 'position', '(', 'axis', ',', 'hardware', ',', 'cp', '=', 'None', ')', ':', 'if', 'not', 'ff', '.', 'use_protocol_api_v2', '(', ')', ':', 'p', '=', 'hardware', '.', '_driver', '.', 'position', 'return', '(', 'p', '[', "'X'", ']', ',', 'p', '[', "'Y'", ']', ',', 'p', '[', 'axis', ']', ')', 'else', ':', 'p', '=', 'hardware', '.', 'gantry_position', '(', 'axis', ',', 'critical_point', '=', 'cp', ')', 'return', '(', 'p', '.', 'x', ',', 'p', '.', 'y', ',', 'p', '.', 'z', ')'] | Read position from driver into a tuple and map 3-rd value
to the axis of a pipette currently used | ['Read', 'position', 'from', 'driver', 'into', 'a', 'tuple', 'and', 'map', '3', '-', 'rd', 'value', 'to', 'the', 'axis', 'of', 'a', 'pipette', 'currently', 'used'] | train | https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/deck_calibration/__init__.py#L66-L77 |
1,670 | saltstack/salt | salt/modules/firewalld.py | get_masquerade | def get_masquerade(zone=None, permanent=True):
'''
Show if masquerading is enabled on a zone.
If zone is omitted, default zone will be used.
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_masquerade zone
'''
zone_info = list_all(zone, permanent)
if 'no' in [zone_info[i]['masquerade'][0] for i in zone_info]:
return False
return True | python | def get_masquerade(zone=None, permanent=True):
'''
Show if masquerading is enabled on a zone.
If zone is omitted, default zone will be used.
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_masquerade zone
'''
zone_info = list_all(zone, permanent)
if 'no' in [zone_info[i]['masquerade'][0] for i in zone_info]:
return False
return True | ['def', 'get_masquerade', '(', 'zone', '=', 'None', ',', 'permanent', '=', 'True', ')', ':', 'zone_info', '=', 'list_all', '(', 'zone', ',', 'permanent', ')', 'if', "'no'", 'in', '[', 'zone_info', '[', 'i', ']', '[', "'masquerade'", ']', '[', '0', ']', 'for', 'i', 'in', 'zone_info', ']', ':', 'return', 'False', 'return', 'True'] | Show if masquerading is enabled on a zone.
If zone is omitted, default zone will be used.
CLI Example:
.. code-block:: bash
salt '*' firewalld.get_masquerade zone | ['Show', 'if', 'masquerading', 'is', 'enabled', 'on', 'a', 'zone', '.', 'If', 'zone', 'is', 'omitted', 'default', 'zone', 'will', 'be', 'used', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/firewalld.py#L541-L557 |
1,671 | pylast/pylast | src/pylast/__init__.py | Artist.get_bio | def get_bio(self, section, language=None):
"""
Returns a section of the bio.
section can be "content", "summary" or
"published" (for published date)
"""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return self._extract_cdata_from_request(
self.ws_prefix + ".getInfo", section, params
) | python | def get_bio(self, section, language=None):
"""
Returns a section of the bio.
section can be "content", "summary" or
"published" (for published date)
"""
if language:
params = self._get_params()
params["lang"] = language
else:
params = None
return self._extract_cdata_from_request(
self.ws_prefix + ".getInfo", section, params
) | ['def', 'get_bio', '(', 'self', ',', 'section', ',', 'language', '=', 'None', ')', ':', 'if', 'language', ':', 'params', '=', 'self', '.', '_get_params', '(', ')', 'params', '[', '"lang"', ']', '=', 'language', 'else', ':', 'params', '=', 'None', 'return', 'self', '.', '_extract_cdata_from_request', '(', 'self', '.', 'ws_prefix', '+', '".getInfo"', ',', 'section', ',', 'params', ')'] | Returns a section of the bio.
section can be "content", "summary" or
"published" (for published date) | ['Returns', 'a', 'section', 'of', 'the', 'bio', '.', 'section', 'can', 'be', 'content', 'summary', 'or', 'published', '(', 'for', 'published', 'date', ')'] | train | https://github.com/pylast/pylast/blob/a52f66d316797fc819b5f1d186d77f18ba97b4ff/src/pylast/__init__.py#L1799-L1813 |
1,672 | bretth/woven | woven/deployment.py | _backup_file | def _backup_file(path):
"""
Backup a file but never overwrite an existing backup file
"""
backup_base = '/var/local/woven-backup'
backup_path = ''.join([backup_base,path])
if not exists(backup_path):
directory = ''.join([backup_base,os.path.split(path)[0]])
sudo('mkdir -p %s'% directory)
sudo('cp %s %s'% (path,backup_path)) | python | def _backup_file(path):
"""
Backup a file but never overwrite an existing backup file
"""
backup_base = '/var/local/woven-backup'
backup_path = ''.join([backup_base,path])
if not exists(backup_path):
directory = ''.join([backup_base,os.path.split(path)[0]])
sudo('mkdir -p %s'% directory)
sudo('cp %s %s'% (path,backup_path)) | ['def', '_backup_file', '(', 'path', ')', ':', 'backup_base', '=', "'/var/local/woven-backup'", 'backup_path', '=', "''", '.', 'join', '(', '[', 'backup_base', ',', 'path', ']', ')', 'if', 'not', 'exists', '(', 'backup_path', ')', ':', 'directory', '=', "''", '.', 'join', '(', '[', 'backup_base', ',', 'os', '.', 'path', '.', 'split', '(', 'path', ')', '[', '0', ']', ']', ')', 'sudo', '(', "'mkdir -p %s'", '%', 'directory', ')', 'sudo', '(', "'cp %s %s'", '%', '(', 'path', ',', 'backup_path', ')', ')'] | Backup a file but never overwrite an existing backup file | ['Backup', 'a', 'file', 'but', 'never', 'overwrite', 'an', 'existing', 'backup', 'file'] | train | https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/deployment.py#L15-L24 |
1,673 | ryanjdillon/pyotelem | pyotelem/glides.py | split_glides | def split_glides(n_samples, dur, fs_a, GL, min_dur=None):
'''Get start/stop indices of each `dur` length sub-glide for glides in GL
Args
----
dur: int
Desired duration of glides
GL: ndarray, (n, 2)
Matrix containing the start time (first column) and end time
(2nd column) of any glides.Times are in seconds.
min_dur: int, default (bool False)
Minimum number of seconds for sub-glide. Default value is `False`, which
makes `min_dur` equal to `dur`, ignoring sub-glides smaller than `dur`.
Attributes
----------
gl_ind_diff: ndarray, (n,3)
GL, with additional column of difference between the first two columns
Returns
-------
SGL: ndarray, (n, 2)
Matrix containing the start time (first column) and end time (2nd
column) of the generated sub-glides. All glides must have duration equal
to the given dur value.Times are in seconds.
'''
import numpy
# Convert `dur` in seconds to duration in number of samples `ndur`
ndur = dur * fs_a
# If minimum duration not passed, set to `min_dur` to skip slices < `dur`
if not min_dur:
min_ndur = dur * fs_a
else:
min_ndur = min_dur * fs_a
# `GL` plus column for total duration of glide, seconds
gl_ind_diff = numpy.vstack((GL.T, GL[:, 1] - GL[:, 0])).T
# Split all glides in `GL`
SGL_started = False
for i in range(len(GL)):
gl_ndur = gl_ind_diff[i, 2]
# Split into sub glides if longer than duration
if abs(gl_ndur) > ndur:
# Make list of index lengths to start of each sub-glide
n_sgl = int(gl_ndur//ndur)
sgl_ndur = numpy.ones(n_sgl)*ndur
sgl_start = numpy.arange(n_sgl)*(ndur+1)
# Add remainder as a sub-glide, skips if `min_ndur` not passed
if (gl_ndur%ndur > min_ndur):
last_ndur = numpy.floor(gl_ndur%ndur)
sgl_ndur = numpy.hstack([sgl_ndur, last_ndur])
last_start = (len(sgl_start)*ndur) + ndur
sgl_start = numpy.hstack([sgl_start, last_start])
# Get start and end index positions for each sub-glide
for k in range(len(sgl_start)):
# starting at original glide start...
# sgl_start_ind: add index increments of ndur+1 for next start idx
next_start_ind = (gl_ind_diff[i, 0] + sgl_start[k]).astype(int)
# end_glide: add `ndur` to that to get ending idx
next_end_ind = (next_start_ind + sgl_ndur[k]).astype(int)
# If first iteration, set equal to first set of indices
if SGL_started == False:
sgl_start_ind = next_start_ind
sgl_end_ind = next_end_ind
SGL_started = True
else:
# Concatenate 1D arrays together, shape (n,)
sgl_start_ind = numpy.hstack((sgl_start_ind, next_start_ind))
sgl_end_ind = numpy.hstack((sgl_end_ind, next_end_ind))
# Stack and transpose indices into shape (n, 2)
SGL = numpy.vstack((sgl_start_ind, sgl_end_ind)).T
# Filter out sub-glides that fall outside of sensor data indices
SGL = SGL[(SGL[:, 0] >= 0) & (SGL[:, 1] < n_samples)]
# check that all sub-glides have a duration of `ndur` seconds
sgl_ndur = SGL[:, 1] - SGL[:, 0]
# If sub-glide `min_ndur` set, make sure all above `min_ndur`, below `ndur`
if min_dur:
assert numpy.all((sgl_ndur <= ndur) & (sgl_ndur >= min_ndur))
# Else make sure all sample number durations equal to `ndur`
else:
assert numpy.all(sgl_ndur == ndur)
# Create `data_sgl_mask`
data_sgl_mask = numpy.zeros(n_samples, dtype=bool)
for start, stop in SGL.astype(int):
data_sgl_mask[start:stop] = True
return SGL, data_sgl_mask | python | def split_glides(n_samples, dur, fs_a, GL, min_dur=None):
'''Get start/stop indices of each `dur` length sub-glide for glides in GL
Args
----
dur: int
Desired duration of glides
GL: ndarray, (n, 2)
Matrix containing the start time (first column) and end time
(2nd column) of any glides.Times are in seconds.
min_dur: int, default (bool False)
Minimum number of seconds for sub-glide. Default value is `False`, which
makes `min_dur` equal to `dur`, ignoring sub-glides smaller than `dur`.
Attributes
----------
gl_ind_diff: ndarray, (n,3)
GL, with additional column of difference between the first two columns
Returns
-------
SGL: ndarray, (n, 2)
Matrix containing the start time (first column) and end time (2nd
column) of the generated sub-glides. All glides must have duration equal
to the given dur value.Times are in seconds.
'''
import numpy
# Convert `dur` in seconds to duration in number of samples `ndur`
ndur = dur * fs_a
# If minimum duration not passed, set to `min_dur` to skip slices < `dur`
if not min_dur:
min_ndur = dur * fs_a
else:
min_ndur = min_dur * fs_a
# `GL` plus column for total duration of glide, seconds
gl_ind_diff = numpy.vstack((GL.T, GL[:, 1] - GL[:, 0])).T
# Split all glides in `GL`
SGL_started = False
for i in range(len(GL)):
gl_ndur = gl_ind_diff[i, 2]
# Split into sub glides if longer than duration
if abs(gl_ndur) > ndur:
# Make list of index lengths to start of each sub-glide
n_sgl = int(gl_ndur//ndur)
sgl_ndur = numpy.ones(n_sgl)*ndur
sgl_start = numpy.arange(n_sgl)*(ndur+1)
# Add remainder as a sub-glide, skips if `min_ndur` not passed
if (gl_ndur%ndur > min_ndur):
last_ndur = numpy.floor(gl_ndur%ndur)
sgl_ndur = numpy.hstack([sgl_ndur, last_ndur])
last_start = (len(sgl_start)*ndur) + ndur
sgl_start = numpy.hstack([sgl_start, last_start])
# Get start and end index positions for each sub-glide
for k in range(len(sgl_start)):
# starting at original glide start...
# sgl_start_ind: add index increments of ndur+1 for next start idx
next_start_ind = (gl_ind_diff[i, 0] + sgl_start[k]).astype(int)
# end_glide: add `ndur` to that to get ending idx
next_end_ind = (next_start_ind + sgl_ndur[k]).astype(int)
# If first iteration, set equal to first set of indices
if SGL_started == False:
sgl_start_ind = next_start_ind
sgl_end_ind = next_end_ind
SGL_started = True
else:
# Concatenate 1D arrays together, shape (n,)
sgl_start_ind = numpy.hstack((sgl_start_ind, next_start_ind))
sgl_end_ind = numpy.hstack((sgl_end_ind, next_end_ind))
# Stack and transpose indices into shape (n, 2)
SGL = numpy.vstack((sgl_start_ind, sgl_end_ind)).T
# Filter out sub-glides that fall outside of sensor data indices
SGL = SGL[(SGL[:, 0] >= 0) & (SGL[:, 1] < n_samples)]
# check that all sub-glides have a duration of `ndur` seconds
sgl_ndur = SGL[:, 1] - SGL[:, 0]
# If sub-glide `min_ndur` set, make sure all above `min_ndur`, below `ndur`
if min_dur:
assert numpy.all((sgl_ndur <= ndur) & (sgl_ndur >= min_ndur))
# Else make sure all sample number durations equal to `ndur`
else:
assert numpy.all(sgl_ndur == ndur)
# Create `data_sgl_mask`
data_sgl_mask = numpy.zeros(n_samples, dtype=bool)
for start, stop in SGL.astype(int):
data_sgl_mask[start:stop] = True
    return SGL, data_sgl_mask | ['def', 'split_glides', '(', 'n_samples', ',', 'dur', ',', 'fs_a', ',', 'GL', ',', 'min_dur', '=', 'None', ')', ':', 'import', 'numpy', '# Convert `dur` in seconds to duration in number of samples `ndur`', 'ndur', '=', 'dur', '*', 'fs_a', '# If minimum duration not passed, set to `min_dur` to skip slices < `dur`', 'if', 'not', 'min_dur', ':', 'min_ndur', '=', 'dur', '*', 'fs_a', 'else', ':', 'min_ndur', '=', 'min_dur', '*', 'fs_a', '# `GL` plus column for total duration of glide, seconds', 'gl_ind_diff', '=', 'numpy', '.', 'vstack', '(', '(', 'GL', '.', 'T', ',', 'GL', '[', ':', ',', '1', ']', '-', 'GL', '[', ':', ',', '0', ']', ')', ')', '.', 'T', '# Split all glides in `GL`', 'SGL_started', '=', 'False', 'for', 'i', 'in', 'range', '(', 'len', '(', 'GL', ')', ')', ':', 'gl_ndur', '=', 'gl_ind_diff', '[', 'i', ',', '2', ']', '# Split into sub glides if longer than duration', 'if', 'abs', '(', 'gl_ndur', ')', '>', 'ndur', ':', '# Make list of index lengths to start of each sub-glide', 'n_sgl', '=', 'int', '(', 'gl_ndur', '//', 'ndur', ')', 'sgl_ndur', '=', 'numpy', '.', 'ones', '(', 'n_sgl', ')', '*', 'ndur', 'sgl_start', '=', 'numpy', '.', 'arange', '(', 'n_sgl', ')', '*', '(', 'ndur', '+', '1', ')', '# Add remainder as a sub-glide, skips if `min_ndur` not passed', 'if', '(', 'gl_ndur', '%', 'ndur', '>', 'min_ndur', ')', ':', 'last_ndur', '=', 'numpy', '.', 'floor', '(', 'gl_ndur', '%', 'ndur', ')', 'sgl_ndur', '=', 'numpy', '.', 'hstack', '(', '[', 'sgl_ndur', ',', 'last_ndur', ']', ')', 'last_start', '=', '(', 'len', '(', 'sgl_start', ')', '*', 'ndur', ')', '+', 'ndur', 'sgl_start', '=', 'numpy', '.', 'hstack', '(', '[', 'sgl_start', ',', 'last_start', ']', ')', '# Get start and end index positions for each sub-glide', 'for', 'k', 'in', 'range', '(', 'len', '(', 'sgl_start', ')', ')', ':', '# starting at original glide start...', '# sgl_start_ind: add index increments of ndur+1 for next start idx', 'next_start_ind', '=', '(', 'gl_ind_diff', '[', 'i', ',', '0', ']', '+', 'sgl_start', '[', 'k', ']', ')', '.', 'astype', '(', 'int', ')', '# end_glide: add `ndur` to that to get ending idx', 'next_end_ind', '=', '(', 'next_start_ind', '+', 'sgl_ndur', '[', 'k', ']', ')', '.', 'astype', '(', 'int', ')', '# If first iteration, set equal to first set of indices', 'if', 'SGL_started', '==', 'False', ':', 'sgl_start_ind', '=', 'next_start_ind', 'sgl_end_ind', '=', 'next_end_ind', 'SGL_started', '=', 'True', 'else', ':', '# Concatenate 1D arrays together, shape (n,)', 'sgl_start_ind', '=', 'numpy', '.', 'hstack', '(', '(', 'sgl_start_ind', ',', 'next_start_ind', ')', ')', 'sgl_end_ind', '=', 'numpy', '.', 'hstack', '(', '(', 'sgl_end_ind', ',', 'next_end_ind', ')', ')', '# Stack and transpose indices into shape (n, 2)', 'SGL', '=', 'numpy', '.', 'vstack', '(', '(', 'sgl_start_ind', ',', 'sgl_end_ind', ')', ')', '.', 'T', '# Filter out sub-glides that fall outside of sensor data indices', 'SGL', '=', 'SGL', '[', '(', 'SGL', '[', ':', ',', '0', ']', '>=', '0', ')', '&', '(', 'SGL', '[', ':', ',', '1', ']', '<', 'n_samples', ')', ']', '# check that all sub-glides have a duration of `ndur` seconds', 'sgl_ndur', '=', 'SGL', '[', ':', ',', '1', ']', '-', 'SGL', '[', ':', ',', '0', ']', '# If sub-glide `min_ndur` set, make sure all above `min_ndur`, below `ndur`', 'if', 'min_dur', ':', 'assert', 'numpy', '.', 'all', '(', '(', 'sgl_ndur', '<=', 'ndur', ')', '&', '(', 'sgl_ndur', '>=', 'min_ndur', ')', ')', '# Else make sure all sample number durations equal to `ndur`', 'else', ':', 'assert', 'numpy', '.', 'all', '(', 'sgl_ndur', '==', 'ndur', ')', '# Create `data_sgl_mask`', 'data_sgl_mask', '=', 'numpy', '.', 'zeros', '(', 'n_samples', ',', 'dtype', '=', 'bool', ')', 'for', 'start', ',', 'stop', 'in', 'SGL', '.', 'astype', '(', 'int', ')', ':', 'data_sgl_mask', '[', 'start', ':', 'stop', ']', '=', 'True', 'return', 'SGL', ',', 'data_sgl_mask'] | Get start/stop indices of each `dur` length sub-glide for glides in GL
Args
----
dur: int
Desired duration of glides
GL: ndarray, (n, 2)
Matrix containing the start time (first column) and end time
(2nd column) of any glides.Times are in seconds.
min_dur: int, default (bool False)
Minimum number of seconds for sub-glide. Default value is `False`, which
makes `min_dur` equal to `dur`, ignoring sub-glides smaller than `dur`.
Attributes
----------
gl_ind_diff: ndarray, (n,3)
GL, with additional column of difference between the first two columns
Returns
-------
SGL: ndarray, (n, 2)
Matrix containing the start time (first column) and end time (2nd
column) of the generated sub-glides. All glides must have duration equal
to the given dur value.Times are in seconds. | ['Get', 'start', '/', 'stop', 'indices', 'of', 'each', 'dur', 'length', 'sub', '-', 'glide', 'for', 'glides', 'in', 'GL'] | train | https://github.com/ryanjdillon/pyotelem/blob/816563a9c3feb3fa416f1c2921c6b75db34111ad/pyotelem/glides.py#L189-L291 |
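A rough calling sketch for split_glides above, with made-up sampling rate and glide bounds; the exact units of GL (seconds versus sample indices) follow whatever convention the surrounding pyotelem code uses, so the numbers here are purely illustrative.

import numpy

fs_a = 16                         # assumed accelerometer sampling rate (Hz)
n_samples = fs_a * 600            # ten minutes of samples
GL = numpy.array([[1000, 9000]])  # one detected glide, start/end values

# Split into 5-second sub-glides, keeping any leftover piece of at least 2 s
SGL, data_sgl_mask = split_glides(n_samples, dur=5, fs_a=fs_a, GL=GL, min_dur=2)
print(SGL.shape)            # (n_subglides, 2) start/stop indices
print(data_sgl_mask.sum())  # number of samples covered by sub-glides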
1,674 | ibis-project/ibis | ibis/expr/window.py | trailing_window | def trailing_window(rows, group_by=None, order_by=None):
"""Create a trailing window for use with aggregate window functions.
Parameters
----------
rows : int
Number of trailing rows to include. 0 includes only the current row
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=rows, following=0, group_by=group_by, order_by=order_by
) | python | def trailing_window(rows, group_by=None, order_by=None):
"""Create a trailing window for use with aggregate window functions.
Parameters
----------
rows : int
Number of trailing rows to include. 0 includes only the current row
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window
"""
return Window(
preceding=rows, following=0, group_by=group_by, order_by=order_by
) | ['def', 'trailing_window', '(', 'rows', ',', 'group_by', '=', 'None', ',', 'order_by', '=', 'None', ')', ':', 'return', 'Window', '(', 'preceding', '=', 'rows', ',', 'following', '=', '0', ',', 'group_by', '=', 'group_by', ',', 'order_by', '=', 'order_by', ')'] | Create a trailing window for use with aggregate window functions.
Parameters
----------
rows : int
Number of trailing rows to include. 0 includes only the current row
group_by : expressions, default None
Either specify here or with TableExpr.group_by
order_by : expressions, default None
For analytic functions requiring an ordering, specify here, or let Ibis
determine the default ordering (for functions like rank)
Returns
-------
Window | ['Create', 'a', 'trailing', 'window', 'for', 'use', 'with', 'aggregate', 'window', 'functions', '.'] | train | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/window.py#L313-L333 |
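A brief sketch of using trailing_window from the ibis entry above. The table schema and column names are invented for the example; only trailing_window itself and its rows/group_by/order_by parameters are taken from the entry.

import ibis

t = ibis.table(
    [("time", "timestamp"), ("sym", "string"), ("price", "double")],
    name="quotes",
)

# Mean price over the current row plus the 4 preceding rows, per symbol
w = ibis.trailing_window(4, group_by=t.sym, order_by=t.time)
expr = t.price.mean().over(w)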
1,675 | fabioz/PyDev.Debugger | pydevd_attach_to_process/winappdbg/breakpoint.py | _BreakpointContainer.enable_all_breakpoints | def enable_all_breakpoints(self):
"""
Enables all disabled breakpoints in all processes.
@see:
enable_code_breakpoint,
enable_page_breakpoint,
enable_hardware_breakpoint
"""
# disable code breakpoints
for (pid, bp) in self.get_all_code_breakpoints():
if bp.is_disabled():
self.enable_code_breakpoint(pid, bp.get_address())
# disable page breakpoints
for (pid, bp) in self.get_all_page_breakpoints():
if bp.is_disabled():
self.enable_page_breakpoint(pid, bp.get_address())
# disable hardware breakpoints
for (tid, bp) in self.get_all_hardware_breakpoints():
if bp.is_disabled():
self.enable_hardware_breakpoint(tid, bp.get_address()) | python | def enable_all_breakpoints(self):
"""
Enables all disabled breakpoints in all processes.
@see:
enable_code_breakpoint,
enable_page_breakpoint,
enable_hardware_breakpoint
"""
# disable code breakpoints
for (pid, bp) in self.get_all_code_breakpoints():
if bp.is_disabled():
self.enable_code_breakpoint(pid, bp.get_address())
# disable page breakpoints
for (pid, bp) in self.get_all_page_breakpoints():
if bp.is_disabled():
self.enable_page_breakpoint(pid, bp.get_address())
# disable hardware breakpoints
for (tid, bp) in self.get_all_hardware_breakpoints():
if bp.is_disabled():
self.enable_hardware_breakpoint(tid, bp.get_address()) | ['def', 'enable_all_breakpoints', '(', 'self', ')', ':', '# disable code breakpoints', 'for', '(', 'pid', ',', 'bp', ')', 'in', 'self', '.', 'get_all_code_breakpoints', '(', ')', ':', 'if', 'bp', '.', 'is_disabled', '(', ')', ':', 'self', '.', 'enable_code_breakpoint', '(', 'pid', ',', 'bp', '.', 'get_address', '(', ')', ')', '# disable page breakpoints', 'for', '(', 'pid', ',', 'bp', ')', 'in', 'self', '.', 'get_all_page_breakpoints', '(', ')', ':', 'if', 'bp', '.', 'is_disabled', '(', ')', ':', 'self', '.', 'enable_page_breakpoint', '(', 'pid', ',', 'bp', '.', 'get_address', '(', ')', ')', '# disable hardware breakpoints', 'for', '(', 'tid', ',', 'bp', ')', 'in', 'self', '.', 'get_all_hardware_breakpoints', '(', ')', ':', 'if', 'bp', '.', 'is_disabled', '(', ')', ':', 'self', '.', 'enable_hardware_breakpoint', '(', 'tid', ',', 'bp', '.', 'get_address', '(', ')', ')'] | Enables all disabled breakpoints in all processes.
@see:
enable_code_breakpoint,
enable_page_breakpoint,
enable_hardware_breakpoint | ['Enables', 'all', 'disabled', 'breakpoints', 'in', 'all', 'processes', '.'] | train | https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L3153-L3176 |
1,676 | datastax/python-driver | cassandra/cqlengine/statements.py | BaseCQLStatement.add_conditional_clause | def add_conditional_clause(self, clause):
"""
Adds a iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: ConditionalClause
"""
clause.set_context_id(self.context_counter)
self.context_counter += clause.get_context_size()
self.conditionals.append(clause) | python | def add_conditional_clause(self, clause):
"""
Adds a iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: ConditionalClause
"""
clause.set_context_id(self.context_counter)
self.context_counter += clause.get_context_size()
self.conditionals.append(clause) | ['def', 'add_conditional_clause', '(', 'self', ',', 'clause', ')', ':', 'clause', '.', 'set_context_id', '(', 'self', '.', 'context_counter', ')', 'self', '.', 'context_counter', '+=', 'clause', '.', 'get_context_size', '(', ')', 'self', '.', 'conditionals', '.', 'append', '(', 'clause', ')'] | Adds a iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: ConditionalClause | ['Adds', 'a', 'iff', 'clause', 'to', 'this', 'statement'] | train | https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/statements.py#L554-L563 |
1,677 | jart/fabulous | fabulous/text.py | resolve_font | def resolve_font(name):
"""Turns font names into absolute filenames
This is case sensitive. The extension should be omitted.
For example::
>>> path = resolve_font('NotoSans-Bold')
>>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
>>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf')
>>> noto_path = os.path.abspath(noto_path)
>>> assert path == noto_path
Absolute paths are allowed::
>>> resolve_font(noto_path) == noto_path
True
Raises :exc:`FontNotFound` on failure::
>>> try:
... resolve_font('blahahaha')
... assert False
... except FontNotFound:
... pass
"""
if os.path.exists(name):
return os.path.abspath(name)
fonts = get_font_files()
if name in fonts:
return fonts[name]
raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name) | python | def resolve_font(name):
"""Turns font names into absolute filenames
This is case sensitive. The extension should be omitted.
For example::
>>> path = resolve_font('NotoSans-Bold')
>>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
>>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf')
>>> noto_path = os.path.abspath(noto_path)
>>> assert path == noto_path
Absolute paths are allowed::
>>> resolve_font(noto_path) == noto_path
True
Raises :exc:`FontNotFound` on failure::
>>> try:
... resolve_font('blahahaha')
... assert False
... except FontNotFound:
... pass
"""
if os.path.exists(name):
return os.path.abspath(name)
fonts = get_font_files()
if name in fonts:
return fonts[name]
raise FontNotFound("Can't find %r :'( Try adding it to ~/.fonts" % name) | ['def', 'resolve_font', '(', 'name', ')', ':', 'if', 'os', '.', 'path', '.', 'exists', '(', 'name', ')', ':', 'return', 'os', '.', 'path', '.', 'abspath', '(', 'name', ')', 'fonts', '=', 'get_font_files', '(', ')', 'if', 'name', 'in', 'fonts', ':', 'return', 'fonts', '[', 'name', ']', 'raise', 'FontNotFound', '(', '"Can\'t find %r :\'( Try adding it to ~/.fonts"', '%', 'name', ')'] | Turns font names into absolute filenames
This is case sensitive. The extension should be omitted.
For example::
>>> path = resolve_font('NotoSans-Bold')
>>> fontdir = os.path.join(os.path.dirname(__file__), 'fonts')
>>> noto_path = os.path.join(fontdir, 'NotoSans-Bold.ttf')
>>> noto_path = os.path.abspath(noto_path)
>>> assert path == noto_path
Absolute paths are allowed::
>>> resolve_font(noto_path) == noto_path
True
Raises :exc:`FontNotFound` on failure::
>>> try:
... resolve_font('blahahaha')
... assert False
... except FontNotFound:
... pass | ['Turns', 'font', 'names', 'into', 'absolute', 'filenames'] | train | https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/text.py#L143-L176 |
1,678 | googleapis/google-cloud-python | logging/google/cloud/logging_v2/gapic/metrics_service_v2_client.py | MetricsServiceV2Client.metric_path | def metric_path(cls, project, metric):
"""Return a fully-qualified metric string."""
return google.api_core.path_template.expand(
"projects/{project}/metrics/{metric}", project=project, metric=metric
) | python | def metric_path(cls, project, metric):
"""Return a fully-qualified metric string."""
return google.api_core.path_template.expand(
"projects/{project}/metrics/{metric}", project=project, metric=metric
) | ['def', 'metric_path', '(', 'cls', ',', 'project', ',', 'metric', ')', ':', 'return', 'google', '.', 'api_core', '.', 'path_template', '.', 'expand', '(', '"projects/{project}/metrics/{metric}"', ',', 'project', '=', 'project', ',', 'metric', '=', 'metric', ')'] | Return a fully-qualified metric string. | ['Return', 'a', 'fully', '-', 'qualified', 'metric', 'string', '.'] | train | https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging_v2/gapic/metrics_service_v2_client.py#L87-L91 |
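For the metric_path helper above, a one-line illustration of the expanded resource name; the project and metric IDs are placeholders, and the import path is assumed to be the one this client module normally ships under.

from google.cloud import logging_v2

path = logging_v2.MetricsServiceV2Client.metric_path("my-project", "error-count")
# path == "projects/my-project/metrics/error-count"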
1,679 | pandeylab/pythomics | pythomics/proteomics/parsers.py | MGFIterator.getScan | def getScan(self, title, peptide=None):
"""
allows random lookup
"""
if self.ra.has_key(title):
self.filename.seek(self.ra[title][0],0)
toRead = self.ra[title][1]-self.ra[title][0]
info = self.filename.read(toRead)
scan = self.parseScan(info)
else:
return None
return scan | python | def getScan(self, title, peptide=None):
"""
allows random lookup
"""
if self.ra.has_key(title):
self.filename.seek(self.ra[title][0],0)
toRead = self.ra[title][1]-self.ra[title][0]
info = self.filename.read(toRead)
scan = self.parseScan(info)
else:
return None
return scan | ['def', 'getScan', '(', 'self', ',', 'title', ',', 'peptide', '=', 'None', ')', ':', 'if', 'self', '.', 'ra', '.', 'has_key', '(', 'title', ')', ':', 'self', '.', 'filename', '.', 'seek', '(', 'self', '.', 'ra', '[', 'title', ']', '[', '0', ']', ',', '0', ')', 'toRead', '=', 'self', '.', 'ra', '[', 'title', ']', '[', '1', ']', '-', 'self', '.', 'ra', '[', 'title', ']', '[', '0', ']', 'info', '=', 'self', '.', 'filename', '.', 'read', '(', 'toRead', ')', 'scan', '=', 'self', '.', 'parseScan', '(', 'info', ')', 'else', ':', 'return', 'None', 'return', 'scan'] | allows random lookup | ['allows', 'random', 'lookup'] | train | https://github.com/pandeylab/pythomics/blob/ab0a5651a2e02a25def4d277b35fa09d1631bfcb/pythomics/proteomics/parsers.py#L962-L973 |
1,680 | pypa/pipenv | pipenv/vendor/jinja2/ext.py | InternationalizationExtension._parse_block | def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf) | python | def _parse_block(self, parser, allow_pluralize):
"""Parse until the next block tag with a given name."""
referenced = []
buf = []
while 1:
if parser.stream.current.type == 'data':
buf.append(parser.stream.current.value.replace('%', '%%'))
next(parser.stream)
elif parser.stream.current.type == 'variable_begin':
next(parser.stream)
name = parser.stream.expect('name').value
referenced.append(name)
buf.append('%%(%s)s' % name)
parser.stream.expect('variable_end')
elif parser.stream.current.type == 'block_begin':
next(parser.stream)
if parser.stream.current.test('name:endtrans'):
break
elif parser.stream.current.test('name:pluralize'):
if allow_pluralize:
break
parser.fail('a translatable section can have only one '
'pluralize section')
parser.fail('control structures in translatable sections are '
'not allowed')
elif parser.stream.eos:
parser.fail('unclosed translation block')
else:
assert False, 'internal parser error'
return referenced, concat(buf) | ['def', '_parse_block', '(', 'self', ',', 'parser', ',', 'allow_pluralize', ')', ':', 'referenced', '=', '[', ']', 'buf', '=', '[', ']', 'while', '1', ':', 'if', 'parser', '.', 'stream', '.', 'current', '.', 'type', '==', "'data'", ':', 'buf', '.', 'append', '(', 'parser', '.', 'stream', '.', 'current', '.', 'value', '.', 'replace', '(', "'%'", ',', "'%%'", ')', ')', 'next', '(', 'parser', '.', 'stream', ')', 'elif', 'parser', '.', 'stream', '.', 'current', '.', 'type', '==', "'variable_begin'", ':', 'next', '(', 'parser', '.', 'stream', ')', 'name', '=', 'parser', '.', 'stream', '.', 'expect', '(', "'name'", ')', '.', 'value', 'referenced', '.', 'append', '(', 'name', ')', 'buf', '.', 'append', '(', "'%%(%s)s'", '%', 'name', ')', 'parser', '.', 'stream', '.', 'expect', '(', "'variable_end'", ')', 'elif', 'parser', '.', 'stream', '.', 'current', '.', 'type', '==', "'block_begin'", ':', 'next', '(', 'parser', '.', 'stream', ')', 'if', 'parser', '.', 'stream', '.', 'current', '.', 'test', '(', "'name:endtrans'", ')', ':', 'break', 'elif', 'parser', '.', 'stream', '.', 'current', '.', 'test', '(', "'name:pluralize'", ')', ':', 'if', 'allow_pluralize', ':', 'break', 'parser', '.', 'fail', '(', "'a translatable section can have only one '", "'pluralize section'", ')', 'parser', '.', 'fail', '(', "'control structures in translatable sections are '", "'not allowed'", ')', 'elif', 'parser', '.', 'stream', '.', 'eos', ':', 'parser', '.', 'fail', '(', "'unclosed translation block'", ')', 'else', ':', 'assert', 'False', ',', "'internal parser error'", 'return', 'referenced', ',', 'concat', '(', 'buf', ')'] | Parse until the next block tag with a given name. | ['Parse', 'until', 'the', 'next', 'block', 'tag', 'with', 'a', 'given', 'name', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/jinja2/ext.py#L325-L355 |
1,681 | awslabs/serverless-application-model | samtranslator/intrinsics/actions.py | SubAction._handle_sub_action | def _handle_sub_action(self, input_dict, handler):
"""
Handles resolving replacements in the Sub action based on the handler that is passed as an input.
:param input_dict: Dictionary to be resolved
:param supported_values: One of several different objects that contain the supported values that
need to be changed. See each method above for specifics on these objects.
:param handler: handler that is specific to each implementation.
:return: Resolved value of the Sub dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
sub_value = input_dict[key]
input_dict[key] = self._handle_sub_value(sub_value, handler)
return input_dict | python | def _handle_sub_action(self, input_dict, handler):
"""
Handles resolving replacements in the Sub action based on the handler that is passed as an input.
:param input_dict: Dictionary to be resolved
:param supported_values: One of several different objects that contain the supported values that
need to be changed. See each method above for specifics on these objects.
:param handler: handler that is specific to each implementation.
:return: Resolved value of the Sub dictionary
"""
if not self.can_handle(input_dict):
return input_dict
key = self.intrinsic_name
sub_value = input_dict[key]
input_dict[key] = self._handle_sub_value(sub_value, handler)
return input_dict | ['def', '_handle_sub_action', '(', 'self', ',', 'input_dict', ',', 'handler', ')', ':', 'if', 'not', 'self', '.', 'can_handle', '(', 'input_dict', ')', ':', 'return', 'input_dict', 'key', '=', 'self', '.', 'intrinsic_name', 'sub_value', '=', 'input_dict', '[', 'key', ']', 'input_dict', '[', 'key', ']', '=', 'self', '.', '_handle_sub_value', '(', 'sub_value', ',', 'handler', ')', 'return', 'input_dict'] | Handles resolving replacements in the Sub action based on the handler that is passed as an input.
:param input_dict: Dictionary to be resolved
:param supported_values: One of several different objects that contain the supported values that
need to be changed. See each method above for specifics on these objects.
:param handler: handler that is specific to each implementation.
:return: Resolved value of the Sub dictionary | ['Handles', 'resolving', 'replacements', 'in', 'the', 'Sub', 'action', 'based', 'on', 'the', 'handler', 'that', 'is', 'passed', 'as', 'an', 'input', '.'] | train | https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/intrinsics/actions.py#L311-L329 |
1,682 | inspirehep/refextract | refextract/references/tag.py | sum_2_dictionaries | def sum_2_dictionaries(dicta, dictb):
"""Given two dictionaries of totals, where each total refers to a key
in the dictionary, add the totals.
E.g.: dicta = { 'a' : 3, 'b' : 1 }
dictb = { 'a' : 1, 'c' : 5 }
dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 }
@param dicta: (dictionary)
@param dictb: (dictionary)
@return: (dictionary) - the sum of the 2 dictionaries
"""
dict_out = dicta.copy()
for key in dictb.keys():
if 'key' in dict_out:
# Add the sum for key in dictb to that of dict_out:
dict_out[key] += dictb[key]
else:
# the key is not in the first dictionary - add it directly:
dict_out[key] = dictb[key]
return dict_out | python | def sum_2_dictionaries(dicta, dictb):
"""Given two dictionaries of totals, where each total refers to a key
in the dictionary, add the totals.
E.g.: dicta = { 'a' : 3, 'b' : 1 }
dictb = { 'a' : 1, 'c' : 5 }
dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 }
@param dicta: (dictionary)
@param dictb: (dictionary)
@return: (dictionary) - the sum of the 2 dictionaries
"""
dict_out = dicta.copy()
for key in dictb.keys():
if 'key' in dict_out:
# Add the sum for key in dictb to that of dict_out:
dict_out[key] += dictb[key]
else:
# the key is not in the first dictionary - add it directly:
dict_out[key] = dictb[key]
return dict_out | ['def', 'sum_2_dictionaries', '(', 'dicta', ',', 'dictb', ')', ':', 'dict_out', '=', 'dicta', '.', 'copy', '(', ')', 'for', 'key', 'in', 'dictb', '.', 'keys', '(', ')', ':', 'if', "'key'", 'in', 'dict_out', ':', '# Add the sum for key in dictb to that of dict_out:', 'dict_out', '[', 'key', ']', '+=', 'dictb', '[', 'key', ']', 'else', ':', '# the key is not in the first dictionary - add it directly:', 'dict_out', '[', 'key', ']', '=', 'dictb', '[', 'key', ']', 'return', 'dict_out'] | Given two dictionaries of totals, where each total refers to a key
in the dictionary, add the totals.
E.g.: dicta = { 'a' : 3, 'b' : 1 }
dictb = { 'a' : 1, 'c' : 5 }
dicta + dictb = { 'a' : 4, 'b' : 1, 'c' : 5 }
@param dicta: (dictionary)
@param dictb: (dictionary)
@return: (dictionary) - the sum of the 2 dictionaries | ['Given', 'two', 'dictionaries', 'of', 'totals', 'where', 'each', 'total', 'refers', 'to', 'a', 'key', 'in', 'the', 'dictionary', 'add', 'the', 'totals', '.', 'E', '.', 'g', '.', ':', 'dicta', '=', '{', 'a', ':', '3', 'b', ':', '1', '}', 'dictb', '=', '{', 'a', ':', '1', 'c', ':', '5', '}', 'dicta', '+', 'dictb', '=', '{', 'a', ':', '4', 'b', ':', '1', 'c', ':', '5', '}'] | train | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L1031-L1049 |
1,683 | zhexiao/ezhost | ezhost/ServerCommon.py | ServerCommon.spark_config | def spark_config(self):
"""
config spark
:return:
"""
configs = [
'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'.format(
bigdata_conf.hadoop_home
),
'export SPARK_LOCAL_IP={0}'.format(env.host_string)
]
append(bigdata_conf.global_env_home, configs, use_sudo=True)
run('source {0}'.format(bigdata_conf.global_env_home)) | python | def spark_config(self):
"""
config spark
:return:
"""
configs = [
'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'.format(
bigdata_conf.hadoop_home
),
'export SPARK_LOCAL_IP={0}'.format(env.host_string)
]
append(bigdata_conf.global_env_home, configs, use_sudo=True)
run('source {0}'.format(bigdata_conf.global_env_home)) | ['def', 'spark_config', '(', 'self', ')', ':', 'configs', '=', '[', "'export LD_LIBRARY_PATH={0}/lib/native/:$LD_LIBRARY_PATH'", '.', 'format', '(', 'bigdata_conf', '.', 'hadoop_home', ')', ',', "'export SPARK_LOCAL_IP={0}'", '.', 'format', '(', 'env', '.', 'host_string', ')', ']', 'append', '(', 'bigdata_conf', '.', 'global_env_home', ',', 'configs', ',', 'use_sudo', '=', 'True', ')', 'run', '(', "'source {0}'", '.', 'format', '(', 'bigdata_conf', '.', 'global_env_home', ')', ')'] | config spark
:return: | ['config', 'spark', ':', 'return', ':'] | train | https://github.com/zhexiao/ezhost/blob/4146bc0be14bb1bfe98ec19283d19fab420871b3/ezhost/ServerCommon.py#L350-L363 |
1,684 | great-expectations/great_expectations | great_expectations/dataset/util.py | continuous_partition_data | def continuous_partition_data(data, bins='auto', n_bins=10):
"""Convenience method for building a partition object on continuous data
Args:
data (list-like): The data from which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
"""
if bins == 'uniform':
bins = np.linspace(start=np.min(data), stop=np.max(data), num=n_bins+1)
elif bins == 'ntile':
bins = np.percentile(data, np.linspace(
start=0, stop=100, num=n_bins+1))
elif bins != 'auto':
raise ValueError("Invalid parameter for bins argument")
hist, bin_edges = np.histogram(data, bins, density=False)
return {
"bins": bin_edges,
"weights": hist / len(data)
} | python | def continuous_partition_data(data, bins='auto', n_bins=10):
"""Convenience method for building a partition object on continuous data
Args:
data (list-like): The data from which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
}
"""
if bins == 'uniform':
bins = np.linspace(start=np.min(data), stop=np.max(data), num=n_bins+1)
elif bins == 'ntile':
bins = np.percentile(data, np.linspace(
start=0, stop=100, num=n_bins+1))
elif bins != 'auto':
raise ValueError("Invalid parameter for bins argument")
hist, bin_edges = np.histogram(data, bins, density=False)
return {
"bins": bin_edges,
"weights": hist / len(data)
} | ['def', 'continuous_partition_data', '(', 'data', ',', 'bins', '=', "'auto'", ',', 'n_bins', '=', '10', ')', ':', 'if', 'bins', '==', "'uniform'", ':', 'bins', '=', 'np', '.', 'linspace', '(', 'start', '=', 'np', '.', 'min', '(', 'data', ')', ',', 'stop', '=', 'np', '.', 'max', '(', 'data', ')', ',', 'num', '=', 'n_bins', '+', '1', ')', 'elif', 'bins', '==', "'ntile'", ':', 'bins', '=', 'np', '.', 'percentile', '(', 'data', ',', 'np', '.', 'linspace', '(', 'start', '=', '0', ',', 'stop', '=', '100', ',', 'num', '=', 'n_bins', '+', '1', ')', ')', 'elif', 'bins', '!=', "'auto'", ':', 'raise', 'ValueError', '(', '"Invalid parameter for bins argument"', ')', 'hist', ',', 'bin_edges', '=', 'np', '.', 'histogram', '(', 'data', ',', 'bins', ',', 'density', '=', 'False', ')', 'return', '{', '"bins"', ':', 'bin_edges', ',', '"weights"', ':', 'hist', '/', 'len', '(', 'data', ')', '}'] | Convenience method for building a partition object on continuous data
Args:
data (list-like): The data from which to construct the estimate.
bins (string): One of 'uniform' (for uniformly spaced bins), 'ntile' (for percentile-spaced bins), or 'auto' (for automatically spaced bins)
n_bins (int): Ignored if bins is auto.
Returns:
A new partition_object::
{
"bins": (list) The endpoints of the partial partition of reals,
"weights": (list) The densities of the bins implied by the partition.
} | ['Convenience', 'method', 'for', 'building', 'a', 'partition', 'object', 'on', 'continuous', 'data'] | train | https://github.com/great-expectations/great_expectations/blob/08385c40529d4f14a1c46916788aecc47f33ee9d/great_expectations/dataset/util.py#L131-L160 |
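A quick sketch of calling continuous_partition_data above on synthetic data; the random normal sample is just an example input.

import numpy as np

data = np.random.normal(loc=0.0, scale=1.0, size=1000)

# Ten uniformly spaced bins between min(data) and max(data)
partition = continuous_partition_data(data, bins="uniform", n_bins=10)
print(partition["bins"])     # 11 bin edges
print(partition["weights"])  # fraction of points per bin, sums to ~1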
1,685 | odlgroup/odl | odl/space/pspace.py | ProductSpace.astype | def astype(self, dtype):
"""Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `ProductSpace`
Version of this space with given data type.
"""
if dtype is None:
# Need to filter this out since Numpy iterprets it as 'float'
raise ValueError('`None` is not a valid data type')
dtype = np.dtype(dtype)
current_dtype = getattr(self, 'dtype', object)
if dtype == current_dtype:
return self
else:
return ProductSpace(*[space.astype(dtype)
for space in self.spaces]) | python | def astype(self, dtype):
"""Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `ProductSpace`
Version of this space with given data type.
"""
if dtype is None:
# Need to filter this out since Numpy iterprets it as 'float'
raise ValueError('`None` is not a valid data type')
dtype = np.dtype(dtype)
current_dtype = getattr(self, 'dtype', object)
if dtype == current_dtype:
return self
else:
return ProductSpace(*[space.astype(dtype)
for space in self.spaces]) | ['def', 'astype', '(', 'self', ',', 'dtype', ')', ':', 'if', 'dtype', 'is', 'None', ':', "# Need to filter this out since Numpy iterprets it as 'float'", 'raise', 'ValueError', '(', "'`None` is not a valid data type'", ')', 'dtype', '=', 'np', '.', 'dtype', '(', 'dtype', ')', 'current_dtype', '=', 'getattr', '(', 'self', ',', "'dtype'", ',', 'object', ')', 'if', 'dtype', '==', 'current_dtype', ':', 'return', 'self', 'else', ':', 'return', 'ProductSpace', '(', '*', '[', 'space', '.', 'astype', '(', 'dtype', ')', 'for', 'space', 'in', 'self', '.', 'spaces', ']', ')'] | Return a copy of this space with new ``dtype``.
Parameters
----------
dtype :
Scalar data type of the returned space. Can be provided
in any way the `numpy.dtype` constructor understands, e.g.
as built-in type or as a string. Data types with non-trivial
shapes are not allowed.
Returns
-------
newspace : `ProductSpace`
Version of this space with given data type. | ['Return', 'a', 'copy', 'of', 'this', 'space', 'with', 'new', 'dtype', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/pspace.py#L410-L437 |
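A minimal sketch for ProductSpace.astype above, assuming the usual ODL constructors (odl.rn and odl.ProductSpace); the sizes are arbitrary.

import odl

pspace = odl.ProductSpace(odl.rn(3), 2)  # two copies of R^3, float64 by default
pspace32 = pspace.astype('float32')      # same structure, single-precision components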
1,686 | chemlab/chemlab | chemlab/mviewer/api/display.py | load_molecule | def load_molecule(name, format=None):
'''Read a `~chemlab.core.Molecule` from a file.
.. seealso:: `chemlab.io.datafile`
'''
mol = datafile(name, format=format).read('molecule')
display_system(System([mol])) | python | def load_molecule(name, format=None):
'''Read a `~chemlab.core.Molecule` from a file.
.. seealso:: `chemlab.io.datafile`
'''
mol = datafile(name, format=format).read('molecule')
display_system(System([mol])) | ['def', 'load_molecule', '(', 'name', ',', 'format', '=', 'None', ')', ':', 'mol', '=', 'datafile', '(', 'name', ',', 'format', '=', 'format', ')', '.', 'read', '(', "'molecule'", ')', 'display_system', '(', 'System', '(', '[', 'mol', ']', ')', ')'] | Read a `~chemlab.core.Molecule` from a file.
.. seealso:: `chemlab.io.datafile` | ['Read', 'a', '~chemlab', '.', 'core', '.', 'Molecule', 'from', 'a', 'file', '.'] | train | https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/mviewer/api/display.py#L54-L61 |
1,687 | apache/spark | python/pyspark/sql/session.py | SparkSession._convert_from_pandas | def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records] | python | def _convert_from_pandas(self, pdf, schema, timezone):
"""
Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records
"""
if timezone is not None:
from pyspark.sql.types import _check_series_convert_timestamps_tz_local
copied = False
if isinstance(schema, StructType):
for field in schema:
# TODO: handle nested timestamps, such as ArrayType(TimestampType())?
if isinstance(field.dataType, TimestampType):
s = _check_series_convert_timestamps_tz_local(pdf[field.name], timezone)
if s is not pdf[field.name]:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[field.name] = s
else:
for column, series in pdf.iteritems():
s = _check_series_convert_timestamps_tz_local(series, timezone)
if s is not series:
if not copied:
# Copy once if the series is modified to prevent the original
# Pandas DataFrame from being updated
pdf = pdf.copy()
copied = True
pdf[column] = s
# Convert pandas.DataFrame to list of numpy records
np_records = pdf.to_records(index=False)
# Check if any columns need to be fixed for Spark to infer properly
if len(np_records) > 0:
record_dtype = self._get_numpy_record_dtype(np_records[0])
if record_dtype is not None:
return [r.astype(record_dtype).tolist() for r in np_records]
# Convert list of numpy records to python lists
return [r.tolist() for r in np_records] | ['def', '_convert_from_pandas', '(', 'self', ',', 'pdf', ',', 'schema', ',', 'timezone', ')', ':', 'if', 'timezone', 'is', 'not', 'None', ':', 'from', 'pyspark', '.', 'sql', '.', 'types', 'import', '_check_series_convert_timestamps_tz_local', 'copied', '=', 'False', 'if', 'isinstance', '(', 'schema', ',', 'StructType', ')', ':', 'for', 'field', 'in', 'schema', ':', '# TODO: handle nested timestamps, such as ArrayType(TimestampType())?', 'if', 'isinstance', '(', 'field', '.', 'dataType', ',', 'TimestampType', ')', ':', 's', '=', '_check_series_convert_timestamps_tz_local', '(', 'pdf', '[', 'field', '.', 'name', ']', ',', 'timezone', ')', 'if', 's', 'is', 'not', 'pdf', '[', 'field', '.', 'name', ']', ':', 'if', 'not', 'copied', ':', '# Copy once if the series is modified to prevent the original', '# Pandas DataFrame from being updated', 'pdf', '=', 'pdf', '.', 'copy', '(', ')', 'copied', '=', 'True', 'pdf', '[', 'field', '.', 'name', ']', '=', 's', 'else', ':', 'for', 'column', ',', 'series', 'in', 'pdf', '.', 'iteritems', '(', ')', ':', 's', '=', '_check_series_convert_timestamps_tz_local', '(', 'series', ',', 'timezone', ')', 'if', 's', 'is', 'not', 'series', ':', 'if', 'not', 'copied', ':', '# Copy once if the series is modified to prevent the original', '# Pandas DataFrame from being updated', 'pdf', '=', 'pdf', '.', 'copy', '(', ')', 'copied', '=', 'True', 'pdf', '[', 'column', ']', '=', 's', '# Convert pandas.DataFrame to list of numpy records', 'np_records', '=', 'pdf', '.', 'to_records', '(', 'index', '=', 'False', ')', '# Check if any columns need to be fixed for Spark to infer properly', 'if', 'len', '(', 'np_records', ')', '>', '0', ':', 'record_dtype', '=', 'self', '.', '_get_numpy_record_dtype', '(', 'np_records', '[', '0', ']', ')', 'if', 'record_dtype', 'is', 'not', 'None', ':', 'return', '[', 'r', '.', 'astype', '(', 'record_dtype', ')', '.', 'tolist', '(', ')', 'for', 'r', 'in', 'np_records', ']', '# Convert list of numpy records to python lists', 'return', '[', 'r', '.', 'tolist', '(', ')', 'for', 'r', 'in', 'np_records', ']'] | Convert a pandas.DataFrame to list of records that can be used to make a DataFrame
:return list of records | ['Convert', 'a', 'pandas', '.', 'DataFrame', 'to', 'list', 'of', 'records', 'that', 'can', 'be', 'used', 'to', 'make', 'a', 'DataFrame', ':', 'return', 'list', 'of', 'records'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L484-L525 |
1,688 | adamheins/r12 | r12/shell.py | ShellStyle.warn | def warn(self, cmd, desc=''):
''' Style for warning message. '''
return self._label_desc(cmd, desc, self.warn_color) | python | def warn(self, cmd, desc=''):
''' Style for warning message. '''
return self._label_desc(cmd, desc, self.warn_color) | ['def', 'warn', '(', 'self', ',', 'cmd', ',', 'desc', '=', "''", ')', ':', 'return', 'self', '.', '_label_desc', '(', 'cmd', ',', 'desc', ',', 'self', '.', 'warn_color', ')'] | Style for warning message. | ['Style', 'for', 'warning', 'message', '.'] | train | https://github.com/adamheins/r12/blob/ff78178332140930bf46a94a0b15ee082bb92491/r12/shell.py#L53-L55 |
1,689 | SHTOOLS/SHTOOLS | pyshtools/shclasses/shtensor.py | Tensor.compute_invar | def compute_invar(self):
"""
Compute the three invariants (I0, I1, I2) of the tensor, as well as
the quantity I = -(I2/2)**2 / (I1/3)**3.
"""
self.i0 = self.vxx + self.vyy + self.vzz
self.i1 = (self.vxx*self.vyy + self.vyy*self.vzz + self.vxx*self.vzz -
self.vxy**2 - self.vyz**2 - self.vxz**2)
self.i2 = (self.vxx*(self.vyy*self.vzz - self.vyz**2) +
self.vxy*(self.vyz*self.vxz - self.vxy*self.vzz) +
self.vxz*(self.vxy*self.vyz - self.vxz*self.vyy))
self.i = (-1.) * (self.i2 / 2.)**2
self.i.data[1:, :] /= (self.i1.data[1:, :] / 3.)**3 | python | def compute_invar(self):
"""
Compute the three invariants (I0, I1, I2) of the tensor, as well as
the quantity I = -(I2/2)**2 / (I1/3)**3.
"""
self.i0 = self.vxx + self.vyy + self.vzz
self.i1 = (self.vxx*self.vyy + self.vyy*self.vzz + self.vxx*self.vzz -
self.vxy**2 - self.vyz**2 - self.vxz**2)
self.i2 = (self.vxx*(self.vyy*self.vzz - self.vyz**2) +
self.vxy*(self.vyz*self.vxz - self.vxy*self.vzz) +
self.vxz*(self.vxy*self.vyz - self.vxz*self.vyy))
self.i = (-1.) * (self.i2 / 2.)**2
self.i.data[1:, :] /= (self.i1.data[1:, :] / 3.)**3 | ['def', 'compute_invar', '(', 'self', ')', ':', 'self', '.', 'i0', '=', 'self', '.', 'vxx', '+', 'self', '.', 'vyy', '+', 'self', '.', 'vzz', 'self', '.', 'i1', '=', '(', 'self', '.', 'vxx', '*', 'self', '.', 'vyy', '+', 'self', '.', 'vyy', '*', 'self', '.', 'vzz', '+', 'self', '.', 'vxx', '*', 'self', '.', 'vzz', '-', 'self', '.', 'vxy', '**', '2', '-', 'self', '.', 'vyz', '**', '2', '-', 'self', '.', 'vxz', '**', '2', ')', 'self', '.', 'i2', '=', '(', 'self', '.', 'vxx', '*', '(', 'self', '.', 'vyy', '*', 'self', '.', 'vzz', '-', 'self', '.', 'vyz', '**', '2', ')', '+', 'self', '.', 'vxy', '*', '(', 'self', '.', 'vyz', '*', 'self', '.', 'vxz', '-', 'self', '.', 'vxy', '*', 'self', '.', 'vzz', ')', '+', 'self', '.', 'vxz', '*', '(', 'self', '.', 'vxy', '*', 'self', '.', 'vyz', '-', 'self', '.', 'vxz', '*', 'self', '.', 'vyy', ')', ')', 'self', '.', 'i', '=', '(', '-', '1.', ')', '*', '(', 'self', '.', 'i2', '/', '2.', ')', '**', '2', 'self', '.', 'i', '.', 'data', '[', '1', ':', ',', ':', ']', '/=', '(', 'self', '.', 'i1', '.', 'data', '[', '1', ':', ',', ':', ']', '/', '3.', ')', '**', '3'] | Compute the three invariants (I0, I1, I2) of the tensor, as well as
the quantity I = -(I2/2)**2 / (I1/3)**3. | ['Compute', 'the', 'three', 'invariants', '(', 'I0', 'I1', 'I2', ')', 'of', 'the', 'tensor', 'as', 'well', 'as', 'the', 'quantity', 'I', '=', '-', '(', 'I2', '/', '2', ')', '**', '2', '/', '(', 'I1', '/', '3', ')', '**', '3', '.'] | train | https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shtensor.py#L30-L42 |
1,690 | markuskiller/textblob-de | textblob_de/ext/_pattern/text/__init__.py | Parser.find_keywords | def find_keywords(self, string, **kwargs):
""" Returns a sorted list of keywords in the given string.
"""
return find_keywords(string,
parser = self,
top = kwargs.pop("top", 10),
frequency = kwargs.pop("frequency", {}), **kwargs
) | python | def find_keywords(self, string, **kwargs):
""" Returns a sorted list of keywords in the given string.
"""
return find_keywords(string,
parser = self,
top = kwargs.pop("top", 10),
frequency = kwargs.pop("frequency", {}), **kwargs
) | ['def', 'find_keywords', '(', 'self', ',', 'string', ',', '*', '*', 'kwargs', ')', ':', 'return', 'find_keywords', '(', 'string', ',', 'parser', '=', 'self', ',', 'top', '=', 'kwargs', '.', 'pop', '(', '"top"', ',', '10', ')', ',', 'frequency', '=', 'kwargs', '.', 'pop', '(', '"frequency"', ',', '{', '}', ')', ',', '*', '*', 'kwargs', ')'] | Returns a sorted list of keywords in the given string. | ['Returns', 'a', 'sorted', 'list', 'of', 'keywords', 'in', 'the', 'given', 'string', '.'] | train | https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/ext/_pattern/text/__init__.py#L710-L717 |
1,691 | odlgroup/odl | odl/phantom/geometric.py | _ellipsoid_phantom_3d | def _ellipsoid_phantom_3d(space, ellipsoids):
"""Create an ellipsoid phantom in 3d space.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be generated. If ``space.shape`` is
1 in an axis, a corresponding slice of the phantom is created
(instead of squashing the whole phantom into the slice).
ellipsoids : list of lists
Each row should contain the entries ::
'value',
'axis_1', 'axis_2', 'axis_3',
'center_x', 'center_y', 'center_z',
'rotation_phi', 'rotation_theta', 'rotation_psi'
The provided ellipsoids need to be specified relative to the
reference cube ``[-1, -1, -1] x [1, 1, 1]``. Angles are to be given
in radians.
Returns
-------
phantom : ``space`` element
3D ellipsoid phantom in ``space``.
See Also
--------
shepp_logan : The typical use-case for this function.
"""
# Blank volume
p = np.zeros(space.shape, dtype=space.dtype)
minp = space.grid.min_pt
maxp = space.grid.max_pt
# Create the pixel grid
grid_in = space.grid.meshgrid
# Move points to [-1, 1]
grid = []
for i in range(3):
mean_i = (minp[i] + maxp[i]) / 2.0
# Where space.shape = 1, we have minp = maxp, so we set diff_i = 1
# to avoid division by zero. Effectively, this allows constructing
# a slice of a 3D phantom.
diff_i = (maxp[i] - minp[i]) / 2.0 or 1.0
grid.append((grid_in[i] - mean_i) / diff_i)
for ellip in ellipsoids:
assert len(ellip) == 10
intensity = ellip[0]
a_squared = ellip[1] ** 2
b_squared = ellip[2] ** 2
c_squared = ellip[3] ** 2
x0 = ellip[4]
y0 = ellip[5]
z0 = ellip[6]
phi = ellip[7]
theta = ellip[8]
psi = ellip[9]
scales = [1 / a_squared, 1 / b_squared, 1 / c_squared]
center = (np.array([x0, y0, z0]) + 1.0) / 2.0
# Create the offset x,y and z values for the grid
if any([phi, theta, psi]):
# Rotate the points to the expected coordinate system.
cphi = np.cos(phi)
sphi = np.sin(phi)
ctheta = np.cos(theta)
stheta = np.sin(theta)
cpsi = np.cos(psi)
spsi = np.sin(psi)
mat = np.array([[cpsi * cphi - ctheta * sphi * spsi,
cpsi * sphi + ctheta * cphi * spsi,
spsi * stheta],
[-spsi * cphi - ctheta * sphi * cpsi,
-spsi * sphi + ctheta * cphi * cpsi,
cpsi * stheta],
[stheta * sphi,
-stheta * cphi,
ctheta]])
# Calculate the points that could possibly be inside the volume
# Since the points are rotated, we cannot do anything directional
# without more logic
max_radius = np.sqrt(
np.abs(mat).dot([a_squared, b_squared, c_squared]))
idx, shapes = _getshapes_3d(center, max_radius, space.shape)
subgrid = [g[idi] for g, idi in zip(grid, shapes)]
offset_points = [vec * (xi - x0i)[..., None]
for xi, vec, x0i in zip(subgrid,
mat.T,
[x0, y0, z0])]
rotated = offset_points[0] + offset_points[1] + offset_points[2]
np.square(rotated, out=rotated)
radius = np.dot(rotated, scales)
else:
# Calculate the points that could possibly be inside the volume
max_radius = np.sqrt([a_squared, b_squared, c_squared])
idx, shapes = _getshapes_3d(center, max_radius, space.shape)
subgrid = [g[idi] for g, idi in zip(grid, shapes)]
squared_dist = [ai * (xi - x0i) ** 2
for xi, ai, x0i in zip(subgrid,
scales,
[x0, y0, z0])]
# Parentheses to get best order for broadcasting
radius = squared_dist[0] + (squared_dist[1] + squared_dist[2])
# Find the points within the ellipse
inside = radius <= 1
# Add the ellipse intensity to those points
p[idx][inside] += intensity
return space.element(p) | python | def _ellipsoid_phantom_3d(space, ellipsoids):
"""Create an ellipsoid phantom in 3d space.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be generated. If ``space.shape`` is
1 in an axis, a corresponding slice of the phantom is created
(instead of squashing the whole phantom into the slice).
ellipsoids : list of lists
Each row should contain the entries ::
'value',
'axis_1', 'axis_2', 'axis_3',
'center_x', 'center_y', 'center_z',
'rotation_phi', 'rotation_theta', 'rotation_psi'
The provided ellipsoids need to be specified relative to the
reference cube ``[-1, -1, -1] x [1, 1, 1]``. Angles are to be given
in radians.
Returns
-------
phantom : ``space`` element
3D ellipsoid phantom in ``space``.
See Also
--------
shepp_logan : The typical use-case for this function.
"""
# Blank volume
p = np.zeros(space.shape, dtype=space.dtype)
minp = space.grid.min_pt
maxp = space.grid.max_pt
# Create the pixel grid
grid_in = space.grid.meshgrid
# Move points to [-1, 1]
grid = []
for i in range(3):
mean_i = (minp[i] + maxp[i]) / 2.0
# Where space.shape = 1, we have minp = maxp, so we set diff_i = 1
# to avoid division by zero. Effectively, this allows constructing
# a slice of a 3D phantom.
diff_i = (maxp[i] - minp[i]) / 2.0 or 1.0
grid.append((grid_in[i] - mean_i) / diff_i)
for ellip in ellipsoids:
assert len(ellip) == 10
intensity = ellip[0]
a_squared = ellip[1] ** 2
b_squared = ellip[2] ** 2
c_squared = ellip[3] ** 2
x0 = ellip[4]
y0 = ellip[5]
z0 = ellip[6]
phi = ellip[7]
theta = ellip[8]
psi = ellip[9]
scales = [1 / a_squared, 1 / b_squared, 1 / c_squared]
center = (np.array([x0, y0, z0]) + 1.0) / 2.0
# Create the offset x,y and z values for the grid
if any([phi, theta, psi]):
# Rotate the points to the expected coordinate system.
cphi = np.cos(phi)
sphi = np.sin(phi)
ctheta = np.cos(theta)
stheta = np.sin(theta)
cpsi = np.cos(psi)
spsi = np.sin(psi)
mat = np.array([[cpsi * cphi - ctheta * sphi * spsi,
cpsi * sphi + ctheta * cphi * spsi,
spsi * stheta],
[-spsi * cphi - ctheta * sphi * cpsi,
-spsi * sphi + ctheta * cphi * cpsi,
cpsi * stheta],
[stheta * sphi,
-stheta * cphi,
ctheta]])
# Calculate the points that could possibly be inside the volume
# Since the points are rotated, we cannot do anything directional
# without more logic
max_radius = np.sqrt(
np.abs(mat).dot([a_squared, b_squared, c_squared]))
idx, shapes = _getshapes_3d(center, max_radius, space.shape)
subgrid = [g[idi] for g, idi in zip(grid, shapes)]
offset_points = [vec * (xi - x0i)[..., None]
for xi, vec, x0i in zip(subgrid,
mat.T,
[x0, y0, z0])]
rotated = offset_points[0] + offset_points[1] + offset_points[2]
np.square(rotated, out=rotated)
radius = np.dot(rotated, scales)
else:
# Calculate the points that could possibly be inside the volume
max_radius = np.sqrt([a_squared, b_squared, c_squared])
idx, shapes = _getshapes_3d(center, max_radius, space.shape)
subgrid = [g[idi] for g, idi in zip(grid, shapes)]
squared_dist = [ai * (xi - x0i) ** 2
for xi, ai, x0i in zip(subgrid,
scales,
[x0, y0, z0])]
# Parentheses to get best order for broadcasting
radius = squared_dist[0] + (squared_dist[1] + squared_dist[2])
# Find the points within the ellipse
inside = radius <= 1
# Add the ellipse intensity to those points
p[idx][inside] += intensity
return space.element(p) | ['def', '_ellipsoid_phantom_3d', '(', 'space', ',', 'ellipsoids', ')', ':', '# Blank volume', 'p', '=', 'np', '.', 'zeros', '(', 'space', '.', 'shape', ',', 'dtype', '=', 'space', '.', 'dtype', ')', 'minp', '=', 'space', '.', 'grid', '.', 'min_pt', 'maxp', '=', 'space', '.', 'grid', '.', 'max_pt', '# Create the pixel grid', 'grid_in', '=', 'space', '.', 'grid', '.', 'meshgrid', '# Move points to [-1, 1]', 'grid', '=', '[', ']', 'for', 'i', 'in', 'range', '(', '3', ')', ':', 'mean_i', '=', '(', 'minp', '[', 'i', ']', '+', 'maxp', '[', 'i', ']', ')', '/', '2.0', '# Where space.shape = 1, we have minp = maxp, so we set diff_i = 1', '# to avoid division by zero. Effectively, this allows constructing', '# a slice of a 3D phantom.', 'diff_i', '=', '(', 'maxp', '[', 'i', ']', '-', 'minp', '[', 'i', ']', ')', '/', '2.0', 'or', '1.0', 'grid', '.', 'append', '(', '(', 'grid_in', '[', 'i', ']', '-', 'mean_i', ')', '/', 'diff_i', ')', 'for', 'ellip', 'in', 'ellipsoids', ':', 'assert', 'len', '(', 'ellip', ')', '==', '10', 'intensity', '=', 'ellip', '[', '0', ']', 'a_squared', '=', 'ellip', '[', '1', ']', '**', '2', 'b_squared', '=', 'ellip', '[', '2', ']', '**', '2', 'c_squared', '=', 'ellip', '[', '3', ']', '**', '2', 'x0', '=', 'ellip', '[', '4', ']', 'y0', '=', 'ellip', '[', '5', ']', 'z0', '=', 'ellip', '[', '6', ']', 'phi', '=', 'ellip', '[', '7', ']', 'theta', '=', 'ellip', '[', '8', ']', 'psi', '=', 'ellip', '[', '9', ']', 'scales', '=', '[', '1', '/', 'a_squared', ',', '1', '/', 'b_squared', ',', '1', '/', 'c_squared', ']', 'center', '=', '(', 'np', '.', 'array', '(', '[', 'x0', ',', 'y0', ',', 'z0', ']', ')', '+', '1.0', ')', '/', '2.0', '# Create the offset x,y and z values for the grid', 'if', 'any', '(', '[', 'phi', ',', 'theta', ',', 'psi', ']', ')', ':', '# Rotate the points to the expected coordinate system.', 'cphi', '=', 'np', '.', 'cos', '(', 'phi', ')', 'sphi', '=', 'np', '.', 'sin', '(', 'phi', ')', 'ctheta', '=', 'np', '.', 'cos', '(', 'theta', ')', 'stheta', '=', 'np', '.', 'sin', '(', 'theta', ')', 'cpsi', '=', 'np', '.', 'cos', '(', 'psi', ')', 'spsi', '=', 'np', '.', 'sin', '(', 'psi', ')', 'mat', '=', 'np', '.', 'array', '(', '[', '[', 'cpsi', '*', 'cphi', '-', 'ctheta', '*', 'sphi', '*', 'spsi', ',', 'cpsi', '*', 'sphi', '+', 'ctheta', '*', 'cphi', '*', 'spsi', ',', 'spsi', '*', 'stheta', ']', ',', '[', '-', 'spsi', '*', 'cphi', '-', 'ctheta', '*', 'sphi', '*', 'cpsi', ',', '-', 'spsi', '*', 'sphi', '+', 'ctheta', '*', 'cphi', '*', 'cpsi', ',', 'cpsi', '*', 'stheta', ']', ',', '[', 'stheta', '*', 'sphi', ',', '-', 'stheta', '*', 'cphi', ',', 'ctheta', ']', ']', ')', '# Calculate the points that could possibly be inside the volume', '# Since the points are rotated, we cannot do anything directional', '# without more logic', 'max_radius', '=', 'np', '.', 'sqrt', '(', 'np', '.', 'abs', '(', 'mat', ')', '.', 'dot', '(', '[', 'a_squared', ',', 'b_squared', ',', 'c_squared', ']', ')', ')', 'idx', ',', 'shapes', '=', '_getshapes_3d', '(', 'center', ',', 'max_radius', ',', 'space', '.', 'shape', ')', 'subgrid', '=', '[', 'g', '[', 'idi', ']', 'for', 'g', ',', 'idi', 'in', 'zip', '(', 'grid', ',', 'shapes', ')', ']', 'offset_points', '=', '[', 'vec', '*', '(', 'xi', '-', 'x0i', ')', '[', '...', ',', 'None', ']', 'for', 'xi', ',', 'vec', ',', 'x0i', 'in', 'zip', '(', 'subgrid', ',', 'mat', '.', 'T', ',', '[', 'x0', ',', 'y0', ',', 'z0', ']', ')', ']', 'rotated', '=', 'offset_points', '[', '0', ']', '+', 'offset_points', '[', '1', ']', '+', 'offset_points', 
'[', '2', ']', 'np', '.', 'square', '(', 'rotated', ',', 'out', '=', 'rotated', ')', 'radius', '=', 'np', '.', 'dot', '(', 'rotated', ',', 'scales', ')', 'else', ':', '# Calculate the points that could possibly be inside the volume', 'max_radius', '=', 'np', '.', 'sqrt', '(', '[', 'a_squared', ',', 'b_squared', ',', 'c_squared', ']', ')', 'idx', ',', 'shapes', '=', '_getshapes_3d', '(', 'center', ',', 'max_radius', ',', 'space', '.', 'shape', ')', 'subgrid', '=', '[', 'g', '[', 'idi', ']', 'for', 'g', ',', 'idi', 'in', 'zip', '(', 'grid', ',', 'shapes', ')', ']', 'squared_dist', '=', '[', 'ai', '*', '(', 'xi', '-', 'x0i', ')', '**', '2', 'for', 'xi', ',', 'ai', ',', 'x0i', 'in', 'zip', '(', 'subgrid', ',', 'scales', ',', '[', 'x0', ',', 'y0', ',', 'z0', ']', ')', ']', '# Parentheses to get best order for broadcasting', 'radius', '=', 'squared_dist', '[', '0', ']', '+', '(', 'squared_dist', '[', '1', ']', '+', 'squared_dist', '[', '2', ']', ')', '# Find the points within the ellipse', 'inside', '=', 'radius', '<=', '1', '# Add the ellipse intensity to those points', 'p', '[', 'idx', ']', '[', 'inside', ']', '+=', 'intensity', 'return', 'space', '.', 'element', '(', 'p', ')'] | Create an ellipsoid phantom in 3d space.
Parameters
----------
space : `DiscreteLp`
Space in which the phantom should be generated. If ``space.shape`` is
1 in an axis, a corresponding slice of the phantom is created
(instead of squashing the whole phantom into the slice).
ellipsoids : list of lists
Each row should contain the entries ::
'value',
'axis_1', 'axis_2', 'axis_3',
'center_x', 'center_y', 'center_z',
'rotation_phi', 'rotation_theta', 'rotation_psi'
The provided ellipsoids need to be specified relative to the
reference cube ``[-1, -1, -1] x [1, 1, 1]``. Angles are to be given
in radians.
Returns
-------
phantom : ``space`` element
3D ellipsoid phantom in ``space``.
See Also
--------
shepp_logan : The typical use-case for this function. | ['Create', 'an', 'ellipsoid', 'phantom', 'in', '3d', 'space', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/phantom/geometric.py#L449-L570 |
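The helper above is private; its docstring points at shepp_logan as the typical entry point. A minimal sketch of that public route (grid size is arbitrary; assumes odl and matplotlib are installed):

import odl

# 64^3 voxel volume over the reference cube [-1, 1]^3
space = odl.uniform_discr([-1, -1, -1], [1, 1, 1], [64, 64, 64])
phantom = odl.phantom.shepp_logan(space, modified=True)  # builds on _ellipsoid_phantom_3d
phantom.show(title='3D Shepp-Logan phantom')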
1,692 | saltstack/salt | salt/modules/panos.py | deactivate_license | def deactivate_license(key_name=None):
'''
Deactivates an installed license.
Required version 7.0.0 or greater.
key_name(str): The file name of the license key installed.
CLI Example:
.. code-block:: bash
salt '*' panos.deactivate_license key_name=License_File_Name.key
'''
_required_version = '7.0.0'
if not __proxy__['panos.is_required_version'](_required_version):
return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version)
if not key_name:
return False, 'You must specify a key_name.'
else:
query = {'type': 'op', 'cmd': '<request><license><deactivate><key><features><member>{0}</member></features>'
'</key></deactivate></license></request>'.format(key_name)}
return __proxy__['panos.call'](query) | python | def deactivate_license(key_name=None):
'''
Deactivates an installed license.
Required version 7.0.0 or greater.
key_name(str): The file name of the license key installed.
CLI Example:
.. code-block:: bash
salt '*' panos.deactivate_license key_name=License_File_Name.key
'''
_required_version = '7.0.0'
if not __proxy__['panos.is_required_version'](_required_version):
return False, 'The panos device requires version {0} or greater for this command.'.format(_required_version)
if not key_name:
return False, 'You must specify a key_name.'
else:
query = {'type': 'op', 'cmd': '<request><license><deactivate><key><features><member>{0}</member></features>'
'</key></deactivate></license></request>'.format(key_name)}
return __proxy__['panos.call'](query) | ['def', 'deactivate_license', '(', 'key_name', '=', 'None', ')', ':', '_required_version', '=', "'7.0.0'", 'if', 'not', '__proxy__', '[', "'panos.is_required_version'", ']', '(', '_required_version', ')', ':', 'return', 'False', ',', "'The panos device requires version {0} or greater for this command.'", '.', 'format', '(', '_required_version', ')', 'if', 'not', 'key_name', ':', 'return', 'False', ',', "'You must specify a key_name.'", 'else', ':', 'query', '=', '{', "'type'", ':', "'op'", ',', "'cmd'", ':', "'<request><license><deactivate><key><features><member>{0}</member></features>'", "'</key></deactivate></license></request>'", '.', 'format', '(', 'key_name', ')', '}', 'return', '__proxy__', '[', "'panos.call'", ']', '(', 'query', ')'] | Deactivates an installed license.
Required version 7.0.0 or greater.
key_name(str): The file name of the license key installed.
CLI Example:
.. code-block:: bash
salt '*' panos.deactivate_license key_name=License_File_Name.key | ['Deactivates', 'an', 'installed', 'license', '.', 'Required', 'version', '7', '.', '0', '.', '0', 'or', 'greater', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/panos.py#L162-L187 |
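Besides the CLI form shown in the docstring, the same execution module can be driven from Python on the salt master (the minion id and key file name below are placeholders):

import salt.client

local = salt.client.LocalClient()
# targets the proxy minion that fronts the PAN-OS device
result = local.cmd('fw01',
                   'panos.deactivate_license',
                   kwarg={'key_name': 'License_File_Name.key'})
print(result)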
1,693 | Azure/azure-event-hubs-python | azure/eventhub/client.py | EventHubClient.add_receiver | def add_receiver(
self, consumer_group, partition, offset=None, prefetch=300,
operation=None, keep_alive=30, auto_reconnect=True):
"""
Add a receiver to the client for a particular consumer group and partition.
:param consumer_group: The name of the consumer group.
:type consumer_group: str
:param partition: The ID of the partition.
:type partition: str
:param offset: The offset from which to start receiving.
:type offset: ~azure.eventhub.common.Offset
:param prefetch: The message prefetch count of the receiver. Default is 300.
:type prefetch: int
:operation: An optional operation to be appended to the hostname in the source URL.
The value must start with `/` character.
:type operation: str
:rtype: ~azure.eventhub.receiver.Receiver
"""
path = self.address.path + operation if operation else self.address.path
source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format(
self.address.hostname, path, consumer_group, partition)
handler = Receiver(
self, source_url, offset=offset, prefetch=prefetch,
keep_alive=keep_alive, auto_reconnect=auto_reconnect)
self.clients.append(handler)
return handler | python | def add_receiver(
self, consumer_group, partition, offset=None, prefetch=300,
operation=None, keep_alive=30, auto_reconnect=True):
"""
Add a receiver to the client for a particular consumer group and partition.
:param consumer_group: The name of the consumer group.
:type consumer_group: str
:param partition: The ID of the partition.
:type partition: str
:param offset: The offset from which to start receiving.
:type offset: ~azure.eventhub.common.Offset
:param prefetch: The message prefetch count of the receiver. Default is 300.
:type prefetch: int
:operation: An optional operation to be appended to the hostname in the source URL.
The value must start with `/` character.
:type operation: str
:rtype: ~azure.eventhub.receiver.Receiver
"""
path = self.address.path + operation if operation else self.address.path
source_url = "amqps://{}{}/ConsumerGroups/{}/Partitions/{}".format(
self.address.hostname, path, consumer_group, partition)
handler = Receiver(
self, source_url, offset=offset, prefetch=prefetch,
keep_alive=keep_alive, auto_reconnect=auto_reconnect)
self.clients.append(handler)
return handler | ['def', 'add_receiver', '(', 'self', ',', 'consumer_group', ',', 'partition', ',', 'offset', '=', 'None', ',', 'prefetch', '=', '300', ',', 'operation', '=', 'None', ',', 'keep_alive', '=', '30', ',', 'auto_reconnect', '=', 'True', ')', ':', 'path', '=', 'self', '.', 'address', '.', 'path', '+', 'operation', 'if', 'operation', 'else', 'self', '.', 'address', '.', 'path', 'source_url', '=', '"amqps://{}{}/ConsumerGroups/{}/Partitions/{}"', '.', 'format', '(', 'self', '.', 'address', '.', 'hostname', ',', 'path', ',', 'consumer_group', ',', 'partition', ')', 'handler', '=', 'Receiver', '(', 'self', ',', 'source_url', ',', 'offset', '=', 'offset', ',', 'prefetch', '=', 'prefetch', ',', 'keep_alive', '=', 'keep_alive', ',', 'auto_reconnect', '=', 'auto_reconnect', ')', 'self', '.', 'clients', '.', 'append', '(', 'handler', ')', 'return', 'handler'] | Add a receiver to the client for a particular consumer group and partition.
:param consumer_group: The name of the consumer group.
:type consumer_group: str
:param partition: The ID of the partition.
:type partition: str
:param offset: The offset from which to start receiving.
:type offset: ~azure.eventhub.common.Offset
:param prefetch: The message prefetch count of the receiver. Default is 300.
:type prefetch: int
:operation: An optional operation to be appended to the hostname in the source URL.
The value must start with `/` character.
:type operation: str
:rtype: ~azure.eventhub.receiver.Receiver | ['Add', 'a', 'receiver', 'to', 'the', 'client', 'for', 'a', 'particular', 'consumer', 'group', 'and', 'partition', '.'] | train | https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/client.py#L381-L407 |
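A hedged end-to-end sketch for add_receiver (pre-5.x azure-eventhub API matching the client above; connection string, hub and consumer-group names are placeholders):

from azure.eventhub import EventHubClient, Offset

client = EventHubClient.from_connection_string(
    "Endpoint=sb://<namespace>.servicebus.windows.net/;SharedAccessKeyName=...;SharedAccessKey=...",
    eventhub="myhub")
receiver = client.add_receiver("$Default", "0", offset=Offset("-1"), prefetch=300)
client.run()                                  # opens the AMQP connections
try:
    for event in receiver.receive(timeout=5):
        print(event.body_as_str())
finally:
    client.stop()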
1,694 | SheffieldML/GPy | GPy/plotting/matplot_dep/base_plots.py | align_subplot_array | def align_subplot_array(axes,xlim=None, ylim=None):
"""
Make all of the axes in the array hae the same limits, turn off unnecessary ticks
use plt.subplots() to get an array of axes
"""
#find sensible xlim,ylim
if xlim is None:
xlim = [np.inf,-np.inf]
for ax in axes.flatten():
xlim[0] = min(xlim[0],ax.get_xlim()[0])
xlim[1] = max(xlim[1],ax.get_xlim()[1])
if ylim is None:
ylim = [np.inf,-np.inf]
for ax in axes.flatten():
ylim[0] = min(ylim[0],ax.get_ylim()[0])
ylim[1] = max(ylim[1],ax.get_ylim()[1])
N,M = axes.shape
for i,ax in enumerate(axes.flatten()):
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if (i)%M:
ax.set_yticks([])
else:
removeRightTicks(ax)
if i<(M*(N-1)):
ax.set_xticks([])
else:
removeUpperTicks(ax) | python | def align_subplot_array(axes,xlim=None, ylim=None):
"""
Make all of the axes in the array hae the same limits, turn off unnecessary ticks
use plt.subplots() to get an array of axes
"""
#find sensible xlim,ylim
if xlim is None:
xlim = [np.inf,-np.inf]
for ax in axes.flatten():
xlim[0] = min(xlim[0],ax.get_xlim()[0])
xlim[1] = max(xlim[1],ax.get_xlim()[1])
if ylim is None:
ylim = [np.inf,-np.inf]
for ax in axes.flatten():
ylim[0] = min(ylim[0],ax.get_ylim()[0])
ylim[1] = max(ylim[1],ax.get_ylim()[1])
N,M = axes.shape
for i,ax in enumerate(axes.flatten()):
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if (i)%M:
ax.set_yticks([])
else:
removeRightTicks(ax)
if i<(M*(N-1)):
ax.set_xticks([])
else:
removeUpperTicks(ax) | ['def', 'align_subplot_array', '(', 'axes', ',', 'xlim', '=', 'None', ',', 'ylim', '=', 'None', ')', ':', '#find sensible xlim,ylim', 'if', 'xlim', 'is', 'None', ':', 'xlim', '=', '[', 'np', '.', 'inf', ',', '-', 'np', '.', 'inf', ']', 'for', 'ax', 'in', 'axes', '.', 'flatten', '(', ')', ':', 'xlim', '[', '0', ']', '=', 'min', '(', 'xlim', '[', '0', ']', ',', 'ax', '.', 'get_xlim', '(', ')', '[', '0', ']', ')', 'xlim', '[', '1', ']', '=', 'max', '(', 'xlim', '[', '1', ']', ',', 'ax', '.', 'get_xlim', '(', ')', '[', '1', ']', ')', 'if', 'ylim', 'is', 'None', ':', 'ylim', '=', '[', 'np', '.', 'inf', ',', '-', 'np', '.', 'inf', ']', 'for', 'ax', 'in', 'axes', '.', 'flatten', '(', ')', ':', 'ylim', '[', '0', ']', '=', 'min', '(', 'ylim', '[', '0', ']', ',', 'ax', '.', 'get_ylim', '(', ')', '[', '0', ']', ')', 'ylim', '[', '1', ']', '=', 'max', '(', 'ylim', '[', '1', ']', ',', 'ax', '.', 'get_ylim', '(', ')', '[', '1', ']', ')', 'N', ',', 'M', '=', 'axes', '.', 'shape', 'for', 'i', ',', 'ax', 'in', 'enumerate', '(', 'axes', '.', 'flatten', '(', ')', ')', ':', 'ax', '.', 'set_xlim', '(', 'xlim', ')', 'ax', '.', 'set_ylim', '(', 'ylim', ')', 'if', '(', 'i', ')', '%', 'M', ':', 'ax', '.', 'set_yticks', '(', '[', ']', ')', 'else', ':', 'removeRightTicks', '(', 'ax', ')', 'if', 'i', '<', '(', 'M', '*', '(', 'N', '-', '1', ')', ')', ':', 'ax', '.', 'set_xticks', '(', '[', ']', ')', 'else', ':', 'removeUpperTicks', '(', 'ax', ')'] | Make all of the axes in the array hae the same limits, turn off unnecessary ticks
use plt.subplots() to get an array of axes | ['Make', 'all', 'of', 'the', 'axes', 'in', 'the', 'array', 'hae', 'the', 'same', 'limits', 'turn', 'off', 'unnecessary', 'ticks', 'use', 'plt', '.', 'subplots', '()', 'to', 'get', 'an', 'array', 'of', 'axes'] | train | https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/plotting/matplot_dep/base_plots.py#L199-L227 |
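Typical use with a plt.subplots() grid, as the docstring suggests (data is random; the import path follows the module location above):

import numpy as np
import matplotlib.pyplot as plt
from GPy.plotting.matplot_dep.base_plots import align_subplot_array

fig, axes = plt.subplots(2, 3)
for ax in axes.flatten():
    ax.plot(np.random.randn(50).cumsum())
align_subplot_array(axes)   # shared x/y limits, ticks only on the outer axes
plt.show()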
1,695 | siznax/wptools | wptools/core.py | WPTools.response | def response(self, action=None):
"""
returns cached response (as dict) for given action,
or list of cached actions
"""
if action in self.cache:
return utils.json_loads(self.cache[action]['response'])
return self.cache.keys() or None | python | def response(self, action=None):
"""
returns cached response (as dict) for given action,
or list of cached actions
"""
if action in self.cache:
return utils.json_loads(self.cache[action]['response'])
return self.cache.keys() or None | ['def', 'response', '(', 'self', ',', 'action', '=', 'None', ')', ':', 'if', 'action', 'in', 'self', '.', 'cache', ':', 'return', 'utils', '.', 'json_loads', '(', 'self', '.', 'cache', '[', 'action', ']', '[', "'response'", ']', ')', 'return', 'self', '.', 'cache', '.', 'keys', '(', ')', 'or', 'None'] | returns cached response (as dict) for given action,
or list of cached actions | ['returns', 'cached', 'response', '(', 'as', 'dict', ')', 'for', 'given', 'action', 'or', 'list', 'of', 'cached', 'actions'] | train | https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/core.py#L264-L271 |
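A short sketch of how response() is used after a fetch (requires network access; the page title is arbitrary):

import wptools

page = wptools.page('Ada Lovelace')
page.get_query()                  # populates the 'query' entry in the cache
data = page.response('query')     # parsed JSON dict for that cached request
print(list(page.response()))      # names of all cached actions, e.g. ['query']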
1,696 | dgraph-io/pydgraph | pydgraph/client_stub.py | DgraphClientStub.commit_or_abort | def commit_or_abort(self, ctx, timeout=None, metadata=None,
credentials=None):
"""Runs commit or abort operation."""
return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
credentials=credentials) | python | def commit_or_abort(self, ctx, timeout=None, metadata=None,
credentials=None):
"""Runs commit or abort operation."""
return self.stub.CommitOrAbort(ctx, timeout=timeout, metadata=metadata,
credentials=credentials) | ['def', 'commit_or_abort', '(', 'self', ',', 'ctx', ',', 'timeout', '=', 'None', ',', 'metadata', '=', 'None', ',', 'credentials', '=', 'None', ')', ':', 'return', 'self', '.', 'stub', '.', 'CommitOrAbort', '(', 'ctx', ',', 'timeout', '=', 'timeout', ',', 'metadata', '=', 'metadata', ',', 'credentials', '=', 'credentials', ')'] | Runs commit or abort operation. | ['Runs', 'commit', 'or', 'abort', 'operation', '.'] | train | https://github.com/dgraph-io/pydgraph/blob/0fe85f6593cb2148475750bc8555a6fdf509054b/pydgraph/client_stub.py#L58-L62 |
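commit_or_abort is normally reached through the higher-level transaction API; a hedged sketch (assumes a Dgraph alpha listening on localhost:9080):

import pydgraph

stub = pydgraph.DgraphClientStub('localhost:9080')
client = pydgraph.DgraphClient(stub)

txn = client.txn()
try:
    txn.mutate(set_obj={'name': 'Alice'})
    txn.commit()      # issues CommitOrAbort on the stub shown above
finally:
    txn.discard()     # aborts via the same RPC if the commit never happened
    stub.close()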
1,697 | spyder-ide/spyder | spyder/utils/programs.py | get_python_args | def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args | python | def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args | ['def', 'get_python_args', '(', 'fname', ',', 'python_args', ',', 'interact', ',', 'debug', ',', 'end_args', ')', ':', 'p_args', '=', '[', ']', 'if', 'python_args', 'is', 'not', 'None', ':', 'p_args', '+=', 'python_args', '.', 'split', '(', ')', 'if', 'interact', ':', 'p_args', '.', 'append', '(', "'-i'", ')', 'if', 'debug', ':', 'p_args', '.', 'extend', '(', '[', "'-m'", ',', "'pdb'", ']', ')', 'if', 'fname', 'is', 'not', 'None', ':', 'if', 'os', '.', 'name', '==', "'nt'", 'and', 'debug', ':', '# When calling pdb on Windows, one has to replace backslashes by\r', '# slashes to avoid confusion with escape characters (otherwise, \r', "# for example, '\\t' will be interpreted as a tabulation):\r", 'p_args', '.', 'append', '(', 'osp', '.', 'normpath', '(', 'fname', ')', '.', 'replace', '(', 'os', '.', 'sep', ',', "'/'", ')', ')', 'else', ':', 'p_args', '.', 'append', '(', 'fname', ')', 'if', 'end_args', ':', 'p_args', '.', 'extend', '(', 'shell_split', '(', 'end_args', ')', ')', 'return', 'p_args'] | Construct Python interpreter arguments | ['Construct', 'Python', 'interpreter', 'arguments'] | train | https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/programs.py#L251-L270 |
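A worked example of the argument construction (paths and flags are arbitrary; the behaviour follows directly from the code above):

from spyder.utils.programs import get_python_args

args = get_python_args(r'C:\work\script.py', '-O -u',
                       interact=False, debug=True, end_args='--n 10')
# On Windows the backslashes are swapped for slashes because the file is
# handed to pdb, giving:
#   ['-O', '-u', '-m', 'pdb', 'C:/work/script.py', '--n', '10']
print(args)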
1,698 | modin-project/modin | modin/pandas/base.py | BasePandasDataset.size | def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self._query_compiler.index) * len(self._query_compiler.columns) | python | def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self._query_compiler.index) * len(self._query_compiler.columns) | ['def', 'size', '(', 'self', ')', ':', 'return', 'len', '(', 'self', '.', '_query_compiler', '.', 'index', ')', '*', 'len', '(', 'self', '.', '_query_compiler', '.', 'columns', ')'] | Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame. | ['Get', 'the', 'number', 'of', 'elements', 'in', 'the', 'DataFrame', '.', 'Returns', ':', 'The', 'number', 'of', 'elements', 'in', 'the', 'DataFrame', '.'] | train | https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/pandas/base.py#L3238-L3244 |
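size simply multiplies the row and column counts, mirroring pandas; a quick check with modin (requires a configured execution engine such as Ray or Dask):

import modin.pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
print(df.size)   # 6  (3 rows * 2 columns)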
1,699 | LCAV/pylocus | pylocus/point_set.py | AngleSet.get_polygon_constraints_m | def get_polygon_constraints_m(self, polygons_m, print_out=False):
"""
:param range_polygones: list of numbers of polygones to test.
:return A, b: the constraints on the theta-vector of the form A*theta = b
"""
rows_b = []
rows_A = []
m = len(polygons_m[0])
rows_b.append((m - 2) * pi * np.ones(
len(polygons_m), ))
for p in polygons_m:
row = np.zeros((self.theta.shape[0], ))
for k in range(m):
index = get_index(self.corners, p[1], (p[0], p[2]))
row[index] = 1
p = np.roll(p, 1)
assert np.sum(row) == m
rows_A.append(row)
A = np.vstack(rows_A)
b = np.hstack(rows_b)
num_constraints = A.shape[0]
A_repeat = np.repeat(A.astype(bool), 3).reshape((1, -1))
corners = self.corners.reshape((1, -1))
corners_tiled = np.tile(corners, num_constraints)
if (print_out):
print('shape of A {}'.format(A.shape))
if (print_out):
print('chosen angles m={}:\n{}'.format(m, (corners_tiled)[A_repeat]
.reshape((-1, m * 3))))
if (print_out):
print('{}-polygones: {}'.format(m, rows_A))
self.A = A
self.b = b
return A, b | python | def get_polygon_constraints_m(self, polygons_m, print_out=False):
"""
:param range_polygones: list of numbers of polygones to test.
:return A, b: the constraints on the theta-vector of the form A*theta = b
"""
rows_b = []
rows_A = []
m = len(polygons_m[0])
rows_b.append((m - 2) * pi * np.ones(
len(polygons_m), ))
for p in polygons_m:
row = np.zeros((self.theta.shape[0], ))
for k in range(m):
index = get_index(self.corners, p[1], (p[0], p[2]))
row[index] = 1
p = np.roll(p, 1)
assert np.sum(row) == m
rows_A.append(row)
A = np.vstack(rows_A)
b = np.hstack(rows_b)
num_constraints = A.shape[0]
A_repeat = np.repeat(A.astype(bool), 3).reshape((1, -1))
corners = self.corners.reshape((1, -1))
corners_tiled = np.tile(corners, num_constraints)
if (print_out):
print('shape of A {}'.format(A.shape))
if (print_out):
print('chosen angles m={}:\n{}'.format(m, (corners_tiled)[A_repeat]
.reshape((-1, m * 3))))
if (print_out):
print('{}-polygones: {}'.format(m, rows_A))
self.A = A
self.b = b
return A, b | ['def', 'get_polygon_constraints_m', '(', 'self', ',', 'polygons_m', ',', 'print_out', '=', 'False', ')', ':', 'rows_b', '=', '[', ']', 'rows_A', '=', '[', ']', 'm', '=', 'len', '(', 'polygons_m', '[', '0', ']', ')', 'rows_b', '.', 'append', '(', '(', 'm', '-', '2', ')', '*', 'pi', '*', 'np', '.', 'ones', '(', 'len', '(', 'polygons_m', ')', ',', ')', ')', 'for', 'p', 'in', 'polygons_m', ':', 'row', '=', 'np', '.', 'zeros', '(', '(', 'self', '.', 'theta', '.', 'shape', '[', '0', ']', ',', ')', ')', 'for', 'k', 'in', 'range', '(', 'm', ')', ':', 'index', '=', 'get_index', '(', 'self', '.', 'corners', ',', 'p', '[', '1', ']', ',', '(', 'p', '[', '0', ']', ',', 'p', '[', '2', ']', ')', ')', 'row', '[', 'index', ']', '=', '1', 'p', '=', 'np', '.', 'roll', '(', 'p', ',', '1', ')', 'assert', 'np', '.', 'sum', '(', 'row', ')', '==', 'm', 'rows_A', '.', 'append', '(', 'row', ')', 'A', '=', 'np', '.', 'vstack', '(', 'rows_A', ')', 'b', '=', 'np', '.', 'hstack', '(', 'rows_b', ')', 'num_constraints', '=', 'A', '.', 'shape', '[', '0', ']', 'A_repeat', '=', 'np', '.', 'repeat', '(', 'A', '.', 'astype', '(', 'bool', ')', ',', '3', ')', '.', 'reshape', '(', '(', '1', ',', '-', '1', ')', ')', 'corners', '=', 'self', '.', 'corners', '.', 'reshape', '(', '(', '1', ',', '-', '1', ')', ')', 'corners_tiled', '=', 'np', '.', 'tile', '(', 'corners', ',', 'num_constraints', ')', 'if', '(', 'print_out', ')', ':', 'print', '(', "'shape of A {}'", '.', 'format', '(', 'A', '.', 'shape', ')', ')', 'if', '(', 'print_out', ')', ':', 'print', '(', "'chosen angles m={}:\\n{}'", '.', 'format', '(', 'm', ',', '(', 'corners_tiled', ')', '[', 'A_repeat', ']', '.', 'reshape', '(', '(', '-', '1', ',', 'm', '*', '3', ')', ')', ')', ')', 'if', '(', 'print_out', ')', ':', 'print', '(', "'{}-polygones: {}'", '.', 'format', '(', 'm', ',', 'rows_A', ')', ')', 'self', '.', 'A', '=', 'A', 'self', '.', 'b', '=', 'b', 'return', 'A', ',', 'b'] | :param range_polygones: list of numbers of polygones to test.
:return A, b: the constraints on the theta-vector of the form A*theta = b | [':', 'param', 'range_polygones', ':', 'list', 'of', 'numbers', 'of', 'polygones', 'to', 'test', '.'] | train | https://github.com/LCAV/pylocus/blob/c56a38c251d8a435caf4641a8ae6027ecba2c8c6/pylocus/point_set.py#L581-L615 |
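Each constraint row above encodes that the inner angles of an m-gon sum to (m - 2) * pi; a tiny standalone check of that identity for a triangle (angle values are arbitrary but consistent, and the indexing into the full theta vector is left out):

import numpy as np

theta = np.array([np.pi / 3, np.pi / 4, 5 * np.pi / 12])   # three inner angles
A = np.ones((1, 3))                                        # one constraint row
b = np.array([(3 - 2) * np.pi])                            # (m - 2) * pi, with m = 3
assert np.allclose(A @ theta, b)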