| Unnamed: 0 (int64, 0–10k) | repository_name (string, 7–54) | func_path_in_repository (string, 5–223) | func_name (string, 1–134) | whole_func_string (string, 100–30.3k) | language (1 class) | func_code_string (string, 100–30.3k) | func_code_tokens (string, 138–33.2k) | func_documentation_string (string, 1–15k) | func_documentation_tokens (string, 5–5.14k) | split_name (1 class) | func_code_url (string, 91–315) |
---|---|---|---|---|---|---|---|---|---|---|---|
7,800 | faucamp/python-gsmmodem | gsmmodem/pdu.py | packSeptets | def packSeptets(octets, padBits=0):
""" Packs the specified octets into septets
Typically the output of encodeGsm7 would be used as input to this function. The resulting
bytearray contains the original GSM-7 characters packed into septets ready for transmission.
:rtype: bytearray
"""
result = bytearray()
if type(octets) == str:
octets = iter(rawStrToByteArray(octets))
elif type(octets) == bytearray:
octets = iter(octets)
shift = padBits
if padBits == 0:
prevSeptet = next(octets)
else:
prevSeptet = 0x00
for octet in octets:
septet = octet & 0x7f
if shift == 7:
# prevSeptet has already been fully added to result
shift = 0
prevSeptet = septet
continue
b = ((septet << (7 - shift)) & 0xFF) | (prevSeptet >> shift)
prevSeptet = septet
shift += 1
result.append(b)
if shift != 7:
# There is a bit "left over" from prevSeptet
result.append(prevSeptet >> shift)
return result | python | def packSeptets(octets, padBits=0):
""" Packs the specified octets into septets
Typically the output of encodeGsm7 would be used as input to this function. The resulting
bytearray contains the original GSM-7 characters packed into septets ready for transmission.
:rtype: bytearray
"""
result = bytearray()
if type(octets) == str:
octets = iter(rawStrToByteArray(octets))
elif type(octets) == bytearray:
octets = iter(octets)
shift = padBits
if padBits == 0:
prevSeptet = next(octets)
else:
prevSeptet = 0x00
for octet in octets:
septet = octet & 0x7f
if shift == 7:
# prevSeptet has already been fully added to result
shift = 0
prevSeptet = septet
continue
b = ((septet << (7 - shift)) & 0xFF) | (prevSeptet >> shift)
prevSeptet = septet
shift += 1
result.append(b)
if shift != 7:
# There is a bit "left over" from prevSeptet
result.append(prevSeptet >> shift)
return result | ['def', 'packSeptets', '(', 'octets', ',', 'padBits', '=', '0', ')', ':', 'result', '=', 'bytearray', '(', ')', 'if', 'type', '(', 'octets', ')', '==', 'str', ':', 'octets', '=', 'iter', '(', 'rawStrToByteArray', '(', 'octets', ')', ')', 'elif', 'type', '(', 'octets', ')', '==', 'bytearray', ':', 'octets', '=', 'iter', '(', 'octets', ')', 'shift', '=', 'padBits', 'if', 'padBits', '==', '0', ':', 'prevSeptet', '=', 'next', '(', 'octets', ')', 'else', ':', 'prevSeptet', '=', '0x00', 'for', 'octet', 'in', 'octets', ':', 'septet', '=', 'octet', '&', '0x7f', 'if', 'shift', '==', '7', ':', '# prevSeptet has already been fully added to result', 'shift', '=', '0', 'prevSeptet', '=', 'septet', 'continue', 'b', '=', '(', '(', 'septet', '<<', '(', '7', '-', 'shift', ')', ')', '&', '0xFF', ')', '|', '(', 'prevSeptet', '>>', 'shift', ')', 'prevSeptet', '=', 'septet', 'shift', '+=', '1', 'result', '.', 'append', '(', 'b', ')', 'if', 'shift', '!=', '7', ':', '# There is a bit "left over" from prevSeptet', 'result', '.', 'append', '(', 'prevSeptet', '>>', 'shift', ')', 'return', 'result'] | Packs the specified octets into septets
Typically the output of encodeGsm7 would be used as input to this function. The resulting
bytearray contains the original GSM-7 characters packed into septets ready for transmission.
:rtype: bytearray | ['Packs', 'the', 'specified', 'octets', 'into', 'septets', 'Typically', 'the', 'output', 'of', 'encodeGsm7', 'would', 'be', 'used', 'as', 'input', 'to', 'this', 'function', '.', 'The', 'resulting', 'bytearray', 'contains', 'the', 'original', 'GSM', '-', '7', 'characters', 'packed', 'into', 'septets', 'ready', 'for', 'transmission', '.', ':', 'rtype', ':', 'bytearray'] | train | https://github.com/faucamp/python-gsmmodem/blob/834c68b1387ca2c91e2210faa8f75526b39723b5/gsmmodem/pdu.py#L714-L746 |
7,801 | ontio/ontology-python-sdk | ontology/network/rpc.py | RpcClient.get_smart_contract | def get_smart_contract(self, hex_contract_address: str, is_full: bool = False) -> dict:
"""
This interface is used to get the information of smart contract based on the specified hexadecimal hash value.
:param hex_contract_address: str, a hexadecimal hash value.
:param is_full:
:return: the information of smart contract in dictionary form.
"""
if not isinstance(hex_contract_address, str):
raise SDKException(ErrorCode.param_err('a hexadecimal contract address is required.'))
if len(hex_contract_address) != 40:
raise SDKException(ErrorCode.param_err('the length of the contract address should be 40 bytes.'))
payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT, [hex_contract_address, 1])
response = self.__post(self.__url, payload)
if is_full:
return response
return response['result'] | python | def get_smart_contract(self, hex_contract_address: str, is_full: bool = False) -> dict:
"""
This interface is used to get the information of smart contract based on the specified hexadecimal hash value.
:param hex_contract_address: str, a hexadecimal hash value.
:param is_full:
:return: the information of smart contract in dictionary form.
"""
if not isinstance(hex_contract_address, str):
raise SDKException(ErrorCode.param_err('a hexadecimal contract address is required.'))
if len(hex_contract_address) != 40:
raise SDKException(ErrorCode.param_err('the length of the contract address should be 40 bytes.'))
payload = self.generate_json_rpc_payload(RpcMethod.GET_SMART_CONTRACT, [hex_contract_address, 1])
response = self.__post(self.__url, payload)
if is_full:
return response
return response['result'] | ['def', 'get_smart_contract', '(', 'self', ',', 'hex_contract_address', ':', 'str', ',', 'is_full', ':', 'bool', '=', 'False', ')', '->', 'dict', ':', 'if', 'not', 'isinstance', '(', 'hex_contract_address', ',', 'str', ')', ':', 'raise', 'SDKException', '(', 'ErrorCode', '.', 'param_err', '(', "'a hexadecimal contract address is required.'", ')', ')', 'if', 'len', '(', 'hex_contract_address', ')', '!=', '40', ':', 'raise', 'SDKException', '(', 'ErrorCode', '.', 'param_err', '(', "'the length of the contract address should be 40 bytes.'", ')', ')', 'payload', '=', 'self', '.', 'generate_json_rpc_payload', '(', 'RpcMethod', '.', 'GET_SMART_CONTRACT', ',', '[', 'hex_contract_address', ',', '1', ']', ')', 'response', '=', 'self', '.', '__post', '(', 'self', '.', '__url', ',', 'payload', ')', 'if', 'is_full', ':', 'return', 'response', 'return', 'response', '[', "'result'", ']'] | This interface is used to get the information of smart contract based on the specified hexadecimal hash value.
:param hex_contract_address: str, a hexadecimal hash value.
:param is_full:
:return: the information of smart contract in dictionary form. | ['This', 'interface', 'is', 'used', 'to', 'get', 'the', 'information', 'of', 'smart', 'contract', 'based', 'on', 'the', 'specified', 'hexadecimal', 'hash', 'value', '.'] | train | https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/network/rpc.py#L401-L417 |
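A hedged usage sketch for `get_smart_contract`. The `RpcClient` constructor argument (a node URL) and the URL itself are assumptions, and the contract address is a placeholder 40-character hex string:

```python
# Usage sketch; the node URL is an assumption, and '0' * 40 is a placeholder
# 40-character hexadecimal contract address.
from ontology.network.rpc import RpcClient

rpc = RpcClient('http://polaris3.ont.io:20336')
contract_info = rpc.get_smart_contract('0' * 40)
print(contract_info)
```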
7,802 | takuti/flurs | flurs/base.py | RecommenderMixin.scores2recos | def scores2recos(self, scores, candidates, rev=False):
"""Get recommendation list for a user u_index based on scores.
Args:
scores (numpy array; (n_target_items,)):
Scores for the target items. A smaller score indicates a more promising item.
candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.
rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default.
Returns:
(numpy array, numpy array) : (Sorted list of items, Sorted scores).
"""
sorted_indices = np.argsort(scores)
if rev:
sorted_indices = sorted_indices[::-1]
return candidates[sorted_indices], scores[sorted_indices] | python | def scores2recos(self, scores, candidates, rev=False):
"""Get recommendation list for a user u_index based on scores.
Args:
scores (numpy array; (n_target_items,)):
Scores for the target items. A smaller score indicates a more promising item.
candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.
rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default.
Returns:
(numpy array, numpy array) : (Sorted list of items, Sorted scores).
"""
sorted_indices = np.argsort(scores)
if rev:
sorted_indices = sorted_indices[::-1]
return candidates[sorted_indices], scores[sorted_indices] | ['def', 'scores2recos', '(', 'self', ',', 'scores', ',', 'candidates', ',', 'rev', '=', 'False', ')', ':', 'sorted_indices', '=', 'np', '.', 'argsort', '(', 'scores', ')', 'if', 'rev', ':', 'sorted_indices', '=', 'sorted_indices', '[', ':', ':', '-', '1', ']', 'return', 'candidates', '[', 'sorted_indices', ']', ',', 'scores', '[', 'sorted_indices', ']'] | Get recommendation list for a user u_index based on scores.
Args:
scores (numpy array; (n_target_items,)):
Scores for the target items. A smaller score indicates a more promising item.
candidates (numpy array; (# target items, )): Target items' indices. Only these items are considered as the recommendation candidates.
rev (bool): If true, return items in descending order. Ascending order (i.e., smaller scores are more promising) is the default.
Returns:
(numpy array, numpy array) : (Sorted list of items, Sorted scores). | ['Get', 'recommendation', 'list', 'for', 'a', 'user', 'u_index', 'based', 'on', 'scores', '.'] | train | https://github.com/takuti/flurs/blob/a998fc180b45db7eaf38dbbbf8125a93100b8a8c/flurs/base.py#L115-L133 |
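The sorting logic of `scores2recos` can be reproduced standalone with NumPy, which makes the ascending/descending behavior concrete:

```python
# Self-contained sketch of the scores2recos sorting logic.
import numpy as np

scores = np.array([0.8, 0.1, 0.5])
candidates = np.array([10, 20, 30])        # item indices being ranked

order = np.argsort(scores)                 # ascending: smaller score first
print(candidates[order], scores[order])    # [20 30 10] [0.1 0.5 0.8]
print(candidates[order[::-1]])             # [10 30 20], as with rev=True
```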
7,803 | ktbyers/netmiko | netmiko/utilities.py | clitable_to_dict | def clitable_to_dict(cli_table):
"""Converts TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs | python | def clitable_to_dict(cli_table):
"""Converts TextFSM cli_table object to list of dictionaries."""
objs = []
for row in cli_table:
temp_dict = {}
for index, element in enumerate(row):
temp_dict[cli_table.header[index].lower()] = element
objs.append(temp_dict)
return objs | ['def', 'clitable_to_dict', '(', 'cli_table', ')', ':', 'objs', '=', '[', ']', 'for', 'row', 'in', 'cli_table', ':', 'temp_dict', '=', '{', '}', 'for', 'index', ',', 'element', 'in', 'enumerate', '(', 'row', ')', ':', 'temp_dict', '[', 'cli_table', '.', 'header', '[', 'index', ']', '.', 'lower', '(', ')', ']', '=', 'element', 'objs', '.', 'append', '(', 'temp_dict', ')', 'return', 'objs'] | Converts TextFSM cli_table object to list of dictionaries. | ['Converts', 'TextFSM', 'cli_table', 'object', 'to', 'list', 'of', 'dictionaries', '.'] | train | https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/utilities.py#L219-L227 |
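A self-contained sketch of `clitable_to_dict`; the class below is a hypothetical stand-in for a TextFSM clitable (a `.header` attribute plus row iteration), not part of netmiko:

```python
# Hypothetical stand-in object mimicking the TextFSM clitable interface.
class FakeCliTable:
    header = ['Intf', 'Status']

    def __iter__(self):
        return iter([['Gi0/1', 'up'], ['Gi0/2', 'down']])

print(clitable_to_dict(FakeCliTable()))
# [{'intf': 'Gi0/1', 'status': 'up'}, {'intf': 'Gi0/2', 'status': 'down'}]
```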
7,804 | jaseg/python-mpv | mpv.py | _make_node_str_list | def _make_node_str_list(l):
"""Take a list of python objects and make a MPV string node array from it.
As an example, the python list ``l = [ "foo", 23, False ]`` will result in the following MPV node object::
struct mpv_node {
.format = MPV_NODE_ARRAY,
.u.list = *(struct mpv_node_array){
.num = len(l),
.keys = NULL,
.values = struct mpv_node[len(l)] {
{ .format = MPV_NODE_STRING, .u.string = l[0] },
{ .format = MPV_NODE_STRING, .u.string = l[1] },
...
}
}
}
"""
char_ps = [ c_char_p(_mpv_coax_proptype(e, str)) for e in l ]
node_list = MpvNodeList(
num=len(l),
keys=None,
values=( MpvNode * len(l))( *[ MpvNode(
format=MpvFormat.STRING,
val=MpvNodeUnion(string=p))
for p in char_ps ]))
node = MpvNode(
format=MpvFormat.NODE_ARRAY,
val=MpvNodeUnion(list=pointer(node_list)))
return char_ps, node_list, node, cast(pointer(node), c_void_p) | python | def _make_node_str_list(l):
"""Take a list of python objects and make a MPV string node array from it.
As an example, the python list ``l = [ "foo", 23, False ]`` will result in the following MPV node object::
struct mpv_node {
.format = MPV_NODE_ARRAY,
.u.list = *(struct mpv_node_array){
.num = len(l),
.keys = NULL,
.values = struct mpv_node[len(l)] {
{ .format = MPV_NODE_STRING, .u.string = l[0] },
{ .format = MPV_NODE_STRING, .u.string = l[1] },
...
}
}
}
"""
char_ps = [ c_char_p(_mpv_coax_proptype(e, str)) for e in l ]
node_list = MpvNodeList(
num=len(l),
keys=None,
values=( MpvNode * len(l))( *[ MpvNode(
format=MpvFormat.STRING,
val=MpvNodeUnion(string=p))
for p in char_ps ]))
node = MpvNode(
format=MpvFormat.NODE_ARRAY,
val=MpvNodeUnion(list=pointer(node_list)))
return char_ps, node_list, node, cast(pointer(node), c_void_p) | ['def', '_make_node_str_list', '(', 'l', ')', ':', 'char_ps', '=', '[', 'c_char_p', '(', '_mpv_coax_proptype', '(', 'e', ',', 'str', ')', ')', 'for', 'e', 'in', 'l', ']', 'node_list', '=', 'MpvNodeList', '(', 'num', '=', 'len', '(', 'l', ')', ',', 'keys', '=', 'None', ',', 'values', '=', '(', 'MpvNode', '*', 'len', '(', 'l', ')', ')', '(', '*', '[', 'MpvNode', '(', 'format', '=', 'MpvFormat', '.', 'STRING', ',', 'val', '=', 'MpvNodeUnion', '(', 'string', '=', 'p', ')', ')', 'for', 'p', 'in', 'char_ps', ']', ')', ')', 'node', '=', 'MpvNode', '(', 'format', '=', 'MpvFormat', '.', 'NODE_ARRAY', ',', 'val', '=', 'MpvNodeUnion', '(', 'list', '=', 'pointer', '(', 'node_list', ')', ')', ')', 'return', 'char_ps', ',', 'node_list', ',', 'node', ',', 'cast', '(', 'pointer', '(', 'node', ')', ',', 'c_void_p', ')'] | Take a list of python objects and make a MPV string node array from it.
As an example, the python list ``l = [ "foo", 23, False ]`` will result in the following MPV node object::
struct mpv_node {
.format = MPV_NODE_ARRAY,
.u.list = *(struct mpv_node_array){
.num = len(l),
.keys = NULL,
.values = struct mpv_node[len(l)] {
{ .format = MPV_NODE_STRING, .u.string = l[0] },
{ .format = MPV_NODE_STRING, .u.string = l[1] },
...
}
}
} | ['Take', 'a', 'list', 'of', 'python', 'objects', 'and', 'make', 'a', 'MPV', 'string', 'node', 'array', 'from', 'it', '.'] | train | https://github.com/jaseg/python-mpv/blob/7117de4005cc470a45efd9cf2e9657bdf63a9079/mpv.py#L411-L440 |
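A hedged usage sketch for `_make_node_str_list`; the command arguments are placeholders. Note that all four return values matter: the caller must keep `char_ps` and `node_list` referenced while the node is in use, or the underlying buffers may be garbage-collected:

```python
# Usage sketch; 'loadfile' / 'test.mp4' are placeholder arguments.
char_ps, node_list, node, node_ptr = _make_node_str_list(['loadfile', 'test.mp4'])
# node_ptr (a c_void_p) would then be passed to an mpv C API call;
# keep char_ps and node_list alive until that call returns.
```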
7,805 | BlueBrain/NeuroM | neurom/check/neuron_checks.py | has_no_dangling_branch | def has_no_dangling_branch(neuron):
'''Check if the neuron has dangling neurites'''
soma_center = neuron.soma.points[:, COLS.XYZ].mean(axis=0)
recentered_soma = neuron.soma.points[:, COLS.XYZ] - soma_center
radius = np.linalg.norm(recentered_soma, axis=1)
soma_max_radius = radius.max()
def is_dangling(neurite):
'''Is the neurite dangling ?'''
starting_point = neurite.points[1][COLS.XYZ]
if np.linalg.norm(starting_point - soma_center) - soma_max_radius <= 12.:
return False
if neurite.type != NeuriteType.axon:
return True
all_points = list(chain.from_iterable(n.points[1:]
for n in iter_neurites(neurite)
if n.type != NeuriteType.axon))
res = [np.linalg.norm(starting_point - p[COLS.XYZ]) >= 2 * p[COLS.R] + 2
for p in all_points]
return all(res)
bad_ids = [(n.root_node.id, [n.root_node.points[1]])
for n in iter_neurites(neuron) if is_dangling(n)]
return CheckResult(len(bad_ids) == 0, bad_ids) | python | def has_no_dangling_branch(neuron):
'''Check if the neuron has dangling neurites'''
soma_center = neuron.soma.points[:, COLS.XYZ].mean(axis=0)
recentered_soma = neuron.soma.points[:, COLS.XYZ] - soma_center
radius = np.linalg.norm(recentered_soma, axis=1)
soma_max_radius = radius.max()
def is_dangling(neurite):
'''Is the neurite dangling ?'''
starting_point = neurite.points[1][COLS.XYZ]
if np.linalg.norm(starting_point - soma_center) - soma_max_radius <= 12.:
return False
if neurite.type != NeuriteType.axon:
return True
all_points = list(chain.from_iterable(n.points[1:]
for n in iter_neurites(neurite)
if n.type != NeuriteType.axon))
res = [np.linalg.norm(starting_point - p[COLS.XYZ]) >= 2 * p[COLS.R] + 2
for p in all_points]
return all(res)
bad_ids = [(n.root_node.id, [n.root_node.points[1]])
for n in iter_neurites(neuron) if is_dangling(n)]
return CheckResult(len(bad_ids) == 0, bad_ids) | ['def', 'has_no_dangling_branch', '(', 'neuron', ')', ':', 'soma_center', '=', 'neuron', '.', 'soma', '.', 'points', '[', ':', ',', 'COLS', '.', 'XYZ', ']', '.', 'mean', '(', 'axis', '=', '0', ')', 'recentered_soma', '=', 'neuron', '.', 'soma', '.', 'points', '[', ':', ',', 'COLS', '.', 'XYZ', ']', '-', 'soma_center', 'radius', '=', 'np', '.', 'linalg', '.', 'norm', '(', 'recentered_soma', ',', 'axis', '=', '1', ')', 'soma_max_radius', '=', 'radius', '.', 'max', '(', ')', 'def', 'is_dangling', '(', 'neurite', ')', ':', "'''Is the neurite dangling ?'''", 'starting_point', '=', 'neurite', '.', 'points', '[', '1', ']', '[', 'COLS', '.', 'XYZ', ']', 'if', 'np', '.', 'linalg', '.', 'norm', '(', 'starting_point', '-', 'soma_center', ')', '-', 'soma_max_radius', '<=', '12.', ':', 'return', 'False', 'if', 'neurite', '.', 'type', '!=', 'NeuriteType', '.', 'axon', ':', 'return', 'True', 'all_points', '=', 'list', '(', 'chain', '.', 'from_iterable', '(', 'n', '.', 'points', '[', '1', ':', ']', 'for', 'n', 'in', 'iter_neurites', '(', 'neurite', ')', 'if', 'n', '.', 'type', '!=', 'NeuriteType', '.', 'axon', ')', ')', 'res', '=', '[', 'np', '.', 'linalg', '.', 'norm', '(', 'starting_point', '-', 'p', '[', 'COLS', '.', 'XYZ', ']', ')', '>=', '2', '*', 'p', '[', 'COLS', '.', 'R', ']', '+', '2', 'for', 'p', 'in', 'all_points', ']', 'return', 'all', '(', 'res', ')', 'bad_ids', '=', '[', '(', 'n', '.', 'root_node', '.', 'id', ',', '[', 'n', '.', 'root_node', '.', 'points', '[', '1', ']', ']', ')', 'for', 'n', 'in', 'iter_neurites', '(', 'neuron', ')', 'if', 'is_dangling', '(', 'n', ')', ']', 'return', 'CheckResult', '(', 'len', '(', 'bad_ids', ')', '==', '0', ',', 'bad_ids', ')'] | Check if the neuron has dangling neurites | ['Check', 'if', 'the', 'neuron', 'has', 'dangling', 'neurites'] | train | https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/neuron_checks.py#L269-L295 |
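A hedged usage sketch for `has_no_dangling_branch`; it assumes NeuroM is installed, and the morphology path is a placeholder:

```python
# Usage sketch; 'my_cell.swc' is a placeholder morphology file.
import neurom

nrn = neurom.load_neuron('my_cell.swc')
result = has_no_dangling_branch(nrn)
print(result.status)   # True if no dangling neurites were found
print(result.info)     # offending (root-node id, points) pairs, if any
```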
7,806 | ga4gh/ga4gh-server | ga4gh/server/datamodel/variants.py | AbstractVariantAnnotationSet.hashVariantAnnotation | def hashVariantAnnotation(cls, gaVariant, gaVariantAnnotation):
"""
Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects
"""
treffs = [treff.id for treff in gaVariantAnnotation.transcript_effects]
return hashlib.md5(
"{}\t{}\t{}\t".format(
gaVariant.reference_bases, tuple(gaVariant.alternate_bases),
treffs)
).hexdigest() | python | def hashVariantAnnotation(cls, gaVariant, gaVariantAnnotation):
"""
Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects
"""
treffs = [treff.id for treff in gaVariantAnnotation.transcript_effects]
return hashlib.md5(
"{}\t{}\t{}\t".format(
gaVariant.reference_bases, tuple(gaVariant.alternate_bases),
treffs)
).hexdigest() | ['def', 'hashVariantAnnotation', '(', 'cls', ',', 'gaVariant', ',', 'gaVariantAnnotation', ')', ':', 'treffs', '=', '[', 'treff', '.', 'id', 'for', 'treff', 'in', 'gaVariantAnnotation', '.', 'transcript_effects', ']', 'return', 'hashlib', '.', 'md5', '(', '"{}\\t{}\\t{}\\t"', '.', 'format', '(', 'gaVariant', '.', 'reference_bases', ',', 'tuple', '(', 'gaVariant', '.', 'alternate_bases', ')', ',', 'treffs', ')', ')', '.', 'hexdigest', '(', ')'] | Produces an MD5 hash of the gaVariant and gaVariantAnnotation objects | ['Produces', 'an', 'MD5', 'hash', 'of', 'the', 'gaVariant', 'and', 'gaVariantAnnotation', 'objects'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/variants.py#L929-L938 |
7,807 | saltstack/salt | salt/modules/snapper.py | _get_jid_snapshots | def _get_jid_snapshots(jid, config='root'):
'''
Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed.
'''
jid_snapshots = [x for x in list_snapshots(config) if x['userdata'].get("salt_jid") == jid]
pre_snapshot = [x for x in jid_snapshots if x['type'] == "pre"]
post_snapshot = [x for x in jid_snapshots if x['type'] == "post"]
if not pre_snapshot or not post_snapshot:
raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
return (
pre_snapshot[0]['id'],
post_snapshot[0]['id']
) | python | def _get_jid_snapshots(jid, config='root'):
'''
Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed.
'''
jid_snapshots = [x for x in list_snapshots(config) if x['userdata'].get("salt_jid") == jid]
pre_snapshot = [x for x in jid_snapshots if x['type'] == "pre"]
post_snapshot = [x for x in jid_snapshots if x['type'] == "post"]
if not pre_snapshot or not post_snapshot:
raise CommandExecutionError("Jid '{0}' snapshots not found".format(jid))
return (
pre_snapshot[0]['id'],
post_snapshot[0]['id']
) | ['def', '_get_jid_snapshots', '(', 'jid', ',', 'config', '=', "'root'", ')', ':', 'jid_snapshots', '=', '[', 'x', 'for', 'x', 'in', 'list_snapshots', '(', 'config', ')', 'if', 'x', '[', "'userdata'", ']', '.', 'get', '(', '"salt_jid"', ')', '==', 'jid', ']', 'pre_snapshot', '=', '[', 'x', 'for', 'x', 'in', 'jid_snapshots', 'if', 'x', '[', "'type'", ']', '==', '"pre"', ']', 'post_snapshot', '=', '[', 'x', 'for', 'x', 'in', 'jid_snapshots', 'if', 'x', '[', "'type'", ']', '==', '"post"', ']', 'if', 'not', 'pre_snapshot', 'or', 'not', 'post_snapshot', ':', 'raise', 'CommandExecutionError', '(', '"Jid \'{0}\' snapshots not found"', '.', 'format', '(', 'jid', ')', ')', 'return', '(', 'pre_snapshot', '[', '0', ']', '[', "'id'", ']', ',', 'post_snapshot', '[', '0', ']', '[', "'id'", ']', ')'] | Returns pre/post snapshots made by a given Salt jid
Looks for 'salt_jid' entries into snapshots userdata which are created
when 'snapper.run' is executed. | ['Returns', 'pre', '/', 'post', 'snapshots', 'made', 'by', 'a', 'given', 'Salt', 'jid'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L714-L731 |
7,808 | roclark/sportsreference | sportsreference/ncaaf/boxscore.py | Boxscore.dataframe | def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '2018-01-08-georgia'.
"""
if self._away_points is None and self._home_points is None:
return None
fields_to_include = {
'away_first_downs': self.away_first_downs,
'away_fumbles': self.away_fumbles,
'away_fumbles_lost': self.away_fumbles_lost,
'away_interceptions': self.away_interceptions,
'away_pass_attempts': self.away_pass_attempts,
'away_pass_completions': self.away_pass_completions,
'away_pass_touchdowns': self.away_pass_touchdowns,
'away_pass_yards': self.away_pass_yards,
'away_penalties': self.away_penalties,
'away_points': self.away_points,
'away_rush_attempts': self.away_rush_attempts,
'away_rush_touchdowns': self.away_rush_touchdowns,
'away_rush_yards': self.away_rush_yards,
'away_total_yards': self.away_total_yards,
'away_turnovers': self.away_turnovers,
'away_yards_from_penalties': self.away_yards_from_penalties,
'date': self.date,
'home_first_downs': self.home_first_downs,
'home_fumbles': self.home_fumbles,
'home_fumbles_lost': self.home_fumbles_lost,
'home_interceptions': self.home_interceptions,
'home_pass_attempts': self.home_pass_attempts,
'home_pass_completions': self.home_pass_completions,
'home_pass_touchdowns': self.home_pass_touchdowns,
'home_pass_yards': self.home_pass_yards,
'home_penalties': self.home_penalties,
'home_points': self.home_points,
'home_rush_attempts': self.home_rush_attempts,
'home_rush_touchdowns': self.home_rush_touchdowns,
'home_rush_yards': self.home_rush_yards,
'home_total_yards': self.home_total_yards,
'home_turnovers': self.home_turnovers,
'home_yards_from_penalties': self.home_yards_from_penalties,
'losing_abbr': self.losing_abbr,
'losing_name': self.losing_name,
'stadium': self.stadium,
'time': self.time,
'winner': self.winner,
'winning_abbr': self.winning_abbr,
'winning_name': self.winning_name
}
return pd.DataFrame([fields_to_include], index=[self._uri]) | python | def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '2018-01-08-georgia'.
"""
if self._away_points is None and self._home_points is None:
return None
fields_to_include = {
'away_first_downs': self.away_first_downs,
'away_fumbles': self.away_fumbles,
'away_fumbles_lost': self.away_fumbles_lost,
'away_interceptions': self.away_interceptions,
'away_pass_attempts': self.away_pass_attempts,
'away_pass_completions': self.away_pass_completions,
'away_pass_touchdowns': self.away_pass_touchdowns,
'away_pass_yards': self.away_pass_yards,
'away_penalties': self.away_penalties,
'away_points': self.away_points,
'away_rush_attempts': self.away_rush_attempts,
'away_rush_touchdowns': self.away_rush_touchdowns,
'away_rush_yards': self.away_rush_yards,
'away_total_yards': self.away_total_yards,
'away_turnovers': self.away_turnovers,
'away_yards_from_penalties': self.away_yards_from_penalties,
'date': self.date,
'home_first_downs': self.home_first_downs,
'home_fumbles': self.home_fumbles,
'home_fumbles_lost': self.home_fumbles_lost,
'home_interceptions': self.home_interceptions,
'home_pass_attempts': self.home_pass_attempts,
'home_pass_completions': self.home_pass_completions,
'home_pass_touchdowns': self.home_pass_touchdowns,
'home_pass_yards': self.home_pass_yards,
'home_penalties': self.home_penalties,
'home_points': self.home_points,
'home_rush_attempts': self.home_rush_attempts,
'home_rush_touchdowns': self.home_rush_touchdowns,
'home_rush_yards': self.home_rush_yards,
'home_total_yards': self.home_total_yards,
'home_turnovers': self.home_turnovers,
'home_yards_from_penalties': self.home_yards_from_penalties,
'losing_abbr': self.losing_abbr,
'losing_name': self.losing_name,
'stadium': self.stadium,
'time': self.time,
'winner': self.winner,
'winning_abbr': self.winning_abbr,
'winning_name': self.winning_name
}
return pd.DataFrame([fields_to_include], index=[self._uri]) | ['def', 'dataframe', '(', 'self', ')', ':', 'if', 'self', '.', '_away_points', 'is', 'None', 'and', 'self', '.', '_home_points', 'is', 'None', ':', 'return', 'None', 'fields_to_include', '=', '{', "'away_first_downs'", ':', 'self', '.', 'away_first_downs', ',', "'away_fumbles'", ':', 'self', '.', 'away_fumbles', ',', "'away_fumbles_lost'", ':', 'self', '.', 'away_fumbles_lost', ',', "'away_interceptions'", ':', 'self', '.', 'away_interceptions', ',', "'away_pass_attempts'", ':', 'self', '.', 'away_pass_attempts', ',', "'away_pass_completions'", ':', 'self', '.', 'away_pass_completions', ',', "'away_pass_touchdowns'", ':', 'self', '.', 'away_pass_touchdowns', ',', "'away_pass_yards'", ':', 'self', '.', 'away_pass_yards', ',', "'away_penalties'", ':', 'self', '.', 'away_penalties', ',', "'away_points'", ':', 'self', '.', 'away_points', ',', "'away_rush_attempts'", ':', 'self', '.', 'away_rush_attempts', ',', "'away_rush_touchdowns'", ':', 'self', '.', 'away_rush_touchdowns', ',', "'away_rush_yards'", ':', 'self', '.', 'away_rush_yards', ',', "'away_total_yards'", ':', 'self', '.', 'away_total_yards', ',', "'away_turnovers'", ':', 'self', '.', 'away_turnovers', ',', "'away_yards_from_penalties'", ':', 'self', '.', 'away_yards_from_penalties', ',', "'date'", ':', 'self', '.', 'date', ',', "'home_first_downs'", ':', 'self', '.', 'home_first_downs', ',', "'home_fumbles'", ':', 'self', '.', 'home_fumbles', ',', "'home_fumbles_lost'", ':', 'self', '.', 'home_fumbles_lost', ',', "'home_interceptions'", ':', 'self', '.', 'home_interceptions', ',', "'home_pass_attempts'", ':', 'self', '.', 'home_pass_attempts', ',', "'home_pass_completions'", ':', 'self', '.', 'home_pass_completions', ',', "'home_pass_touchdowns'", ':', 'self', '.', 'home_pass_touchdowns', ',', "'home_pass_yards'", ':', 'self', '.', 'home_pass_yards', ',', "'home_penalties'", ':', 'self', '.', 'home_penalties', ',', "'home_points'", ':', 'self', '.', 'home_points', ',', "'home_rush_attempts'", ':', 'self', '.', 'home_rush_attempts', ',', "'home_rush_touchdowns'", ':', 'self', '.', 'home_rush_touchdowns', ',', "'home_rush_yards'", ':', 'self', '.', 'home_rush_yards', ',', "'home_total_yards'", ':', 'self', '.', 'home_total_yards', ',', "'home_turnovers'", ':', 'self', '.', 'home_turnovers', ',', "'home_yards_from_penalties'", ':', 'self', '.', 'home_yards_from_penalties', ',', "'losing_abbr'", ':', 'self', '.', 'losing_abbr', ',', "'losing_name'", ':', 'self', '.', 'losing_name', ',', "'stadium'", ':', 'self', '.', 'stadium', ',', "'time'", ':', 'self', '.', 'time', ',', "'winner'", ':', 'self', '.', 'winner', ',', "'winning_abbr'", ':', 'self', '.', 'winning_abbr', ',', "'winning_name'", ':', 'self', '.', 'winning_name', '}', 'return', 'pd', '.', 'DataFrame', '(', '[', 'fields_to_include', ']', ',', 'index', '=', '[', 'self', '.', '_uri', ']', ')'] | Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '2018-01-08-georgia'. | ['Returns', 'a', 'pandas', 'DataFrame', 'containing', 'all', 'other', 'class', 'properties', 'and', 'values', '.', 'The', 'index', 'for', 'the', 'DataFrame', 'is', 'the', 'string', 'URI', 'that', 'is', 'used', 'to', 'instantiate', 'the', 'class', 'such', 'as', '2018', '-', '01', '-', '08', '-', 'georgia', '.'] | train | https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaaf/boxscore.py#L708-L758 |
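A usage sketch for the `dataframe` property; the URI comes from the docstring above, and sportsreference fetches the boxscore page live, so network access is required:

```python
# Usage sketch; requires network access to sports-reference.com.
from sportsreference.ncaaf.boxscore import Boxscore

df = Boxscore('2018-01-08-georgia').dataframe
if df is not None:   # None when no points were parsed
    print(df[['winning_name', 'home_points', 'away_points']])
```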
7,809 | bwohlberg/sporco | docs/source/docntbk.py | construct_notebook_index | def construct_notebook_index(title, pthlst, pthidx):
"""
Construct a string containing a markdown format index for the list
of paths in `pthlst`. The title for the index is in `title`, and
`pthidx` is a dict giving label text for each path.
"""
# Insert title text
txt = '"""\n## %s\n"""\n\n"""' % title
# Insert entry for each item in pthlst
for pth in pthlst:
# If pth refers to a .py file, replace .py with .ipynb, otherwise
# assume it's a directory name and append '/index.ipynb'
if pth[-3:] == '.py':
link = os.path.splitext(pth)[0] + '.ipynb'
else:
link = os.path.join(pth, 'index.ipynb')
txt += '- [%s](%s)\n' % (pthidx[pth], link)
txt += '"""'
return txt | python | def construct_notebook_index(title, pthlst, pthidx):
"""
Construct a string containing a markdown format index for the list
of paths in `pthlst`. The title for the index is in `title`, and
`pthidx` is a dict giving label text for each path.
"""
# Insert title text
txt = '"""\n## %s\n"""\n\n"""' % title
# Insert entry for each item in pthlst
for pth in pthlst:
# If pth refers to a .py file, replace .py with .ipynb, otherwise
# assume it's a directory name and append '/index.ipynb'
if pth[-3:] == '.py':
link = os.path.splitext(pth)[0] + '.ipynb'
else:
link = os.path.join(pth, 'index.ipynb')
txt += '- [%s](%s)\n' % (pthidx[pth], link)
txt += '"""'
return txt | ['def', 'construct_notebook_index', '(', 'title', ',', 'pthlst', ',', 'pthidx', ')', ':', '# Insert title text', 'txt', '=', '\'"""\\n## %s\\n"""\\n\\n"""\'', '%', 'title', '# Insert entry for each item in pthlst', 'for', 'pth', 'in', 'pthlst', ':', '# If pth refers to a .py file, replace .py with .ipynb, otherwise', "# assume it's a directory name and append '/index.ipynb'", 'if', 'pth', '[', '-', '3', ':', ']', '==', "'.py'", ':', 'link', '=', 'os', '.', 'path', '.', 'splitext', '(', 'pth', ')', '[', '0', ']', '+', "'.ipynb'", 'else', ':', 'link', '=', 'os', '.', 'path', '.', 'join', '(', 'pth', ',', "'index.ipynb'", ')', 'txt', '+=', "'- [%s](%s)\\n'", '%', '(', 'pthidx', '[', 'pth', ']', ',', 'link', ')', 'txt', '+=', '\'"""\'', 'return', 'txt'] | Construct a string containing a markdown format index for the list
of paths in `pthlst`. The title for the index is in `title`, and
`pthidx` is a dict giving label text for each path. | ['Construct', 'a', 'string', 'containing', 'a', 'markdown', 'format', 'index', 'for', 'the', 'list', 'of', 'paths', 'in', 'pthlst', '.', 'The', 'title', 'for', 'the', 'index', 'is', 'in', 'title', 'and', 'pthidx', 'is', 'a', 'dict', 'giving', 'label', 'text', 'for', 'each', 'path', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/docs/source/docntbk.py#L334-L353 |
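`construct_notebook_index` is self-contained, so a small sketch shows the output shape; the paths and labels below are hypothetical:

```python
# Self-contained sketch with hypothetical paths and labels.
pthlst = ['demo_plot.py', 'advanced']
pthidx = {'demo_plot.py': 'Plotting demo', 'advanced': 'Advanced usage'}
print(construct_notebook_index('Examples', pthlst, pthidx))
# .py entries link to .ipynb files; directories link to <dir>/index.ipynb
```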
7,810 | EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/utility.py | axis_as_object | def axis_as_object(arr, axis=-1):
"""cast the given axis of an array to a void object
if the axis to be cast is contiguous, a view is returned, otherwise a copy is made
this is useful for efficiently sorting by the content of an axis, for instance
Parameters
----------
arr : ndarray
array to view as void object type
axis : int
axis to view as a void object type
Returns
-------
ndarray
array with the given axis viewed as a void object
"""
shape = arr.shape
# make axis to be viewed as a void object as contiguous items
arr = np.ascontiguousarray(np.rollaxis(arr, axis, arr.ndim))
# number of bytes in each void object
nbytes = arr.dtype.itemsize * shape[axis]
# void type with the correct number of bytes
voidtype = np.dtype((np.void, nbytes))
# return the view as such, with the reduced shape
return arr.view(voidtype).reshape(np.delete(shape, axis)) | python | def axis_as_object(arr, axis=-1):
"""cast the given axis of an array to a void object
if the axis to be cast is contiguous, a view is returned, otherwise a copy is made
this is useful for efficiently sorting by the content of an axis, for instance
Parameters
----------
arr : ndarray
array to view as void object type
axis : int
axis to view as a void object type
Returns
-------
ndarray
array with the given axis viewed as a void object
"""
shape = arr.shape
# make axis to be viewed as a void object as contiguous items
arr = np.ascontiguousarray(np.rollaxis(arr, axis, arr.ndim))
# number of bytes in each void object
nbytes = arr.dtype.itemsize * shape[axis]
# void type with the correct number of bytes
voidtype = np.dtype((np.void, nbytes))
# return the view as such, with the reduced shape
return arr.view(voidtype).reshape(np.delete(shape, axis)) | ['def', 'axis_as_object', '(', 'arr', ',', 'axis', '=', '-', '1', ')', ':', 'shape', '=', 'arr', '.', 'shape', '# make axis to be viewed as a void object as contiguous items', 'arr', '=', 'np', '.', 'ascontiguousarray', '(', 'np', '.', 'rollaxis', '(', 'arr', ',', 'axis', ',', 'arr', '.', 'ndim', ')', ')', '# number of bytes in each void object', 'nbytes', '=', 'arr', '.', 'dtype', '.', 'itemsize', '*', 'shape', '[', 'axis', ']', '# void type with the correct number of bytes', 'voidtype', '=', 'np', '.', 'dtype', '(', '(', 'np', '.', 'void', ',', 'nbytes', ')', ')', '# return the view as such, with the reduced shape', 'return', 'arr', '.', 'view', '(', 'voidtype', ')', '.', 'reshape', '(', 'np', '.', 'delete', '(', 'shape', ',', 'axis', ')', ')'] | cast the given axis of an array to a void object
if the axis to be cast is contiguous, a view is returned, otherwise a copy is made
this is useful for efficiently sorting by the content of an axis, for instance
Parameters
----------
arr : ndarray
array to view as void object type
axis : int
axis to view as a void object type
Returns
-------
ndarray
array with the given axis viewed as a void object | ['cast', 'the', 'given', 'axis', 'of', 'an', 'array', 'to', 'a', 'void', 'object', 'if', 'the', 'axis', 'to', 'be', 'cast', 'is', 'contiguous', 'a', 'view', 'is', 'returned', 'otherwise', 'a', 'copy', 'is', 'made', 'this', 'is', 'useful', 'for', 'efficiently', 'sorting', 'by', 'the', 'content', 'of', 'an', 'axis', 'for', 'instance'] | train | https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/utility.py#L34-L59 |
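A self-contained sketch of `axis_as_object` using the classic unique-rows trick the docstring alludes to; `uint8` is chosen so that byte-wise comparison of the void objects matches numeric order exactly:

```python
# Self-contained sketch: find unique rows via a void view of each row.
import numpy as np

arr = np.array([[1, 2], [3, 0], [1, 2]], dtype=np.uint8)
keys = axis_as_object(arr, axis=-1)          # one void scalar per row
_, first = np.unique(keys, return_index=True)
print(arr[np.sort(first)])                   # unique rows: [[1 2], [3 0]]
```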
7,811 | cjdrake/pyeda | pyeda/boolalg/expr.py | Not | def Not(x, simplify=True):
"""Expression negation operator
If *simplify* is ``True``, return a simplified expression.
"""
x = Expression.box(x).node
y = exprnode.not_(x)
if simplify:
y = y.simplify()
return _expr(y) | python | def Not(x, simplify=True):
"""Expression negation operator
If *simplify* is ``True``, return a simplified expression.
"""
x = Expression.box(x).node
y = exprnode.not_(x)
if simplify:
y = y.simplify()
return _expr(y) | ['def', 'Not', '(', 'x', ',', 'simplify', '=', 'True', ')', ':', 'x', '=', 'Expression', '.', 'box', '(', 'x', ')', '.', 'node', 'y', '=', 'exprnode', '.', 'not_', '(', 'x', ')', 'if', 'simplify', ':', 'y', '=', 'y', '.', 'simplify', '(', ')', 'return', '_expr', '(', 'y', ')'] | Expression negation operator
If *simplify* is ``True``, return a simplified expression. | ['Expression', 'negation', 'operator'] | train | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/expr.py#L309-L318 |
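A usage sketch for `Not`; pyeda exposes variables and operators through `pyeda.inter`:

```python
# Usage sketch with pyeda's public interface.
from pyeda.inter import exprvar, Not

a, b = exprvar('a'), exprvar('b')
f = Not(a & b)
print(f)             # negation of the conjunction
print(Not(Not(a)))   # double negation simplifies back to a
```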
7,812 | gitpython-developers/smmap | smmap/mman.py | WindowCursor._destroy | def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. It's possible
# to re-import it, however, it's probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error-prone getrefcount altogether.
pass | python | def _destroy(self):
"""Destruction code to decrement counters"""
self.unuse_region()
if self._rlist is not None:
# Actual client count, which doesn't include the reference kept by the manager, nor ours
# as we are about to be deleted
try:
if len(self._rlist) == 0:
# Free all resources associated with the mapped file
self._manager._fdict.pop(self._rlist.path_or_fd())
# END remove regions list from manager
except (TypeError, KeyError):
# sometimes, during shutdown, getrefcount is None. It's possible
# to re-import it, however, it's probably better to just ignore
# this python problem (for now).
# The next step is to get rid of the error-prone getrefcount altogether.
pass | ['def', '_destroy', '(', 'self', ')', ':', 'self', '.', 'unuse_region', '(', ')', 'if', 'self', '.', '_rlist', 'is', 'not', 'None', ':', "# Actual client count, which doesn't include the reference kept by the manager, nor ours", '# as we are about to be deleted', 'try', ':', 'if', 'len', '(', 'self', '.', '_rlist', ')', '==', '0', ':', '# Free all resources associated with the mapped file', 'self', '.', '_manager', '.', '_fdict', '.', 'pop', '(', 'self', '.', '_rlist', '.', 'path_or_fd', '(', ')', ')', '# END remove regions list from manager', 'except', '(', 'TypeError', ',', 'KeyError', ')', ':', '# sometimes, during shutdown, getrefcount is None. Its possible', '# to re-import it, however, its probably better to just ignore', '# this python problem (for now).', '# The next step is to get rid of the error prone getrefcount alltogether.', 'pass'] | Destruction code to decrement counters | ['Destruction', 'code', 'to', 'decrement', 'counters'] | train | https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/mman.py#L55-L72 |
7,813 | delph-in/pydelphin | delphin/mrs/compare.py | _isomorphisms | def _isomorphisms(q, g, check_varprops=True):
"""
Inspired by Turbo_ISO: http://dl.acm.org/citation.cfm?id=2465300
"""
# convert MRSs to be more graph-like, and add some indices
qig = _IsoGraph(q, varprops=check_varprops) # qig = q isograph
gig = _IsoGraph(g, varprops=check_varprops) # gig = g isograph
# qsigs, qsigidx = _isomorphism_sigs(q, check_varprops)
# gsigs, gsigidx = _isomorphism_sigs(g, check_varprops)
# (it would be nice to not have to do this... maybe later)
# qadj = _isomorphism_adj(q, qsigidx)
# gadj = _isomorphism_adj(g, gsigidx)
# the degree of each node is useful (but can it be combined with adj?)
# qdeg = _isomorphism_deg(qadj)
# gdeg = _isomorphism_deg(gadj)
u_s = _isomorphism_choose_start_q_vertex(qig, gig, subgraph=False)
q_ = _isomorphism_rewrite_to_NECtree(u_s, qig)
for v_s in gsigs.get(qsigidx[u_s], []):
cr = _isomorphism_explore_CR(q_, {v_s}, qig, gig)
if cr is None:
continue
order = _isomorphism_determine_matching_order(q_, cr)
update_state(M,F,{u_s}, {v_s})
subraph_search(q, q_, g, order, 1) # 1="the first query vertex to match"
restore_state(M, F, {u_s}, {v_s}) | python | def _isomorphisms(q, g, check_varprops=True):
"""
Inspired by Turbo_ISO: http://dl.acm.org/citation.cfm?id=2465300
"""
# convert MRSs to be more graph-like, and add some indices
qig = _IsoGraph(q, varprops=check_varprops) # qig = q isograph
gig = _IsoGraph(g, varprops=check_varprops) # gig = g isograph
# qsigs, qsigidx = _isomorphism_sigs(q, check_varprops)
# gsigs, gsigidx = _isomorphism_sigs(g, check_varprops)
# (it would be nice to not have to do this... maybe later)
# qadj = _isomorphism_adj(q, qsigidx)
# gadj = _isomorphism_adj(g, gsigidx)
# the degree of each node is useful (but can it be combined with adj?)
# qdeg = _isomorphism_deg(qadj)
# gdeg = _isomorphism_deg(gadj)
u_s = _isomorphism_choose_start_q_vertex(qig, gig, subgraph=False)
q_ = _isomorphism_rewrite_to_NECtree(u_s, qig)
for v_s in gsigs.get(qsigidx[u_s], []):
cr = _isomorphism_explore_CR(q_, {v_s}, qig, gig)
if cr is None:
continue
order = _isomorphism_determine_matching_order(q_, cr)
update_state(M,F,{u_s}, {v_s})
subraph_search(q, q_, g, order, 1) # 1="the first query vertex to match"
restore_state(M, F, {u_s}, {v_s}) | ['def', '_isomorphisms', '(', 'q', ',', 'g', ',', 'check_varprops', '=', 'True', ')', ':', '# convert MRSs to be more graph-like, and add some indices', 'qig', '=', '_IsoGraph', '(', 'q', ',', 'varprops', '=', 'check_varprops', ')', '# qig = q isograph', 'gig', '=', '_IsoGraph', '(', 'g', ',', 'varprops', '=', 'check_varprops', ')', '# gig = q isograph', '# qsigs, qsigidx = _isomorphism_sigs(q, check_varprops)', '# gsigs, gsigidx = _isomorphism_sigs(g, check_varprops)', '# (it would be nice to not have to do this... maybe later)', '# qadj = _isomorphism_adj(q, qsigidx)', '# gadj = _isomorphism_adj(g, gsigidx)', '# the degree of each node is useful (but can it be combined with adj?)', '# qdeg = _isomorphism_deg(qadj)', '# gdeg = _isomorphism_deg(gadj)', 'u_s', '=', '_isomorphism_choose_start_q_vertex', '(', 'qig', ',', 'gig', ',', 'subgraph', '=', 'False', ')', 'q_', '=', '_isomorphism_rewrite_to_NECtree', '(', 'u_s', ',', 'qig', ')', 'for', 'v_s', 'in', 'gsigs', '.', 'get', '(', 'qsigidx', '[', 'u_s', ']', ',', '[', ']', ')', ':', 'cr', '=', '_isomorphism_explore_CR', '(', 'q_', ',', '{', 'v_s', '}', ',', 'qig', ',', 'gig', ')', 'if', 'cr', 'is', 'None', ':', 'continue', 'order', '=', '_isomorphism_determine_matching_order', '(', 'q_', ',', 'cr', ')', 'update_state', '(', 'M', ',', 'F', ',', '{', 'u_s', '}', ',', '{', 'v_s', '}', ')', 'subraph_search', '(', 'q', ',', 'q_', ',', 'g', ',', 'order', ',', '1', ')', '# 1="the first query vertex to match"', 'restore_state', '(', 'M', ',', 'F', ',', '{', 'u_s', '}', ',', '{', 'v_s', '}', ')'] | Inspired by Turbo_ISO: http://dl.acm.org/citation.cfm?id=2465300 | ['Inspired', 'by', 'Turbo_ISO', ':', 'http', ':', '//', 'dl', '.', 'acm', '.', 'org', '/', 'citation', '.', 'cfm?id', '=', '2465300'] | train | https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/mrs/compare.py#L81-L106 |
7,814 | geertj/gruvi | lib/gruvi/callbacks.py | clear_callbacks | def clear_callbacks(obj):
"""Remove all callbacks from an object."""
callbacks = obj._callbacks
if isinstance(callbacks, dllist):
# Help the garbage collector by clearing all links.
callbacks.clear()
obj._callbacks = None | python | def clear_callbacks(obj):
"""Remove all callbacks from an object."""
callbacks = obj._callbacks
if isinstance(callbacks, dllist):
# Help the garbage collector by clearing all links.
callbacks.clear()
obj._callbacks = None | ['def', 'clear_callbacks', '(', 'obj', ')', ':', 'callbacks', '=', 'obj', '.', '_callbacks', 'if', 'isinstance', '(', 'callbacks', ',', 'dllist', ')', ':', '# Help the garbage collector by clearing all links.', 'callbacks', '.', 'clear', '(', ')', 'obj', '.', '_callbacks', '=', 'None'] | Remove all callbacks from an object. | ['Remove', 'all', 'callbacks', 'from', 'an', 'object', '.'] | train | https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/callbacks.py#L82-L88 |
7,815 | scot-dev/scot | scot/ooapi.py | Workspace.keep_sources | def keep_sources(self, keep):
"""Keep only the specified sources in the decomposition.
"""
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
n_sources = self.mixing_.shape[0]
self.remove_sources(np.setdiff1d(np.arange(n_sources), np.array(keep)))
return self | python | def keep_sources(self, keep):
"""Keep only the specified sources in the decomposition.
"""
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
n_sources = self.mixing_.shape[0]
self.remove_sources(np.setdiff1d(np.arange(n_sources), np.array(keep)))
return self | ['def', 'keep_sources', '(', 'self', ',', 'keep', ')', ':', 'if', 'self', '.', 'unmixing_', 'is', 'None', 'or', 'self', '.', 'mixing_', 'is', 'None', ':', 'raise', 'RuntimeError', '(', '"No sources available (run do_mvarica first)"', ')', 'n_sources', '=', 'self', '.', 'mixing_', '.', 'shape', '[', '0', ']', 'self', '.', 'remove_sources', '(', 'np', '.', 'setdiff1d', '(', 'np', '.', 'arange', '(', 'n_sources', ')', ',', 'np', '.', 'array', '(', 'keep', ')', ')', ')', 'return', 'self'] | Keep only the specified sources in the decomposition. | ['Keep', 'only', 'the', 'specified', 'sources', 'in', 'the', 'decomposition', '.'] | train | https://github.com/scot-dev/scot/blob/48598b79d4400dad893b134cd2194715511facda/scot/ooapi.py#L375-L382 |
7,816 | CivicSpleen/ambry | ambry/bundle/bundle.py | Bundle.build_table | def build_table(self, table, force=False):
"""Build all of the sources for a table """
sources = self._resolve_sources(None, [table])
for source in sources:
self.build_source(None, source, force=force)
self.unify_partitions() | python | def build_table(self, table, force=False):
"""Build all of the sources for a table """
sources = self._resolve_sources(None, [table])
for source in sources:
self.build_source(None, source, force=force)
self.unify_partitions() | ['def', 'build_table', '(', 'self', ',', 'table', ',', 'force', '=', 'False', ')', ':', 'sources', '=', 'self', '.', '_resolve_sources', '(', 'None', ',', '[', 'table', ']', ')', 'for', 'source', 'in', 'sources', ':', 'self', '.', 'build_source', '(', 'None', ',', 'source', ',', 'force', '=', 'force', ')', 'self', '.', 'unify_partitions', '(', ')'] | Build all of the sources for a table | ['Build', 'all', 'of', 'the', 'sources', 'for', 'a', 'table'] | train | https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/bundle/bundle.py#L2332-L2340 |
7,817 | nugget/python-insteonplm | insteonplm/address.py | Address.x10_unitcode | def x10_unitcode(self):
"""Emit the X10 unit code."""
unitcode = None
if self.is_x10:
unitcode = insteonplm.utils.byte_to_unitcode(self.addr[2])
return unitcode | python | def x10_unitcode(self):
"""Emit the X10 unit code."""
unitcode = None
if self.is_x10:
unitcode = insteonplm.utils.byte_to_unitcode(self.addr[2])
return unitcode | ['def', 'x10_unitcode', '(', 'self', ')', ':', 'unitcode', '=', 'None', 'if', 'self', '.', 'is_x10', ':', 'unitcode', '=', 'insteonplm', '.', 'utils', '.', 'byte_to_unitcode', '(', 'self', '.', 'addr', '[', '2', ']', ')', 'return', 'unitcode'] | Emit the X10 unit code. | ['Emit', 'the', 'X10', 'unit', 'code', '.'] | train | https://github.com/nugget/python-insteonplm/blob/65548041f1b0729ae1ae904443dd81b0c6cbf1bf/insteonplm/address.py#L177-L182 |
7,818 | DLR-RM/RAFCON | source/rafcon/core/states/container_state.py | ContainerState.start_state_id | def start_state_id(self, start_state_id, to_outcome=None):
"""Set the start state of the container state
See property
:param start_state_id: The state id of the state which should be executed first in the Container state
:raises exceptions.ValueError: if the start_state_id does not exist in
:py:attr:`rafcon.core.states.container_state.ContainerState.states`
"""
if start_state_id is not None and start_state_id not in self.states:
raise ValueError("start_state_id does not exist")
if start_state_id is None and to_outcome is not None: # this is the case if the start state is the state itself
if to_outcome not in self.outcomes:
raise ValueError("to_outcome does not exist")
if start_state_id != self.state_id:
raise ValueError("to_outcome defined but start_state_id is not state_id")
# First we remove the transition to the start state
for transition_id in self.transitions:
if self.transitions[transition_id].from_state is None:
# If the current start state is the same as the old one, we don't have to do anything
if self.transitions[transition_id].to_state == start_state_id:
return
self.remove_transition(transition_id)
break
if start_state_id is not None:
self.add_transition(None, None, start_state_id, to_outcome) | python | def start_state_id(self, start_state_id, to_outcome=None):
"""Set the start state of the container state
See property
:param start_state_id: The state id of the state which should be executed first in the Container state
:raises exceptions.ValueError: if the start_state_id does not exist in
:py:attr:`rafcon.core.states.container_state.ContainerState.states`
"""
if start_state_id is not None and start_state_id not in self.states:
raise ValueError("start_state_id does not exist")
if start_state_id is None and to_outcome is not None: # this is the case if the start state is the state itself
if to_outcome not in self.outcomes:
raise ValueError("to_outcome does not exist")
if start_state_id != self.state_id:
raise ValueError("to_outcome defined but start_state_id is not state_id")
# First we remove the transition to the start state
for transition_id in self.transitions:
if self.transitions[transition_id].from_state is None:
# If the current start state is the same as the old one, we don't have to do anything
if self.transitions[transition_id].to_state == start_state_id:
return
self.remove_transition(transition_id)
break
if start_state_id is not None:
self.add_transition(None, None, start_state_id, to_outcome) | ['def', 'start_state_id', '(', 'self', ',', 'start_state_id', ',', 'to_outcome', '=', 'None', ')', ':', 'if', 'start_state_id', 'is', 'not', 'None', 'and', 'start_state_id', 'not', 'in', 'self', '.', 'states', ':', 'raise', 'ValueError', '(', '"start_state_id does not exist"', ')', 'if', 'start_state_id', 'is', 'None', 'and', 'to_outcome', 'is', 'not', 'None', ':', '# this is the case if the start state is the state itself', 'if', 'to_outcome', 'not', 'in', 'self', '.', 'outcomes', ':', 'raise', 'ValueError', '(', '"to_outcome does not exist"', ')', 'if', 'start_state_id', '!=', 'self', '.', 'state_id', ':', 'raise', 'ValueError', '(', '"to_outcome defined but start_state_id is not state_id"', ')', '# First we remove the transition to the start state', 'for', 'transition_id', 'in', 'self', '.', 'transitions', ':', 'if', 'self', '.', 'transitions', '[', 'transition_id', ']', '.', 'from_state', 'is', 'None', ':', "# If the current start state is the same as the old one, we don't have to do anything", 'if', 'self', '.', 'transitions', '[', 'transition_id', ']', '.', 'to_state', '==', 'start_state_id', ':', 'return', 'self', '.', 'remove_transition', '(', 'transition_id', ')', 'break', 'if', 'start_state_id', 'is', 'not', 'None', ':', 'self', '.', 'add_transition', '(', 'None', ',', 'None', ',', 'start_state_id', ',', 'to_outcome', ')'] | Set the start state of the container state
See property
:param start_state_id: The state id of the state which should be executed first in the Container state
:raises exceptions.ValueError: if the start_state_id does not exist in
:py:attr:`rafcon.core.states.container_state.ContainerState.states` | ['Set', 'the', 'start', 'state', 'of', 'the', 'container', 'state'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L2266-L2293 |
7,819 | Grunny/zap-cli | zapcli/zap_helper.py | ZAPHelper.xml_report | def xml_report(self, file_path):
"""Generate and save XML report"""
self.logger.debug('Generating XML report')
report = self.zap.core.xmlreport()
self._write_report(report, file_path) | python | def xml_report(self, file_path):
"""Generate and save XML report"""
self.logger.debug('Generating XML report')
report = self.zap.core.xmlreport()
self._write_report(report, file_path) | ['def', 'xml_report', '(', 'self', ',', 'file_path', ')', ':', 'self', '.', 'logger', '.', 'debug', '(', "'Generating XML report'", ')', 'report', '=', 'self', '.', 'zap', '.', 'core', '.', 'xmlreport', '(', ')', 'self', '.', '_write_report', '(', 'report', ',', 'file_path', ')'] | Generate and save XML report | ['Generate', 'and', 'save', 'XML', 'report'] | train | https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/zap_helper.py#L368-L372 |
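A hedged usage sketch for `xml_report`; constructing `ZAPHelper` with defaults, a running ZAP instance, and the output path are all assumptions:

```python
# Usage sketch; assumes a ZAP instance is running and reachable,
# and that ZAPHelper's defaults point at it. The path is a placeholder.
zap_helper = ZAPHelper()
zap_helper.xml_report('/tmp/zap-report.xml')
```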
7,820 | MartinThoma/hwrt | hwrt/utils.py | print_status | def print_status(total, current, start_time=None):
"""
Show how much work was done / how much work is remaining.
Parameters
----------
total : float
The total amount of work
current : float
The work that has been done so far
start_time : int
The start time in seconds since 1970 to estimate the remaining time.
"""
percentage_done = float(current) / total
sys.stdout.write("\r%0.2f%% " % (percentage_done * 100))
if start_time is not None:
current_running_time = time.time() - start_time
remaining_seconds = current_running_time / percentage_done
tmp = datetime.timedelta(seconds=remaining_seconds)
sys.stdout.write("(%s remaining) " % str(tmp))
sys.stdout.flush() | python | def print_status(total, current, start_time=None):
"""
Show how much work was done / how much work is remaining.
Parameters
----------
total : float
The total amount of work
current : float
The work that has been done so far
start_time : int
The start time in seconds since 1970 to estimate the remaining time.
"""
percentage_done = float(current) / total
sys.stdout.write("\r%0.2f%% " % (percentage_done * 100))
if start_time is not None:
current_running_time = time.time() - start_time
remaining_seconds = current_running_time / percentage_done
tmp = datetime.timedelta(seconds=remaining_seconds)
sys.stdout.write("(%s remaining) " % str(tmp))
sys.stdout.flush() | ['def', 'print_status', '(', 'total', ',', 'current', ',', 'start_time', '=', 'None', ')', ':', 'percentage_done', '=', 'float', '(', 'current', ')', '/', 'total', 'sys', '.', 'stdout', '.', 'write', '(', '"\\r%0.2f%% "', '%', '(', 'percentage_done', '*', '100', ')', ')', 'if', 'start_time', 'is', 'not', 'None', ':', 'current_running_time', '=', 'time', '.', 'time', '(', ')', '-', 'start_time', 'remaining_seconds', '=', 'current_running_time', '/', 'percentage_done', 'tmp', '=', 'datetime', '.', 'timedelta', '(', 'seconds', '=', 'remaining_seconds', ')', 'sys', '.', 'stdout', '.', 'write', '(', '"(%s remaining) "', '%', 'str', '(', 'tmp', ')', ')', 'sys', '.', 'stdout', '.', 'flush', '(', ')'] | Show how much work was done / how much work is remaining.
Parameters
----------
total : float
The total amount of work
current : float
The work that has been done so far
start_time : int
The start time in seconds since 1970 to estimate the remaining time. | ['Show', 'how', 'much', 'work', 'was', 'done', '/', 'how', 'much', 'work', 'is', 'remaining', '.'] | train | https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/utils.py#L32-L52 |
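`print_status` is self-contained, so a short driver loop demonstrates it directly:

```python
# Self-contained sketch: report progress over 100 units of work.
import time

start = time.time()
for i in range(1, 101):
    time.sleep(0.01)          # stand-in for real work
    print_status(100, i, start)
print()                       # newline after the final \r status line
```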
7,821 | jobovy/galpy | galpy/potential/Potential.py | Potential.Rzderiv | def Rzderiv(self,R,Z,phi=0.,t=0.):
"""
NAME:
Rzderiv
PURPOSE:
evaluate the mixed R,z derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2phi/dz/dR
HISTORY:
2013-08-26 - Written - Bovy (IAS)
"""
try:
return self._amp*self._Rzderiv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_Rzderiv' function not implemented for this potential") | python | def Rzderiv(self,R,Z,phi=0.,t=0.):
"""
NAME:
Rzderiv
PURPOSE:
evaluate the mixed R,z derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2phi/dz/dR
HISTORY:
2013-08-26 - Written - Bovy (IAS)
"""
try:
return self._amp*self._Rzderiv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_Rzderiv' function not implemented for this potential") | ['def', 'Rzderiv', '(', 'self', ',', 'R', ',', 'Z', ',', 'phi', '=', '0.', ',', 't', '=', '0.', ')', ':', 'try', ':', 'return', 'self', '.', '_amp', '*', 'self', '.', '_Rzderiv', '(', 'R', ',', 'Z', ',', 'phi', '=', 'phi', ',', 't', '=', 't', ')', 'except', 'AttributeError', ':', '#pragma: no cover', 'raise', 'PotentialError', '(', '"\'_Rzderiv\' function not implemented for this potential"', ')'] | NAME:
Rzderiv
PURPOSE:
evaluate the mixed R,z derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2phi/dz/dR
HISTORY:
2013-08-26 - Written - Bovy (IAS) | ['NAME', ':'] | train | https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L515-L547 |
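A short usage sketch for Rzderiv; the potential and its parameters below are illustrative, not taken from the row.

from galpy.potential import MiyamotoNagaiPotential

# normalize=1. scales the potential so the circular velocity is 1 at R=1
mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
print(mp.Rzderiv(1.0, 0.1))  # d2phi/dR/dz at R=1, z=0.1 in internal units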
7,822 | InspectorMustache/base16-builder-python | pybase16_builder/cli.py | update_mode | def update_mode(arg_namespace):
"""Check command line arguments and run update function."""
try:
updater.update(custom_sources=arg_namespace.custom)
except (PermissionError, FileNotFoundError) as exception:
if isinstance(exception, PermissionError):
print('No write permission for current working directory.')
if isinstance(exception, FileNotFoundError):
print('Necessary resources for updating not found in current '
'working directory.') | python | def update_mode(arg_namespace):
"""Check command line arguments and run update function."""
try:
updater.update(custom_sources=arg_namespace.custom)
except (PermissionError, FileNotFoundError) as exception:
if isinstance(exception, PermissionError):
print('No write permission for current working directory.')
if isinstance(exception, FileNotFoundError):
print('Necessary resources for updating not found in current '
'working directory.') | ['def', 'update_mode', '(', 'arg_namespace', ')', ':', 'try', ':', 'updater', '.', 'update', '(', 'custom_sources', '=', 'arg_namespace', '.', 'custom', ')', 'except', '(', 'PermissionError', ',', 'FileNotFoundError', ')', 'as', 'exception', ':', 'if', 'isinstance', '(', 'exception', ',', 'PermissionError', ')', ':', 'print', '(', "'No write permission for current working directory.'", ')', 'if', 'isinstance', '(', 'exception', ',', 'FileNotFoundError', ')', ':', 'print', '(', "'Necessary resources for updating not found in current '", "'working directory.'", ')'] | Check command line arguments and run update function. | ['Check', 'command', 'line', 'arguments', 'and', 'run', 'update', 'function', '.'] | train | https://github.com/InspectorMustache/base16-builder-python/blob/586f1f87ee9f70696ab19c542af6ef55c6548a2e/pybase16_builder/cli.py#L43-L52 |
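A hedged sketch of calling update_mode directly; argparse normally supplies arg_namespace, so a SimpleNamespace carrying the one attribute the function reads stands in here.

from types import SimpleNamespace

# update_mode (from the row above) only reads arg_namespace.custom;
# False mimics the --custom flag being unset.
update_mode(SimpleNamespace(custom=False))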
7,823 | cloudera/cm_api | nagios/cm_nagios.py | parse_args | def parse_args():
''' Parse the script arguments
'''
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose", action="store_true")
mode_group = optparse.OptionGroup(parser, "Program Mode")
mode_group.add_option("-u", "--update-status", action="store_const",
dest="mode", const="update_status")
mode_group.add_option("-g", "--generate-cfg", action="store_const",
dest="mode", const="generate_cfg")
parser.add_option_group(mode_group)
parser.set_defaults(mode="update_status")
general_options = optparse.OptionGroup(parser, "CM API Configuration")
general_options.add_option("-H", "--host", metavar="HOST",
help="CM API hostname")
general_options.add_option("-p", "--port", help="CM API port", default=None)
general_options.add_option("-P", "--passfile", metavar="FILE",
help="File containing CM API username and password, "
"colon-delimited on a single line. E.g. "
"\"user:pass\"")
general_options.add_option("--use-tls", action="store_true",
help="Use TLS", default=False)
parser.add_option_group(general_options)
polling_options = optparse.OptionGroup(parser, "Status Update Options")
polling_options.add_option("-c", "--cmd-file", metavar="FILE",
help="Path to the file that Nagios checks for "
"external command requests.")
polling_options.add_option("-n", "--use-send-nsca", action="store_true",
default=False,
help="Use send_nsca to report status via a nsca "
"daemon. When using this option, the "
"send_nsca program must be available and the "
"nsca daemon host and port must be provided."
"Default is false.")
polling_options.add_option("--send-nsca-path", metavar="PATH",
default="/usr/sbin/send_nsca",
help="Path to send_nsca, default is "
"/usr/sbin/send_nsca")
polling_options.add_option("--nsca-host", metavar="HOST",
default="localhost",
help="When using send_nsca, the hostname of NSCA "
"server, default is localhost.")
polling_options.add_option("--nsca-port", metavar="PORT", default=None,
help="When using send_nsca, the port on which the "
"server is running, default is 5667.")
polling_options.add_option("--send-nsca-config", metavar="FILE", default=None,
help="Config file passed to send_nsca -c. Default"
" is to not specify the config parameter.")
parser.add_option_group(polling_options)
generate_options = optparse.OptionGroup(parser, "Generate Config Options")
generate_options.add_option("--cfg-dir", metavar="DIR", default=getcwd(),
help="Directory for generated Nagios cfg files.")
parser.add_option_group(generate_options)
(options, args) = parser.parse_args()
''' Parse the 'passfile' - it must contain the username and password,
colon-delimited on a single line. E.g.:
$ cat ~/protected/cm_pass
admin:admin
'''
required = ["host", "passfile"]
if options.mode == "update_status":
if not options.use_send_nsca:
required.append("cmd_file")
for required_opt in required:
if getattr(options, required_opt) is None:
parser.error("Please specify the required argument: --%s" %
(required_opt.replace('_','-'),))
return (options, args) | python | def parse_args():
''' Parse the script arguments
'''
parser = optparse.OptionParser()
parser.add_option("-v", "--verbose", action="store_true")
mode_group = optparse.OptionGroup(parser, "Program Mode")
mode_group.add_option("-u", "--update-status", action="store_const",
dest="mode", const="update_status")
mode_group.add_option("-g", "--generate-cfg", action="store_const",
dest="mode", const="generate_cfg")
parser.add_option_group(mode_group)
parser.set_defaults(mode="update_status")
general_options = optparse.OptionGroup(parser, "CM API Configuration")
general_options.add_option("-H", "--host", metavar="HOST",
help="CM API hostname")
general_options.add_option("-p", "--port", help="CM API port", default=None)
general_options.add_option("-P", "--passfile", metavar="FILE",
help="File containing CM API username and password, "
"colon-delimited on a single line. E.g. "
"\"user:pass\"")
general_options.add_option("--use-tls", action="store_true",
help="Use TLS", default=False)
parser.add_option_group(general_options)
polling_options = optparse.OptionGroup(parser, "Status Update Options")
polling_options.add_option("-c", "--cmd-file", metavar="FILE",
help="Path to the file that Nagios checks for "
"external command requests.")
polling_options.add_option("-n", "--use-send-nsca", action="store_true",
default=False,
help="Use send_nsca to report status via a nsca "
"daemon. When using this option, the "
"send_nsca program must be available and the "
"nsca daemon host and port must be provided."
"Default is false.")
polling_options.add_option("--send-nsca-path", metavar="PATH",
default="/usr/sbin/send_nsca",
help="Path to send_nsca, default is "
"/usr/sbin/send_nsca")
polling_options.add_option("--nsca-host", metavar="HOST",
default="localhost",
help="When using send_nsca, the hostname of NSCA "
"server, default is localhost.")
polling_options.add_option("--nsca-port", metavar="PORT", default=None,
help="When using send_nsca, the port on which the "
"server is running, default is 5667.")
polling_options.add_option("--send-nsca-config", metavar="FILE", default=None,
help="Config file passed to send_nsca -c. Default"
" is to not specify the config parameter.")
parser.add_option_group(polling_options)
generate_options = optparse.OptionGroup(parser, "Generate Config Options")
generate_options.add_option("--cfg-dir", metavar="DIR", default=getcwd(),
help="Directory for generated Nagios cfg files.")
parser.add_option_group(generate_options)
(options, args) = parser.parse_args()
''' Parse the 'passfile' - it must contain the username and password,
colon-delimited on a single line. E.g.:
$ cat ~/protected/cm_pass
admin:admin
'''
required = ["host", "passfile"]
if options.mode == "update_status":
if not options.use_send_nsca:
required.append("cmd_file")
for required_opt in required:
if getattr(options, required_opt) is None:
parser.error("Please specify the required argument: --%s" %
(required_opt.replace('_','-'),))
  return (options, args) | ['def', 'parse_args', '(', ')', ':', 'parser', '=', 'optparse', '.', 'OptionParser', '(', ')', 'parser', '.', 'add_option', '(', '"-v"', ',', '"--verbose"', ',', 'action', '=', '"store_true"', ')', 'mode_group', '=', 'optparse', '.', 'OptionGroup', '(', 'parser', ',', '"Program Mode"', ')', 'mode_group', '.', 'add_option', '(', '"-u"', ',', '"--update-status"', ',', 'action', '=', '"store_const"', ',', 'dest', '=', '"mode"', ',', 'const', '=', '"update_status"', ')', 'mode_group', '.', 'add_option', '(', '"-g"', ',', '"--generate-cfg"', ',', 'action', '=', '"store_const"', ',', 'dest', '=', '"mode"', ',', 'const', '=', '"generate_cfg"', ')', 'parser', '.', 'add_option_group', '(', 'mode_group', ')', 'parser', '.', 'set_defaults', '(', 'mode', '=', '"update_status"', ')', 'general_options', '=', 'optparse', '.', 'OptionGroup', '(', 'parser', ',', '"CM API Configuration"', ')', 'general_options', '.', 'add_option', '(', '"-H"', ',', '"--host"', ',', 'metavar', '=', '"HOST"', ',', 'help', '=', '"CM API hostname"', ')', 'general_options', '.', 'add_option', '(', '"-p"', ',', '"--port"', ',', 'help', '=', '"CM API port"', ',', 'default', '=', 'None', ')', 'general_options', '.', 'add_option', '(', '"-P"', ',', '"--passfile"', ',', 'metavar', '=', '"FILE"', ',', 'help', '=', '"File containing CM API username and password, "', '"colon-delimited on a single line. E.g. "', '"\\"user:pass\\""', ')', 'general_options', '.', 'add_option', '(', '"--use-tls"', ',', 'action', '=', '"store_true"', ',', 'help', '=', '"Use TLS"', ',', 'default', '=', 'False', ')', 'parser', '.', 'add_option_group', '(', 'general_options', ')', 'polling_options', '=', 'optparse', '.', 'OptionGroup', '(', 'parser', ',', '"Status Update Options"', ')', 'polling_options', '.', 'add_option', '(', '"-c"', ',', '"--cmd-file"', ',', 'metavar', '=', '"FILE"', ',', 'help', '=', '"Path to the file that Nagios checks for "', '"external command requests."', ')', 'polling_options', '.', 'add_option', '(', '"-n"', ',', '"--use-send-nsca"', ',', 'action', '=', '"store_true"', ',', 'default', '=', 'False', ',', 'help', '=', '"Use send_nsca to report status via a nsca "', '"daemon. When using this option, the "', '"send_nsca program must be available and the "', '"nsca daemon host and port must be provided."', '"Default is false."', ')', 'polling_options', '.', 'add_option', '(', '"--send-nsca-path"', ',', 'metavar', '=', '"PATH"', ',', 'default', '=', '"/usr/sbin/send_nsca"', ',', 'help', '=', '"Path to send_nsca, default is "', '"/usr/sbin/send_nsca"', ')', 'polling_options', '.', 'add_option', '(', '"--nsca-host"', ',', 'metavar', '=', '"HOST"', ',', 'default', '=', '"localhost"', ',', 'help', '=', '"When using send_nsca, the hostname of NSCA "', '"server, default is localhost."', ')', 'polling_options', '.', 'add_option', '(', '"--nsca-port"', ',', 'metavar', '=', '"PORT"', ',', 'default', '=', 'None', ',', 'help', '=', '"When using send_nsca, the port on which the "', '"server is running, default is 5667."', ')', 'polling_options', '.', 'add_option', '(', '"--send-nsca-config"', ',', 'metavar', '=', '"FILE"', ',', 'default', '=', 'None', ',', 'help', '=', '"Config file passed to send_nsca -c. Default"', '" is to not specify the config parameter."', ')', 'parser', '.', 'add_option_group', '(', 'polling_options', ')', 'generate_options', '=', 'optparse', '.', 'OptionGroup', '(', 'parser', ',', '"Generate Config Options"', ')', 'generate_options', '.', 'add_option', '(', '"--cfg-dir"', ',', 'metavar', '=', '"DIR"', ',', 'default', '=', 'getcwd', '(', ')', ',', 'help', '=', '"Directory for generated Nagios cfg files."', ')', 'parser', '.', 'add_option_group', '(', 'generate_options', ')', '(', 'options', ',', 'args', ')', '=', 'parser', '.', 'parse_args', '(', ')', "''' Parse the 'passfile' - it must contain the username and password,\n colon-delimited on a single line. E.g.:\n $ cat ~/protected/cm_pass\n admin:admin\n '''", 'required', '=', '[', '"host"', ',', '"passfile"', ']', 'if', 'options', '.', 'mode', '==', '"update_status"', ':', 'if', 'not', 'options', '.', 'use_send_nsca', ':', 'required', '.', 'append', '(', '"cmd_file"', ')', 'for', 'required_opt', 'in', 'required', ':', 'if', 'getattr', '(', 'options', ',', 'required_opt', ')', 'is', 'None', ':', 'parser', '.', 'error', '(', '"Please specify the required argument: --%s"', '%', '(', 'required_opt', '.', 'replace', '(', "'_'", ',', "'-'", ')', ',', ')', ')', 'return', '(', 'options', ',', 'args', ')'] | Parse the script arguments | ['Parse', 'the', 'script', 'arguments'] | train | https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/nagios/cm_nagios.py#L118-L196 |
7,824 | tensorpack/tensorpack | tensorpack/graph_builder/utils.py | GradientPacker.unpack_all | def unpack_all(self, all_packed, devices):
"""
Args:
all_packed: K lists of packed gradients.
"""
all_grads = [] # #GPU x #Var
for dev, packed_grads_single_device in zip(devices, all_packed):
with tf.device(dev):
all_grads.append(self.unpack(packed_grads_single_device))
return all_grads | python | def unpack_all(self, all_packed, devices):
"""
Args:
all_packed: K lists of packed gradients.
"""
all_grads = [] # #GPU x #Var
for dev, packed_grads_single_device in zip(devices, all_packed):
with tf.device(dev):
all_grads.append(self.unpack(packed_grads_single_device))
return all_grads | ['def', 'unpack_all', '(', 'self', ',', 'all_packed', ',', 'devices', ')', ':', 'all_grads', '=', '[', ']', '# #GPU x #Var', 'for', 'dev', ',', 'packed_grads_single_device', 'in', 'zip', '(', 'devices', ',', 'all_packed', ')', ':', 'with', 'tf', '.', 'device', '(', 'dev', ')', ':', 'all_grads', '.', 'append', '(', 'self', '.', 'unpack', '(', 'packed_grads_single_device', ')', ')', 'return', 'all_grads'] | Args:
all_packed: K lists of packed gradients. | ['Args', ':', 'all_packed', ':', 'K', 'lists', 'of', 'packed', 'gradients', '.'] | train | https://github.com/tensorpack/tensorpack/blob/d7a13cb74c9066bc791d7aafc3b744b60ee79a9f/tensorpack/graph_builder/utils.py#L402-L411 |
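The shape contract of unpack_all (K packed lists in, a #GPU x #Var list of gradients out) can be sketched without TensorFlow; the stub unpack below is hypothetical, and the real method additionally pins each unpack under tf.device.

class TwoVarPacker:
    """Stub mirroring the calling shape of GradientPacker.unpack_all."""

    def unpack(self, packed):
        # pretend each packed blob splits back into two per-variable grads
        return [packed + '/var0', packed + '/var1']

    def unpack_all(self, all_packed, devices):
        all_grads = []  # #GPU x #Var
        for dev, packed in zip(devices, all_packed):
            all_grads.append(self.unpack(packed))
        return all_grads

print(TwoVarPacker().unpack_all(['g0', 'g1'], ['/gpu:0', '/gpu:1']))
# [['g0/var0', 'g0/var1'], ['g1/var0', 'g1/var1']]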
7,825 | horejsek/python-webdriverwrapper | webdriverwrapper/wrapper.py | _WebdriverBaseWrapper.wait_for_element | def wait_for_element(self, timeout=None, message='', *args, **kwds):
"""
        Shortcut for waiting for an element. If it does not end with an exception, it
        returns that element. Default timeout is `~.default_wait_timeout`.
Same as following:
.. code-block:: python
selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...))
.. versionchanged:: 2.5
Waits only for visible elements.
.. versionchanged:: 2.6
Returned functionality back in favor of new method
:py:meth:`~._WebdriverBaseWrapper.wait_for_element_show`.
"""
if not timeout:
timeout = self.default_wait_timeout
if not message:
message = _create_exception_msg(*args, url=self.current_url, **kwds)
self.wait(timeout).until(lambda driver: driver.get_elm(*args, **kwds), message=message)
        # Also return the element that was waited for.
elm = self.get_elm(*args, **kwds)
return elm | python | def wait_for_element(self, timeout=None, message='', *args, **kwds):
"""
        Shortcut for waiting for an element. If it does not end with an exception, it
        returns that element. Default timeout is `~.default_wait_timeout`.
Same as following:
.. code-block:: python
selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...))
.. versionchanged:: 2.5
Waits only for visible elements.
.. versionchanged:: 2.6
Returned functionality back in favor of new method
:py:meth:`~._WebdriverBaseWrapper.wait_for_element_show`.
"""
if not timeout:
timeout = self.default_wait_timeout
if not message:
message = _create_exception_msg(*args, url=self.current_url, **kwds)
self.wait(timeout).until(lambda driver: driver.get_elm(*args, **kwds), message=message)
        # Also return the element that was waited for.
elm = self.get_elm(*args, **kwds)
return elm | ['def', 'wait_for_element', '(', 'self', ',', 'timeout', '=', 'None', ',', 'message', '=', "''", ',', '*', 'args', ',', '*', '*', 'kwds', ')', ':', 'if', 'not', 'timeout', ':', 'timeout', '=', 'self', '.', 'default_wait_timeout', 'if', 'not', 'message', ':', 'message', '=', '_create_exception_msg', '(', '*', 'args', ',', 'url', '=', 'self', '.', 'current_url', ',', '*', '*', 'kwds', ')', 'self', '.', 'wait', '(', 'timeout', ')', '.', 'until', '(', 'lambda', 'driver', ':', 'driver', '.', 'get_elm', '(', '*', 'args', ',', '*', '*', 'kwds', ')', ',', 'message', '=', 'message', ')', '# Also return that element for which is waiting.', 'elm', '=', 'self', '.', 'get_elm', '(', '*', 'args', ',', '*', '*', 'kwds', ')', 'return', 'elm'] | Shortcut for waiting for element. If it not ends with exception, it
        returns that element. Default timeout is `~.default_wait_timeout`.
Same as following:
.. code-block:: python
selenium.webdriver.support.wait.WebDriverWait(driver, timeout).until(lambda driver: driver.get_elm(...))
.. versionchanged:: 2.5
Waits only for visible elements.
.. versionchanged:: 2.6
Returned functionality back in favor of new method
:py:meth:`~._WebdriverBaseWrapper.wait_for_element_show`. | ['Shortcut', 'for', 'waiting', 'for', 'element', '.', 'If', 'it', 'not', 'ends', 'with', 'exception', 'it', 'returns', 'that', 'element', '.', 'Detault', 'timeout', 'is', '~', '.', 'default_wait_timeout', '.', 'Same', 'as', 'following', ':'] | train | https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/wrapper.py#L275-L299 |
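A usage sketch for wait_for_element; the Firefox entry point and the id_ selector keyword are assumptions about the wrapper's API, and the page details are made up.

from webdriverwrapper import Firefox  # assumed wrapper entry point

driver = Firefox()
driver.get('http://example.com/login')
# Waits up to 10 s for the element, then returns it; on timeout it raises
# with a message built by _create_exception_msg.
button = driver.wait_for_element(timeout=10, id_='submit-button')
button.click()
driver.quit()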
7,826 | jantman/pypi-download-stats | pypi_download_stats/projectstats.py | ProjectStats._get_cache_dates | def _get_cache_dates(self):
"""
        Get a list of dates (:py:class:`datetime.datetime`) present in cache,
beginning with the longest contiguous set of dates that isn't missing
more than one date in series.
:return: list of datetime objects for contiguous dates in cache
:rtype: ``list``
"""
all_dates = self.cache.get_dates_for_project(self.project_name)
dates = []
last_date = None
for val in sorted(all_dates):
if last_date is None:
last_date = val
continue
if val - last_date > timedelta(hours=48):
# reset dates to start from here
logger.warning("Last cache date was %s, current date is %s; "
"delta is too large. Starting cache date series "
"at current date.", last_date, val)
dates = []
last_date = val
dates.append(val)
# find the first download record, and only look at dates after that
for idx, cache_date in enumerate(dates):
data = self._cache_get(cache_date)
if not self._is_empty_cache_record(data):
logger.debug("First cache date with data: %s", cache_date)
return dates[idx:]
return dates | python | def _get_cache_dates(self):
"""
        Get a list of dates (:py:class:`datetime.datetime`) present in cache,
beginning with the longest contiguous set of dates that isn't missing
more than one date in series.
:return: list of datetime objects for contiguous dates in cache
:rtype: ``list``
"""
all_dates = self.cache.get_dates_for_project(self.project_name)
dates = []
last_date = None
for val in sorted(all_dates):
if last_date is None:
last_date = val
continue
if val - last_date > timedelta(hours=48):
# reset dates to start from here
logger.warning("Last cache date was %s, current date is %s; "
"delta is too large. Starting cache date series "
"at current date.", last_date, val)
dates = []
last_date = val
dates.append(val)
# find the first download record, and only look at dates after that
for idx, cache_date in enumerate(dates):
data = self._cache_get(cache_date)
if not self._is_empty_cache_record(data):
logger.debug("First cache date with data: %s", cache_date)
return dates[idx:]
return dates | ['def', '_get_cache_dates', '(', 'self', ')', ':', 'all_dates', '=', 'self', '.', 'cache', '.', 'get_dates_for_project', '(', 'self', '.', 'project_name', ')', 'dates', '=', '[', ']', 'last_date', '=', 'None', 'for', 'val', 'in', 'sorted', '(', 'all_dates', ')', ':', 'if', 'last_date', 'is', 'None', ':', 'last_date', '=', 'val', 'continue', 'if', 'val', '-', 'last_date', '>', 'timedelta', '(', 'hours', '=', '48', ')', ':', '# reset dates to start from here', 'logger', '.', 'warning', '(', '"Last cache date was %s, current date is %s; "', '"delta is too large. Starting cache date series "', '"at current date."', ',', 'last_date', ',', 'val', ')', 'dates', '=', '[', ']', 'last_date', '=', 'val', 'dates', '.', 'append', '(', 'val', ')', '# find the first download record, and only look at dates after that', 'for', 'idx', ',', 'cache_date', 'in', 'enumerate', '(', 'dates', ')', ':', 'data', '=', 'self', '.', '_cache_get', '(', 'cache_date', ')', 'if', 'not', 'self', '.', '_is_empty_cache_record', '(', 'data', ')', ':', 'logger', '.', 'debug', '(', '"First cache date with data: %s"', ',', 'cache_date', ')', 'return', 'dates', '[', 'idx', ':', ']', 'return', 'dates'] | Get s list of dates (:py:class:`datetime.datetime`) present in cache,
beginning with the longest contiguous set of dates that isn't missing
more than one date in series.
:return: list of datetime objects for contiguous dates in cache
:rtype: ``list`` | ['Get', 's', 'list', 'of', 'dates', '(', ':', 'py', ':', 'class', ':', 'datetime', '.', 'datetime', ')', 'present', 'in', 'cache', 'beginning', 'with', 'the', 'longest', 'contiguous', 'set', 'of', 'dates', 'that', 'isn', 't', 'missing', 'more', 'than', 'one', 'date', 'in', 'series', '.'] | train | https://github.com/jantman/pypi-download-stats/blob/44a7a6bbcd61a9e7f02bd02c52584a98183f80c5/pypi_download_stats/projectstats.py#L68-L98 |
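The 48-hour gap rule above, re-implemented standalone as a sketch; note the original's continue also drops the very first date from the series, while this version keeps it.

from datetime import datetime, timedelta

def trailing_contiguous(dates, max_gap=timedelta(hours=48)):
    run, last = [], None
    for d in sorted(dates):
        if last is not None and d - last > max_gap:
            run = []  # gap too large: restart the series at this date
        last = d
        run.append(d)
    return run

days = [datetime(2016, 8, d) for d in (1, 2, 3, 10, 11)]
print(trailing_contiguous(days))  # only Aug 10 and 11 survive the reset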
7,827 | pyviz/holoviews | holoviews/plotting/plot.py | DimensionedPlot.compute_ranges | def compute_ranges(self, obj, key, ranges):
"""
Given an object, a specific key, and the normalization options,
this method will find the specified normalization options on
the appropriate OptionTree, group the elements according to
the selected normalization option (i.e. either per frame or
over the whole animation) and finally compute the dimension
ranges in each group. The new set of ranges is returned.
"""
all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))
if obj is None or not self.normalize or all_table:
return OrderedDict()
# Get inherited ranges
ranges = self.ranges if ranges is None else dict(ranges)
# Get element identifiers from current object and resolve
# with selected normalization options
norm_opts = self._get_norm_opts(obj)
# Traverse displayed object if normalization applies
# at this level, and ranges for the group have not
# been supplied from a composite plot
return_fn = lambda x: x if isinstance(x, Element) else None
for group, (axiswise, framewise) in norm_opts.items():
elements = []
# Skip if ranges are cached or already computed by a
# higher-level container object.
framewise = framewise or self.dynamic or len(elements) == 1
if group in ranges and (not framewise or ranges is not self.ranges):
continue
elif not framewise: # Traverse to get all elements
elements = obj.traverse(return_fn, [group])
elif key is not None: # Traverse to get elements for each frame
frame = self._get_frame(key)
elements = [] if frame is None else frame.traverse(return_fn, [group])
# Only compute ranges if not axiswise on a composite plot
# or not framewise on a Overlay or ElementPlot
if (not (axiswise and not isinstance(obj, HoloMap)) or
(not framewise and isinstance(obj, HoloMap))):
self._compute_group_range(group, elements, ranges)
self.ranges.update(ranges)
return ranges | python | def compute_ranges(self, obj, key, ranges):
"""
Given an object, a specific key, and the normalization options,
this method will find the specified normalization options on
the appropriate OptionTree, group the elements according to
the selected normalization option (i.e. either per frame or
over the whole animation) and finally compute the dimension
ranges in each group. The new set of ranges is returned.
"""
all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))
if obj is None or not self.normalize or all_table:
return OrderedDict()
# Get inherited ranges
ranges = self.ranges if ranges is None else dict(ranges)
# Get element identifiers from current object and resolve
# with selected normalization options
norm_opts = self._get_norm_opts(obj)
# Traverse displayed object if normalization applies
# at this level, and ranges for the group have not
# been supplied from a composite plot
return_fn = lambda x: x if isinstance(x, Element) else None
for group, (axiswise, framewise) in norm_opts.items():
elements = []
# Skip if ranges are cached or already computed by a
# higher-level container object.
framewise = framewise or self.dynamic or len(elements) == 1
if group in ranges and (not framewise or ranges is not self.ranges):
continue
elif not framewise: # Traverse to get all elements
elements = obj.traverse(return_fn, [group])
elif key is not None: # Traverse to get elements for each frame
frame = self._get_frame(key)
elements = [] if frame is None else frame.traverse(return_fn, [group])
# Only compute ranges if not axiswise on a composite plot
# or not framewise on a Overlay or ElementPlot
if (not (axiswise and not isinstance(obj, HoloMap)) or
(not framewise and isinstance(obj, HoloMap))):
self._compute_group_range(group, elements, ranges)
self.ranges.update(ranges)
return ranges | ['def', 'compute_ranges', '(', 'self', ',', 'obj', ',', 'key', ',', 'ranges', ')', ':', 'all_table', '=', 'all', '(', 'isinstance', '(', 'el', ',', 'Table', ')', 'for', 'el', 'in', 'obj', '.', 'traverse', '(', 'lambda', 'x', ':', 'x', ',', '[', 'Element', ']', ')', ')', 'if', 'obj', 'is', 'None', 'or', 'not', 'self', '.', 'normalize', 'or', 'all_table', ':', 'return', 'OrderedDict', '(', ')', '# Get inherited ranges', 'ranges', '=', 'self', '.', 'ranges', 'if', 'ranges', 'is', 'None', 'else', 'dict', '(', 'ranges', ')', '# Get element identifiers from current object and resolve', '# with selected normalization options', 'norm_opts', '=', 'self', '.', '_get_norm_opts', '(', 'obj', ')', '# Traverse displayed object if normalization applies', '# at this level, and ranges for the group have not', '# been supplied from a composite plot', 'return_fn', '=', 'lambda', 'x', ':', 'x', 'if', 'isinstance', '(', 'x', ',', 'Element', ')', 'else', 'None', 'for', 'group', ',', '(', 'axiswise', ',', 'framewise', ')', 'in', 'norm_opts', '.', 'items', '(', ')', ':', 'elements', '=', '[', ']', '# Skip if ranges are cached or already computed by a', '# higher-level container object.', 'framewise', '=', 'framewise', 'or', 'self', '.', 'dynamic', 'or', 'len', '(', 'elements', ')', '==', '1', 'if', 'group', 'in', 'ranges', 'and', '(', 'not', 'framewise', 'or', 'ranges', 'is', 'not', 'self', '.', 'ranges', ')', ':', 'continue', 'elif', 'not', 'framewise', ':', '# Traverse to get all elements', 'elements', '=', 'obj', '.', 'traverse', '(', 'return_fn', ',', '[', 'group', ']', ')', 'elif', 'key', 'is', 'not', 'None', ':', '# Traverse to get elements for each frame', 'frame', '=', 'self', '.', '_get_frame', '(', 'key', ')', 'elements', '=', '[', ']', 'if', 'frame', 'is', 'None', 'else', 'frame', '.', 'traverse', '(', 'return_fn', ',', '[', 'group', ']', ')', '# Only compute ranges if not axiswise on a composite plot', '# or not framewise on a Overlay or ElementPlot', 'if', '(', 'not', '(', 'axiswise', 'and', 'not', 'isinstance', '(', 'obj', ',', 'HoloMap', ')', ')', 'or', '(', 'not', 'framewise', 'and', 'isinstance', '(', 'obj', ',', 'HoloMap', ')', ')', ')', ':', 'self', '.', '_compute_group_range', '(', 'group', ',', 'elements', ',', 'ranges', ')', 'self', '.', 'ranges', '.', 'update', '(', 'ranges', ')', 'return', 'ranges'] | Given an object, a specific key, and the normalization options,
this method will find the specified normalization options on
the appropriate OptionTree, group the elements according to
the selected normalization option (i.e. either per frame or
over the whole animation) and finally compute the dimension
ranges in each group. The new set of ranges is returned. | ['Given', 'an', 'object', 'a', 'specific', 'key', 'and', 'the', 'normalization', 'options', 'this', 'method', 'will', 'find', 'the', 'specified', 'normalization', 'options', 'on', 'the', 'appropriate', 'OptionTree', 'group', 'the', 'elements', 'according', 'to', 'the', 'selected', 'normalization', 'option', '(', 'i', '.', 'e', '.', 'either', 'per', 'frame', 'or', 'over', 'the', 'whole', 'animation', ')', 'and', 'finally', 'compute', 'the', 'dimension', 'ranges', 'in', 'each', 'group', '.', 'The', 'new', 'set', 'of', 'ranges', 'is', 'returned', '.'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plot.py#L352-L393 |
7,828 | senaite/senaite.jsonapi | src/senaite/jsonapi/datamanagers.py | ATDataManager.set | def set(self, name, value, **kw):
"""Set the field to the given value.
The keyword arguments represent the other field values
to integrate constraints to other values.
"""
# fetch the field by name
field = api.get_field(self.context, name)
# bail out if we have no field
if not field:
return False
# call the field adapter and set the value
fieldmanager = IFieldManager(field)
return fieldmanager.set(self.context, value, **kw) | python | def set(self, name, value, **kw):
"""Set the field to the given value.
The keyword arguments represent the other field values
to integrate constraints to other values.
"""
# fetch the field by name
field = api.get_field(self.context, name)
# bail out if we have no field
if not field:
return False
# call the field adapter and set the value
fieldmanager = IFieldManager(field)
return fieldmanager.set(self.context, value, **kw) | ['def', 'set', '(', 'self', ',', 'name', ',', 'value', ',', '*', '*', 'kw', ')', ':', '# fetch the field by name', 'field', '=', 'api', '.', 'get_field', '(', 'self', '.', 'context', ',', 'name', ')', '# bail out if we have no field', 'if', 'not', 'field', ':', 'return', 'False', '# call the field adapter and set the value', 'fieldmanager', '=', 'IFieldManager', '(', 'field', ')', 'return', 'fieldmanager', '.', 'set', '(', 'self', '.', 'context', ',', 'value', ',', '*', '*', 'kw', ')'] | Set the field to the given value.
The keyword arguments represent the other field values
to integrate constraints to other values. | ['Set', 'the', 'field', 'to', 'the', 'given', 'value', '.'] | train | https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/datamanagers.py#L141-L157 |
7,829 | DLR-RM/RAFCON | source/rafcon/gui/controllers/utils/tree_view_controller.py | ListViewController.update_selection_sm_prior | def update_selection_sm_prior(self):
"""State machine prior update of tree selection"""
if self._do_selection_update:
return
self._do_selection_update = True
tree_selection, selected_model_list, sm_selection, sm_selected_model_list = self.get_selections()
if tree_selection is not None:
for path, row in enumerate(self.list_store):
model = row[self.MODEL_STORAGE_ID]
if model not in sm_selected_model_list and model in selected_model_list:
tree_selection.unselect_path(Gtk.TreePath.new_from_indices([path]))
if model in sm_selected_model_list and model not in selected_model_list:
tree_selection.select_path(Gtk.TreePath.new_from_indices([path]))
self._do_selection_update = False | python | def update_selection_sm_prior(self):
"""State machine prior update of tree selection"""
if self._do_selection_update:
return
self._do_selection_update = True
tree_selection, selected_model_list, sm_selection, sm_selected_model_list = self.get_selections()
if tree_selection is not None:
for path, row in enumerate(self.list_store):
model = row[self.MODEL_STORAGE_ID]
if model not in sm_selected_model_list and model in selected_model_list:
tree_selection.unselect_path(Gtk.TreePath.new_from_indices([path]))
if model in sm_selected_model_list and model not in selected_model_list:
tree_selection.select_path(Gtk.TreePath.new_from_indices([path]))
self._do_selection_update = False | ['def', 'update_selection_sm_prior', '(', 'self', ')', ':', 'if', 'self', '.', '_do_selection_update', ':', 'return', 'self', '.', '_do_selection_update', '=', 'True', 'tree_selection', ',', 'selected_model_list', ',', 'sm_selection', ',', 'sm_selected_model_list', '=', 'self', '.', 'get_selections', '(', ')', 'if', 'tree_selection', 'is', 'not', 'None', ':', 'for', 'path', ',', 'row', 'in', 'enumerate', '(', 'self', '.', 'list_store', ')', ':', 'model', '=', 'row', '[', 'self', '.', 'MODEL_STORAGE_ID', ']', 'if', 'model', 'not', 'in', 'sm_selected_model_list', 'and', 'model', 'in', 'selected_model_list', ':', 'tree_selection', '.', 'unselect_path', '(', 'Gtk', '.', 'TreePath', '.', 'new_from_indices', '(', '[', 'path', ']', ')', ')', 'if', 'model', 'in', 'sm_selected_model_list', 'and', 'model', 'not', 'in', 'selected_model_list', ':', 'tree_selection', '.', 'select_path', '(', 'Gtk', '.', 'TreePath', '.', 'new_from_indices', '(', '[', 'path', ']', ')', ')', 'self', '.', '_do_selection_update', '=', 'False'] | State machine prior update of tree selection | ['State', 'machine', 'prior', 'update', 'of', 'tree', 'selection'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/utils/tree_view_controller.py#L588-L602 |
7,830 | quodlibet/mutagen | mutagen/_senf/_print.py | _encode_codepage | def _encode_codepage(codepage, text):
"""
Args:
codepage (int)
text (text)
Returns:
`bytes`
Encode text using the given code page. Will not fail if a char
can't be encoded using that codepage.
"""
assert isinstance(text, text_type)
if not text:
return b""
size = (len(text.encode("utf-16-le", _surrogatepass)) //
ctypes.sizeof(winapi.WCHAR))
# get the required buffer size
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, None, 0, None, None)
if length == 0:
raise ctypes.WinError()
# decode to the buffer
buf = ctypes.create_string_buffer(length)
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, buf, length, None, None)
if length == 0:
raise ctypes.WinError()
return buf[:length] | python | def _encode_codepage(codepage, text):
"""
Args:
codepage (int)
text (text)
Returns:
`bytes`
Encode text using the given code page. Will not fail if a char
can't be encoded using that codepage.
"""
assert isinstance(text, text_type)
if not text:
return b""
size = (len(text.encode("utf-16-le", _surrogatepass)) //
ctypes.sizeof(winapi.WCHAR))
# get the required buffer size
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, None, 0, None, None)
if length == 0:
raise ctypes.WinError()
# decode to the buffer
buf = ctypes.create_string_buffer(length)
length = winapi.WideCharToMultiByte(
codepage, 0, text, size, buf, length, None, None)
if length == 0:
raise ctypes.WinError()
return buf[:length] | ['def', '_encode_codepage', '(', 'codepage', ',', 'text', ')', ':', 'assert', 'isinstance', '(', 'text', ',', 'text_type', ')', 'if', 'not', 'text', ':', 'return', 'b""', 'size', '=', '(', 'len', '(', 'text', '.', 'encode', '(', '"utf-16-le"', ',', '_surrogatepass', ')', ')', '//', 'ctypes', '.', 'sizeof', '(', 'winapi', '.', 'WCHAR', ')', ')', '# get the required buffer size', 'length', '=', 'winapi', '.', 'WideCharToMultiByte', '(', 'codepage', ',', '0', ',', 'text', ',', 'size', ',', 'None', ',', '0', ',', 'None', ',', 'None', ')', 'if', 'length', '==', '0', ':', 'raise', 'ctypes', '.', 'WinError', '(', ')', '# decode to the buffer', 'buf', '=', 'ctypes', '.', 'create_string_buffer', '(', 'length', ')', 'length', '=', 'winapi', '.', 'WideCharToMultiByte', '(', 'codepage', ',', '0', ',', 'text', ',', 'size', ',', 'buf', ',', 'length', ',', 'None', ',', 'None', ')', 'if', 'length', '==', '0', ':', 'raise', 'ctypes', '.', 'WinError', '(', ')', 'return', 'buf', '[', ':', 'length', ']'] | Args:
codepage (int)
text (text)
Returns:
`bytes`
Encode text using the given code page. Will not fail if a char
can't be encoded using that codepage. | ['Args', ':', 'codepage', '(', 'int', ')', 'text', '(', 'text', ')', 'Returns', ':', 'bytes'] | train | https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_print.py#L282-L314 |
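The "will not fail" behavior comes from WideCharToMultiByte's default-character substitution; a rough cross-platform analogue, shown here only as a sketch, is the codecs replace mode.

# cp1252 cannot represent the snowman, so it is substituted instead of raising
text = u'snow \u2603 man'
print(text.encode('cp1252', errors='replace'))  # b'snow ? man'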
7,831 | BernardFW/bernard | src/bernard/storage/register/redis.py | RedisRegisterStore._replace | async def _replace(self, key: Text, data: Dict[Text, Any]) -> None:
"""
Replace the register with a new value.
"""
with await self.pool as r:
await r.set(self.register_key(key), ujson.dumps(data)) | python | async def _replace(self, key: Text, data: Dict[Text, Any]) -> None:
"""
Replace the register with a new value.
"""
with await self.pool as r:
await r.set(self.register_key(key), ujson.dumps(data)) | ['async', 'def', '_replace', '(', 'self', ',', 'key', ':', 'Text', ',', 'data', ':', 'Dict', '[', 'Text', ',', 'Any', ']', ')', '->', 'None', ':', 'with', 'await', 'self', '.', 'pool', 'as', 'r', ':', 'await', 'r', '.', 'set', '(', 'self', '.', 'register_key', '(', 'key', ')', ',', 'ujson', '.', 'dumps', '(', 'data', ')', ')'] | Replace the register with a new value. | ['Replace', 'the', 'register', 'with', 'a', 'new', 'value', '.'] | train | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/storage/register/redis.py#L82-L88 |
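A sketch of the ujson round trip _replace relies on; the payload below is made up.

import ujson

payload = {'transition': 'HELLO', 'score': 0.75}
raw = ujson.dumps(payload)          # what gets stored under register_key(key)
print(ujson.loads(raw) == payload)  # True: the register survives the round trip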
7,832 | ddorn/GUI | GUI/gui_examples/empty_template.py | gui | def gui():
"""Main function"""
global SCREEN_SIZE
# #######
# setup all objects
# #######
os.environ['SDL_VIDEO_CENTERED'] = '1' # centers the windows
screen = new_screen()
pygame.display.set_caption('Empty project')
pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])
clock = pygame.time.Clock()
fps = FPSIndicator(clock)
while True:
# #######
# Input loop
# #######
# mouse = pygame.mouse.get_pos()
for e in pygame.event.get():
if e.type == QUIT:
return 0
elif e.type == KEYDOWN:
if e.key == K_ESCAPE:
return 0
if e.key == K_F4 and e.mod & KMOD_ALT: # Alt+F4 --> quits
return 0
if e.type == VIDEORESIZE:
SCREEN_SIZE = e.size
screen = new_screen()
# #######
# Draw all
# #######
screen.fill(WHITE)
fps.render(screen)
pygame.display.update()
clock.tick(FPS) | python | def gui():
"""Main function"""
global SCREEN_SIZE
# #######
# setup all objects
# #######
os.environ['SDL_VIDEO_CENTERED'] = '1' # centers the windows
screen = new_screen()
pygame.display.set_caption('Empty project')
pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])
clock = pygame.time.Clock()
fps = FPSIndicator(clock)
while True:
# #######
# Input loop
# #######
# mouse = pygame.mouse.get_pos()
for e in pygame.event.get():
if e.type == QUIT:
return 0
elif e.type == KEYDOWN:
if e.key == K_ESCAPE:
return 0
if e.key == K_F4 and e.mod & KMOD_ALT: # Alt+F4 --> quits
return 0
if e.type == VIDEORESIZE:
SCREEN_SIZE = e.size
screen = new_screen()
# #######
# Draw all
# #######
screen.fill(WHITE)
fps.render(screen)
pygame.display.update()
clock.tick(FPS) | ['def', 'gui', '(', ')', ':', 'global', 'SCREEN_SIZE', '# #######', '# setup all objects', '# #######', 'os', '.', 'environ', '[', "'SDL_VIDEO_CENTERED'", ']', '=', "'1'", '# centers the windows', 'screen', '=', 'new_screen', '(', ')', 'pygame', '.', 'display', '.', 'set_caption', '(', "'Empty project'", ')', 'pygame', '.', 'event', '.', 'set_allowed', '(', '[', 'QUIT', ',', 'KEYDOWN', ',', 'MOUSEBUTTONDOWN', ']', ')', 'clock', '=', 'pygame', '.', 'time', '.', 'Clock', '(', ')', 'fps', '=', 'FPSIndicator', '(', 'clock', ')', 'while', 'True', ':', '# #######', '# Input loop', '# #######', '# mouse = pygame.mouse.get_pos()', 'for', 'e', 'in', 'pygame', '.', 'event', '.', 'get', '(', ')', ':', 'if', 'e', '.', 'type', '==', 'QUIT', ':', 'return', '0', 'elif', 'e', '.', 'type', '==', 'KEYDOWN', ':', 'if', 'e', '.', 'key', '==', 'K_ESCAPE', ':', 'return', '0', 'if', 'e', '.', 'key', '==', 'K_F4', 'and', 'e', '.', 'mod', '&', 'KMOD_ALT', ':', '# Alt+F4 --> quits', 'return', '0', 'if', 'e', '.', 'type', '==', 'VIDEORESIZE', ':', 'SCREEN_SIZE', '=', 'e', '.', 'size', 'screen', '=', 'new_screen', '(', ')', '# #######', '# Draw all', '# #######', 'screen', '.', 'fill', '(', 'WHITE', ')', 'fps', '.', 'render', '(', 'screen', ')', 'pygame', '.', 'display', '.', 'update', '(', ')', 'clock', '.', 'tick', '(', 'FPS', ')'] | Main function | ['Main', 'function'] | train | https://github.com/ddorn/GUI/blob/e1fcb5286d24e0995f280d5180222e51895c368c/GUI/gui_examples/empty_template.py#L25-L72 |
7,833 | callowayproject/Transmogrify | transmogrify/filesystem/s3.py | file_exists | def file_exists(original_file):
"""
Validate the original file is in the S3 bucket
"""
s3 = boto3.resource('s3')
bucket_name, object_key = _parse_s3_file(original_file)
bucket = s3.Bucket(bucket_name)
bucket_iterator = bucket.objects.filter(Prefix=object_key)
bucket_list = [x for x in bucket_iterator]
logger.debug("Bucket List: {0}".format(", ".join([x.key for x in bucket_list])))
logger.debug("bucket_list length: {0}".format(len(bucket_list)))
return len(bucket_list) == 1 | python | def file_exists(original_file):
"""
Validate the original file is in the S3 bucket
"""
s3 = boto3.resource('s3')
bucket_name, object_key = _parse_s3_file(original_file)
bucket = s3.Bucket(bucket_name)
bucket_iterator = bucket.objects.filter(Prefix=object_key)
bucket_list = [x for x in bucket_iterator]
logger.debug("Bucket List: {0}".format(", ".join([x.key for x in bucket_list])))
logger.debug("bucket_list length: {0}".format(len(bucket_list)))
return len(bucket_list) == 1 | ['def', 'file_exists', '(', 'original_file', ')', ':', 's3', '=', 'boto3', '.', 'resource', '(', "'s3'", ')', 'bucket_name', ',', 'object_key', '=', '_parse_s3_file', '(', 'original_file', ')', 'bucket', '=', 's3', '.', 'Bucket', '(', 'bucket_name', ')', 'bucket_iterator', '=', 'bucket', '.', 'objects', '.', 'filter', '(', 'Prefix', '=', 'object_key', ')', 'bucket_list', '=', '[', 'x', 'for', 'x', 'in', 'bucket_iterator', ']', 'logger', '.', 'debug', '(', '"Bucket List: {0}"', '.', 'format', '(', '", "', '.', 'join', '(', '[', 'x', '.', 'key', 'for', 'x', 'in', 'bucket_list', ']', ')', ')', ')', 'logger', '.', 'debug', '(', '"bucket_list length: {0}"', '.', 'format', '(', 'len', '(', 'bucket_list', ')', ')', ')', 'return', 'len', '(', 'bucket_list', ')', '==', '1'] | Validate the original file is in the S3 bucket | ['Validate', 'the', 'original', 'file', 'is', 'in', 'the', 'S3', 'bucket'] | train | https://github.com/callowayproject/Transmogrify/blob/f1f891b8b923b3a1ede5eac7f60531c1c472379e/transmogrify/filesystem/s3.py#L19-L30 |
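A hypothetical call shape for file_exists; _parse_s3_file is not shown in the row, so the s3:// URL format below is an assumption, and boto3 must be able to discover AWS credentials.

# bucket and key are made up; _parse_s3_file is assumed to split them out
if file_exists('s3://my-media-bucket/originals/photo.jpg'):
    print('original present; safe to derive thumbnails')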
7,834 | zetaops/zengine | zengine/engine.py | ZEngine.run_activity | def run_activity(self):
"""
        runs the method referenced by the current task
"""
activity = self.current.activity
if activity:
if activity not in self.wf_activities:
self._load_activity(activity)
self.current.log.debug(
"Calling Activity %s from %s" % (activity, self.wf_activities[activity]))
self.wf_activities[self.current.activity](self.current) | python | def run_activity(self):
"""
        runs the method referenced by the current task
"""
activity = self.current.activity
if activity:
if activity not in self.wf_activities:
self._load_activity(activity)
self.current.log.debug(
"Calling Activity %s from %s" % (activity, self.wf_activities[activity]))
self.wf_activities[self.current.activity](self.current) | ['def', 'run_activity', '(', 'self', ')', ':', 'activity', '=', 'self', '.', 'current', '.', 'activity', 'if', 'activity', ':', 'if', 'activity', 'not', 'in', 'self', '.', 'wf_activities', ':', 'self', '.', '_load_activity', '(', 'activity', ')', 'self', '.', 'current', '.', 'log', '.', 'debug', '(', '"Calling Activity %s from %s"', '%', '(', 'activity', ',', 'self', '.', 'wf_activities', '[', 'activity', ']', ')', ')', 'self', '.', 'wf_activities', '[', 'self', '.', 'current', '.', 'activity', ']', '(', 'self', '.', 'current', ')'] | runs the method that referenced from current task | ['runs', 'the', 'method', 'that', 'referenced', 'from', 'current', 'task'] | train | https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/engine.py#L573-L583 |
7,835 | mikedh/trimesh | trimesh/scene/scene.py | Scene.explode | def explode(self, vector=None, origin=None):
"""
Explode a scene around a point and vector.
Parameters
-----------
vector : (3,) float or float
Explode radially around a direction vector or spherically
origin : (3,) float
Point to explode around
"""
if origin is None:
origin = self.centroid
if vector is None:
vector = self.scale / 25.0
vector = np.asanyarray(vector, dtype=np.float64)
origin = np.asanyarray(origin, dtype=np.float64)
for node_name in self.graph.nodes_geometry:
transform, geometry_name = self.graph[node_name]
centroid = self.geometry[geometry_name].centroid
# transform centroid into nodes location
centroid = np.dot(transform,
np.append(centroid, 1))[:3]
if vector.shape == ():
# case where our vector is a single number
offset = (centroid - origin) * vector
elif np.shape(vector) == (3,):
projected = np.dot(vector, (centroid - origin))
offset = vector * projected
else:
raise ValueError('explode vector wrong shape!')
transform[0:3, 3] += offset
self.graph[node_name] = transform | python | def explode(self, vector=None, origin=None):
"""
Explode a scene around a point and vector.
Parameters
-----------
vector : (3,) float or float
Explode radially around a direction vector or spherically
origin : (3,) float
Point to explode around
"""
if origin is None:
origin = self.centroid
if vector is None:
vector = self.scale / 25.0
vector = np.asanyarray(vector, dtype=np.float64)
origin = np.asanyarray(origin, dtype=np.float64)
for node_name in self.graph.nodes_geometry:
transform, geometry_name = self.graph[node_name]
centroid = self.geometry[geometry_name].centroid
# transform centroid into nodes location
centroid = np.dot(transform,
np.append(centroid, 1))[:3]
if vector.shape == ():
# case where our vector is a single number
offset = (centroid - origin) * vector
elif np.shape(vector) == (3,):
projected = np.dot(vector, (centroid - origin))
offset = vector * projected
else:
raise ValueError('explode vector wrong shape!')
transform[0:3, 3] += offset
self.graph[node_name] = transform | ['def', 'explode', '(', 'self', ',', 'vector', '=', 'None', ',', 'origin', '=', 'None', ')', ':', 'if', 'origin', 'is', 'None', ':', 'origin', '=', 'self', '.', 'centroid', 'if', 'vector', 'is', 'None', ':', 'vector', '=', 'self', '.', 'scale', '/', '25.0', 'vector', '=', 'np', '.', 'asanyarray', '(', 'vector', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'origin', '=', 'np', '.', 'asanyarray', '(', 'origin', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'for', 'node_name', 'in', 'self', '.', 'graph', '.', 'nodes_geometry', ':', 'transform', ',', 'geometry_name', '=', 'self', '.', 'graph', '[', 'node_name', ']', 'centroid', '=', 'self', '.', 'geometry', '[', 'geometry_name', ']', '.', 'centroid', '# transform centroid into nodes location', 'centroid', '=', 'np', '.', 'dot', '(', 'transform', ',', 'np', '.', 'append', '(', 'centroid', ',', '1', ')', ')', '[', ':', '3', ']', 'if', 'vector', '.', 'shape', '==', '(', ')', ':', '# case where our vector is a single number', 'offset', '=', '(', 'centroid', '-', 'origin', ')', '*', 'vector', 'elif', 'np', '.', 'shape', '(', 'vector', ')', '==', '(', '3', ',', ')', ':', 'projected', '=', 'np', '.', 'dot', '(', 'vector', ',', '(', 'centroid', '-', 'origin', ')', ')', 'offset', '=', 'vector', '*', 'projected', 'else', ':', 'raise', 'ValueError', '(', "'explode vector wrong shape!'", ')', 'transform', '[', '0', ':', '3', ',', '3', ']', '+=', 'offset', 'self', '.', 'graph', '[', 'node_name', ']', '=', 'transform'] | Explode a scene around a point and vector.
Parameters
-----------
vector : (3,) float or float
Explode radially around a direction vector or spherically
origin : (3,) float
Point to explode around | ['Explode', 'a', 'scene', 'around', 'a', 'point', 'and', 'vector', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/scene/scene.py#L740-L777 |
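A usage sketch for explode built from trimesh's stock creation helpers; passing a scalar vector exercises the radial branch above.

import trimesh

scene = trimesh.Scene([trimesh.creation.box(),
                       trimesh.creation.icosphere(radius=0.4)])
scene.explode(vector=0.5)  # scalar: offset = (centroid - origin) * 0.5
# scene.show()  # uncomment to view the separated parts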
7,836 | hobson/pug-dj | pug/dj/crawlnmine/fabfile/django_fabric_aws.py | _create_ec2_instance | def _create_ec2_instance():
"""
Creates EC2 Instance
"""
print(_yellow("Creating instance"))
conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=fabconf['AWS_ACCESS_KEY'], aws_secret_access_key=fabconf['AWS_SECRET_KEY'])
image = conn.get_all_images(ec2_amis)
reservation = image[0].run(1, 1, ec2_keypair, ec2_secgroups,
instance_type=ec2_instancetype)
instance = reservation.instances[0]
conn.create_tags([instance.id], {"Name":fabconf['INSTANCE_NAME_TAG']})
while instance.state == u'pending':
print(_yellow("Instance state: %s" % instance.state))
time.sleep(10)
instance.update()
print(_green("Instance state: %s" % instance.state))
print(_green("Public dns: %s" % instance.public_dns_name))
return instance.public_dns_name | python | def _create_ec2_instance():
"""
Creates EC2 Instance
"""
print(_yellow("Creating instance"))
conn = boto.ec2.connect_to_region(ec2_region, aws_access_key_id=fabconf['AWS_ACCESS_KEY'], aws_secret_access_key=fabconf['AWS_SECRET_KEY'])
image = conn.get_all_images(ec2_amis)
reservation = image[0].run(1, 1, ec2_keypair, ec2_secgroups,
instance_type=ec2_instancetype)
instance = reservation.instances[0]
conn.create_tags([instance.id], {"Name":fabconf['INSTANCE_NAME_TAG']})
while instance.state == u'pending':
print(_yellow("Instance state: %s" % instance.state))
time.sleep(10)
instance.update()
print(_green("Instance state: %s" % instance.state))
print(_green("Public dns: %s" % instance.public_dns_name))
return instance.public_dns_name | ['def', '_create_ec2_instance', '(', ')', ':', 'print', '(', '_yellow', '(', '"Creating instance"', ')', ')', 'conn', '=', 'boto', '.', 'ec2', '.', 'connect_to_region', '(', 'ec2_region', ',', 'aws_access_key_id', '=', 'fabconf', '[', "'AWS_ACCESS_KEY'", ']', ',', 'aws_secret_access_key', '=', 'fabconf', '[', "'AWS_SECRET_KEY'", ']', ')', 'image', '=', 'conn', '.', 'get_all_images', '(', 'ec2_amis', ')', 'reservation', '=', 'image', '[', '0', ']', '.', 'run', '(', '1', ',', '1', ',', 'ec2_keypair', ',', 'ec2_secgroups', ',', 'instance_type', '=', 'ec2_instancetype', ')', 'instance', '=', 'reservation', '.', 'instances', '[', '0', ']', 'conn', '.', 'create_tags', '(', '[', 'instance', '.', 'id', ']', ',', '{', '"Name"', ':', 'fabconf', '[', "'INSTANCE_NAME_TAG'", ']', '}', ')', 'while', 'instance', '.', 'state', '==', "u'pending'", ':', 'print', '(', '_yellow', '(', '"Instance state: %s"', '%', 'instance', '.', 'state', ')', ')', 'time', '.', 'sleep', '(', '10', ')', 'instance', '.', 'update', '(', ')', 'print', '(', '_green', '(', '"Instance state: %s"', '%', 'instance', '.', 'state', ')', ')', 'print', '(', '_green', '(', '"Public dns: %s"', '%', 'instance', '.', 'public_dns_name', ')', ')', 'return', 'instance', '.', 'public_dns_name'] | Creates EC2 Instance | ['Creates', 'EC2', 'Instance'] | train | https://github.com/hobson/pug-dj/blob/55678b08755a55366ce18e7d3b8ea8fa4491ab04/pug/dj/crawlnmine/fabfile/django_fabric_aws.py#L172-L195 |
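A sketch of the module-level settings _create_ec2_instance reads, grounded in the calls above; every value below is a placeholder.

fabconf = {
    'AWS_ACCESS_KEY': 'AKIA...',          # read by connect_to_region
    'AWS_SECRET_KEY': '...',
    'INSTANCE_NAME_TAG': 'django-app-1',  # written as the instance Name tag
}
ec2_region = 'us-east-1'
ec2_amis = ['ami-0123abcd']    # get_all_images() takes a list of AMI ids
ec2_keypair = 'deploy-key'
ec2_secgroups = ['web-sg']
ec2_instancetype = 't1.micro'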
7,837 | byt3bl33d3r/CrackMapExec | cme/modules/shellcode_inject.py | CMEModule.options | def options(self, context, module_options):
'''
PATH Path to the file containing raw shellcode to inject
PROCID Process ID to inject into (default: current powershell process)
'''
if not 'PATH' in module_options:
context.log.error('PATH option is required!')
exit(1)
self.shellcode_path = os.path.expanduser(module_options['PATH'])
if not os.path.exists(self.shellcode_path):
context.log.error('Invalid path to shellcode!')
exit(1)
self.procid = None
if 'PROCID' in module_options.keys():
self.procid = module_options['PROCID']
self.ps_script = obfs_ps_script('powersploit/CodeExecution/Invoke-Shellcode.ps1') | python | def options(self, context, module_options):
'''
PATH Path to the file containing raw shellcode to inject
PROCID Process ID to inject into (default: current powershell process)
'''
if not 'PATH' in module_options:
context.log.error('PATH option is required!')
exit(1)
self.shellcode_path = os.path.expanduser(module_options['PATH'])
if not os.path.exists(self.shellcode_path):
context.log.error('Invalid path to shellcode!')
exit(1)
self.procid = None
if 'PROCID' in module_options.keys():
self.procid = module_options['PROCID']
self.ps_script = obfs_ps_script('powersploit/CodeExecution/Invoke-Shellcode.ps1') | ['def', 'options', '(', 'self', ',', 'context', ',', 'module_options', ')', ':', 'if', 'not', "'PATH'", 'in', 'module_options', ':', 'context', '.', 'log', '.', 'error', '(', "'PATH option is required!'", ')', 'exit', '(', '1', ')', 'self', '.', 'shellcode_path', '=', 'os', '.', 'path', '.', 'expanduser', '(', 'module_options', '[', "'PATH'", ']', ')', 'if', 'not', 'os', '.', 'path', '.', 'exists', '(', 'self', '.', 'shellcode_path', ')', ':', 'context', '.', 'log', '.', 'error', '(', "'Invalid path to shellcode!'", ')', 'exit', '(', '1', ')', 'self', '.', 'procid', '=', 'None', 'if', "'PROCID'", 'in', 'module_options', '.', 'keys', '(', ')', ':', 'self', '.', 'procid', '=', 'module_options', '[', "'PROCID'", ']', 'self', '.', 'ps_script', '=', 'obfs_ps_script', '(', "'powersploit/CodeExecution/Invoke-Shellcode.ps1'", ')'] | PATH Path to the file containing raw shellcode to inject
PROCID Process ID to inject into (default: current powershell process) | ['PATH', 'Path', 'to', 'the', 'file', 'containing', 'raw', 'shellcode', 'to', 'inject', 'PROCID', 'Process', 'ID', 'to', 'inject', 'into', '(', 'default', ':', 'current', 'powershell', 'process', ')'] | train | https://github.com/byt3bl33d3r/CrackMapExec/blob/333f1c4e06884e85b2776459963ef85d182aba8e/cme/modules/shellcode_inject.py#L16-L36 |
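A sketch of the module_options dict this method validates; the command line in the comment follows CME's -M/-o convention but is illustrative.

# e.g. cme smb 192.168.1.0/24 -M shellcode_inject -o PATH=/tmp/sc.bin PROCID=1234
module_options = {'PATH': '~/payloads/sc.bin',  # expanded via os.path.expanduser
                  'PROCID': '1234'}             # optional; defaults to None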
7,838 | mikedh/trimesh | trimesh/proximity.py | closest_point_naive | def closest_point_naive(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Does this by constructing a very large intermediate array and
comparing every point to every triangle.
Parameters
----------
mesh : Trimesh
Takes mesh to have same interfaces as `closest_point`
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distances between point and triangle
triangle_id : (m,) int
Index of triangle containing closest point
"""
# get triangles from mesh
triangles = mesh.triangles.view(np.ndarray)
# establish that input points are sane
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# create a giant tiled array of each point tiled len(triangles) times
points_tiled = np.tile(points, (1, len(triangles)))
on_triangle = np.array([closest_point_corresponding(
triangles, i.reshape((-1, 3))) for i in points_tiled])
# distance squared
distance_2 = [((i - q)**2).sum(axis=1)
for i, q in zip(on_triangle, points)]
triangle_id = np.array([i.argmin() for i in distance_2])
# closest cartesian point
closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)])
distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5
return closest, distance, triangle_id | python | def closest_point_naive(mesh, points):
"""
Given a mesh and a list of points find the closest point
on any triangle.
Does this by constructing a very large intermediate array and
comparing every point to every triangle.
Parameters
----------
mesh : Trimesh
Takes mesh to have same interfaces as `closest_point`
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distances between point and triangle
triangle_id : (m,) int
Index of triangle containing closest point
"""
# get triangles from mesh
triangles = mesh.triangles.view(np.ndarray)
# establish that input points are sane
points = np.asanyarray(points, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('triangles shape incorrect')
if not util.is_shape(points, (-1, 3)):
raise ValueError('points must be (n,3)')
# create a giant tiled array of each point tiled len(triangles) times
points_tiled = np.tile(points, (1, len(triangles)))
on_triangle = np.array([closest_point_corresponding(
triangles, i.reshape((-1, 3))) for i in points_tiled])
# distance squared
distance_2 = [((i - q)**2).sum(axis=1)
for i, q in zip(on_triangle, points)]
triangle_id = np.array([i.argmin() for i in distance_2])
# closest cartesian point
closest = np.array([g[i] for i, g in zip(triangle_id, on_triangle)])
distance = np.array([g[i] for i, g in zip(triangle_id, distance_2)]) ** .5
return closest, distance, triangle_id | ['def', 'closest_point_naive', '(', 'mesh', ',', 'points', ')', ':', '# get triangles from mesh', 'triangles', '=', 'mesh', '.', 'triangles', '.', 'view', '(', 'np', '.', 'ndarray', ')', '# establish that input points are sane', 'points', '=', 'np', '.', 'asanyarray', '(', 'points', ',', 'dtype', '=', 'np', '.', 'float64', ')', 'if', 'not', 'util', '.', 'is_shape', '(', 'triangles', ',', '(', '-', '1', ',', '3', ',', '3', ')', ')', ':', 'raise', 'ValueError', '(', "'triangles shape incorrect'", ')', 'if', 'not', 'util', '.', 'is_shape', '(', 'points', ',', '(', '-', '1', ',', '3', ')', ')', ':', 'raise', 'ValueError', '(', "'points must be (n,3)'", ')', '# create a giant tiled array of each point tiled len(triangles) times', 'points_tiled', '=', 'np', '.', 'tile', '(', 'points', ',', '(', '1', ',', 'len', '(', 'triangles', ')', ')', ')', 'on_triangle', '=', 'np', '.', 'array', '(', '[', 'closest_point_corresponding', '(', 'triangles', ',', 'i', '.', 'reshape', '(', '(', '-', '1', ',', '3', ')', ')', ')', 'for', 'i', 'in', 'points_tiled', ']', ')', '# distance squared', 'distance_2', '=', '[', '(', '(', 'i', '-', 'q', ')', '**', '2', ')', '.', 'sum', '(', 'axis', '=', '1', ')', 'for', 'i', ',', 'q', 'in', 'zip', '(', 'on_triangle', ',', 'points', ')', ']', 'triangle_id', '=', 'np', '.', 'array', '(', '[', 'i', '.', 'argmin', '(', ')', 'for', 'i', 'in', 'distance_2', ']', ')', '# closest cartesian point', 'closest', '=', 'np', '.', 'array', '(', '[', 'g', '[', 'i', ']', 'for', 'i', ',', 'g', 'in', 'zip', '(', 'triangle_id', ',', 'on_triangle', ')', ']', ')', 'distance', '=', 'np', '.', 'array', '(', '[', 'g', '[', 'i', ']', 'for', 'i', ',', 'g', 'in', 'zip', '(', 'triangle_id', ',', 'distance_2', ')', ']', ')', '**', '.5', 'return', 'closest', ',', 'distance', ',', 'triangle_id'] | Given a mesh and a list of points find the closest point
on any triangle.
Does this by constructing a very large intermediate array and
comparing every point to every triangle.
Parameters
----------
mesh : Trimesh
Takes a mesh with the same interface as `closest_point`
points : (m, 3) float
Points in space
Returns
----------
closest : (m, 3) float
Closest point on triangles for each point
distance : (m,) float
Distances between point and triangle
triangle_id : (m,) int
Index of triangle containing closest point | ['Given', 'a', 'mesh', 'and', 'a', 'list', 'of', 'points', 'find', 'the', 'closest', 'point', 'on', 'any', 'triangle', '.'] | train | https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/proximity.py#L61-L109 |
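A usage sketch for the function above, assuming trimesh is installed; the box mesh and query points are illustrative choices:

```python
import numpy as np
import trimesh
from trimesh.proximity import closest_point_naive

# unit cube centred at the origin, plus two points off its surface
mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
points = np.array([[0.0, 0.0, 2.0], [3.0, 0.0, 0.0]])

closest, distance, triangle_id = closest_point_naive(mesh, points)
print(closest)       # (2, 3) closest points on the surface
print(distance)      # [1.5, 2.5]: distances to the faces at z=0.5 and x=0.5
print(triangle_id)   # indices of the triangles containing the closest points
```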
7,839 | eqcorrscan/EQcorrscan | eqcorrscan/utils/archive_read.py | read_data | def read_data(archive, arc_type, day, stachans, length=86400):
"""
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_vols
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these directories should be stored as day-long, \
single-channel files. This is not implemented in the fastest way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
"""
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
# Cope with two char channel naming in seisan
station_map = (station[0], station[1][0] + '*' + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = ' '.join([station[0], station_map[1], 'is not available for',
day.strftime('%Y/%m/%d')])
warnings.warn(msg)
continue
if arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn('No data on server despite station being ' +
'available...')
continue
elif arc_type.lower() == 'day_vols':
wavfiles = _get_station_file(os.path.join(
archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st | python | def read_data(archive, arc_type, day, stachans, length=86400):
"""
Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_vols
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these files directories should be stored as day-long, \
single-channel files. This is not implemented in the fasted way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
"""
st = []
available_stations = _check_available_data(archive, arc_type, day)
for station in stachans:
if len(station[1]) == 2:
# Cope with two char channel naming in seisan
station_map = (station[0], station[1][0] + '*' + station[1][1])
available_stations_map = [(sta[0], sta[1][0] + '*' + sta[1][-1])
for sta in available_stations]
else:
station_map = station
available_stations_map = available_stations
if station_map not in available_stations_map:
msg = ' '.join([station[0], station_map[1], 'is not available for',
day.strftime('%Y/%m/%d')])
warnings.warn(msg)
continue
if arc_type.lower() == 'seishub':
client = SeishubClient(archive)
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
elif arc_type.upper() == "FDSN":
client = FDSNClient(archive)
try:
st += client.get_waveforms(
network='*', station=station_map[0], location='*',
channel=station_map[1], starttime=UTCDateTime(day),
endtime=UTCDateTime(day) + length)
except FDSNException:
warnings.warn('No data on server despite station being ' +
'available...')
continue
elif arc_type.lower() == 'day_vols':
wavfiles = _get_station_file(os.path.join(
archive, day.strftime('Y%Y' + os.sep + 'R%j.01')),
station_map[0], station_map[1])
for wavfile in wavfiles:
st += read(wavfile, starttime=day, endtime=day + length)
st = Stream(st)
return st | ['def', 'read_data', '(', 'archive', ',', 'arc_type', ',', 'day', ',', 'stachans', ',', 'length', '=', '86400', ')', ':', 'st', '=', '[', ']', 'available_stations', '=', '_check_available_data', '(', 'archive', ',', 'arc_type', ',', 'day', ')', 'for', 'station', 'in', 'stachans', ':', 'if', 'len', '(', 'station', '[', '1', ']', ')', '==', '2', ':', '# Cope with two char channel naming in seisan', 'station_map', '=', '(', 'station', '[', '0', ']', ',', 'station', '[', '1', ']', '[', '0', ']', '+', "'*'", '+', 'station', '[', '1', ']', '[', '1', ']', ')', 'available_stations_map', '=', '[', '(', 'sta', '[', '0', ']', ',', 'sta', '[', '1', ']', '[', '0', ']', '+', "'*'", '+', 'sta', '[', '1', ']', '[', '-', '1', ']', ')', 'for', 'sta', 'in', 'available_stations', ']', 'else', ':', 'station_map', '=', 'station', 'available_stations_map', '=', 'available_stations', 'if', 'station_map', 'not', 'in', 'available_stations_map', ':', 'msg', '=', "' '", '.', 'join', '(', '[', 'station', '[', '0', ']', ',', 'station_map', '[', '1', ']', ',', "'is not available for'", ',', 'day', '.', 'strftime', '(', "'%Y/%m/%d'", ')', ']', ')', 'warnings', '.', 'warn', '(', 'msg', ')', 'continue', 'if', 'arc_type', '.', 'lower', '(', ')', '==', "'seishub'", ':', 'client', '=', 'SeishubClient', '(', 'archive', ')', 'st', '+=', 'client', '.', 'get_waveforms', '(', 'network', '=', "'*'", ',', 'station', '=', 'station_map', '[', '0', ']', ',', 'location', '=', "'*'", ',', 'channel', '=', 'station_map', '[', '1', ']', ',', 'starttime', '=', 'UTCDateTime', '(', 'day', ')', ',', 'endtime', '=', 'UTCDateTime', '(', 'day', ')', '+', 'length', ')', 'elif', 'arc_type', '.', 'upper', '(', ')', '==', '"FDSN"', ':', 'client', '=', 'FDSNClient', '(', 'archive', ')', 'try', ':', 'st', '+=', 'client', '.', 'get_waveforms', '(', 'network', '=', "'*'", ',', 'station', '=', 'station_map', '[', '0', ']', ',', 'location', '=', "'*'", ',', 'channel', '=', 'station_map', '[', '1', ']', ',', 'starttime', '=', 'UTCDateTime', '(', 'day', ')', ',', 'endtime', '=', 'UTCDateTime', '(', 'day', ')', '+', 'length', ')', 'except', 'FDSNException', ':', 'warnings', '.', 'warn', '(', "'No data on server despite station being '", '+', "'available...'", ')', 'continue', 'elif', 'arc_type', '.', 'lower', '(', ')', '==', "'day_vols'", ':', 'wavfiles', '=', '_get_station_file', '(', 'os', '.', 'path', '.', 'join', '(', 'archive', ',', 'day', '.', 'strftime', '(', "'Y%Y'", '+', 'os', '.', 'sep', '+', "'R%j.01'", ')', ')', ',', 'station_map', '[', '0', ']', ',', 'station_map', '[', '1', ']', ')', 'for', 'wavfile', 'in', 'wavfiles', ':', 'st', '+=', 'read', '(', 'wavfile', ',', 'starttime', '=', 'day', ',', 'endtime', '=', 'day', '+', 'length', ')', 'st', '=', 'Stream', '(', 'st', ')', 'return', 'st'] | Function to read the appropriate data from an archive for a day.
:type archive: str
:param archive:
The archive source - if arc_type is seishub, this should be a url,
if the arc_type is FDSN then this can be either a url or a known obspy
client. If arc_type is day_vols, then this is the path to the top
directory.
:type arc_type: str
:param arc_type: The type of archive, can be: seishub, FDSN, day_vols
:type day: datetime.date
:param day: Date to retrieve data for
:type stachans: list
:param stachans: List of tuples of Stations and channels to try and get,
will not fail if stations are not available, but will warn.
:type length: float
:param length: Data length to extract in seconds, defaults to 1 day.
:returns: Stream of data
:rtype: obspy.core.stream.Stream
.. note:: A note on arc_types, if arc_type is day_vols, then this will \
look for directories labelled in the IRIS DMC conventions of \
Yyyyy/Rjjj.01/... where yyyy is the year and jjj is the julian day. \
Data within these directories should be stored as day-long, \
single-channel files. This is not implemented in the fastest way \
possible to allow for a more general situation. If you require more \
speed you will need to re-write this.
.. rubric:: Example
>>> from obspy import UTCDateTime
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, missing data
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('JCNB', 'SP1'), ('GCSZ', 'HHZ')]
>>> st = read_data('NCEDC', 'FDSN', t1, stachans)
>>> print(st)
1 Trace(s) in Stream:
BP.JCNB.40.SP1 | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.\
950000Z | 20.0 Hz, 1728000 samples
.. rubric:: Example, local day-volumes
>>> # Get the path to the test data
>>> import eqcorrscan
>>> TEST_PATH = os.path.dirname(eqcorrscan.__file__) + '/tests/test_data'
>>> t1 = UTCDateTime(2012, 3, 26)
>>> stachans = [('WHYM', 'SHZ'), ('EORO', 'SHZ')]
>>> st = read_data(TEST_PATH + '/day_vols', 'day_vols',
... t1, stachans)
>>> print(st)
2 Trace(s) in Stream:
AF.WHYM..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples
AF.EORO..SHZ | 2012-03-26T00:00:00.000000Z - 2012-03-26T23:59:59.000000Z \
| 1.0 Hz, 86400 samples | ['Function', 'to', 'read', 'the', 'appropriate', 'data', 'from', 'an', 'archive', 'for', 'a', 'day', '.'] | train | https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/archive_read.py#L31-L140 |
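A standard-library sketch of the IRIS DMC day-volume directory convention (Yyyyy/Rjjj.01) that the note above describes; 'archive_root' is a placeholder path:

```python
import os
from datetime import date

day = date(2012, 3, 26)
# same strftime pattern as the day_vols branch above
day_dir = os.path.join('archive_root', day.strftime('Y%Y'), day.strftime('R%j.01'))
print(day_dir)  # archive_root/Y2012/R086.01 (086 is the julian day)
```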
7,840 | saltstack/salt | salt/modules/ps.py | virtual_memory | def virtual_memory():
'''
.. versionadded:: 2014.7.0
Return a dict that describes statistics about system memory usage.
.. note::
This function is only available in psutil version 0.6.0 and above.
CLI Example:
.. code-block:: bash
salt '*' ps.virtual_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
return dict(psutil.virtual_memory()._asdict()) | python | def virtual_memory():
'''
.. versionadded:: 2014.7.0
Return a dict that describes statistics about system memory usage.
.. note::
This function is only available in psutil version 0.6.0 and above.
CLI Example:
.. code-block:: bash
salt '*' ps.virtual_memory
'''
if psutil.version_info < (0, 6, 0):
msg = 'virtual_memory is only available in psutil 0.6.0 or greater'
raise CommandExecutionError(msg)
return dict(psutil.virtual_memory()._asdict()) | ['def', 'virtual_memory', '(', ')', ':', 'if', 'psutil', '.', 'version_info', '<', '(', '0', ',', '6', ',', '0', ')', ':', 'msg', '=', "'virtual_memory is only available in psutil 0.6.0 or greater'", 'raise', 'CommandExecutionError', '(', 'msg', ')', 'return', 'dict', '(', 'psutil', '.', 'virtual_memory', '(', ')', '.', '_asdict', '(', ')', ')'] | .. versionadded:: 2014.7.0
Return a dict that describes statistics about system memory usage.
.. note::
This function is only available in psutil version 0.6.0 and above.
CLI Example:
.. code-block:: bash
salt '*' ps.virtual_memory | ['..', 'versionadded', '::', '2014', '.', '7', '.', '0'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/ps.py#L416-L435 |
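The same psutil call run outside of Salt, assuming psutil >= 0.6.0 is installed:

```python
import psutil

if psutil.version_info < (0, 6, 0):
    raise RuntimeError('virtual_memory needs psutil 0.6.0 or greater')

# identical conversion to the module above: namedtuple -> plain dict
mem = dict(psutil.virtual_memory()._asdict())
print(mem['total'], mem['available'], mem['percent'])
```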
7,841 | MillionIntegrals/vel | vel/rl/models/deterministic_policy_model.py | DeterministicPolicyEvaluator.model_actions | def model_actions(self):
""" Estimate state-value of the transition next state """
observations = self.get('rollout:observations')
model_action = self.model.action(observations)
return model_action | python | def model_actions(self):
""" Estimate state-value of the transition next state """
observations = self.get('rollout:observations')
model_action = self.model.action(observations)
return model_action | ['def', 'model_actions', '(', 'self', ')', ':', 'observations', '=', 'self', '.', 'get', '(', "'rollout:observations'", ')', 'model_action', '=', 'self', '.', 'model', '.', 'action', '(', 'observations', ')', 'return', 'model_action'] | Compute the model actions for the rollout observations | ['Compute', 'the', 'model', 'actions', 'for', 'the', 'rollout', 'observations'] | train | https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/deterministic_policy_model.py#L29-L33 |
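A toy sketch of the data flow above; the model and evaluator are hypothetical stand-ins, since the real class resolves 'rollout:observations' through vel's evaluator API:

```python
class ToyModel:
    def action(self, observations):
        return [2.0 * o for o in observations]  # placeholder deterministic policy

class ToyEvaluator:
    def __init__(self, model, rollout):
        self.model = model
        self._rollout = rollout
    def get(self, key):
        return self._rollout[key]
    def model_actions(self):
        observations = self.get('rollout:observations')
        return self.model.action(observations)

ev = ToyEvaluator(ToyModel(), {'rollout:observations': [0.5, -1.0]})
print(ev.model_actions())  # [1.0, -2.0]
```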
7,842 | WZBSocialScienceCenter/tmtoolkit | tmtoolkit/topicmod/visualize.py | plot_doc_topic_heatmap | def plot_doc_topic_heatmap(fig, ax, doc_topic_distrib, doc_labels, topic_labels=None,
which_documents=None, which_document_indices=None,
which_topics=None, which_topic_indices=None,
xaxislabel=None, yaxislabel=None,
**kwargs):
"""
Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax`
using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
the x-axis.
Custom topic labels can be passed as `topic_labels`.
A subset of documents can be specified either with a sequence `which_documents` containing a subset of document
labels from `doc_labels` or `which_document_indices` containing a sequence of document indices.
A subset of topics can be specified either with a sequence `which_topics` containing a sequence of numbers between
[1, n_topics], or `which_topic_indices` containing a sequence of indices between [0, n_topics-1].
Additional arguments can be passed via `kwargs` to `plot_heatmap`.
Please note that it is almost always necessary to select a subset of your document-topic distribution with the
`which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
to give a reasonable picture.
"""
if which_documents is not None and which_document_indices is not None:
raise ValueError('only `which_documents` or `which_document_indices` can be set, not both')
if which_topics is not None and which_topic_indices is not None:
raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')
if which_documents is not None:
which_document_indices = np.where(np.isin(doc_labels, which_documents))[0]
if which_topics is not None:
which_topic_indices = np.array(which_topics) - 1
select_distrib_subset = False
if topic_labels is None:
topic_labels = np.array(range(1, doc_topic_distrib.shape[1]+1))
elif not isinstance(topic_labels, np.ndarray):
topic_labels = np.array(topic_labels)
if which_document_indices is not None:
select_distrib_subset = True
doc_labels = np.array(doc_labels)[which_document_indices]
if which_topic_indices is not None:
select_distrib_subset = True
topic_labels = topic_labels[which_topic_indices]
if select_distrib_subset:
doc_topic_distrib = mat2d_window_from_indices(doc_topic_distrib, which_document_indices, which_topic_indices)
return plot_heatmap(fig, ax, doc_topic_distrib,
xaxislabel=xaxislabel or 'topic',
yaxislabel=yaxislabel or 'document',
xticklabels=topic_labels,
yticklabels=doc_labels,
**kwargs) | python | def plot_doc_topic_heatmap(fig, ax, doc_topic_distrib, doc_labels, topic_labels=None,
which_documents=None, which_document_indices=None,
which_topics=None, which_topic_indices=None,
xaxislabel=None, yaxislabel=None,
**kwargs):
"""
Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax`
using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
the x-axis.
Custom topic labels can be passed as `topic_labels`.
A subset of documents can be specified either with a sequence `which_documents` containing a subset of document
labels from `doc_labels` or `which_document_indices` containing a sequence of document indices.
A subset of topics can be specified either with a sequence `which_topics` containing a sequence of numbers between
[1, n_topics], or `which_topic_indices` containing a sequence of indices between [0, n_topics-1].
Additional arguments can be passed via `kwargs` to `plot_heatmap`.
Please note that it is almost always necessary to select a subset of your document-topic distribution with the
`which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
to give a reasonable picture.
"""
if which_documents is not None and which_document_indices is not None:
raise ValueError('only `which_documents` or `which_document_indices` can be set, not both')
if which_topics is not None and which_topic_indices is not None:
raise ValueError('only `which_topics` or `which_topic_indices` can be set, not both')
if which_documents is not None:
which_document_indices = np.where(np.isin(doc_labels, which_documents))[0]
if which_topics is not None:
which_topic_indices = np.array(which_topics) - 1
select_distrib_subset = False
if topic_labels is None:
topic_labels = np.array(range(1, doc_topic_distrib.shape[1]+1))
elif not isinstance(topic_labels, np.ndarray):
topic_labels = np.array(topic_labels)
if which_document_indices is not None:
select_distrib_subset = True
doc_labels = np.array(doc_labels)[which_document_indices]
if which_topic_indices is not None:
select_distrib_subset = True
topic_labels = topic_labels[which_topic_indices]
if select_distrib_subset:
doc_topic_distrib = mat2d_window_from_indices(doc_topic_distrib, which_document_indices, which_topic_indices)
return plot_heatmap(fig, ax, doc_topic_distrib,
xaxislabel=xaxislabel or 'topic',
yaxislabel=yaxislabel or 'document',
xticklabels=topic_labels,
yticklabels=doc_labels,
**kwargs) | ['def', 'plot_doc_topic_heatmap', '(', 'fig', ',', 'ax', ',', 'doc_topic_distrib', ',', 'doc_labels', ',', 'topic_labels', '=', 'None', ',', 'which_documents', '=', 'None', ',', 'which_document_indices', '=', 'None', ',', 'which_topics', '=', 'None', ',', 'which_topic_indices', '=', 'None', ',', 'xaxislabel', '=', 'None', ',', 'yaxislabel', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'which_documents', 'is', 'not', 'None', 'and', 'which_document_indices', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'only `which_documents` or `which_document_indices` can be set, not both'", ')', 'if', 'which_topics', 'is', 'not', 'None', 'and', 'which_topic_indices', 'is', 'not', 'None', ':', 'raise', 'ValueError', '(', "'only `which_topics` or `which_topic_indices` can be set, not both'", ')', 'if', 'which_documents', 'is', 'not', 'None', ':', 'which_document_indices', '=', 'np', '.', 'where', '(', 'np', '.', 'isin', '(', 'doc_labels', ',', 'which_documents', ')', ')', '[', '0', ']', 'if', 'which_topics', 'is', 'not', 'None', ':', 'which_topic_indices', '=', 'np', '.', 'array', '(', 'which_topics', ')', '-', '1', 'select_distrib_subset', '=', 'False', 'if', 'topic_labels', 'is', 'None', ':', 'topic_labels', '=', 'np', '.', 'array', '(', 'range', '(', '1', ',', 'doc_topic_distrib', '.', 'shape', '[', '1', ']', '+', '1', ')', ')', 'elif', 'not', 'isinstance', '(', 'topic_labels', ',', 'np', '.', 'ndarray', ')', ':', 'topic_labels', '=', 'np', '.', 'array', '(', 'topic_labels', ')', 'if', 'which_document_indices', 'is', 'not', 'None', ':', 'select_distrib_subset', '=', 'True', 'doc_labels', '=', 'np', '.', 'array', '(', 'doc_labels', ')', '[', 'which_document_indices', ']', 'if', 'which_topic_indices', 'is', 'not', 'None', ':', 'select_distrib_subset', '=', 'True', 'topic_labels', '=', 'topic_labels', '[', 'which_topic_indices', ']', 'if', 'select_distrib_subset', ':', 'doc_topic_distrib', '=', 'mat2d_window_from_indices', '(', 'doc_topic_distrib', ',', 'which_document_indices', ',', 'which_topic_indices', ')', 'return', 'plot_heatmap', '(', 'fig', ',', 'ax', ',', 'doc_topic_distrib', ',', 'xaxislabel', '=', 'xaxislabel', 'or', "'topic'", ',', 'yaxislabel', '=', 'yaxislabel', 'or', "'document'", ',', 'xticklabels', '=', 'topic_labels', ',', 'yticklabels', '=', 'doc_labels', ',', '*', '*', 'kwargs', ')'] | Plot a heatmap for a document-topic distribution `doc_topic_distrib` to a matplotlib Figure `fig` and Axes `ax`
using `doc_labels` as document labels on the y-axis and topics from 1 to `n_topics=doc_topic_distrib.shape[1]` on
the x-axis.
Custom topic labels can be passed as `topic_labels`.
A subset of documents can be specified either with a sequence `which_documents` containing a subset of document
labels from `doc_labels` or `which_document_indices` containing a sequence of document indices.
A subset of topics can be specified either with a sequence `which_topics` containing a sequence of numbers between
[1, n_topics], or `which_topic_indices` containing a sequence of indices between [0, n_topics-1].
Additional arguments can be passed via `kwargs` to `plot_heatmap`.
Please note that it is almost always necessary to select a subset of your document-topic distribution with the
`which_documents` or `which_topics` parameters, as otherwise the amount of data to be plotted will be too high
to give a reasonable picture. | ['Plot', 'a', 'heatmap', 'for', 'a', 'document', '-', 'topic', 'distribution', 'doc_topic_distrib', 'to', 'a', 'matplotlib', 'Figure', 'fig', 'and', 'Axes', 'ax', 'using', 'doc_labels', 'as', 'document', 'labels', 'on', 'the', 'y', '-', 'axis', 'and', 'topics', 'from', '1', 'to', 'n_topics', '=', 'doc_topic_distrib', '.', 'shape', '[', '1', ']', 'on', 'the', 'x', '-', 'axis', '.', 'Custom', 'topic', 'labels', 'can', 'be', 'passed', 'as', 'topic_labels', '.', 'A', 'subset', 'of', 'documents', 'can', 'be', 'specified', 'either', 'with', 'a', 'sequence', 'which_documents', 'containing', 'a', 'subset', 'of', 'document', 'labels', 'from', 'doc_labels', 'or', 'which_document_indices', 'containing', 'a', 'sequence', 'of', 'document', 'indices', '.', 'A', 'subset', 'of', 'topics', 'can', 'be', 'specified', 'either', 'with', 'a', 'sequence', 'which_topics', 'containing', 'sequence', 'of', 'numbers', 'between', '[', '1', 'n_topics', ']', 'or', 'which_topic_indices', 'which', 'is', 'a', 'number', 'between', '[', '0', 'n_topics', '-', '1', ']', 'Additional', 'arguments', 'can', 'be', 'passed', 'via', 'kwargs', 'to', 'plot_heatmap', '.'] | train | https://github.com/WZBSocialScienceCenter/tmtoolkit/blob/ca8b9d072e37ccc82b533f47d48bd9755722305b/tmtoolkit/topicmod/visualize.py#L124-L179 |
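A usage sketch with synthetic data, assuming tmtoolkit and matplotlib are installed; the Dirichlet draws stand in for a real document-topic distribution:

```python
import numpy as np
import matplotlib.pyplot as plt
from tmtoolkit.topicmod.visualize import plot_doc_topic_heatmap

doc_topic = np.random.dirichlet(np.ones(5), size=4)   # 4 documents, 5 topics
doc_labels = ['doc%d' % i for i in range(4)]

fig, ax = plt.subplots()
# select a topic subset, as the docstring above recommends
plot_doc_topic_heatmap(fig, ax, doc_topic, doc_labels, which_topics=[1, 2, 3])
plt.show()
```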
7,843 | dmlc/gluon-nlp | scripts/parsing/parser/biaffine_parser.py | BiaffineParser.parameter_from_numpy | def parameter_from_numpy(self, name, array):
""" Create parameter with its value initialized according to a numpy tensor
Parameters
----------
name : str
parameter name
array : np.ndarray
initialization value
Returns
-------
mxnet.gluon.parameter
a parameter object
"""
p = self.params.get(name, shape=array.shape, init=mx.init.Constant(array))
return p | python | def parameter_from_numpy(self, name, array):
""" Create parameter with its value initialized according to a numpy tensor
Parameters
----------
name : str
parameter name
array : np.ndarray
initialization value
Returns
-------
mxnet.gluon.parameter
a parameter object
"""
p = self.params.get(name, shape=array.shape, init=mx.init.Constant(array))
return p | ['def', 'parameter_from_numpy', '(', 'self', ',', 'name', ',', 'array', ')', ':', 'p', '=', 'self', '.', 'params', '.', 'get', '(', 'name', ',', 'shape', '=', 'array', '.', 'shape', ',', 'init', '=', 'mx', '.', 'init', '.', 'Constant', '(', 'array', ')', ')', 'return', 'p'] | Create parameter with its value initialized according to a numpy tensor
Parameters
----------
name : str
parameter name
array : np.ndarray
initialization value
Returns
-------
mxnet.gluon.parameter
a parameter object | ['Create', 'parameter', 'with', 'its', 'value', 'initialized', 'according', 'to', 'a', 'numpy', 'tensor'] | train | https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/parsing/parser/biaffine_parser.py#L122-L138 |
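A standalone sketch of the same pattern, assuming mxnet (gluon 1.x) is installed; the Block subclass and array value are illustrative:

```python
import numpy as np
import mxnet as mx
from mxnet import gluon

class ConstParam(gluon.Block):
    def __init__(self, array, **kwargs):
        super(ConstParam, self).__init__(**kwargs)
        # same idiom as parameter_from_numpy above: constant-initialized parameter
        self.w = self.params.get('w', shape=array.shape,
                                 init=mx.init.Constant(array))

blk = ConstParam(np.ones((2, 3)))
blk.initialize()
print(blk.w.data())  # 2x3 NDArray of ones
```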
7,844 | merll/docker-fabric | dockerfabric/apiclient.py | DockerFabricClient.create_container | def create_container(self, image, name=None, **kwargs):
"""
Identical to :meth:`docker.api.container.ContainerApiMixin.create_container` with additional logging.
"""
name_str = " '{0}'".format(name) if name else ""
self.push_log("Creating container{0} from image '{1}'.".format(name_str, image))
return super(DockerFabricClient, self).create_container(image, name=name, **kwargs) | python | def create_container(self, image, name=None, **kwargs):
"""
Identical to :meth:`docker.api.container.ContainerApiMixin.create_container` with additional logging.
"""
name_str = " '{0}'".format(name) if name else ""
self.push_log("Creating container{0} from image '{1}'.".format(name_str, image))
return super(DockerFabricClient, self).create_container(image, name=name, **kwargs) | ['def', 'create_container', '(', 'self', ',', 'image', ',', 'name', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'name_str', '=', '" \'{0}\'"', '.', 'format', '(', 'name', ')', 'if', 'name', 'else', '""', 'self', '.', 'push_log', '(', '"Creating container{0} from image \'{1}\'."', '.', 'format', '(', 'name_str', ',', 'image', ')', ')', 'return', 'super', '(', 'DockerFabricClient', ',', 'self', ')', '.', 'create_container', '(', 'image', ',', 'name', '=', 'name', ',', '*', '*', 'kwargs', ')'] | Identical to :meth:`docker.api.container.ContainerApiMixin.create_container` with additional logging. | ['Identical', 'to', ':', 'meth', ':', 'docker', '.', 'api', '.', 'container', '.', 'ContainerApiMixin', '.', 'create_container', 'with', 'additional', 'logging', '.'] | train | https://github.com/merll/docker-fabric/blob/785d84e40e17265b667d8b11a6e30d8e6b2bf8d4/dockerfabric/apiclient.py#L158-L164 |
7,845 | LogicalDash/LiSE | ELiDE/ELiDE/dialog.py | DialogLayout.advance_dialog | def advance_dialog(self, *args):
"""Try to display the next dialog described in my ``todo``."""
self.clear_widgets()
try:
self._update_dialog(self.todo[self.idx])
except IndexError:
pass | python | def advance_dialog(self, *args):
"""Try to display the next dialog described in my ``todo``."""
self.clear_widgets()
try:
self._update_dialog(self.todo[self.idx])
except IndexError:
pass | ['def', 'advance_dialog', '(', 'self', ',', '*', 'args', ')', ':', 'self', '.', 'clear_widgets', '(', ')', 'try', ':', 'self', '.', '_update_dialog', '(', 'self', '.', 'todo', '[', 'self', '.', 'idx', ']', ')', 'except', 'IndexError', ':', 'pass'] | Try to display the next dialog described in my ``todo``. | ['Try', 'to', 'display', 'the', 'next', 'dialog', 'described', 'in', 'my', 'todo', '.'] | train | https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/dialog.py#L186-L192 |
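A non-Kivy sketch of the advance/stop behaviour above; `clear_widgets` and `_update_dialog` are mocked because the real class is a Kivy widget:

```python
class ToyDialogLayout:
    def __init__(self, todo):
        self.todo = todo
        self.idx = 0
    def clear_widgets(self):
        pass  # the Kivy widget would empty its children here
    def _update_dialog(self, spec):
        print('showing:', spec)
    def advance_dialog(self, *args):
        self.clear_widgets()
        try:
            self._update_dialog(self.todo[self.idx])
        except IndexError:
            pass  # nothing left in the queue

ToyDialogLayout(['Hello, traveller!']).advance_dialog()  # showing: Hello, traveller!
```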
7,846 | akissa/clamavmirror | setup.py | main | def main():
"""Main"""
opts = dict(
name="clamavmirror",
version='0.0.4',
description="ClamAV Signature Mirroring Tool",
long_description=get_readme(),
keywords="clamav mirror mirroring mirror-tool signatures",
author="Andrew Colin Kissa",
author_email="[email protected]",
url="https://github.com/akissa/clamavmirror",
license="MPL 2.0",
packages=[],
entry_points={
'console_scripts': [
'clamavmirror=clamavmirror:main'
],
},
include_package_data=True,
zip_safe=False,
install_requires=['urllib3', 'dnspython', 'certifi'],
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: System Administrators',
'Environment :: Console',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Operating System :: OS Independent'],)
setup(**opts) | python | def main():
"""Main"""
opts = dict(
name="clamavmirror",
version='0.0.4',
description="ClamAV Signature Mirroring Tool",
long_description=get_readme(),
keywords="clamav mirror mirroring mirror-tool signatures",
author="Andrew Colin Kissa",
author_email="[email protected]",
url="https://github.com/akissa/clamavmirror",
license="MPL 2.0",
packages=[],
entry_points={
'console_scripts': [
'clamavmirror=clamavmirror:main'
],
},
include_package_data=True,
zip_safe=False,
install_requires=['urllib3', 'dnspython', 'certifi'],
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Intended Audience :: System Administrators',
'Environment :: Console',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Natural Language :: English',
'Operating System :: OS Independent'],)
setup(**opts) | ['def', 'main', '(', ')', ':', 'opts', '=', 'dict', '(', 'name', '=', '"clamavmirror"', ',', 'version', '=', "'0.0.4'", ',', 'description', '=', '"ClamAV Signature Mirroring Tool"', ',', 'long_description', '=', 'get_readme', '(', ')', ',', 'keywords', '=', '"clamav mirror mirroring mirror-tool signatures"', ',', 'author', '=', '"Andrew Colin Kissa"', ',', 'author_email', '=', '"[email protected]"', ',', 'url', '=', '"https://github.com/akissa/clamavmirror"', ',', 'license', '=', '"MPL 2.0"', ',', 'packages', '=', '[', ']', ',', 'entry_points', '=', '{', "'console_scripts'", ':', '[', "'clamavmirror=clamavmirror:main'", ']', ',', '}', ',', 'include_package_data', '=', 'True', ',', 'zip_safe', '=', 'False', ',', 'install_requires', '=', '[', "'urllib3'", ',', "'dnspython'", ',', "'certifi'", ']', ',', 'classifiers', '=', '[', "'Development Status :: 4 - Beta'", ',', "'Programming Language :: Python'", ',', "'Programming Language :: Python :: 2.6'", ',', "'Programming Language :: Python :: 2.7'", ',', "'Topic :: Software Development :: Libraries :: Python Modules'", ',', "'Intended Audience :: System Administrators'", ',', "'Environment :: Console'", ',', "'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)'", ',', "'Natural Language :: English'", ',', "'Operating System :: OS Independent'", ']', ',', ')', 'setup', '(', '*', '*', 'opts', ')'] | Main | ['Main'] | train | https://github.com/akissa/clamavmirror/blob/6ef1cfa9fb4fa4a7b8439004f1cd8775f51d77f6/setup.py#L39-L72 |
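A small check of the console_scripts mapping declared above; installing the package creates a `clamavmirror` command that calls `clamavmirror.main`:

```python
entry = 'clamavmirror=clamavmirror:main'   # same spec as in entry_points above
script, target = entry.split('=')
module, func = target.split(':')
print(script, '->', module + '.' + func)   # clamavmirror -> clamavmirror.main
```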
7,847 | franciscogarate/pyliferisk | pyliferisk/__init__.py | AExn | def AExn(mt, x, n):
""" AExn : Returns the EPV of a endowment insurance.
An endowment insurance provides a combination of a term insurance and a pure endowment
"""
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] + mt.Dx[x + n] / mt.Dx[x] | python | def AExn(mt, x, n):
""" AExn : Returns the EPV of a endowment insurance.
An endowment insurance provides a combination of a term insurance and a pure endowment
"""
return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] + mt.Dx[x + n] / mt.Dx[x] | ['def', 'AExn', '(', 'mt', ',', 'x', ',', 'n', ')', ':', 'return', '(', 'mt', '.', 'Mx', '[', 'x', ']', '-', 'mt', '.', 'Mx', '[', 'x', '+', 'n', ']', ')', '/', 'mt', '.', 'Dx', '[', 'x', ']', '+', 'mt', '.', 'Dx', '[', 'x', '+', 'n', ']', '/', 'mt', '.', 'Dx', '[', 'x', ']'] | AExn : Returns the EPV of an endowment insurance.
An endowment insurance provides a combination of a term insurance and a pure endowment | ['AExn', ':', 'Returns', 'the', 'EPV', 'of', 'an', 'endowment', 'insurance', '.', 'An', 'endowment', 'insurance', 'provides', 'a', 'combination', 'of', 'a', 'term', 'insurance', 'and', 'a', 'pure', 'endowment'] | train | https://github.com/franciscogarate/pyliferisk/blob/8d906bed04df1ba00fa1cacc6f31030ce5ab6233/pyliferisk/__init__.py#L310-L314 |
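A worked toy example of the commutation formula above; the Dx/Mx values are invented purely to exercise the arithmetic, not taken from a real mortality table:

```python
class ToyTable:  # stand-in for a pyliferisk mortality table
    Dx = {40: 1000.0, 60: 300.0}
    Mx = {40: 120.0, 60: 60.0}

def AExn(mt, x, n):
    return (mt.Mx[x] - mt.Mx[x + n]) / mt.Dx[x] + mt.Dx[x + n] / mt.Dx[x]

# term part: (120 - 60) / 1000 = 0.06; pure endowment part: 300 / 1000 = 0.30
print(AExn(ToyTable, 40, 20))  # 0.36
```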
7,848 | biosignalsnotebooks/biosignalsnotebooks | biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py | plot_before_after_filter | def plot_before_after_filter(signal, sr, band_begin, band_end, order=1, x_lim=[], y_lim=[],
orientation="hor", show_plot=False, file_name=None):
"""
-----
Brief
-----
The current function is very useful for comparing two power spectra (before and
after filtering the signal).
This function invokes "plot_informational_band" in order to get the power spectrum before
applying the signal to the lowpass filter.
-----------
Description
-----------
The FFT Power Spectrum, of an input signal, can be generated through plotfft function of
novainstrumentation package (or periodogram function of scipy package).
The x axis (freqs) represents the frequency components of the signal, after decomposition was
achieved by applying the Fourier Transform. The y axis (power) defines the relative weight of
each frequency component (sinusoidal function) in the process of reconstructing the signal by
re-summing of decomposition components.
A 1x2 gridplot is presented for comparing the differences in frequency composition of the
signal under analysis (before and after filtering).
Additionally, it is also graphically presented a rectangular box showing which are the frequency
components with relevant information for studying our input physiological signal.
Applied in the Notebook "Digital Filtering - A Fundamental Pre-Processing Step".
----------
Parameters
----------
signal : list
List containing the acquired signal samples.
sr : int
Sampling rate.
band_begin : float
Lower frequency inside the signal informational band.
band_end : float
Higher frequency inside the signal informational band.
order : int
Filter order.
x_lim : list
A list with length equal to 2, defining the first and last x value that should be presented.
y_lim : list
A list with length equal to 2, defining the first and last y value that should be presented.
orientation : str
If "hor" then the generated figures will be joined together in an horizontal gridplot.
When "vert" the gridplot will be a vertical grid and when "same" the plots are generated at
the same figure.
show_plot : bool
If True then the generated figure/plot will be shown to the user.
file_name : str
Path containing the destination folder where the Bokeh figure will be stored.
Returns
-------
out : list
List of Bokeh figures that compose the generated gridplot.
"""
# Generation of the HTML file where the plot will be stored.
#file_name = _generate_bokeh_file(file_name)
# Generation of FFT power spectrum according to the filter order.
for i in range(0, order + 1):
# Initialisation and appending of data to the figures list.
if i == 0:
# Power spectrum
freqs_after, power_after = plotfft(signal, sr)
figure_after = plot_informational_band(freqs_after, power_after, signal, sr,
band_begin, band_end,
legend="Signal Power Spectrum", x_lim=x_lim,
y_lim=y_lim)
# List that store the figure handler
list_figures = [[figure_after]]
else:
filter_signal = lowpass(signal, f=band_end, order=i, fs=sr)
# Power spectrum
freqs_after, power_after = plotfft(filter_signal, sr)
if orientation != "same":
figure_after = plot_informational_band(freqs_after, power_after, filter_signal, sr,
band_begin, band_end,
legend="Filtered FFT (Order " + str(i) + ")",
x_lim=x_lim, y_lim=y_lim)
# Append data according to the desired direction of representation.
if orientation == "hor":
# Append to the figure list the power spectrum of the signal after filtering.
list_figures[-1].append(figure_after)
elif orientation == "vert":
list_figures.append([figure_after])
else:
list_figures[-1][0].line(freqs_after, power_after, legend="Filtered FFT (Order " + str(i) + ")",
**opensignals_kwargs("line"))
# Show gridplot.
grid_plot_1 = gridplot(list_figures, **opensignals_kwargs("gridplot"))
if show_plot is True:
show(grid_plot_1)
return list_figures | python | def plot_before_after_filter(signal, sr, band_begin, band_end, order=1, x_lim=[], y_lim=[],
orientation="hor", show_plot=False, file_name=None):
"""
-----
Brief
-----
The current function is very useful for comparing two power spectra (before and
after filtering the signal).
This function invokes "plot_informational_band" in order to get the power spectrum before
applying the signal to the lowpass filter.
-----------
Description
-----------
The FFT Power Spectrum, of an input signal, can be generated through plotfft function of
novainstrumentation package (or periodogram function of scipy package).
The x axis (freqs) represents the frequency components of the signal, after decomposition was
achieved by applying the Fourier Transform. The y axis (power) defines the relative weight of
each frequency component (sinusoidal function) in the process of reconstructing the signal by
re-summing of decomposition components.
A 1x2 gridplot is presented for comparing the differences in frequency composition of the
signal under analysis (before and after filtering).
Additionally, it is also graphically presented a rectangular box showing which are the frequency
components with relevant information for studying our input physiological signal.
Applied in the Notebook "Digital Filtering - A Fundamental Pre-Processing Step".
----------
Parameters
----------
signal : list
List containing the acquired signal samples.
sr : int
Sampling rate.
band_begin : float
Lower frequency inside the signal informational band.
band_end : float
Higher frequency inside the signal informational band.
order : int
Filter order.
x_lim : list
A list with length equal to 2, defining the first and last x value that should be presented.
y_lim : list
A list with length equal to 2, defining the first and last y value that should be presented.
orientation : str
If "hor" then the generated figures will be joined together in an horizontal gridplot.
When "vert" the gridplot will be a vertical grid and when "same" the plots are generated at
the same figure.
show_plot : bool
If True then the generated figure/plot will be shown to the user.
file_name : str
Path containing the destination folder where the Bokeh figure will be stored.
Returns
-------
out : list
List of Bokeh figures that compose the generated gridplot.
"""
# Generation of the HTML file where the plot will be stored.
#file_name = _generate_bokeh_file(file_name)
# Generation of FFT power spectrum according to the filter order.
for i in range(0, order + 1):
# Initialisation and appending of data to the figures list.
if i == 0:
# Power spectrum
freqs_after, power_after = plotfft(signal, sr)
figure_after = plot_informational_band(freqs_after, power_after, signal, sr,
band_begin, band_end,
legend="Signal Power Spectrum", x_lim=x_lim,
y_lim=y_lim)
# List that store the figure handler
list_figures = [[figure_after]]
else:
filter_signal = lowpass(signal, f=band_end, order=i, fs=sr)
# Power spectrum
freqs_after, power_after = plotfft(filter_signal, sr)
if orientation != "same":
figure_after = plot_informational_band(freqs_after, power_after, filter_signal, sr,
band_begin, band_end,
legend="Filtered FFT (Order " + str(i) + ")",
x_lim=x_lim, y_lim=y_lim)
# Append data according to the desired direction of representation.
if orientation == "hor":
# Append to the figure list the power spectrum of the signal after filtering.
list_figures[-1].append(figure_after)
elif orientation == "vert":
list_figures.append([figure_after])
else:
list_figures[-1][0].line(freqs_after, power_after, legend="Filtered FFT (Order " + str(i) + ")",
**opensignals_kwargs("line"))
# Show gridplot.
grid_plot_1 = gridplot(list_figures, **opensignals_kwargs("gridplot"))
if show_plot is True:
show(grid_plot_1)
return list_figures | ['def', 'plot_before_after_filter', '(', 'signal', ',', 'sr', ',', 'band_begin', ',', 'band_end', ',', 'order', '=', '1', ',', 'x_lim', '=', '[', ']', ',', 'y_lim', '=', '[', ']', ',', 'orientation', '=', '"hor"', ',', 'show_plot', '=', 'False', ',', 'file_name', '=', 'None', ')', ':', '# Generation of the HTML file where the plot will be stored.', '#file_name = _generate_bokeh_file(file_name)', '# Generation of FFT power spectrum accordingly to the filter order.', 'for', 'i', 'in', 'range', '(', '0', ',', 'order', '+', '1', ')', ':', '# Initialisation and appending of data to the figures list.', 'if', 'i', '==', '0', ':', '# Power spectrum', 'freqs_after', ',', 'power_after', '=', 'plotfft', '(', 'signal', ',', 'sr', ')', 'figure_after', '=', 'plot_informational_band', '(', 'freqs_after', ',', 'power_after', ',', 'signal', ',', 'sr', ',', 'band_begin', ',', 'band_end', ',', 'legend', '=', '"Signal Power Spectrum"', ',', 'x_lim', '=', 'x_lim', ',', 'y_lim', '=', 'y_lim', ')', '# List that store the figure handler', 'list_figures', '=', '[', '[', 'figure_after', ']', ']', 'else', ':', 'filter_signal', '=', 'lowpass', '(', 'signal', ',', 'f', '=', 'band_end', ',', 'order', '=', 'i', ',', 'fs', '=', 'sr', ')', '# Power spectrum', 'freqs_after', ',', 'power_after', '=', 'plotfft', '(', 'filter_signal', ',', 'sr', ')', 'if', 'orientation', '!=', '"same"', ':', 'figure_after', '=', 'plot_informational_band', '(', 'freqs_after', ',', 'power_after', ',', 'filter_signal', ',', 'sr', ',', 'band_begin', ',', 'band_end', ',', 'legend', '=', '"Filtered FFT (Order "', '+', 'str', '(', 'i', ')', '+', '")"', ',', 'x_lim', '=', 'x_lim', ',', 'y_lim', '=', 'y_lim', ')', '# Append data accordingly to the desired direction of representation.', 'if', 'orientation', '==', '"hor"', ':', '# Append to the figure list the power spectrum of the signal after filtering.', 'list_figures', '[', '-', '1', ']', '.', 'append', '(', 'figure_after', ')', 'elif', 'orientation', '==', '"vert"', ':', 'list_figures', '.', 'append', '(', '[', 'figure_after', ']', ')', 'else', ':', 'list_figures', '[', '-', '1', ']', '[', '0', ']', '.', 'line', '(', 'freqs_after', ',', 'power_after', ',', 'legend', '=', '"Filtered FFT (Order "', '+', 'str', '(', 'i', ')', '+', '")"', ',', '*', '*', 'opensignals_kwargs', '(', '"line"', ')', ')', '# Show gridplot.', 'grid_plot_1', '=', 'gridplot', '(', 'list_figures', ',', '*', '*', 'opensignals_kwargs', '(', '"gridplot"', ')', ')', 'if', 'show_plot', 'is', 'True', ':', 'show', '(', 'grid_plot_1', ')', 'return', 'list_figures'] | -----
Brief
-----
The current function is very useful for comparing two power spectra (before and
after filtering the signal).
This function invokes "plot_informational_band" in order to get the power spectrum before
applying the signal to the lowpass filter.
-----------
Description
-----------
The FFT Power Spectrum, of an input signal, can be generated through plotfft function of
novainstrumentation package (or periodogram function of scipy package).
The x axis (freqs) represents the frequency components of the signal, after decomposition was
achieved by applying the Fourier Transform. The y axis (power) defines the relative weight of
each frequency component (sinusoidal function) in the process of reconstructing the signal by
re-summing of decomposition components.
A 1x2 gridplot is presented for comparing the differences in frequency composition of the
signal under analysis (before and after filtering).
Additionally, it is also graphically presented a rectangular box showing which are the frequency
components with relevant information for studying our input physiological signal.
Applied in the Notebook "Digital Filtering - A Fundamental Pre-Processing Step".
----------
Parameters
----------
signal : list
List containing the acquired signal samples.
sr : int
Sampling rate.
band_begin : float
Lower frequency inside the signal informational band.
band_end : float
Higher frequency inside the signal informational band.
order : int
Filter order.
x_lim : list
A list with length equal to 2, defining the first and last x value that should be presented.
y_lim : list
A list with length equal to 2, defining the first and last y value that should be presented.
orientation : str
If "hor" then the generated figures will be joined together in an horizontal gridplot.
When "vert" the gridplot will be a vertical grid and when "same" the plots are generated at
the same figure.
show_plot : bool
If True then the generated figure/plot will be shown to the user.
file_name : str
Path containing the destination folder where the Bokeh figure will be stored.
Returns
-------
out : list
List of Bokeh figures that compose the generated gridplot. | ['-----', 'Brief', '-----', 'The', 'use', 'of', 'the', 'current', 'function', 'is', 'very', 'useful', 'for', 'comparing', 'two', 'power', 'spectrum', 's', '(', 'before', 'and', 'after', 'filtering', 'the', 'signal', ')', '.', 'This', 'function', 'invokes', 'plot_informational_band', 'in', 'order', 'to', 'get', 'the', 'power', 'spectrum', 'before', 'applying', 'the', 'signal', 'to', 'the', 'lowpass', 'filter', '.'] | train | https://github.com/biosignalsnotebooks/biosignalsnotebooks/blob/aaa01d4125180b3a34f1e26e0d3ff08c23f666d3/biosignalsnotebooks/build/lib/biosignalsnotebooks/__notebook_support__.py#L1023-L1135 |
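A usage sketch with a synthetic two-tone signal, assuming biosignalsnotebooks and its bokeh/novainstrumentation dependencies are installed; the import path follows the file location recorded above:

```python
import numpy as np
from biosignalsnotebooks.__notebook_support__ import plot_before_after_filter

sr = 1000                                   # Hz
t = np.arange(0, 5, 1 / sr)
signal = np.sin(2 * np.pi * 5 * t) + 0.3 * np.sin(2 * np.pi * 90 * t)

# keep the 0.5-40 Hz band and compare spectra up to filter order 3
figs = plot_before_after_filter(signal, sr, band_begin=0.5, band_end=40,
                                order=3, show_plot=True)
```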
7,849 | miguelgrinberg/python-engineio | engineio/asyncio_server.py | AsyncServer._trigger_event | async def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret | python | async def _trigger_event(self, event, *args, **kwargs):
"""Invoke an event handler."""
run_async = kwargs.pop('run_async', False)
ret = None
if event in self.handlers:
if asyncio.iscoroutinefunction(self.handlers[event]) is True:
if run_async:
return self.start_background_task(self.handlers[event],
*args)
else:
try:
ret = await self.handlers[event](*args)
except asyncio.CancelledError: # pragma: no cover
pass
except:
self.logger.exception(event + ' async handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
else:
if run_async:
async def async_handler():
return self.handlers[event](*args)
return self.start_background_task(async_handler)
else:
try:
ret = self.handlers[event](*args)
except:
self.logger.exception(event + ' handler error')
if event == 'connect':
# if connect handler raised error we reject the
# connection
return False
return ret | ['async', 'def', '_trigger_event', '(', 'self', ',', 'event', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'run_async', '=', 'kwargs', '.', 'pop', '(', "'run_async'", ',', 'False', ')', 'ret', '=', 'None', 'if', 'event', 'in', 'self', '.', 'handlers', ':', 'if', 'asyncio', '.', 'iscoroutinefunction', '(', 'self', '.', 'handlers', '[', 'event', ']', ')', 'is', 'True', ':', 'if', 'run_async', ':', 'return', 'self', '.', 'start_background_task', '(', 'self', '.', 'handlers', '[', 'event', ']', ',', '*', 'args', ')', 'else', ':', 'try', ':', 'ret', '=', 'await', 'self', '.', 'handlers', '[', 'event', ']', '(', '*', 'args', ')', 'except', 'asyncio', '.', 'CancelledError', ':', '# pragma: no cover', 'pass', 'except', ':', 'self', '.', 'logger', '.', 'exception', '(', 'event', '+', "' async handler error'", ')', 'if', 'event', '==', "'connect'", ':', '# if connect handler raised error we reject the', '# connection', 'return', 'False', 'else', ':', 'if', 'run_async', ':', 'async', 'def', 'async_handler', '(', ')', ':', 'return', 'self', '.', 'handlers', '[', 'event', ']', '(', '*', 'args', ')', 'return', 'self', '.', 'start_background_task', '(', 'async_handler', ')', 'else', ':', 'try', ':', 'ret', '=', 'self', '.', 'handlers', '[', 'event', ']', '(', '*', 'args', ')', 'except', ':', 'self', '.', 'logger', '.', 'exception', '(', 'event', '+', "' handler error'", ')', 'if', 'event', '==', "'connect'", ':', '# if connect handler raised error we reject the', '# connection', 'return', 'False', 'return', 'ret'] | Invoke an event handler. | ['Invoke', 'an', 'event', 'handler', '.'] | train | https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/asyncio_server.py#L364-L399 |
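A minimal asyncio sketch of the sync/async dispatch above, using a stand-in server rather than the real engineio class:

```python
import asyncio

class MiniServer:  # stand-in illustrating only the handler dispatch
    def __init__(self):
        self.handlers = {}
    async def _trigger_event(self, event, *args):
        handler = self.handlers.get(event)
        if handler is None:
            return None
        if asyncio.iscoroutinefunction(handler):
            return await handler(*args)   # async handler path
        return handler(*args)             # plain callable path

async def main():
    srv = MiniServer()
    srv.handlers['message'] = lambda sid, data: print(sid, data)
    await srv._trigger_event('message', 'abc123', b'hello')

asyncio.run(main())
```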
7,850 | cdumay/kser | src/kser/controller.py | BaseController._onerror | def _onerror(cls, kmsg, result):
""" To execute on execution failure
:param kser.schemas.Message kmsg: Kafka message
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
logger.error(
"{}.Failed: {}[{}]: {}".format(
cls.__name__, kmsg.entrypoint, kmsg.uuid, result
),
extra=dict(
kmsg=kmsg.dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return cls.onerror(kmsg, result) | python | def _onerror(cls, kmsg, result):
""" To execute on execution failure
:param kser.schemas.Message kmsg: Kafka message
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result
"""
logger.error(
"{}.Failed: {}[{}]: {}".format(
cls.__name__, kmsg.entrypoint, kmsg.uuid, result
),
extra=dict(
kmsg=kmsg.dump(),
kresult=ResultSchema().dump(result) if result else dict()
)
)
return cls.onerror(kmsg, result) | ['def', '_onerror', '(', 'cls', ',', 'kmsg', ',', 'result', ')', ':', 'logger', '.', 'error', '(', '"{}.Failed: {}[{}]: {}"', '.', 'format', '(', 'cls', '.', '__name__', ',', 'kmsg', '.', 'entrypoint', ',', 'kmsg', '.', 'uuid', ',', 'result', ')', ',', 'extra', '=', 'dict', '(', 'kmsg', '=', 'kmsg', '.', 'dump', '(', ')', ',', 'kresult', '=', 'ResultSchema', '(', ')', '.', 'dump', '(', 'result', ')', 'if', 'result', 'else', 'dict', '(', ')', ')', ')', 'return', 'cls', '.', 'onerror', '(', 'kmsg', ',', 'result', ')'] | To execute on execution failure
:param kser.schemas.Message kmsg: Kafka message
:param kser.result.Result result: Execution result
:return: Execution result
:rtype: kser.result.Result | ['To', 'execute', 'on', 'execution', 'failure'] | train | https://github.com/cdumay/kser/blob/fbd6fe9ab34b8b89d9937e5ff727614304af48c1/src/kser/controller.py#L56-L73 |
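The pattern in this entry — a private classmethod that logs failures uniformly and then delegates to an overridable public hook — can be exercised in isolation. A minimal runnable sketch follows; BaseController/MyController and the "boom" payload are stand-ins, not the kser API:

    import logging

    logging.basicConfig(level=logging.ERROR)
    logger = logging.getLogger("controller")

    class BaseController:
        @classmethod
        def _onerror(cls, kmsg, result):
            # uniform logging lives in the private method...
            logger.error("%s.Failed: %s", cls.__name__, result)
            # ...while subclasses customize behaviour via the public hook
            return cls.onerror(kmsg, result)

        @classmethod
        def onerror(cls, kmsg, result):
            return result  # default: pass the result through unchanged

    class MyController(BaseController):
        @classmethod
        def onerror(cls, kmsg, result):
            return "handled<%s>" % result

    print(MyController._onerror(kmsg=None, result="boom"))  # handled<boom>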
7,851 | gmr/tinman | tinman/controller.py | Controller.set_base_path | def set_base_path(self, value):
"""Munge in the base path into the configuration values
:param str value: The path value
"""
if config.PATHS not in self.config.application:
self.config.application[config.PATHS] = dict()
if config.BASE not in self.config.application[config.PATHS]:
self.config.application[config.PATHS][config.BASE] = value | python | def set_base_path(self, value):
"""Munge in the base path into the configuration values
:param str value: The path value
"""
if config.PATHS not in self.config.application:
self.config.application[config.PATHS] = dict()
if config.BASE not in self.config.application[config.PATHS]:
self.config.application[config.PATHS][config.BASE] = value | ['def', 'set_base_path', '(', 'self', ',', 'value', ')', ':', 'if', 'config', '.', 'PATHS', 'not', 'in', 'self', '.', 'config', '.', 'application', ':', 'self', '.', 'config', '.', 'application', '[', 'config', '.', 'PATHS', ']', '=', 'dict', '(', ')', 'if', 'config', '.', 'BASE', 'not', 'in', 'self', '.', 'config', '.', 'application', '[', 'config', '.', 'PATHS', ']', ':', 'self', '.', 'config', '.', 'application', '[', 'config', '.', 'PATHS', ']', '[', 'config', '.', 'BASE', ']', '=', 'value'] | Munge in the base path into the configuration values
:param str value: The path value | ['Munge', 'in', 'the', 'base', 'path', 'into', 'the', 'configuration', 'values'] | train | https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/controller.py#L88-L98 |
7,852 | apache/spark | python/pyspark/sql/session.py | SparkSession.range | def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped) | python | def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped) | ['def', 'range', '(', 'self', ',', 'start', ',', 'end', '=', 'None', ',', 'step', '=', '1', ',', 'numPartitions', '=', 'None', ')', ':', 'if', 'numPartitions', 'is', 'None', ':', 'numPartitions', '=', 'self', '.', '_sc', '.', 'defaultParallelism', 'if', 'end', 'is', 'None', ':', 'jdf', '=', 'self', '.', '_jsparkSession', '.', 'range', '(', '0', ',', 'int', '(', 'start', ')', ',', 'int', '(', 'step', ')', ',', 'int', '(', 'numPartitions', ')', ')', 'else', ':', 'jdf', '=', 'self', '.', '_jsparkSession', '.', 'range', '(', 'int', '(', 'start', ')', ',', 'int', '(', 'end', ')', ',', 'int', '(', 'step', ')', ',', 'int', '(', 'numPartitions', ')', ')', 'return', 'DataFrame', '(', 'jdf', ',', 'self', '.', '_wrapped', ')'] | Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)] | ['Create', 'a', ':', 'class', ':', 'DataFrame', 'with', 'single', ':', 'class', ':', 'pyspark', '.', 'sql', '.', 'types', '.', 'LongType', 'column', 'named', 'id', 'containing', 'elements', 'in', 'a', 'range', 'from', 'start', 'to', 'end', '(', 'exclusive', ')', 'with', 'step', 'value', 'step', '.'] | train | https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/session.py#L333-L361 |
7,853 | tango-controls/pytango | tango/utils.py | StdStringVector_2_seq | def StdStringVector_2_seq(vec, seq=None):
"""Converts a :class:`tango.StdStringVector` to a python sequence<str>
:param seq: the :class:`tango.StdStringVector`
:type seq: :class:`tango.StdStringVector`
:param vec: (optional, default is None) a python sequence to be filled.
If None is given, a new list is created
:return: a python sequence filled with the same contents as seq
:rtype: sequence<str>
"""
if seq is None:
seq = []
if not isinstance(vec, StdStringVector):
raise TypeError('vec must be a tango.StdStringVector')
for e in vec:
seq.append(str(e))
return seq | python | def StdStringVector_2_seq(vec, seq=None):
"""Converts a :class:`tango.StdStringVector` to a python sequence<str>
:param seq: the :class:`tango.StdStringVector`
:type seq: :class:`tango.StdStringVector`
:param vec: (optional, default is None) a python sequence to be filled.
If None is given, a new list is created
:return: a python sequence filled with the same contents as seq
:rtype: sequence<str>
"""
if seq is None:
seq = []
if not isinstance(vec, StdStringVector):
raise TypeError('vec must be a tango.StdStringVector')
for e in vec:
seq.append(str(e))
return seq | ['def', 'StdStringVector_2_seq', '(', 'vec', ',', 'seq', '=', 'None', ')', ':', 'if', 'seq', 'is', 'None', ':', 'seq', '=', '[', ']', 'if', 'not', 'isinstance', '(', 'vec', ',', 'StdStringVector', ')', ':', 'raise', 'TypeError', '(', "'vec must be a tango.StdStringVector'", ')', 'for', 'e', 'in', 'vec', ':', 'seq', '.', 'append', '(', 'str', '(', 'e', ')', ')', 'return', 'seq'] | Converts a :class:`tango.StdStringVector` to a python sequence<str>
:param seq: the :class:`tango.StdStringVector`
:type seq: :class:`tango.StdStringVector`
:param vec: (optional, default is None) a python sequence to be filled.
If None is given, a new list is created
:return: a python sequence filled with the same contents as seq
:rtype: sequence<str> | ['Converts', 'a', ':', 'class', ':', 'tango', '.', 'StdStringVector', 'to', 'a', 'python', 'sequence<str', '>'] | train | https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/utils.py#L745-L761 |
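The fill-or-create convention above (append into the caller's sequence when one is given, otherwise build a fresh list) runs fine without PyTango if a plain list stands in for StdStringVector. A minimal sketch; vector_to_seq is a hypothetical stand-in, not the tango helper:

    def vector_to_seq(vec, seq=None):
        if seq is None:
            seq = []                # create a fresh list when none is supplied
        for e in vec:
            seq.append(str(e))      # coerce each element to str, as above
        return seq

    existing = ["a"]
    print(vector_to_seq(["b", "c"], existing))  # ['a', 'b', 'c'] - filled in place
    print(vector_to_seq(["x"]))                 # ['x'] - new list created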
7,854 | pypa/pipenv | pipenv/vendor/orderedmultidict/orderedmultidict.py | omdict.reverse | def reverse(self):
"""
Reverse the order of all items in the dictionary.
Example:
omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
omd.reverse()
omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)]
Returns: <self>.
"""
for key in six.iterkeys(self._map):
self._map[key].reverse()
self._items.reverse()
return self | python | def reverse(self):
"""
Reverse the order of all items in the dictionary.
Example:
omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
omd.reverse()
omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)]
Returns: <self>.
"""
for key in six.iterkeys(self._map):
self._map[key].reverse()
self._items.reverse()
return self | ['def', 'reverse', '(', 'self', ')', ':', 'for', 'key', 'in', 'six', '.', 'iterkeys', '(', 'self', '.', '_map', ')', ':', 'self', '.', '_map', '[', 'key', ']', '.', 'reverse', '(', ')', 'self', '.', '_items', '.', 'reverse', '(', ')', 'return', 'self'] | Reverse the order of all items in the dictionary.
Example:
omd = omdict([(1,1), (1,11), (1,111), (2,2), (3,3)])
omd.reverse()
omd.allitems() == [(3,3), (2,2), (1,111), (1,11), (1,1)]
Returns: <self>. | ['Reverse', 'the', 'order', 'of', 'all', 'items', 'in', 'the', 'dictionary', '.'] | train | https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/orderedmultidict/orderedmultidict.py#L746-L760 |
7,855 | markuskiller/textblob-de | textblob_de/tokenizers.py | PatternTokenizer.sent_tokenize | def sent_tokenize(self, text, **kwargs):
"""Returns a list of sentences.
Each sentence is a space-separated string of tokens (words).
Handles common cases of abbreviations (e.g., etc., ...).
Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
Headings without an ending period are inferred by line breaks.
"""
sentences = find_sentences(text,
punctuation=kwargs.get(
"punctuation",
PUNCTUATION),
abbreviations=kwargs.get(
"abbreviations",
ABBREVIATIONS_DE),
replace=kwargs.get("replace", replacements),
linebreak=r"\n{2,}")
return sentences | python | def sent_tokenize(self, text, **kwargs):
"""Returns a list of sentences.
Each sentence is a space-separated string of tokens (words).
Handles common cases of abbreviations (e.g., etc., ...).
Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
Headings without an ending period are inferred by line breaks.
"""
sentences = find_sentences(text,
punctuation=kwargs.get(
"punctuation",
PUNCTUATION),
abbreviations=kwargs.get(
"abbreviations",
ABBREVIATIONS_DE),
replace=kwargs.get("replace", replacements),
linebreak=r"\n{2,}")
return sentences | ['def', 'sent_tokenize', '(', 'self', ',', 'text', ',', '*', '*', 'kwargs', ')', ':', 'sentences', '=', 'find_sentences', '(', 'text', ',', 'punctuation', '=', 'kwargs', '.', 'get', '(', '"punctuation"', ',', 'PUNCTUATION', ')', ',', 'abbreviations', '=', 'kwargs', '.', 'get', '(', '"abbreviations"', ',', 'ABBREVIATIONS_DE', ')', ',', 'replace', '=', 'kwargs', '.', 'get', '(', '"replace"', ',', 'replacements', ')', ',', 'linebreak', '=', 'r"\\n{2,}"', ')', 'return', 'sentences'] | Returns a list of sentences.
Each sentence is a space-separated string of tokens (words).
Handles common cases of abbreviations (e.g., etc., ...).
Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
Headings without an ending period are inferred by line breaks. | ['Returns', 'a', 'list', 'of', 'sentences', '.'] | train | https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L173-L192 |
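A hypothetical usage sketch, assuming textblob-de is installed and importable along the path shown in this row; the German sample text and the expectation of three sentences are illustrative assumptions:

    from textblob_de.tokenizers import PatternTokenizer

    tokenizer = PatternTokenizer()
    text = "Das ist ein Satz. Dr. Meier kam z.B. gestern an! Hier der dritte Satz."
    for sentence in tokenizer.sent_tokenize(text):
        print(sentence)
    # Abbreviations such as "Dr." and "z.B." should not terminate a sentence,
    # so three sentences are expected here.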
7,856 | galaxyproject/pulsar | pulsar/managers/util/drmaa/__init__.py | DrmaaSession.run_job | def run_job(self, **kwds):
"""
Create a DRMAA job template, populate with specified properties,
run the job, and return the external_job_id.
"""
template = DrmaaSession.session.createJobTemplate()
try:
for key in kwds:
setattr(template, key, kwds[key])
with DrmaaSession.session_lock:
return DrmaaSession.session.runJob(template)
finally:
DrmaaSession.session.deleteJobTemplate(template) | python | def run_job(self, **kwds):
"""
Create a DRMAA job template, populate with specified properties,
run the job, and return the external_job_id.
"""
template = DrmaaSession.session.createJobTemplate()
try:
for key in kwds:
setattr(template, key, kwds[key])
with DrmaaSession.session_lock:
return DrmaaSession.session.runJob(template)
finally:
DrmaaSession.session.deleteJobTemplate(template) | ['def', 'run_job', '(', 'self', ',', '*', '*', 'kwds', ')', ':', 'template', '=', 'DrmaaSession', '.', 'session', '.', 'createJobTemplate', '(', ')', 'try', ':', 'for', 'key', 'in', 'kwds', ':', 'setattr', '(', 'template', ',', 'key', ',', 'kwds', '[', 'key', ']', ')', 'with', 'DrmaaSession', '.', 'session_lock', ':', 'return', 'DrmaaSession', '.', 'session', '.', 'runJob', '(', 'template', ')', 'finally', ':', 'DrmaaSession', '.', 'session', '.', 'deleteJobTemplate', '(', 'template', ')'] | Create a DRMAA job template, populate with specified properties,
run the job, and return the external_job_id. | ['Create', 'a', 'DRMAA', 'job', 'template', 'populate', 'with', 'specified', 'properties', 'run', 'the', 'job', 'and', 'return', 'the', 'external_job_id', '.'] | train | https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/util/drmaa/__init__.py#L57-L69 |
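The try/finally shape above guarantees the job template is released even when runJob raises. A runnable sketch with FakeSession/FakeTemplate standing in for a real DRMAA session (both are assumptions, not the pulsar or drmaa API):

    class FakeTemplate:
        pass

    class FakeSession:
        def createJobTemplate(self):
            return FakeTemplate()
        def runJob(self, template):
            return "job-0001"
        def deleteJobTemplate(self, template):
            print("template deleted")

    def run_job(session, **kwds):
        template = session.createJobTemplate()
        try:
            for key, value in kwds.items():
                setattr(template, key, value)    # e.g. remoteCommand, args
            return session.runJob(template)
        finally:
            session.deleteJobTemplate(template)  # freed even if runJob raises

    print(run_job(FakeSession(), remoteCommand="/bin/sleep", args=["60"]))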
7,857 | vallis/libstempo | libstempo/plot.py | plotgwsrc | def plotgwsrc(gwb):
"""
Plot a GWB source population as a mollweide projection.
"""
theta, phi, omega, polarization = gwb.gw_dist()
rho = phi-N.pi
eta = 0.5*N.pi - theta
# I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
# /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
# RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
#old_settings = N.seterr(invalid='ignore')
P.title("GWB source population")
ax = P.axes(projection='mollweide')
foo = P.scatter(rho, eta, marker='.', s=1)
#bar = N.seterr(**old_settings)
return foo | python | def plotgwsrc(gwb):
"""
Plot a GWB source population as a mollweide projection.
"""
theta, phi, omega, polarization = gwb.gw_dist()
rho = phi-N.pi
eta = 0.5*N.pi - theta
# I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:
# /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:
# RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))
#old_settings = N.seterr(invalid='ignore')
P.title("GWB source population")
ax = P.axes(projection='mollweide')
foo = P.scatter(rho, eta, marker='.', s=1)
#bar = N.seterr(**old_settings)
return foo | ['def', 'plotgwsrc', '(', 'gwb', ')', ':', 'theta', ',', 'phi', ',', 'omega', ',', 'polarization', '=', 'gwb', '.', 'gw_dist', '(', ')', 'rho', '=', 'phi', '-', 'N', '.', 'pi', 'eta', '=', '0.5', '*', 'N', '.', 'pi', '-', 'theta', "# I don't know how to get rid of the RuntimeWarning -- RvH, Oct 10, 2014:", '# /Users/vhaaster/env/dev/lib/python2.7/site-packages/matplotlib/projections/geo.py:485:', '# RuntimeWarning: invalid value encountered in arcsin theta = np.arcsin(y / np.sqrt(2))', "#old_settings = N.seterr(invalid='ignore')", 'P', '.', 'title', '(', '"GWB source population"', ')', 'ax', '=', 'P', '.', 'axes', '(', 'projection', '=', "'mollweide'", ')', 'foo', '=', 'P', '.', 'scatter', '(', 'rho', ',', 'eta', ',', 'marker', '=', "'.'", ',', 's', '=', '1', ')', '#bar = N.seterr(**old_settings)', 'return', 'foo'] | Plot a GWB source population as a mollweide projection. | ['Plot', 'a', 'GWB', 'source', 'population', 'as', 'a', 'mollweide', 'projection', '.'] | train | https://github.com/vallis/libstempo/blob/0b19300a9b24d64c9ddc25cd6ddbfd12b6231990/libstempo/plot.py#L304-L324 |
7,858 | bpython/curtsies | curtsies/events.py | could_be_unfinished_char | def could_be_unfinished_char(seq, encoding):
"""Whether seq bytes might create a char in encoding if more bytes were added"""
if decodable(seq, encoding):
return False # any sensible encoding surely doesn't require lookahead (right?)
# (if seq bytes encoding a character, adding another byte shouldn't also encode something)
if encodings.codecs.getdecoder('utf8') is encodings.codecs.getdecoder(encoding):
return could_be_unfinished_utf8(seq)
elif encodings.codecs.getdecoder('ascii') is encodings.codecs.getdecoder(encoding):
return False
else:
return True | python | def could_be_unfinished_char(seq, encoding):
"""Whether seq bytes might create a char in encoding if more bytes were added"""
if decodable(seq, encoding):
return False # any sensible encoding surely doesn't require lookahead (right?)
# (if seq bytes encoding a character, adding another byte shouldn't also encode something)
if encodings.codecs.getdecoder('utf8') is encodings.codecs.getdecoder(encoding):
return could_be_unfinished_utf8(seq)
elif encodings.codecs.getdecoder('ascii') is encodings.codecs.getdecoder(encoding):
return False
else:
return True | ['def', 'could_be_unfinished_char', '(', 'seq', ',', 'encoding', ')', ':', 'if', 'decodable', '(', 'seq', ',', 'encoding', ')', ':', 'return', 'False', "# any sensible encoding surely doesn't require lookahead (right?)", "# (if seq bytes encoding a character, adding another byte shouldn't also encode something)", 'if', 'encodings', '.', 'codecs', '.', 'getdecoder', '(', "'utf8'", ')', 'is', 'encodings', '.', 'codecs', '.', 'getdecoder', '(', 'encoding', ')', ':', 'return', 'could_be_unfinished_utf8', '(', 'seq', ')', 'elif', 'encodings', '.', 'codecs', '.', 'getdecoder', '(', "'ascii'", ')', 'is', 'encodings', '.', 'codecs', '.', 'getdecoder', '(', 'encoding', ')', ':', 'return', 'False', 'else', ':', 'return', 'True'] | Whether seq bytes might create a char in encoding if more bytes were added | ['Whether', 'seq', 'bytes', 'might', 'create', 'a', 'char', 'in', 'encoding', 'if', 'more', 'bytes', 'were', 'added'] | train | https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/curtsies/events.py#L220-L231 |
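The UTF-8 branch above relies on the fact that a prefix of a multi-byte character fails to decode until its last byte arrives, so lookahead is required. A self-contained demonstration; decodable here is a local re-implementation, not the curtsies helper:

    def decodable(seq, encoding):
        try:
            seq.decode(encoding)
            return True
        except UnicodeDecodeError:
            return False

    snowman = "\u2603".encode("utf8")       # b'\xe2\x98\x83', three bytes
    print(decodable(snowman[:1], "utf8"))   # False - prefix alone is invalid
    print(decodable(snowman[:2], "utf8"))   # False - still unfinished
    print(decodable(snowman, "utf8"))       # True  - complete character
    # An ASCII byte, by contrast, can never start an unfinished character.
    print(decodable(b"a", "ascii"))         # True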
7,859 | aiogram/aiogram | aiogram/bot/bot.py | Bot.set_chat_photo | async def set_chat_photo(self, chat_id: typing.Union[base.Integer, base.String],
photo: base.InputFile) -> base.Boolean:
"""
Use this method to set a new profile photo for the chat. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#setchatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: :obj:`base.InputFile`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals(), exclude=['photo'])
files = {}
prepare_file(payload, files, 'photo', photo)
result = await self.request(api.Methods.SET_CHAT_PHOTO, payload, files)
return result | python | async def set_chat_photo(self, chat_id: typing.Union[base.Integer, base.String],
photo: base.InputFile) -> base.Boolean:
"""
Use this method to set a new profile photo for the chat. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#setchatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: :obj:`base.InputFile`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
"""
payload = generate_payload(**locals(), exclude=['photo'])
files = {}
prepare_file(payload, files, 'photo', photo)
result = await self.request(api.Methods.SET_CHAT_PHOTO, payload, files)
return result | ['async', 'def', 'set_chat_photo', '(', 'self', ',', 'chat_id', ':', 'typing', '.', 'Union', '[', 'base', '.', 'Integer', ',', 'base', '.', 'String', ']', ',', 'photo', ':', 'base', '.', 'InputFile', ')', '->', 'base', '.', 'Boolean', ':', 'payload', '=', 'generate_payload', '(', '*', '*', 'locals', '(', ')', ',', 'exclude', '=', '[', "'photo'", ']', ')', 'files', '=', '{', '}', 'prepare_file', '(', 'payload', ',', 'files', ',', "'photo'", ',', 'photo', ')', 'result', '=', 'await', 'self', '.', 'request', '(', 'api', '.', 'Methods', '.', 'SET_CHAT_PHOTO', ',', 'payload', ',', 'files', ')', 'return', 'result'] | Use this method to set a new profile photo for the chat. Photos can't be changed for private chats.
The bot must be an administrator in the chat for this to work and must have the appropriate admin rights.
Note: In regular groups (non-supergroups), this method will only work if the ‘All Members Are Admins’
setting is off in the target group.
Source: https://core.telegram.org/bots/api#setchatphoto
:param chat_id: Unique identifier for the target chat or username of the target channel
:type chat_id: :obj:`typing.Union[base.Integer, base.String]`
:param photo: New chat photo, uploaded using multipart/form-data
:type photo: :obj:`base.InputFile`
:return: Returns True on success
:rtype: :obj:`base.Boolean` | ['Use', 'this', 'method', 'to', 'set', 'a', 'new', 'profile', 'photo', 'for', 'the', 'chat', '.', 'Photos', 'can', 't', 'be', 'changed', 'for', 'private', 'chats', '.', 'The', 'bot', 'must', 'be', 'an', 'administrator', 'in', 'the', 'chat', 'for', 'this', 'to', 'work', 'and', 'must', 'have', 'the', 'appropriate', 'admin', 'rights', '.'] | train | https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/bot/bot.py#L1108-L1132 |
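A hypothetical usage sketch for the coroutine above, assuming aiogram is installed; the token, chat id, and photo path are placeholders, and passing an open binary file as the photo is an assumption about accepted input types:

    import asyncio
    from aiogram import Bot

    async def main():
        bot = Bot(token="123456:PLACEHOLDER")            # placeholder token
        with open("new_photo.jpg", "rb") as photo:       # placeholder file
            ok = await bot.set_chat_photo(chat_id="@my_channel", photo=photo)
        print("photo updated:", ok)

    asyncio.run(main())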
7,860 | BernardFW/bernard | src/bernard/platforms/facebook/platform.py | sign_message | def sign_message(body: ByteString, secret: Text) -> Text:
"""
Compute a message's signature.
"""
return 'sha1={}'.format(
hmac.new(secret.encode(), body, sha1).hexdigest()
) | python | def sign_message(body: ByteString, secret: Text) -> Text:
"""
Compute a message's signature.
"""
return 'sha1={}'.format(
hmac.new(secret.encode(), body, sha1).hexdigest()
) | ['def', 'sign_message', '(', 'body', ':', 'ByteString', ',', 'secret', ':', 'Text', ')', '->', 'Text', ':', 'return', "'sha1={}'", '.', 'format', '(', 'hmac', '.', 'new', '(', 'secret', '.', 'encode', '(', ')', ',', 'body', ',', 'sha1', ')', '.', 'hexdigest', '(', ')', ')'] | Compute a message's signature. | ['Compute', 'a', 'message', 's', 'signature', '.'] | train | https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/platforms/facebook/platform.py#L112-L119 |
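Verifying an incoming signature against this helper is the usual counterpart, and hmac.compare_digest gives a constant-time comparison that avoids leaking information through timing. The secret, body, and the header the signature arrives in are illustrative assumptions:

    import hmac
    from hashlib import sha1

    def sign_message(body, secret):
        return 'sha1={}'.format(hmac.new(secret.encode(), body, sha1).hexdigest())

    secret = "app-secret"                        # placeholder value
    body = b'{"object":"page","entry":[]}'       # raw request bytes, pre-parsing
    expected = sign_message(body, secret)
    received = expected  # in practice taken from e.g. an X-Hub-Signature header
    print(hmac.compare_digest(received, expected))  # True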
7,861 | push-things/django-th | th_evernote/my_evernote.py | ServiceEvernote._notebook | def _notebook(trigger, note_store):
"""
:param trigger: trigger object
:param note_store: note_store object
:return: note object
"""
note = Types.Note()
if trigger.notebook:
# get the notebookGUID ...
notebook_id = EvernoteMgr.get_notebook(note_store, trigger.notebook)
# create notebookGUID if it does not exist then return its id
note.notebookGuid = EvernoteMgr.set_notebook(note_store, trigger.notebook, notebook_id)
if trigger.tag:
# ... and get the tagGUID if a tag has been provided
tag_id = EvernoteMgr.get_tag(note_store, trigger.tag)
if tag_id is False:
tag_id = EvernoteMgr.set_tag(note_store, trigger.tag, tag_id)
# set the tag to the note if a tag has been provided
if tag_id:
note.tagGuids = tag_id
logger.debug("notebook that will be used %s", trigger.notebook)
return note | python | def _notebook(trigger, note_store):
"""
:param trigger: trigger object
:param note_store: note_store object
:return: note object
"""
note = Types.Note()
if trigger.notebook:
# get the notebookGUID ...
notebook_id = EvernoteMgr.get_notebook(note_store, trigger.notebook)
# create notebookGUID if it does not exist then return its id
note.notebookGuid = EvernoteMgr.set_notebook(note_store, trigger.notebook, notebook_id)
if trigger.tag:
# ... and get the tagGUID if a tag has been provided
tag_id = EvernoteMgr.get_tag(note_store, trigger.tag)
if tag_id is False:
tag_id = EvernoteMgr.set_tag(note_store, trigger.tag, tag_id)
# set the tag to the note if a tag has been provided
if tag_id:
note.tagGuids = tag_id
logger.debug("notebook that will be used %s", trigger.notebook)
return note | ['def', '_notebook', '(', 'trigger', ',', 'note_store', ')', ':', 'note', '=', 'Types', '.', 'Note', '(', ')', 'if', 'trigger', '.', 'notebook', ':', '# get the notebookGUID ...', 'notebook_id', '=', 'EvernoteMgr', '.', 'get_notebook', '(', 'note_store', ',', 'trigger', '.', 'notebook', ')', '# create notebookGUID if it does not exist then return its id', 'note', '.', 'notebookGuid', '=', 'EvernoteMgr', '.', 'set_notebook', '(', 'note_store', ',', 'trigger', '.', 'notebook', ',', 'notebook_id', ')', 'if', 'trigger', '.', 'tag', ':', '# ... and get the tagGUID if a tag has been provided', 'tag_id', '=', 'EvernoteMgr', '.', 'get_tag', '(', 'note_store', ',', 'trigger', '.', 'tag', ')', 'if', 'tag_id', 'is', 'False', ':', 'tag_id', '=', 'EvernoteMgr', '.', 'set_tag', '(', 'note_store', ',', 'trigger', '.', 'tag', ',', 'tag_id', ')', '# set the tag to the note if a tag has been provided', 'if', 'tag_id', ':', 'note', '.', 'tagGuids', '=', 'tag_id', 'logger', '.', 'debug', '(', '"notebook that will be used %s"', ',', 'trigger', '.', 'notebook', ')', 'return', 'note'] | :param trigger: trigger object
:param note_store: note_store object
:return: note object | [':', 'param', 'trigger', ':', 'trigger', 'object', ':', 'param', 'note_store', ':', 'note_store', 'object', ':', 'return', ':', 'note', 'object'] | train | https://github.com/push-things/django-th/blob/86c999d16bcf30b6224206e5b40824309834ac8c/th_evernote/my_evernote.py#L191-L214 |
7,862 | aegirhall/console-menu | consolemenu/menu_formatter.py | MenuFormatBuilder.set_right_margin | def set_right_margin(self, right_margin):
"""
Set the right margin of the menu. This will determine the number of spaces between the right edge of the
screen and the right menu border.
:param right_margin: an integer value
"""
self.__header.style.margins.right = right_margin
self.__prologue.style.margins.right = right_margin
self.__items_section.style.margins.right = right_margin
self.__epilogue.style.margins.right = right_margin
self.__footer.style.margins.right = right_margin
self.__prompt.style.margins.right = right_margin
return self | python | def set_right_margin(self, right_margin):
"""
Set the right margin of the menu. This will determine the number of spaces between the right edge of the
screen and the right menu border.
:param right_margin: an integer value
"""
self.__header.style.margins.right = right_margin
self.__prologue.style.margins.right = right_margin
self.__items_section.style.margins.right = right_margin
self.__epilogue.style.margins.right = right_margin
self.__footer.style.margins.right = right_margin
self.__prompt.style.margins.right = right_margin
return self | ['def', 'set_right_margin', '(', 'self', ',', 'right_margin', ')', ':', 'self', '.', '__header', '.', 'style', '.', 'margins', '.', 'right', '=', 'right_margin', 'self', '.', '__prologue', '.', 'style', '.', 'margins', '.', 'right', '=', 'right_margin', 'self', '.', '__items_section', '.', 'style', '.', 'margins', '.', 'right', '=', 'right_margin', 'self', '.', '__epilogue', '.', 'style', '.', 'margins', '.', 'right', '=', 'right_margin', 'self', '.', '__footer', '.', 'style', '.', 'margins', '.', 'right', '=', 'right_margin', 'self', '.', '__prompt', '.', 'style', '.', 'margins', '.', 'right', '=', 'right_margin', 'return', 'self'] | Set the right margin of the menu. This will determine the number of spaces between the right edge of the
screen and the right menu border.
:param right_margin: an integer value | ['Set', 'the', 'right', 'margin', 'of', 'the', 'menu', '.', 'This', 'will', 'determine', 'the', 'number', 'of', 'spaces', 'between', 'the', 'right', 'edge', 'of', 'the', 'screen', 'and', 'the', 'right', 'menu', 'border', '.', ':', 'param', 'right_margin', ':', 'an', 'integer', 'value'] | train | https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/menu_formatter.py#L93-L105 |
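The trailing `return self` is what makes these setters chainable. A minimal stand-alone builder showing the same design choice (MarginBuilder is a stand-in, not the consolemenu API):

    class MarginBuilder:
        def __init__(self):
            self.left = 0
            self.right = 0

        def set_left_margin(self, n):
            self.left = n
            return self              # returning self enables chaining

        def set_right_margin(self, n):
            self.right = n
            return self

    b = MarginBuilder().set_left_margin(2).set_right_margin(4)
    print(b.left, b.right)  # 2 4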
7,863 | materialsproject/pymatgen | pymatgen/symmetry/groups.py | in_array_list | def in_array_list(array_list, a, tol=1e-5):
"""
Extremely efficient nd-array comparison using numpy's broadcasting. This
function checks if a particular array a, is present in a list of arrays.
It works for arrays of any size, e.g., even matrix searches.
Args:
array_list ([array]): A list of arrays to compare to.
a (array): The test array for comparison.
tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is
done.
Returns:
(bool)
"""
if len(array_list) == 0:
return False
axes = tuple(range(1, a.ndim + 1))
if not tol:
return np.any(np.all(np.equal(array_list, a[None, :]), axes))
else:
return np.any(np.sum(np.abs(array_list - a[None, :]), axes) < tol) | python | def in_array_list(array_list, a, tol=1e-5):
"""
Extremely efficient nd-array comparison using numpy's broadcasting. This
function checks if a particular array a, is present in a list of arrays.
It works for arrays of any size, e.g., even matrix searches.
Args:
array_list ([array]): A list of arrays to compare to.
a (array): The test array for comparison.
tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is
done.
Returns:
(bool)
"""
if len(array_list) == 0:
return False
axes = tuple(range(1, a.ndim + 1))
if not tol:
return np.any(np.all(np.equal(array_list, a[None, :]), axes))
else:
return np.any(np.sum(np.abs(array_list - a[None, :]), axes) < tol) | ['def', 'in_array_list', '(', 'array_list', ',', 'a', ',', 'tol', '=', '1e-5', ')', ':', 'if', 'len', '(', 'array_list', ')', '==', '0', ':', 'return', 'False', 'axes', '=', 'tuple', '(', 'range', '(', '1', ',', 'a', '.', 'ndim', '+', '1', ')', ')', 'if', 'not', 'tol', ':', 'return', 'np', '.', 'any', '(', 'np', '.', 'all', '(', 'np', '.', 'equal', '(', 'array_list', ',', 'a', '[', 'None', ',', ':', ']', ')', ',', 'axes', ')', ')', 'else', ':', 'return', 'np', '.', 'any', '(', 'np', '.', 'sum', '(', 'np', '.', 'abs', '(', 'array_list', '-', 'a', '[', 'None', ',', ':', ']', ')', ',', 'axes', ')', '<', 'tol', ')'] | Extremely efficient nd-array comparison using numpy's broadcasting. This
function checks if a particular array a, is present in a list of arrays.
It works for arrays of any size, e.g., even matrix searches.
Args:
array_list ([array]): A list of arrays to compare to.
a (array): The test array for comparison.
tol (float): The tolerance. Defaults to 1e-5. If 0, an exact match is
done.
Returns:
(bool) | ['Extremely', 'efficient', 'nd', '-', 'array', 'comparison', 'using', 'numpy', 's', 'broadcasting', '.', 'This', 'function', 'checks', 'if', 'a', 'particular', 'array', 'a', 'is', 'present', 'in', 'a', 'list', 'of', 'arrays', '.', 'It', 'works', 'for', 'arrays', 'of', 'any', 'size', 'e', '.', 'g', '.', 'even', 'matrix', 'searches', '.'] | train | https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/symmetry/groups.py#L515-L536 |
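The broadcasting trick above can be exercised directly on a small matrix search; this sketch restates the function so it runs with only numpy installed, and the test matrices are illustrative:

    import numpy as np

    def in_array_list(array_list, a, tol=1e-5):
        if len(array_list) == 0:
            return False
        axes = tuple(range(1, a.ndim + 1))
        if not tol:
            return np.any(np.all(np.equal(array_list, a[None, :]), axes))
        return np.any(np.sum(np.abs(array_list - a[None, :]), axes) < tol)

    mats = np.array([np.eye(2), np.ones((2, 2))])
    print(in_array_list(mats, np.eye(2)))          # True  (exact member)
    print(in_array_list(mats, np.eye(2) + 1e-7))   # True  (within tolerance)
    print(in_array_list(mats, 2 * np.eye(2)))      # False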
7,864 | gawel/irc3 | irc3/base.py | IrcObject.run | def run(self, forever=True):
"""start the bot"""
loop = self.create_connection()
self.add_signal_handlers()
if forever:
loop.run_forever() | python | def run(self, forever=True):
"""start the bot"""
loop = self.create_connection()
self.add_signal_handlers()
if forever:
loop.run_forever() | ['def', 'run', '(', 'self', ',', 'forever', '=', 'True', ')', ':', 'loop', '=', 'self', '.', 'create_connection', '(', ')', 'self', '.', 'add_signal_handlers', '(', ')', 'if', 'forever', ':', 'loop', '.', 'run_forever', '(', ')'] | start the bot | ['start', 'the', 'bot'] | train | https://github.com/gawel/irc3/blob/cd27840a5809a1f803dc620860fe75d83d2a2ec8/irc3/base.py#L365-L370 |
7,865 | jeremymcrae/denovonear | denovonear/ensembl_requester.py | EnsemblRequest.get_genomic_seq_for_transcript | def get_genomic_seq_for_transcript(self, transcript_id, expand):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/sequence/id/{0}?type=genomic;expand_3prime={1};expand_5prime={1}".format(transcript_id, expand)
r = self.ensembl_request(ext, headers)
gene = json.loads(r)
seq = gene["seq"]
seq_id = gene["id"]
if seq_id != transcript_id:
raise ValueError("ensembl gave the wrong transcript")
desc = gene["desc"].split(":")
chrom = desc[2]
start = int(desc[3]) + expand
end = int(desc[4]) - expand
strand_temp = int(desc[5])
strand = "+"
if strand_temp == -1:
strand = "-"
return (chrom, start, end, strand, seq) | python | def get_genomic_seq_for_transcript(self, transcript_id, expand):
""" obtain the sequence for a transcript from ensembl
"""
headers = {"content-type": "application/json"}
self.attempt = 0
ext = "/sequence/id/{0}?type=genomic;expand_3prime={1};expand_5prime={1}".format(transcript_id, expand)
r = self.ensembl_request(ext, headers)
gene = json.loads(r)
seq = gene["seq"]
seq_id = gene["id"]
if seq_id != transcript_id:
raise ValueError("ensembl gave the wrong transcript")
desc = gene["desc"].split(":")
chrom = desc[2]
start = int(desc[3]) + expand
end = int(desc[4]) - expand
strand_temp = int(desc[5])
strand = "+"
if strand_temp == -1:
strand = "-"
return (chrom, start, end, strand, seq) | ['def', 'get_genomic_seq_for_transcript', '(', 'self', ',', 'transcript_id', ',', 'expand', ')', ':', 'headers', '=', '{', '"content-type"', ':', '"application/json"', '}', 'self', '.', 'attempt', '=', '0', 'ext', '=', '"/sequence/id/{0}?type=genomic;expand_3prime={1};expand_5prime={1}"', '.', 'format', '(', 'transcript_id', ',', 'expand', ')', 'r', '=', 'self', '.', 'ensembl_request', '(', 'ext', ',', 'headers', ')', 'gene', '=', 'json', '.', 'loads', '(', 'r', ')', 'seq', '=', 'gene', '[', '"seq"', ']', 'seq_id', '=', 'gene', '[', '"id"', ']', 'if', 'seq_id', '!=', 'transcript_id', ':', 'raise', 'ValueError', '(', '"ensembl gave the wrong transcript"', ')', 'desc', '=', 'gene', '[', '"desc"', ']', '.', 'split', '(', '":"', ')', 'chrom', '=', 'desc', '[', '2', ']', 'start', '=', 'int', '(', 'desc', '[', '3', ']', ')', '+', 'expand', 'end', '=', 'int', '(', 'desc', '[', '4', ']', ')', '-', 'expand', 'strand_temp', '=', 'int', '(', 'desc', '[', '5', ']', ')', 'strand', '=', '"+"', 'if', 'strand_temp', '==', '-', '1', ':', 'strand', '=', '"-"', 'return', '(', 'chrom', ',', 'start', ',', 'end', ',', 'strand', ',', 'seq', ')'] | obtain the sequence for a transcript from ensembl | ['obtain', 'the', 'sequence', 'for', 'a', 'transcript', 'from', 'ensembl'] | train | https://github.com/jeremymcrae/denovonear/blob/feaab0fc77e89d70b31e8092899e4f0e68bac9fe/denovonear/ensembl_requester.py#L245-L273 |
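The coordinate bookkeeping above trims the requested flanks back off the expanded region Ensembl returns, recovering the transcript's own start and end. A tiny self-contained check with a made-up `desc` string (all values are assumptions):

    expand = 10
    desc = "chromosome:GRCh38:1:990:2010:1".split(":")   # made-up description
    chrom = desc[2]
    start = int(desc[3]) + expand    # 1000: trim the 5' flank back off
    end = int(desc[4]) - expand      # 2000: trim the 3' flank back off
    strand = "+" if int(desc[5]) == 1 else "-"
    print(chrom, start, end, strand)  # 1 1000 2000 +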
7,866 | abilian/abilian-core | abilian/services/auth/service.py | AuthService.do_access_control | def do_access_control(self):
"""`before_request` handler to check if user should be redirected to
login page."""
from abilian.services import get_service
if current_app.testing and current_app.config.get("NO_LOGIN"):
# Special case for tests
user = User.query.get(0)
login_user(user, force=True)
return
state = self.app_state
user = unwrap(current_user)
# Another special case for tests
if current_app.testing and getattr(user, "is_admin", False):
return
security = get_service("security")
user_roles = frozenset(security.get_roles(user))
endpoint = request.endpoint
blueprint = request.blueprint
access_controllers = []
access_controllers.extend(state.bp_access_controllers.get(None, []))
if blueprint and blueprint in state.bp_access_controllers:
access_controllers.extend(state.bp_access_controllers[blueprint])
if endpoint and endpoint in state.endpoint_access_controllers:
access_controllers.extend(state.endpoint_access_controllers[endpoint])
for access_controller in reversed(access_controllers):
verdict = access_controller(user=user, roles=user_roles)
if verdict is None:
continue
elif verdict is True:
return
else:
if user.is_anonymous:
return self.redirect_to_login()
raise Forbidden()
# default policy
if current_app.config.get("PRIVATE_SITE") and user.is_anonymous:
return self.redirect_to_login() | python | def do_access_control(self):
"""`before_request` handler to check if user should be redirected to
login page."""
from abilian.services import get_service
if current_app.testing and current_app.config.get("NO_LOGIN"):
# Special case for tests
user = User.query.get(0)
login_user(user, force=True)
return
state = self.app_state
user = unwrap(current_user)
# Another special case for tests
if current_app.testing and getattr(user, "is_admin", False):
return
security = get_service("security")
user_roles = frozenset(security.get_roles(user))
endpoint = request.endpoint
blueprint = request.blueprint
access_controllers = []
access_controllers.extend(state.bp_access_controllers.get(None, []))
if blueprint and blueprint in state.bp_access_controllers:
access_controllers.extend(state.bp_access_controllers[blueprint])
if endpoint and endpoint in state.endpoint_access_controllers:
access_controllers.extend(state.endpoint_access_controllers[endpoint])
for access_controller in reversed(access_controllers):
verdict = access_controller(user=user, roles=user_roles)
if verdict is None:
continue
elif verdict is True:
return
else:
if user.is_anonymous:
return self.redirect_to_login()
raise Forbidden()
# default policy
if current_app.config.get("PRIVATE_SITE") and user.is_anonymous:
return self.redirect_to_login() | ['def', 'do_access_control', '(', 'self', ')', ':', 'from', 'abilian', '.', 'services', 'import', 'get_service', 'if', 'current_app', '.', 'testing', 'and', 'current_app', '.', 'config', '.', 'get', '(', '"NO_LOGIN"', ')', ':', '# Special case for tests', 'user', '=', 'User', '.', 'query', '.', 'get', '(', '0', ')', 'login_user', '(', 'user', ',', 'force', '=', 'True', ')', 'return', 'state', '=', 'self', '.', 'app_state', 'user', '=', 'unwrap', '(', 'current_user', ')', '# Another special case for tests', 'if', 'current_app', '.', 'testing', 'and', 'getattr', '(', 'user', ',', '"is_admin"', ',', 'False', ')', ':', 'return', 'security', '=', 'get_service', '(', '"security"', ')', 'user_roles', '=', 'frozenset', '(', 'security', '.', 'get_roles', '(', 'user', ')', ')', 'endpoint', '=', 'request', '.', 'endpoint', 'blueprint', '=', 'request', '.', 'blueprint', 'access_controllers', '=', '[', ']', 'access_controllers', '.', 'extend', '(', 'state', '.', 'bp_access_controllers', '.', 'get', '(', 'None', ',', '[', ']', ')', ')', 'if', 'blueprint', 'and', 'blueprint', 'in', 'state', '.', 'bp_access_controllers', ':', 'access_controllers', '.', 'extend', '(', 'state', '.', 'bp_access_controllers', '[', 'blueprint', ']', ')', 'if', 'endpoint', 'and', 'endpoint', 'in', 'state', '.', 'endpoint_access_controllers', ':', 'access_controllers', '.', 'extend', '(', 'state', '.', 'endpoint_access_controllers', '[', 'endpoint', ']', ')', 'for', 'access_controller', 'in', 'reversed', '(', 'access_controllers', ')', ':', 'verdict', '=', 'access_controller', '(', 'user', '=', 'user', ',', 'roles', '=', 'user_roles', ')', 'if', 'verdict', 'is', 'None', ':', 'continue', 'elif', 'verdict', 'is', 'True', ':', 'return', 'else', ':', 'if', 'user', '.', 'is_anonymous', ':', 'return', 'self', '.', 'redirect_to_login', '(', ')', 'raise', 'Forbidden', '(', ')', '# default policy', 'if', 'current_app', '.', 'config', '.', 'get', '(', '"PRIVATE_SITE"', ')', 'and', 'user', '.', 'is_anonymous', ':', 'return', 'self', '.', 'redirect_to_login', '(', ')'] | `before_request` handler to check if user should be redirected to
login page. | ['before_request', 'handler', 'to', 'check', 'if', 'user', 'should', 'be', 'redirected', 'to', 'login', 'page', '.'] | train | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/auth/service.py#L174-L219 |
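The verdict protocol above — True allows, a non-None falsy value denies, None means "no opinion, ask the next controller" — reduces to a small loop. A sketch with hypothetical controllers, not the abilian API:

    def evaluate(controllers, **ctx):
        for controller in reversed(controllers):    # most specific first, as above
            verdict = controller(**ctx)
            if verdict is None:
                continue
            return bool(verdict)
        return None   # nobody had an opinion; caller applies the default policy

    allow_admins = lambda user, roles: True if "admin" in roles else None
    deny_anonymous = lambda user, roles: False if user is None else None

    print(evaluate([deny_anonymous, allow_admins], user="bob", roles={"admin"}))  # True
    print(evaluate([deny_anonymous, allow_admins], user=None, roles=set()))       # False
    print(evaluate([deny_anonymous, allow_admins], user="eve", roles=set()))      # None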
7,867 | praekeltfoundation/molo | molo/core/api/endpoints.py | MoloPagesEndpoint.get_queryset | def get_queryset(self):
'''
This is overwritten in order to not exclude drafts
and pages submitted for moderation
'''
request = self.request
# Allow pages to be filtered to a specific type
if 'type' not in request.GET:
model = Page
else:
model_name = request.GET['type']
try:
model = resolve_model_string(model_name)
except LookupError:
raise BadRequestError("type doesn't exist")
if not issubclass(model, Page):
raise BadRequestError("type doesn't exist")
# This is the overwritten line
queryset = model.objects.public() # exclude .live()
# Filter by site
queryset = queryset.descendant_of(
request.site.root_page, inclusive=True)
return queryset | python | def get_queryset(self):
'''
This is overwritten in order to not exclude drafts
and pages submitted for moderation
'''
request = self.request
# Allow pages to be filtered to a specific type
if 'type' not in request.GET:
model = Page
else:
model_name = request.GET['type']
try:
model = resolve_model_string(model_name)
except LookupError:
raise BadRequestError("type doesn't exist")
if not issubclass(model, Page):
raise BadRequestError("type doesn't exist")
# This is the overwritten line
queryset = model.objects.public() # exclude .live()
# Filter by site
queryset = queryset.descendant_of(
request.site.root_page, inclusive=True)
return queryset | ['def', 'get_queryset', '(', 'self', ')', ':', 'request', '=', 'self', '.', 'request', '# Allow pages to be filtered to a specific type', 'if', "'type'", 'not', 'in', 'request', '.', 'GET', ':', 'model', '=', 'Page', 'else', ':', 'model_name', '=', 'request', '.', 'GET', '[', "'type'", ']', 'try', ':', 'model', '=', 'resolve_model_string', '(', 'model_name', ')', 'except', 'LookupError', ':', 'raise', 'BadRequestError', '(', '"type doesn\'t exist"', ')', 'if', 'not', 'issubclass', '(', 'model', ',', 'Page', ')', ':', 'raise', 'BadRequestError', '(', '"type doesn\'t exist"', ')', '# This is the overwritten line', 'queryset', '=', 'model', '.', 'objects', '.', 'public', '(', ')', '# exclude .live()', '# Filter by site', 'queryset', '=', 'queryset', '.', 'descendant_of', '(', 'request', '.', 'site', '.', 'root_page', ',', 'inclusive', '=', 'True', ')', 'return', 'queryset'] | This is overwritten in order to not exclude drafts
and pages submitted for moderation | ['This', 'is', 'overwritten', 'in', 'order', 'to', 'not', 'exclude', 'drafts', 'and', 'pages', 'submitted', 'for', 'moderation'] | train | https://github.com/praekeltfoundation/molo/blob/57702fda4fab261d67591415f7d46bc98fa38525/molo/core/api/endpoints.py#L48-L74 |
7,868 | pip-services3-python/pip-services3-commons-python | pip_services3_commons/validate/ArraySchema.py | ArraySchema._perform_validation | def _perform_validation(self, path, value, results):
"""
Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
name = path if path != None else "value"
value = ObjectReader.get_value(value)
super(ArraySchema, self)._perform_validation(path, value, results)
if value == None:
return
if isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple):
index = 0
for element in value:
element_path = str(index) if path == None or len(path) == 0 else path + "." + str(index)
self._perform_type_validation(element_path, self.value_type, element, results)
index += 1
else:
results.append(
ValidationResult(
path,
ValidationResultType.Error,
"VALUE_ISNOT_ARRAY",
name + " type must be List or Array",
"List",
type(value)
)
) | python | def _perform_validation(self, path, value, results):
"""
Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results.
"""
name = path if path != None else "value"
value = ObjectReader.get_value(value)
super(ArraySchema, self)._perform_validation(path, value, results)
if value == None:
return
if isinstance(value, list) or isinstance(value, set) or isinstance(value, tuple):
index = 0
for element in value:
element_path = str(index) if path == None or len(path) == 0 else path + "." + str(index)
self._perform_type_validation(element_path, self.value_type, element, results)
index += 1
else:
results.append(
ValidationResult(
path,
ValidationResultType.Error,
"VALUE_ISNOT_ARRAY",
name + " type must be List or Array",
"List",
type(value)
)
) | ['def', '_perform_validation', '(', 'self', ',', 'path', ',', 'value', ',', 'results', ')', ':', 'name', '=', 'path', 'if', 'path', '!=', 'None', 'else', '"value"', 'value', '=', 'ObjectReader', '.', 'get_value', '(', 'value', ')', 'super', '(', 'ArraySchema', ',', 'self', ')', '.', '_perform_validation', '(', 'path', ',', 'value', ',', 'results', ')', 'if', 'value', '==', 'None', ':', 'return', 'if', 'isinstance', '(', 'value', ',', 'list', ')', 'or', 'isinstance', '(', 'value', ',', 'set', ')', 'or', 'isinstance', '(', 'value', ',', 'tuple', ')', ':', 'index', '=', '0', 'for', 'element', 'in', 'value', ':', 'element_path', '=', 'str', '(', 'index', ')', 'if', 'path', '==', 'None', 'or', 'len', '(', 'path', ')', '==', '0', 'else', 'path', '+', '"."', '+', 'str', '(', 'index', ')', 'self', '.', '_perform_type_validation', '(', 'element_path', ',', 'self', '.', 'value_type', ',', 'element', ',', 'results', ')', 'index', '+=', '1', 'else', ':', 'results', '.', 'append', '(', 'ValidationResult', '(', 'path', ',', 'ValidationResultType', '.', 'Error', ',', '"VALUE_ISNOT_ARRAY"', ',', 'name', '+', '" type must be List or Array"', ',', '"List"', ',', 'type', '(', 'value', ')', ')', ')'] | Validates a given value against the schema and configured validation rules.
:param path: a dot notation path to the value.
:param value: a value to be validated.
:param results: a list with validation results to add new results. | ['Validates', 'a', 'given', 'value', 'against', 'the', 'schema', 'and', 'configured', 'validation', 'rules', '.'] | train | https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/validate/ArraySchema.py#L39-L73 |
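The shape of the check above, reduced to a stand-alone function: accept list/set/tuple, validate each element under a dotted path, and report a type error otherwise. validate_array and check_int are stand-ins, not the pip-services API:

    def validate_array(path, value, element_check):
        errors = []
        name = path or "value"
        if value is None:
            return errors                         # None passes, as above
        if isinstance(value, (list, set, tuple)):
            for index, element in enumerate(value):
                element_path = "%s.%d" % (path, index) if path else str(index)
                errors.extend(element_check(element_path, element))
        else:
            errors.append("%s type must be List or Array, got %s"
                          % (name, type(value).__name__))
        return errors

    check_int = lambda p, v: [] if isinstance(v, int) else ["%s must be int" % p]
    print(validate_array("ids", [1, "x", 3], check_int))  # ['ids.1 must be int']
    print(validate_array("ids", 42, check_int))           # type error reported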
7,869 | kiwiz/gkeepapi | gkeepapi/node.py | Node.text | def text(self, value):
"""Set the text value.
Args:
value (str): Text value.
"""
self._text = value
self.timestamps.edited = datetime.datetime.utcnow()
self.touch(True) | python | def text(self, value):
"""Set the text value.
Args:
value (str): Text value.
"""
self._text = value
self.timestamps.edited = datetime.datetime.utcnow()
self.touch(True) | ['def', 'text', '(', 'self', ',', 'value', ')', ':', 'self', '.', '_text', '=', 'value', 'self', '.', 'timestamps', '.', 'edited', '=', 'datetime', '.', 'datetime', '.', 'utcnow', '(', ')', 'self', '.', 'touch', '(', 'True', ')'] | Set the text value.
Args:
value (str): Text value. | ['Set', 'the', 'text', 'value', '.'] | train | https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/node.py#L1087-L1095 |
7,870 | marcharper/python-ternary | ternary/colormapping.py | colormapper | def colormapper(value, lower=0, upper=1, cmap=None):
"""
Maps values to colors by normalizing within [a,b], obtaining rgba from the
given matplotlib color map for heatmap polygon coloring.
Parameters
----------
value: float
The value to be colormapped
lower: float
Lower bound of colors
upper: float
Upper bound of colors
cmap: String or matplotlib.colors.Colormap (optional)
Colormap object to prevent repeated lookup
Returns
-------
hex_, float
The value mapped to an appropriate RGBA color value
"""
cmap = get_cmap(cmap)
if upper - lower == 0:
rgba = cmap(0)
else:
rgba = cmap((value - lower) / float(upper - lower))
hex_ = rgb2hex(rgba)
return hex_ | python | def colormapper(value, lower=0, upper=1, cmap=None):
"""
Maps values to colors by normalizing within [a,b], obtaining rgba from the
given matplotlib color map for heatmap polygon coloring.
Parameters
----------
value: float
The value to be colormapped
lower: float
Lower bound of colors
upper: float
Upper bound of colors
cmap: String or matplotlib.colors.Colormap (optional)
Colormap object to prevent repeated lookup
Returns
-------
hex_, float
The value mapped to an appropriate RGBA color value
"""
cmap = get_cmap(cmap)
if upper - lower == 0:
rgba = cmap(0)
else:
rgba = cmap((value - lower) / float(upper - lower))
hex_ = rgb2hex(rgba)
return hex_ | ['def', 'colormapper', '(', 'value', ',', 'lower', '=', '0', ',', 'upper', '=', '1', ',', 'cmap', '=', 'None', ')', ':', 'cmap', '=', 'get_cmap', '(', 'cmap', ')', 'if', 'upper', '-', 'lower', '==', '0', ':', 'rgba', '=', 'cmap', '(', '0', ')', 'else', ':', 'rgba', '=', 'cmap', '(', '(', 'value', '-', 'lower', ')', '/', 'float', '(', 'upper', '-', 'lower', ')', ')', 'hex_', '=', 'rgb2hex', '(', 'rgba', ')', 'return', 'hex_'] | Maps values to colors by normalizing within [a,b], obtaining rgba from the
given matplotlib color map for heatmap polygon coloring.
Parameters
----------
value: float
The value to be colormapped
lower: float
Lower bound of colors
upper: float
Upper bound of colors
cmap: String or matplotlib.colors.Colormap (optional)
Colormap object to prevent repeated lookup
Returns
-------
hex_, float
The value mapped to an appropriate RGBA color value | ['Maps', 'values', 'to', 'colors', 'by', 'normalizing', 'within', '[', 'a', 'b', ']', 'obtaining', 'rgba', 'from', 'the', 'given', 'matplotlib', 'color', 'map', 'for', 'heatmap', 'polygon', 'coloring', '.'] | train | https://github.com/marcharper/python-ternary/blob/a4bef393ec9df130d4b55707293c750498a01843/ternary/colormapping.py#L42-L70 |
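A usage sketch, assuming matplotlib is installed: values in [0, 10] mapped through 'viridis' to hex strings, with the degenerate zero-width range guarded exactly as above. plt.get_cmap stands in for the module's own cmap lookup helper:

    import matplotlib.pyplot as plt
    from matplotlib.colors import rgb2hex

    def colormapper(value, lower=0, upper=1, cmap=None):
        cmap = plt.get_cmap(cmap)
        if upper - lower == 0:
            rgba = cmap(0)           # guard the degenerate flat range
        else:
            rgba = cmap((value - lower) / float(upper - lower))
        return rgb2hex(rgba)

    for v in (0, 5, 10):
        print(v, colormapper(v, lower=0, upper=10, cmap="viridis"))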
7,871 | osrg/ryu | ryu/services/protocols/bgp/peer.py | Peer._construct_as_path_attr | def _construct_as_path_attr(self, as_path_attr, as4_path_attr):
"""Marge AS_PATH and AS4_PATH attribute instances into
a single AS_PATH instance."""
def _listify(li):
"""Reconstruct AS_PATH list.
Example::
>>> _listify([[1, 2, 3], {4, 5}, [6, 7]])
[1, 2, 3, {4, 5}, 6, 7]
"""
lo = []
for l in li:
if isinstance(l, list):
lo.extend(l)
elif isinstance(l, set):
lo.append(l)
else:
pass
return lo
# If AS4_PATH attribute is None, returns the given AS_PATH attribute
if as4_path_attr is None:
return as_path_attr
# If AS_PATH is shorter than AS4_PATH, AS4_PATH should be ignored.
if as_path_attr.get_as_path_len() < as4_path_attr.get_as_path_len():
return as_path_attr
org_as_path_list = _listify(as_path_attr.path_seg_list)
as4_path_list = _listify(as4_path_attr.path_seg_list)
# Reverse to compare backward.
org_as_path_list.reverse()
as4_path_list.reverse()
new_as_path_list = []
tmp_list = []
for as_path, as4_path in zip_longest(org_as_path_list, as4_path_list):
if as4_path is None:
if isinstance(as_path, int):
tmp_list.insert(0, as_path)
elif isinstance(as_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as_path)
else:
pass
elif isinstance(as4_path, int):
tmp_list.insert(0, as4_path)
elif isinstance(as4_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as4_path)
else:
pass
if tmp_list:
new_as_path_list.insert(0, tmp_list)
return bgp.BGPPathAttributeAsPath(new_as_path_list) | python | def _construct_as_path_attr(self, as_path_attr, as4_path_attr):
"""Marge AS_PATH and AS4_PATH attribute instances into
a single AS_PATH instance."""
def _listify(li):
"""Reconstruct AS_PATH list.
Example::
>>> _listify([[1, 2, 3], {4, 5}, [6, 7]])
[1, 2, 3, {4, 5}, 6, 7]
"""
lo = []
for l in li:
if isinstance(l, list):
lo.extend(l)
elif isinstance(l, set):
lo.append(l)
else:
pass
return lo
# If AS4_PATH attribute is None, returns the given AS_PATH attribute
if as4_path_attr is None:
return as_path_attr
# If AS_PATH is shorter than AS4_PATH, AS4_PATH should be ignored.
if as_path_attr.get_as_path_len() < as4_path_attr.get_as_path_len():
return as_path_attr
org_as_path_list = _listify(as_path_attr.path_seg_list)
as4_path_list = _listify(as4_path_attr.path_seg_list)
# Reverse to compare backward.
org_as_path_list.reverse()
as4_path_list.reverse()
new_as_path_list = []
tmp_list = []
for as_path, as4_path in zip_longest(org_as_path_list, as4_path_list):
if as4_path is None:
if isinstance(as_path, int):
tmp_list.insert(0, as_path)
elif isinstance(as_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as_path)
else:
pass
elif isinstance(as4_path, int):
tmp_list.insert(0, as4_path)
elif isinstance(as4_path, set):
if tmp_list:
new_as_path_list.insert(0, tmp_list)
tmp_list = []
new_as_path_list.insert(0, as4_path)
else:
pass
if tmp_list:
new_as_path_list.insert(0, tmp_list)
        return bgp.BGPPathAttributeAsPath(new_as_path_list) | ['def', '_construct_as_path_attr', '(', 'self', ',', 'as_path_attr', ',', 'as4_path_attr', ')', ':', 'def', '_listify', '(', 'li', ')', ':', '"""Reconstruct AS_PATH list.\n\n Example::\n\n >>> _listify([[1, 2, 3], {4, 5}, [6, 7]])\n [1, 2, 3, {4, 5}, 6, 7]\n """', 'lo', '=', '[', ']', 'for', 'l', 'in', 'li', ':', 'if', 'isinstance', '(', 'l', ',', 'list', ')', ':', 'lo', '.', 'extend', '(', 'l', ')', 'elif', 'isinstance', '(', 'l', ',', 'set', ')', ':', 'lo', '.', 'append', '(', 'l', ')', 'else', ':', 'pass', 'return', 'lo', '# If AS4_PATH attribute is None, returns the given AS_PATH attribute', 'if', 'as4_path_attr', 'is', 'None', ':', 'return', 'as_path_attr', '# If AS_PATH is shorter than AS4_PATH, AS4_PATH should be ignored.', 'if', 'as_path_attr', '.', 'get_as_path_len', '(', ')', '<', 'as4_path_attr', '.', 'get_as_path_len', '(', ')', ':', 'return', 'as_path_attr', 'org_as_path_list', '=', '_listify', '(', 'as_path_attr', '.', 'path_seg_list', ')', 'as4_path_list', '=', '_listify', '(', 'as4_path_attr', '.', 'path_seg_list', ')', '# Reverse to compare backward.', 'org_as_path_list', '.', 'reverse', '(', ')', 'as4_path_list', '.', 'reverse', '(', ')', 'new_as_path_list', '=', '[', ']', 'tmp_list', '=', '[', ']', 'for', 'as_path', ',', 'as4_path', 'in', 'zip_longest', '(', 'org_as_path_list', ',', 'as4_path_list', ')', ':', 'if', 'as4_path', 'is', 'None', ':', 'if', 'isinstance', '(', 'as_path', ',', 'int', ')', ':', 'tmp_list', '.', 'insert', '(', '0', ',', 'as_path', ')', 'elif', 'isinstance', '(', 'as_path', ',', 'set', ')', ':', 'if', 'tmp_list', ':', 'new_as_path_list', '.', 'insert', '(', '0', ',', 'tmp_list', ')', 'tmp_list', '=', '[', ']', 'new_as_path_list', '.', 'insert', '(', '0', ',', 'as_path', ')', 'else', ':', 'pass', 'elif', 'isinstance', '(', 'as4_path', ',', 'int', ')', ':', 'tmp_list', '.', 'insert', '(', '0', ',', 'as4_path', ')', 'elif', 'isinstance', '(', 'as4_path', ',', 'set', ')', ':', 'if', 'tmp_list', ':', 'new_as_path_list', '.', 'insert', '(', '0', ',', 'tmp_list', ')', 'tmp_list', '=', '[', ']', 'new_as_path_list', '.', 'insert', '(', '0', ',', 'as4_path', ')', 'else', ':', 'pass', 'if', 'tmp_list', ':', 'new_as_path_list', '.', 'insert', '(', '0', ',', 'tmp_list', ')', 'return', 'bgp', '.', 'BGPPathAttributeAsPath', '(', 'new_as_path_list', ')'] | Merge AS_PATH and AS4_PATH attribute instances into
        a single AS_PATH instance. | ['Merge', 'AS_PATH', 'and', 'AS4_PATH', 'attribute', 'instances', 'into', 'a', 'single', 'AS_PATH', 'instance', '.'] | train | https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/peer.py#L852-L914 |
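The backward zip at the heart of the merge can be shown in miniature: after reversing both lists, positions missing from the shorter AS4_PATH fall back to the original AS_PATH entry, so trailing hops pick up their 4-byte values while leading hops are kept. The AS numbers below are illustrative assumptions:

    from itertools import zip_longest

    as_path = [64512, 23456, 23456]   # 2-byte path; 23456 is the AS_TRANS placeholder
    as4_path = [70000, 80000]         # true 4-byte values for the last two hops

    merged = []
    for old, new in zip_longest(reversed(as_path), reversed(as4_path)):
        merged.insert(0, old if new is None else new)
    print(merged)  # [64512, 70000, 80000]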
7,872 | jessamynsmith/pipreq | pipreq/command.py | Command.determine_extra_packages | def determine_extra_packages(self, packages):
""" Return all packages that are installed, but missing from "packages".
Return value is a tuple of the package names """
args = [
"pip",
"freeze",
]
installed = subprocess.check_output(args, universal_newlines=True)
installed_list = set()
lines = installed.strip().split('\n')
for (package, version) in self._parse_requirements(lines):
installed_list.add(package)
package_list = set()
for (package, version) in self._parse_requirements(packages.readlines()):
package_list.add(package)
removal_list = installed_list - package_list
return tuple(removal_list) | python | def determine_extra_packages(self, packages):
""" Return all packages that are installed, but missing from "packages".
Return value is a tuple of the package names """
args = [
"pip",
"freeze",
]
installed = subprocess.check_output(args, universal_newlines=True)
installed_list = set()
lines = installed.strip().split('\n')
for (package, version) in self._parse_requirements(lines):
installed_list.add(package)
package_list = set()
for (package, version) in self._parse_requirements(packages.readlines()):
package_list.add(package)
removal_list = installed_list - package_list
return tuple(removal_list) | ['def', 'determine_extra_packages', '(', 'self', ',', 'packages', ')', ':', 'args', '=', '[', '"pip"', ',', '"freeze"', ',', ']', 'installed', '=', 'subprocess', '.', 'check_output', '(', 'args', ',', 'universal_newlines', '=', 'True', ')', 'installed_list', '=', 'set', '(', ')', 'lines', '=', 'installed', '.', 'strip', '(', ')', '.', 'split', '(', "'\\n'", ')', 'for', '(', 'package', ',', 'version', ')', 'in', 'self', '.', '_parse_requirements', '(', 'lines', ')', ':', 'installed_list', '.', 'add', '(', 'package', ')', 'package_list', '=', 'set', '(', ')', 'for', '(', 'package', ',', 'version', ')', 'in', 'self', '.', '_parse_requirements', '(', 'packages', '.', 'readlines', '(', ')', ')', ':', 'package_list', '.', 'add', '(', 'package', ')', 'removal_list', '=', 'installed_list', '-', 'package_list', 'return', 'tuple', '(', 'removal_list', ')'] | Return all packages that are installed, but missing from "packages".
Return value is a tuple of the package names | ['Return', 'all', 'packages', 'that', 'are', 'installed', 'but', 'missing', 'from', 'packages', '.', 'Return', 'value', 'is', 'a', 'tuple', 'of', 'the', 'package', 'names'] | train | https://github.com/jessamynsmith/pipreq/blob/4081c1238722166445f58ae57e939207f8a6fb83/pipreq/command.py#L228-L248 |
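Editor's sketch: a minimal standalone version of the same idea — diffing `pip freeze` output against a requirements file. The simple "name==version" parsing below is an assumption; the original delegates to its own _parse_requirements helper.

import subprocess

def extra_packages(requirements_path):
    # List what pip currently has installed, one "name==version" per line.
    installed = subprocess.check_output(["pip", "freeze"], universal_newlines=True)
    installed_names = {line.split("==")[0].lower()
                       for line in installed.strip().splitlines() if "==" in line}
    # Collect the package names the requirements file asks for.
    with open(requirements_path) as f:
        wanted = {line.split("==")[0].strip().lower()
                  for line in f if line.strip() and not line.startswith("#")}
    # Installed but not requested -> candidates for removal.
    return tuple(installed_names - wanted)
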
7,873 | ByteInternet/amqpconsumer | amqpconsumer/events.py | EventConsumer.setup_exchange | def setup_exchange(self):
"""Declare the exchange
When completed, the on_exchange_declareok method will be invoked by pika.
"""
logger.debug('Declaring exchange %s', self._exchange)
self._channel.exchange_declare(self.on_exchange_declareok,
self._exchange,
self._exchange_type,
durable=True) | python | def setup_exchange(self):
"""Declare the exchange
When completed, the on_exchange_declareok method will be invoked by pika.
"""
logger.debug('Declaring exchange %s', self._exchange)
self._channel.exchange_declare(self.on_exchange_declareok,
self._exchange,
self._exchange_type,
durable=True) | ['def', 'setup_exchange', '(', 'self', ')', ':', 'logger', '.', 'debug', '(', "'Declaring exchange %s'", ',', 'self', '.', '_exchange', ')', 'self', '.', '_channel', '.', 'exchange_declare', '(', 'self', '.', 'on_exchange_declareok', ',', 'self', '.', '_exchange', ',', 'self', '.', '_exchange_type', ',', 'durable', '=', 'True', ')'] | Declare the exchange
When completed, the on_exchange_declareok method will be invoked by pika. | ['Declare', 'the', 'exchange'] | train | https://github.com/ByteInternet/amqpconsumer/blob/144ab16b3fbba8ad30f8688ae1c58e3a6423b88b/amqpconsumer/events.py#L166-L175 |
7,874 | phoebe-project/phoebe2 | phoebe/parameters/parameters.py | Parameter.is_visible | def is_visible(self):
"""
see also :meth:`visible_if`
:return: whether this parameter is currently visible (and
therefore shown in ParameterSets and visible to :meth:`ParameterSet.filter`)
:rtype: bool
"""
def is_visible_single(visible_if):
# visible_if syntax: [ignore,these]qualifier:value
if visible_if.lower() == 'false':
return False
# otherwise we need to find the parameter we're referencing and check its value
if visible_if[0]=='[':
remove_metawargs, visible_if = visible_if[1:].split(']')
remove_metawargs = remove_metawargs.split(',')
else:
remove_metawargs = []
qualifier, value = visible_if.split(':')
if 'hierarchy.' in qualifier:
# TODO: set specific syntax (hierarchy.get_meshables:2)
# then this needs to do some logic on the hierarchy
hier = self._bundle.hierarchy
if not len(hier.get_value()):
# then hierarchy hasn't been set yet, so we can't do any
# of these tests
return True
method = qualifier.split('.')[1]
if value in ['true', 'True']:
value = True
elif value in ['false', 'False']:
value = False
return getattr(hier, method)(self.component) == value
else:
# the parameter needs to have all the same meta data except qualifier
# TODO: switch this to use self.get_parent_ps ?
metawargs = {k:v for k,v in self.get_meta(ignore=['twig', 'uniquetwig', 'uniqueid']+remove_metawargs).items() if v is not None}
metawargs['qualifier'] = qualifier
# metawargs['twig'] = None
# metawargs['uniquetwig'] = None
# metawargs['uniqueid'] = None
# if metawargs.get('component', None) == '_default':
# metawargs['component'] = None
try:
# this call is quite expensive and bloats every get_parameter(check_visible=True)
param = self._bundle.get_parameter(check_visible=False, check_default=False, **metawargs)
except ValueError:
# let's not let this hold us up - sometimes this can happen when copying
# parameters (from copy_for) in order that the visible_if parameter
# happens later
logger.debug("parameter not found when trying to determine if visible, {}".format(metawargs))
return True
#~ print "***", qualifier, param.qualifier, param.get_value(), value
if isinstance(param, BoolParameter):
if value in ['true', 'True']:
value = True
elif value in ['false', 'False']:
value = False
if isinstance(value, str) and value[0] in ['!', '~']:
return param.get_value() != value[1:]
elif value=='<notempty>':
return len(param.get_value()) > 0
else:
return param.get_value() == value
if self.visible_if is None:
return True
if not self._bundle:
# then we may not be able to do the check, for now let's just return True
return True
return np.all([is_visible_single(visible_if_i) for visible_if_i in self.visible_if.split(',')]) | python | def is_visible(self):
"""
see also :meth:`visible_if`
:return: whether this parameter is currently visible (and
therefore shown in ParameterSets and visible to :meth:`ParameterSet.filter`)
:rtype: bool
"""
def is_visible_single(visible_if):
# visible_if syntax: [ignore,these]qualifier:value
if visible_if.lower() == 'false':
return False
# otherwise we need to find the parameter we're referencing and check its value
if visible_if[0]=='[':
remove_metawargs, visible_if = visible_if[1:].split(']')
remove_metawargs = remove_metawargs.split(',')
else:
remove_metawargs = []
qualifier, value = visible_if.split(':')
if 'hierarchy.' in qualifier:
# TODO: set specific syntax (hierarchy.get_meshables:2)
# then this needs to do some logic on the hierarchy
hier = self._bundle.hierarchy
if not len(hier.get_value()):
# then hierarchy hasn't been set yet, so we can't do any
# of these tests
return True
method = qualifier.split('.')[1]
if value in ['true', 'True']:
value = True
elif value in ['false', 'False']:
value = False
return getattr(hier, method)(self.component) == value
else:
# the parameter needs to have all the same meta data except qualifier
# TODO: switch this to use self.get_parent_ps ?
metawargs = {k:v for k,v in self.get_meta(ignore=['twig', 'uniquetwig', 'uniqueid']+remove_metawargs).items() if v is not None}
metawargs['qualifier'] = qualifier
# metawargs['twig'] = None
# metawargs['uniquetwig'] = None
# metawargs['uniqueid'] = None
# if metawargs.get('component', None) == '_default':
# metawargs['component'] = None
try:
# this call is quite expensive and bloats every get_parameter(check_visible=True)
param = self._bundle.get_parameter(check_visible=False, check_default=False, **metawargs)
except ValueError:
# let's not let this hold us up - sometimes this can happen when copying
# parameters (from copy_for) in order that the visible_if parameter
# happens later
logger.debug("parameter not found when trying to determine if visible, {}".format(metawargs))
return True
#~ print "***", qualifier, param.qualifier, param.get_value(), value
if isinstance(param, BoolParameter):
if value in ['true', 'True']:
value = True
elif value in ['false', 'False']:
value = False
if isinstance(value, str) and value[0] in ['!', '~']:
return param.get_value() != value[1:]
elif value=='<notempty>':
return len(param.get_value()) > 0
else:
return param.get_value() == value
if self.visible_if is None:
return True
if not self._bundle:
# then we may not be able to do the check, for now let's just return True
return True
return np.all([is_visible_single(visible_if_i) for visible_if_i in self.visible_if.split(',')]) | ['def', 'is_visible', '(', 'self', ')', ':', 'def', 'is_visible_single', '(', 'visible_if', ')', ':', '# visible_if syntax: [ignore,these]qualifier:value', 'if', 'visible_if', '.', 'lower', '(', ')', '==', "'false'", ':', 'return', 'False', "# otherwise we need to find the parameter we're referencing and check its value", 'if', 'visible_if', '[', '0', ']', '==', "'['", ':', 'remove_metawargs', ',', 'visible_if', '=', 'visible_if', '[', '1', ':', ']', '.', 'split', '(', "']'", ')', 'remove_metawargs', '=', 'remove_metawargs', '.', 'split', '(', "','", ')', 'else', ':', 'remove_metawargs', '=', '[', ']', 'qualifier', ',', 'value', '=', 'visible_if', '.', 'split', '(', "':'", ')', 'if', "'hierarchy.'", 'in', 'qualifier', ':', '# TODO: set specific syntax (hierarchy.get_meshables:2)', '# then this needs to do some logic on the hierarchy', 'hier', '=', 'self', '.', '_bundle', '.', 'hierarchy', 'if', 'not', 'len', '(', 'hier', '.', 'get_value', '(', ')', ')', ':', "# then hierarchy hasn't been set yet, so we can't do any", '# of these tests', 'return', 'True', 'method', '=', 'qualifier', '.', 'split', '(', "'.'", ')', '[', '1', ']', 'if', 'value', 'in', '[', "'true'", ',', "'True'", ']', ':', 'value', '=', 'True', 'elif', 'value', 'in', '[', "'false'", ',', "'False'", ']', ':', 'value', '=', 'False', 'return', 'getattr', '(', 'hier', ',', 'method', ')', '(', 'self', '.', 'component', ')', '==', 'value', 'else', ':', '# the parameter needs to have all the same meta data except qualifier', '# TODO: switch this to use self.get_parent_ps ?', 'metawargs', '=', '{', 'k', ':', 'v', 'for', 'k', ',', 'v', 'in', 'self', '.', 'get_meta', '(', 'ignore', '=', '[', "'twig'", ',', "'uniquetwig'", ',', "'uniqueid'", ']', '+', 'remove_metawargs', ')', '.', 'items', '(', ')', 'if', 'v', 'is', 'not', 'None', '}', 'metawargs', '[', "'qualifier'", ']', '=', 'qualifier', "# metawargs['twig'] = None", "# metawargs['uniquetwig'] = None", "# metawargs['uniqueid'] = None", "# if metawargs.get('component', None) == '_default':", "# metawargs['component'] = None", 'try', ':', '# this call is quite expensive and bloats every get_parameter(check_visible=True)', 'param', '=', 'self', '.', '_bundle', '.', 'get_parameter', '(', 'check_visible', '=', 'False', ',', 'check_default', '=', 'False', ',', '*', '*', 'metawargs', ')', 'except', 'ValueError', ':', "# let's not let this hold us up - sometimes this can happen when copying", '# parameters (from copy_for) in order that the visible_if parameter', '# happens later', 'logger', '.', 'debug', '(', '"parameter not found when trying to determine if visible, {}"', '.', 'format', '(', 'metawargs', ')', ')', 'return', 'True', '#~ print "***", qualifier, param.qualifier, param.get_value(), value', 'if', 'isinstance', '(', 'param', ',', 'BoolParameter', ')', ':', 'if', 'value', 'in', '[', "'true'", ',', "'True'", ']', ':', 'value', '=', 'True', 'elif', 'value', 'in', '[', "'false'", ',', "'False'", ']', ':', 'value', '=', 'False', 'if', 'isinstance', '(', 'value', ',', 'str', ')', 'and', 'value', '[', '0', ']', 'in', '[', "'!'", ',', "'~'", ']', ':', 'return', 'param', '.', 'get_value', '(', ')', '!=', 'value', '[', '1', ':', ']', 'elif', 'value', '==', "'<notempty>'", ':', 'return', 'len', '(', 'param', '.', 'get_value', '(', ')', ')', '>', '0', 'else', ':', 'return', 'param', '.', 'get_value', '(', ')', '==', 'value', 'if', 'self', '.', 'visible_if', 'is', 'None', ':', 'return', 'True', 
'if', 'not', 'self', '.', '_bundle', ':', "# then we may not be able to do the check, for now let's just return True", 'return', 'True', 'return', 'np', '.', 'all', '(', '[', 'is_visible_single', '(', 'visible_if_i', ')', 'for', 'visible_if_i', 'in', 'self', '.', 'visible_if', '.', 'split', '(', "','", ')', ']', ')'] | see also :meth:`visible_if`
:return: whether this parameter is currently visible (and
therefore shown in ParameterSets and visible to :meth:`ParameterSet.filter`)
:rtype: bool | ['see', 'also', ':', 'meth', ':', 'visible_if'] | train | https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L3307-L3395 |
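Editor's sketch: the visible_if mini-language used above (comma-joined clauses of the form "[meta,to,ignore]qualifier:value") can be illustrated with a toy parser. Resolving the referenced parameter against a real bundle is replaced here by a plain dict, which is purely an assumption for the sketch; the qualifier names in the asserts are hypothetical.

def parse_clause(clause):
    # Optional "[a,b]" prefix lists meta fields to ignore during lookup.
    ignore = []
    if clause.startswith('['):
        head, clause = clause[1:].split(']')
        ignore = head.split(',')
    qualifier, value = clause.split(':')
    return ignore, qualifier, value

def toy_is_visible(visible_if, values):
    if visible_if is None:
        return True
    # Every comma-separated clause must match (np.all in the original).
    return all(str(values.get(q)) == v
               for _, q, v in map(parse_clause, visible_if.split(',')))

assert toy_is_visible('[component]ld_func:interp', {'ld_func': 'interp'})
assert not toy_is_visible('distortion_method:roche', {'distortion_method': 'none'})
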
7,875 | ansible/molecule | molecule/command/init/role.py | Role.execute | def execute(self):
"""
Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None
"""
role_name = self._command_args['role_name']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
template_directory = ''
if 'template' in self._command_args.keys():
template_directory = self._command_args['template']
else:
template_directory = 'role'
self._process_templates(template_directory, self._command_args,
role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg) | python | def execute(self):
"""
Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None
"""
role_name = self._command_args['role_name']
role_directory = os.getcwd()
msg = 'Initializing new role {}...'.format(role_name)
LOG.info(msg)
if os.path.isdir(role_name):
msg = ('The directory {} exists. '
'Cannot create new role.').format(role_name)
util.sysexit_with_message(msg)
template_directory = ''
if 'template' in self._command_args.keys():
template_directory = self._command_args['template']
else:
template_directory = 'role'
self._process_templates(template_directory, self._command_args,
role_directory)
scenario_base_directory = os.path.join(role_directory, role_name)
templates = [
'scenario/driver/{driver_name}'.format(**self._command_args),
'scenario/verifier/{verifier_name}'.format(**self._command_args),
]
for template in templates:
self._process_templates(template, self._command_args,
scenario_base_directory)
self._process_templates('molecule', self._command_args, role_directory)
role_directory = os.path.join(role_directory, role_name)
msg = 'Initialized role in {} successfully.'.format(role_directory)
LOG.success(msg) | ['def', 'execute', '(', 'self', ')', ':', 'role_name', '=', 'self', '.', '_command_args', '[', "'role_name'", ']', 'role_directory', '=', 'os', '.', 'getcwd', '(', ')', 'msg', '=', "'Initializing new role {}...'", '.', 'format', '(', 'role_name', ')', 'LOG', '.', 'info', '(', 'msg', ')', 'if', 'os', '.', 'path', '.', 'isdir', '(', 'role_name', ')', ':', 'msg', '=', '(', "'The directory {} exists. '", "'Cannot create new role.'", ')', '.', 'format', '(', 'role_name', ')', 'util', '.', 'sysexit_with_message', '(', 'msg', ')', 'template_directory', '=', "''", 'if', "'template'", 'in', 'self', '.', '_command_args', '.', 'keys', '(', ')', ':', 'template_directory', '=', 'self', '.', '_command_args', '[', "'template'", ']', 'else', ':', 'template_directory', '=', "'role'", 'self', '.', '_process_templates', '(', 'template_directory', ',', 'self', '.', '_command_args', ',', 'role_directory', ')', 'scenario_base_directory', '=', 'os', '.', 'path', '.', 'join', '(', 'role_directory', ',', 'role_name', ')', 'templates', '=', '[', "'scenario/driver/{driver_name}'", '.', 'format', '(', '*', '*', 'self', '.', '_command_args', ')', ',', "'scenario/verifier/{verifier_name}'", '.', 'format', '(', '*', '*', 'self', '.', '_command_args', ')', ',', ']', 'for', 'template', 'in', 'templates', ':', 'self', '.', '_process_templates', '(', 'template', ',', 'self', '.', '_command_args', ',', 'scenario_base_directory', ')', 'self', '.', '_process_templates', '(', "'molecule'", ',', 'self', '.', '_command_args', ',', 'role_directory', ')', 'role_directory', '=', 'os', '.', 'path', '.', 'join', '(', 'role_directory', ',', 'role_name', ')', 'msg', '=', "'Initialized role in {} successfully.'", '.', 'format', '(', 'role_directory', ')', 'LOG', '.', 'success', '(', 'msg', ')'] | Execute the actions necessary to perform a `molecule init role` and
returns None.
:return: None | ['Execute', 'the', 'actions', 'necessary', 'to', 'perform', 'a', 'molecule', 'init', 'role', 'and', 'returns', 'None', '.'] | train | https://github.com/ansible/molecule/blob/766dc35b0b0ce498cd5e3a62b40f828742d0d08c/molecule/command/init/role.py#L56-L93 |
7,876 | volfpeter/graphscraper | src/graphscraper/base.py | Graph.add_edge_by_index | def add_edge_by_index(self, source_index: int, target_index: int,
weight: float, save_to_cache: bool = True) -> None:
"""
Adds an edge between the nodes with the specified indices to the graph.
Arguments:
source_index (int): The index of the source node of the edge to add.
target_index (int): The index of the target node of the edge to add.
weight (float): The weight of the edge.
save_to_cache (bool): Whether the edge should be saved to the local database. This
argument is necessary (and `False`) when we load edges from
the local cache.
"""
source: Node = self._nodes.get_node(source_index)
target: Node = self._nodes.get_node(target_index)
if source is None or target is None:
return
self.add_edge(
source=source,
target=target,
weight=weight,
save_to_cache=save_to_cache
) | python | def add_edge_by_index(self, source_index: int, target_index: int,
weight: float, save_to_cache: bool = True) -> None:
"""
Adds an edge between the nodes with the specified indices to the graph.
Arguments:
source_index (int): The index of the source node of the edge to add.
target_index (int): The index of the target node of the edge to add.
weight (float): The weight of the edge.
save_to_cache (bool): Whether the edge should be saved to the local database. This
argument is necessary (and `False`) when we load edges from
the local cache.
"""
source: Node = self._nodes.get_node(source_index)
target: Node = self._nodes.get_node(target_index)
if source is None or target is None:
return
self.add_edge(
source=source,
target=target,
weight=weight,
save_to_cache=save_to_cache
) | ['def', 'add_edge_by_index', '(', 'self', ',', 'source_index', ':', 'int', ',', 'target_index', ':', 'int', ',', 'weight', ':', 'float', ',', 'save_to_cache', ':', 'bool', '=', 'True', ')', '->', 'None', ':', 'source', ':', 'Node', '=', 'self', '.', '_nodes', '.', 'get_node', '(', 'source_index', ')', 'target', ':', 'Node', '=', 'self', '.', '_nodes', '.', 'get_node', '(', 'target_index', ')', 'if', 'source', 'is', 'None', 'or', 'target', 'is', 'None', ':', 'return', 'self', '.', 'add_edge', '(', 'source', '=', 'source', ',', 'target', '=', 'target', ',', 'weight', '=', 'weight', ',', 'save_to_cache', '=', 'save_to_cache', ')'] | Adds an edge between the nodes with the specified indices to the graph.
Arguments:
source_index (int): The index of the source node of the edge to add.
target_index (int): The index of the target node of the edge to add.
weight (float): The weight of the edge.
save_to_cache (bool): Whether the edge should be saved to the local database. This
argument is necessary (and `False`) when we load edges from
the local cache. | ['Adds', 'an', 'edge', 'between', 'the', 'nodes', 'with', 'the', 'specified', 'indices', 'to', 'the', 'graph', '.', 'Arguments', ':', 'source_index', '(', 'int', ')', ':', 'The', 'index', 'of', 'the', 'source', 'node', 'of', 'the', 'edge', 'to', 'add', '.', 'target_index', '(', 'int', ')', ':', 'The', 'index', 'of', 'the', 'target', 'node', 'of', 'the', 'edge', 'to', 'add', '.', 'weight', '(', 'float', ')', ':', 'The', 'weight', 'of', 'the', 'edge', '.', 'save_to_cache', '(', 'bool', ')', ':', 'Whether', 'the', 'edge', 'should', 'be', 'saved', 'to', 'the', 'local', 'database', '.', 'This', 'argument', 'is', 'necessary', '(', 'and', 'False', ')', 'when', 'we', 'load', 'edges', 'from', 'the', 'local', 'cache', '.'] | train | https://github.com/volfpeter/graphscraper/blob/11d407509956a282ee25190ed6491a162fc0fe7f/src/graphscraper/base.py#L658-L681 |
7,877 | rmorshea/spectate | spectate/mvc/base.py | Control.after | def after(self, callback: Union[Callable, str]) -> "Control":
"""Register a control method that reacts after the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
"""
if isinstance(callback, Control):
callback = callback._after
self._after = callback
return self | python | def after(self, callback: Union[Callable, str]) -> "Control":
"""Register a control method that reacts after the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing).
"""
if isinstance(callback, Control):
callback = callback._after
self._after = callback
return self | ['def', 'after', '(', 'self', ',', 'callback', ':', 'Union', '[', 'Callable', ',', 'str', ']', ')', '->', '"Control"', ':', 'if', 'isinstance', '(', 'callback', ',', 'Control', ')', ':', 'callback', '=', 'callback', '.', '_after', 'self', '.', '_after', '=', 'callback', 'return', 'self'] | Register a control method that reacts after the trigger method is called.
Parameters:
callback:
The control method. If given as a callable, then that function will be
used as the callback. If given as a string, then the control will look
up a method with that name when reacting (useful when subclassing). | ['Register', 'a', 'control', 'method', 'that', 'reacts', 'after', 'the', 'trigger', 'method', 'is', 'called', '.'] | train | https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/base.py#L151-L163 |
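Editor's sketch: the callable-or-method-name registration the docstring describes, on an invented ToyControl class. This is not spectate's actual internals beyond what the docstring states; the late string lookup is the point being illustrated.

class ToyControl:
    def __init__(self):
        self._after = None
    def after(self, callback):
        # Accept a callable directly, or a method name to look up later.
        self._after = callback
        return self  # chainable, like the original
    def fire(self, owner):
        cb = self._after
        if isinstance(cb, str):        # late binding, useful when subclassing
            cb = getattr(owner, cb)
        if cb is not None:
            cb()

class Owner:
    def done(self):
        print("after-callback ran")

ToyControl().after("done").fire(Owner())
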
7,878 | LettError/MutatorMath | Lib/mutatorMath/ufo/document.py | DesignSpaceDocumentReader.readWarp | def readWarp(self):
""" Read the warp element
::
<warp>
<axis name="weight">
<map input="0" output="0" />
<map input="500" output="200" />
<map input="1000" output="1000" />
</axis>
</warp>
"""
warpDict = {}
for warpAxisElement in self.root.findall(".warp/axis"):
axisName = warpAxisElement.attrib.get("name")
warpDict[axisName] = []
for warpPoint in warpAxisElement.findall(".map"):
inputValue = float(warpPoint.attrib.get("input"))
outputValue = float(warpPoint.attrib.get("output"))
warpDict[axisName].append((inputValue, outputValue))
self.warpDict = warpDict | python | def readWarp(self):
""" Read the warp element
::
<warp>
<axis name="weight">
<map input="0" output="0" />
<map input="500" output="200" />
<map input="1000" output="1000" />
</axis>
</warp>
"""
warpDict = {}
for warpAxisElement in self.root.findall(".warp/axis"):
axisName = warpAxisElement.attrib.get("name")
warpDict[axisName] = []
for warpPoint in warpAxisElement.findall(".map"):
inputValue = float(warpPoint.attrib.get("input"))
outputValue = float(warpPoint.attrib.get("output"))
warpDict[axisName].append((inputValue, outputValue))
self.warpDict = warpDict | ['def', 'readWarp', '(', 'self', ')', ':', 'warpDict', '=', '{', '}', 'for', 'warpAxisElement', 'in', 'self', '.', 'root', '.', 'findall', '(', '".warp/axis"', ')', ':', 'axisName', '=', 'warpAxisElement', '.', 'attrib', '.', 'get', '(', '"name"', ')', 'warpDict', '[', 'axisName', ']', '=', '[', ']', 'for', 'warpPoint', 'in', 'warpAxisElement', '.', 'findall', '(', '".map"', ')', ':', 'inputValue', '=', 'float', '(', 'warpPoint', '.', 'attrib', '.', 'get', '(', '"input"', ')', ')', 'outputValue', '=', 'float', '(', 'warpPoint', '.', 'attrib', '.', 'get', '(', '"output"', ')', ')', 'warpDict', '[', 'axisName', ']', '.', 'append', '(', '(', 'inputValue', ',', 'outputValue', ')', ')', 'self', '.', 'warpDict', '=', 'warpDict'] | Read the warp element
::
<warp>
<axis name="weight">
<map input="0" output="0" />
<map input="500" output="200" />
<map input="1000" output="1000" />
</axis>
</warp> | ['Read', 'the', 'warp', 'element'] | train | https://github.com/LettError/MutatorMath/blob/10318fc4e7c9cee9df6130826829baea3054a42b/Lib/mutatorMath/ufo/document.py#L457-L478 |
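Editor's sketch: a self-contained variant of the warp parsing shown above, using the standard-library ElementTree directly. The original method works on the reader's already-parsed designspace root, so building the tree from a string here is an assumption.

import xml.etree.ElementTree as ET

def read_warp(xml_text):
    root = ET.fromstring(xml_text)
    warp = {}
    for axis in root.findall(".//warp/axis"):
        name = axis.attrib["name"]
        # Each <map> pair becomes an (input, output) tuple, as above.
        warp[name] = [(float(m.attrib["input"]), float(m.attrib["output"]))
                      for m in axis.findall("map")]
    return warp

doc = """<designspace><warp><axis name="weight">
<map input="0" output="0"/><map input="500" output="200"/>
<map input="1000" output="1000"/></axis></warp></designspace>"""
assert read_warp(doc)["weight"][1] == (500.0, 200.0)
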
7,879 | twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_real_time_statistics.py | TaskQueueRealTimeStatisticsPage.get_instance | def get_instance(self, payload):
"""
Build an instance of TaskQueueRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
"""
return TaskQueueRealTimeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_queue_sid=self._solution['task_queue_sid'],
) | python | def get_instance(self, payload):
"""
Build an instance of TaskQueueRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
"""
return TaskQueueRealTimeStatisticsInstance(
self._version,
payload,
workspace_sid=self._solution['workspace_sid'],
task_queue_sid=self._solution['task_queue_sid'],
) | ['def', 'get_instance', '(', 'self', ',', 'payload', ')', ':', 'return', 'TaskQueueRealTimeStatisticsInstance', '(', 'self', '.', '_version', ',', 'payload', ',', 'workspace_sid', '=', 'self', '.', '_solution', '[', "'workspace_sid'", ']', ',', 'task_queue_sid', '=', 'self', '.', '_solution', '[', "'task_queue_sid'", ']', ',', ')'] | Build an instance of TaskQueueRealTimeStatisticsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance
:rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_real_time_statistics.TaskQueueRealTimeStatisticsInstance | ['Build', 'an', 'instance', 'of', 'TaskQueueRealTimeStatisticsInstance'] | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_real_time_statistics.py#L92-L106 |
7,880 | cjdrake/pyeda | pyeda/util.py | parity | def parity(num: int) -> int:
"""Return the parity of a non-negative integer.
For example, here are the parities of the first ten integers:
>>> [parity(n) for n in range(10)]
[0, 1, 1, 0, 1, 0, 0, 1, 1, 0]
This function is undefined for negative integers:
>>> parity(-1)
Traceback (most recent call last):
...
ValueError: expected num >= 0
"""
if num < 0:
raise ValueError("expected num >= 0")
par = 0
while num:
par ^= (num & 1)
num >>= 1
return par | python | def parity(num: int) -> int:
"""Return the parity of a non-negative integer.
For example, here are the parities of the first ten integers:
>>> [parity(n) for n in range(10)]
[0, 1, 1, 0, 1, 0, 0, 1, 1, 0]
This function is undefined for negative integers:
>>> parity(-1)
Traceback (most recent call last):
...
ValueError: expected num >= 0
"""
if num < 0:
raise ValueError("expected num >= 0")
par = 0
while num:
par ^= (num & 1)
num >>= 1
return par | ['def', 'parity', '(', 'num', ':', 'int', ')', '->', 'int', ':', 'if', 'num', '<', '0', ':', 'raise', 'ValueError', '(', '"expected num >= 0"', ')', 'par', '=', '0', 'while', 'num', ':', 'par', '^=', '(', 'num', '&', '1', ')', 'num', '>>=', '1', 'return', 'par'] | Return the parity of a non-negative integer.
For example, here are the parities of the first ten integers:
>>> [parity(n) for n in range(10)]
[0, 1, 1, 0, 1, 0, 0, 1, 1, 0]
This function is undefined for negative integers:
>>> parity(-1)
Traceback (most recent call last):
...
ValueError: expected num >= 0 | ['Return', 'the', 'parity', 'of', 'a', 'non', '-', 'negative', 'integer', '.'] | train | https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/util.py#L56-L77 |
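Editor's note: for non-negative integers the XOR loop above is equivalent to taking the popcount mod 2, which makes a handy cross-check.

def parity_popcount(num: int) -> int:
    # Parity of the bit count: same contract as parity() above.
    if num < 0:
        raise ValueError("expected num >= 0")
    return bin(num).count("1") & 1

assert all(parity_popcount(n) == p
           for n, p in enumerate([0, 1, 1, 0, 1, 0, 0, 1, 1, 0]))
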
7,881 | uber/tchannel-python | tchannel/singleton.py | TChannel.reset | def reset(cls, *args, **kwargs):
"""Undo call to prepare, useful for testing."""
cls.local.tchannel = None
cls.args = None
cls.kwargs = None
cls.prepared = False | python | def reset(cls, *args, **kwargs):
"""Undo call to prepare, useful for testing."""
cls.local.tchannel = None
cls.args = None
cls.kwargs = None
cls.prepared = False | ['def', 'reset', '(', 'cls', ',', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'cls', '.', 'local', '.', 'tchannel', '=', 'None', 'cls', '.', 'args', '=', 'None', 'cls', '.', 'kwargs', '=', 'None', 'cls', '.', 'prepared', '=', 'False'] | Undo call to prepare, useful for testing. | ['Undo', 'call', 'to', 'prepare', 'useful', 'for', 'testing', '.'] | train | https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/singleton.py#L54-L59 |
7,882 | PyGithub/PyGithub | github/Issue.py | Issue.get_events | def get_events(self):
"""
:calls: `GET /repos/:owner/:repo/issues/:issue_number/events <http://developer.github.com/v3/issues/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
"""
return github.PaginatedList.PaginatedList(
github.IssueEvent.IssueEvent,
self._requester,
self.url + "/events",
None,
headers={'Accept': Consts.mediaTypeLockReasonPreview}
) | python | def get_events(self):
"""
:calls: `GET /repos/:owner/:repo/issues/:issue_number/events <http://developer.github.com/v3/issues/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent`
"""
return github.PaginatedList.PaginatedList(
github.IssueEvent.IssueEvent,
self._requester,
self.url + "/events",
None,
headers={'Accept': Consts.mediaTypeLockReasonPreview}
) | ['def', 'get_events', '(', 'self', ')', ':', 'return', 'github', '.', 'PaginatedList', '.', 'PaginatedList', '(', 'github', '.', 'IssueEvent', '.', 'IssueEvent', ',', 'self', '.', '_requester', ',', 'self', '.', 'url', '+', '"/events"', ',', 'None', ',', 'headers', '=', '{', "'Accept'", ':', 'Consts', '.', 'mediaTypeLockReasonPreview', '}', ')'] | :calls: `GET /repos/:owner/:repo/issues/:issue_number/events <http://developer.github.com/v3/issues/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.IssueEvent.IssueEvent` | [':', 'calls', ':', 'GET', '/', 'repos', '/', ':', 'owner', '/', ':', 'repo', '/', 'issues', '/', ':', 'issue_number', '/', 'events', '<http', ':', '//', 'developer', '.', 'github', '.', 'com', '/', 'v3', '/', 'issues', '/', 'events', '>', '_', ':', 'rtype', ':', ':', 'class', ':', 'github', '.', 'PaginatedList', '.', 'PaginatedList', 'of', ':', 'class', ':', 'github', '.', 'IssueEvent', '.', 'IssueEvent'] | train | https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Issue.py#L391-L402 |
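Editor's sketch: possible usage of the paginated call above. The token and the issue number are placeholders, and network access is required.

from github import Github

gh = Github("<access-token>")  # placeholder token
issue = gh.get_repo("PyGithub/PyGithub").get_issue(number=1)
for event in issue.get_events():  # PaginatedList pages through results lazily
    print(event.event, event.created_at)
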
7,883 | wandb/client | wandb/file_pusher.py | FilePusher.rename_file | def rename_file(self, old_save_name, new_save_name, new_path):
"""This only updates the name and path we use to track the file's size
and upload progress. Doesn't rename it on the back end or make us
upload from anywhere else.
"""
if old_save_name in self._files:
del self._files[old_save_name]
self.update_file(new_save_name, new_path) | python | def rename_file(self, old_save_name, new_save_name, new_path):
"""This only updates the name and path we use to track the file's size
and upload progress. Doesn't rename it on the back end or make us
upload from anywhere else.
"""
if old_save_name in self._files:
del self._files[old_save_name]
self.update_file(new_save_name, new_path) | ['def', 'rename_file', '(', 'self', ',', 'old_save_name', ',', 'new_save_name', ',', 'new_path', ')', ':', 'if', 'old_save_name', 'in', 'self', '.', '_files', ':', 'del', 'self', '.', '_files', '[', 'old_save_name', ']', 'self', '.', 'update_file', '(', 'new_save_name', ',', 'new_path', ')'] | This only updates the name and path we use to track the file's size
and upload progress. Doesn't rename it on the back end or make us
upload from anywhere else. | ['This', 'only', 'updates', 'the', 'name', 'and', 'path', 'we', 'use', 'to', 'track', 'the', 'file', 's', 'size', 'and', 'upload', 'progress', '.', 'Doesn', 't', 'rename', 'it', 'on', 'the', 'back', 'end', 'or', 'make', 'us', 'upload', 'from', 'anywhere', 'else', '.'] | train | https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/file_pusher.py#L103-L110 |
7,884 | saltstack/salt | salt/client/__init__.py | LocalClient.cmd_iter_no_block | def cmd_iter_no_block(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
kwarg=None,
show_jid=False,
verbose=False,
**kwargs):
'''
Yields the individual minion returns as they come in, or None
when no returns are available.
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
:returns: A generator yielding the individual minion returns, or None
when no returns are available. This allows for actions to be
injected in between minion returns.
.. code-block:: python
>>> ret = local.cmd_iter_no_block('*', 'test.ping')
>>> for i in ret:
... print(i)
None
{'jerry': {'ret': True}}
{'dave': {'ret': True}}
None
{'stewart': {'ret': True}}
'''
was_listening = self.event.cpub
try:
pub_data = self.run_job(
tgt,
fun,
arg,
tgt_type,
ret,
timeout,
kwarg=kwarg,
listen=True,
**kwargs)
if not pub_data:
yield pub_data
else:
for fn_ret in self.get_iter_returns(pub_data['jid'],
pub_data['minions'],
timeout=timeout,
tgt=tgt,
tgt_type=tgt_type,
block=False,
**kwargs):
if fn_ret and any([show_jid, verbose]):
for minion in fn_ret:
fn_ret[minion]['jid'] = pub_data['jid']
yield fn_ret
self._clean_up_subscriptions(pub_data['jid'])
finally:
if not was_listening:
self.event.close_pub() | python | def cmd_iter_no_block(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
ret='',
kwarg=None,
show_jid=False,
verbose=False,
**kwargs):
'''
Yields the individual minion returns as they come in, or None
when no returns are available.
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
:returns: A generator yielding the individual minion returns, or None
when no returns are available. This allows for actions to be
injected in between minion returns.
.. code-block:: python
>>> ret = local.cmd_iter_no_block('*', 'test.ping')
>>> for i in ret:
... print(i)
None
{'jerry': {'ret': True}}
{'dave': {'ret': True}}
None
{'stewart': {'ret': True}}
'''
was_listening = self.event.cpub
try:
pub_data = self.run_job(
tgt,
fun,
arg,
tgt_type,
ret,
timeout,
kwarg=kwarg,
listen=True,
**kwargs)
if not pub_data:
yield pub_data
else:
for fn_ret in self.get_iter_returns(pub_data['jid'],
pub_data['minions'],
timeout=timeout,
tgt=tgt,
tgt_type=tgt_type,
block=False,
**kwargs):
if fn_ret and any([show_jid, verbose]):
for minion in fn_ret:
fn_ret[minion]['jid'] = pub_data['jid']
yield fn_ret
self._clean_up_subscriptions(pub_data['jid'])
finally:
if not was_listening:
self.event.close_pub() | ['def', 'cmd_iter_no_block', '(', 'self', ',', 'tgt', ',', 'fun', ',', 'arg', '=', '(', ')', ',', 'timeout', '=', 'None', ',', 'tgt_type', '=', "'glob'", ',', 'ret', '=', "''", ',', 'kwarg', '=', 'None', ',', 'show_jid', '=', 'False', ',', 'verbose', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'was_listening', '=', 'self', '.', 'event', '.', 'cpub', 'try', ':', 'pub_data', '=', 'self', '.', 'run_job', '(', 'tgt', ',', 'fun', ',', 'arg', ',', 'tgt_type', ',', 'ret', ',', 'timeout', ',', 'kwarg', '=', 'kwarg', ',', 'listen', '=', 'True', ',', '*', '*', 'kwargs', ')', 'if', 'not', 'pub_data', ':', 'yield', 'pub_data', 'else', ':', 'for', 'fn_ret', 'in', 'self', '.', 'get_iter_returns', '(', 'pub_data', '[', "'jid'", ']', ',', 'pub_data', '[', "'minions'", ']', ',', 'timeout', '=', 'timeout', ',', 'tgt', '=', 'tgt', ',', 'tgt_type', '=', 'tgt_type', ',', 'block', '=', 'False', ',', '*', '*', 'kwargs', ')', ':', 'if', 'fn_ret', 'and', 'any', '(', '[', 'show_jid', ',', 'verbose', ']', ')', ':', 'for', 'minion', 'in', 'fn_ret', ':', 'fn_ret', '[', 'minion', ']', '[', "'jid'", ']', '=', 'pub_data', '[', "'jid'", ']', 'yield', 'fn_ret', 'self', '.', '_clean_up_subscriptions', '(', 'pub_data', '[', "'jid'", ']', ')', 'finally', ':', 'if', 'not', 'was_listening', ':', 'self', '.', 'event', '.', 'close_pub', '(', ')'] | Yields the individual minion returns as they come in, or None
when no returns are available.
The function signature is the same as :py:meth:`cmd` with the
following exceptions.
:returns: A generator yielding the individual minion returns, or None
when no returns are available. This allows for actions to be
injected in between minion returns.
.. code-block:: python
>>> ret = local.cmd_iter_no_block('*', 'test.ping')
>>> for i in ret:
... print(i)
None
{'jerry': {'ret': True}}
{'dave': {'ret': True}}
None
{'stewart': {'ret': True}} | ['Yields', 'the', 'individual', 'minion', 'returns', 'as', 'they', 'come', 'in', 'or', 'None', 'when', 'no', 'returns', 'are', 'available', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L886-L952 |
7,885 | abilian/abilian-core | abilian/services/preferences/service.py | PreferenceService.set_preferences | def set_preferences(self, user=None, **kwargs):
"""Set preferences from keyword arguments."""
if user is None:
user = current_user
d = {pref.key: pref for pref in user.preferences}
for k, v in kwargs.items():
if k in d:
d[k].value = v
else:
d[k] = UserPreference(user=user, key=k, value=v)
db.session.add(d[k]) | python | def set_preferences(self, user=None, **kwargs):
"""Set preferences from keyword arguments."""
if user is None:
user = current_user
d = {pref.key: pref for pref in user.preferences}
for k, v in kwargs.items():
if k in d:
d[k].value = v
else:
d[k] = UserPreference(user=user, key=k, value=v)
db.session.add(d[k]) | ['def', 'set_preferences', '(', 'self', ',', 'user', '=', 'None', ',', '*', '*', 'kwargs', ')', ':', 'if', 'user', 'is', 'None', ':', 'user', '=', 'current_user', 'd', '=', '{', 'pref', '.', 'key', ':', 'pref', 'for', 'pref', 'in', 'user', '.', 'preferences', '}', 'for', 'k', ',', 'v', 'in', 'kwargs', '.', 'items', '(', ')', ':', 'if', 'k', 'in', 'd', ':', 'd', '[', 'k', ']', '.', 'value', '=', 'v', 'else', ':', 'd', '[', 'k', ']', '=', 'UserPreference', '(', 'user', '=', 'user', ',', 'key', '=', 'k', ',', 'value', '=', 'v', ')', 'db', '.', 'session', '.', 'add', '(', 'd', '[', 'k', ']', ')'] | Set preferences from keyword arguments. | ['Set', 'preferences', 'from', 'keyword', 'arguments', '.'] | train | https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/services/preferences/service.py#L73-L84 |
7,886 | erikrose/more-itertools | more_itertools/more.py | spy | def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head, chain(head, it) | python | def spy(iterable, n=1):
"""Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5]
"""
it = iter(iterable)
head = take(n, it)
return head, chain(head, it) | ['def', 'spy', '(', 'iterable', ',', 'n', '=', '1', ')', ':', 'it', '=', 'iter', '(', 'iterable', ')', 'head', '=', 'take', '(', 'n', ',', 'it', ')', 'return', 'head', ',', 'chain', '(', 'head', ',', 'it', ')'] | Return a 2-tuple with a list containing the first *n* elements of
*iterable*, and an iterator with the same items as *iterable*.
This allows you to "look ahead" at the items in the iterable without
advancing it.
There is one item in the list by default:
>>> iterable = 'abcdefg'
>>> head, iterable = spy(iterable)
>>> head
['a']
>>> list(iterable)
['a', 'b', 'c', 'd', 'e', 'f', 'g']
You may use unpacking to retrieve items instead of lists:
>>> (head,), iterable = spy('abcdefg')
>>> head
'a'
>>> (first, second), iterable = spy('abcdefg', 2)
>>> first
'a'
>>> second
'b'
The number of items requested can be larger than the number of items in
the iterable:
>>> iterable = [1, 2, 3, 4, 5]
>>> head, iterable = spy(iterable, 10)
>>> head
[1, 2, 3, 4, 5]
>>> list(iterable)
[1, 2, 3, 4, 5] | ['Return', 'a', '2', '-', 'tuple', 'with', 'a', 'list', 'containing', 'the', 'first', '*', 'n', '*', 'elements', 'of', '*', 'iterable', '*', 'and', 'an', 'iterator', 'with', 'the', 'same', 'items', 'as', '*', 'iterable', '*', '.', 'This', 'allows', 'you', 'to', 'look', 'ahead', 'at', 'the', 'items', 'in', 'the', 'iterable', 'without', 'advancing', 'it', '.'] | train | https://github.com/erikrose/more-itertools/blob/6a91b4e25c8e12fcf9fc2b53cf8ee0fba293e6f9/more_itertools/more.py#L834-L874 |
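Editor's note: one practical use of spy() beyond the doctests above — peeking at the first row of a stream to detect a header without consuming it.

from more_itertools import spy

def detect_header(rows):
    head, rows = spy(rows)  # look at one item, keep the stream intact
    return bool(head) and head[0].startswith("#"), rows

has_header, rows = detect_header(iter(["# name", "alice", "bob"]))
assert has_header and next(rows) == "# name"
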
7,887 | singingwolfboy/flask-dance | flask_dance/utils.py | getattrd | def getattrd(obj, name, default=sentinel):
"""
Same as getattr(), but allows dot notation lookup
Source: http://stackoverflow.com/a/14324459
"""
try:
return functools.reduce(getattr, name.split("."), obj)
except AttributeError as e:
if default is not sentinel:
return default
raise | python | def getattrd(obj, name, default=sentinel):
"""
Same as getattr(), but allows dot notation lookup
Source: http://stackoverflow.com/a/14324459
"""
try:
return functools.reduce(getattr, name.split("."), obj)
except AttributeError as e:
if default is not sentinel:
return default
raise | ['def', 'getattrd', '(', 'obj', ',', 'name', ',', 'default', '=', 'sentinel', ')', ':', 'try', ':', 'return', 'functools', '.', 'reduce', '(', 'getattr', ',', 'name', '.', 'split', '(', '"."', ')', ',', 'obj', ')', 'except', 'AttributeError', 'as', 'e', ':', 'if', 'default', 'is', 'not', 'sentinel', ':', 'return', 'default', 'raise'] | Same as getattr(), but allows dot notation lookup
Source: http://stackoverflow.com/a/14324459 | ['Same', 'as', 'getattr', '()', 'but', 'allows', 'dot', 'notation', 'lookup', 'Source', ':', 'http', ':', '//', 'stackoverflow', '.', 'com', '/', 'a', '/', '14324459'] | train | https://github.com/singingwolfboy/flask-dance/blob/87d45328bbdaff833559a6d3da71461fe4579592/flask_dance/utils.py#L69-L79 |
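Editor's sketch: the dotted lookup in action. The helper is re-declared locally (with a plain default instead of the sentinel) so the snippet runs standalone.

import functools
import types

def getattrd(obj, name, default=None):
    try:
        return functools.reduce(getattr, name.split("."), obj)
    except AttributeError:
        return default

obj = types.SimpleNamespace(a=types.SimpleNamespace(b=42))
assert getattrd(obj, "a.b") == 42
assert getattrd(obj, "a.missing", default="n/a") == "n/a"
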
7,888 | DarkEnergySurvey/ugali | ugali/analysis/farm.py | Farm.submit | def submit(self, pixels, queue=None, debug=False, configfile=None):
"""
Submit the likelihood job for the given pixel(s).
"""
# For backwards compatibility
batch = self.config['scan'].get('batch',self.config['batch'])
queue = batch['cluster'] if queue is None else queue
# Need to develop some way to take command line arguments...
self.batch = ugali.utils.batch.batchFactory(queue,**batch['opts'])
self.batch.max_jobs = batch.get('max_jobs',200)
if np.isscalar(pixels): pixels = np.array([pixels])
outdir = mkdir(self.config['output']['likedir'])
logdir = mkdir(join(outdir,'log'))
subdir = mkdir(join(outdir,'sub'))
# Save the current configuation settings; avoid writing
# file multiple times if configfile passed as argument.
if configfile is None:
shutil.copy(self.config.filename,outdir)
configfile = join(outdir,os.path.basename(self.config.filename))
lon,lat = pix2ang(self.nside_likelihood,pixels)
commands = []
chunk = batch['chunk']
istart = 0
logger.info('=== Submit Likelihood ===')
for ii,pix in enumerate(pixels):
msg = ' (%i/%i) pixel=%i nside=%i; (lon, lat) = (%.2f, %.2f)'
msg = msg%(ii+1,len(pixels),pix, self.nside_likelihood,lon[ii],lat[ii])
logger.info(msg)
# Create outfile name
outfile = self.config.likefile%(pix,self.config['coords']['coordsys'].lower())
outbase = os.path.basename(outfile)
jobname = batch['jobname']
# Submission command
sub = not os.path.exists(outfile)
cmd = self.command(outfile,configfile,pix)
commands.append([ii,cmd,lon[ii],lat[ii],sub])
if chunk == 0:
# No chunking
command = cmd
submit = sub
logfile = join(logdir,os.path.splitext(outbase)[0]+'.log')
elif (len(commands)%chunk==0) or (ii+1 == len(pixels)):
# End of chunk, create submission script
commands = np.array(commands,dtype=object)
istart, iend = commands[0][0], commands[-1][0]
subfile = join(subdir,'submit_%08i_%08i.sh'%(istart,iend))
logfile = join(logdir,'submit_%08i_%08i.log'%(istart,iend))
command = "sh %s"%subfile
submit = np.any(commands[:,-1])
if submit: self.write_script(subfile,commands)
else:
# Not end of chunk
continue
commands=[]
# Actual job submission
if not submit:
logger.info(self.skip)
continue
else:
job = self.batch.submit(command,jobname,logfile)
logger.info(" "+job)
time.sleep(0.5) | python | def submit(self, pixels, queue=None, debug=False, configfile=None):
"""
Submit the likelihood job for the given pixel(s).
"""
# For backwards compatibility
batch = self.config['scan'].get('batch',self.config['batch'])
queue = batch['cluster'] if queue is None else queue
# Need to develop some way to take command line arguments...
self.batch = ugali.utils.batch.batchFactory(queue,**batch['opts'])
self.batch.max_jobs = batch.get('max_jobs',200)
if np.isscalar(pixels): pixels = np.array([pixels])
outdir = mkdir(self.config['output']['likedir'])
logdir = mkdir(join(outdir,'log'))
subdir = mkdir(join(outdir,'sub'))
# Save the current configuration settings; avoid writing
# file multiple times if configfile passed as argument.
if configfile is None:
shutil.copy(self.config.filename,outdir)
configfile = join(outdir,os.path.basename(self.config.filename))
lon,lat = pix2ang(self.nside_likelihood,pixels)
commands = []
chunk = batch['chunk']
istart = 0
logger.info('=== Submit Likelihood ===')
for ii,pix in enumerate(pixels):
msg = ' (%i/%i) pixel=%i nside=%i; (lon, lat) = (%.2f, %.2f)'
msg = msg%(ii+1,len(pixels),pix, self.nside_likelihood,lon[ii],lat[ii])
logger.info(msg)
# Create outfile name
outfile = self.config.likefile%(pix,self.config['coords']['coordsys'].lower())
outbase = os.path.basename(outfile)
jobname = batch['jobname']
# Submission command
sub = not os.path.exists(outfile)
cmd = self.command(outfile,configfile,pix)
commands.append([ii,cmd,lon[ii],lat[ii],sub])
if chunk == 0:
# No chunking
command = cmd
submit = sub
logfile = join(logdir,os.path.splitext(outbase)[0]+'.log')
elif (len(commands)%chunk==0) or (ii+1 == len(pixels)):
# End of chunk, create submission script
commands = np.array(commands,dtype=object)
istart, iend = commands[0][0], commands[-1][0]
subfile = join(subdir,'submit_%08i_%08i.sh'%(istart,iend))
logfile = join(logdir,'submit_%08i_%08i.log'%(istart,iend))
command = "sh %s"%subfile
submit = np.any(commands[:,-1])
if submit: self.write_script(subfile,commands)
else:
# Not end of chunk
continue
commands=[]
# Actual job submission
if not submit:
logger.info(self.skip)
continue
else:
job = self.batch.submit(command,jobname,logfile)
logger.info(" "+job)
time.sleep(0.5) | ['def', 'submit', '(', 'self', ',', 'pixels', ',', 'queue', '=', 'None', ',', 'debug', '=', 'False', ',', 'configfile', '=', 'None', ')', ':', '# For backwards compatibility', 'batch', '=', 'self', '.', 'config', '[', "'scan'", ']', '.', 'get', '(', "'batch'", ',', 'self', '.', 'config', '[', "'batch'", ']', ')', 'queue', '=', 'batch', '[', "'cluster'", ']', 'if', 'queue', 'is', 'None', 'else', 'queue', '# Need to develop some way to take command line arguments...', 'self', '.', 'batch', '=', 'ugali', '.', 'utils', '.', 'batch', '.', 'batchFactory', '(', 'queue', ',', '*', '*', 'batch', '[', "'opts'", ']', ')', 'self', '.', 'batch', '.', 'max_jobs', '=', 'batch', '.', 'get', '(', "'max_jobs'", ',', '200', ')', 'if', 'np', '.', 'isscalar', '(', 'pixels', ')', ':', 'pixels', '=', 'np', '.', 'array', '(', '[', 'pixels', ']', ')', 'outdir', '=', 'mkdir', '(', 'self', '.', 'config', '[', "'output'", ']', '[', "'likedir'", ']', ')', 'logdir', '=', 'mkdir', '(', 'join', '(', 'outdir', ',', "'log'", ')', ')', 'subdir', '=', 'mkdir', '(', 'join', '(', 'outdir', ',', "'sub'", ')', ')', '# Save the current configuation settings; avoid writing ', '# file multiple times if configfile passed as argument.', 'if', 'configfile', 'is', 'None', ':', 'shutil', '.', 'copy', '(', 'self', '.', 'config', '.', 'filename', ',', 'outdir', ')', 'configfile', '=', 'join', '(', 'outdir', ',', 'os', '.', 'path', '.', 'basename', '(', 'self', '.', 'config', '.', 'filename', ')', ')', 'lon', ',', 'lat', '=', 'pix2ang', '(', 'self', '.', 'nside_likelihood', ',', 'pixels', ')', 'commands', '=', '[', ']', 'chunk', '=', 'batch', '[', "'chunk'", ']', 'istart', '=', '0', 'logger', '.', 'info', '(', "'=== Submit Likelihood ==='", ')', 'for', 'ii', ',', 'pix', 'in', 'enumerate', '(', 'pixels', ')', ':', 'msg', '=', "' (%i/%i) pixel=%i nside=%i; (lon, lat) = (%.2f, %.2f)'", 'msg', '=', 'msg', '%', '(', 'ii', '+', '1', ',', 'len', '(', 'pixels', ')', ',', 'pix', ',', 'self', '.', 'nside_likelihood', ',', 'lon', '[', 'ii', ']', ',', 'lat', '[', 'ii', ']', ')', 'logger', '.', 'info', '(', 'msg', ')', '# Create outfile name', 'outfile', '=', 'self', '.', 'config', '.', 'likefile', '%', '(', 'pix', ',', 'self', '.', 'config', '[', "'coords'", ']', '[', "'coordsys'", ']', '.', 'lower', '(', ')', ')', 'outbase', '=', 'os', '.', 'path', '.', 'basename', '(', 'outfile', ')', 'jobname', '=', 'batch', '[', "'jobname'", ']', '# Submission command', 'sub', '=', 'not', 'os', '.', 'path', '.', 'exists', '(', 'outfile', ')', 'cmd', '=', 'self', '.', 'command', '(', 'outfile', ',', 'configfile', ',', 'pix', ')', 'commands', '.', 'append', '(', '[', 'ii', ',', 'cmd', ',', 'lon', '[', 'ii', ']', ',', 'lat', '[', 'ii', ']', ',', 'sub', ']', ')', 'if', 'chunk', '==', '0', ':', '# No chunking', 'command', '=', 'cmd', 'submit', '=', 'sub', 'logfile', '=', 'join', '(', 'logdir', ',', 'os', '.', 'path', '.', 'splitext', '(', 'outbase', ')', '[', '0', ']', '+', "'.log'", ')', 'elif', '(', 'len', '(', 'commands', ')', '%', 'chunk', '==', '0', ')', 'or', '(', 'ii', '+', '1', '==', 'len', '(', 'pixels', ')', ')', ':', '# End of chunk, create submission script', 'commands', '=', 'np', '.', 'array', '(', 'commands', ',', 'dtype', '=', 'object', ')', 'istart', ',', 'iend', '=', 'commands', '[', '0', ']', '[', '0', ']', ',', 'commands', '[', '-', '1', ']', '[', '0', ']', 'subfile', '=', 'join', '(', 'subdir', ',', "'submit_%08i_%08i.sh'", '%', '(', 'istart', ',', 'iend', ')', ')', 'logfile', '=', 'join', '(', 'logdir', ',', 
"'submit_%08i_%08i.log'", '%', '(', 'istart', ',', 'iend', ')', ')', 'command', '=', '"sh %s"', '%', 'subfile', 'submit', '=', 'np', '.', 'any', '(', 'commands', '[', ':', ',', '-', '1', ']', ')', 'if', 'submit', ':', 'self', '.', 'write_script', '(', 'subfile', ',', 'commands', ')', 'else', ':', '# Not end of chunk', 'continue', 'commands', '=', '[', ']', '# Actual job submission', 'if', 'not', 'submit', ':', 'logger', '.', 'info', '(', 'self', '.', 'skip', ')', 'continue', 'else', ':', 'job', '=', 'self', '.', 'batch', '.', 'submit', '(', 'command', ',', 'jobname', ',', 'logfile', ')', 'logger', '.', 'info', '(', '" "', '+', 'job', ')', 'time', '.', 'sleep', '(', '0.5', ')'] | Submit the likelihood job for the given pixel(s). | ['Submit', 'the', 'likelihood', 'job', 'for', 'the', 'given', 'pixel', '(', 's', ')', '.'] | train | https://github.com/DarkEnergySurvey/ugali/blob/21e890b4117fc810afb6fb058e8055d564f03382/ugali/analysis/farm.py#L155-L226 |
7,889 | timothydmorton/simpledist | simpledist/distributions.py | Distribution.resample | def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds] | python | def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
"""Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach.
"""
N = int(N)
if minval is None:
if hasattr(self,'minval_cdf'):
minval = self.minval_cdf
else:
minval = self.minval
if maxval is None:
if hasattr(self,'maxval_cdf'):
maxval = self.maxval_cdf
else:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
if log:
vals = np.logspace(log10(minval),log10(maxval),res)
else:
vals = np.linspace(minval,maxval,res)
#sometimes cdf is flat. so ys will need to be uniqued
ys,yinds = np.unique(self.cdf(vals), return_index=True)
vals = vals[yinds]
inds = np.digitize(u,ys)
return vals[inds] | ['def', 'resample', '(', 'self', ',', 'N', ',', 'minval', '=', 'None', ',', 'maxval', '=', 'None', ',', 'log', '=', 'False', ',', 'res', '=', '1e4', ')', ':', 'N', '=', 'int', '(', 'N', ')', 'if', 'minval', 'is', 'None', ':', 'if', 'hasattr', '(', 'self', ',', "'minval_cdf'", ')', ':', 'minval', '=', 'self', '.', 'minval_cdf', 'else', ':', 'minval', '=', 'self', '.', 'minval', 'if', 'maxval', 'is', 'None', ':', 'if', 'hasattr', '(', 'self', ',', "'maxval_cdf'", ')', ':', 'maxval', '=', 'self', '.', 'maxval_cdf', 'else', ':', 'maxval', '=', 'self', '.', 'maxval', 'if', 'maxval', '==', 'np', '.', 'inf', 'or', 'minval', '==', '-', 'np', '.', 'inf', ':', 'raise', 'ValueError', '(', "'must have finite upper and lower bounds to resample. (set minval, maxval kws)'", ')', 'u', '=', 'rand', '.', 'random', '(', 'size', '=', 'N', ')', 'if', 'log', ':', 'vals', '=', 'np', '.', 'logspace', '(', 'log10', '(', 'minval', ')', ',', 'log10', '(', 'maxval', ')', ',', 'res', ')', 'else', ':', 'vals', '=', 'np', '.', 'linspace', '(', 'minval', ',', 'maxval', ',', 'res', ')', '#sometimes cdf is flat. so ys will need to be uniqued', 'ys', ',', 'yinds', '=', 'np', '.', 'unique', '(', 'self', '.', 'cdf', '(', 'vals', ')', ',', 'return_index', '=', 'True', ')', 'vals', '=', 'vals', '[', 'yinds', ']', 'inds', '=', 'np', '.', 'digitize', '(', 'u', ',', 'ys', ')', 'return', 'vals', '[', 'inds', ']'] | Returns random samples generated according to the distribution
Mirrors basic functionality of `rvs` method for `scipy.stats`
random variates. Implemented by mapping uniform numbers onto the
inverse CDF using a closest-matching grid approach.
Parameters
----------
N : int
Number of samples to return
minval,maxval : float, optional
Minimum/maximum values to resample. Should both usually just be
`None`, which will default to `self.minval`/`self.maxval`.
log : bool, optional
Whether grid should be log- or linear-spaced.
res : int, optional
Resolution of CDF grid used.
Returns
-------
values : ndarray
N samples.
Raises
------
ValueError
If maxval/minval are +/- infinity, this doesn't work because of
the grid-based approach. | ['Returns', 'random', 'samples', 'generated', 'according', 'to', 'the', 'distribution'] | train | https://github.com/timothydmorton/simpledist/blob/d9807c90a935bd125213445ffed6255af558f1ca/simpledist/distributions.py#L296-L357 |
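The grid-based inverse-CDF sampling in the row above can be exercised standalone; a minimal sketch, assuming `scipy.stats.norm` as the distribution — the `grid_resample` helper is hypothetical, not part of simpledist:

import numpy as np
from scipy import stats

def grid_resample(cdf, minval, maxval, N, res=10000):
    vals = np.linspace(minval, maxval, int(res))          # finite grid over the support
    ys, yinds = np.unique(cdf(vals), return_index=True)   # drop flat stretches of the CDF
    vals = vals[yinds]
    u = np.random.random(size=int(N))                     # uniform draws in [0, 1)
    inds = np.clip(np.digitize(u, ys), 0, len(vals) - 1)  # clip guards the upper edge
    return vals[inds]

samples = grid_resample(stats.norm.cdf, -5, 5, 1000)

The `int(res)` cast matters on newer NumPy, which rejects a float sample count for linspace, and the clip covers the rare draw that lands above the last tabulated CDF value.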
7,890 | suds-community/suds | suds/mx/literal.py | Typed.skip | def skip(self, content):
"""
Get whether to skip this I{content}.
Should be skipped when the content is optional and value is either None
or an empty list.
@param content: Content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool
"""
if self.optional(content):
v = content.value
if v is None:
return True
if isinstance(v, (list, tuple)) and not v:
return True
return False | python | def skip(self, content):
"""
Get whether to skip this I{content}.
Should be skipped when the content is optional and value is either None
or an empty list.
@param content: Content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool
"""
if self.optional(content):
v = content.value
if v is None:
return True
if isinstance(v, (list, tuple)) and not v:
return True
return False | ['def', 'skip', '(', 'self', ',', 'content', ')', ':', 'if', 'self', '.', 'optional', '(', 'content', ')', ':', 'v', '=', 'content', '.', 'value', 'if', 'v', 'is', 'None', ':', 'return', 'True', 'if', 'isinstance', '(', 'v', ',', '(', 'list', ',', 'tuple', ')', ')', 'and', 'not', 'v', ':', 'return', 'True', 'return', 'False'] | Get whether to skip this I{content}.
Should be skipped when the content is optional and value is either None
or an empty list.
@param content: Content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool | ['Get', 'whether', 'to', 'skip', 'this', 'I', '{', 'content', '}', '.'] | train | https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/mx/literal.py#L207-L226 |
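The skip predicate is easy to exercise in isolation; a minimal sketch with stand-in objects — `FakeContent` and the always-optional flag are hypothetical, not part of suds:

class FakeContent:
    def __init__(self, value):
        self.value = value

def skip(content, optional=True):
    # Mirrors Typed.skip: optional content whose value is None or an
    # empty sequence is skipped during marshalling.
    if optional:
        v = content.value
        if v is None:
            return True
        if isinstance(v, (list, tuple)) and not v:
            return True
    return False

assert skip(FakeContent(None)) is True
assert skip(FakeContent([])) is True
assert skip(FakeContent(['x'])) is False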
7,891 | PMEAL/OpenPNM | openpnm/algorithms/Porosimetry.py | Porosimetry.results | def results(self, Pc):
r"""
"""
p_inv, t_inv = super().results(Pc).values()
phase = self.project.find_phase(self)
quantity = self.settings['quantity'].split('.')[-1]
lpf = np.array([1])
if self.settings['pore_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['pore.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['pore_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
lpf = phase[self.settings['pore_partial_filling']]
# Calculate filled throat volumes
ltf = np.array([1])
if self.settings['throat_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['throat.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['throat_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
ltf = phase[self.settings['throat_partial_filling']]
p_inv = p_inv*lpf
t_inv = t_inv*ltf
return {'pore.occupancy': p_inv, 'throat.occupancy': t_inv} | python | def results(self, Pc):
r"""
"""
p_inv, t_inv = super().results(Pc).values()
phase = self.project.find_phase(self)
quantity = self.settings['quantity'].split('.')[-1]
lpf = np.array([1])
if self.settings['pore_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['pore.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['pore_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
lpf = phase[self.settings['pore_partial_filling']]
# Calculate filled throat volumes
ltf = np.array([1])
if self.settings['throat_partial_filling']:
# Set pressure on phase to current capillary pressure
phase['throat.'+quantity] = Pc
# Regenerate corresponding physics model
for phys in self.project.find_physics(phase=phase):
phys.regenerate_models(self.settings['throat_partial_filling'])
# Fetch partial filling fraction from phase object (0->1)
ltf = phase[self.settings['throat_partial_filling']]
p_inv = p_inv*lpf
t_inv = t_inv*ltf
return {'pore.occupancy': p_inv, 'throat.occupancy': t_inv} | ['def', 'results', '(', 'self', ',', 'Pc', ')', ':', 'p_inv', ',', 't_inv', '=', 'super', '(', ')', '.', 'results', '(', 'Pc', ')', '.', 'values', '(', ')', 'phase', '=', 'self', '.', 'project', '.', 'find_phase', '(', 'self', ')', 'quantity', '=', 'self', '.', 'settings', '[', "'quantity'", ']', '.', 'split', '(', "'.'", ')', '[', '-', '1', ']', 'lpf', '=', 'np', '.', 'array', '(', '[', '1', ']', ')', 'if', 'self', '.', 'settings', '[', "'pore_partial_filling'", ']', ':', '# Set pressure on phase to current capillary pressure', 'phase', '[', "'pore.'", '+', 'quantity', ']', '=', 'Pc', '# Regenerate corresponding physics model', 'for', 'phys', 'in', 'self', '.', 'project', '.', 'find_physics', '(', 'phase', '=', 'phase', ')', ':', 'phys', '.', 'regenerate_models', '(', 'self', '.', 'settings', '[', "'pore_partial_filling'", ']', ')', '# Fetch partial filling fraction from phase object (0->1)', 'lpf', '=', 'phase', '[', 'self', '.', 'settings', '[', "'pore_partial_filling'", ']', ']', '# Calculate filled throat volumes', 'ltf', '=', 'np', '.', 'array', '(', '[', '1', ']', ')', 'if', 'self', '.', 'settings', '[', "'throat_partial_filling'", ']', ':', '# Set pressure on phase to current capillary pressure', 'phase', '[', "'throat.'", '+', 'quantity', ']', '=', 'Pc', '# Regenerate corresponding physics model', 'for', 'phys', 'in', 'self', '.', 'project', '.', 'find_physics', '(', 'phase', '=', 'phase', ')', ':', 'phys', '.', 'regenerate_models', '(', 'self', '.', 'settings', '[', "'throat_partial_filling'", ']', ')', '# Fetch partial filling fraction from phase object (0->1)', 'ltf', '=', 'phase', '[', 'self', '.', 'settings', '[', "'throat_partial_filling'", ']', ']', 'p_inv', '=', 'p_inv', '*', 'lpf', 't_inv', '=', 't_inv', '*', 'ltf', 'return', '{', "'pore.occupancy'", ':', 'p_inv', ',', "'throat.occupancy'", ':', 't_inv', '}'] | r""" | ['r'] | train | https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/algorithms/Porosimetry.py#L171-L198 |
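A hypothetical usage sketch for the method above; the setup follows the OpenPNM 2.x API, but class and model names vary between releases, so treat this as illustrative rather than canonical:

import openpnm as op

pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4)
geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
hg = op.phases.Mercury(network=pn)
phys = op.physics.Standard(network=pn, phase=hg, geometry=geo)

mip = op.algorithms.Porosimetry(network=pn, phase=hg)
mip.set_inlets(pores=pn.pores('left'))
mip.run()
occ = mip.results(Pc=1e5)   # {'pore.occupancy': ..., 'throat.occupancy': ...}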
7,892 | EelcoHoogendoorn/Numpy_arraysetops_EP | numpy_indexed/funcs.py | incidence | def incidence(boundary):
"""
given an Nxm matrix containing boundary info between simplices,
compute incidence info matrix
not very reusable; should probably not be in this lib
"""
return GroupBy(boundary).split(np.arange(boundary.size) // boundary.shape[1]) | python | def incidence(boundary):
"""
given an Nxm matrix containing boundary info between simplices,
compute incidence info matrix
not very reusable; should probably not be in this lib
"""
return GroupBy(boundary).split(np.arange(boundary.size) // boundary.shape[1]) | ['def', 'incidence', '(', 'boundary', ')', ':', 'return', 'GroupBy', '(', 'boundary', ')', '.', 'split', '(', 'np', '.', 'arange', '(', 'boundary', '.', 'size', ')', '//', 'boundary', '.', 'shape', '[', '1', ']', ')'] | given an Nxm matrix containing boundary info between simplices,
compute incidence info matrix
not very reusable; should probably not be in this lib | ['given', 'an', 'Nxm', 'matrix', 'containing', 'boundary', 'info', 'between', 'simplices', 'compute', 'incidence', 'info', 'matrix', 'not', 'very', 'reusable', ';', 'should', 'probably', 'not', 'be', 'in', 'this', 'lib'] | train | https://github.com/EelcoHoogendoorn/Numpy_arraysetops_EP/blob/84dc8114bf8a79c3acb3f7f59128247b9fc97243/numpy_indexed/funcs.py#L267-L273 |
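A small worked variant of the same grouping idea, assuming numpy_indexed is installed; the explicit flatten below makes the vertex ids the group keys:

import numpy as np
from numpy_indexed import group_by

boundary = np.array([[0, 1, 2],
                     [1, 2, 3]])   # two triangles sharing vertices 1 and 2
simplex_of = np.arange(boundary.size) // boundary.shape[1]   # owning simplex per entry
incident = group_by(boundary.flatten()).split(simplex_of)
# incident -> [array([0]), array([0, 1]), array([0, 1]), array([1])]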
7,893 | manns/pyspread | pyspread/src/gui/_grid.py | GridEventHandlers._get_no_rowscols | def _get_no_rowscols(self, bbox):
"""Returns tuple of number of rows and cols from bbox"""
if bbox is None:
return 1, 1
else:
(bb_top, bb_left), (bb_bottom, bb_right) = bbox
if bb_top is None:
bb_top = 0
if bb_left is None:
bb_left = 0
if bb_bottom is None:
bb_bottom = self.grid.code_array.shape[0] - 1
if bb_right is None:
bb_right = self.grid.code_array.shape[1] - 1
return bb_bottom - bb_top + 1, bb_right - bb_left + 1 | python | def _get_no_rowscols(self, bbox):
"""Returns tuple of number of rows and cols from bbox"""
if bbox is None:
return 1, 1
else:
(bb_top, bb_left), (bb_bottom, bb_right) = bbox
if bb_top is None:
bb_top = 0
if bb_left is None:
bb_left = 0
if bb_bottom is None:
bb_bottom = self.grid.code_array.shape[0] - 1
if bb_right is None:
bb_right = self.grid.code_array.shape[1] - 1
return bb_bottom - bb_top + 1, bb_right - bb_left + 1 | ['def', '_get_no_rowscols', '(', 'self', ',', 'bbox', ')', ':', 'if', 'bbox', 'is', 'None', ':', 'return', '1', ',', '1', 'else', ':', '(', 'bb_top', ',', 'bb_left', ')', ',', '(', 'bb_bottom', ',', 'bb_right', ')', '=', 'bbox', 'if', 'bb_top', 'is', 'None', ':', 'bb_top', '=', '0', 'if', 'bb_left', 'is', 'None', ':', 'bb_left', '=', '0', 'if', 'bb_bottom', 'is', 'None', ':', 'bb_bottom', '=', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', '[', '0', ']', '-', '1', 'if', 'bb_right', 'is', 'None', ':', 'bb_right', '=', 'self', '.', 'grid', '.', 'code_array', '.', 'shape', '[', '1', ']', '-', '1', 'return', 'bb_bottom', '-', 'bb_top', '+', '1', ',', 'bb_right', '-', 'bb_left', '+', '1'] | Returns tuple of number of rows and cols from bbox | ['Returns', 'tuple', 'of', 'number', 'of', 'rows', 'and', 'cols', 'from', 'bbox'] | train | https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/gui/_grid.py#L1304-L1320 |
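Because the method only reads `self.grid.code_array.shape`, its behaviour is easy to check standalone; a sketch with a made-up 1000x100 grid shape — `no_rowscols` is a hypothetical free-function variant:

def no_rowscols(bbox, shape=(1000, 100)):
    if bbox is None:
        return 1, 1
    (bb_top, bb_left), (bb_bottom, bb_right) = bbox
    bb_top = 0 if bb_top is None else bb_top                     # None edges snap
    bb_left = 0 if bb_left is None else bb_left                  # to the grid boundary
    bb_bottom = shape[0] - 1 if bb_bottom is None else bb_bottom
    bb_right = shape[1] - 1 if bb_right is None else bb_right
    return bb_bottom - bb_top + 1, bb_right - bb_left + 1

assert no_rowscols(None) == (1, 1)
assert no_rowscols(((2, 3), (5, 7))) == (4, 5)
assert no_rowscols(((None, 0), (None, 0))) == (1000, 1)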
7,894 | xperscore/alley | alley/migrations.py | Migrations.up | def up(self, migration_id=None, fake=False):
"""Executes migrations."""
if not self.check_directory():
return
for migration in self.get_migrations_to_up(migration_id):
logger.info('Executing migration: %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if not fake:
if hasattr(migration_module, 'up'):
migration_module.up(self.db)
else:
logger.error('No up method on migration %s' % migration.filename)
record = migration.as_dict()
record['date'] = datetime.utcnow()
self.collection.insert(record) | python | def up(self, migration_id=None, fake=False):
"""Executes migrations."""
if not self.check_directory():
return
for migration in self.get_migrations_to_up(migration_id):
logger.info('Executing migration: %s' % migration.filename)
migration_module = self.load_migration_file(migration.filename)
if not fake:
if hasattr(migration_module, 'up'):
migration_module.up(self.db)
else:
logger.error('No up method on migration %s' % migration.filename)
record = migration.as_dict()
record['date'] = datetime.utcnow()
self.collection.insert(record) | ['def', 'up', '(', 'self', ',', 'migration_id', '=', 'None', ',', 'fake', '=', 'False', ')', ':', 'if', 'not', 'self', '.', 'check_directory', '(', ')', ':', 'return', 'for', 'migration', 'in', 'self', '.', 'get_migrations_to_up', '(', 'migration_id', ')', ':', 'logger', '.', 'info', '(', "'Executing migration: %s'", '%', 'migration', '.', 'filename', ')', 'migration_module', '=', 'self', '.', 'load_migration_file', '(', 'migration', '.', 'filename', ')', 'if', 'not', 'fake', ':', 'if', 'hasattr', '(', 'migration_module', ',', "'up'", ')', ':', 'migration_module', '.', 'up', '(', 'self', '.', 'db', ')', 'else', ':', 'logger', '.', 'error', '(', "'No up method on migration %s'", '%', 'migration', '.', 'filename', ')', 'record', '=', 'migration', '.', 'as_dict', '(', ')', 'record', '[', "'date'", ']', '=', 'datetime', '.', 'utcnow', '(', ')', 'self', '.', 'collection', '.', 'insert', '(', 'record', ')'] | Executes migrations. | ['Executes', 'migrations', '.'] | train | https://github.com/xperscore/alley/blob/f9a5e9e2970230e38fd8a48b6a0bc1d43a38548e/alley/migrations.py#L143-L160 |
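For context, a hypothetical migration module of the kind `up()` loads and executes; the filename, collection, and index names are made up for illustration:

# migrations/0001_add_email_index.py
def up(db):
    # db is the pymongo Database handed in by Migrations.up()
    db.users.create_index('email', unique=True)

def down(db):
    # counterpart invoked when rolling the migration back
    db.users.drop_index('email_1')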
7,895 | Clarify/clarify_python | clarify_python/clarify.py | Client.get | def get(self, path, data=None):
"""Executes a GET.
'path' may not be None. Should include the full path to the
resource.
'data' may be None or a dictionary. These values will be
appended to the path as key/value pairs.
Returns a named tuple that includes:
status: the HTTP status code
json: the returned JSON-HAL
If the key was not set, throws an APIConfigurationException."""
# Argument error checking.
assert path is not None
# Execute the request.
response = self.conn.request('GET', path, data, self._get_headers())
# Extract the result.
self._last_status = response_status = response.status
response_content = response.data.decode()
return Result(status=response_status, json=response_content) | python | def get(self, path, data=None):
"""Executes a GET.
'path' may not be None. Should include the full path to the
resource.
'data' may be None or a dictionary. These values will be
appended to the path as key/value pairs.
Returns a named tuple that includes:
status: the HTTP status code
json: the returned JSON-HAL
If the key was not set, throws an APIConfigurationException."""
# Argument error checking.
assert path is not None
# Execute the request.
response = self.conn.request('GET', path, data, self._get_headers())
# Extract the result.
self._last_status = response_status = response.status
response_content = response.data.decode()
return Result(status=response_status, json=response_content) | ['def', 'get', '(', 'self', ',', 'path', ',', 'data', '=', 'None', ')', ':', '# Argument error checking.', 'assert', 'path', 'is', 'not', 'None', '# Execute the request.', 'response', '=', 'self', '.', 'conn', '.', 'request', '(', "'GET'", ',', 'path', ',', 'data', ',', 'self', '.', '_get_headers', '(', ')', ')', '# Extract the result.', 'self', '.', '_last_status', '=', 'response_status', '=', 'response', '.', 'status', 'response_content', '=', 'response', '.', 'data', '.', 'decode', '(', ')', 'return', 'Result', '(', 'status', '=', 'response_status', ',', 'json', '=', 'response_content', ')'] | Executes a GET.
'path' may not be None. Should include the full path to the
resource.
'data' may be None or a dictionary. These values will be
appended to the path as key/value pairs.
Returns a named tuple that includes:
status: the HTTP status code
json: the returned JSON-HAL
If the key was not set, throws an APIConfigurationException. | ['Executes', 'a', 'GET', '.'] | train | https://github.com/Clarify/clarify_python/blob/1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb/clarify_python/clarify.py#L851-L876 |
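A hypothetical usage sketch; the constructor signature and the `/v1/bundles` path are assumptions about the Clarify API, not taken from the row above:

from clarify_python.clarify import Client

client = Client('my-api-key')
result = client.get('/v1/bundles', data={'limit': 10})
if result.status == 200:
    print(result.json)   # raw JSON-HAL payload, returned as a string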
7,896 | spacetelescope/pysynphot | pysynphot/observation.py | validate_overlap | def validate_overlap(comp1, comp2, force):
"""Validate the overlap between the wavelength sets
of the two given components.
Parameters
----------
comp1, comp2 : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Source spectrum and bandpass of an observation.
force : {'extrap', 'taper', `None`}
If not `None`, the components may be adjusted by
extrapolation or tapering.
Returns
-------
comp1, comp2
Same as inputs. However, ``comp1`` might be tapered
if that option is selected.
warnings : dict
Maps warning keyword to its description.
Raises
------
KeyError
Invalid ``force``.
pysynphot.exceptions.DisjointError
No overlap detected when ``force`` is `None`.
pysynphot.exceptions.PartialOverlap
Partial overlap detected when ``force`` is `None`.
"""
warnings = dict()
if force is None:
stat = comp2.check_overlap(comp1)
if stat=='full':
pass
elif stat == 'partial':
raise(exceptions.PartialOverlap('Spectrum and bandpass do not fully overlap. You may use force=[extrap|taper] to force this Observation anyway.'))
elif stat == 'none':
raise(exceptions.DisjointError('Spectrum and bandpass are disjoint'))
elif force.lower() == 'taper':
try:
comp1=comp1.taper()
except AttributeError:
comp1=comp1.tabulate().taper()
warnings['PartialOverlap']=force
elif force.lower().startswith('extrap'):
#default behavior works, but check the overlap so we can set the warning
stat=comp2.check_overlap(comp1)
if stat == 'partial':
warnings['PartialOverlap']=force
else:
raise(KeyError("Illegal value force=%s; legal values=('taper','extrap')"%force))
return comp1, comp2, warnings | python | def validate_overlap(comp1, comp2, force):
"""Validate the overlap between the wavelength sets
of the two given components.
Parameters
----------
comp1, comp2 : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Source spectrum and bandpass of an observation.
force : {'extrap', 'taper', `None`}
If not `None`, the components may be adjusted by
extrapolation or tapering.
Returns
-------
comp1, comp2
Same as inputs. However, ``comp1`` might be tapered
if that option is selected.
warnings : dict
Maps warning keyword to its description.
Raises
------
KeyError
Invalid ``force``.
pysynphot.exceptions.DisjointError
No overlap detected when ``force`` is `None`.
pysynphot.exceptions.PartialOverlap
Partial overlap detected when ``force`` is `None`.
"""
warnings = dict()
if force is None:
stat = comp2.check_overlap(comp1)
if stat=='full':
pass
elif stat == 'partial':
raise(exceptions.PartialOverlap('Spectrum and bandpass do not fully overlap. You may use force=[extrap|taper] to force this Observation anyway.'))
elif stat == 'none':
raise(exceptions.DisjointError('Spectrum and bandpass are disjoint'))
elif force.lower() == 'taper':
try:
comp1=comp1.taper()
except AttributeError:
comp1=comp1.tabulate().taper()
warnings['PartialOverlap']=force
elif force.lower().startswith('extrap'):
#default behavior works, but check the overlap so we can set the warning
stat=comp2.check_overlap(comp1)
if stat == 'partial':
warnings['PartialOverlap']=force
else:
raise(KeyError("Illegal value force=%s; legal values=('taper','extrap')"%force))
return comp1, comp2, warnings | ['def', 'validate_overlap', '(', 'comp1', ',', 'comp2', ',', 'force', ')', ':', 'warnings', '=', 'dict', '(', ')', 'if', 'force', 'is', 'None', ':', 'stat', '=', 'comp2', '.', 'check_overlap', '(', 'comp1', ')', 'if', 'stat', '==', "'full'", ':', 'pass', 'elif', 'stat', '==', "'partial'", ':', 'raise', '(', 'exceptions', '.', 'PartialOverlap', '(', "'Spectrum and bandpass do not fully overlap. You may use force=[extrap|taper] to force this Observation anyway.'", ')', ')', 'elif', 'stat', '==', "'none'", ':', 'raise', '(', 'exceptions', '.', 'DisjointError', '(', "'Spectrum and bandpass are disjoint'", ')', ')', 'elif', 'force', '.', 'lower', '(', ')', '==', "'taper'", ':', 'try', ':', 'comp1', '=', 'comp1', '.', 'taper', '(', ')', 'except', 'AttributeError', ':', 'comp1', '=', 'comp1', '.', 'tabulate', '(', ')', '.', 'taper', '(', ')', 'warnings', '[', "'PartialOverlap'", ']', '=', 'force', 'elif', 'force', '.', 'lower', '(', ')', '.', 'startswith', '(', "'extrap'", ')', ':', '#default behavior works, but check the overlap so we can set the warning', 'stat', '=', 'comp2', '.', 'check_overlap', '(', 'comp1', ')', 'if', 'stat', '==', "'partial'", ':', 'warnings', '[', "'PartialOverlap'", ']', '=', 'force', 'else', ':', 'raise', '(', 'KeyError', '(', '"Illegal value force=%s; legal values=(\'taper\',\'extrap\')"', '%', 'force', ')', ')', 'return', 'comp1', ',', 'comp2', ',', 'warnings'] | Validate the overlap between the wavelength sets
of the two given components.
Parameters
----------
comp1, comp2 : `~pysynphot.spectrum.SourceSpectrum` or `~pysynphot.spectrum.SpectralElement`
Source spectrum and bandpass of an observation.
force : {'extrap', 'taper', `None`}
If not `None`, the components may be adjusted by
extrapolation or tapering.
Returns
-------
comp1, comp2
Same as inputs. However, ``comp1`` might be tapered
if that option is selected.
warnings : dict
Maps warning keyword to its description.
Raises
------
KeyError
Invalid ``force``.
pysynphot.exceptions.DisjointError
No overlap detected when ``force`` is `None`.
pysynphot.exceptions.PartialOverlap
Partial overlap detected when ``force`` is `None`. | ['Validate', 'the', 'overlap', 'between', 'the', 'wavelength', 'sets', 'of', 'the', 'two', 'given', 'components', '.'] | train | https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/observation.py#L81-L140 |
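A hypothetical usage sketch; it assumes pysynphot is configured with its usual CDBS reference files so the bandpass constructor can resolve 'johnson,v':

import pysynphot as S
from pysynphot.observation import validate_overlap

spec = S.BlackBody(5500)            # source spectrum
bp = S.ObsBandpass('johnson,v')     # bandpass
spec, bp, warns = validate_overlap(spec, bp, force='taper')
print(warns)   # {'PartialOverlap': 'taper'} only if tapering was applied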
7,897 | boakley/robotframework-hub | rfhub/kwdb.py | KeywordTable.get_collections | def get_collections(self, pattern="*", libtype="*"):
"""Returns a list of collection name/summary tuples"""
sql = """SELECT collection.collection_id, collection.name, collection.doc,
collection.type, collection.path
FROM collection_table as collection
WHERE name like ?
AND type like ?
ORDER BY collection.name
"""
cursor = self._execute(sql, (self._glob_to_sql(pattern),
self._glob_to_sql(libtype)))
sql_result = cursor.fetchall()
return [{"collection_id": result[0],
"name": result[1],
"synopsis": result[2].split("\n")[0],
"type": result[3],
"path": result[4]
} for result in sql_result] | python | def get_collections(self, pattern="*", libtype="*"):
"""Returns a list of collection name/summary tuples"""
sql = """SELECT collection.collection_id, collection.name, collection.doc,
collection.type, collection.path
FROM collection_table as collection
WHERE name like ?
AND type like ?
ORDER BY collection.name
"""
cursor = self._execute(sql, (self._glob_to_sql(pattern),
self._glob_to_sql(libtype)))
sql_result = cursor.fetchall()
return [{"collection_id": result[0],
"name": result[1],
"synopsis": result[2].split("\n")[0],
"type": result[3],
"path": result[4]
} for result in sql_result] | ['def', 'get_collections', '(', 'self', ',', 'pattern', '=', '"*"', ',', 'libtype', '=', '"*"', ')', ':', 'sql', '=', '"""SELECT collection.collection_id, collection.name, collection.doc,\n collection.type, collection.path\n FROM collection_table as collection\n WHERE name like ?\n AND type like ?\n ORDER BY collection.name\n """', 'cursor', '=', 'self', '.', '_execute', '(', 'sql', ',', '(', 'self', '.', '_glob_to_sql', '(', 'pattern', ')', ',', 'self', '.', '_glob_to_sql', '(', 'libtype', ')', ')', ')', 'sql_result', '=', 'cursor', '.', 'fetchall', '(', ')', 'return', '[', '{', '"collection_id"', ':', 'result', '[', '0', ']', ',', '"name"', ':', 'result', '[', '1', ']', ',', '"synopsis"', ':', 'result', '[', '2', ']', '.', 'split', '(', '"\\n"', ')', '[', '0', ']', ',', '"type"', ':', 'result', '[', '3', ']', ',', '"path"', ':', 'result', '[', '4', ']', '}', 'for', 'result', 'in', 'sql_result', ']'] | Returns a list of collection name/summary tuples | ['Returns', 'a', 'list', 'of', 'collection', 'name', '/', 'summary', 'tuples'] | train | https://github.com/boakley/robotframework-hub/blob/f3dc7562fe6218a7b8d7aac7b9ef234e1a573f7c/rfhub/kwdb.py#L295-L315 |
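A hypothetical usage sketch; `KeywordTable()` defaults to an in-memory sqlite database, and the `add('BuiltIn')` indexing entry point is an assumption about the class's API:

from rfhub.kwdb import KeywordTable

kwdb = KeywordTable()
kwdb.add('BuiltIn')   # index Robot Framework's BuiltIn library first
for c in kwdb.get_collections(pattern='Built*', libtype='library'):
    print(c['name'], '-', c['synopsis'])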
7,898 | wonambi-python/wonambi | wonambi/widgets/overview.py | _make_timestamps | def _make_timestamps(start_time, minimum, maximum, steps):
"""Create timestamps on x-axis, every so often.
Parameters
----------
start_time : instance of datetime
actual start time of the dataset
minimum : int
start time of the recording from start_time, in s
maximum : int
end time of the recording from start_time, in s
steps : int
how often you want a label, in s
Returns
-------
stamp_label : list of str
    Labels to be displayed, formatted as HH:MM.
stamp_time : list of float
    Time points (in s from the start of the displayed window) at which
    each label should be placed.
Notes
-----
This function takes care that labels are placed at the meaningful time, not
at random values.
"""
t0 = start_time + timedelta(seconds=minimum)
t1 = start_time + timedelta(seconds=maximum)
t0_midnight = t0.replace(hour=0, minute=0, second=0, microsecond=0)
d0 = t0 - t0_midnight
d1 = t1 - t0_midnight
first_stamp = ceil(d0.total_seconds() / steps) * steps
last_stamp = ceil(d1.total_seconds() / steps) * steps
stamp_label = []
stamp_time = []
for stamp in range(first_stamp, last_stamp, steps):
stamp_as_datetime = t0_midnight + timedelta(seconds=stamp)
stamp_label.append(stamp_as_datetime.strftime('%H:%M'))
stamp_time.append(stamp - d0.total_seconds())
return stamp_label, stamp_time | python | def _make_timestamps(start_time, minimum, maximum, steps):
"""Create timestamps on x-axis, every so often.
Parameters
----------
start_time : instance of datetime
actual start time of the dataset
minimum : int
start time of the recording from start_time, in s
maximum : int
end time of the recording from start_time, in s
steps : int
how often you want a label, in s
Returns
-------
stamp_label : list of str
    Labels to be displayed, formatted as HH:MM.
stamp_time : list of float
    Time points (in s from the start of the displayed window) at which
    each label should be placed.
Notes
-----
This function takes care that labels are placed at the meaningful time, not
at random values.
"""
t0 = start_time + timedelta(seconds=minimum)
t1 = start_time + timedelta(seconds=maximum)
t0_midnight = t0.replace(hour=0, minute=0, second=0, microsecond=0)
d0 = t0 - t0_midnight
d1 = t1 - t0_midnight
first_stamp = ceil(d0.total_seconds() / steps) * steps
last_stamp = ceil(d1.total_seconds() / steps) * steps
stamp_label = []
stamp_time = []
for stamp in range(first_stamp, last_stamp, steps):
stamp_as_datetime = t0_midnight + timedelta(seconds=stamp)
stamp_label.append(stamp_as_datetime.strftime('%H:%M'))
stamp_time.append(stamp - d0.total_seconds())
return stamp_label, stamp_time | ['def', '_make_timestamps', '(', 'start_time', ',', 'minimum', ',', 'maximum', ',', 'steps', ')', ':', 't0', '=', 'start_time', '+', 'timedelta', '(', 'seconds', '=', 'minimum', ')', 't1', '=', 'start_time', '+', 'timedelta', '(', 'seconds', '=', 'maximum', ')', 't0_midnight', '=', 't0', '.', 'replace', '(', 'hour', '=', '0', ',', 'minute', '=', '0', ',', 'second', '=', '0', ',', 'microsecond', '=', '0', ')', 'd0', '=', 't0', '-', 't0_midnight', 'd1', '=', 't1', '-', 't0_midnight', 'first_stamp', '=', 'ceil', '(', 'd0', '.', 'total_seconds', '(', ')', '/', 'steps', ')', '*', 'steps', 'last_stamp', '=', 'ceil', '(', 'd1', '.', 'total_seconds', '(', ')', '/', 'steps', ')', '*', 'steps', 'stamp_label', '=', '[', ']', 'stamp_time', '=', '[', ']', 'for', 'stamp', 'in', 'range', '(', 'first_stamp', ',', 'last_stamp', ',', 'steps', ')', ':', 'stamp_as_datetime', '=', 't0_midnight', '+', 'timedelta', '(', 'seconds', '=', 'stamp', ')', 'stamp_label', '.', 'append', '(', 'stamp_as_datetime', '.', 'strftime', '(', "'%H:%M'", ')', ')', 'stamp_time', '.', 'append', '(', 'stamp', '-', 'd0', '.', 'total_seconds', '(', ')', ')', 'return', 'stamp_label', ',', 'stamp_time'] | Create timestamps on x-axis, every so often.
Parameters
----------
start_time : instance of datetime
actual start time of the dataset
minimum : int
start time of the recording from start_time, in s
maximum : int
end time of the recording from start_time, in s
steps : int
how often you want a label, in s
Returns
-------
stamp_label : list of str
    Labels to be displayed, formatted as HH:MM.
stamp_time : list of float
    Time points (in s from the start of the displayed window) at which
    each label should be placed.
Notes
-----
This function takes care that labels are placed at the meaningful time, not
at random values. | ['Create', 'timestamps', 'on', 'x', '-', 'axis', 'every', 'so', 'often', '.'] | train | https://github.com/wonambi-python/wonambi/blob/1d8e3d7e53df8017c199f703bcab582914676e76/wonambi/widgets/overview.py#L527-L570 |
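A quick standalone check of the stamp placement; the function is module-private, so importing it directly (and having Qt available for the widgets package) is an assumption:

from datetime import datetime
from wonambi.widgets.overview import _make_timestamps

start_time = datetime(2019, 1, 1, 23, 50)   # recording starts at 23:50
labels, times = _make_timestamps(start_time, minimum=300, maximum=3900,
                                 steps=1800)
# labels -> ['00:00', '00:30']; times -> [300.0, 2100.0], i.e. seconds from
# the start of the displayed window, aligned to the half hour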
7,899 | DataDog/integrations-core | tokumx/datadog_checks/tokumx/vendor/pymongo/common.py | validate_uuid_representation | def validate_uuid_representation(dummy, value):
"""Validate the uuid representation option selected in the URI.
"""
try:
return _UUID_REPRESENTATIONS[value]
except KeyError:
raise ValueError("%s is an invalid UUID representation. "
"Must be one of "
"%s" % (value, tuple(_UUID_REPRESENTATIONS))) | python | def validate_uuid_representation(dummy, value):
"""Validate the uuid representation option selected in the URI.
"""
try:
return _UUID_REPRESENTATIONS[value]
except KeyError:
raise ValueError("%s is an invalid UUID representation. "
"Must be one of "
"%s" % (value, tuple(_UUID_REPRESENTATIONS))) | ['def', 'validate_uuid_representation', '(', 'dummy', ',', 'value', ')', ':', 'try', ':', 'return', '_UUID_REPRESENTATIONS', '[', 'value', ']', 'except', 'KeyError', ':', 'raise', 'ValueError', '(', '"%s is an invalid UUID representation. "', '"Must be one of "', '"%s"', '%', '(', 'value', ',', 'tuple', '(', '_UUID_REPRESENTATIONS', ')', ')', ')'] | Validate the uuid representation option selected in the URI. | ['Validate', 'the', 'uuid', 'representation', 'option', 'selected', 'in', 'the', 'URI', '.'] | train | https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/common.py#L327-L335 |
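A minimal sketch for the last row; the first argument is unused (hence `dummy`), and the valid spellings come from pymongo's `_UUID_REPRESENTATIONS` mapping:

from datadog_checks.tokumx.vendor.pymongo.common import validate_uuid_representation

rep = validate_uuid_representation(None, 'standard')   # -> a BSON binary subtype
validate_uuid_representation(None, 'bogus')            # raises ValueError listing valid names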