Dataset schema (column: type, observed value range):

repo: string (7-54 chars)
path: string (4-192 chars)
url: string (87-284 chars)
code: string (78-104k chars)
code_tokens: sequence
docstring: string (1-46.9k chars)
docstring_tokens: sequence
language: string (1 class)
partition: string (3 classes)
icometrix/dicom2nifti
dicom2nifti/convert_philips.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_philips.py#L172-L187
def _is_bval_type_a(grouped_dicoms):
    """
    Check if the bvals are stored in the first of 2 currently known ways for single frame dti
    """
    bval_tag = Tag(0x2001, 0x1003)
    bvec_x_tag = Tag(0x2005, 0x10b0)
    bvec_y_tag = Tag(0x2005, 0x10b1)
    bvec_z_tag = Tag(0x2005, 0x10b2)
    for group in grouped_dicoms:
        if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
                bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
                bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
                bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
                common.get_fl_value(group[0][bval_tag]) != 0:
            return True
    return False
Check if the bvals are stored in the first of 2 currently known ways for single frame dti
python
train
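A quick way to see the tag-membership mechanics used above is to build a pydicom dataset by hand. This sketch is illustrative only: the tag number is the Philips private b-value tag from the function, but the dataset and value are invented.

from pydicom.dataset import Dataset
from pydicom.tag import Tag

bval_tag = Tag(0x2001, 0x1003)       # the Philips private b-value tag from above
ds = Dataset()                       # an invented, hand-built dataset
ds.add_new(bval_tag, 'FL', 1000.0)   # 'FL' is the 4-byte float VR

print(bval_tag in ds)        # True: membership is tested per tag
print(ds[bval_tag].value)    # 1000.0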
dropbox/pyannotate
pyannotate_tools/annotations/parse.py
https://github.com/dropbox/pyannotate/blob/d128c76b8a86f208e5c78716f2a917003650cebc/pyannotate_tools/annotations/parse.py#L96-L133
def parse_json(path):
    # type: (str) -> List[FunctionInfo]
    """Deserialize a JSON file containing runtime collected types.

    The input JSON is expected to have a list of RawEntry items.
    """
    with open(path) as f:
        data = json.load(f)  # type: List[RawEntry]
    result = []

    def assert_type(value, typ):
        # type: (object, type) -> None
        assert isinstance(value, typ), '%s: Unexpected type %r' % (path, type(value).__name__)

    def assert_dict_item(dictionary, key, typ):
        # type: (Mapping[Any, Any], str, type) -> None
        assert key in dictionary, '%s: Missing dictionary key %r' % (path, key)
        value = dictionary[key]
        assert isinstance(value, typ), '%s: Unexpected type %r for key %r' % (
            path, type(value).__name__, key)

    assert_type(data, list)
    for item in data:
        assert_type(item, dict)
        assert_dict_item(item, 'path', Text)
        assert_dict_item(item, 'line', int)
        assert_dict_item(item, 'func_name', Text)
        assert_dict_item(item, 'type_comments', list)
        for comment in item['type_comments']:
            assert_type(comment, Text)
        assert_type(item['samples'], int)
        info = FunctionInfo(encode(item['path']),
                            item['line'],
                            encode(item['func_name']),
                            [encode(comment) for comment in item['type_comments']],
                            item['samples'])
        result.append(info)
    return result
Deserialize a JSON file containing runtime collected types. The input JSON is expected to have a list of RawEntry items.
python
train
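For orientation, the JSON shape the assertions above enforce can be written out directly. Every value below is invented sample data, not output of pyannotate itself.

import json
import tempfile

# One RawEntry-shaped record; all values are hypothetical
sample = [{
    "path": "example.py",
    "line": 12,
    "func_name": "add",
    "type_comments": ["(int, int) -> int"],
    "samples": 3,
}]

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump(sample, f)
    json_path = f.name

# parse_json(json_path) would then return a one-element list of FunctionInfo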
CartoDB/cartoframes
cartoframes/layer.py
https://github.com/CartoDB/cartoframes/blob/c94238a545f3dec45963dac3892540942b6f0df8/cartoframes/layer.py#L369-L397
def _parse_time(self, time):
    """Parse time inputs"""
    if time is None:
        return None

    if isinstance(time, dict):
        if 'column' not in time:
            raise ValueError("`time` must include a 'column' key/value")
        time_column = time['column']
        time_options = time
    elif isinstance(time, str):
        time_column = time
        time_options = {}
    else:
        raise ValueError(
            '`time` should be a column name or dictionary of '
            'styling options.')

    self.style_cols[time_column] = None
    time = {
        'column': time_column,
        'method': 'count',
        'cumulative': False,
        'frames': 256,
        'duration': 30,
        'trails': 2,
    }
    time.update(time_options)
    return time
Parse time inputs
python
train
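The core of the dictionary branch is layering user options over a set of defaults with dict.update. A standalone sketch of that precedence, with an invented column name and override:

time = {
    'column': 'event_time',  # invented column name
    'method': 'count',
    'cumulative': False,
    'frames': 256,
    'duration': 30,
    'trails': 2,
}
time_options = {'duration': 10}  # user-supplied override
time.update(time_options)        # user values win over defaults
print(time['duration'])          # 10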
niolabs/python-xbee
xbee/tornado/base.py
https://github.com/niolabs/python-xbee/blob/b91be3d0ee7ccaa1990120b5b5490999d8e6cbc7/xbee/tornado/base.py#L127-L167
def _process_input(self, data, events):
    """
    _process_input:
    _process_input will be notified when there is data ready on the
    serial connection to be read. It will read and process the data
    into an API Frame and then either resolve a frame future, or push
    the frame into the queue of frames needing to be processed
    """
    frame = APIFrame(escaped=self._escaped)

    byte = self.serial.read()

    if byte != APIFrame.START_BYTE:
        return

    # Save all following bytes, if they are not empty
    if len(byte) == 1:
        frame.fill(byte)

    while frame.remaining_bytes() > 0:
        byte = self.serial.read()

        if len(byte) == 1:
            frame.fill(byte)

    try:
        # Try to parse and return result
        frame.parse()

        # Ignore empty frames
        if len(frame.data) == 0:
            return

        if self._frame_future is not None:
            self._frame_future.set_result(frame)
            self._frame_future = None
        else:
            self._frame_queue.append(frame)
    except ValueError:
        return
_process_input: _process_input will be notified when there is data ready on the serial connection to be read. It will read and process the data into an API Frame and then either resolve a frame future, or push the frame into the queue of frames needing to be processed
python
train
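The tail of the method is a resolve-or-enqueue hand-off: a consumer that is already waiting receives the frame through a future, otherwise the frame is buffered. A generic sketch of the pattern, using concurrent.futures.Future as a stand-in for the Tornado future (all names are illustrative):

from collections import deque
from concurrent.futures import Future

frame_future = None    # set when a consumer is already waiting
frame_queue = deque()  # buffer used when nobody is waiting

def deliver(frame):
    global frame_future
    if frame_future is not None:
        frame_future.set_result(frame)  # hand the frame to the waiting consumer
        frame_future = None
    else:
        frame_queue.append(frame)       # hold it until somebody asks

frame_future = Future()
waiting = frame_future
deliver(b'\x7e\x00\x02')   # an invented raw frame
print(waiting.result())    # delivered via the future; the queue stays empty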
moralrecordings/mrcrowbar
mrcrowbar/refs.py
https://github.com/moralrecordings/mrcrowbar/blob/b1ed882c4555552e7656b2d84aca543184577fa3/mrcrowbar/refs.py#L115-L132
def property_set( prop, instance, value, **kwargs ):
    """Wrapper for property writes which auto-dereferences Refs.

    prop
        A Ref (which gets dereferenced and the target value set).

    instance
        The context object used to dereference the Ref.

    value
        The value to set the property to.

    Throws AttributeError if prop is not a Ref.
    """
    if isinstance( prop, Ref ):
        return prop.set( instance, value, **kwargs )
    raise AttributeError( "can't change value of constant {} (context: {})".format( prop, instance ) )
Wrapper for property writes which auto-dereferences Refs. prop A Ref (which gets dereferenced and the target value set). instance The context object used to dereference the Ref. value The value to set the property to. Throws AttributeError if prop is not a Ref.
python
train
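property_set is one half of a read/write pair that lets a field hold either a dereferenceable Ref or a plain constant. A toy sketch of the dispatch; the Ref class here is a minimal stand-in, not mrcrowbar's:

class Ref:                                   # minimal stand-in, not mrcrowbar's Ref
    def __init__(self, attr):
        self.attr = attr

    def set(self, instance, value, **kwargs):
        setattr(instance, self.attr, value)  # "dereference" and write through

def property_set(prop, instance, value, **kwargs):
    if isinstance(prop, Ref):
        return prop.set(instance, value, **kwargs)
    raise AttributeError(
        "can't change value of constant {} (context: {})".format(prop, instance))

class Block:
    size = 0

property_set(Ref('size'), Block, 42)   # writes through the Ref
print(Block.size)                      # 42
# property_set(99, Block, 42) would raise AttributeError: constants are read-only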
zalando/patroni
patroni/ctl.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/ctl.py#L585-L680
def _do_failover_or_switchover(obj, action, cluster_name, master, candidate, force, scheduled=None):
    """
    We want to trigger a failover or switchover for the specified cluster name.

    We verify that the cluster name, master name and candidate name are correct.
    If so, we trigger an action and keep the client up to date.
    """
    dcs = get_dcs(obj, cluster_name)
    cluster = dcs.get_cluster()

    if action == 'switchover' and cluster.leader is None:
        raise PatroniCtlException('This cluster has no master')

    if master is None:
        if force or action == 'failover':
            master = cluster.leader and cluster.leader.name
        else:
            master = click.prompt('Master', type=str, default=cluster.leader.member.name)

    if master is not None and cluster.leader and cluster.leader.member.name != master:
        raise PatroniCtlException('Member {0} is not the leader of cluster {1}'.format(master, cluster_name))

    # excluding members with nofailover tag
    candidate_names = [str(m.name) for m in cluster.members if m.name != master and not m.nofailover]
    # We sort the names for consistent output to the client
    candidate_names.sort()

    if not candidate_names:
        raise PatroniCtlException('No candidates found to {0} to'.format(action))

    if candidate is None and not force:
        candidate = click.prompt('Candidate ' + str(candidate_names), type=str, default='')

    if action == 'failover' and not candidate:
        raise PatroniCtlException('Failover could be performed only to a specific candidate')

    if candidate == master:
        raise PatroniCtlException(action.title() + ' target and source are the same.')

    if candidate and candidate not in candidate_names:
        raise PatroniCtlException('Member {0} does not exist in cluster {1}'.format(candidate, cluster_name))

    scheduled_at_str = None
    scheduled_at = None

    if action == 'switchover':
        if scheduled is None and not force:
            scheduled = click.prompt('When should the switchover take place (e.g. 2015-10-01T14:30) ',
                                     type=str, default='now')

        scheduled_at = parse_scheduled(scheduled)
        if scheduled_at:
            if cluster.is_paused():
                raise PatroniCtlException("Can't schedule switchover in the paused state")
            scheduled_at_str = scheduled_at.isoformat()

    failover_value = {'leader': master, 'candidate': candidate, 'scheduled_at': scheduled_at_str}

    logging.debug(failover_value)

    # By now we have established that the leader exists and the candidate exists
    click.echo('Current cluster topology')
    output_members(dcs.get_cluster(), cluster_name)

    if not force:
        demote_msg = ', demoting current master ' + master if master else ''
        if not click.confirm('Are you sure you want to {0} cluster {1}{2}?'.format(action, cluster_name, demote_msg)):
            raise PatroniCtlException('Aborting ' + action)

    r = None
    try:
        member = cluster.leader.member if cluster.leader else cluster.get_member(candidate, False)
        r = request_patroni(member, 'post', action, failover_value, auth_header(obj))

        # probably old patroni, which doesn't support switchover yet
        if r.status_code == 501 and action == 'switchover' and 'Server does not support this operation' in r.text:
            r = request_patroni(member, 'post', 'failover', failover_value, auth_header(obj))

        if r.status_code in (200, 202):
            logging.debug(r)
            cluster = dcs.get_cluster()
            logging.debug(cluster)
            click.echo('{0} {1}'.format(timestamp(), r.text))
        else:
            click.echo('{0} failed, details: {1}, {2}'.format(action.title(), r.status_code, r.text))
            return
    except Exception:
        logging.exception(r)
        logging.warning('Failing over to DCS')
        click.echo('{0} Could not {1} using Patroni api, falling back to DCS'.format(timestamp(), action))
        dcs.manual_failover(master, candidate, scheduled_at=scheduled_at)

    output_members(cluster, cluster_name)
We want to trigger a failover or switchover for the specified cluster name. We verify that the cluster name, master name and candidate name are correct. If so, we trigger an action and keep the client up to date.
python
train
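Whatever path the prompts take, the body POSTed to the Patroni API has the three-key shape built above. A hypothetical payload for a scheduled switchover; the member names and timestamp are invented, only the key set comes from the function:

# Payload sent to the /switchover (or /failover) endpoint
failover_value = {
    'leader': 'pg-node-1',                        # current master (invented)
    'candidate': 'pg-node-2',                     # promotion target (invented)
    'scheduled_at': '2015-10-01T14:30:00+00:00',  # None for an immediate action
}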
opennode/waldur-core
waldur_core/structure/utils.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/structure/utils.py#L130-L150
def handle_resource_update_success(resource):
    """ Recover resource if its state is ERRED and clear error message. """
    update_fields = []
    if resource.state == resource.States.ERRED:
        resource.recover()
        update_fields.append('state')

    if resource.state in (resource.States.UPDATING, resource.States.CREATING):
        resource.set_ok()
        update_fields.append('state')

    if resource.error_message:
        resource.error_message = ''
        update_fields.append('error_message')

    if update_fields:
        resource.save(update_fields=update_fields)

    logger.warning('%s %s (PK: %s) was successfully updated.' % (
        resource.__class__.__name__, resource, resource.pk))
Recover resource if its state is ERRED and clear error message.
python
train
gem/oq-engine
openquake/hmtk/comparison/rate_grids.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hmtk/comparison/rate_grids.py#L244-L265
def _get_fault_rates(self, source, mmin, mmax=np.inf):
    """
    Adds the rates for a simple or complex fault source

    :param source:
        Fault source as instance of :class:
        openquake.hazardlib.source.simple_fault.SimpleFaultSource or
        openquake.hazardlib.source.complex_fault.ComplexFaultSource
    """
    for rupt in list(source.iter_ruptures()):
        valid_rupt = (rupt.mag >= mmin) and (rupt.mag < mmax)
        if not valid_rupt:
            continue
        grd = np.column_stack([rupt.surface.mesh.lons.flatten(),
                               rupt.surface.mesh.lats.flatten(),
                               rupt.surface.mesh.depths.flatten()])
        npts = np.shape(grd)[0]
        counter = np.histogramdd(grd,
                                 bins=[self.xlim, self.ylim, self.zlim])[0]
        point_rate = rupt.occurrence_rate / float(npts)
        self.rates += (point_rate * counter)
Adds the rates for a simple or complex fault source :param source: Fault source as instance of :class: openquake.hazardlib.source.simple_fault.SimpleFaultSource or openquake.hazardlib.source.complex_fault.ComplexFaultSource
python
train
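The accumulation step hinges on numpy.histogramdd, which counts how many mesh points land in each 3-D cell so a rupture's rate can be spread evenly across them. A self-contained sketch with toy points, invented bin edges, and an invented rate:

import numpy as np

rng = np.random.default_rng(0)
grd = rng.random((100, 3))                    # toy lon/lat/depth mesh points
edges = [np.linspace(0.0, 1.0, 5)] * 3        # 4 cells per axis

counter = np.histogramdd(grd, bins=edges)[0]  # shape (4, 4, 4); entries sum to 100
point_rate = 0.01 / grd.shape[0]              # invented rupture rate, split evenly
rates = point_rate * counter
print(rates.sum())                            # ~0.01: the total rate is preserved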
DistrictDataLabs/yellowbrick
yellowbrick/features/jointplot.py
https://github.com/DistrictDataLabs/yellowbrick/blob/59b67236a3862c73363e8edad7cd86da5b69e3b2/yellowbrick/features/jointplot.py#L208-L235
def _layout(self):
    """
    Creates the grid layout for the joint plot, adding new axes for the
    histograms if necessary and modifying the aspect ratio. Does not modify
    the axes or the layout if self.hist is False or None.
    """
    # Ensure the axes are created if not hist, then return.
    if not self.hist:
        self.ax
        return

    # Ensure matplotlib version compatibility
    if make_axes_locatable is None:
        raise YellowbrickValueError((
            "joint plot histograms requires matplotlib 2.0.2 or greater "
            "please upgrade matplotlib or set hist=False on the visualizer"
        ))

    # Create the new axes for the histograms
    divider = make_axes_locatable(self.ax)
    self._xhax = divider.append_axes("top", size=1, pad=0.1, sharex=self.ax)
    self._yhax = divider.append_axes("right", size=1, pad=0.1, sharey=self.ax)

    # Modify the display of the axes
    self._xhax.xaxis.tick_top()
    self._yhax.yaxis.tick_right()
    self._xhax.grid(False, axis='y')
    self._yhax.grid(False, axis='x')
Creates the grid layout for the joint plot, adding new axes for the histograms if necessary and modifying the aspect ratio. Does not modify the axes or the layout if self.hist is False or None.
python
train
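make_axes_locatable comes from mpl_toolkits.axes_grid1, so the marginal-axes construction can be reproduced outside the visualizer. A minimal standalone sketch mirroring the calls above:

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable

fig, ax = plt.subplots()
divider = make_axes_locatable(ax)
xhax = divider.append_axes("top", size=1, pad=0.1, sharex=ax)    # x-histogram
yhax = divider.append_axes("right", size=1, pad=0.1, sharey=ax)  # y-histogram

xhax.xaxis.tick_top()     # move ticks away from the joint plot
yhax.yaxis.tick_right()
plt.show()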
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L17867-L17882
def get_default_io_cache_setting_for_storage_controller(self, controller_type):
    """Returns the default I/O cache setting for the given storage controller

    in controller_type of type :class:`StorageControllerType`
        The storage controller type to get the setting for.

    return enabled of type bool
        Returned flag indicating the default value

    """
    if not isinstance(controller_type, StorageControllerType):
        raise TypeError("controller_type can only be an instance of type StorageControllerType")
    enabled = self._call("getDefaultIoCacheSettingForStorageController",
                         in_p=[controller_type])
    return enabled
Returns the default I/O cache setting for the given storage controller in controller_type of type :class:`StorageControllerType` The storage controller type to get the setting for. return enabled of type bool Returned flag indicating the default value
python
train
spacetelescope/pysynphot
pysynphot/graphtab.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/graphtab.py#L242-L271
def validate(self):
    """ Simultaneously checks for loops and unreachable nodes """
    msg = list()
    previously_seen = set()
    currently_seen = set([1])
    problemset = set()

    while currently_seen:
        node = currently_seen.pop()
        if node in previously_seen:
            problemset.add(node)
        else:
            previously_seen.add(node)
            self.add_descendants(node, currently_seen)

    unreachable = self.all_nodes - previously_seen
    if unreachable:
        msg.append("%d unreachable nodes: " % len(unreachable))
        for node in unreachable:
            msg.append(str(node))

    if problemset:
        msg.append("Loop involving %d nodes" % len(problemset))
        for node in problemset:
            msg.append(str(node))

    if msg:
        return msg
    else:
        return True
Simultaneously checks for loops and unreachable nodes
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_ras.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_ras.py#L126-L139
def logging_syslog_server_secure(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    logging = ET.SubElement(config, "logging", xmlns="urn:brocade.com:mgmt:brocade-ras")
    syslog_server = ET.SubElement(logging, "syslog-server")
    syslogip_key = ET.SubElement(syslog_server, "syslogip")
    syslogip_key.text = kwargs.pop('syslogip')
    use_vrf_key = ET.SubElement(syslog_server, "use-vrf")
    use_vrf_key.text = kwargs.pop('use_vrf')
    secure = ET.SubElement(syslog_server, "secure")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
python
train
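The method is a thin wrapper over xml.etree.ElementTree, and the same document can be assembled directly. A sketch with invented values for the two keys:

import xml.etree.ElementTree as ET

config = ET.Element("config")
logging_el = ET.SubElement(config, "logging",
                           xmlns="urn:brocade.com:mgmt:brocade-ras")
syslog_server = ET.SubElement(logging_el, "syslog-server")
ET.SubElement(syslog_server, "syslogip").text = "10.0.0.5"   # invented address
ET.SubElement(syslog_server, "use-vrf").text = "mgmt-vrf"    # invented VRF name
ET.SubElement(syslog_server, "secure")                       # empty flag element

print(ET.tostring(config).decode())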
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/system.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/system.py#L934-L965
def get_postmortem_exclusion_list(cls, bits=None):
    """
    Returns the exclusion list for the postmortem debugger.

    @see: L{get_postmortem_debugger}

    @type  bits: int
    @param bits: Set to C{32} for the 32 bits debugger, or C{64} for the
        64 bits debugger. Set to {None} for the default (L{System.bits}).

    @rtype:  list( str )
    @return: List of excluded application filenames.

    @raise WindowsError:
        Raises an exception on error.
    """
    if bits is None:
        bits = cls.bits
    elif bits not in (32, 64):
        raise NotImplementedError("Unknown architecture (%r bits)" % bits)

    if bits == 32 and cls.bits == 64:
        keyname = 'HKLM\\SOFTWARE\\Wow6432Node\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'
    else:
        keyname = 'HKLM\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\AeDebug\\AutoExclusionList'

    try:
        key = cls.registry[keyname]
    except KeyError:
        return []

    return [name for (name, enabled) in key.items() if enabled]
Returns the exclusion list for the postmortem debugger. @see: L{get_postmortem_debugger} @type bits: int @param bits: Set to C{32} for the 32 bits debugger, or C{64} for the 64 bits debugger. Set to {None} for the default (L{System.bits}). @rtype: list( str ) @return: List of excluded application filenames. @raise WindowsError: Raises an exception on error.
python
train
hydpy-dev/hydpy
hydpy/auxs/anntools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/anntools.py#L1323-L1370
def refresh(self) -> None:
    """Prepare the actual |anntools.SeasonalANN| object for calculations.

    Despite all automated refreshing explained in the general
    documentation on class |anntools.SeasonalANN|, it is still possible
    to destroy the inner consistency of a |anntools.SeasonalANN|
    instance, as it stores its |anntools.ANN| objects by reference.
    This is shown by the following example:

    >>> from hydpy import SeasonalANN, ann
    >>> seasonalann = SeasonalANN(None)
    >>> seasonalann.simulationstep = '1d'
    >>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1,
    ...           weights_input=0.0, weights_output=0.0,
    ...           intercepts_hidden=0.0, intercepts_output=1.0)
    >>> seasonalann(_1_1_12=jan)
    >>> jan.nmb_inputs, jan.nmb_outputs = 2, 3
    >>> jan.nmb_inputs, jan.nmb_outputs
    (2, 3)
    >>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
    (1, 1)

    Due to the C level implementation of the mathematical core of both
    |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|, such
    an inconsistency might result in a program crash without any
    informative error message. Whenever you are afraid some
    inconsistency might have crept in, and you want to repair it, call
    method |anntools.SeasonalANN.refresh| explicitly:

    >>> seasonalann.refresh()
    >>> jan.nmb_inputs, jan.nmb_outputs
    (2, 3)
    >>> seasonalann.nmb_inputs, seasonalann.nmb_outputs
    (2, 3)
    """
    # pylint: disable=unsupported-assignment-operation
    if self._do_refresh:
        if self.anns:
            self.__sann = annutils.SeasonalANN(self.anns)
            setattr(self.fastaccess, self.name, self._sann)
            self._set_shape((None, self._sann.nmb_anns))
            if self._sann.nmb_anns > 1:
                self._interp()
            else:
                self._sann.ratios[:, 0] = 1.
            self.verify()
        else:
            self.__sann = None
Prepare the actual |anntools.SeasonalANN| object for calculations. Despite all automated refreshing explained in the general documentation on class |anntools.SeasonalANN|, it is still possible to destroy the inner consistency of a |anntools.SeasonalANN| instance, as it stores its |anntools.ANN| objects by reference. This is shown by the following example: >>> from hydpy import SeasonalANN, ann >>> seasonalann = SeasonalANN(None) >>> seasonalann.simulationstep = '1d' >>> jan = ann(nmb_inputs=1, nmb_neurons=(1,), nmb_outputs=1, ... weights_input=0.0, weights_output=0.0, ... intercepts_hidden=0.0, intercepts_output=1.0) >>> seasonalann(_1_1_12=jan) >>> jan.nmb_inputs, jan.nmb_outputs = 2, 3 >>> jan.nmb_inputs, jan.nmb_outputs (2, 3) >>> seasonalann.nmb_inputs, seasonalann.nmb_outputs (1, 1) Due to the C level implementation of the mathematical core of both |anntools.ANN| and |anntools.SeasonalANN| in module |annutils|, such an inconsistency might result in a program crash without any informative error message. Whenever you are afraid some inconsistency might have crept in, and you want to repair it, call method |anntools.SeasonalANN.refresh| explicitly: >>> seasonalann.refresh() >>> jan.nmb_inputs, jan.nmb_outputs (2, 3) >>> seasonalann.nmb_inputs, seasonalann.nmb_outputs (2, 3)
python
train
dmwm/DBS
Client/src/python/dbs/apis/dbsClient.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Client/src/python/dbs/apis/dbsClient.py#L212-L228
def __parseForException(self, http_error):
    """
    An internal method, should not be used by clients

    :param http_error: HTTPError thrown by the server
    """
    data = http_error.body
    try:
        if isinstance(data, str):
            data = cjson.decode(data)
    except:
        raise http_error

    if isinstance(data, dict) and 'exception' in data:  # re-raise with more details
        raise HTTPError(http_error.url, data['exception'], data['message'],
                        http_error.header, http_error.body)

    raise http_error
An internal method, should not be used by clients :param http_error: HTTPError thrown by the server
python
train
mapillary/mapillary_tools
mapillary_tools/exif_aux.py
https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/exif_aux.py#L35-L44
def verify_exif(filename):
    '''
    Check that image file has the required EXIF fields.

    Incompatible files will be ignored server side.
    '''
    # required tags in IFD name convention
    required_exif = required_fields()
    exif = ExifRead(filename)
    required_exif_exist = exif.fields_exist(required_exif)
    return required_exif_exist
Check that image file has the required EXIF fields. Incompatible files will be ignored server side.
python
train
saltstack/salt
salt/client/ssh/wrapper/grains.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/grains.py#L177-L286
def filter_by(lookup_dict, grain='os_family', merge=None, default='default', base=None):
    '''
    .. versionadded:: 0.17.0

    Look up the given grain in a given dictionary for the current OS and return
    the result

    Although this may occasionally be useful at the CLI, the primary intent of
    this function is for use in Jinja to make short work of creating lookup
    tables for OS-specific data. For example:

    .. code-block:: jinja

        {% set apache = salt['grains.filter_by']({
            'Debian': {'pkg': 'apache2', 'srv': 'apache2'},
            'RedHat': {'pkg': 'httpd', 'srv': 'httpd'},
        }), default='Debian' %}

        myapache:
          pkg.installed:
            - name: {{ apache.pkg }}
          service.running:
            - name: {{ apache.srv }}

    Values in the lookup table may be overridden by values in Pillar. An
    example Pillar to override values in the example above could be as follows:

    .. code-block:: yaml

        apache:
          lookup:
            pkg: apache_13
            srv: apache

    The call to ``filter_by()`` would be modified as follows to reference those
    Pillar values:

    .. code-block:: jinja

        {% set apache = salt['grains.filter_by']({
            ...
        }, merge=salt['pillar.get']('apache:lookup')) %}

    :param lookup_dict: A dictionary, keyed by a grain, containing a value or
        values relevant to systems matching that grain. For example, a key
        could be the grain for an OS and the value could be the name of a
        package on that particular OS.

    :param grain: The name of a grain to match with the current system's
        grains. For example, the value of the "os_family" grain for the
        current system could be used to pull values from the ``lookup_dict``
        dictionary.

    :param merge: A dictionary to merge with the ``lookup_dict`` before doing
        the lookup. This allows Pillar to override the values in the
        ``lookup_dict``. This could be useful, for example, to override the
        values for non-standard package names such as when using a different
        Python version from the default Python version provided by the OS
        (e.g., ``python26-mysql`` instead of ``python-mysql``).

    :param default: default lookup_dict's key used if the grain does not exist
        or if the grain value has no match on lookup_dict.

        .. versionadded:: 2014.1.0

    :param base: A lookup_dict key to use for a base dictionary. The
        grain-selected ``lookup_dict`` is merged over this and then finally
        the ``merge`` dictionary is merged. This allows common values for
        each case to be collected in the base and overridden by the grain
        selection dictionary and the merge dictionary. Default is None.

        .. versionadded:: 2015.8.11,2016.3.2

    CLI Example:

    .. code-block:: bash

        salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}'
        # this one will render {D: {E: I, G: H}, J: K}
        salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
    '''
    ret = lookup_dict.get(
        __grains__.get(
            grain, default),
        lookup_dict.get(
            default, None)
    )

    if base and base in lookup_dict:
        base_values = lookup_dict[base]
        if ret is None:
            ret = base_values

        elif isinstance(base_values, collections.Mapping):
            if not isinstance(ret, collections.Mapping):
                raise SaltException('filter_by default and look-up values must both be dictionaries.')
            ret = salt.utils.dictupdate.update(copy.deepcopy(base_values), ret)

    if merge:
        if not isinstance(merge, collections.Mapping):
            raise SaltException('filter_by merge argument must be a dictionary.')
        else:
            if ret is None:
                ret = merge
            else:
                salt.utils.dictupdate.update(ret, merge)

    return ret
.. versionadded:: 0.17.0 Look up the given grain in a given dictionary for the current OS and return the result Although this may occasionally be useful at the CLI, the primary intent of this function is for use in Jinja to make short work of creating lookup tables for OS-specific data. For example: .. code-block:: jinja {% set apache = salt['grains.filter_by']({ 'Debian': {'pkg': 'apache2', 'srv': 'apache2'}, 'RedHat': {'pkg': 'httpd', 'srv': 'httpd'}, }), default='Debian' %} myapache: pkg.installed: - name: {{ apache.pkg }} service.running: - name: {{ apache.srv }} Values in the lookup table may be overridden by values in Pillar. An example Pillar to override values in the example above could be as follows: .. code-block:: yaml apache: lookup: pkg: apache_13 srv: apache The call to ``filter_by()`` would be modified as follows to reference those Pillar values: .. code-block:: jinja {% set apache = salt['grains.filter_by']({ ... }, merge=salt['pillar.get']('apache:lookup')) %} :param lookup_dict: A dictionary, keyed by a grain, containing a value or values relevant to systems matching that grain. For example, a key could be the grain for an OS and the value could be the name of a package on that particular OS. :param grain: The name of a grain to match with the current system's grains. For example, the value of the "os_family" grain for the current system could be used to pull values from the ``lookup_dict`` dictionary. :param merge: A dictionary to merge with the ``lookup_dict`` before doing the lookup. This allows Pillar to override the values in the ``lookup_dict``. This could be useful, for example, to override the values for non-standard package names such as when using a different Python version from the default Python version provided by the OS (e.g., ``python26-mysql`` instead of ``python-mysql``). :param default: default lookup_dict's key used if the grain does not exist or if the grain value has no match on lookup_dict. .. versionadded:: 2014.1.0 :param base: A lookup_dict key to use for a base dictionary. The grain-selected ``lookup_dict`` is merged over this and then finally the ``merge`` dictionary is merged. This allows common values for each case to be collected in the base and overridden by the grain selection dictionary and the merge dictionary. Default is None. .. versionadded:: 2015.8.11,2016.3.2 CLI Example: .. code-block:: bash salt '*' grains.filter_by '{Debian: Debheads rule, RedHat: I love my hat}' # this one will render {D: {E: I, G: H}, J: K} salt '*' grains.filter_by '{A: B, C: {D: {E: F,G: H}}}' 'xxx' '{D: {E: I},J: K}' 'C'
python
train
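Stripped of the Salt plumbing, the precedence is: base entry first, then the grain-selected entry, then merge last. A pure-dict sketch of that layering with invented data:

import copy

lookup_dict = {
    'base':   {'pkg': 'httpd', 'srv': 'httpd'},
    'Debian': {'pkg': 'apache2'},
}
grain_value = 'Debian'             # what __grains__ would supply
merge = {'srv': 'apache2-custom'}  # what Pillar would supply

ret = copy.deepcopy(lookup_dict['base'])      # base layer
ret.update(lookup_dict.get(grain_value, {}))  # grain selection overrides base
ret.update(merge)                             # merge wins last
print(ret)  # {'pkg': 'apache2', 'srv': 'apache2-custom'}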
twisted/txacme
src/txacme/client.py
https://github.com/twisted/txacme/blob/9478381cc63c6d53d14bf8db8407c923f472989a/src/txacme/client.py#L322-L328
def _check_authorization(cls, authzr, identifier):
    """
    Check that the authorization we got is the one we expected.
    """
    if authzr.body.identifier != identifier:
        raise errors.UnexpectedUpdate(authzr)
    return authzr
Check that the authorization we got is the one we expected.
python
train
jenisys/parse_type
parse_type/builder.py
https://github.com/jenisys/parse_type/blob/7cad3a67a5ca725cb786da31f656fd473084289f/parse_type/builder.py#L83-L95
def make_list(cls, item_converter=None, listsep=','):
    """
    Create a type converter for a list of items (many := 1..*).
    The parser accepts anything and the converter needs to fail on errors.

    :param item_converter:  Type converter for an item.
    :param listsep:  List separator to use (as string).
    :return: Type converter function object for the list.
    """
    if not item_converter:
        item_converter = parse_anything
    return cls.with_cardinality(Cardinality.many, item_converter,
                                pattern=cls.anything_pattern,
                                listsep=listsep)
Create a type converter for a list of items (many := 1..*). The parser accepts anything and the converter needs to fail on errors. :param item_converter: Type converter for an item. :param listsep: List separator to use (as string). :return: Type converter function object for the list.
python
train
jeroyang/txttk
txttk/retools.py
https://github.com/jeroyang/txttk/blob/8e6daf9cbb7dfbc4900870fb365add17929bd4ab/txttk/retools.py#L159-L175
def parallel(regex_list, sort=False):
    """
    Join the given regexes using r'|'
    If sort=True, regexes will be sorted by length before processing

    >>> parallel([r'abc', r'def'])
    'abc|def'
    >>> parallel([r'abc', r'd|ef'])
    'abc|def'
    >>> parallel([r'abc', r'(d|ef)'])
    'abc|d|ef'
    >>> parallel([r'abc', r'defg'])
    'defg|abc'
    """
    if sort:
        regex_list = sorted(regex_list, key=len, reverse=True)
    return '|'.join([unpack(regex) for regex in regex_list])
Join the given regexes using r'|' If sort=True, regexes will be sorted by length before processing >>> parallel([r'abc', r'def']) 'abc|def' >>> parallel([r'abc', r'd|ef']) 'abc|def' >>> parallel([r'abc', r'(d|ef)']) 'abc|d|ef' >>> parallel([r'abc', r'defg']) 'defg|abc'
python
train
Chilipp/psy-simple
psy_simple/plotters.py
https://github.com/Chilipp/psy-simple/blob/7d916406a6d3c3c27c0b7102f98fef07a4da0a61/psy_simple/plotters.py#L4596-L4603
def get_xyz_1d(self, xcoord, x, ycoord, y, u, v):
    """Get closest x, y and z for the given `x` and `y` in `data` for
    1d coords"""
    xclose = xcoord.indexes[xcoord.name].get_loc(x, method='nearest')
    yclose = ycoord.indexes[ycoord.name].get_loc(y, method='nearest')
    uval = u[yclose, xclose].values
    vval = v[yclose, xclose].values
    return xcoord[xclose].values, ycoord[yclose].values, uval, vval
Get closest x, y and z for the given `x` and `y` in `data` for 1d coords
python
train
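The nearest-neighbour lookup is plain pandas. A sketch of the call as used above; note it relies on the method= keyword of Index.get_loc, which newer pandas (2.0+) removed, so this reflects the older API the plotter targets:

import pandas as pd

idx = pd.Index([0.0, 1.0, 2.0, 3.0])
pos = idx.get_loc(1.4, method='nearest')  # pre-2.0 pandas keyword
print(pos)                                # 1: position of the label closest to 1.4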
django-treebeard/django-treebeard
treebeard/mp_tree.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/mp_tree.py#L542-L568
def update_move_to_child_vars(self):
    """Update preliminary vars in :meth:`move` when moving to a child"""
    newdepth = self.target.depth
    newpos = None
    siblings = []
    if self.pos in ('first-child', 'last-child', 'sorted-child'):
        # moving to a child
        parent = self.target
        newdepth += 1
        if self.target.is_leaf():
            # moving as a target's first child
            newpos = 1
            self.pos = 'first-sibling'
            siblings = get_result_class(self.node_cls).objects.none()
        else:
            self.target = self.target.get_last_child()
            self.pos = {
                'first-child': 'first-sibling',
                'last-child': 'last-sibling',
                'sorted-child': 'sorted-sibling'}[self.pos]

        # this is not for save(), since if needed, will be handled with a
        # custom UPDATE, this is only here to update django's object,
        # should be useful in loops
        parent.numchild += 1
    return newdepth, siblings, newpos
Update preliminary vars in :meth:`move` when moving to a child
[ "Update", "preliminary", "vars", "in", ":", "meth", ":", "move", "when", "moving", "to", "a", "child" ]
python
train
DataKitchen/DKCloudCommand
DKCloudCommand/modules/DKCloudCommandRunner.py
https://github.com/DataKitchen/DKCloudCommand/blob/1cf9cb08ab02f063eef6b5c4b327af142991daa3/DKCloudCommand/modules/DKCloudCommandRunner.py#L566-L588
def is_subdirectory(potential_subdirectory, expected_parent_directory): """ Is the first argument a sub-directory of the second argument? :param potential_subdirectory: :param expected_parent_directory: :return: True if the potential_subdirectory is a child of the expected parent directory """ def _get_normalized_parts(path): return DKCloudCommandRunner.os_path_split_asunder(os.path.realpath(os.path.abspath(os.path.normpath(path)))) # make absolute and handle symbolic links, split into components sub_parts = _get_normalized_parts(potential_subdirectory) parent_parts = _get_normalized_parts(expected_parent_directory) if len(parent_parts) > len(sub_parts): # a parent directory never has more path segments than its child return False # we expect the zip to end with the short path, which we know to be the parent return all(part1 == part2 for part1, part2 in zip(sub_parts, parent_parts))
[ "def", "is_subdirectory", "(", "potential_subdirectory", ",", "expected_parent_directory", ")", ":", "def", "_get_normalized_parts", "(", "path", ")", ":", "return", "DKCloudCommandRunner", ".", "os_path_split_asunder", "(", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "normpath", "(", "path", ")", ")", ")", ")", "# make absolute and handle symbolic links, split into components", "sub_parts", "=", "_get_normalized_parts", "(", "potential_subdirectory", ")", "parent_parts", "=", "_get_normalized_parts", "(", "expected_parent_directory", ")", "if", "len", "(", "parent_parts", ")", ">", "len", "(", "sub_parts", ")", ":", "# a parent directory never has more path segments than its child", "return", "False", "# we expect the zip to end with the short path, which we know to be the parent", "return", "all", "(", "part1", "==", "part2", "for", "part1", ",", "part2", "in", "zip", "(", "sub_parts", ",", "parent_parts", ")", ")" ]
Is the first argument a sub-directory of the second argument? :param potential_subdirectory: :param expected_parent_directory: :return: True if the potential_subdirectory is a child of the expected parent directory
[ "Is", "the", "first", "argument", "a", "sub", "-", "directory", "of", "the", "second", "argument?" ]
python
train
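A self-contained sketch of the same parent/child test using only the standard library, where os.path.commonpath replaces the hand-rolled zip comparison (paths are made up):

import os

def is_subdirectory(child, parent):
    # normalise exactly as above: absolute, symlink-free paths
    child = os.path.realpath(os.path.abspath(child))
    parent = os.path.realpath(os.path.abspath(parent))
    return os.path.commonpath([child, parent]) == parent

print(is_subdirectory('/tmp/a/b', '/tmp/a'))  # True
print(is_subdirectory('/tmp/ab', '/tmp/a'))   # False -- no partial segment match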
softlayer/softlayer-python
SoftLayer/CLI/object_storage/credential/create.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/object_storage/credential/create.py#L14-L28
def cli(env, identifier): """Create credentials for an IBM Cloud Object Storage Account""" mgr = SoftLayer.ObjectStorageManager(env.client) credential = mgr.create_credential(identifier) table = formatting.Table(['id', 'password', 'username', 'type_name']) table.sortby = 'id' table.add_row([ credential['id'], credential['password'], credential['username'], credential['type']['name'] ]) env.fout(table)
[ "def", "cli", "(", "env", ",", "identifier", ")", ":", "mgr", "=", "SoftLayer", ".", "ObjectStorageManager", "(", "env", ".", "client", ")", "credential", "=", "mgr", ".", "create_credential", "(", "identifier", ")", "table", "=", "formatting", ".", "Table", "(", "[", "'id'", ",", "'password'", ",", "'username'", ",", "'type_name'", "]", ")", "table", ".", "sortby", "=", "'id'", "table", ".", "add_row", "(", "[", "credential", "[", "'id'", "]", ",", "credential", "[", "'password'", "]", ",", "credential", "[", "'username'", "]", ",", "credential", "[", "'type'", "]", "[", "'name'", "]", "]", ")", "env", ".", "fout", "(", "table", ")" ]
Create credentials for an IBM Cloud Object Storage Account
[ "Create", "credentials", "for", "an", "IBM", "Cloud", "Object", "Storage", "Account" ]
python
train
facelessuser/soupsieve
soupsieve/css_match.py
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/css_match.py#L652-L674
def match_past_relations(self, el, relation): """Match past relationship.""" found = False if relation[0].rel_type == REL_PARENT: parent = self.get_parent(el, no_iframe=self.iframe_restrict) while not found and parent: found = self.match_selectors(parent, relation) parent = self.get_parent(parent, no_iframe=self.iframe_restrict) elif relation[0].rel_type == REL_CLOSE_PARENT: parent = self.get_parent(el, no_iframe=self.iframe_restrict) if parent: found = self.match_selectors(parent, relation) elif relation[0].rel_type == REL_SIBLING: sibling = self.get_previous_tag(el) while not found and sibling: found = self.match_selectors(sibling, relation) sibling = self.get_previous_tag(sibling) elif relation[0].rel_type == REL_CLOSE_SIBLING: sibling = self.get_previous_tag(el) if sibling and self.is_tag(sibling): found = self.match_selectors(sibling, relation) return found
[ "def", "match_past_relations", "(", "self", ",", "el", ",", "relation", ")", ":", "found", "=", "False", "if", "relation", "[", "0", "]", ".", "rel_type", "==", "REL_PARENT", ":", "parent", "=", "self", ".", "get_parent", "(", "el", ",", "no_iframe", "=", "self", ".", "iframe_restrict", ")", "while", "not", "found", "and", "parent", ":", "found", "=", "self", ".", "match_selectors", "(", "parent", ",", "relation", ")", "parent", "=", "self", ".", "get_parent", "(", "parent", ",", "no_iframe", "=", "self", ".", "iframe_restrict", ")", "elif", "relation", "[", "0", "]", ".", "rel_type", "==", "REL_CLOSE_PARENT", ":", "parent", "=", "self", ".", "get_parent", "(", "el", ",", "no_iframe", "=", "self", ".", "iframe_restrict", ")", "if", "parent", ":", "found", "=", "self", ".", "match_selectors", "(", "parent", ",", "relation", ")", "elif", "relation", "[", "0", "]", ".", "rel_type", "==", "REL_SIBLING", ":", "sibling", "=", "self", ".", "get_previous_tag", "(", "el", ")", "while", "not", "found", "and", "sibling", ":", "found", "=", "self", ".", "match_selectors", "(", "sibling", ",", "relation", ")", "sibling", "=", "self", ".", "get_previous_tag", "(", "sibling", ")", "elif", "relation", "[", "0", "]", ".", "rel_type", "==", "REL_CLOSE_SIBLING", ":", "sibling", "=", "self", ".", "get_previous_tag", "(", "el", ")", "if", "sibling", "and", "self", ".", "is_tag", "(", "sibling", ")", ":", "found", "=", "self", ".", "match_selectors", "(", "sibling", ",", "relation", ")", "return", "found" ]
Match past relationship.
[ "Match", "past", "relationship", "." ]
python
train
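The four rel_types above correspond to the CSS descendant, child, general-sibling and adjacent-sibling combinators; a short usage sketch with soupsieve's public select() (the markup is made up):

import soupsieve as sv
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div><p>a</p><span>b</span></div>', 'html.parser')
print(sv.select('div span', soup))  # ancestor walk, like REL_PARENT
print(sv.select('div > p', soup))   # direct parent, like REL_CLOSE_PARENT
print(sv.select('p ~ span', soup))  # any preceding sibling, like REL_SIBLING
print(sv.select('p + span', soup))  # immediate sibling, like REL_CLOSE_SIBLING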
vtemian/buffpy
buffpy/managers/updates.py
https://github.com/vtemian/buffpy/blob/6c9236fd3b6a8f9e2d70dbf1bc01529242b73075/buffpy/managers/updates.py#L79-L99
def reorder(self, updates_ids, offset=None, utc=None):
    '''
      Edit the order in which statuses for the specified social media profile will
      be sent out of the buffer.
    '''

    url = PATHS['REORDER'] % self.profile_id

    order_format = "order[]=%s&"
    post_data = ''

    if offset:
      post_data += 'offset=%s&' % offset

    if utc:
      post_data += 'utc=%s&' % utc

    for update in updates_ids:
      post_data += order_format % update

    return self.api.post(url=url, data=post_data)
[ "def", "reorder", "(", "self", ",", "updates_ids", ",", "offset", "=", "None", ",", "utc", "=", "None", ")", ":", "url", "=", "PATHS", "[", "'REORDER'", "]", "%", "self", ".", "profile_id", "order_format", "=", "\"order[]=%s&\"", "post_data", "=", "''", "if", "offset", ":", "post_data", "+=", "'offset=%s&'", "%", "offset", "if", "utc", ":", "post_data", "+=", "'utc=%s&'", "%", "utc", "for", "update", "in", "updates_ids", ":", "post_data", "+=", "order_format", "%", "update", "return", "self", ".", "api", ".", "post", "(", "url", "=", "url", ",", "data", "=", "post_data", ")" ]
Edit the order in which statuses for the specified social media profile will be sent out of the buffer.
[ "Edit", "the", "order", "in", "which", "statuses", "for", "the", "specified", "social", "media", "profile", "will", "be", "sent", "out", "of", "the", "buffer", "." ]
python
valid
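The hand-concatenated body above is ordinary form encoding; an equivalent sketch with the standard library (ids and offset are made up):

from urllib.parse import urlencode

updates_ids = ['id1', 'id2', 'id3']
pairs = [('offset', 5)] + [('order[]', u) for u in updates_ids]
# urlencode percent-encodes the brackets, which form parsers accept
print(urlencode(pairs))  # offset=5&order%5B%5D=id1&order%5B%5D=id2&order%5B%5D=id3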
tanghaibao/jcvi
jcvi/utils/progressbar.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/utils/progressbar.py#L592-L618
def start(self): '''Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish() ''' if self.maxval is None: self.maxval = self._DEFAULT_MAXVAL self.num_intervals = max(100, self.term_width) self.next_update = 0 if self.maxval is not UnknownLength: if self.maxval < 0: raise ValueError('Value out of range') self.update_interval = self.maxval / self.num_intervals self.start_time = self.last_update_time = time.time() self.update(0) return self
[ "def", "start", "(", "self", ")", ":", "if", "self", ".", "maxval", "is", "None", ":", "self", ".", "maxval", "=", "self", ".", "_DEFAULT_MAXVAL", "self", ".", "num_intervals", "=", "max", "(", "100", ",", "self", ".", "term_width", ")", "self", ".", "next_update", "=", "0", "if", "self", ".", "maxval", "is", "not", "UnknownLength", ":", "if", "self", ".", "maxval", "<", "0", ":", "raise", "ValueError", "(", "'Value out of range'", ")", "self", ".", "update_interval", "=", "self", ".", "maxval", "/", "self", ".", "num_intervals", "self", ".", "start_time", "=", "self", ".", "last_update_time", "=", "time", ".", "time", "(", ")", "self", ".", "update", "(", "0", ")", "return", "self" ]
Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish()
[ "Starts", "measuring", "time", "and", "prints", "the", "bar", "at", "0%", "." ]
python
train
zyga/json-schema-validator
json_schema_validator/schema.py
https://github.com/zyga/json-schema-validator/blob/0504605da5c0a9a5b5b05c41b37661aec9652144/json_schema_validator/schema.py#L236-L257
def pattern(self):
        """
        Regular expression describing valid objects.

        .. note::
            The JSON schema specification says that this value SHOULD
            follow the ``ECMA 262/Perl 5`` format. We cannot support
            this so we support python regular expressions instead. This
            is still valid but should be noted for clarity.

        :returns: None or compiled regular expression
        """
        value = self._schema.get("pattern", None)
        if value is None:
            return
        try:
            return re.compile(value)
        except re.error as ex:
            raise SchemaError(
                "pattern value {0!r} is not a valid regular expression:"
                " {1}".format(value, str(ex)))
[ "def", "pattern", "(", "self", ")", ":", "value", "=", "self", ".", "_schema", ".", "get", "(", "\"pattern\"", ",", "None", ")", "if", "value", "is", "None", ":", "return", "try", ":", "return", "re", ".", "compile", "(", "value", ")", "except", "re", ".", "error", "as", "ex", ":", "raise", "SchemaError", "(", "\"pattern value {0!r} is not a valid regular expression:\"", "\" {1}\"", ".", "format", "(", "value", ",", "str", "(", "ex", ")", ")", ")" ]
Regular expression describing valid objects. .. note:: The JSON schema specification says that this value SHOULD follow the ``ECMA 262/Perl 5`` format. We cannot support this so we support python regular expressions instead. This is still valid but should be noted for clarity. :returns: None or compiled regular expression
[ "Regular", "expression", "describing", "valid", "objects", "." ]
python
train
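A minimal sketch of the same contract outside the class: compile the schema's "pattern" value and surface an error on a bad expression (ValueError stands in for the library's SchemaError):

import re

def compile_pattern(schema):
    value = schema.get("pattern")
    if value is None:
        return None
    try:
        return re.compile(value)
    except re.error as ex:
        raise ValueError("pattern value %r is not a valid regular "
                         "expression: %s" % (value, ex))

print(compile_pattern({"pattern": "^a+$"}).match("aaa") is not None)  # True
# compile_pattern({"pattern": "("}) would raise ValueError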
JensAstrup/pyOutlook
pyOutlook/core/folder.py
https://github.com/JensAstrup/pyOutlook/blob/f4ca9d4a8629c0a41f78102ce84fab702a841167/pyOutlook/core/folder.py#L90-L102
def delete(self): """Deletes this Folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token. """ headers = self.headers endpoint = 'https://outlook.office.com/api/v2.0/me/MailFolders/' + self.id r = requests.delete(endpoint, headers=headers) check_response(r)
[ "def", "delete", "(", "self", ")", ":", "headers", "=", "self", ".", "headers", "endpoint", "=", "'https://outlook.office.com/api/v2.0/me/MailFolders/'", "+", "self", ".", "id", "r", "=", "requests", ".", "delete", "(", "endpoint", ",", "headers", "=", "headers", ")", "check_response", "(", "r", ")" ]
Deletes this Folder. Raises: AuthError: Raised if Outlook returns a 401, generally caused by an invalid or expired access token.
[ "Deletes", "this", "Folder", "." ]
python
train
pingali/dgit
dgitcore/config.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/config.py#L110-L211
def update(globalvars): """ Update the profile """ global config profileini = getprofileini() config = configparser.ConfigParser() config.read(profileini) defaults = {} if globalvars is not None: defaults = {a[0]: a[1] for a in globalvars } # Generic variables to be captured... generic_configs = [{ 'name': 'User', 'nature': 'generic', 'description': "General information", 'variables': ['user.email', 'user.name', 'user.fullname'], 'defaults': { 'user.email': { 'value': defaults.get('user.email',''), 'description': "Email address", 'validator': EmailValidator() }, 'user.fullname': { 'value': defaults.get('user.fullname',''), 'description': "Full Name", 'validator': NonEmptyValidator() }, 'user.name': { 'value': defaults.get('user.name', getpass.getuser()), 'description': "Name", 'validator': NonEmptyValidator() }, } }] # Gather configuration requirements from all plugins mgr = plugins_get_mgr() extra_configs = mgr.gather_configs() allconfigs = generic_configs + extra_configs # Read the existing config and update the defaults for c in allconfigs: name = c['name'] for v in c['variables']: try: c['defaults'][v]['value'] = config[name][v] except: continue for c in allconfigs: print("") print(c['description']) print("==================") if len(c['variables']) == 0: print("Nothing to do. Enabled by default") continue name = c['name'] config[name] = {} config[name]['nature'] = c['nature'] for v in c['variables']: # defaults value = '' description = v + " " helptext = "" validator = None # Look up pre-set values if v in c['defaults']: value = c['defaults'][v].get('value','') helptext = c['defaults'][v].get("description","") validator = c['defaults'][v].get('validator',None) if helptext != "": description += "(" + helptext + ")" # Get user input.. while True: choice = input_with_default(description, value) if validator is not None: if validator.is_valid(choice): break else: print("Invalid input. Expected input is {}".format(validator.message)) else: break config[name][v] = choice if v == 'enable' and choice == 'n': break with open(profileini, 'w') as fd: config.write(fd) print("Updated profile file:", config)
[ "def", "update", "(", "globalvars", ")", ":", "global", "config", "profileini", "=", "getprofileini", "(", ")", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "profileini", ")", "defaults", "=", "{", "}", "if", "globalvars", "is", "not", "None", ":", "defaults", "=", "{", "a", "[", "0", "]", ":", "a", "[", "1", "]", "for", "a", "in", "globalvars", "}", "# Generic variables to be captured...", "generic_configs", "=", "[", "{", "'name'", ":", "'User'", ",", "'nature'", ":", "'generic'", ",", "'description'", ":", "\"General information\"", ",", "'variables'", ":", "[", "'user.email'", ",", "'user.name'", ",", "'user.fullname'", "]", ",", "'defaults'", ":", "{", "'user.email'", ":", "{", "'value'", ":", "defaults", ".", "get", "(", "'user.email'", ",", "''", ")", ",", "'description'", ":", "\"Email address\"", ",", "'validator'", ":", "EmailValidator", "(", ")", "}", ",", "'user.fullname'", ":", "{", "'value'", ":", "defaults", ".", "get", "(", "'user.fullname'", ",", "''", ")", ",", "'description'", ":", "\"Full Name\"", ",", "'validator'", ":", "NonEmptyValidator", "(", ")", "}", ",", "'user.name'", ":", "{", "'value'", ":", "defaults", ".", "get", "(", "'user.name'", ",", "getpass", ".", "getuser", "(", ")", ")", ",", "'description'", ":", "\"Name\"", ",", "'validator'", ":", "NonEmptyValidator", "(", ")", "}", ",", "}", "}", "]", "# Gather configuration requirements from all plugins", "mgr", "=", "plugins_get_mgr", "(", ")", "extra_configs", "=", "mgr", ".", "gather_configs", "(", ")", "allconfigs", "=", "generic_configs", "+", "extra_configs", "# Read the existing config and update the defaults", "for", "c", "in", "allconfigs", ":", "name", "=", "c", "[", "'name'", "]", "for", "v", "in", "c", "[", "'variables'", "]", ":", "try", ":", "c", "[", "'defaults'", "]", "[", "v", "]", "[", "'value'", "]", "=", "config", "[", "name", "]", "[", "v", "]", "except", ":", "continue", "for", "c", "in", "allconfigs", ":", "print", "(", "\"\"", ")", "print", "(", "c", "[", "'description'", "]", ")", "print", "(", "\"==================\"", ")", "if", "len", "(", "c", "[", "'variables'", "]", ")", "==", "0", ":", "print", "(", "\"Nothing to do. Enabled by default\"", ")", "continue", "name", "=", "c", "[", "'name'", "]", "config", "[", "name", "]", "=", "{", "}", "config", "[", "name", "]", "[", "'nature'", "]", "=", "c", "[", "'nature'", "]", "for", "v", "in", "c", "[", "'variables'", "]", ":", "# defaults", "value", "=", "''", "description", "=", "v", "+", "\" \"", "helptext", "=", "\"\"", "validator", "=", "None", "# Look up pre-set values", "if", "v", "in", "c", "[", "'defaults'", "]", ":", "value", "=", "c", "[", "'defaults'", "]", "[", "v", "]", ".", "get", "(", "'value'", ",", "''", ")", "helptext", "=", "c", "[", "'defaults'", "]", "[", "v", "]", ".", "get", "(", "\"description\"", ",", "\"\"", ")", "validator", "=", "c", "[", "'defaults'", "]", "[", "v", "]", ".", "get", "(", "'validator'", ",", "None", ")", "if", "helptext", "!=", "\"\"", ":", "description", "+=", "\"(\"", "+", "helptext", "+", "\")\"", "# Get user input..", "while", "True", ":", "choice", "=", "input_with_default", "(", "description", ",", "value", ")", "if", "validator", "is", "not", "None", ":", "if", "validator", ".", "is_valid", "(", "choice", ")", ":", "break", "else", ":", "print", "(", "\"Invalid input. 
Expected input is {}\"", ".", "format", "(", "validator", ".", "message", ")", ")", "else", ":", "break", "config", "[", "name", "]", "[", "v", "]", "=", "choice", "if", "v", "==", "'enable'", "and", "choice", "==", "'n'", ":", "break", "with", "open", "(", "profileini", ",", "'w'", ")", "as", "fd", ":", "config", ".", "write", "(", "fd", ")", "print", "(", "\"Updated profile file:\"", ",", "config", ")" ]
Update the profile
[ "Update", "the", "profile" ]
python
valid
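At its core the profile update is a configparser round-trip; a minimal sketch of that read-modify-write pattern (file name and keys are made up):

import configparser

config = configparser.ConfigParser()
config.read('dgit.ini')  # a missing file is silently skipped
config['User'] = {
    'nature': 'generic',
    'user.name': 'alice',
    'user.email': 'alice@example.com',
}
with open('dgit.ini', 'w') as fd:
    config.write(fd)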
ellmetha/django-machina
machina/apps/forum_moderation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L301-L303
def perform_permissions_check(self, user, obj, perms): """ Performs the permissions check. """ return self.request.forum_permission_handler.can_update_topics_to_normal_topics(obj, user)
[ "def", "perform_permissions_check", "(", "self", ",", "user", ",", "obj", ",", "perms", ")", ":", "return", "self", ".", "request", ".", "forum_permission_handler", ".", "can_update_topics_to_normal_topics", "(", "obj", ",", "user", ")" ]
Performs the permissions check.
[ "Performs", "the", "permissions", "check", "." ]
python
train
sirfoga/pyhal
hal/help.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/help.py#L76-L84
def as_sql(self):
        """Gets report as SQL

        :return: SQL-formatted report
        """
        labels, data = self._get_table()
        table = SqlTable(labels, data, "{:.3f}", "\n")
        return str(table)
[ "def", "as_sql", "(", "self", ")", ":", "labels", ",", "data", "=", "self", ".", "_get_table", "(", ")", "table", "=", "SqlTable", "(", "labels", ",", "data", ",", "\"{:.3f}\"", ",", "\"\\n\"", ")", "return", "str", "(", "table", ")" ]
Gets report as SQL :return: SQL-formatted report
[ "Gets", "report", "as", "SQL" ]
python
train
cloud-custodian/cloud-custodian
tools/c7n_policystream/policystream.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/c7n_policystream/policystream.py#L901-L949
def stream(repo_uri, stream_uri, verbose, assume, sort, before=None, after=None):
    """Stream git history policy changes to destination.


    Default stream destination is a summary of the policy changes to stdout, one
    per line. Also supported for stdout streaming is `jsonline`.

    AWS Kinesis and SQS destinations are specified by providing the ARN.

    Database destinations are supported by providing a sqlalchemy DSN. Note
    SQLAlchemy and db drivers must be installed separately as they are an
    optional dependency.

    When using database destinations, streaming defaults to incremental.
    """
    logging.basicConfig(
        format="%(asctime)s: %(name)s:%(levelname)s %(message)s",
        level=(verbose and logging.DEBUG or logging.INFO))
    logging.getLogger('botocore').setLevel(logging.WARNING)

    if before:
        before = parse(before)
    if after:
        after = parse(after)
    if sort:
        sort = six.moves.reduce(operator.or_, [SORT_TYPE[s] for s in sort])

    with contextlib.closing(TempDir().open()) as temp_dir:
        if repo_uri is None:
            repo_uri = pygit2.discover_repository(os.getcwd())
            log.debug("Using repository %s", repo_uri)
        if repo_uri.startswith('http') or repo_uri.startswith('git@'):
            log.info("Cloning repository: %s", repo_uri)
            repo = pygit2.clone_repository(repo_uri, temp_dir.path)
        else:
            repo = pygit2.Repository(repo_uri)
        load_resources()
        policy_repo = PolicyRepo(repo_uri, repo)
        change_count = 0

        with contextlib.closing(transport(stream_uri, assume)) as t:
            if after is None and isinstance(t, IndexedTransport):
                after = t.last()
            for change in policy_repo.delta_stream(after=after, before=before):
                change_count += 1
                t.send(change)

        log.info("Streamed %d policy repo changes", change_count)
    return change_count
[ "def", "stream", "(", "repo_uri", ",", "stream_uri", ",", "verbose", ",", "assume", ",", "sort", ",", "before", "=", "None", ",", "after", "=", "None", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "\"%(asctime)s: %(name)s:%(levelname)s %(message)s\"", ",", "level", "=", "(", "verbose", "and", "logging", ".", "DEBUG", "or", "logging", ".", "INFO", ")", ")", "logging", ".", "getLogger", "(", "'botocore'", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "if", "before", ":", "before", "=", "parse", "(", "before", ")", "if", "after", ":", "after", "=", "parse", "(", "after", ")", "if", "sort", ":", "sort", "=", "six", ".", "moves", ".", "reduce", "(", "operator", ".", "or_", ",", "[", "SORT_TYPE", "[", "s", "]", "for", "s", "in", "sort", "]", ")", "with", "contextlib", ".", "closing", "(", "TempDir", "(", ")", ".", "open", "(", ")", ")", "as", "temp_dir", ":", "if", "repo_uri", "is", "None", ":", "repo_uri", "=", "pygit2", ".", "discover_repository", "(", "os", ".", "getcwd", "(", ")", ")", "log", ".", "debug", "(", "\"Using repository %s\"", ",", "repo_uri", ")", "if", "repo_uri", ".", "startswith", "(", "'http'", ")", "or", "repo_uri", ".", "startswith", "(", "'git@'", ")", ":", "log", ".", "info", "(", "\"Cloning repository: %s\"", ",", "repo_uri", ")", "repo", "=", "pygit2", ".", "clone_repository", "(", "repo_uri", ",", "temp_dir", ".", "path", ")", "else", ":", "repo", "=", "pygit2", ".", "Repository", "(", "repo_uri", ")", "load_resources", "(", ")", "policy_repo", "=", "PolicyRepo", "(", "repo_uri", ",", "repo", ")", "change_count", "=", "0", "with", "contextlib", ".", "closing", "(", "transport", "(", "stream_uri", ",", "assume", ")", ")", "as", "t", ":", "if", "after", "is", "None", "and", "isinstance", "(", "t", ",", "IndexedTransport", ")", ":", "after", "=", "t", ".", "last", "(", ")", "for", "change", "in", "policy_repo", ".", "delta_stream", "(", "after", "=", "after", ",", "before", "=", "before", ")", ":", "change_count", "+=", "1", "t", ".", "send", "(", "change", ")", "log", ".", "info", "(", "\"Streamed %d policy repo changes\"", ",", "change_count", ")", "return", "change_count" ]
Stream git history policy changes to destination. Default stream destination is a summary of the policy changes to stdout, one per line. Also supported for stdout streaming is `jsonline`. AWS Kinesis and SQS destinations are specified by providing the ARN. Database destinations are supported by providing a sqlalchemy DSN. Note SQLAlchemy and db drivers must be installed separately as they are an optional dependency. When using database destinations, streaming defaults to incremental.
[ "Stream", "git", "history", "policy", "changes", "to", "destination", "." ]
python
train
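The sort handling above ORs flag values together through six; the same pattern in plain Python 3 (the flag values here are made up, not c7n's):

import operator
from functools import reduce

SORT_TYPE = {'date': 1, 'author': 2, 'path': 4}
chosen = ['date', 'path']
mask = reduce(operator.or_, (SORT_TYPE[s] for s in chosen))
print(mask)  # 5, i.e. 1 | 4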
crossbario/txaio-etcd
txaioetcd/_lease.py
https://github.com/crossbario/txaio-etcd/blob/c9aebff7f288a0b219bffc9d2579d22cf543baa5/txaioetcd/_lease.py#L146-L173
def revoke(self): """ Revokes a lease. All keys attached to the lease will expire and be deleted. :returns: Response header. :rtype: instance of :class:`txaioetcd.Header` """ if self._expired: raise Expired() obj = { # ID is the lease ID to revoke. When the ID is revoked, all # associated keys will be deleted. u'ID': self.lease_id, } data = json.dumps(obj).encode('utf8') url = u'{}/v3alpha/kv/lease/revoke'.format(self._client._url).encode() response = yield treq.post(url, data, headers=self._client._REQ_HEADERS) obj = yield treq.json_content(response) header = Header._parse(obj[u'header']) if u'header' in obj else None self._expired = True returnValue(header)
[ "def", "revoke", "(", "self", ")", ":", "if", "self", ".", "_expired", ":", "raise", "Expired", "(", ")", "obj", "=", "{", "# ID is the lease ID to revoke. When the ID is revoked, all", "# associated keys will be deleted.", "u'ID'", ":", "self", ".", "lease_id", ",", "}", "data", "=", "json", ".", "dumps", "(", "obj", ")", ".", "encode", "(", "'utf8'", ")", "url", "=", "u'{}/v3alpha/kv/lease/revoke'", ".", "format", "(", "self", ".", "_client", ".", "_url", ")", ".", "encode", "(", ")", "response", "=", "yield", "treq", ".", "post", "(", "url", ",", "data", ",", "headers", "=", "self", ".", "_client", ".", "_REQ_HEADERS", ")", "obj", "=", "yield", "treq", ".", "json_content", "(", "response", ")", "header", "=", "Header", ".", "_parse", "(", "obj", "[", "u'header'", "]", ")", "if", "u'header'", "in", "obj", "else", "None", "self", ".", "_expired", "=", "True", "returnValue", "(", "header", ")" ]
Revokes a lease. All keys attached to the lease will expire and be deleted. :returns: Response header. :rtype: instance of :class:`txaioetcd.Header`
[ "Revokes", "a", "lease", ".", "All", "keys", "attached", "to", "the", "lease", "will", "expire", "and", "be", "deleted", "." ]
python
train
google/dotty
efilter/transforms/solve.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/transforms/solve.py#L463-L475
def solve_filter(expr, vars): """Filter values on the LHS by evaluating RHS with each value. Returns any LHS values for which RHS evaluates to a true value. """ lhs_values, _ = __solve_for_repeated(expr.lhs, vars) def lazy_filter(): for lhs_value in repeated.getvalues(lhs_values): if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value: yield lhs_value return Result(repeated.lazy(lazy_filter), ())
[ "def", "solve_filter", "(", "expr", ",", "vars", ")", ":", "lhs_values", ",", "_", "=", "__solve_for_repeated", "(", "expr", ".", "lhs", ",", "vars", ")", "def", "lazy_filter", "(", ")", ":", "for", "lhs_value", "in", "repeated", ".", "getvalues", "(", "lhs_values", ")", ":", "if", "solve", "(", "expr", ".", "rhs", ",", "__nest_scope", "(", "expr", ".", "lhs", ",", "vars", ",", "lhs_value", ")", ")", ".", "value", ":", "yield", "lhs_value", "return", "Result", "(", "repeated", ".", "lazy", "(", "lazy_filter", ")", ",", "(", ")", ")" ]
Filter values on the LHS by evaluating RHS with each value. Returns any LHS values for which RHS evaluates to a true value.
[ "Filter", "values", "on", "the", "LHS", "by", "evaluating", "RHS", "with", "each", "value", "." ]
python
train
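solve_filter defers all evaluation to a generator; the same lazy-filter shape in isolation (predicate and values are made up):

def lazy_filter(values, predicate):
    for value in values:
        if predicate(value):  # evaluated only as the caller iterates
            yield value

gen = lazy_filter(range(10), lambda v: v % 3 == 0)
print(list(gen))  # [0, 3, 6, 9] -- nothing runs until iteration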
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10751-L10775
def position_target_global_int_send(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate, force_mavlink1=False): ''' Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return self.send(self.position_target_global_int_encode(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate), force_mavlink1=force_mavlink1)
[ "def", "position_target_global_int_send", "(", "self", ",", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "position_target_global_int_encode", "(", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "yaw_rate", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
[ "Reports", "the", "current", "commanded", "vehicle", "position", "velocity", "and", "acceleration", "as", "specified", "by", "the", "autopilot", ".", "This", "should", "match", "the", "commands", "sent", "in", "SET_POSITION_TARGET_GLOBAL_INT", "if", "the", "vehicle", "is", "being", "controlled", "this", "way", "." ]
python
train
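The type_mask parameter is a plain bitfield; a sketch of building one that ignores velocity, acceleration and yaw rate, keeping only position and yaw (assuming "bit 1" in the docstring is the least-significant bit):

IGNORE_VX, IGNORE_VY, IGNORE_VZ = 1 << 3, 1 << 4, 1 << 5   # bits 4-6
IGNORE_AX, IGNORE_AY, IGNORE_AZ = 1 << 6, 1 << 7, 1 << 8   # bits 7-9
IGNORE_YAW_RATE = 1 << 11                                  # bit 12

type_mask = (IGNORE_VX | IGNORE_VY | IGNORE_VZ |
             IGNORE_AX | IGNORE_AY | IGNORE_AZ |
             IGNORE_YAW_RATE)
print(bin(type_mask))  # 0b100111111000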
xhtml2pdf/xhtml2pdf
xhtml2pdf/w3c/cssParser.py
https://github.com/xhtml2pdf/xhtml2pdf/blob/230357a392f48816532d3c2fa082a680b80ece48/xhtml2pdf/w3c/cssParser.py#L522-L564
def _parseStylesheet(self, src): """stylesheet : [ CHARSET_SYM S* STRING S* ';' ]? [S|CDO|CDC]* [ import [S|CDO|CDC]* ]* [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]* ; """ # FIXME: BYTES to STR if type(src) == six.binary_type: src=six.text_type(src) # Get rid of the comments src = self.re_comment.sub('', src) # [ CHARSET_SYM S* STRING S* ';' ]? src = self._parseAtCharset(src) # [S|CDO|CDC]* src = self._parseSCDOCDC(src) # [ import [S|CDO|CDC]* ]* src, stylesheetImports = self._parseAtImports(src) # [ namespace [S|CDO|CDC]* ]* src = self._parseAtNamespace(src) stylesheetElements = [] # [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]* while src: # due to ending with ]* if src.startswith('@'): # @media, @page, @font-face src, atResults = self._parseAtKeyword(src) if atResults is not None and atResults != NotImplemented: stylesheetElements.extend(atResults) else: # ruleset src, ruleset = self._parseRuleset(src) stylesheetElements.append(ruleset) # [S|CDO|CDC]* src = self._parseSCDOCDC(src) stylesheet = self.cssBuilder.stylesheet(stylesheetElements, stylesheetImports) return src, stylesheet
[ "def", "_parseStylesheet", "(", "self", ",", "src", ")", ":", "# FIXME: BYTES to STR ", "if", "type", "(", "src", ")", "==", "six", ".", "binary_type", ":", "src", "=", "six", ".", "text_type", "(", "src", ")", "# Get rid of the comments", "src", "=", "self", ".", "re_comment", ".", "sub", "(", "''", ",", "src", ")", "# [ CHARSET_SYM S* STRING S* ';' ]?", "src", "=", "self", ".", "_parseAtCharset", "(", "src", ")", "# [S|CDO|CDC]*", "src", "=", "self", ".", "_parseSCDOCDC", "(", "src", ")", "# [ import [S|CDO|CDC]* ]*", "src", ",", "stylesheetImports", "=", "self", ".", "_parseAtImports", "(", "src", ")", "# [ namespace [S|CDO|CDC]* ]*", "src", "=", "self", ".", "_parseAtNamespace", "(", "src", ")", "stylesheetElements", "=", "[", "]", "# [ [ ruleset | atkeywords ] [S|CDO|CDC]* ]*", "while", "src", ":", "# due to ending with ]*", "if", "src", ".", "startswith", "(", "'@'", ")", ":", "# @media, @page, @font-face", "src", ",", "atResults", "=", "self", ".", "_parseAtKeyword", "(", "src", ")", "if", "atResults", "is", "not", "None", "and", "atResults", "!=", "NotImplemented", ":", "stylesheetElements", ".", "extend", "(", "atResults", ")", "else", ":", "# ruleset", "src", ",", "ruleset", "=", "self", ".", "_parseRuleset", "(", "src", ")", "stylesheetElements", ".", "append", "(", "ruleset", ")", "# [S|CDO|CDC]*", "src", "=", "self", ".", "_parseSCDOCDC", "(", "src", ")", "stylesheet", "=", "self", ".", "cssBuilder", ".", "stylesheet", "(", "stylesheetElements", ",", "stylesheetImports", ")", "return", "src", ",", "stylesheet" ]
stylesheet : [ CHARSET_SYM S* STRING S* ';' ]? [S|CDO|CDC]* [ import [S|CDO|CDC]* ]* [ [ ruleset | media | page | font_face ] [S|CDO|CDC]* ]* ;
[ "stylesheet", ":", "[", "CHARSET_SYM", "S", "*", "STRING", "S", "*", ";", "]", "?", "[", "S|CDO|CDC", "]", "*", "[", "import", "[", "S|CDO|CDC", "]", "*", "]", "*", "[", "[", "ruleset", "|", "media", "|", "page", "|", "font_face", "]", "[", "S|CDO|CDC", "]", "*", "]", "*", ";" ]
python
train
ninuxorg/nodeshot
nodeshot/core/websockets/handlers.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/websockets/handlers.py#L43-L48
def broadcast(cls, message): """ broadcast message to all connected clients """ clients = cls.get_clients() # loop over every client and send message for id, client in clients.iteritems(): client.send_message(message)
[ "def", "broadcast", "(", "cls", ",", "message", ")", ":", "clients", "=", "cls", ".", "get_clients", "(", ")", "# loop over every client and send message", "for", "id", ",", "client", "in", "clients", ".", "iteritems", "(", ")", ":", "client", ".", "send_message", "(", "message", ")" ]
broadcast message to all connected clients
[ "broadcast", "message", "to", "all", "connected", "clients" ]
python
train
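iteritems() pins this handler to Python 2; the same broadcast loop on Python 3, with a stub client for illustration:

class StubClient:
    def send_message(self, message):
        print('sent:', message)

def broadcast(clients, message):
    for client_id, client in clients.items():  # items() replaces iteritems()
        client.send_message(message)

broadcast({'c1': StubClient(), 'c2': StubClient()}, 'hello')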
TrafficSenseMSD/SumoTools
traci/_person.py
https://github.com/TrafficSenseMSD/SumoTools/blob/8607b4f885f1d1798e43240be643efe6dccccdaa/traci/_person.py#L336-L342
def setHeight(self, personID, height): """setHeight(string, double) -> None Sets the height in m for this person. """ self._connection._sendDoubleCmd( tc.CMD_SET_PERSON_VARIABLE, tc.VAR_HEIGHT, personID, height)
[ "def", "setHeight", "(", "self", ",", "personID", ",", "height", ")", ":", "self", ".", "_connection", ".", "_sendDoubleCmd", "(", "tc", ".", "CMD_SET_PERSON_VARIABLE", ",", "tc", ".", "VAR_HEIGHT", ",", "personID", ",", "height", ")" ]
setHeight(string, double) -> None Sets the height in m for this person.
[ "setHeight", "(", "string", "double", ")", "-", ">", "None" ]
python
train
azogue/i2csense
i2csense/bme280.py
https://github.com/azogue/i2csense/blob/ecc6806dcee9de827a5414a9e836d271fedca9b9/i2csense/bme280.py#L61-L79
def _compensate_temperature(self, adc_t): """Compensate temperature. Formula from datasheet Bosch BME280 Environmental sensor. 8.1 Compensation formulas in double precision floating point Edition BST-BME280-DS001-10 | Revision 1.1 | May 2015 """ var_1 = ((adc_t / 16384.0 - self._calibration_t[0] / 1024.0) * self._calibration_t[1]) var_2 = ((adc_t / 131072.0 - self._calibration_t[0] / 8192.0) * (adc_t / 131072.0 - self._calibration_t[0] / 8192.0) * self._calibration_t[2]) self._temp_fine = var_1 + var_2 if self._delta_temp != 0.: # temperature correction for self heating temp = self._temp_fine / 5120.0 + self._delta_temp self._temp_fine = temp * 5120.0 else: temp = self._temp_fine / 5120.0 return temp
[ "def", "_compensate_temperature", "(", "self", ",", "adc_t", ")", ":", "var_1", "=", "(", "(", "adc_t", "/", "16384.0", "-", "self", ".", "_calibration_t", "[", "0", "]", "/", "1024.0", ")", "*", "self", ".", "_calibration_t", "[", "1", "]", ")", "var_2", "=", "(", "(", "adc_t", "/", "131072.0", "-", "self", ".", "_calibration_t", "[", "0", "]", "/", "8192.0", ")", "*", "(", "adc_t", "/", "131072.0", "-", "self", ".", "_calibration_t", "[", "0", "]", "/", "8192.0", ")", "*", "self", ".", "_calibration_t", "[", "2", "]", ")", "self", ".", "_temp_fine", "=", "var_1", "+", "var_2", "if", "self", ".", "_delta_temp", "!=", "0.", ":", "# temperature correction for self heating", "temp", "=", "self", ".", "_temp_fine", "/", "5120.0", "+", "self", ".", "_delta_temp", "self", ".", "_temp_fine", "=", "temp", "*", "5120.0", "else", ":", "temp", "=", "self", ".", "_temp_fine", "/", "5120.0", "return", "temp" ]
Compensate temperature. Formula from datasheet Bosch BME280 Environmental sensor. 8.1 Compensation formulas in double precision floating point Edition BST-BME280-DS001-10 | Revision 1.1 | May 2015
[ "Compensate", "temperature", "." ]
python
train
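A worked run of the compensation formula above, with calibration constants and a raw reading matching the sample values in Bosch's datasheet (treat them as illustrative; the self-heating correction is omitted):

calibration_t = [27504, 26435, -1000]  # dig_T1, dig_T2, dig_T3
adc_t = 519888                         # raw 20-bit temperature sample

var_1 = (adc_t / 16384.0 - calibration_t[0] / 1024.0) * calibration_t[1]
var_2 = ((adc_t / 131072.0 - calibration_t[0] / 8192.0) ** 2) * calibration_t[2]
temp = (var_1 + var_2) / 5120.0
print(round(temp, 2))  # 25.08 degC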
MartinThoma/hwrt
hwrt/partitions.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/partitions.py#L20-L32
def prepare_table(table): """Make the table 'symmetric' where the lower left part of the matrix is the reverse probability """ n = len(table) for i, row in enumerate(table): assert len(row) == n for j, el in enumerate(row): if i == j: table[i][i] = 0.0 elif i > j: table[i][j] = 1-table[j][i] return table
[ "def", "prepare_table", "(", "table", ")", ":", "n", "=", "len", "(", "table", ")", "for", "i", ",", "row", "in", "enumerate", "(", "table", ")", ":", "assert", "len", "(", "row", ")", "==", "n", "for", "j", ",", "el", "in", "enumerate", "(", "row", ")", ":", "if", "i", "==", "j", ":", "table", "[", "i", "]", "[", "i", "]", "=", "0.0", "elif", "i", ">", "j", ":", "table", "[", "i", "]", "[", "j", "]", "=", "1", "-", "table", "[", "j", "]", "[", "i", "]", "return", "table" ]
Make the table 'symmetric' where the lower left part of the matrix is the reverse probability
[ "Make", "the", "table", "symmetric", "where", "the", "lower", "left", "part", "of", "the", "matrix", "is", "the", "reverse", "probability" ]
python
train
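A worked example of the symmetrisation using prepare_table from the snippet above: the diagonal is zeroed and each lower-left cell becomes one minus its mirror (up to float rounding):

table = [[0.5, 0.7, 0.2],
         [0.9, 0.5, 0.6],
         [0.4, 0.1, 0.5]]
print(prepare_table(table))
# [[0.0, 0.7, 0.2],
#  [0.3, 0.0, 0.6],
#  [0.8, 0.4, 0.0]]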
log2timeline/plaso
plaso/cli/helpers/profiling.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/profiling.py#L70-L117
def ParseOptions(cls, options, configuration_object): """Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. """ if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( 'Configuration object is not an instance of CLITool') profilers = cls._ParseStringOption(options, 'profilers') if not profilers: profilers = set() elif profilers.lower() != 'list': profilers = set(profilers.split(',')) supported_profilers = set(cls.PROFILERS_INFORMATION.keys()) unsupported_profilers = profilers.difference(supported_profilers) if unsupported_profilers: unsupported_profilers = ', '.join(unsupported_profilers) raise errors.BadConfigOption( 'Unsupported profilers: {0:s}'.format(unsupported_profilers)) profiling_directory = getattr(options, 'profiling_directory', None) if profiling_directory and not os.path.isdir(profiling_directory): raise errors.BadConfigOption( 'No such profiling directory: {0:s}'.format(profiling_directory)) profiling_sample_rate = getattr(options, 'profiling_sample_rate', None) if not profiling_sample_rate: profiling_sample_rate = cls.DEFAULT_PROFILING_SAMPLE_RATE else: try: profiling_sample_rate = int(profiling_sample_rate, 10) except (TypeError, ValueError): raise errors.BadConfigOption( 'Invalid profile sample rate: {0!s}.'.format(profiling_sample_rate)) setattr(configuration_object, '_profilers', profilers) setattr(configuration_object, '_profiling_directory', profiling_directory) setattr( configuration_object, '_profiling_sample_rate', profiling_sample_rate)
[ "def", "ParseOptions", "(", "cls", ",", "options", ",", "configuration_object", ")", ":", "if", "not", "isinstance", "(", "configuration_object", ",", "tools", ".", "CLITool", ")", ":", "raise", "errors", ".", "BadConfigObject", "(", "'Configuration object is not an instance of CLITool'", ")", "profilers", "=", "cls", ".", "_ParseStringOption", "(", "options", ",", "'profilers'", ")", "if", "not", "profilers", ":", "profilers", "=", "set", "(", ")", "elif", "profilers", ".", "lower", "(", ")", "!=", "'list'", ":", "profilers", "=", "set", "(", "profilers", ".", "split", "(", "','", ")", ")", "supported_profilers", "=", "set", "(", "cls", ".", "PROFILERS_INFORMATION", ".", "keys", "(", ")", ")", "unsupported_profilers", "=", "profilers", ".", "difference", "(", "supported_profilers", ")", "if", "unsupported_profilers", ":", "unsupported_profilers", "=", "', '", ".", "join", "(", "unsupported_profilers", ")", "raise", "errors", ".", "BadConfigOption", "(", "'Unsupported profilers: {0:s}'", ".", "format", "(", "unsupported_profilers", ")", ")", "profiling_directory", "=", "getattr", "(", "options", ",", "'profiling_directory'", ",", "None", ")", "if", "profiling_directory", "and", "not", "os", ".", "path", ".", "isdir", "(", "profiling_directory", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'No such profiling directory: {0:s}'", ".", "format", "(", "profiling_directory", ")", ")", "profiling_sample_rate", "=", "getattr", "(", "options", ",", "'profiling_sample_rate'", ",", "None", ")", "if", "not", "profiling_sample_rate", ":", "profiling_sample_rate", "=", "cls", ".", "DEFAULT_PROFILING_SAMPLE_RATE", "else", ":", "try", ":", "profiling_sample_rate", "=", "int", "(", "profiling_sample_rate", ",", "10", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "errors", ".", "BadConfigOption", "(", "'Invalid profile sample rate: {0!s}.'", ".", "format", "(", "profiling_sample_rate", ")", ")", "setattr", "(", "configuration_object", ",", "'_profilers'", ",", "profilers", ")", "setattr", "(", "configuration_object", ",", "'_profiling_directory'", ",", "profiling_directory", ")", "setattr", "(", "configuration_object", ",", "'_profiling_sample_rate'", ",", "profiling_sample_rate", ")" ]
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type.
[ "Parses", "and", "validates", "options", "." ]
python
train
yandex/yandex-tank
yandextank/stepper/main.py
https://github.com/yandex/yandex-tank/blob/d71d63b6ab5de8b8a5ea2b728b6ab9ac0b1ba71b/yandextank/stepper/main.py#L159-L191
def read_config(self): ''' stepper part of reading options ''' self.log.info("Configuring StepperWrapper...") self.ammo_file = self.get_option(self.OPTION_AMMOFILE) self.ammo_type = self.get_option('ammo_type') if self.ammo_file: self.ammo_file = os.path.expanduser(self.ammo_file) self.loop_limit = self.get_option(self.OPTION_LOOP) self.ammo_limit = self.get_option("ammo_limit") self.load_profile = LoadProfile(**self.get_option('load_profile')) self.instances = int( self.get_option(self.OPTION_INSTANCES_LIMIT, '1000')) self.uris = self.get_option("uris", []) while '' in self.uris: self.uris.remove('') self.headers = self.get_option("headers") self.http_ver = self.get_option("header_http") self.autocases = self.get_option("autocases") self.enum_ammo = self.get_option("enum_ammo") self.use_caching = self.get_option("use_caching") self.file_cache = self.get_option('file_cache') cache_dir = self.get_option("cache_dir") or self.core.artifacts_base_dir self.cache_dir = os.path.expanduser(cache_dir) self.force_stepping = self.get_option("force_stepping") if self.get_option(self.OPTION_LOAD)[self.OPTION_LOAD_TYPE] == 'stpd_file': self.stpd = self.get_option(self.OPTION_LOAD)[self.OPTION_SCHEDULE] self.chosen_cases = self.get_option("chosen_cases").split() if self.chosen_cases: self.log.info("chosen_cases LIMITS: %s", self.chosen_cases)
[ "def", "read_config", "(", "self", ")", ":", "self", ".", "log", ".", "info", "(", "\"Configuring StepperWrapper...\"", ")", "self", ".", "ammo_file", "=", "self", ".", "get_option", "(", "self", ".", "OPTION_AMMOFILE", ")", "self", ".", "ammo_type", "=", "self", ".", "get_option", "(", "'ammo_type'", ")", "if", "self", ".", "ammo_file", ":", "self", ".", "ammo_file", "=", "os", ".", "path", ".", "expanduser", "(", "self", ".", "ammo_file", ")", "self", ".", "loop_limit", "=", "self", ".", "get_option", "(", "self", ".", "OPTION_LOOP", ")", "self", ".", "ammo_limit", "=", "self", ".", "get_option", "(", "\"ammo_limit\"", ")", "self", ".", "load_profile", "=", "LoadProfile", "(", "*", "*", "self", ".", "get_option", "(", "'load_profile'", ")", ")", "self", ".", "instances", "=", "int", "(", "self", ".", "get_option", "(", "self", ".", "OPTION_INSTANCES_LIMIT", ",", "'1000'", ")", ")", "self", ".", "uris", "=", "self", ".", "get_option", "(", "\"uris\"", ",", "[", "]", ")", "while", "''", "in", "self", ".", "uris", ":", "self", ".", "uris", ".", "remove", "(", "''", ")", "self", ".", "headers", "=", "self", ".", "get_option", "(", "\"headers\"", ")", "self", ".", "http_ver", "=", "self", ".", "get_option", "(", "\"header_http\"", ")", "self", ".", "autocases", "=", "self", ".", "get_option", "(", "\"autocases\"", ")", "self", ".", "enum_ammo", "=", "self", ".", "get_option", "(", "\"enum_ammo\"", ")", "self", ".", "use_caching", "=", "self", ".", "get_option", "(", "\"use_caching\"", ")", "self", ".", "file_cache", "=", "self", ".", "get_option", "(", "'file_cache'", ")", "cache_dir", "=", "self", ".", "get_option", "(", "\"cache_dir\"", ")", "or", "self", ".", "core", ".", "artifacts_base_dir", "self", ".", "cache_dir", "=", "os", ".", "path", ".", "expanduser", "(", "cache_dir", ")", "self", ".", "force_stepping", "=", "self", ".", "get_option", "(", "\"force_stepping\"", ")", "if", "self", ".", "get_option", "(", "self", ".", "OPTION_LOAD", ")", "[", "self", ".", "OPTION_LOAD_TYPE", "]", "==", "'stpd_file'", ":", "self", ".", "stpd", "=", "self", ".", "get_option", "(", "self", ".", "OPTION_LOAD", ")", "[", "self", ".", "OPTION_SCHEDULE", "]", "self", ".", "chosen_cases", "=", "self", ".", "get_option", "(", "\"chosen_cases\"", ")", ".", "split", "(", ")", "if", "self", ".", "chosen_cases", ":", "self", ".", "log", ".", "info", "(", "\"chosen_cases LIMITS: %s\"", ",", "self", ".", "chosen_cases", ")" ]
stepper part of reading options
[ "stepper", "part", "of", "reading", "options" ]
python
test
Phylliade/ikpy
src/ikpy/geometry_utils.py
https://github.com/Phylliade/ikpy/blob/60e36d6163136942bf520d952db17123c658d0b6/src/ikpy/geometry_utils.py#L46-L48
def rotation_matrix(phi, theta, psi):
    """Return the rotation matrix described by the Euler angles given as parameters"""
    return np.dot(Rz_matrix(phi), np.dot(Rx_matrix(theta), Rz_matrix(psi)))
[ "def", "rotation_matrix", "(", "phi", ",", "theta", ",", "psi", ")", ":", "return", "np", ".", "dot", "(", "Rz_matrix", "(", "phi", ")", ",", "np", ".", "dot", "(", "Rx_matrix", "(", "theta", ")", ",", "Rz_matrix", "(", "psi", ")", ")", ")" ]
Return the rotation matrix described by the Euler angles given as parameters
[ "Return", "the", "rotation", "matrix", "described", "by", "the", "Euler", "angles", "given", "as", "parameters" ]
python
train
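rotation_matrix composes a Z-X-Z Euler sequence; minimal definitions of the two helpers it assumes, written here as the standard right-handed rotation matrices (a sketch, not copied from ikpy):

import numpy as np

def Rz_matrix(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])

def Rx_matrix(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])

R = np.dot(Rz_matrix(0.1), np.dot(Rx_matrix(0.2), Rz_matrix(0.3)))
print(np.allclose(np.dot(R, R.T), np.eye(3)))  # rotations are orthogonal: True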
agoragames/kairos
kairos/cassandra_backend.py
https://github.com/agoragames/kairos/blob/0b062d543b0f4a46df460fa0eb6ec281232ab179/kairos/cassandra_backend.py#L145-L166
def _connection(self): ''' Return a connection from the pool ''' try: return self._pool.get(False) except Empty: args = [ self._host, self._port, self._keyspace ] kwargs = { 'user' : None, 'password' : None, 'cql_version' : self._cql_version, 'compression' : self._compression, 'consistency_level' : self._consistency_level, 'transport' : self._transport, } if self._credentials: kwargs['user'] = self._credentials['user'] kwargs['password'] = self._credentials['password'] return cql.connect(*args, **kwargs)
[ "def", "_connection", "(", "self", ")", ":", "try", ":", "return", "self", ".", "_pool", ".", "get", "(", "False", ")", "except", "Empty", ":", "args", "=", "[", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_keyspace", "]", "kwargs", "=", "{", "'user'", ":", "None", ",", "'password'", ":", "None", ",", "'cql_version'", ":", "self", ".", "_cql_version", ",", "'compression'", ":", "self", ".", "_compression", ",", "'consistency_level'", ":", "self", ".", "_consistency_level", ",", "'transport'", ":", "self", ".", "_transport", ",", "}", "if", "self", ".", "_credentials", ":", "kwargs", "[", "'user'", "]", "=", "self", ".", "_credentials", "[", "'user'", "]", "kwargs", "[", "'password'", "]", "=", "self", ".", "_credentials", "[", "'password'", "]", "return", "cql", ".", "connect", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return a connection from the pool
[ "Return", "a", "connection", "from", "the", "pool" ]
python
train
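The pool pattern above (non-blocking get, create a fresh connection on Empty) in a self-contained sketch, with a dummy factory standing in for cql.connect:

from queue import Queue, Empty

pool = Queue()

def make_connection():
    return object()  # stand-in for cql.connect(*args, **kwargs)

def get_connection():
    try:
        return pool.get(False)  # non-blocking: raises Empty when drained
    except Empty:
        return make_connection()

conn = get_connection()
pool.put(conn)  # hand it back for reuse when finished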
scanny/python-pptx
pptx/opc/pkgwriter.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/opc/pkgwriter.py#L39-L47
def _write_content_types_stream(phys_writer, parts): """ Write ``[Content_Types].xml`` part to the physical package with an appropriate content type lookup target for each part in *parts*. """ content_types_blob = serialize_part_xml( _ContentTypesItem.xml_for(parts) ) phys_writer.write(CONTENT_TYPES_URI, content_types_blob)
[ "def", "_write_content_types_stream", "(", "phys_writer", ",", "parts", ")", ":", "content_types_blob", "=", "serialize_part_xml", "(", "_ContentTypesItem", ".", "xml_for", "(", "parts", ")", ")", "phys_writer", ".", "write", "(", "CONTENT_TYPES_URI", ",", "content_types_blob", ")" ]
Write ``[Content_Types].xml`` part to the physical package with an appropriate content type lookup target for each part in *parts*.
[ "Write", "[", "Content_Types", "]", ".", "xml", "part", "to", "the", "physical", "package", "with", "an", "appropriate", "content", "type", "lookup", "target", "for", "each", "part", "in", "*", "parts", "*", "." ]
python
train
gazpachoking/jsonref
jsonref.py
https://github.com/gazpachoking/jsonref/blob/066132e527f8115f75bcadfd0eca12f8973a6309/jsonref.py#L70-L130
def replace_refs(cls, obj, _recursive=False, **kwargs): """ Returns a deep copy of `obj` with all contained JSON reference objects replaced with :class:`JsonRef` instances. :param obj: If this is a JSON reference object, a :class:`JsonRef` instance will be created. If `obj` is not a JSON reference object, a deep copy of it will be created with all contained JSON reference objects replaced by :class:`JsonRef` instances :param base_uri: URI to resolve relative references against :param loader: Callable that takes a URI and returns the parsed JSON (defaults to global ``jsonloader``, a :class:`JsonLoader` instance) :param jsonschema: Flag to turn on `JSON Schema mode <http://json-schema.org/latest/json-schema-core.html#anchor25>`_. 'id' keyword changes the `base_uri` for references contained within the object :param load_on_repr: If set to ``False``, :func:`repr` call on a :class:`JsonRef` object will not cause the reference to be loaded if it hasn't already. (defaults to ``True``) """ store = kwargs.setdefault("_store", _URIDict()) base_uri, frag = urlparse.urldefrag(kwargs.get("base_uri", "")) store_uri = None # If this does not get set, we won't store the result if not frag and not _recursive: store_uri = base_uri try: if kwargs.get("jsonschema") and isinstance(obj["id"], basestring): kwargs["base_uri"] = urlparse.urljoin( kwargs.get("base_uri", ""), obj["id"] ) store_uri = kwargs["base_uri"] except (TypeError, LookupError): pass try: if not isinstance(obj["$ref"], basestring): raise TypeError except (TypeError, LookupError): pass else: return cls(obj, **kwargs) # If our obj was not a json reference object, iterate through it, # replacing children with JsonRefs kwargs["_recursive"] = True path = list(kwargs.pop("_path", ())) if isinstance(obj, Mapping): obj = type(obj)( (k, cls.replace_refs(v, _path=path + [k], **kwargs)) for k, v in iteritems(obj) ) elif isinstance(obj, Sequence) and not isinstance(obj, basestring): obj = type(obj)( cls.replace_refs(v, _path=path + [i], **kwargs) for i, v in enumerate(obj) ) if store_uri is not None: store[store_uri] = obj return obj
[ "def", "replace_refs", "(", "cls", ",", "obj", ",", "_recursive", "=", "False", ",", "*", "*", "kwargs", ")", ":", "store", "=", "kwargs", ".", "setdefault", "(", "\"_store\"", ",", "_URIDict", "(", ")", ")", "base_uri", ",", "frag", "=", "urlparse", ".", "urldefrag", "(", "kwargs", ".", "get", "(", "\"base_uri\"", ",", "\"\"", ")", ")", "store_uri", "=", "None", "# If this does not get set, we won't store the result", "if", "not", "frag", "and", "not", "_recursive", ":", "store_uri", "=", "base_uri", "try", ":", "if", "kwargs", ".", "get", "(", "\"jsonschema\"", ")", "and", "isinstance", "(", "obj", "[", "\"id\"", "]", ",", "basestring", ")", ":", "kwargs", "[", "\"base_uri\"", "]", "=", "urlparse", ".", "urljoin", "(", "kwargs", ".", "get", "(", "\"base_uri\"", ",", "\"\"", ")", ",", "obj", "[", "\"id\"", "]", ")", "store_uri", "=", "kwargs", "[", "\"base_uri\"", "]", "except", "(", "TypeError", ",", "LookupError", ")", ":", "pass", "try", ":", "if", "not", "isinstance", "(", "obj", "[", "\"$ref\"", "]", ",", "basestring", ")", ":", "raise", "TypeError", "except", "(", "TypeError", ",", "LookupError", ")", ":", "pass", "else", ":", "return", "cls", "(", "obj", ",", "*", "*", "kwargs", ")", "# If our obj was not a json reference object, iterate through it,", "# replacing children with JsonRefs", "kwargs", "[", "\"_recursive\"", "]", "=", "True", "path", "=", "list", "(", "kwargs", ".", "pop", "(", "\"_path\"", ",", "(", ")", ")", ")", "if", "isinstance", "(", "obj", ",", "Mapping", ")", ":", "obj", "=", "type", "(", "obj", ")", "(", "(", "k", ",", "cls", ".", "replace_refs", "(", "v", ",", "_path", "=", "path", "+", "[", "k", "]", ",", "*", "*", "kwargs", ")", ")", "for", "k", ",", "v", "in", "iteritems", "(", "obj", ")", ")", "elif", "isinstance", "(", "obj", ",", "Sequence", ")", "and", "not", "isinstance", "(", "obj", ",", "basestring", ")", ":", "obj", "=", "type", "(", "obj", ")", "(", "cls", ".", "replace_refs", "(", "v", ",", "_path", "=", "path", "+", "[", "i", "]", ",", "*", "*", "kwargs", ")", "for", "i", ",", "v", "in", "enumerate", "(", "obj", ")", ")", "if", "store_uri", "is", "not", "None", ":", "store", "[", "store_uri", "]", "=", "obj", "return", "obj" ]
Returns a deep copy of `obj` with all contained JSON reference objects replaced with :class:`JsonRef` instances. :param obj: If this is a JSON reference object, a :class:`JsonRef` instance will be created. If `obj` is not a JSON reference object, a deep copy of it will be created with all contained JSON reference objects replaced by :class:`JsonRef` instances :param base_uri: URI to resolve relative references against :param loader: Callable that takes a URI and returns the parsed JSON (defaults to global ``jsonloader``, a :class:`JsonLoader` instance) :param jsonschema: Flag to turn on `JSON Schema mode <http://json-schema.org/latest/json-schema-core.html#anchor25>`_. 'id' keyword changes the `base_uri` for references contained within the object :param load_on_repr: If set to ``False``, :func:`repr` call on a :class:`JsonRef` object will not cause the reference to be loaded if it hasn't already. (defaults to ``True``)
[ "Returns", "a", "deep", "copy", "of", "obj", "with", "all", "contained", "JSON", "reference", "objects", "replaced", "with", ":", "class", ":", "JsonRef", "instances", "." ]
python
train
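A minimal usage sketch for the `replace_refs` classmethod above, assuming the class is exposed as `jsonref.JsonRef` (as in released versions of the jsonref package); the document and its fragment-only `$ref` are illustrative.

from jsonref import JsonRef

doc = {
    "definitions": {"name": {"type": "string"}},
    "properties": {"name": {"$ref": "#/definitions/name"}},
}
# fragment-only references resolve against the document itself,
# which replace_refs stores under the (empty) base URI
resolved = JsonRef.replace_refs(doc)
print(resolved["properties"]["name"])  # proxies {"type": "string"}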
apache/spark
python/pyspark/sql/dataframe.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/sql/dataframe.py#L1603-L1665
def fillna(self, value, subset=None): """Replace null values, alias for ``na.fill()``. :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other. :param value: int, long, float, string, bool or dict. Value to replace null values with. If the value is a dict, then `subset` is ignored and `value` must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, boolean, or string. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.fill(50).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| | 5| 50| Bob| | 50| 50| Tom| | 50| 50| null| +---+------+-----+ >>> df5.na.fill(False).show() +----+-------+-----+ | age| name| spy| +----+-------+-----+ | 10| Alice|false| | 5| Bob|false| |null|Mallory| true| +----+-------+-----+ >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show() +---+------+-------+ |age|height| name| +---+------+-------+ | 10| 80| Alice| | 5| null| Bob| | 50| null| Tom| | 50| null|unknown| +---+------+-------+ """ if not isinstance(value, (float, int, long, basestring, bool, dict)): raise ValueError("value should be a float, int, long, string, bool or dict") # Note that bool validates isinstance(int), but we don't want to # convert bools to floats if not isinstance(value, bool) and isinstance(value, (int, long)): value = float(value) if isinstance(value, dict): return DataFrame(self._jdf.na().fill(value), self.sql_ctx) elif subset is None: return DataFrame(self._jdf.na().fill(value), self.sql_ctx) else: if isinstance(subset, basestring): subset = [subset] elif not isinstance(subset, (list, tuple)): raise ValueError("subset should be a list or tuple of column names") return DataFrame(self._jdf.na().fill(value, self._jseq(subset)), self.sql_ctx)
[ "def", "fillna", "(", "self", ",", "value", ",", "subset", "=", "None", ")", ":", "if", "not", "isinstance", "(", "value", ",", "(", "float", ",", "int", ",", "long", ",", "basestring", ",", "bool", ",", "dict", ")", ")", ":", "raise", "ValueError", "(", "\"value should be a float, int, long, string, bool or dict\"", ")", "# Note that bool validates isinstance(int), but we don't want to", "# convert bools to floats", "if", "not", "isinstance", "(", "value", ",", "bool", ")", "and", "isinstance", "(", "value", ",", "(", "int", ",", "long", ")", ")", ":", "value", "=", "float", "(", "value", ")", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "return", "DataFrame", "(", "self", ".", "_jdf", ".", "na", "(", ")", ".", "fill", "(", "value", ")", ",", "self", ".", "sql_ctx", ")", "elif", "subset", "is", "None", ":", "return", "DataFrame", "(", "self", ".", "_jdf", ".", "na", "(", ")", ".", "fill", "(", "value", ")", ",", "self", ".", "sql_ctx", ")", "else", ":", "if", "isinstance", "(", "subset", ",", "basestring", ")", ":", "subset", "=", "[", "subset", "]", "elif", "not", "isinstance", "(", "subset", ",", "(", "list", ",", "tuple", ")", ")", ":", "raise", "ValueError", "(", "\"subset should be a list or tuple of column names\"", ")", "return", "DataFrame", "(", "self", ".", "_jdf", ".", "na", "(", ")", ".", "fill", "(", "value", ",", "self", ".", "_jseq", "(", "subset", ")", ")", ",", "self", ".", "sql_ctx", ")" ]
Replace null values, alias for ``na.fill()``. :func:`DataFrame.fillna` and :func:`DataFrameNaFunctions.fill` are aliases of each other. :param value: int, long, float, string, bool or dict. Value to replace null values with. If the value is a dict, then `subset` is ignored and `value` must be a mapping from column name (string) to replacement value. The replacement value must be an int, long, float, boolean, or string. :param subset: optional list of column names to consider. Columns specified in subset that do not have matching data type are ignored. For example, if `value` is a string, and subset contains a non-string column, then the non-string column is simply ignored. >>> df4.na.fill(50).show() +---+------+-----+ |age|height| name| +---+------+-----+ | 10| 80|Alice| | 5| 50| Bob| | 50| 50| Tom| | 50| 50| null| +---+------+-----+ >>> df5.na.fill(False).show() +----+-------+-----+ | age| name| spy| +----+-------+-----+ | 10| Alice|false| | 5| Bob|false| |null|Mallory| true| +----+-------+-----+ >>> df4.na.fill({'age': 50, 'name': 'unknown'}).show() +---+------+-------+ |age|height| name| +---+------+-------+ | 10| 80| Alice| | 5| null| Bob| | 50| null| Tom| | 50| null|unknown| +---+------+-------+
[ "Replace", "null", "values", "alias", "for", "na", ".", "fill", "()", ".", ":", "func", ":", "DataFrame", ".", "fillna", "and", ":", "func", ":", "DataFrameNaFunctions", ".", "fill", "are", "aliases", "of", "each", "other", "." ]
python
train
saltstack/salt
salt/utils/event.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/event.py#L341-L354
def subscribe(self, tag=None, match_type=None): ''' Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event. ''' if tag is None: return match_func = self._get_match_func(match_type) self.pending_tags.append([tag, match_func])
[ "def", "subscribe", "(", "self", ",", "tag", "=", "None", ",", "match_type", "=", "None", ")", ":", "if", "tag", "is", "None", ":", "return", "match_func", "=", "self", ".", "_get_match_func", "(", "match_type", ")", "self", ".", "pending_tags", ".", "append", "(", "[", "tag", ",", "match_func", "]", ")" ]
Subscribe to events matching the passed tag. If you do not subscribe to a tag, events will be discarded by calls to get_event that request a different tag. In contexts where many different jobs are outstanding it is important to subscribe to prevent one call to get_event from discarding a response required by a subsequent call to get_event.
[ "Subscribe", "to", "events", "matching", "the", "passed", "tag", "." ]
python
train
c0ntrol-x/p4rr0t007
p4rr0t007/lib/core.py
https://github.com/c0ntrol-x/p4rr0t007/blob/6fe88ec1231a778b9f1d13bc61332581715d646e/p4rr0t007/lib/core.py#L102-L112
def rpad(s, N, char='\0'):
    """pads a string to the right with null-bytes or any other given character.

    .. note:: This is used by the :py:func:`xor` function.

    :param s: the string
    :param N: an integer of how much padding should be done
    :returns: the padded bytes
    """
    assert isinstance(char, bytes) and len(char) == 1, 'char should be a string with length 1'
    return s.ljust(N, char)
[ "def", "rpad", "(", "s", ",", "N", ",", "char", "=", "'\\0'", ")", ":", "assert", "isinstance", "(", "char", ",", "bytes", ")", "and", "len", "(", "char", ")", "==", "1", ",", "'char should be a string with length 1'", "return", "s", ".", "ljust", "(", "N", ",", "char", ")" ]
pads a string to the right with null-bytes or any other given character.

.. note:: This is used by the :py:func:`xor` function.

:param s: the string
:param N: an integer of how much padding should be done
:returns: the padded bytes
[ "pads", "a", "string", "to", "the", "right", "with", "null", "-", "bytes", "or", "any", "other", "given", "character", "." ]
python
train
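A short hedged example of `rpad` as defined above. Note that the default `char='\0'` is a `str`, so under Python 3 the `bytes` assertion requires passing the pad character explicitly; the values are illustrative.

padded = rpad(b'abc', 6, b'\x00')  # pad character passed explicitly as a single byte
assert padded == b'abc\x00\x00\x00'  # right-padded to length 6 with NUL bytes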
abarker/pdfCropMargins
src/pdfCropMargins/main_pdfCropMargins.py
https://github.com/abarker/pdfCropMargins/blob/55aca874613750ebf4ae69fd8851bdbb7696d6ac/src/pdfCropMargins/main_pdfCropMargins.py#L112-L123
def intersect_boxes(box1, box2): """Takes two pyPdf boxes (such as page.mediaBox) and returns the pyPdf box which is their intersection.""" if not box1 and not box2: return None if not box1: return box2 if not box2: return box1 intersect = RectangleObject([0, 0, 0, 0]) # Note [llx,lly,urx,ury] == [l,b,r,t] intersect.upperRight = (min(box1.upperRight[0], box2.upperRight[0]), min(box1.upperRight[1], box2.upperRight[1])) intersect.lowerLeft = (max(box1.lowerLeft[0], box2.lowerLeft[0]), max(box1.lowerLeft[1], box2.lowerLeft[1])) return intersect
[ "def", "intersect_boxes", "(", "box1", ",", "box2", ")", ":", "if", "not", "box1", "and", "not", "box2", ":", "return", "None", "if", "not", "box1", ":", "return", "box2", "if", "not", "box2", ":", "return", "box1", "intersect", "=", "RectangleObject", "(", "[", "0", ",", "0", ",", "0", ",", "0", "]", ")", "# Note [llx,lly,urx,ury] == [l,b,r,t]", "intersect", ".", "upperRight", "=", "(", "min", "(", "box1", ".", "upperRight", "[", "0", "]", ",", "box2", ".", "upperRight", "[", "0", "]", ")", ",", "min", "(", "box1", ".", "upperRight", "[", "1", "]", ",", "box2", ".", "upperRight", "[", "1", "]", ")", ")", "intersect", ".", "lowerLeft", "=", "(", "max", "(", "box1", ".", "lowerLeft", "[", "0", "]", ",", "box2", ".", "lowerLeft", "[", "0", "]", ")", ",", "max", "(", "box1", ".", "lowerLeft", "[", "1", "]", ",", "box2", ".", "lowerLeft", "[", "1", "]", ")", ")", "return", "intersect" ]
Takes two pyPdf boxes (such as page.mediaBox) and returns the pyPdf box which is their intersection.
[ "Takes", "two", "pyPdf", "boxes", "(", "such", "as", "page", ".", "mediaBox", ")", "and", "returns", "the", "pyPdf", "box", "which", "is", "their", "intersection", "." ]
python
train
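A hedged usage sketch for `intersect_boxes`; `RectangleObject` is imported from PyPDF2's generic module (pre-2.0 layout assumed), matching the constructor the function itself uses, and the coordinates are illustrative.

from PyPDF2.generic import RectangleObject  # PyPDF2 < 2.0 layout assumed

box_a = RectangleObject([0, 0, 100, 100])    # [llx, lly, urx, ury]
box_b = RectangleObject([50, 50, 150, 150])
overlap = intersect_boxes(box_a, box_b)
print(overlap.lowerLeft, overlap.upperRight)  # (50, 50) and (100, 100)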
ggaughan/pipe2py
pipe2py/modules/pipetail.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipetail.py#L14-L32
def pipe_tail(context=None, _INPUT=None, conf=None, **kwargs): """Returns a specified number of items from the bottom of a feed. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) kwargs -- terminal, if the truncation value is wired in conf : count -- length of the truncated feed, if specified literally Yields ------ _OUTPUT : items """ conf = DotDict(conf) limit = conf.get('count', func=int, **kwargs) for item in deque(_INPUT, limit): yield item
[ "def", "pipe_tail", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conf", "=", "DotDict", "(", "conf", ")", "limit", "=", "conf", ".", "get", "(", "'count'", ",", "func", "=", "int", ",", "*", "*", "kwargs", ")", "for", "item", "in", "deque", "(", "_INPUT", ",", "limit", ")", ":", "yield", "item" ]
Returns a specified number of items from the bottom of a feed. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) kwargs -- terminal, if the truncation value is wired in conf : count -- length of the truncated feed, if specified literally Yields ------ _OUTPUT : items
[ "Returns", "a", "specified", "number", "of", "items", "from", "the", "bottom", "of", "a", "feed", "." ]
python
train
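The heart of `pipe_tail` is the bounded deque: constructing `deque(iterable, maxlen)` keeps only the last `maxlen` items of the input. A standalone sketch of that behaviour with illustrative values:

from collections import deque

items = iter(range(10))
limit = 3
print(list(deque(items, limit)))  # [7, 8, 9] -- only the last `limit` items survive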
ejeschke/ginga
ginga/Bindings.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/Bindings.py#L1113-L1122
def kp_pan_zoom_set(self, viewer, event, data_x, data_y, msg=True): """Sets the pan position under the cursor.""" if self.canpan: reg = 1 with viewer.suppress_redraw: viewer.panset_xy(data_x, data_y) scale_x, scale_y = self._save.get((viewer, 'scale', reg), (1.0, 1.0)) viewer.scale_to(scale_x, scale_y) return True
[ "def", "kp_pan_zoom_set", "(", "self", ",", "viewer", ",", "event", ",", "data_x", ",", "data_y", ",", "msg", "=", "True", ")", ":", "if", "self", ".", "canpan", ":", "reg", "=", "1", "with", "viewer", ".", "suppress_redraw", ":", "viewer", ".", "panset_xy", "(", "data_x", ",", "data_y", ")", "scale_x", ",", "scale_y", "=", "self", ".", "_save", ".", "get", "(", "(", "viewer", ",", "'scale'", ",", "reg", ")", ",", "(", "1.0", ",", "1.0", ")", ")", "viewer", ".", "scale_to", "(", "scale_x", ",", "scale_y", ")", "return", "True" ]
Sets the pan position under the cursor.
[ "Sets", "the", "pan", "position", "under", "the", "cursor", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAData/base_datastruct.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAData/base_datastruct.py#L1264-L1278
def get_bar(self, code, time):
    """
    Fetch the data for a single bar and return it as a Series.
    Raise ValueError if the bar does not exist.
    """
    try:
        return self.data.loc[(pd.Timestamp(time), code)]
    except:
        raise ValueError(
            'DATASTRUCT CURRENTLY CANNOT FIND THIS BAR WITH {} {}'.format(
                code,
                time
            )
        )
[ "def", "get_bar", "(", "self", ",", "code", ",", "time", ")", ":", "try", ":", "return", "self", ".", "data", ".", "loc", "[", "(", "pd", ".", "Timestamp", "(", "time", ")", ",", "code", ")", "]", "except", ":", "raise", "ValueError", "(", "'DATASTRUCT CURRENTLY CANNOT FIND THIS BAR WITH {} {}'", ".", "format", "(", "code", ",", "time", ")", ")" ]
Fetch the data for a single bar and return it as a Series.
Raise ValueError if the bar does not exist.
[ "获取一个bar的数据", "返回一个series", "如果不存在", "raise", "ValueError" ]
python
train
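A minimal sketch of the `(Timestamp, code)` MultiIndex lookup that `get_bar` performs; the frame layout and values are assumptions for illustration.

import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [(pd.Timestamp('2019-01-02'), '000001')], names=['datetime', 'code'])
data = pd.DataFrame({'close': [10.0]}, index=idx)
# the same .loc call get_bar issues; returns a Series for that bar
print(data.loc[(pd.Timestamp('2019-01-02'), '000001')])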
mcs07/PubChemPy
pubchempy.py
https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L296-L302
def get_json(identifier, namespace='cid', domain='compound', operation=None, searchtype=None, **kwargs):
    """Request wrapper that automatically parses JSON response and suppresses NotFoundError."""
    try:
        return json.loads(get(identifier, namespace, domain, operation, 'JSON', searchtype, **kwargs).decode())
    except NotFoundError as e:
        log.info(e)
        return None
[ "def", "get_json", "(", "identifier", ",", "namespace", "=", "'cid'", ",", "domain", "=", "'compound'", ",", "operation", "=", "None", ",", "searchtype", "=", "None", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "json", ".", "loads", "(", "get", "(", "identifier", ",", "namespace", ",", "domain", ",", "operation", ",", "'JSON'", ",", "searchtype", ",", "*", "*", "kwargs", ")", ".", "decode", "(", ")", ")", "except", "NotFoundError", "as", "e", ":", "log", ".", "info", "(", "e", ")", "return", "None" ]
Request wrapper that automatically parses JSON response and suppresses NotFoundError.
[ "Request", "wrapper", "that", "automatically", "parses", "JSON", "response", "and", "supresses", "NotFoundError", "." ]
python
train
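A hedged usage sketch for `get_json` (network access to PubChem required); the `'PC_Compounds'` key follows PubChem's PUG REST compound payload, and the query is illustrative.

import pubchempy as pcp

record = pcp.get_json('aspirin', namespace='name')
if record is not None:  # None when PubChem reported NotFoundError
    print(record['PC_Compounds'][0]['id'])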
jjjake/internetarchive
internetarchive/cli/ia.py
https://github.com/jjjake/internetarchive/blob/7c0c71bfe52490927a37ade15bd09b2733fea660/internetarchive/cli/ia.py#L107-L167
def main(): """This is the CLI driver for ia-wrapper.""" args = docopt(__doc__, version=__version__, options_first=True) # Validate args. s = Schema({ six.text_type: bool, '--config-file': Or(None, str), '<args>': list, '<command>': Or(str, lambda _: 'help'), }) try: args = s.validate(args) except SchemaError as exc: print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)), file=sys.stderr) sys.exit(1) # Get subcommand. cmd = args['<command>'] if cmd in cmd_aliases: cmd = cmd_aliases[cmd] if (cmd == 'help') or (not cmd): if not args['<args>']: sys.exit(print(__doc__.strip(), file=sys.stderr)) else: ia_module = load_ia_module(args['<args>'][0]) sys.exit(print(ia_module.__doc__.strip(), file=sys.stderr)) if cmd != 'configure' and args['--config-file']: if not os.path.isfile(args['--config-file']): print('--config-file should be a readable file.\n{0}'.format( printable_usage(__doc__)), file=sys.stderr) sys.exit(1) argv = [cmd] + args['<args>'] config = dict() if args['--log']: config['logging'] = {'level': 'INFO'} elif args['--debug']: config['logging'] = {'level': 'DEBUG'} if args['--insecure']: config['general'] = dict(secure=False) session = get_session(config_file=args['--config-file'], config=config, debug=args['--debug']) ia_module = load_ia_module(cmd) try: sys.exit(ia_module.main(argv, session)) except IOError as e: # Handle Broken Pipe errors. if e.errno == errno.EPIPE: sys.stderr.close() sys.stdout.close() sys.exit(0) else: raise
[ "def", "main", "(", ")", ":", "args", "=", "docopt", "(", "__doc__", ",", "version", "=", "__version__", ",", "options_first", "=", "True", ")", "# Validate args.", "s", "=", "Schema", "(", "{", "six", ".", "text_type", ":", "bool", ",", "'--config-file'", ":", "Or", "(", "None", ",", "str", ")", ",", "'<args>'", ":", "list", ",", "'<command>'", ":", "Or", "(", "str", ",", "lambda", "_", ":", "'help'", ")", ",", "}", ")", "try", ":", "args", "=", "s", ".", "validate", "(", "args", ")", "except", "SchemaError", "as", "exc", ":", "print", "(", "'{0}\\n{1}'", ".", "format", "(", "str", "(", "exc", ")", ",", "printable_usage", "(", "__doc__", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "# Get subcommand.", "cmd", "=", "args", "[", "'<command>'", "]", "if", "cmd", "in", "cmd_aliases", ":", "cmd", "=", "cmd_aliases", "[", "cmd", "]", "if", "(", "cmd", "==", "'help'", ")", "or", "(", "not", "cmd", ")", ":", "if", "not", "args", "[", "'<args>'", "]", ":", "sys", ".", "exit", "(", "print", "(", "__doc__", ".", "strip", "(", ")", ",", "file", "=", "sys", ".", "stderr", ")", ")", "else", ":", "ia_module", "=", "load_ia_module", "(", "args", "[", "'<args>'", "]", "[", "0", "]", ")", "sys", ".", "exit", "(", "print", "(", "ia_module", ".", "__doc__", ".", "strip", "(", ")", ",", "file", "=", "sys", ".", "stderr", ")", ")", "if", "cmd", "!=", "'configure'", "and", "args", "[", "'--config-file'", "]", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "args", "[", "'--config-file'", "]", ")", ":", "print", "(", "'--config-file should be a readable file.\\n{0}'", ".", "format", "(", "printable_usage", "(", "__doc__", ")", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "argv", "=", "[", "cmd", "]", "+", "args", "[", "'<args>'", "]", "config", "=", "dict", "(", ")", "if", "args", "[", "'--log'", "]", ":", "config", "[", "'logging'", "]", "=", "{", "'level'", ":", "'INFO'", "}", "elif", "args", "[", "'--debug'", "]", ":", "config", "[", "'logging'", "]", "=", "{", "'level'", ":", "'DEBUG'", "}", "if", "args", "[", "'--insecure'", "]", ":", "config", "[", "'general'", "]", "=", "dict", "(", "secure", "=", "False", ")", "session", "=", "get_session", "(", "config_file", "=", "args", "[", "'--config-file'", "]", ",", "config", "=", "config", ",", "debug", "=", "args", "[", "'--debug'", "]", ")", "ia_module", "=", "load_ia_module", "(", "cmd", ")", "try", ":", "sys", ".", "exit", "(", "ia_module", ".", "main", "(", "argv", ",", "session", ")", ")", "except", "IOError", "as", "e", ":", "# Handle Broken Pipe errors.", "if", "e", ".", "errno", "==", "errno", ".", "EPIPE", ":", "sys", ".", "stderr", ".", "close", "(", ")", "sys", ".", "stdout", ".", "close", "(", ")", "sys", ".", "exit", "(", "0", ")", "else", ":", "raise" ]
This is the CLI driver for ia-wrapper.
[ "This", "is", "the", "CLI", "driver", "for", "ia", "-", "wrapper", "." ]
python
train
SheffieldML/GPy
GPy/kern/src/todo/eq_ode1.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/kern/src/todo/eq_ode1.py#L292-L309
def _K_compute_eq(self): """Compute covariance for latent covariance.""" t_eq = self._t[self._index==0] if self._t2 is None: if t_eq.size==0: self._K_eq = np.zeros((0, 0)) return self._dist2 = np.square(t_eq[:, None] - t_eq[None, :]) else: t2_eq = self._t2[self._index2==0] if t_eq.size==0 or t2_eq.size==0: self._K_eq = np.zeros((t_eq.size, t2_eq.size)) return self._dist2 = np.square(t_eq[:, None] - t2_eq[None, :]) self._K_eq = np.exp(-self._dist2/(2*self.lengthscale*self.lengthscale)) if self.is_normalized: self._K_eq/=(np.sqrt(2*np.pi)*self.lengthscale)
[ "def", "_K_compute_eq", "(", "self", ")", ":", "t_eq", "=", "self", ".", "_t", "[", "self", ".", "_index", "==", "0", "]", "if", "self", ".", "_t2", "is", "None", ":", "if", "t_eq", ".", "size", "==", "0", ":", "self", ".", "_K_eq", "=", "np", ".", "zeros", "(", "(", "0", ",", "0", ")", ")", "return", "self", ".", "_dist2", "=", "np", ".", "square", "(", "t_eq", "[", ":", ",", "None", "]", "-", "t_eq", "[", "None", ",", ":", "]", ")", "else", ":", "t2_eq", "=", "self", ".", "_t2", "[", "self", ".", "_index2", "==", "0", "]", "if", "t_eq", ".", "size", "==", "0", "or", "t2_eq", ".", "size", "==", "0", ":", "self", ".", "_K_eq", "=", "np", ".", "zeros", "(", "(", "t_eq", ".", "size", ",", "t2_eq", ".", "size", ")", ")", "return", "self", ".", "_dist2", "=", "np", ".", "square", "(", "t_eq", "[", ":", ",", "None", "]", "-", "t2_eq", "[", "None", ",", ":", "]", ")", "self", ".", "_K_eq", "=", "np", ".", "exp", "(", "-", "self", ".", "_dist2", "/", "(", "2", "*", "self", ".", "lengthscale", "*", "self", ".", "lengthscale", ")", ")", "if", "self", ".", "is_normalized", ":", "self", ".", "_K_eq", "/=", "(", "np", ".", "sqrt", "(", "2", "*", "np", ".", "pi", ")", "*", "self", ".", "lengthscale", ")" ]
Compute covariance for latent covariance.
[ "Compute", "covariance", "for", "latent", "covariance", "." ]
python
train
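A standalone numpy sketch of the computation inside `_K_compute_eq`: a squared-exponential kernel over 1-D inputs, including the optional normalisation branch; inputs and lengthscale are illustrative.

import numpy as np

t = np.array([0.0, 0.5, 1.0])
lengthscale = 0.7
dist2 = np.square(t[:, None] - t[None, :])       # pairwise squared distances
K = np.exp(-dist2 / (2 * lengthscale * lengthscale))
K_normalized = K / (np.sqrt(2 * np.pi) * lengthscale)  # the is_normalized case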
SectorLabs/django-postgres-extra
psqlextra/manager/manager.py
https://github.com/SectorLabs/django-postgres-extra/blob/eef2ed5504d225858d4e4f5d77a838082ca6053e/psqlextra/manager/manager.py#L197-L236
def insert_and_get(self, **fields): """Creates a new record in the database and then gets the entire row. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: fields: The fields of the row to create. Returns: The model instance representing the row that was created. """ if not self.conflict_target and not self.conflict_action: # no special action required, use the standard Django create(..) return super().create(**fields) compiler = self._build_insert_compiler([fields]) rows = compiler.execute_sql(return_id=False) columns = rows[0] # get a list of columns that are officially part of the model and preserve the fact that the attribute name # might be different than the database column name model_columns = {} for field in self.model._meta.local_concrete_fields: model_columns[field.column] = field.attname # strip out any columns/fields returned by the db that # are not present in the model model_init_fields = {} for column_name, column_value in columns.items(): try: model_init_fields[model_columns[column_name]] = column_value except KeyError: pass return self.model(**model_init_fields)
[ "def", "insert_and_get", "(", "self", ",", "*", "*", "fields", ")", ":", "if", "not", "self", ".", "conflict_target", "and", "not", "self", ".", "conflict_action", ":", "# no special action required, use the standard Django create(..)", "return", "super", "(", ")", ".", "create", "(", "*", "*", "fields", ")", "compiler", "=", "self", ".", "_build_insert_compiler", "(", "[", "fields", "]", ")", "rows", "=", "compiler", ".", "execute_sql", "(", "return_id", "=", "False", ")", "columns", "=", "rows", "[", "0", "]", "# get a list of columns that are officially part of the model and preserve the fact that the attribute name", "# might be different than the database column name", "model_columns", "=", "{", "}", "for", "field", "in", "self", ".", "model", ".", "_meta", ".", "local_concrete_fields", ":", "model_columns", "[", "field", ".", "column", "]", "=", "field", ".", "attname", "# strip out any columns/fields returned by the db that", "# are not present in the model", "model_init_fields", "=", "{", "}", "for", "column_name", ",", "column_value", "in", "columns", ".", "items", "(", ")", ":", "try", ":", "model_init_fields", "[", "model_columns", "[", "column_name", "]", "]", "=", "column_value", "except", "KeyError", ":", "pass", "return", "self", ".", "model", "(", "*", "*", "model_init_fields", ")" ]
Creates a new record in the database and then gets the entire row. This allows specifying custom conflict behavior using .on_conflict(). If no special behavior was specified, this uses the normal Django create(..) Arguments: fields: The fields of the row to create. Returns: The model instance representing the row that was created.
[ "Creates", "a", "new", "record", "in", "the", "database", "and", "then", "gets", "the", "entire", "row", "." ]
python
test
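A hedged usage sketch for `insert_and_get`; `MyModel` is a hypothetical model managed by this manager, and the `ConflictAction` import path is an assumption based on django-postgres-extra's documented pattern.

from psqlextra.types import ConflictAction  # import path assumed

obj = (
    MyModel.objects  # MyModel is hypothetical
    .on_conflict(['slug'], ConflictAction.UPDATE)
    .insert_and_get(slug='example', title='Example')
)
# obj is a full model instance built from the row the database returned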
portfors-lab/sparkle
sparkle/gui/stim/auto_parameters_editor.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/auto_parameters_editor.py#L71-L91
def showEvent(self, event):
    """When this widget is shown it has an effect of putting other widgets
    in the parent widget into different editing modes, emits signal to
    notify other widgets. Restores the previous selection from the last
    time this widget was visible"""
    selected = self.paramList.selectedIndexes()
    model = self.paramList.model()
    self.visibilityChanged.emit(1)
    if len(selected) > 0:
        # select the correct components in the StimulusView
        self.paramList.parameterChanged.emit(model.selection(selected[0]))
        self.hintRequested.emit('Select parameter to edit. \n\nParameter must have selected components in order to edit fields')
    elif model.rowCount() > 0:
        # just select first item
        self.paramList.selectRow(0)
        self.paramList.parameterChanged.emit(model.selection(model.index(0,0)))
        self.hintRequested.emit('Select parameter to edit. \n\nParameter must have selected components in order to edit fields')
    else:
        model.emptied.emit(True)
        self.hintRequested.emit('To add a parameter, Drag "Add" onto empty auto-parameter table')
[ "def", "showEvent", "(", "self", ",", "event", ")", ":", "selected", "=", "self", ".", "paramList", ".", "selectedIndexes", "(", ")", "model", "=", "self", ".", "paramList", ".", "model", "(", ")", "self", ".", "visibilityChanged", ".", "emit", "(", "1", ")", "if", "len", "(", "selected", ")", ">", "0", ":", "# select the correct components in the StimulusView", "self", ".", "paramList", ".", "parameterChanged", ".", "emit", "(", "model", ".", "selection", "(", "selected", "[", "0", "]", ")", ")", "self", ".", "hintRequested", ".", "emit", "(", "'Select parameter to edit. \\n\\nParameter must have selected components in order to edit fields'", ")", "elif", "model", ".", "rowCount", "(", ")", ">", "0", ":", "# just select first item", "self", ".", "paramList", ".", "selectRow", "(", "0", ")", "self", ".", "paramList", ".", "parameterChanged", ".", "emit", "(", "model", ".", "selection", "(", "model", ".", "index", "(", "0", ",", "0", ")", ")", ")", "self", ".", "hintRequested", ".", "emit", "(", "'Select parameter to edit. \\n\\nParameter must have selected components in order to edit fields'", ")", "else", ":", "model", ".", "emptied", ".", "emit", "(", "True", ")", "self", ".", "hintRequested", ".", "emit", "(", "'To add a parameter, Drag \"Add\" onto empty auto-parameter table'", ")" ]
When this widget is shown it has an effect of putting other widgets in the parent widget into different editing modes, emits signal to notify other widgets. Restores the previous selection from the last time this widget was visible
[ "When", "this", "widget", "is", "shown", "it", "has", "an", "effect", "of", "putting", "other", "widgets", "in", "the", "parent", "widget", "into", "different", "editing", "modes", "emits", "signal", "to", "notify", "other", "widgets", ".", "Restores", "the", "previous", "selection", "the", "last", "time", "this", "widget", "was", "visible" ]
python
train
databio/pypiper
pypiper/utils.py
https://github.com/databio/pypiper/blob/00e6c2b94033c4187d47ff14c5580bbfc2ff097f/pypiper/utils.py#L313-L357
def get_first_value(param, param_pools, on_missing=None, error=True): """ Get the value for a particular parameter from the first pool in the provided priority list of parameter pools. :param str param: Name of parameter for which to determine/fetch value. :param Sequence[Mapping[str, object]] param_pools: Ordered (priority) collection of mapping from parameter name to value; this should be ordered according to descending priority. :param object | function(str) -> object on_missing: default value or action to take if the requested parameter is missing from all of the pools. If a callable, it should return a value when passed the requested parameter as the one and only argument. :param bool error: Whether to raise an error if the requested parameter is not mapped to a value AND there's no value or strategy provided with 'on_missing' with which to handle the case of a request for an unmapped parameter. :return object: Value to which the requested parameter first mapped in the (descending) priority collection of parameter 'pools,' or a value explicitly defined or derived with 'on_missing.' :raise KeyError: If the requested parameter is unmapped in all of the provided pools, and the argument to the 'error' parameter evaluates to True. """ # Search for the requested parameter. for pool in param_pools: if param in pool: return pool[param] # Raise error if unfound and no strategy or value is provided or handling # unmapped parameter requests. if error and on_missing is None: raise KeyError("Unmapped parameter: '{}'".format(param)) # Use the value or strategy for handling unmapped parameter case. try: return on_missing(param) except TypeError: if hasattr(on_missing, "__call__"): raise TypeError( "Any callable passed as the action to take when a requested " "parameter is missing should accept that parameter and return " "a value.") return on_missing
[ "def", "get_first_value", "(", "param", ",", "param_pools", ",", "on_missing", "=", "None", ",", "error", "=", "True", ")", ":", "# Search for the requested parameter.", "for", "pool", "in", "param_pools", ":", "if", "param", "in", "pool", ":", "return", "pool", "[", "param", "]", "# Raise error if unfound and no strategy or value is provided or handling", "# unmapped parameter requests.", "if", "error", "and", "on_missing", "is", "None", ":", "raise", "KeyError", "(", "\"Unmapped parameter: '{}'\"", ".", "format", "(", "param", ")", ")", "# Use the value or strategy for handling unmapped parameter case.", "try", ":", "return", "on_missing", "(", "param", ")", "except", "TypeError", ":", "if", "hasattr", "(", "on_missing", ",", "\"__call__\"", ")", ":", "raise", "TypeError", "(", "\"Any callable passed as the action to take when a requested \"", "\"parameter is missing should accept that parameter and return \"", "\"a value.\"", ")", "return", "on_missing" ]
Get the value for a particular parameter from the first pool in the provided priority list of parameter pools. :param str param: Name of parameter for which to determine/fetch value. :param Sequence[Mapping[str, object]] param_pools: Ordered (priority) collection of mapping from parameter name to value; this should be ordered according to descending priority. :param object | function(str) -> object on_missing: default value or action to take if the requested parameter is missing from all of the pools. If a callable, it should return a value when passed the requested parameter as the one and only argument. :param bool error: Whether to raise an error if the requested parameter is not mapped to a value AND there's no value or strategy provided with 'on_missing' with which to handle the case of a request for an unmapped parameter. :return object: Value to which the requested parameter first mapped in the (descending) priority collection of parameter 'pools,' or a value explicitly defined or derived with 'on_missing.' :raise KeyError: If the requested parameter is unmapped in all of the provided pools, and the argument to the 'error' parameter evaluates to True.
[ "Get", "the", "value", "for", "a", "particular", "parameter", "from", "the", "first", "pool", "in", "the", "provided", "priority", "list", "of", "parameter", "pools", "." ]
python
train
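A usage sketch for `get_first_value` with illustrative parameter pools in descending priority order.

pools = [{'cores': 4}, {'cores': 1, 'mem': '8G'}]

assert get_first_value('cores', pools) == 4              # highest-priority pool wins
assert get_first_value('mem', pools) == '8G'             # falls through to later pools
assert get_first_value('gpu', pools, on_missing=0) == 0  # non-callable default is returned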
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/configfieldlists.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/configfieldlists.py#L1127-L1150
def create_configfield_ref_target_node(target_id, env, lineno): """Create a ``target`` node that marks a configuration field. Internally, this also adds to the ``lsst_configfields`` attribute of the environment that is consumed by `documenteer.sphinxext.lssttasks. crossrefs.process_pending_configfield_xref_nodes`. See also -------- `documenteer.sphinxext.lssttasks.crossrefs.process_pending_configfield_xref_nodes` """ target_node = nodes.target('', '', ids=[target_id]) # Store these task/configurable topic nodes in the environment for later # cross referencing. if not hasattr(env, 'lsst_configfields'): env.lsst_configfields = {} env.lsst_configfields[target_id] = { 'docname': env.docname, 'lineno': lineno, 'target': target_node, } return target_node
[ "def", "create_configfield_ref_target_node", "(", "target_id", ",", "env", ",", "lineno", ")", ":", "target_node", "=", "nodes", ".", "target", "(", "''", ",", "''", ",", "ids", "=", "[", "target_id", "]", ")", "# Store these task/configurable topic nodes in the environment for later", "# cross referencing.", "if", "not", "hasattr", "(", "env", ",", "'lsst_configfields'", ")", ":", "env", ".", "lsst_configfields", "=", "{", "}", "env", ".", "lsst_configfields", "[", "target_id", "]", "=", "{", "'docname'", ":", "env", ".", "docname", ",", "'lineno'", ":", "lineno", ",", "'target'", ":", "target_node", ",", "}", "return", "target_node" ]
Create a ``target`` node that marks a configuration field. Internally, this also adds to the ``lsst_configfields`` attribute of the environment that is consumed by `documenteer.sphinxext.lssttasks. crossrefs.process_pending_configfield_xref_nodes`. See also -------- `documenteer.sphinxext.lssttasks.crossrefs.process_pending_configfield_xref_nodes`
[ "Create", "a", "target", "node", "that", "marks", "a", "configuration", "field", "." ]
python
train
apache/incubator-mxnet
example/ssd/dataset/pycocotools/coco.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/ssd/dataset/pycocotools/coco.py#L217-L226
def loadImgs(self, ids=[]):
    """
    Load imgs with the specified ids.
    :param ids (int array) : integer ids specifying img
    :return: imgs (object array) : loaded img objects
    """
    if type(ids) == list:
        return [self.imgs[id] for id in ids]
    elif type(ids) == int:
        return [self.imgs[ids]]
[ "def", "loadImgs", "(", "self", ",", "ids", "=", "[", "]", ")", ":", "if", "type", "(", "ids", ")", "==", "list", ":", "return", "[", "self", ".", "imgs", "[", "id", "]", "for", "id", "in", "ids", "]", "elif", "type", "(", "ids", ")", "==", "int", ":", "return", "[", "self", ".", "imgs", "[", "ids", "]", "]" ]
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
[ "Load", "anns", "with", "the", "specified", "ids", ".", ":", "param", "ids", "(", "int", "array", ")", ":", "integer", "ids", "specifying", "img", ":", "return", ":", "imgs", "(", "object", "array", ")", ":", "loaded", "img", "objects" ]
python
train
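A hedged usage sketch; the annotation file name and image id are illustrative, and the `COCO(...)` constructor follows pycocotools' usual entry point.

coco = COCO('instances_val2017.json')  # hypothetical annotation file
imgs = coco.loadImgs([42])             # list of ids -> list of img dicts
same = coco.loadImgs(42)               # a single int id yields the same result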
DataDog/integrations-core
sqlserver/datadog_checks/sqlserver/sqlserver.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/sqlserver/datadog_checks/sqlserver/sqlserver.py#L620-L673
def open_db_connections(self, instance, db_key, db_name=None): """ We open the db connections explicitly, so we can ensure they are open before we use them, and are closable, once we are finished. Open db connections keep locks on the db, presenting issues such as the SQL Server Agent being unable to stop. """ conn_key = self._conn_key(instance, db_key, db_name) timeout = int(instance.get('command_timeout', self.DEFAULT_COMMAND_TIMEOUT)) dsn, host, username, password, database, driver = self._get_access_info(instance, db_key, db_name) custom_tags = instance.get("tags", []) if custom_tags is None: custom_tags = [] service_check_tags = ['host:{}'.format(host), 'db:{}'.format(database)] service_check_tags.extend(custom_tags) service_check_tags = list(set(service_check_tags)) cs = instance.get('connection_string', '') cs += ';' if cs != '' else '' try: if self._get_connector(instance) == 'adodbapi': cs += self._conn_string_adodbapi(db_key, instance=instance, db_name=db_name) # autocommit: true disables implicit transaction rawconn = adodbapi.connect(cs, {'timeout': timeout, 'autocommit': True}) else: cs += self._conn_string_odbc(db_key, instance=instance, db_name=db_name) rawconn = pyodbc.connect(cs, timeout=timeout) self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags) if conn_key not in self.connections: self.connections[conn_key] = {'conn': rawconn, 'timeout': timeout} else: try: # explicitly trying to avoid leaks... self.connections[conn_key]['conn'].close() except Exception as e: self.log.info("Could not close adodbapi db connection\n{0}".format(e)) self.connections[conn_key]['conn'] = rawconn except Exception: cx = "{} - {}".format(host, database) message = "Unable to connect to SQL Server for instance {}.".format(cx) self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=message) password = instance.get('password') tracebk = traceback.format_exc() if password is not None: tracebk = tracebk.replace(password, "*" * 6) cxn_failure_exp = SQLConnectionError("{} \n {}".format(message, tracebk)) raise cxn_failure_exp
[ "def", "open_db_connections", "(", "self", ",", "instance", ",", "db_key", ",", "db_name", "=", "None", ")", ":", "conn_key", "=", "self", ".", "_conn_key", "(", "instance", ",", "db_key", ",", "db_name", ")", "timeout", "=", "int", "(", "instance", ".", "get", "(", "'command_timeout'", ",", "self", ".", "DEFAULT_COMMAND_TIMEOUT", ")", ")", "dsn", ",", "host", ",", "username", ",", "password", ",", "database", ",", "driver", "=", "self", ".", "_get_access_info", "(", "instance", ",", "db_key", ",", "db_name", ")", "custom_tags", "=", "instance", ".", "get", "(", "\"tags\"", ",", "[", "]", ")", "if", "custom_tags", "is", "None", ":", "custom_tags", "=", "[", "]", "service_check_tags", "=", "[", "'host:{}'", ".", "format", "(", "host", ")", ",", "'db:{}'", ".", "format", "(", "database", ")", "]", "service_check_tags", ".", "extend", "(", "custom_tags", ")", "service_check_tags", "=", "list", "(", "set", "(", "service_check_tags", ")", ")", "cs", "=", "instance", ".", "get", "(", "'connection_string'", ",", "''", ")", "cs", "+=", "';'", "if", "cs", "!=", "''", "else", "''", "try", ":", "if", "self", ".", "_get_connector", "(", "instance", ")", "==", "'adodbapi'", ":", "cs", "+=", "self", ".", "_conn_string_adodbapi", "(", "db_key", ",", "instance", "=", "instance", ",", "db_name", "=", "db_name", ")", "# autocommit: true disables implicit transaction", "rawconn", "=", "adodbapi", ".", "connect", "(", "cs", ",", "{", "'timeout'", ":", "timeout", ",", "'autocommit'", ":", "True", "}", ")", "else", ":", "cs", "+=", "self", ".", "_conn_string_odbc", "(", "db_key", ",", "instance", "=", "instance", ",", "db_name", "=", "db_name", ")", "rawconn", "=", "pyodbc", ".", "connect", "(", "cs", ",", "timeout", "=", "timeout", ")", "self", ".", "service_check", "(", "self", ".", "SERVICE_CHECK_NAME", ",", "AgentCheck", ".", "OK", ",", "tags", "=", "service_check_tags", ")", "if", "conn_key", "not", "in", "self", ".", "connections", ":", "self", ".", "connections", "[", "conn_key", "]", "=", "{", "'conn'", ":", "rawconn", ",", "'timeout'", ":", "timeout", "}", "else", ":", "try", ":", "# explicitly trying to avoid leaks...", "self", ".", "connections", "[", "conn_key", "]", "[", "'conn'", "]", ".", "close", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "info", "(", "\"Could not close adodbapi db connection\\n{0}\"", ".", "format", "(", "e", ")", ")", "self", ".", "connections", "[", "conn_key", "]", "[", "'conn'", "]", "=", "rawconn", "except", "Exception", ":", "cx", "=", "\"{} - {}\"", ".", "format", "(", "host", ",", "database", ")", "message", "=", "\"Unable to connect to SQL Server for instance {}.\"", ".", "format", "(", "cx", ")", "self", ".", "service_check", "(", "self", ".", "SERVICE_CHECK_NAME", ",", "AgentCheck", ".", "CRITICAL", ",", "tags", "=", "service_check_tags", ",", "message", "=", "message", ")", "password", "=", "instance", ".", "get", "(", "'password'", ")", "tracebk", "=", "traceback", ".", "format_exc", "(", ")", "if", "password", "is", "not", "None", ":", "tracebk", "=", "tracebk", ".", "replace", "(", "password", ",", "\"*\"", "*", "6", ")", "cxn_failure_exp", "=", "SQLConnectionError", "(", "\"{} \\n {}\"", ".", "format", "(", "message", ",", "tracebk", ")", ")", "raise", "cxn_failure_exp" ]
We open the db connections explicitly, so we can ensure they are open before we use them, and are closable, once we are finished. Open db connections keep locks on the db, presenting issues such as the SQL Server Agent being unable to stop.
[ "We", "open", "the", "db", "connections", "explicitly", "so", "we", "can", "ensure", "they", "are", "open", "before", "we", "use", "them", "and", "are", "closable", "once", "we", "are", "finished", ".", "Open", "db", "connections", "keep", "locks", "on", "the", "db", "presenting", "issues", "such", "as", "the", "SQL", "Server", "Agent", "being", "unable", "to", "stop", "." ]
python
train
vitiral/gpio
gpio.py
https://github.com/vitiral/gpio/blob/d4d8bdc6965295b978eca882e2e2e5a1b35e047b/gpio.py#L158-L165
def set(pin, value): '''set the pin value to 0 or 1''' if value is LOW: value = 0 value = int(bool(value)) log.debug("Write {0}: {1}".format(pin, value)) f = _open[pin].value _write(f, value)
[ "def", "set", "(", "pin", ",", "value", ")", ":", "if", "value", "is", "LOW", ":", "value", "=", "0", "value", "=", "int", "(", "bool", "(", "value", ")", ")", "log", ".", "debug", "(", "\"Write {0}: {1}\"", ".", "format", "(", "pin", ",", "value", ")", ")", "f", "=", "_open", "[", "pin", "]", ".", "value", "_write", "(", "f", ",", "value", ")" ]
set the pin value to 0 or 1
[ "set", "the", "pin", "value", "to", "0", "or", "1" ]
python
train
philipsoutham/py-mysql2pgsql
mysql2pgsql/lib/postgres_file_writer.py
https://github.com/philipsoutham/py-mysql2pgsql/blob/66dc2a3a3119263b3fe77300fb636346509787ef/mysql2pgsql/lib/postgres_file_writer.py#L93-L101
def write_constraints(self, table): """Write DDL of `table` constraints to the output file :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. Returns None """ self.f.write('\n'.join(super(PostgresFileWriter, self).write_constraints(table)))
[ "def", "write_constraints", "(", "self", ",", "table", ")", ":", "self", ".", "f", ".", "write", "(", "'\\n'", ".", "join", "(", "super", "(", "PostgresFileWriter", ",", "self", ")", ".", "write_constraints", "(", "table", ")", ")", ")" ]
Write DDL of `table` constraints to the output file :Parameters: - `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write. Returns None
[ "Write", "DDL", "of", "table", "constraints", "to", "the", "output", "file" ]
python
test
ejeschke/ginga
ginga/rv/plugins/SaveImage.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/SaveImage.py#L259-L293
def update_channels(self): """Update the GUI to reflect channels and image listing. """ if not self.gui_up: return self.logger.debug("channel configuration has changed--updating gui") try: channel = self.fv.get_channel(self.chname) except KeyError: channel = self.fv.get_channel_info() if channel is None: raise ValueError('No channel available') self.chname = channel.name w = self.w.channel_name w.clear() self.chnames = list(self.fv.get_channel_names()) #self.chnames.sort() for chname in self.chnames: w.append_text(chname) # select the channel that is the current one try: i = self.chnames.index(channel.name) except IndexError: i = 0 self.w.channel_name.set_index(i) # update the image listing self.redo()
[ "def", "update_channels", "(", "self", ")", ":", "if", "not", "self", ".", "gui_up", ":", "return", "self", ".", "logger", ".", "debug", "(", "\"channel configuration has changed--updating gui\"", ")", "try", ":", "channel", "=", "self", ".", "fv", ".", "get_channel", "(", "self", ".", "chname", ")", "except", "KeyError", ":", "channel", "=", "self", ".", "fv", ".", "get_channel_info", "(", ")", "if", "channel", "is", "None", ":", "raise", "ValueError", "(", "'No channel available'", ")", "self", ".", "chname", "=", "channel", ".", "name", "w", "=", "self", ".", "w", ".", "channel_name", "w", ".", "clear", "(", ")", "self", ".", "chnames", "=", "list", "(", "self", ".", "fv", ".", "get_channel_names", "(", ")", ")", "#self.chnames.sort()", "for", "chname", "in", "self", ".", "chnames", ":", "w", ".", "append_text", "(", "chname", ")", "# select the channel that is the current one", "try", ":", "i", "=", "self", ".", "chnames", ".", "index", "(", "channel", ".", "name", ")", "except", "IndexError", ":", "i", "=", "0", "self", ".", "w", ".", "channel_name", ".", "set_index", "(", "i", ")", "# update the image listing", "self", ".", "redo", "(", ")" ]
Update the GUI to reflect channels and image listing.
[ "Update", "the", "GUI", "to", "reflect", "channels", "and", "image", "listing", "." ]
python
train
nathforge/pydentifier
src/pydentifier/__init__.py
https://github.com/nathforge/pydentifier/blob/b8d27076254c65cfd7893c1401e2a198abd6afb4/src/pydentifier/__init__.py#L32-L51
def upper_underscore(string, prefix='', suffix=''): """ Generate an underscore-separated upper-case identifier. Useful for constants. Takes a string, prefix, and optional suffix. `prefix` can be set to `''`, though be careful - without a prefix, the function will throw `InvalidIdentifier` when your string starts with a number. Example: >>> upper_underscore("This is a constant", prefix='') 'THIS_IS_A_CONSTANT' """ return require_valid(append_underscore_if_keyword('_'.join( word.upper() for word in en.words(' '.join([prefix, string, suffix]))) ))
[ "def", "upper_underscore", "(", "string", ",", "prefix", "=", "''", ",", "suffix", "=", "''", ")", ":", "return", "require_valid", "(", "append_underscore_if_keyword", "(", "'_'", ".", "join", "(", "word", ".", "upper", "(", ")", "for", "word", "in", "en", ".", "words", "(", "' '", ".", "join", "(", "[", "prefix", ",", "string", ",", "suffix", "]", ")", ")", ")", ")", ")" ]
Generate an underscore-separated upper-case identifier. Useful for constants. Takes a string, prefix, and optional suffix. `prefix` can be set to `''`, though be careful - without a prefix, the function will throw `InvalidIdentifier` when your string starts with a number. Example: >>> upper_underscore("This is a constant", prefix='') 'THIS_IS_A_CONSTANT'
[ "Generate", "an", "underscore", "-", "separated", "upper", "-", "case", "identifier", ".", "Useful", "for", "constants", "." ]
python
train
davenquinn/Attitude
docs/scripts/generate-json.py
https://github.com/davenquinn/Attitude/blob/2ce97b9aba0aa5deedc6617c2315e07e6396d240/docs/scripts/generate-json.py#L11-L26
def serialize(pca, **kwargs): """ Serialize an orientation object to a dict suitable for JSON """ strike, dip, rake = pca.strike_dip_rake() hyp_axes = sampling_axes(pca) return dict( **kwargs, principal_axes = pca.axes.tolist(), hyperbolic_axes = hyp_axes.tolist(), n_samples = pca.n, strike=strike, dip=dip, rake=rake, angular_errors=[2*N.degrees(i) for i in angular_errors(hyp_axes)])
[ "def", "serialize", "(", "pca", ",", "*", "*", "kwargs", ")", ":", "strike", ",", "dip", ",", "rake", "=", "pca", ".", "strike_dip_rake", "(", ")", "hyp_axes", "=", "sampling_axes", "(", "pca", ")", "return", "dict", "(", "*", "*", "kwargs", ",", "principal_axes", "=", "pca", ".", "axes", ".", "tolist", "(", ")", ",", "hyperbolic_axes", "=", "hyp_axes", ".", "tolist", "(", ")", ",", "n_samples", "=", "pca", ".", "n", ",", "strike", "=", "strike", ",", "dip", "=", "dip", ",", "rake", "=", "rake", ",", "angular_errors", "=", "[", "2", "*", "N", ".", "degrees", "(", "i", ")", "for", "i", "in", "angular_errors", "(", "hyp_axes", ")", "]", ")" ]
Serialize an orientation object to a dict suitable for JSON
[ "Serialize", "an", "orientation", "object", "to", "a", "dict", "suitable", "for", "JSON" ]
python
train
occrp-attic/exactitude
exactitude/country.py
https://github.com/occrp-attic/exactitude/blob/9fe13aa70f1aac644dbc999e0b21683db507f02d/exactitude/country.py#L35-L45
def clean_text(self, country, guess=False, **kwargs): """Determine a two-letter country code based on an input. The input may be a country code, a country name, etc. """ code = country.lower().strip() if code in self.names: return code country = countrynames.to_code(country, fuzzy=guess) if country is not None: return country.lower()
[ "def", "clean_text", "(", "self", ",", "country", ",", "guess", "=", "False", ",", "*", "*", "kwargs", ")", ":", "code", "=", "country", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "code", "in", "self", ".", "names", ":", "return", "code", "country", "=", "countrynames", ".", "to_code", "(", "country", ",", "fuzzy", "=", "guess", ")", "if", "country", "is", "not", "None", ":", "return", "country", ".", "lower", "(", ")" ]
Determine a two-letter country code based on an input. The input may be a country code, a country name, etc.
[ "Determine", "a", "two", "-", "letter", "country", "code", "based", "on", "an", "input", "." ]
python
train
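The normalisation that `clean_text` delegates to: `countrynames.to_code` maps country names and alpha-3 codes to ISO 3166-1 alpha-2 (which the method then lowercases); the examples are illustrative.

import countrynames

assert countrynames.to_code('Germany') == 'DE'
assert countrynames.to_code('DEU') == 'DE'
# to_code(..., fuzzy=True) additionally tolerates close misspellings,
# which is what the `guess` flag above enables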
yvesalexandre/bandicoot
bandicoot/helper/stops.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/helper/stops.py#L37-L43
def get_neighbors(distance_matrix, source, eps):
    """
    Given a matrix of distances between pairs of points, return the list
    of every point closer than eps to a given point.
    """
    return [dest for dest, distance in enumerate(distance_matrix[source]) if distance < eps]
[ "def", "get_neighbors", "(", "distance_matrix", ",", "source", ",", "eps", ")", ":", "return", "[", "dest", "for", "dest", ",", "distance", "in", "enumerate", "(", "distance_matrix", "[", "source", "]", ")", "if", "distance", "<", "eps", "]" ]
Given a matrix of distances between pairs of points, return the list of every point closer than eps to a given point.
[ "Given", "a", "matrix", "of", "distance", "between", "couples", "of", "points", "return", "the", "list", "of", "every", "point", "closer", "than", "eps", "from", "a", "certain", "point", "." ]
python
train
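A small usage sketch; note that the source point counts as its own neighbour, since its self-distance of 0 is below any positive eps.

distance_matrix = [
    [0.0, 1.0, 5.0],
    [1.0, 0.0, 2.0],
    [5.0, 2.0, 0.0],
]
assert get_neighbors(distance_matrix, source=0, eps=1.5) == [0, 1]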
HazyResearch/fonduer
src/fonduer/learning/classifier.py
https://github.com/HazyResearch/fonduer/blob/4520f86a716f03dcca458a9f4bddac75b4e7068f/src/fonduer/learning/classifier.py#L488-L516
def save(self, model_file, save_dir, verbose=True): """Save current model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool """ # Check existence of model saving directory and create if does not exist. if not os.path.exists(save_dir): os.makedirs(save_dir) params = { "model": self.state_dict(), "cardinality": self.cardinality, "name": self.name, "config": self.settings, } try: torch.save(params, f"{save_dir}/{model_file}") except BaseException: self.logger.warning("Saving failed... continuing anyway.") if verbose: self.logger.info(f"[{self.name}] Model saved as {model_file} in {save_dir}")
[ "def", "save", "(", "self", ",", "model_file", ",", "save_dir", ",", "verbose", "=", "True", ")", ":", "# Check existence of model saving directory and create if does not exist.", "if", "not", "os", ".", "path", ".", "exists", "(", "save_dir", ")", ":", "os", ".", "makedirs", "(", "save_dir", ")", "params", "=", "{", "\"model\"", ":", "self", ".", "state_dict", "(", ")", ",", "\"cardinality\"", ":", "self", ".", "cardinality", ",", "\"name\"", ":", "self", ".", "name", ",", "\"config\"", ":", "self", ".", "settings", ",", "}", "try", ":", "torch", ".", "save", "(", "params", ",", "f\"{save_dir}/{model_file}\"", ")", "except", "BaseException", ":", "self", ".", "logger", ".", "warning", "(", "\"Saving failed... continuing anyway.\"", ")", "if", "verbose", ":", "self", ".", "logger", ".", "info", "(", "f\"[{self.name}] Model saved as {model_file} in {save_dir}\"", ")" ]
Save current model. :param model_file: Saved model file name. :type model_file: str :param save_dir: Saved model directory. :type save_dir: str :param verbose: Print log or not :type verbose: bool
[ "Save", "current", "model", "." ]
python
train
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1367-L1371
def p_sens_all(self, p): 'senslist : AT TIMES' p[0] = SensList( (Sens(None, 'all', lineno=p.lineno(1)),), lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_sens_all", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "SensList", "(", "(", "Sens", "(", "None", ",", "'all'", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", ",", ")", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
senslist : AT TIMES
[ "senslist", ":", "AT", "TIMES" ]
python
train
aaugustin/websockets
src/websockets/handshake.py
https://github.com/aaugustin/websockets/blob/17b3f47549b6f752a1be07fa1ba3037cb59c7d56/src/websockets/handshake.py#L126-L135
def build_response(headers: Headers, key: str) -> None: """ Build a handshake response to send to the client. ``key`` comes from :func:`check_request`. """ headers["Upgrade"] = "websocket" headers["Connection"] = "Upgrade" headers["Sec-WebSocket-Accept"] = accept(key)
[ "def", "build_response", "(", "headers", ":", "Headers", ",", "key", ":", "str", ")", "->", "None", ":", "headers", "[", "\"Upgrade\"", "]", "=", "\"websocket\"", "headers", "[", "\"Connection\"", "]", "=", "\"Upgrade\"", "headers", "[", "\"Sec-WebSocket-Accept\"", "]", "=", "accept", "(", "key", ")" ]
Build a handshake response to send to the client. ``key`` comes from :func:`check_request`.
[ "Build", "a", "handshake", "response", "to", "send", "to", "the", "client", "." ]
python
train
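A hedged sketch of `build_response`; websockets' `Headers` is a case-insensitive multidict, but any mapping supporting item assignment serves for illustration, and the key is the RFC 6455 sample nonce.

headers = {}
build_response(headers, 'dGhlIHNhbXBsZSBub25jZQ==')
print(headers['Sec-WebSocket-Accept'])  # 's3pPLMBiTxaQ9kYGzzhZRbK+xOo=' per RFC 6455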
tanghaibao/goatools
goatools/grouper/aart_geneproducts_all.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/grouper/aart_geneproducts_all.py#L49-L88
def prt_mrks(self, name_marks_list, prt=sys.stdout): """Print summary of all GOEAs. Example: Key for GO sections: A immune B viral/bacteria C neuro D cell death E lipid F adhesion G cell cycle H chromosome I development J extracellular matrix K ion L localization M membrane N metabolic O phosphorylation P signaling Q stimulus R prolif_differ S Misc. ABCDEFGHIJKLMNOPQRS XX.X..XXX..X.XX.XXX transient_increase XX.XXX.....X.X.XXXX consistent_increase XXXXXX..XXXXXXXXXXX late_increase ..X.....X.XX.X....X consistent_decrease ..X.XX..X.XX.XXX.XX late_decrease """ if not name_marks_list: return # prt.write("\nKey for GO sections:\n") # self.prt_section_key(prt) prt.write("\n{HDR}\n".format(HDR=self.str_hdr())) for name, mark in name_marks_list: if mark is not None: prt.write("{MRKS} {NAME}\n".format(MRKS="".join(mark), NAME=name)) prt.write("\n")
[ "def", "prt_mrks", "(", "self", ",", "name_marks_list", ",", "prt", "=", "sys", ".", "stdout", ")", ":", "if", "not", "name_marks_list", ":", "return", "# prt.write(\"\\nKey for GO sections:\\n\")", "# self.prt_section_key(prt)", "prt", ".", "write", "(", "\"\\n{HDR}\\n\"", ".", "format", "(", "HDR", "=", "self", ".", "str_hdr", "(", ")", ")", ")", "for", "name", ",", "mark", "in", "name_marks_list", ":", "if", "mark", "is", "not", "None", ":", "prt", ".", "write", "(", "\"{MRKS} {NAME}\\n\"", ".", "format", "(", "MRKS", "=", "\"\"", ".", "join", "(", "mark", ")", ",", "NAME", "=", "name", ")", ")", "prt", ".", "write", "(", "\"\\n\"", ")" ]
Print summary of all GOEAs. Example: Key for GO sections: A immune B viral/bacteria C neuro D cell death E lipid F adhesion G cell cycle H chromosome I development J extracellular matrix K ion L localization M membrane N metabolic O phosphorylation P signaling Q stimulus R prolif_differ S Misc. ABCDEFGHIJKLMNOPQRS XX.X..XXX..X.XX.XXX transient_increase XX.XXX.....X.X.XXXX consistent_increase XXXXXX..XXXXXXXXXXX late_increase ..X.....X.XX.X....X consistent_decrease ..X.XX..X.XX.XXX.XX late_decrease
[ "Print", "summary", "of", "all", "GOEAs", ".", "Example", ":", "Key", "for", "GO", "sections", ":", "A", "immune", "B", "viral", "/", "bacteria", "C", "neuro", "D", "cell", "death", "E", "lipid", "F", "adhesion", "G", "cell", "cycle", "H", "chromosome", "I", "development", "J", "extracellular", "matrix", "K", "ion", "L", "localization", "M", "membrane", "N", "metabolic", "O", "phosphorylation", "P", "signaling", "Q", "stimulus", "R", "prolif_differ", "S", "Misc", "." ]
python
train
proteanhq/protean
src/protean/core/usecase/generic.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/usecase/generic.py#L119-L123
def process_request(self, request_object):
    """Process Create Resource Request"""
    resource = request_object.entity_cls.create(**request_object.data)
    return ResponseSuccessCreated(resource)
[ "def", "process_request", "(", "self", ",", "request_object", ")", ":", "resource", "=", "request_object", ".", "entity_cls", ".", "create", "(", "*", "*", "request_object", ".", "data", ")", "return", "ResponseSuccessCreated", "(", "resource", ")" ]
Process Create Resource Request
[ "Process", "Create", "Resource", "Request" ]
python
train
ANTsX/ANTsPy
ants/registration/make_points_image.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/registration/make_points_image.py#L11-L60
def make_points_image(pts, mask, radius=5):
    """
    Create label image from physical space points

    Creates spherical points in the coordinate space of the target image
    based on the n-dimensional matrix of points that the user supplies.
    The image defines the dimensionality of the data so if the input
    image is 3D then the input points should be 2D or 3D.

    ANTsR function: `makePointsImage`

    Arguments
    ---------
    pts : numpy.ndarray
        input powers points

    mask : ANTsImage
        mask defining target space

    radius : integer
        radius for the points

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> import pandas as pd
    >>> mni = ants.image_read(ants.get_data('mni')).get_mask()
    >>> powers_pts = pd.read_csv(ants.get_data('powers_mni_itk'))
    >>> powers_labels = ants.make_points_image(powers_pts.iloc[:,:3].values, mni, radius=3)
    """
    powers_lblimg = mask * 0
    npts = len(pts)
    dim = mask.dimension
    if pts.shape[1] != dim:
        raise ValueError('points dimensionality should match that of images')
    for r in range(npts):
        pt = pts[r, :]
        idx = tio.transform_physical_point_to_index(mask, pt.tolist()).astype(int)
        in_image = (np.prod(idx <= mask.shape) == 1) and (len(np.where(idx < 0)[0]) == 0)
        if (in_image == True):
            if (dim == 3):
                powers_lblimg[idx[0], idx[1], idx[2]] = r + 1
            elif (dim == 2):
                powers_lblimg[idx[0], idx[1]] = r + 1
    return utils.morphology(powers_lblimg, 'dilate', radius, 'grayscale')
[ "def", "make_points_image", "(", "pts", ",", "mask", ",", "radius", "=", "5", ")", ":", "powers_lblimg", "=", "mask", "*", "0", "npts", "=", "len", "(", "pts", ")", "dim", "=", "mask", ".", "dimension", "if", "pts", ".", "shape", "[", "1", "]", "!=", "dim", ":", "raise", "ValueError", "(", "'points dimensionality should match that of images'", ")", "for", "r", "in", "range", "(", "npts", ")", ":", "pt", "=", "pts", "[", "r", ",", ":", "]", "idx", "=", "tio", ".", "transform_physical_point_to_index", "(", "mask", ",", "pt", ".", "tolist", "(", ")", ")", ".", "astype", "(", "int", ")", "in_image", "=", "(", "np", ".", "prod", "(", "idx", "<=", "mask", ".", "shape", ")", "==", "1", ")", "and", "(", "len", "(", "np", ".", "where", "(", "idx", "<", "0", ")", "[", "0", "]", ")", "==", "0", ")", "if", "(", "in_image", "==", "True", ")", ":", "if", "(", "dim", "==", "3", ")", ":", "powers_lblimg", "[", "idx", "[", "0", "]", ",", "idx", "[", "1", "]", ",", "idx", "[", "2", "]", "]", "=", "r", "+", "1", "elif", "(", "dim", "==", "2", ")", ":", "powers_lblimg", "[", "idx", "[", "0", "]", ",", "idx", "[", "1", "]", "]", "=", "r", "+", "1", "return", "utils", ".", "morphology", "(", "powers_lblimg", ",", "'dilate'", ",", "radius", ",", "'grayscale'", ")" ]
Create label image from physical space points Creates spherical points in the coordinate space of the target image based on the n-dimensional matrix of points that the user supplies. The image defines the dimensionality of the data so if the input image is 3D then the input points should be 2D or 3D. ANTsR function: `makePointsImage` Arguments --------- pts : numpy.ndarray input powers points mask : ANTsImage mask defining target space radius : integer radius for the points Returns ------- ANTsImage Example ------- >>> import ants >>> import pandas as pd >>> mni = ants.image_read(ants.get_data('mni')).get_mask() >>> powers_pts = pd.read_csv(ants.get_data('powers_mni_itk')) >>> powers_labels = ants.make_points_image(powers_pts.iloc[:,:3].values, mni, radius=3)
[ "Create", "label", "image", "from", "physical", "space", "points" ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/batch_v2alpha1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/batch_v2alpha1_api.py#L283-L310
def delete_namespaced_cron_job(self, name, namespace, **kwargs):  # noqa: E501
    """delete_namespaced_cron_job  # noqa: E501

    delete a CronJob  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the CronJob (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
    :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
    :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
    :param V1DeleteOptions body:
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_namespaced_cron_job_with_http_info(name, namespace, **kwargs)  # noqa: E501
        return data
[ "def", "delete_namespaced_cron_job", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_namespaced_cron_job_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "delete_namespaced_cron_job_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
delete_namespaced_cron_job # noqa: E501 delete a CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_cron_job(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CronJob (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :param V1DeleteOptions body: :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete_namespaced_cron_job", "#", "noqa", ":", "E501" ]
python
train
pandas-dev/pandas
pandas/io/parsers.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/parsers.py#L2858-L2892
def _next_iter_line(self, row_num):
    """
    Wrapper around iterating through `self.data` (CSV source).

    When a CSV error is raised, we check for specific
    error messages that allow us to customize the
    error message displayed to the user.

    Parameters
    ----------
    row_num : The row number of the line being parsed.
    """
    try:
        return next(self.data)
    except csv.Error as e:
        if self.warn_bad_lines or self.error_bad_lines:
            msg = str(e)

            if 'NULL byte' in msg:
                msg = ('NULL byte detected. This byte '
                       'cannot be processed in Python\'s '
                       'native csv library at the moment, '
                       'so please pass in engine=\'c\' instead')

            if self.skipfooter > 0:
                reason = ('Error could possibly be due to '
                          'parsing errors in the skipped footer rows '
                          '(the skipfooter keyword is only applied '
                          'after Python\'s csv library has parsed '
                          'all rows).')
                msg += '. ' + reason

            self._alert_malformed(msg, row_num)
        return None
[ "def", "_next_iter_line", "(", "self", ",", "row_num", ")", ":", "try", ":", "return", "next", "(", "self", ".", "data", ")", "except", "csv", ".", "Error", "as", "e", ":", "if", "self", ".", "warn_bad_lines", "or", "self", ".", "error_bad_lines", ":", "msg", "=", "str", "(", "e", ")", "if", "'NULL byte'", "in", "msg", ":", "msg", "=", "(", "'NULL byte detected. This byte '", "'cannot be processed in Python\\'s '", "'native csv library at the moment, '", "'so please pass in engine=\\'c\\' instead'", ")", "if", "self", ".", "skipfooter", ">", "0", ":", "reason", "=", "(", "'Error could possibly be due to '", "'parsing errors in the skipped footer rows '", "'(the skipfooter keyword is only applied '", "'after Python\\'s csv library has parsed '", "'all rows).'", ")", "msg", "+=", "'. '", "+", "reason", "self", ".", "_alert_malformed", "(", "msg", ",", "row_num", ")", "return", "None" ]
Wrapper around iterating through `self.data` (CSV source). When a CSV error is raised, we check for specific error messages that allow us to customize the error message displayed to the user. Parameters ---------- row_num : The row number of the line being parsed.
[ "Wrapper", "around", "iterating", "through", "self", ".", "data", "(", "CSV", "source", ")", "." ]
python
train
saltstack/salt
salt/modules/runit.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/runit.py#L651-L687
def disable(name, stop=False, **kwargs):
    '''
    Don't start service ``name`` at boot
    Returns ``True`` if operation is successful

    name
        the service's name

    stop
        if True, also stops the service

    CLI Example:

    .. code-block:: bash

        salt '*' service.disable <name> [stop=True]
    '''
    # non-existent as registrered service
    if not enabled(name):
        return False

    # down_file: file that prevent sv autostart
    svc_realpath = _get_svc_path(name)[0]
    down_file = os.path.join(svc_realpath, 'down')

    if stop:
        stop(name)

    if not os.path.exists(down_file):
        try:
            salt.utils.files.fopen(down_file, "w").close()  # pylint: disable=resource-leakage
        except IOError:
            log.error('Unable to create file %s', down_file)
            return False

    return True
[ "def", "disable", "(", "name", ",", "stop", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# non-existent as registrered service", "if", "not", "enabled", "(", "name", ")", ":", "return", "False", "# down_file: file that prevent sv autostart", "svc_realpath", "=", "_get_svc_path", "(", "name", ")", "[", "0", "]", "down_file", "=", "os", ".", "path", ".", "join", "(", "svc_realpath", ",", "'down'", ")", "if", "stop", ":", "stop", "(", "name", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "down_file", ")", ":", "try", ":", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "down_file", ",", "\"w\"", ")", ".", "close", "(", ")", "# pylint: disable=resource-leakage", "except", "IOError", ":", "log", ".", "error", "(", "'Unable to create file %s'", ",", "down_file", ")", "return", "False", "return", "True" ]
Don't start service ``name`` at boot Returns ``True`` if operation is successful name the service's name stop if True, also stops the service CLI Example: .. code-block:: bash salt '*' service.disable <name> [stop=True]
[ "Don", "t", "start", "service", "name", "at", "boot", "Returns", "True", "if", "operation", "is", "successful" ]
python
train
xolox/python-qpass
qpass/__init__.py
https://github.com/xolox/python-qpass/blob/43ce447b0904ff42a54b8f1dd4d2479f950f258f/qpass/__init__.py#L104-L108
def filtered_entries(self):
    """A list of :class:`PasswordEntry` objects that don't match the exclude list."""
    return [
        e for e in self.entries
        if not any(fnmatch.fnmatch(e.name.lower(), p.lower()) for p in self.exclude_list)
    ]
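
# Editor's note: a hypothetical illustration, not part of the original source.
# With exclude_list = ['*.key'], an entry named 'Work/server.key' is filtered
# out (fnmatch's '*' also matches '/'), while 'Work/github' is kept.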
[ "def", "filtered_entries", "(", "self", ")", ":", "return", "[", "e", "for", "e", "in", "self", ".", "entries", "if", "not", "any", "(", "fnmatch", ".", "fnmatch", "(", "e", ".", "name", ".", "lower", "(", ")", ",", "p", ".", "lower", "(", ")", ")", "for", "p", "in", "self", ".", "exclude_list", ")", "]" ]
A list of :class:`PasswordEntry` objects that don't match the exclude list.
[ "A", "list", "of", ":", "class", ":", "PasswordEntry", "objects", "that", "don", "t", "match", "the", "exclude", "list", "." ]
python
train
matllubos/django-is-core
is_core/utils/__init__.py
https://github.com/matllubos/django-is-core/blob/3f87ec56a814738683c732dce5f07e0328c2300d/is_core/utils/__init__.py#L44-L56
def flatten_fieldsets(fieldsets):
    """Returns a list of field names from an admin fieldsets structure."""
    field_names = []
    for _, opts in fieldsets or ():
        if 'fieldsets' in opts:
            field_names += flatten_fieldsets(opts.get('fieldsets'))
        else:
            for field in opts.get('fields', ()):
                if isinstance(field, (list, tuple)):
                    field_names.extend(field)
                else:
                    field_names.append(field)
    return field_names
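
# Editor's note: a hypothetical usage sketch, not part of the original module,
# using a typical Django-admin-style fieldsets structure:
#
#     fieldsets = (
#         (None, {'fields': ('title', ('first_name', 'last_name'))}),
#         ('Advanced', {'fields': ('slug',)}),
#     )
#     flatten_fieldsets(fieldsets)
#     # -> ['title', 'first_name', 'last_name', 'slug']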
[ "def", "flatten_fieldsets", "(", "fieldsets", ")", ":", "field_names", "=", "[", "]", "for", "_", ",", "opts", "in", "fieldsets", "or", "(", ")", ":", "if", "'fieldsets'", "in", "opts", ":", "field_names", "+=", "flatten_fieldsets", "(", "opts", ".", "get", "(", "'fieldsets'", ")", ")", "else", ":", "for", "field", "in", "opts", ".", "get", "(", "'fields'", ",", "(", ")", ")", ":", "if", "isinstance", "(", "field", ",", "(", "list", ",", "tuple", ")", ")", ":", "field_names", ".", "extend", "(", "field", ")", "else", ":", "field_names", ".", "append", "(", "field", ")", "return", "field_names" ]
Returns a list of field names from an admin fieldsets structure.
[ "Returns", "a", "list", "of", "field", "names", "from", "an", "admin", "fieldsets", "structure", "." ]
python
train
MacHu-GWU/angora-project
angora/algorithm/iterable.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/algorithm/iterable.py#L83-L97
def flatten_all(list_of_list):
    """Flatten arbitrary depth of nesting. Good for unknown nesting structure
    iterable object.

    Usage::

        >>> flatten_all([[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]])
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    """
    for i in list_of_list:
        if hasattr(i, "__iter__"):
            for j in flatten_all(i):
                yield j
        else:
            yield i
[ "def", "flatten_all", "(", "list_of_list", ")", ":", "for", "i", "in", "list_of_list", ":", "if", "hasattr", "(", "i", ",", "\"__iter__\"", ")", ":", "for", "j", "in", "flatten_all", "(", "i", ")", ":", "yield", "j", "else", ":", "yield", "i" ]
Flatten arbitrary depth of nesting. Good for unknown nesting structure iterable object. Usage:: >>> flatten_all([[0, 1], [2, 3, [4, 5], [6, 7, 8]], [9,]]) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
[ "Flatten", "arbitrary", "depth", "of", "nesting", ".", "Good", "for", "unknown", "nesting", "structure", "iterable", "object", "." ]
python
train
pysathq/pysat
examples/hitman.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L317-L338
def hit(self, to_hit):
    """
    This method adds a new set to hit to the hitting set solver. This
    is done by translating the input iterable of objects into a list of
    Boolean variables in the MaxSAT problem formulation.

    :param to_hit: a new set to hit
    :type to_hit: iterable(obj)
    """
    # translating objects to variables
    to_hit = list(map(lambda obj: self.idpool.id(obj), to_hit))

    # a soft clause should be added for each new object
    new_obj = list(filter(lambda vid: vid not in self.oracle.vmap.e2i, to_hit))

    # new hard clause
    self.oracle.add_clause(to_hit)

    # new soft clauses
    for vid in new_obj:
        self.oracle.add_clause([-vid], 1)
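
# Editor's note: a hedged usage sketch assuming the surrounding Hitman class
# from this example module (the solver name and get() call are assumptions,
# not confirmed by this excerpt):
#
#     h = Hitman(solver='g3')
#     h.hit(['a', 'b'])   # at least one of 'a', 'b' must be in the hitting set
#     h.hit(['b', 'c'])
#     h.get()             # e.g. ['b'] -- a smallest hitting set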
[ "def", "hit", "(", "self", ",", "to_hit", ")", ":", "# translating objects to variables", "to_hit", "=", "list", "(", "map", "(", "lambda", "obj", ":", "self", ".", "idpool", ".", "id", "(", "obj", ")", ",", "to_hit", ")", ")", "# a soft clause should be added for each new object", "new_obj", "=", "list", "(", "filter", "(", "lambda", "vid", ":", "vid", "not", "in", "self", ".", "oracle", ".", "vmap", ".", "e2i", ",", "to_hit", ")", ")", "# new hard clause", "self", ".", "oracle", ".", "add_clause", "(", "to_hit", ")", "# new soft clauses", "for", "vid", "in", "new_obj", ":", "self", ".", "oracle", ".", "add_clause", "(", "[", "-", "vid", "]", ",", "1", ")" ]
This method adds a new set to hit to the hitting set solver. This is done by translating the input iterable of objects into a list of Boolean variables in the MaxSAT problem formulation. :param to_hit: a new set to hit :type to_hit: iterable(obj)
[ "This", "method", "adds", "a", "new", "set", "to", "hit", "to", "the", "hitting", "set", "solver", ".", "This", "is", "done", "by", "translating", "the", "input", "iterable", "of", "objects", "into", "a", "list", "of", "Boolean", "variables", "in", "the", "MaxSAT", "problem", "formulation", "." ]
python
train
cocaine/cocaine-framework-python
cocaine/detail/headers.py
https://github.com/cocaine/cocaine-framework-python/blob/d8a30074b6338bac4389eb996e00d404338115e4/cocaine/detail/headers.py#L145-L163
def get_by_index(self, index):
    """
    Returns the entry specified by index

    Note that the table is 1-based ie an index of 0 is
    invalid. This is due to the fact that a zero value
    index signals that a completely unindexed header
    follows.

    The entry will either be from the static table or
    the dynamic table depending on the value of index.
    """
    index -= 1
    if 0 <= index < len(CocaineHeaders.STATIC_TABLE):
        return CocaineHeaders.STATIC_TABLE[index]

    index -= len(CocaineHeaders.STATIC_TABLE)
    if 0 <= index < len(self.dynamic_entries):
        return self.dynamic_entries[index]

    raise InvalidTableIndex("Invalid table index %d" % index)
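
# Editor's note: a hypothetical illustration of the 1-based lookup, not part
# of the original source (assumes `headers` is an instance of this class):
#
#     headers.get_by_index(1)    # first entry of the static table
#     headers.get_by_index(0)    # raises InvalidTableIndex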
[ "def", "get_by_index", "(", "self", ",", "index", ")", ":", "index", "-=", "1", "if", "0", "<=", "index", "<", "len", "(", "CocaineHeaders", ".", "STATIC_TABLE", ")", ":", "return", "CocaineHeaders", ".", "STATIC_TABLE", "[", "index", "]", "index", "-=", "len", "(", "CocaineHeaders", ".", "STATIC_TABLE", ")", "if", "0", "<=", "index", "<", "len", "(", "self", ".", "dynamic_entries", ")", ":", "return", "self", ".", "dynamic_entries", "[", "index", "]", "raise", "InvalidTableIndex", "(", "\"Invalid table index %d\"", "%", "index", ")" ]
Returns the entry specified by index Note that the table is 1-based ie an index of 0 is invalid. This is due to the fact that a zero value index signals that a completely unindexed header follows. The entry will either be from the static table or the dynamic table depending on the value of index.
[ "Returns", "the", "entry", "specified", "by", "index" ]
python
train
bfontaine/term2048
term2048/board.py
https://github.com/bfontaine/term2048/blob/8b5ce8b65f44f20a7ad36022a34dce56184070af/term2048/board.py#L95-L97
def setCell(self, x, y, v):
    """set the cell value at x,y"""
    self.cells[y][x] = v
[ "def", "setCell", "(", "self", ",", "x", ",", "y", ",", "v", ")", ":", "self", ".", "cells", "[", "y", "]", "[", "x", "]", "=", "v" ]
set the cell value at x,y
[ "set", "the", "cell", "value", "at", "x", "y" ]
python
train
Jajcus/pyxmpp2
pyxmpp2/roster.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/roster.py#L729-L741
def handle_got_features_event(self, event):
    """Check for roster related features in the stream features received
    and set `server_features` accordingly.
    """
    server_features = set()
    logger.debug("Checking roster-related features")
    if event.features.find(FEATURE_ROSTERVER) is not None:
        logger.debug("  Roster versioning available")
        server_features.add("versioning")
    if event.features.find(FEATURE_APPROVALS) is not None:
        logger.debug("  Subscription pre-approvals available")
        server_features.add("pre-approvals")
    self.server_features = server_features
[ "def", "handle_got_features_event", "(", "self", ",", "event", ")", ":", "server_features", "=", "set", "(", ")", "logger", ".", "debug", "(", "\"Checking roster-related features\"", ")", "if", "event", ".", "features", ".", "find", "(", "FEATURE_ROSTERVER", ")", "is", "not", "None", ":", "logger", ".", "debug", "(", "\" Roster versioning available\"", ")", "server_features", ".", "add", "(", "\"versioning\"", ")", "if", "event", ".", "features", ".", "find", "(", "FEATURE_APPROVALS", ")", "is", "not", "None", ":", "logger", ".", "debug", "(", "\" Subscription pre-approvals available\"", ")", "server_features", ".", "add", "(", "\"pre-approvals\"", ")", "self", ".", "server_features", "=", "server_features" ]
Check for roster related features in the stream features received and set `server_features` accordingly.
[ "Check", "for", "roster", "related", "features", "in", "the", "stream", "features", "received", "and", "set", "server_features", "accordingly", "." ]
python
valid
OpenAgInitiative/openag_python
openag/cli/firmware/__init__.py
https://github.com/OpenAgInitiative/openag_python/blob/f6202340292bbf7185e1a7d4290188c0dacbb8d0/openag/cli/firmware/__init__.py#L386-L413
def load_plugin(plugin_name):
    """
    Given a plugin name, load plugin cls from plugin directory.
    Will throw an exception if no plugin can be found.
    """
    plugin_cls = plugin_map.get(plugin_name, None)
    if not plugin_cls:
        try:
            plugin_module_name, plugin_cls_name = plugin_name.split(":")
            plugin_module = import_module(plugin_module_name)
            plugin_cls = getattr(plugin_module, plugin_cls_name)
        except ValueError:
            raise click.ClickException(
                '"{}" is not a valid plugin path'.format(plugin_name)
            )
        except ImportError:
            raise click.ClickException(
                '"{}" does not name a Python module'.format(
                    plugin_module_name
                )
            )
        except AttributeError:
            raise click.ClickException(
                'Module "{}" does not contain the class "{}"'.format(
                    plugin_module_name, plugin_cls_name
                )
            )
    return plugin_cls
[ "def", "load_plugin", "(", "plugin_name", ")", ":", "plugin_cls", "=", "plugin_map", ".", "get", "(", "plugin_name", ",", "None", ")", "if", "not", "plugin_cls", ":", "try", ":", "plugin_module_name", ",", "plugin_cls_name", "=", "plugin_name", ".", "split", "(", "\":\"", ")", "plugin_module", "=", "import_module", "(", "plugin_module_name", ")", "plugin_cls", "=", "getattr", "(", "plugin_module", ",", "plugin_cls_name", ")", "except", "ValueError", ":", "raise", "click", ".", "ClickException", "(", "'\"{}\" is not a valid plugin path'", ".", "format", "(", "plugin_name", ")", ")", "except", "ImportError", ":", "raise", "click", ".", "ClickException", "(", "'\"{}\" does not name a Python module'", ".", "format", "(", "plugin_module_name", ")", ")", "except", "AttributeError", ":", "raise", "click", ".", "ClickException", "(", "'Module \"{}\" does not contain the class \"{}\"'", ".", "format", "(", "plugin_module_name", ",", "plugin_cls_name", ")", ")", "return", "plugin_cls" ]
Given a plugin name, load plugin cls from plugin directory. Will throw an exception if no plugin can be found.
[ "Given", "a", "plugin", "name", "load", "plugin", "cls", "from", "plugin", "directory", ".", "Will", "throw", "an", "exception", "if", "no", "plugin", "can", "be", "found", "." ]
python
train
dmlc/gluon-nlp
src/gluonnlp/data/utils.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/src/gluonnlp/data/utils.py#L152-L187
def slice_sequence(sequence, length, pad_last=False, pad_val=C.PAD_TOKEN, overlap=0):
    """Slice a flat sequence of tokens into sequences tokens, with each
    inner sequence's length equal to the specified `length`, taking into account the requested
    sequence overlap.

    Parameters
    ----------
    sequence : list of object
        A flat list of tokens.
    length : int
        The length of each of the samples.
    pad_last : bool, default False
        Whether to pad the last sequence when its length doesn't align. If the last sequence's
        length doesn't align and ``pad_last`` is False, it will be dropped.
    pad_val : object, default
        The padding value to use when the padding of the last sequence is enabled. In general,
        the type of ``pad_val`` should be the same as the tokens.
    overlap : int, default 0
        The extra number of items in current sample that should overlap with the next sample.

    Returns
    -------
    List of list of tokens, with the length of each inner list equal to `length`.
    """
    if length <= overlap:
        raise ValueError('length needs to be larger than overlap')

    if pad_last:
        pad_len = _slice_pad_length(len(sequence), length, overlap)
        sequence = sequence + [pad_val] * pad_len
    num_samples = (len(sequence) - length) // (length - overlap) + 1

    return [sequence[i * (length - overlap): ((i + 1) * length - i * overlap)]
            for i in range(num_samples)]
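
# Editor's note: a worked example added for clarity, not in the original
# source. Each sample starts (length - overlap) positions after the previous
# one, so consecutive samples share `overlap` items:
#
#     slice_sequence(list(range(10)), length=4, overlap=1)
#     # -> [[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]]
#
# With pad_last=True, a trailing partial sample would be padded up to
# `length` with pad_val instead of being dropped.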
[ "def", "slice_sequence", "(", "sequence", ",", "length", ",", "pad_last", "=", "False", ",", "pad_val", "=", "C", ".", "PAD_TOKEN", ",", "overlap", "=", "0", ")", ":", "if", "length", "<=", "overlap", ":", "raise", "ValueError", "(", "'length needs to be larger than overlap'", ")", "if", "pad_last", ":", "pad_len", "=", "_slice_pad_length", "(", "len", "(", "sequence", ")", ",", "length", ",", "overlap", ")", "sequence", "=", "sequence", "+", "[", "pad_val", "]", "*", "pad_len", "num_samples", "=", "(", "len", "(", "sequence", ")", "-", "length", ")", "//", "(", "length", "-", "overlap", ")", "+", "1", "return", "[", "sequence", "[", "i", "*", "(", "length", "-", "overlap", ")", ":", "(", "(", "i", "+", "1", ")", "*", "length", "-", "i", "*", "overlap", ")", "]", "for", "i", "in", "range", "(", "num_samples", ")", "]" ]
Slice a flat sequence of tokens into sequences tokens, with each inner sequence's length equal to the specified `length`, taking into account the requested sequence overlap. Parameters ---------- sequence : list of object A flat list of tokens. length : int The length of each of the samples. pad_last : bool, default False Whether to pad the last sequence when its length doesn't align. If the last sequence's length doesn't align and ``pad_last`` is False, it will be dropped. pad_val : object, default The padding value to use when the padding of the last sequence is enabled. In general, the type of ``pad_val`` should be the same as the tokens. overlap : int, default 0 The extra number of items in current sample that should overlap with the next sample. Returns ------- List of list of tokens, with the length of each inner list equal to `length`.
[ "Slice", "a", "flat", "sequence", "of", "tokens", "into", "sequences", "tokens", "with", "each", "inner", "sequence", "s", "length", "equal", "to", "the", "specified", "length", "taking", "into", "account", "the", "requested", "sequence", "overlap", "." ]
python
train
rwl/pylon
pylon/case.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/case.py#L751-L779
def d2Sbr_dV2(self, Cbr, Ybr, V, lam):
    """ Based on d2Sbr_dV2.m from MATPOWER by Ray Zimmerman, developed
    at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for
    more information.

    @rtype: tuple
    @return: The 2nd derivatives of complex power flow w.r.t. voltage.
    """
    nb = len(V)
    nl = len(lam)
    ib = range(nb)
    il = range(nl)

    diaglam = csr_matrix((lam, (il, il)))
    diagV = csr_matrix((V, (ib, ib)))

    A = Ybr.H * diaglam * Cbr
    B = conj(diagV) * A * diagV
    D = csr_matrix(((A * V) * conj(V), (ib, ib)))
    E = csr_matrix(((A.T * conj(V) * V), (ib, ib)))
    F = B + B.T
    G = csr_matrix((ones(nb) / abs(V), (ib, ib)))

    Haa = F - D - E
    Hva = 1j * G * (B - B.T - D + E)
    Hav = Hva.T
    Hvv = G * F * G

    return Haa, Hav, Hva, Hvv
[ "def", "d2Sbr_dV2", "(", "self", ",", "Cbr", ",", "Ybr", ",", "V", ",", "lam", ")", ":", "nb", "=", "len", "(", "V", ")", "nl", "=", "len", "(", "lam", ")", "ib", "=", "range", "(", "nb", ")", "il", "=", "range", "(", "nl", ")", "diaglam", "=", "csr_matrix", "(", "(", "lam", ",", "(", "il", ",", "il", ")", ")", ")", "diagV", "=", "csr_matrix", "(", "(", "V", ",", "(", "ib", ",", "ib", ")", ")", ")", "A", "=", "Ybr", ".", "H", "*", "diaglam", "*", "Cbr", "B", "=", "conj", "(", "diagV", ")", "*", "A", "*", "diagV", "D", "=", "csr_matrix", "(", "(", "(", "A", "*", "V", ")", "*", "conj", "(", "V", ")", ",", "(", "ib", ",", "ib", ")", ")", ")", "E", "=", "csr_matrix", "(", "(", "(", "A", ".", "T", "*", "conj", "(", "V", ")", "*", "V", ")", ",", "(", "ib", ",", "ib", ")", ")", ")", "F", "=", "B", "+", "B", ".", "T", "G", "=", "csr_matrix", "(", "(", "ones", "(", "nb", ")", "/", "abs", "(", "V", ")", ",", "(", "ib", ",", "ib", ")", ")", ")", "Haa", "=", "F", "-", "D", "-", "E", "Hva", "=", "1j", "*", "G", "*", "(", "B", "-", "B", ".", "T", "-", "D", "+", "E", ")", "Hav", "=", "Hva", ".", "T", "Hvv", "=", "G", "*", "F", "*", "G", "return", "Haa", ",", "Hav", ",", "Hva", ",", "Hvv" ]
Based on d2Sbr_dV2.m from MATPOWER by Ray Zimmerman, developed at PSERC Cornell. See U{http://www.pserc.cornell.edu/matpower/} for more information. @rtype: tuple @return: The 2nd derivatives of complex power flow w.r.t. voltage.
[ "Based", "on", "d2Sbr_dV2", ".", "m", "from", "MATPOWER", "by", "Ray", "Zimmerman", "developed", "at", "PSERC", "Cornell", ".", "See", "U", "{", "http", ":", "//", "www", ".", "pserc", ".", "cornell", ".", "edu", "/", "matpower", "/", "}", "for", "more", "information", "." ]
python
train
sorgerlab/indra
indra/assemblers/kami/assembler.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/assemblers/kami/assembler.py#L172-L183
def add_node(self, name_base, attrs=None):
    """Add a node with a given base name to the Nugget and return ID."""
    if name_base not in self.counters:
        node_id = name_base
    else:
        node_id = '%s_%d' % (name_base, self.counters[name_base])
    node = {'id': node_id}
    if attrs:
        node['attrs'] = attrs
    self.nodes.append(node)
    self.counters[node_id] += 1
    return node_id
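
# Editor's note: a hypothetical illustration of the naming scheme, not in
# the original source (assumes `self.counters` is a defaultdict(int) so that
# the membership test and += both work on fresh names):
#
#     nugget.add_node('agent')   # -> 'agent'
#     nugget.add_node('agent')   # -> 'agent_1'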
[ "def", "add_node", "(", "self", ",", "name_base", ",", "attrs", "=", "None", ")", ":", "if", "name_base", "not", "in", "self", ".", "counters", ":", "node_id", "=", "name_base", "else", ":", "node_id", "=", "'%s_%d'", "%", "(", "name_base", ",", "self", ".", "counters", "[", "name_base", "]", ")", "node", "=", "{", "'id'", ":", "node_id", "}", "if", "attrs", ":", "node", "[", "'attrs'", "]", "=", "attrs", "self", ".", "nodes", ".", "append", "(", "node", ")", "self", ".", "counters", "[", "node_id", "]", "+=", "1", "return", "node_id" ]
Add a node with a given base name to the Nugget and return ID.
[ "Add", "a", "node", "with", "a", "given", "base", "name", "to", "the", "Nugget", "and", "return", "ID", "." ]
python
train
jantman/awslimitchecker
awslimitchecker/services/elasticbeanstalk.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/elasticbeanstalk.py#L54-L68
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    for lim in self.limits.values():
        lim._reset_usage()
    self._find_usage_applications()
    self._find_usage_application_versions()
    self._find_usage_environments()
    self._have_usage = True
    logger.debug("Done checking usage.")
[ "def", "find_usage", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"Checking usage for service %s\"", ",", "self", ".", "service_name", ")", "self", ".", "connect", "(", ")", "for", "lim", "in", "self", ".", "limits", ".", "values", "(", ")", ":", "lim", ".", "_reset_usage", "(", ")", "self", ".", "_find_usage_applications", "(", ")", "self", ".", "_find_usage_application_versions", "(", ")", "self", ".", "_find_usage_environments", "(", ")", "self", ".", "_have_usage", "=", "True", "logger", ".", "debug", "(", "\"Done checking usage.\"", ")" ]
Determine the current usage for each limit of this service, and update corresponding Limit via :py:meth:`~.AwsLimit._add_current_usage`.
[ "Determine", "the", "current", "usage", "for", "each", "limit", "of", "this", "service", "and", "update", "corresponding", "Limit", "via", ":", "py", ":", "meth", ":", "~", ".", "AwsLimit", ".", "_add_current_usage", "." ]
python
train
barrust/mediawiki
mediawiki/mediawiki.py
https://github.com/barrust/mediawiki/blob/292e0be6c752409062dceed325d74839caf16a9b/mediawiki/mediawiki.py#L575-L591
def summary(self, title, sentences=0, chars=0, auto_suggest=True, redirect=True):
    """ Get the summary for the title in question

    Args:
        title (str): Page title to summarize
        sentences (int): Number of sentences to return in summary
        chars (int): Number of characters to return in summary
        auto_suggest (bool): Run auto-suggest on title before \
                             summarizing
        redirect (bool): Use page redirect on title before summarizing
    Returns:
        str: The summarized results of the page
    Note:
        Precedence for parameters: sentences then chars; if both are \
        0 then the entire first section is returned
    """
    page_info = self.page(title, auto_suggest=auto_suggest, redirect=redirect)
    return page_info.summarize(sentences, chars)
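
# Editor's note: a hedged usage sketch, not part of the original source.
# It assumes a `MediaWiki` client instance from this package:
#
#     wiki = MediaWiki()
#     wiki.summary('Python (programming language)', sentences=2)
#     # returns roughly the first two sentences of the page's summary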
[ "def", "summary", "(", "self", ",", "title", ",", "sentences", "=", "0", ",", "chars", "=", "0", ",", "auto_suggest", "=", "True", ",", "redirect", "=", "True", ")", ":", "page_info", "=", "self", ".", "page", "(", "title", ",", "auto_suggest", "=", "auto_suggest", ",", "redirect", "=", "redirect", ")", "return", "page_info", ".", "summarize", "(", "sentences", ",", "chars", ")" ]
Get the summary for the title in question Args: title (str): Page title to summarize sentences (int): Number of sentences to return in summary chars (int): Number of characters to return in summary auto_suggest (bool): Run auto-suggest on title before \ summarizing redirect (bool): Use page redirect on title before summarizing Returns: str: The summarized results of the page Note: Precedence for parameters: sentences then chars; if both are \ 0 then the entire first section is returned
[ "Get", "the", "summary", "for", "the", "title", "in", "question" ]
python
train
XuShaohua/bcloud
bcloud/DownloadPage.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/DownloadPage.py#L607-L619
def start_task(self, row, scan=True):
    '''启动下载任务.

    将任务状态设定为Downloading, 如果没有超过最大任务数的话;
    否则将它设定为Waiting.
    '''
    if not row or row[STATE_COL] in RUNNING_STATES:
        return
    row[STATE_COL] = State.WAITING
    row[STATENAME_COL] = StateNames[State.WAITING]
    self.update_task_db(row)
    if scan:
        self.scan_tasks()
[ "def", "start_task", "(", "self", ",", "row", ",", "scan", "=", "True", ")", ":", "if", "not", "row", "or", "row", "[", "STATE_COL", "]", "in", "RUNNING_STATES", ":", "return", "row", "[", "STATE_COL", "]", "=", "State", ".", "WAITING", "row", "[", "STATENAME_COL", "]", "=", "StateNames", "[", "State", ".", "WAITING", "]", "self", ".", "update_task_db", "(", "row", ")", "if", "scan", ":", "self", ".", "scan_tasks", "(", ")" ]
启动下载任务. 将任务状态设定为Downloading, 如果没有超过最大任务数的话; 否则将它设定为Waiting.
[ "启动下载任务", "." ]
python
train
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/bases.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L246-L255
def set_data(self, data=None, **kwargs):
    '''
    Read data into memory, applying all actions in queue.
    Additionally, update queue and history.
    '''
    if data is None:
        data = self.get_data(**kwargs)
    setattr(self, '_data', data)
    self.history += self.queue
    self.queue = []
[ "def", "set_data", "(", "self", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "data", "is", "None", ":", "data", "=", "self", ".", "get_data", "(", "*", "*", "kwargs", ")", "setattr", "(", "self", ",", "'_data'", ",", "data", ")", "self", ".", "history", "+=", "self", ".", "queue", "self", ".", "queue", "=", "[", "]" ]
Read data into memory, applying all actions in queue. Additionally, update queue and history.
[ "Read", "data", "into", "memory", "applying", "all", "actions", "in", "queue", ".", "Additionally", "update", "queue", "and", "history", "." ]
python
train
jxtech/wechatpy
wechatpy/client/api/invoice.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/invoice.py#L232-L249
def update_status(self, card_id, code, reimburse_status):
    """
    更新发票卡券的状态
    详情请参考
    https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2

    :param card_id: 发票卡券模板的编号
    :param code: 发票卡券的编号
    :param reimburse_status: 发票报销状态
    """
    return self._post(
        'platform/updatestatus',
        data={
            'card_id': card_id,
            'code': code,
            'reimburse_status': reimburse_status,
        },
    )
[ "def", "update_status", "(", "self", ",", "card_id", ",", "code", ",", "reimburse_status", ")", ":", "return", "self", ".", "_post", "(", "'platform/updatestatus'", ",", "data", "=", "{", "'card_id'", ":", "card_id", ",", "'code'", ":", "code", ",", "'reimburse_status'", ":", "reimburse_status", ",", "}", ",", ")" ]
更新发票卡券的状态 详情请参考 https://mp.weixin.qq.com/wiki?id=mp1497082828_r1cI2 :param card_id: 发票卡券模板的编号 :param code: 发票卡券的编号 :param reimburse_status: 发票报销状态
[ "更新发票卡券的状态", "详情请参考", "https", ":", "//", "mp", ".", "weixin", ".", "qq", ".", "com", "/", "wiki?id", "=", "mp1497082828_r1cI2" ]
python
train
saltstack/salt
salt/modules/win_pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pkg.py#L257-L314
def list_available(*names, **kwargs):
    '''
    Return a list of available versions of the specified package.

    Args:
        names (str): One or more package names

    Kwargs:

        saltenv (str): The salt environment to use. Default ``base``.

        refresh (bool): Refresh package metadata. Default ``False``.

        return_dict_always (bool):
            Default ``False`` dict when a single package name is queried.

    Returns:
        dict: The package name with its available versions

        .. code-block:: cfg

            {'<package name>': ['<version>', '<version>', ]}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_available <package name> return_dict_always=True
        salt '*' pkg.list_available <package name01> <package name02>
    '''
    if not names:
        return ''

    saltenv = kwargs.get('saltenv', 'base')
    refresh = salt.utils.data.is_true(kwargs.get('refresh', False))
    _refresh_db_conditional(saltenv, force=refresh)
    return_dict_always = \
        salt.utils.data.is_true(kwargs.get('return_dict_always', False))
    if len(names) == 1 and not return_dict_always:
        pkginfo = _get_package_info(names[0], saltenv=saltenv)
        if not pkginfo:
            return ''
        versions = sorted(
            list(pkginfo.keys()),
            key=cmp_to_key(_reverse_cmp_pkg_versions)
        )
    else:
        versions = {}
        for name in names:
            pkginfo = _get_package_info(name, saltenv=saltenv)
            if not pkginfo:
                continue
            verlist = sorted(
                list(pkginfo.keys()) if pkginfo else [],
                key=cmp_to_key(_reverse_cmp_pkg_versions)
            )
            versions[name] = verlist
    return versions
[ "def", "list_available", "(", "*", "names", ",", "*", "*", "kwargs", ")", ":", "if", "not", "names", ":", "return", "''", "saltenv", "=", "kwargs", ".", "get", "(", "'saltenv'", ",", "'base'", ")", "refresh", "=", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "kwargs", ".", "get", "(", "'refresh'", ",", "False", ")", ")", "_refresh_db_conditional", "(", "saltenv", ",", "force", "=", "refresh", ")", "return_dict_always", "=", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "kwargs", ".", "get", "(", "'return_dict_always'", ",", "False", ")", ")", "if", "len", "(", "names", ")", "==", "1", "and", "not", "return_dict_always", ":", "pkginfo", "=", "_get_package_info", "(", "names", "[", "0", "]", ",", "saltenv", "=", "saltenv", ")", "if", "not", "pkginfo", ":", "return", "''", "versions", "=", "sorted", "(", "list", "(", "pkginfo", ".", "keys", "(", ")", ")", ",", "key", "=", "cmp_to_key", "(", "_reverse_cmp_pkg_versions", ")", ")", "else", ":", "versions", "=", "{", "}", "for", "name", "in", "names", ":", "pkginfo", "=", "_get_package_info", "(", "name", ",", "saltenv", "=", "saltenv", ")", "if", "not", "pkginfo", ":", "continue", "verlist", "=", "sorted", "(", "list", "(", "pkginfo", ".", "keys", "(", ")", ")", "if", "pkginfo", "else", "[", "]", ",", "key", "=", "cmp_to_key", "(", "_reverse_cmp_pkg_versions", ")", ")", "versions", "[", "name", "]", "=", "verlist", "return", "versions" ]
Return a list of available versions of the specified package. Args: names (str): One or more package names Kwargs: saltenv (str): The salt environment to use. Default ``base``. refresh (bool): Refresh package metadata. Default ``False``. return_dict_always (bool): Default ``False`` dict when a single package name is queried. Returns: dict: The package name with its available versions .. code-block:: cfg {'<package name>': ['<version>', '<version>', ]} CLI Example: .. code-block:: bash salt '*' pkg.list_available <package name> return_dict_always=True salt '*' pkg.list_available <package name01> <package name02>
[ "Return", "a", "list", "of", "available", "versions", "of", "the", "specified", "package", "." ]
python
train
choderalab/pymbar
examples/heat-capacity/heat-capacity.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/examples/heat-capacity/heat-capacity.py#L75-L108
def read_total_energies(pathname, colnum):
    """Reads in the TEMP#/ener_box#.output file and parses it, returning an array of energies

    ARGUMENTS
        filename (string) - the path to the folder of the simulation
        colnum (integer) column the energy is found in
    """
    print("--Reading total energies from %s/..." % pathname)

    # Initialize Return variables
    E_kn = numpy.zeros([NumTemps, NumIterations], numpy.float64)

    # Read files
    for k in range(NumTemps):
        # Construct each TEMP#/ener_box#.output name and read in the file
        filename = os.path.join(pathname, 'TEMP' + str(k), 'ener_box' + str(k) + '.output')
        infile = open(filename, 'r')
        lines = infile.readlines()
        infile.close()
        numLines = len(lines)

        # Initialize arrays for E
        E_from_file = numpy.zeros(NumIterations, numpy.float64)

        # Parse lines in each file
        for n in range(NumIterations):
            m = numLines - 2 - n  # Count down (the 2 is for index purposes(1) and to not use the double-counted last line (1))
            elements = lines[m].split()
            E_from_file[n] = float(elements[colnum])

        # Add in the E's for each timestep (n) at this temperature (k)
        E_kn[k] = E_from_file
    return E_kn
[ "def", "read_total_energies", "(", "pathname", ",", "colnum", ")", ":", "print", "(", "\"--Reading total energies from %s/...\"", "%", "pathname", ")", "# Initialize Return variables", "E_kn", "=", "numpy", ".", "zeros", "(", "[", "NumTemps", ",", "NumIterations", "]", ",", "numpy", ".", "float64", ")", "#Read files", "for", "k", "in", "range", "(", "NumTemps", ")", ":", "#Construct each TEMP#/ener_box#.output name and read in the file", "filename", "=", "os", ".", "path", ".", "join", "(", "pathname", ",", "'TEMP'", "+", "str", "(", "k", ")", ",", "'ener_box'", "+", "str", "(", "k", ")", "+", "'.output'", ")", "infile", "=", "open", "(", "filename", ",", "'r'", ")", "lines", "=", "infile", ".", "readlines", "(", ")", "infile", ".", "close", "(", ")", "numLines", "=", "len", "(", "lines", ")", "#Initialize arrays for E", "E_from_file", "=", "numpy", ".", "zeros", "(", "NumIterations", ",", "numpy", ".", "float64", ")", "#Parse lines in each file", "for", "n", "in", "range", "(", "NumIterations", ")", ":", "m", "=", "numLines", "-", "2", "-", "n", "#Count down (the 2 is for index purposes(1) and to not use the double-counted last line (1))", "elements", "=", "lines", "[", "m", "]", ".", "split", "(", ")", "E_from_file", "[", "n", "]", "=", "float", "(", "elements", "[", "colnum", "]", ")", "#Add in the E's for each timestep (n) at this temperature (k)", "E_kn", "[", "k", "]", "=", "E_from_file", "return", "E_kn" ]
Reads in the TEMP#/ener_box#.output file and parses it, returning an array of energies ARGUMENTS filename (string) - the path to the folder of the simulation colnum (integer) column the energy is found in
[ "Reads", "in", "the", "TEMP#", "/", "ener_box#", ".", "output", "file", "and", "parses", "it", "returning", "an", "array", "of", "energies" ]
python
train
sthysel/knobs
src/knobs.py
https://github.com/sthysel/knobs/blob/1d01f50f643068076e38118a93fed9375ea3ac81/src/knobs.py#L225-L250
def get(self):
    """
    convert json env variable if set to list
    """
    self._cast = type([])
    source_value = os.getenv(self.env_name)

    # set the environment if it is not set
    if source_value is None:
        os.environ[self.env_name] = json.dumps(self.default)
        return self.default

    try:
        val = json.loads(source_value)
    except JSONDecodeError as e:
        click.secho(str(e), err=True, color='red')
        sys.exit(1)
    except ValueError as e:
        click.secho(e.message, err=True, color='red')
        sys.exit(1)

    if self.validator:
        val = self.validator(val)

    return val
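
# Editor's note: a hypothetical illustration of the behavior above, not part
# of the original source. With this knob's env_name set to MY_LIST and
# MY_LIST='[1, 2, 3]' in the environment, get() returns [1, 2, 3]; if MY_LIST
# is unset, the default is written back into os.environ as JSON and returned.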
[ "def", "get", "(", "self", ")", ":", "self", ".", "_cast", "=", "type", "(", "[", "]", ")", "source_value", "=", "os", ".", "getenv", "(", "self", ".", "env_name", ")", "# set the environment if it is not set", "if", "source_value", "is", "None", ":", "os", ".", "environ", "[", "self", ".", "env_name", "]", "=", "json", ".", "dumps", "(", "self", ".", "default", ")", "return", "self", ".", "default", "try", ":", "val", "=", "json", ".", "loads", "(", "source_value", ")", "except", "JSONDecodeError", "as", "e", ":", "click", ".", "secho", "(", "str", "(", "e", ")", ",", "err", "=", "True", ",", "color", "=", "'red'", ")", "sys", ".", "exit", "(", "1", ")", "except", "ValueError", "as", "e", ":", "click", ".", "secho", "(", "e", ".", "message", ",", "err", "=", "True", ",", "color", "=", "'red'", ")", "sys", ".", "exit", "(", "1", ")", "if", "self", ".", "validator", ":", "val", "=", "self", ".", "validator", "(", "val", ")", "return", "val" ]
convert json env variable if set to list
[ "convert", "json", "env", "variable", "if", "set", "to", "list" ]
python
train
numenta/nupic
src/nupic/encoders/scalar.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/encoders/scalar.py#L421-L468
def encodeIntoArray(self, input, output, learn=True):
    """ See method description in base.py """

    if input is not None and not isinstance(input, numbers.Number):
        raise TypeError(
            "Expected a scalar input but got input of type %s" % type(input))

    if type(input) is float and math.isnan(input):
        input = SENTINEL_VALUE_FOR_MISSING_DATA

    # Get the bucket index to use
    bucketIdx = self._getFirstOnBit(input)[0]

    if bucketIdx is None:
        # None is returned for missing value
        output[0:self.n] = 0  # TODO: should all 1s, or random SDR be returned instead?
    else:
        # The bucket index is the index of the first bit to set in the output
        output[:self.n] = 0
        minbin = bucketIdx
        maxbin = minbin + 2 * self.halfwidth
        if self.periodic:
            # Handle the edges by computing wrap-around
            if maxbin >= self.n:
                bottombins = maxbin - self.n + 1
                output[:bottombins] = 1
                maxbin = self.n - 1
            if minbin < 0:
                topbins = -minbin
                output[self.n - topbins:self.n] = 1
                minbin = 0

        assert minbin >= 0
        assert maxbin < self.n
        # set the output (except for periodic wraparound)
        output[minbin:maxbin + 1] = 1

    # Debug the decode() method
    if self.verbosity >= 2:
        print
        print "input:", input
        print "range:", self.minval, "-", self.maxval
        print "n:", self.n, "w:", self.w, "resolution:", self.resolution, \
              "radius", self.radius, "periodic:", self.periodic
        print "output:", self.pprint(output)
        print "input desc:", self.decodedToStr(self.decode(output))
[ "def", "encodeIntoArray", "(", "self", ",", "input", ",", "output", ",", "learn", "=", "True", ")", ":", "if", "input", "is", "not", "None", "and", "not", "isinstance", "(", "input", ",", "numbers", ".", "Number", ")", ":", "raise", "TypeError", "(", "\"Expected a scalar input but got input of type %s\"", "%", "type", "(", "input", ")", ")", "if", "type", "(", "input", ")", "is", "float", "and", "math", ".", "isnan", "(", "input", ")", ":", "input", "=", "SENTINEL_VALUE_FOR_MISSING_DATA", "# Get the bucket index to use", "bucketIdx", "=", "self", ".", "_getFirstOnBit", "(", "input", ")", "[", "0", "]", "if", "bucketIdx", "is", "None", ":", "# None is returned for missing value", "output", "[", "0", ":", "self", ".", "n", "]", "=", "0", "#TODO: should all 1s, or random SDR be returned instead?", "else", ":", "# The bucket index is the index of the first bit to set in the output", "output", "[", ":", "self", ".", "n", "]", "=", "0", "minbin", "=", "bucketIdx", "maxbin", "=", "minbin", "+", "2", "*", "self", ".", "halfwidth", "if", "self", ".", "periodic", ":", "# Handle the edges by computing wrap-around", "if", "maxbin", ">=", "self", ".", "n", ":", "bottombins", "=", "maxbin", "-", "self", ".", "n", "+", "1", "output", "[", ":", "bottombins", "]", "=", "1", "maxbin", "=", "self", ".", "n", "-", "1", "if", "minbin", "<", "0", ":", "topbins", "=", "-", "minbin", "output", "[", "self", ".", "n", "-", "topbins", ":", "self", ".", "n", "]", "=", "1", "minbin", "=", "0", "assert", "minbin", ">=", "0", "assert", "maxbin", "<", "self", ".", "n", "# set the output (except for periodic wraparound)", "output", "[", "minbin", ":", "maxbin", "+", "1", "]", "=", "1", "# Debug the decode() method", "if", "self", ".", "verbosity", ">=", "2", ":", "print", "print", "\"input:\"", ",", "input", "print", "\"range:\"", ",", "self", ".", "minval", ",", "\"-\"", ",", "self", ".", "maxval", "print", "\"n:\"", ",", "self", ".", "n", ",", "\"w:\"", ",", "self", ".", "w", ",", "\"resolution:\"", ",", "self", ".", "resolution", ",", "\"radius\"", ",", "self", ".", "radius", ",", "\"periodic:\"", ",", "self", ".", "periodic", "print", "\"output:\"", ",", "self", ".", "pprint", "(", "output", ")", "print", "\"input desc:\"", ",", "self", ".", "decodedToStr", "(", "self", ".", "decode", "(", "output", ")", ")" ]
See method description in base.py
[ "See", "method", "description", "in", "base", ".", "py" ]
python
valid
mamrhein/identifiers
identifiers/banking.py
https://github.com/mamrhein/identifiers/blob/93ab2609e461faff245d1f582411bf831b428eef/identifiers/banking.py#L155-L158
def bank_account_number(self):
    """Return the IBAN's Bank Account Number."""
    start = get_iban_spec(self.country_code).bban_split_pos + 4
    return self._id[start:]
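
# Editor's note: a hypothetical illustration, not part of the original
# source, using the common example IBAN DE89 3704 0044 0532 0130 00: the
# German BBAN splits after the 8-digit bank code, so start = 8 + 4 = 12 and
# the property returns '0532013000'.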
[ "def", "bank_account_number", "(", "self", ")", ":", "start", "=", "get_iban_spec", "(", "self", ".", "country_code", ")", ".", "bban_split_pos", "+", "4", "return", "self", ".", "_id", "[", "start", ":", "]" ]
Return the IBAN's Bank Account Number.
[ "Return", "the", "IBAN", "s", "Bank", "Account", "Number", "." ]
python
train