Dataset columns:

    repo              string  (length 7 to 55)
    path              string  (length 4 to 223)
    url               string  (length 87 to 315)
    code              string  (length 75 to 104k)
    code_tokens       list
    docstring         string  (length 1 to 46.9k)
    docstring_tokens  list
    language          string  (1 distinct value)
    partition         string  (3 distinct values)
    avg_line_len      float64 (7.91 to 980)
saltstack/salt
salt/modules/capirca_acl.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/capirca_acl.py#L419-L427
def _cleanup(lst):
    '''
    Return a list of non-empty dictionaries.
    '''
    clean = []
    for ele in lst:
        if ele and isinstance(ele, dict):
            clean.append(ele)
    return clean
[ "def", "_cleanup", "(", "lst", ")", ":", "clean", "=", "[", "]", "for", "ele", "in", "lst", ":", "if", "ele", "and", "isinstance", "(", "ele", ",", "dict", ")", ":", "clean", ".", "append", "(", "ele", ")", "return", "clean" ]
Return a list of non-empty dictionaries.
[ "Return", "a", "list", "of", "non", "-", "empty", "dictionaries", "." ]
python
train
21.666667
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/prompts.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/prompts.py#L407-L439
def render(self, name, color=True, just=None, **kwargs):
    """
    Render the selected prompt.

    Parameters
    ----------
    name : str
        Which prompt to render. One of 'in', 'in2', 'out', 'rewrite'
    color : bool
        If True (default), include ANSI escape sequences for a coloured prompt.
    just : bool
        If True, justify the prompt to the width of the last prompt. The
        default is stored in self.justify.
    **kwargs :
        Additional arguments will be passed to the string formatting operation,
        so they can override the values that would otherwise fill in the
        template.

    Returns
    -------
    A string containing the rendered prompt.
    """
    res = self._render(name, color=color, **kwargs)

    # Handle justification of prompt
    invis_chars = self.invisible_chars[name] if color else 0
    self.txtwidth = _lenlastline(res) - invis_chars
    just = self.justify if (just is None) else just
    # If the prompt spans more than one line, don't try to justify it:
    if just and name != 'in' and ('\n' not in res) and ('\r' not in res):
        res = res.rjust(self.width + invis_chars)
    self.width = _lenlastline(res) - invis_chars
    return res
[ "def", "render", "(", "self", ",", "name", ",", "color", "=", "True", ",", "just", "=", "None", ",", "*", "*", "kwargs", ")", ":", "res", "=", "self", ".", "_render", "(", "name", ",", "color", "=", "color", ",", "*", "*", "kwargs", ")", "# Handle justification of prompt", "invis_chars", "=", "self", ".", "invisible_chars", "[", "name", "]", "if", "color", "else", "0", "self", ".", "txtwidth", "=", "_lenlastline", "(", "res", ")", "-", "invis_chars", "just", "=", "self", ".", "justify", "if", "(", "just", "is", "None", ")", "else", "just", "# If the prompt spans more than one line, don't try to justify it:", "if", "just", "and", "name", "!=", "'in'", "and", "(", "'\\n'", "not", "in", "res", ")", "and", "(", "'\\r'", "not", "in", "res", ")", ":", "res", "=", "res", ".", "rjust", "(", "self", ".", "width", "+", "invis_chars", ")", "self", ".", "width", "=", "_lenlastline", "(", "res", ")", "-", "invis_chars", "return", "res" ]
Render the selected prompt. Parameters ---------- name : str Which prompt to render. One of 'in', 'in2', 'out', 'rewrite' color : bool If True (default), include ANSI escape sequences for a coloured prompt. just : bool If True, justify the prompt to the width of the last prompt. The default is stored in self.justify. **kwargs : Additional arguments will be passed to the string formatting operation, so they can override the values that would otherwise fill in the template. Returns ------- A string containing the rendered prompt.
[ "Render", "the", "selected", "prompt", ".", "Parameters", "----------", "name", ":", "str", "Which", "prompt", "to", "render", ".", "One", "of", "in", "in2", "out", "rewrite", "color", ":", "bool", "If", "True", "(", "default", ")", "include", "ANSI", "escape", "sequences", "for", "a", "coloured", "prompt", ".", "just", ":", "bool", "If", "True", "justify", "the", "prompt", "to", "the", "width", "of", "the", "last", "prompt", ".", "The", "default", "is", "stored", "in", "self", ".", "justify", ".", "**", "kwargs", ":", "Additional", "arguments", "will", "be", "passed", "to", "the", "string", "formatting", "operation", "so", "they", "can", "override", "the", "values", "that", "would", "otherwise", "fill", "in", "the", "template", ".", "Returns", "-------", "A", "string", "containing", "the", "rendered", "prompt", "." ]
python
test
39.757576
andrefsp/pyflot
flot/__init__.py
https://github.com/andrefsp/pyflot/blob/f2dde10709aeed39074fcce8172184b5cd8bfd66/flot/__init__.py#L168-L186
def _set_data(self):
    """ This method will be called to set Series data """
    if getattr(self, 'data', False) and not getattr(self, '_x', False) and not getattr(self, '_y', False):
        _x = XVariable()
        _y = YVariable()
        _x.contribute_to_class(self, 'X', self.data)
        _y.contribute_to_class(self, 'Y', self.data)
        self['data'] = zip(self._x.points, self._y.points)
    else:
        for axis in ('_x', '_y'):
            axis_obj = getattr(self, axis, False)
            if not axis_obj:
                raise exception.MissingAxisException("%s missing" % axis)
            if not getattr(axis_obj, 'points', False):
                raise exception.MissingDataException()
        self['data'] = zip(self._x.points, self._y.points)
[ "def", "_set_data", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'data'", ",", "False", ")", "and", "not", "getattr", "(", "self", ",", "'_x'", ",", "False", ")", "and", "not", "getattr", "(", "self", ",", "'_y'", ",", "False", ")", ":", "_x", "=", "XVariable", "(", ")", "_y", "=", "YVariable", "(", ")", "_x", ".", "contribute_to_class", "(", "self", ",", "'X'", ",", "self", ".", "data", ")", "_y", ".", "contribute_to_class", "(", "self", ",", "'Y'", ",", "self", ".", "data", ")", "self", "[", "'data'", "]", "=", "zip", "(", "self", ".", "_x", ".", "points", ",", "self", ".", "_y", ".", "points", ")", "else", ":", "for", "axis", "in", "(", "'_x'", ",", "'_y'", ")", ":", "axis_obj", "=", "getattr", "(", "self", ",", "axis", ",", "False", ")", "if", "not", "axis_obj", ":", "raise", "exception", ".", "MissingAxisException", "(", "\"%s missing\"", "%", "axis", ")", "if", "not", "getattr", "(", "axis_obj", ",", "'points'", ",", "False", ")", ":", "raise", "exception", ".", "MissingDataException", "(", ")", "self", "[", "'data'", "]", "=", "zip", "(", "self", ".", "_x", ".", "points", ",", "self", ".", "_y", ".", "points", ")" ]
This method will be called to set Series data
[ "This", "method", "will", "be", "called", "to", "set", "Series", "data" ]
python
test
43.473684
Netflix-Skunkworks/swag-client
swag_client/cli.py
https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L296-L339
def seed_aws_organization(ctx, owner):
    """Seeds SWAG from an AWS organziation."""
    swag = create_swag_from_ctx(ctx)
    accounts = swag.get_all()
    _ids = [result.get('id') for result in accounts]

    client = boto3.client('organizations')
    paginator = client.get_paginator('list_accounts')
    response_iterator = paginator.paginate()

    count = 0
    for response in response_iterator:
        for account in response['Accounts']:
            if account['Id'] in _ids:
                click.echo(click.style(
                    'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account['Id']),
                    fg='yellow')
                )
                continue

            if account['Status'] == 'SUSPENDED':
                status = 'deprecated'
            else:
                status = 'created'

            data = {
                'id': account['Id'],
                'name': account['Name'],
                'description': 'Account imported from AWS organization.',
                'email': account['Email'],
                'owner': owner,
                'provider': 'aws',
                'contacts': [],
                'sensitive': False,
                'status': [{'region': 'all', 'status': status}]
            }

            click.echo(click.style(
                'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
            )

            count += 1
            swag.create(data, dry_run=ctx.dry_run)

    click.echo('Seeded {} accounts to SWAG.'.format(count))
[ "def", "seed_aws_organization", "(", "ctx", ",", "owner", ")", ":", "swag", "=", "create_swag_from_ctx", "(", "ctx", ")", "accounts", "=", "swag", ".", "get_all", "(", ")", "_ids", "=", "[", "result", ".", "get", "(", "'id'", ")", "for", "result", "in", "accounts", "]", "client", "=", "boto3", ".", "client", "(", "'organizations'", ")", "paginator", "=", "client", ".", "get_paginator", "(", "'list_accounts'", ")", "response_iterator", "=", "paginator", ".", "paginate", "(", ")", "count", "=", "0", "for", "response", "in", "response_iterator", ":", "for", "account", "in", "response", "[", "'Accounts'", "]", ":", "if", "account", "[", "'Id'", "]", "in", "_ids", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'", ".", "format", "(", "account", "[", "'Id'", "]", ")", ",", "fg", "=", "'yellow'", ")", ")", "continue", "if", "account", "[", "'Status'", "]", "==", "'SUSPENDED'", ":", "status", "=", "'deprecated'", "else", ":", "status", "=", "'created'", "data", "=", "{", "'id'", ":", "account", "[", "'Id'", "]", ",", "'name'", ":", "account", "[", "'Name'", "]", ",", "'description'", ":", "'Account imported from AWS organization.'", ",", "'email'", ":", "account", "[", "'Email'", "]", ",", "'owner'", ":", "owner", ",", "'provider'", ":", "'aws'", ",", "'contacts'", ":", "[", "]", ",", "'sensitive'", ":", "False", ",", "'status'", ":", "[", "{", "'region'", ":", "'all'", ",", "'status'", ":", "status", "}", "]", "}", "click", ".", "echo", "(", "click", ".", "style", "(", "'Seeded Account. AccountName: {}'", ".", "format", "(", "data", "[", "'name'", "]", ")", ",", "fg", "=", "'green'", ")", ")", "count", "+=", "1", "swag", ".", "create", "(", "data", ",", "dry_run", "=", "ctx", ".", "dry_run", ")", "click", ".", "echo", "(", "'Seeded {} accounts to SWAG.'", ".", "format", "(", "count", ")", ")" ]
Seeds SWAG from an AWS organziation.
[ "Seeds", "SWAG", "from", "an", "AWS", "organziation", "." ]
python
train
33.886364
googlemaps/google-maps-services-python
googlemaps/timezone.py
https://github.com/googlemaps/google-maps-services-python/blob/7ed40b4d8df63479794c46ce29d03ed6083071d7/googlemaps/timezone.py#L25-L54
def timezone(client, location, timestamp=None, language=None):
    """Get time zone for a location on the earth, as well as that location's
    time offset from UTC.

    :param location: The latitude/longitude value representing the location to
        look up.
    :type location: string, dict, list, or tuple

    :param timestamp: Timestamp specifies the desired time as seconds since
        midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to
        determine whether or not Daylight Savings should be applied. Times
        before 1970 can be expressed as negative values. Optional. Defaults to
        ``datetime.utcnow()``.
    :type timestamp: int or datetime.datetime

    :param language: The language in which to return results.
    :type language: string

    :rtype: dict
    """
    params = {
        "location": convert.latlng(location),
        "timestamp": convert.time(timestamp or datetime.utcnow())
    }

    if language:
        params["language"] = language

    return client._request(
        "/maps/api/timezone/json", params)
[ "def", "timezone", "(", "client", ",", "location", ",", "timestamp", "=", "None", ",", "language", "=", "None", ")", ":", "params", "=", "{", "\"location\"", ":", "convert", ".", "latlng", "(", "location", ")", ",", "\"timestamp\"", ":", "convert", ".", "time", "(", "timestamp", "or", "datetime", ".", "utcnow", "(", ")", ")", "}", "if", "language", ":", "params", "[", "\"language\"", "]", "=", "language", "return", "client", ".", "_request", "(", "\"/maps/api/timezone/json\"", ",", "params", ")" ]
Get time zone for a location on the earth, as well as that location's time offset from UTC. :param location: The latitude/longitude value representing the location to look up. :type location: string, dict, list, or tuple :param timestamp: Timestamp specifies the desired time as seconds since midnight, January 1, 1970 UTC. The Time Zone API uses the timestamp to determine whether or not Daylight Savings should be applied. Times before 1970 can be expressed as negative values. Optional. Defaults to ``datetime.utcnow()``. :type timestamp: int or datetime.datetime :param language: The language in which to return results. :type language: string :rtype: dict
[ "Get", "time", "zone", "for", "a", "location", "on", "the", "earth", "as", "well", "as", "that", "location", "s", "time", "offset", "from", "UTC", "." ]
python
train
34.633333
SmokinCaterpillar/pypet
examples/example_24_large_scale_brian2_simulation/clusternet.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/examples/example_24_large_scale_brian2_simulation/clusternet.py#L820-L864
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
    """Extracts monitor data and plots.

    Data extraction is done if all subruns have been completed,
    i.e. `len(subrun_list)==0`

    First, extracts results from the monitors and stores them into `traj`.

    Next, uses the extracted data for plots.

    :param traj: Trajectory container

        Adds: Data from monitors

    :param network: The BRIAN network
    :param current_subrun: BrianParameter
    :param subrun_list: List of coming subruns
    :param network_dict: Dictionary of items shared among all components
    """
    if len(subrun_list)==0:

        traj.f_add_result(Brian2MonitorResult, 'monitors.spikes_e', self.spike_monitor,
                          comment = 'The spiketimes of the excitatory population')

        traj.f_add_result(Brian2MonitorResult, 'monitors.V', self.V_monitor,
                          comment = 'Membrane voltage of four neurons from 2 clusters')

        traj.f_add_result(Brian2MonitorResult, 'monitors.I_syn_e', self.I_syn_e_monitor,
                          comment = 'I_syn_e of four neurons from 2 clusters')

        traj.f_add_result(Brian2MonitorResult, 'monitors.I_syn_i', self.I_syn_i_monitor,
                          comment = 'I_syn_i of four neurons from 2 clusters')

        print('Plotting')

        if traj.parameters.analysis.make_plots:
            self._print_graphs(traj)
[ "def", "analyse", "(", "self", ",", "traj", ",", "network", ",", "current_subrun", ",", "subrun_list", ",", "network_dict", ")", ":", "if", "len", "(", "subrun_list", ")", "==", "0", ":", "traj", ".", "f_add_result", "(", "Brian2MonitorResult", ",", "'monitors.spikes_e'", ",", "self", ".", "spike_monitor", ",", "comment", "=", "'The spiketimes of the excitatory population'", ")", "traj", ".", "f_add_result", "(", "Brian2MonitorResult", ",", "'monitors.V'", ",", "self", ".", "V_monitor", ",", "comment", "=", "'Membrane voltage of four neurons from 2 clusters'", ")", "traj", ".", "f_add_result", "(", "Brian2MonitorResult", ",", "'monitors.I_syn_e'", ",", "self", ".", "I_syn_e_monitor", ",", "comment", "=", "'I_syn_e of four neurons from 2 clusters'", ")", "traj", ".", "f_add_result", "(", "Brian2MonitorResult", ",", "'monitors.I_syn_i'", ",", "self", ".", "I_syn_i_monitor", ",", "comment", "=", "'I_syn_i of four neurons from 2 clusters'", ")", "print", "(", "'Plotting'", ")", "if", "traj", ".", "parameters", ".", "analysis", ".", "make_plots", ":", "self", ".", "_print_graphs", "(", "traj", ")" ]
Extracts monitor data and plots. Data extraction is done if all subruns have been completed, i.e. `len(subrun_list)==0` First, extracts results from the monitors and stores them into `traj`. Next, uses the extracted data for plots. :param traj: Trajectory container Adds: Data from monitors :param network: The BRIAN network :param current_subrun: BrianParameter :param subrun_list: List of coming subruns :param network_dict: Dictionary of items shared among all components
[ "Extracts", "monitor", "data", "and", "plots", "." ]
python
test
33.666667
pyhys/minimalmodbus
omegacn7500.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/omegacn7500.py#L464-L477
def _checkSetpointValue( setpointvalue, maxvalue ):
    """Check that the given setpointvalue is valid.

    Args:
        * setpointvalue (numerical): The setpoint value to be checked. Must be positive.
        * maxvalue (numerical): Upper limit for setpoint value. Must be positive.

    Raises:
        TypeError, ValueError
    """
    if maxvalue is None:
        raise TypeError('The maxvalue (for the setpoint) must not be None!')
    minimalmodbus._checkNumerical(setpointvalue, minvalue=0, maxvalue=maxvalue, description='setpoint value')
[ "def", "_checkSetpointValue", "(", "setpointvalue", ",", "maxvalue", ")", ":", "if", "maxvalue", "is", "None", ":", "raise", "TypeError", "(", "'The maxvalue (for the setpoint) must not be None!'", ")", "minimalmodbus", ".", "_checkNumerical", "(", "setpointvalue", ",", "minvalue", "=", "0", ",", "maxvalue", "=", "maxvalue", ",", "description", "=", "'setpoint value'", ")" ]
Check that the given setpointvalue is valid. Args: * setpointvalue (numerical): The setpoint value to be checked. Must be positive. * maxvalue (numerical): Upper limit for setpoint value. Must be positive. Raises: TypeError, ValueError
[ "Check", "that", "the", "given", "setpointvalue", "is", "valid", ".", "Args", ":", "*", "setpointvalue", "(", "numerical", ")", ":", "The", "setpoint", "value", "to", "be", "checked", ".", "Must", "be", "positive", ".", "*", "maxvalue", "(", "numerical", ")", ":", "Upper", "limit", "for", "setpoint", "value", ".", "Must", "be", "positive", ".", "Raises", ":", "TypeError", "ValueError" ]
python
train
39.642857
inveniosoftware/invenio-theme
invenio_theme/ext.py
https://github.com/inveniosoftware/invenio-theme/blob/4e07607b1a40805df1d8e4ab9cc2afd728579ca9/invenio_theme/ext.py#L76-L97
def init_config(self, app):
    """Initialize configuration.

    :param app: An instance of :class:`~flask.Flask`.
    """
    _vars = ['BASE_TEMPLATE', 'COVER_TEMPLATE', 'SETTINGS_TEMPLATE']

    # Sets RequireJS config and SASS binary as well if not already set.
    for k in dir(config):
        if k.startswith('THEME_') or k in [
                'REQUIREJS_CONFIG', 'SASS_BIN'] + _vars:
            app.config.setdefault(k, getattr(config, k))

    # Set THEME_<name>_TEMPLATE from <name>_TEMPLATE variables if not
    # already set.
    for varname in _vars:
        theme_varname = 'THEME_{}'.format(varname)
        if app.config[theme_varname] is None:
            app.config[theme_varname] = app.config[varname]

    app.config.setdefault(
        'ADMIN_BASE_TEMPLATE',
        config.ADMIN_BASE_TEMPLATE)
[ "def", "init_config", "(", "self", ",", "app", ")", ":", "_vars", "=", "[", "'BASE_TEMPLATE'", ",", "'COVER_TEMPLATE'", ",", "'SETTINGS_TEMPLATE'", "]", "# Sets RequireJS config and SASS binary as well if not already set.", "for", "k", "in", "dir", "(", "config", ")", ":", "if", "k", ".", "startswith", "(", "'THEME_'", ")", "or", "k", "in", "[", "'REQUIREJS_CONFIG'", ",", "'SASS_BIN'", "]", "+", "_vars", ":", "app", ".", "config", ".", "setdefault", "(", "k", ",", "getattr", "(", "config", ",", "k", ")", ")", "# Set THEME_<name>_TEMPLATE from <name>_TEMPLATE variables if not", "# already set.", "for", "varname", "in", "_vars", ":", "theme_varname", "=", "'THEME_{}'", ".", "format", "(", "varname", ")", "if", "app", ".", "config", "[", "theme_varname", "]", "is", "None", ":", "app", ".", "config", "[", "theme_varname", "]", "=", "app", ".", "config", "[", "varname", "]", "app", ".", "config", ".", "setdefault", "(", "'ADMIN_BASE_TEMPLATE'", ",", "config", ".", "ADMIN_BASE_TEMPLATE", ")" ]
Initialize configuration. :param app: An instance of :class:`~flask.Flask`.
[ "Initialize", "configuration", "." ]
python
train
38.909091
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/garbagegraph.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/garbagegraph.py#L47-L61
def print_stats(self, stream=None):
    """
    Log annotated garbage objects to console or file.

    :param stream: open file, uses sys.stdout if not given
    """
    if not stream:  # pragma: no cover
        stream = sys.stdout
    self.metadata.sort(key=lambda x: -x.size)
    stream.write('%-10s %8s %-12s %-46s\n' % ('id', 'size', 'type', 'representation'))
    for g in self.metadata:
        stream.write('0x%08x %8d %-12s %-46s\n' % (g.id, g.size, trunc(g.type, 12), trunc(g.str, 46)))
    stream.write('Garbage: %8d collected objects (%s in cycles): %12s\n' % \
        (self.count, self.num_in_cycles, pp(self.total_size)))
[ "def", "print_stats", "(", "self", ",", "stream", "=", "None", ")", ":", "if", "not", "stream", ":", "# pragma: no cover", "stream", "=", "sys", ".", "stdout", "self", ".", "metadata", ".", "sort", "(", "key", "=", "lambda", "x", ":", "-", "x", ".", "size", ")", "stream", ".", "write", "(", "'%-10s %8s %-12s %-46s\\n'", "%", "(", "'id'", ",", "'size'", ",", "'type'", ",", "'representation'", ")", ")", "for", "g", "in", "self", ".", "metadata", ":", "stream", ".", "write", "(", "'0x%08x %8d %-12s %-46s\\n'", "%", "(", "g", ".", "id", ",", "g", ".", "size", ",", "trunc", "(", "g", ".", "type", ",", "12", ")", ",", "trunc", "(", "g", ".", "str", ",", "46", ")", ")", ")", "stream", ".", "write", "(", "'Garbage: %8d collected objects (%s in cycles): %12s\\n'", "%", "(", "self", ".", "count", ",", "self", ".", "num_in_cycles", ",", "pp", "(", "self", ".", "total_size", ")", ")", ")" ]
Log annotated garbage objects to console or file. :param stream: open file, uses sys.stdout if not given
[ "Log", "annotated", "garbage", "objects", "to", "console", "or", "file", "." ]
python
train
45.666667
saltstack/salt
salt/modules/saltsupport.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/saltsupport.py#L169-L193
def delete_archives(self, *archives):
    '''
    Delete archives

    :return:
    '''
    # Remove paths
    _archives = []
    for archive in archives:
        _archives.append(os.path.basename(archive))
    archives = _archives[:]

    ret = {'files': {}, 'errors': {}}
    for archive in self.archives():
        arc_dir = os.path.dirname(archive)
        archive = os.path.basename(archive)
        if archives and archive in archives or not archives:
            archive = os.path.join(arc_dir, archive)
            try:
                os.unlink(archive)
                ret['files'][archive] = 'removed'
            except Exception as err:
                ret['errors'][archive] = str(err)
                ret['files'][archive] = 'left'

    return ret
[ "def", "delete_archives", "(", "self", ",", "*", "archives", ")", ":", "# Remove paths", "_archives", "=", "[", "]", "for", "archive", "in", "archives", ":", "_archives", ".", "append", "(", "os", ".", "path", ".", "basename", "(", "archive", ")", ")", "archives", "=", "_archives", "[", ":", "]", "ret", "=", "{", "'files'", ":", "{", "}", ",", "'errors'", ":", "{", "}", "}", "for", "archive", "in", "self", ".", "archives", "(", ")", ":", "arc_dir", "=", "os", ".", "path", ".", "dirname", "(", "archive", ")", "archive", "=", "os", ".", "path", ".", "basename", "(", "archive", ")", "if", "archives", "and", "archive", "in", "archives", "or", "not", "archives", ":", "archive", "=", "os", ".", "path", ".", "join", "(", "arc_dir", ",", "archive", ")", "try", ":", "os", ".", "unlink", "(", "archive", ")", "ret", "[", "'files'", "]", "[", "archive", "]", "=", "'removed'", "except", "Exception", "as", "err", ":", "ret", "[", "'errors'", "]", "[", "archive", "]", "=", "str", "(", "err", ")", "ret", "[", "'files'", "]", "[", "archive", "]", "=", "'left'", "return", "ret" ]
Delete archives :return:
[ "Delete", "archives", ":", "return", ":" ]
python
train
33
draperjames/qtpandas
qtpandas/ui/fallback/easygui/boxes/derived_boxes.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/ui/fallback/easygui/boxes/derived_boxes.py#L319-L362
def multenterbox(msg="Fill in values for the fields.", title=" ", fields=(), values=()): r""" Show screen with multiple data entry fields. If there are fewer values than names, the list of values is padded with empty strings until the number of values is the same as the number of names. If there are more values than names, the list of values is truncated so that there are as many values as names. Returns a list of the values of the fields, or None if the user cancels the operation. Here is some example code, that shows how values returned from multenterbox can be checked for validity before they are accepted:: msg = "Enter your personal information" title = "Credit Card Application" fieldNames = ["Name","Street Address","City","State","ZipCode"] fieldValues = [] # we start with blanks for the values fieldValues = multenterbox(msg,title, fieldNames) # make sure that none of the fields was left blank while 1: if fieldValues is None: break errmsg = "" for i in range(len(fieldNames)): if fieldValues[i].strip() == "": errmsg += ('"%s" is a required field.\n\n' % fieldNames[i]) if errmsg == "": break # no problems found fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues) print("Reply was: %s" % str(fieldValues)) :param str msg: the msg to be displayed. :param str title: the window title :param list fields: a list of fieldnames. :param list values: a list of field values :return: String """ return bb.__multfillablebox(msg, title, fields, values, None)
[ "def", "multenterbox", "(", "msg", "=", "\"Fill in values for the fields.\"", ",", "title", "=", "\" \"", ",", "fields", "=", "(", ")", ",", "values", "=", "(", ")", ")", ":", "return", "bb", ".", "__multfillablebox", "(", "msg", ",", "title", ",", "fields", ",", "values", ",", "None", ")" ]
r""" Show screen with multiple data entry fields. If there are fewer values than names, the list of values is padded with empty strings until the number of values is the same as the number of names. If there are more values than names, the list of values is truncated so that there are as many values as names. Returns a list of the values of the fields, or None if the user cancels the operation. Here is some example code, that shows how values returned from multenterbox can be checked for validity before they are accepted:: msg = "Enter your personal information" title = "Credit Card Application" fieldNames = ["Name","Street Address","City","State","ZipCode"] fieldValues = [] # we start with blanks for the values fieldValues = multenterbox(msg,title, fieldNames) # make sure that none of the fields was left blank while 1: if fieldValues is None: break errmsg = "" for i in range(len(fieldNames)): if fieldValues[i].strip() == "": errmsg += ('"%s" is a required field.\n\n' % fieldNames[i]) if errmsg == "": break # no problems found fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues) print("Reply was: %s" % str(fieldValues)) :param str msg: the msg to be displayed. :param str title: the window title :param list fields: a list of fieldnames. :param list values: a list of field values :return: String
[ "r", "Show", "screen", "with", "multiple", "data", "entry", "fields", "." ]
python
train
38.954545
rwl/pylon
pyreto/roth_erev.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pyreto/roth_erev.py#L254-L267
def _forwardImplementation(self, inbuf, outbuf):
    """ Proportional probability method.
    """
    assert self.module

    propensities = self.module.getActionValues(0)

    summedProps = sum(propensities)
    probabilities = propensities / summedProps

    action = eventGenerator(probabilities)
    # action = drawIndex(probabilities)
    outbuf[:] = scipy.array([action])
[ "def", "_forwardImplementation", "(", "self", ",", "inbuf", ",", "outbuf", ")", ":", "assert", "self", ".", "module", "propensities", "=", "self", ".", "module", ".", "getActionValues", "(", "0", ")", "summedProps", "=", "sum", "(", "propensities", ")", "probabilities", "=", "propensities", "/", "summedProps", "action", "=", "eventGenerator", "(", "probabilities", ")", "# action = drawIndex(probabilities)", "outbuf", "[", ":", "]", "=", "scipy", ".", "array", "(", "[", "action", "]", ")" ]
Proportional probability method.
[ "Proportional", "probability", "method", "." ]
python
train
28.571429
saltstack/salt
salt/modules/http.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/http.py#L17-L44
def query(url, **kwargs):
    '''
    Query a resource, and decode the return data

    Passes through all the parameters described in the
    :py:func:`utils.http.query function <salt.utils.http.query>`:

    .. autofunction:: salt.utils.http.query

    CLI Example:

    .. code-block:: bash

        salt '*' http.query http://somelink.com/
        salt '*' http.query http://somelink.com/ method=POST \
            params='key1=val1&key2=val2'
        salt '*' http.query http://somelink.com/ method=POST \
            data='<xml>somecontent</xml>'

    For more information about the ``http.query`` module, refer to the
    :ref:`HTTP Tutorial <tutorial-http>`.
    '''
    opts = __opts__.copy()
    if 'opts' in kwargs:
        opts.update(kwargs['opts'])
        del kwargs['opts']

    return salt.utils.http.query(url=url, opts=opts, **kwargs)
[ "def", "query", "(", "url", ",", "*", "*", "kwargs", ")", ":", "opts", "=", "__opts__", ".", "copy", "(", ")", "if", "'opts'", "in", "kwargs", ":", "opts", ".", "update", "(", "kwargs", "[", "'opts'", "]", ")", "del", "kwargs", "[", "'opts'", "]", "return", "salt", ".", "utils", ".", "http", ".", "query", "(", "url", "=", "url", ",", "opts", "=", "opts", ",", "*", "*", "kwargs", ")" ]
Query a resource, and decode the return data Passes through all the parameters described in the :py:func:`utils.http.query function <salt.utils.http.query>`: .. autofunction:: salt.utils.http.query CLI Example: .. code-block:: bash salt '*' http.query http://somelink.com/ salt '*' http.query http://somelink.com/ method=POST \ params='key1=val1&key2=val2' salt '*' http.query http://somelink.com/ method=POST \ data='<xml>somecontent</xml>' For more information about the ``http.query`` module, refer to the :ref:`HTTP Tutorial <tutorial-http>`.
[ "Query", "a", "resource", "and", "decode", "the", "return", "data" ]
python
train
29.5
ggaughan/pipe2py
pipe2py/modules/pipesubelement.py
https://github.com/ggaughan/pipe2py/blob/4767d6d1fd354d2a35e6528594b8deb8a033eed4/pipe2py/modules/pipesubelement.py#L14-L38
def pipe_subelement(context=None, _INPUT=None, conf=None, **kwargs):
    """An operator extracts select sub-elements from a feed. Not loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipe2py.modules pipe like object (iterable of items)
    conf : {'path': {'value': <element path>}}

    Yields
    ------
    _OUTPUT : items
    """
    path = DotDict(conf).get('path', **kwargs)

    for item in _INPUT:
        element = DotDict(item).get(path, **kwargs)

        for i in utils.gen_items(element):
            yield {'content': i}

        if item.get('forever'):
            # _INPUT is pipeforever and not a loop,
            # so we just yield our item once
            break
[ "def", "pipe_subelement", "(", "context", "=", "None", ",", "_INPUT", "=", "None", ",", "conf", "=", "None", ",", "*", "*", "kwargs", ")", ":", "path", "=", "DotDict", "(", "conf", ")", ".", "get", "(", "'path'", ",", "*", "*", "kwargs", ")", "for", "item", "in", "_INPUT", ":", "element", "=", "DotDict", "(", "item", ")", ".", "get", "(", "path", ",", "*", "*", "kwargs", ")", "for", "i", "in", "utils", ".", "gen_items", "(", "element", ")", ":", "yield", "{", "'content'", ":", "i", "}", "if", "item", ".", "get", "(", "'forever'", ")", ":", "# _INPUT is pipeforever and not a loop,", "# so we just yield our item once", "break" ]
An operator extracts select sub-elements from a feed. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipe2py.modules pipe like object (iterable of items) conf : {'path': {'value': <element path>}} Yields ------ _OUTPUT : items
[ "An", "operator", "extracts", "select", "sub", "-", "elements", "from", "a", "feed", ".", "Not", "loopable", "." ]
python
train
28
ellmetha/django-machina
machina/apps/forum_permission/handler.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_permission/handler.py#L254-L258
def get_target_forums_for_moved_topics(self, user):
    """ Returns a list of forums in which the considered user can add topics that have been
    moved from another forum.
    """
    return [f for f in self._get_forums_for_user(user, ['can_move_topics', ]) if f.is_forum]
[ "def", "get_target_forums_for_moved_topics", "(", "self", ",", "user", ")", ":", "return", "[", "f", "for", "f", "in", "self", ".", "_get_forums_for_user", "(", "user", ",", "[", "'can_move_topics'", ",", "]", ")", "if", "f", ".", "is_forum", "]" ]
Returns a list of forums in which the considered user can add topics that have been moved from another forum.
[ "Returns", "a", "list", "of", "forums", "in", "which", "the", "considered", "user", "can", "add", "topics", "that", "have", "been", "moved", "from", "another", "forum", "." ]
python
train
58
blazelibs/blazeutils
blazeutils/importing.py
https://github.com/blazelibs/blazeutils/blob/c94476325146007553cbddeeb9ef83394756babf/blazeutils/importing.py#L57-L75
def import_split(import_name):
    """ takes a dotted string path and returns the components:
        import_split('path') == 'path', None, None
        import_split('path.part.object') == 'path.part', 'object', None
        import_split('path.part:object') == 'path.part', 'object', None
        import_split('path.part:object.attribute') == 'path.part', 'object', 'attribute'
    """
    obj = None
    attr = None
    if ':' in import_name:
        module, obj = import_name.split(':', 1)
        if '.' in obj:
            obj, attr = obj.rsplit('.', 1)
    elif '.' in import_name:
        module, obj = import_name.rsplit('.', 1)
    else:
        module = import_name
    return module, obj, attr
[ "def", "import_split", "(", "import_name", ")", ":", "obj", "=", "None", "attr", "=", "None", "if", "':'", "in", "import_name", ":", "module", ",", "obj", "=", "import_name", ".", "split", "(", "':'", ",", "1", ")", "if", "'.'", "in", "obj", ":", "obj", ",", "attr", "=", "obj", ".", "rsplit", "(", "'.'", ",", "1", ")", "elif", "'.'", "in", "import_name", ":", "module", ",", "obj", "=", "import_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "else", ":", "module", "=", "import_name", "return", "module", ",", "obj", ",", "attr" ]
takes a dotted string path and returns the components: import_split('path') == 'path', None, None import_split('path.part.object') == 'path.part', 'object', None import_split('path.part:object') == 'path.part', 'object', None import_split('path.part:object.attribute') == 'path.part', 'object', 'attribute'
[ "takes", "a", "dotted", "string", "path", "and", "returns", "the", "components", ":", "import_split", "(", "path", ")", "==", "path", "None", "None", "import_split", "(", "path", ".", "part", ".", "object", ")", "==", "path", ".", "part", "object", "None", "import_split", "(", "path", ".", "part", ":", "object", ")", "==", "path", ".", "part", "object", "None", "import_split", "(", "path", ".", "part", ":", "object", ".", "attribute", ")", "==", "path", ".", "part", "object", "attribute" ]
python
train
36.684211
casacore/python-casacore
casacore/tables/tableutil.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/tables/tableutil.py#L244-L313
def makescacoldesc(columnname, value,
                   datamanagertype='', datamanagergroup='',
                   options=0, maxlen=0, comment='',
                   valuetype='', keywords={}):
    """Create description of a scalar column.

    A description for a scalar column can be created from a name for
    the column and a data value, which is used only to determine the
    type of the column. Note that a dict value is also possible.

    It is possible to create the column description in more detail
    by giving the data manager name, group, option, and comment as well.

    The data manager type tells which data manager (storage manager)
    is used to store the columns. The data manager type and group are
    explained in more detail in the `casacore Tables
    <../../casacore/doc/html/group__Tables__module.html>`_ documentation.

    It returns a dict with fields `name` and `desc` which can thereafter be used
    to build a table description using function :func:`maketabdesc`.

    `columname`
      Name of column
    `value`
      Example data value used to determine the column's data type.
      It is only used if argument `valuetype` is not given.
    `datamanagertype`
      Type of data manager which can be one of StandardStMan (default)
      or IncrementalStMan. The latter one can save disk space if many
      subsequent cells in the column will have the same value.
    `datamanagergroup`
      Data manager group. Only for the expert user.
    `options`
      Options. Need not be filled in.
    `maxlen`
      Maximum length of string values in a column.
      Default 0 means unlimited.
    `comment`
      Comment: informational for user.
    `valuetype`
      A string giving the column's data type. Possible data types are
      bool (or boolean), uchar (or byte), short, int (or integer), uint,
      float, double, complex, dcomplex, and string.
    'keywords'
      A dict defining initial keywords for the column.

    For example::

      scd1 = makescacoldesc("col2", ""))
      scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
      td = maketabdesc([scd1, scd2])

    This creates a table description consisting of an integer column `col1`,
    and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
    while `col2` uses the default storage manager StandardStMan.
    """
    vtype = valuetype
    if vtype == '':
        vtype = _value_type_name(value)
    rec2 = {'valueType': vtype,
            'dataManagerType': datamanagertype,
            'dataManagerGroup': datamanagergroup,
            'option': options,
            'maxlen': maxlen,
            'comment': comment,
            'keywords': keywords}
    return {'name': columnname,
            'desc': rec2}
[ "def", "makescacoldesc", "(", "columnname", ",", "value", ",", "datamanagertype", "=", "''", ",", "datamanagergroup", "=", "''", ",", "options", "=", "0", ",", "maxlen", "=", "0", ",", "comment", "=", "''", ",", "valuetype", "=", "''", ",", "keywords", "=", "{", "}", ")", ":", "vtype", "=", "valuetype", "if", "vtype", "==", "''", ":", "vtype", "=", "_value_type_name", "(", "value", ")", "rec2", "=", "{", "'valueType'", ":", "vtype", ",", "'dataManagerType'", ":", "datamanagertype", ",", "'dataManagerGroup'", ":", "datamanagergroup", ",", "'option'", ":", "options", ",", "'maxlen'", ":", "maxlen", ",", "'comment'", ":", "comment", ",", "'keywords'", ":", "keywords", "}", "return", "{", "'name'", ":", "columnname", ",", "'desc'", ":", "rec2", "}" ]
Create description of a scalar column. A description for a scalar column can be created from a name for the column and a data value, which is used only to determine the type of the column. Note that a dict value is also possible. It is possible to create the column description in more detail by giving the data manager name, group, option, and comment as well. The data manager type tells which data manager (storage manager) is used to store the columns. The data manager type and group are explained in more detail in the `casacore Tables <../../casacore/doc/html/group__Tables__module.html>`_ documentation. It returns a dict with fields `name` and `desc` which can thereafter be used to build a table description using function :func:`maketabdesc`. `columname` Name of column `value` Example data value used to determine the column's data type. It is only used if argument `valuetype` is not given. `datamanagertype` Type of data manager which can be one of StandardStMan (default) or IncrementalStMan. The latter one can save disk space if many subsequent cells in the column will have the same value. `datamanagergroup` Data manager group. Only for the expert user. `options` Options. Need not be filled in. `maxlen` Maximum length of string values in a column. Default 0 means unlimited. `comment` Comment: informational for user. `valuetype` A string giving the column's data type. Possible data types are bool (or boolean), uchar (or byte), short, int (or integer), uint, float, double, complex, dcomplex, and string. 'keywords' A dict defining initial keywords for the column. For example:: scd1 = makescacoldesc("col2", "")) scd2 = makescacoldesc("col1", 1, "IncrementalStMan") td = maketabdesc([scd1, scd2]) This creates a table description consisting of an integer column `col1`, and a string column `col2`. `col1` uses the IncrementalStMan storage manager, while `col2` uses the default storage manager StandardStMan.
[ "Create", "description", "of", "a", "scalar", "column", "." ]
python
train
38.628571
PyCQA/astroid
astroid/node_classes.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/node_classes.py#L2735-L2754
def from_elements(cls, items=None):
    """Create a :class:`Dict` of constants from a live dictionary.

    :param items: The items to store in the node.
    :type items: dict

    :returns: The created dictionary node.
    :rtype: Dict
    """
    node = cls()
    if items is None:
        node.items = []
    else:
        node.items = [
            (const_factory(k), const_factory(v) if _is_const(v) else v)
            for k, v in items.items()
            # The keys need to be constants
            if _is_const(k)
        ]
    return node
[ "def", "from_elements", "(", "cls", ",", "items", "=", "None", ")", ":", "node", "=", "cls", "(", ")", "if", "items", "is", "None", ":", "node", ".", "items", "=", "[", "]", "else", ":", "node", ".", "items", "=", "[", "(", "const_factory", "(", "k", ")", ",", "const_factory", "(", "v", ")", "if", "_is_const", "(", "v", ")", "else", "v", ")", "for", "k", ",", "v", "in", "items", ".", "items", "(", ")", "# The keys need to be constants", "if", "_is_const", "(", "k", ")", "]", "return", "node" ]
Create a :class:`Dict` of constants from a live dictionary. :param items: The items to store in the node. :type items: dict :returns: The created dictionary node. :rtype: Dict
[ "Create", "a", ":", "class", ":", "Dict", "of", "constants", "from", "a", "live", "dictionary", "." ]
python
train
29.85
pandas-dev/pandas
pandas/core/arrays/timedeltas.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/arrays/timedeltas.py#L819-L845
def components(self):
    """
    Return a dataframe of the components (days, hours, minutes,
    seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.

    Returns
    -------
    a DataFrame
    """
    from pandas import DataFrame

    columns = ['days', 'hours', 'minutes', 'seconds',
               'milliseconds', 'microseconds', 'nanoseconds']
    hasnans = self._hasnans
    if hasnans:
        def f(x):
            if isna(x):
                return [np.nan] * len(columns)
            return x.components
    else:
        def f(x):
            return x.components

    result = DataFrame([f(x) for x in self], columns=columns)
    if not hasnans:
        result = result.astype('int64')
    return result
[ "def", "components", "(", "self", ")", ":", "from", "pandas", "import", "DataFrame", "columns", "=", "[", "'days'", ",", "'hours'", ",", "'minutes'", ",", "'seconds'", ",", "'milliseconds'", ",", "'microseconds'", ",", "'nanoseconds'", "]", "hasnans", "=", "self", ".", "_hasnans", "if", "hasnans", ":", "def", "f", "(", "x", ")", ":", "if", "isna", "(", "x", ")", ":", "return", "[", "np", ".", "nan", "]", "*", "len", "(", "columns", ")", "return", "x", ".", "components", "else", ":", "def", "f", "(", "x", ")", ":", "return", "x", ".", "components", "result", "=", "DataFrame", "(", "[", "f", "(", "x", ")", "for", "x", "in", "self", "]", ",", "columns", "=", "columns", ")", "if", "not", "hasnans", ":", "result", "=", "result", ".", "astype", "(", "'int64'", ")", "return", "result" ]
Return a dataframe of the components (days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas. Returns ------- a DataFrame
[ "Return", "a", "dataframe", "of", "the", "components", "(", "days", "hours", "minutes", "seconds", "milliseconds", "microseconds", "nanoseconds", ")", "of", "the", "Timedeltas", "." ]
python
train
29.518519
pandas-dev/pandas
pandas/core/series.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/series.py#L1870-L1938
def idxmin(self, axis=0, skipna=True, *args, **kwargs):
    """
    Return the row label of the minimum value.

    If multiple values equal the minimum, the first row label with that
    value is returned.

    Parameters
    ----------
    skipna : bool, default True
        Exclude NA/null values. If the entire Series is NA, the result
        will be NA.
    axis : int, default 0
        For compatibility with DataFrame.idxmin. Redundant for application
        on Series.
    *args, **kwargs
        Additional keywords have no effect but might be accepted
        for compatibility with NumPy.

    Returns
    -------
    Index
        Label of the minimum value.

    Raises
    ------
    ValueError
        If the Series is empty.

    See Also
    --------
    numpy.argmin : Return indices of the minimum values
        along the given axis.
    DataFrame.idxmin : Return index of first occurrence of minimum
        over requested axis.
    Series.idxmax : Return index *label* of the first occurrence
        of maximum of values.

    Notes
    -----
    This method is the Series version of ``ndarray.argmin``. This method
    returns the label of the minimum, while ``ndarray.argmin`` returns
    the position. To get the position, use ``series.values.argmin()``.

    Examples
    --------
    >>> s = pd.Series(data=[1, None, 4, 1],
    ...               index=['A', 'B', 'C', 'D'])
    >>> s
    A    1.0
    B    NaN
    C    4.0
    D    1.0
    dtype: float64

    >>> s.idxmin()
    'A'

    If `skipna` is False and there is an NA value in the data,
    the function returns ``nan``.

    >>> s.idxmin(skipna=False)
    nan
    """
    skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
    i = nanops.nanargmin(com.values_from_object(self), skipna=skipna)
    if i == -1:
        return np.nan
    return self.index[i]
[ "def", "idxmin", "(", "self", ",", "axis", "=", "0", ",", "skipna", "=", "True", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "skipna", "=", "nv", ".", "validate_argmin_with_skipna", "(", "skipna", ",", "args", ",", "kwargs", ")", "i", "=", "nanops", ".", "nanargmin", "(", "com", ".", "values_from_object", "(", "self", ")", ",", "skipna", "=", "skipna", ")", "if", "i", "==", "-", "1", ":", "return", "np", ".", "nan", "return", "self", ".", "index", "[", "i", "]" ]
Return the row label of the minimum value. If multiple values equal the minimum, the first row label with that value is returned. Parameters ---------- skipna : bool, default True Exclude NA/null values. If the entire Series is NA, the result will be NA. axis : int, default 0 For compatibility with DataFrame.idxmin. Redundant for application on Series. *args, **kwargs Additional keywords have no effect but might be accepted for compatibility with NumPy. Returns ------- Index Label of the minimum value. Raises ------ ValueError If the Series is empty. See Also -------- numpy.argmin : Return indices of the minimum values along the given axis. DataFrame.idxmin : Return index of first occurrence of minimum over requested axis. Series.idxmax : Return index *label* of the first occurrence of maximum of values. Notes ----- This method is the Series version of ``ndarray.argmin``. This method returns the label of the minimum, while ``ndarray.argmin`` returns the position. To get the position, use ``series.values.argmin()``. Examples -------- >>> s = pd.Series(data=[1, None, 4, 1], ... index=['A', 'B', 'C', 'D']) >>> s A 1.0 B NaN C 4.0 D 1.0 dtype: float64 >>> s.idxmin() 'A' If `skipna` is False and there is an NA value in the data, the function returns ``nan``. >>> s.idxmin(skipna=False) nan
[ "Return", "the", "row", "label", "of", "the", "minimum", "value", "." ]
python
train
29.318841
cloudendpoints/endpoints-python
endpoints/api_config.py
https://github.com/cloudendpoints/endpoints-python/blob/00dd7c7a52a9ee39d5923191c2604b8eafdb3f24/endpoints/api_config.py#L1438-L1503
def __field_to_subfields(self, field):
    """Fully describes data represented by field, including the nested case.

    In the case that the field is not a message field, we have no fields nested
    within a message definition, so we can simply return that field. However, in
    the nested case, we can't simply describe the data with one field or even
    with one chain of fields.

    For example, if we have a message field

      m_field = messages.MessageField(RefClass, 1)

    which references a class with two fields:

      class RefClass(messages.Message):
        one = messages.StringField(1)
        two = messages.IntegerField(2)

    then we would need to include both one and two to represent all the data
    contained.

    Calling __field_to_subfields(m_field) would return:
    [
      [<MessageField "m_field">, <StringField "one">],
      [<MessageField "m_field">, <StringField "two">],
    ]

    If the second field was instead a message field

      class RefClass(messages.Message):
        one = messages.StringField(1)
        two = messages.MessageField(OtherRefClass, 2)

    referencing another class with two fields

      class OtherRefClass(messages.Message):
        three = messages.BooleanField(1)
        four = messages.FloatField(2)

    then we would need to recurse one level deeper for two.

    With this change, calling __field_to_subfields(m_field) would return:
    [
      [<MessageField "m_field">, <StringField "one">],
      [<MessageField "m_field">, <StringField "two">, <StringField "three">],
      [<MessageField "m_field">, <StringField "two">, <StringField "four">],
    ]

    Args:
      field: An instance of a subclass of messages.Field.

    Returns:
      A list of lists, where each sublist is a list of fields.
    """
    # Termination condition
    if not isinstance(field, messages.MessageField):
        return [[field]]

    result = []
    for subfield in sorted(field.message_type.all_fields(),
                           key=lambda f: f.number):
        subfield_results = self.__field_to_subfields(subfield)
        for subfields_list in subfield_results:
            subfields_list.insert(0, field)
            result.append(subfields_list)
    return result
[ "def", "__field_to_subfields", "(", "self", ",", "field", ")", ":", "# Termination condition", "if", "not", "isinstance", "(", "field", ",", "messages", ".", "MessageField", ")", ":", "return", "[", "[", "field", "]", "]", "result", "=", "[", "]", "for", "subfield", "in", "sorted", "(", "field", ".", "message_type", ".", "all_fields", "(", ")", ",", "key", "=", "lambda", "f", ":", "f", ".", "number", ")", ":", "subfield_results", "=", "self", ".", "__field_to_subfields", "(", "subfield", ")", "for", "subfields_list", "in", "subfield_results", ":", "subfields_list", ".", "insert", "(", "0", ",", "field", ")", "result", ".", "append", "(", "subfields_list", ")", "return", "result" ]
Fully describes data represented by field, including the nested case. In the case that the field is not a message field, we have no fields nested within a message definition, so we can simply return that field. However, in the nested case, we can't simply describe the data with one field or even with one chain of fields. For example, if we have a message field m_field = messages.MessageField(RefClass, 1) which references a class with two fields: class RefClass(messages.Message): one = messages.StringField(1) two = messages.IntegerField(2) then we would need to include both one and two to represent all the data contained. Calling __field_to_subfields(m_field) would return: [ [<MessageField "m_field">, <StringField "one">], [<MessageField "m_field">, <StringField "two">], ] If the second field was instead a message field class RefClass(messages.Message): one = messages.StringField(1) two = messages.MessageField(OtherRefClass, 2) referencing another class with two fields class OtherRefClass(messages.Message): three = messages.BooleanField(1) four = messages.FloatField(2) then we would need to recurse one level deeper for two. With this change, calling __field_to_subfields(m_field) would return: [ [<MessageField "m_field">, <StringField "one">], [<MessageField "m_field">, <StringField "two">, <StringField "three">], [<MessageField "m_field">, <StringField "two">, <StringField "four">], ] Args: field: An instance of a subclass of messages.Field. Returns: A list of lists, where each sublist is a list of fields.
[ "Fully", "describes", "data", "represented", "by", "field", "including", "the", "nested", "case", "." ]
python
train
32.727273
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L1969-L1982
def list(self, **params):
    """
    Retrieve text messages

    Returns Text Messages, according to the parameters provided

    :calls: ``get /text_messages``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attriubte-style access, which
        represent collection of TextMessages.
    :rtype: list
    """
    _, _, text_messages = self.http_client.get("/text_messages", params=params)
    return text_messages
[ "def", "list", "(", "self", ",", "*", "*", "params", ")", ":", "_", ",", "_", ",", "text_messages", "=", "self", ".", "http_client", ".", "get", "(", "\"/text_messages\"", ",", "params", "=", "params", ")", "return", "text_messages" ]
Retrieve text messages Returns Text Messages, according to the parameters provided :calls: ``get /text_messages`` :param dict params: (optional) Search options. :return: List of dictionaries that support attriubte-style access, which represent collection of TextMessages. :rtype: list
[ "Retrieve", "text", "messages" ]
python
train
34.642857
nion-software/nionswift
nion/swift/Facade.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/Facade.py#L1241-L1255
def add_point_region(self, y: float, x: float) -> Graphic:
    """Add a point graphic to the data item.

    :param x: The x coordinate, in relative units [0.0, 1.0]
    :param y: The y coordinate, in relative units [0.0, 1.0]
    :return: The :py:class:`nion.swift.Facade.Graphic` object that was added.

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    graphic = Graphics.PointGraphic()
    graphic.position = Geometry.FloatPoint(y, x)
    self.__display_item.add_graphic(graphic)
    return Graphic(graphic)
[ "def", "add_point_region", "(", "self", ",", "y", ":", "float", ",", "x", ":", "float", ")", "->", "Graphic", ":", "graphic", "=", "Graphics", ".", "PointGraphic", "(", ")", "graphic", ".", "position", "=", "Geometry", ".", "FloatPoint", "(", "y", ",", "x", ")", "self", ".", "__display_item", ".", "add_graphic", "(", "graphic", ")", "return", "Graphic", "(", "graphic", ")" ]
Add a point graphic to the data item. :param x: The x coordinate, in relative units [0.0, 1.0] :param y: The y coordinate, in relative units [0.0, 1.0] :return: The :py:class:`nion.swift.Facade.Graphic` object that was added. .. versionadded:: 1.0 Scriptable: Yes
[ "Add", "a", "point", "graphic", "to", "the", "data", "item", "." ]
python
train
36.666667
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L1518-L1549
def getTJstr(text, glyphs, simple, ordering):
    """ Return a PDF string enclosed in [] brackets, suitable for the PDF TJ
    operator.

    Notes:
        The input string is converted to either 2 or 4 hex digits per character.
    Args:
        simple: no glyphs: 2-chars, use char codes as the glyph
                glyphs: 2-chars, use glyphs instead of char codes (Symbol, ZapfDingbats)
        not simple: ordering < 0: 4-chars, use glyphs not char codes
                    ordering >=0: a CJK font! 4 chars, use char codes as glyphs
    """
    if text.startswith("[<") and text.endswith(">]"):  # already done
        return text

    if not bool(text):
        return "[<>]"

    if simple:
        if glyphs is None:  # simple and not Symbol / ZapfDingbats
            otxt = "".join([hex(ord(c))[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
        else:  # Symbol or ZapfDingbats
            otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(2, "0") if ord(c)<256 else "b7" for c in text])
        return "[<" + otxt + ">]"

    if ordering < 0:  # not a CJK font: use the glyphs
        otxt = "".join([hex(glyphs[ord(c)][0])[2:].rjust(4, "0") for c in text])
    else:  # CJK: use char codes, no glyphs
        otxt = "".join([hex(ord(c))[2:].rjust(4, "0") for c in text])

    return "[<" + otxt + ">]"
[ "def", "getTJstr", "(", "text", ",", "glyphs", ",", "simple", ",", "ordering", ")", ":", "if", "text", ".", "startswith", "(", "\"[<\"", ")", "and", "text", ".", "endswith", "(", "\">]\"", ")", ":", "# already done", "return", "text", "if", "not", "bool", "(", "text", ")", ":", "return", "\"[<>]\"", "if", "simple", ":", "if", "glyphs", "is", "None", ":", "# simple and not Symbol / ZapfDingbats", "otxt", "=", "\"\"", ".", "join", "(", "[", "hex", "(", "ord", "(", "c", ")", ")", "[", "2", ":", "]", ".", "rjust", "(", "2", ",", "\"0\"", ")", "if", "ord", "(", "c", ")", "<", "256", "else", "\"b7\"", "for", "c", "in", "text", "]", ")", "else", ":", "# Symbol or ZapfDingbats", "otxt", "=", "\"\"", ".", "join", "(", "[", "hex", "(", "glyphs", "[", "ord", "(", "c", ")", "]", "[", "0", "]", ")", "[", "2", ":", "]", ".", "rjust", "(", "2", ",", "\"0\"", ")", "if", "ord", "(", "c", ")", "<", "256", "else", "\"b7\"", "for", "c", "in", "text", "]", ")", "return", "\"[<\"", "+", "otxt", "+", "\">]\"", "if", "ordering", "<", "0", ":", "# not a CJK font: use the glyphs", "otxt", "=", "\"\"", ".", "join", "(", "[", "hex", "(", "glyphs", "[", "ord", "(", "c", ")", "]", "[", "0", "]", ")", "[", "2", ":", "]", ".", "rjust", "(", "4", ",", "\"0\"", ")", "for", "c", "in", "text", "]", ")", "else", ":", "# CJK: use char codes, no glyphs", "otxt", "=", "\"\"", ".", "join", "(", "[", "hex", "(", "ord", "(", "c", ")", ")", "[", "2", ":", "]", ".", "rjust", "(", "4", ",", "\"0\"", ")", "for", "c", "in", "text", "]", ")", "return", "\"[<\"", "+", "otxt", "+", "\">]\"" ]
Return a PDF string enclosed in [] brackets, suitable for the PDF TJ operator. Notes: The input string is converted to either 2 or 4 hex digits per character. Args: simple: no glyphs: 2-chars, use char codes as the glyph glyphs: 2-chars, use glyphs instead of char codes (Symbol, ZapfDingbats) not simple: ordering < 0: 4-chars, use glyphs not char codes ordering >=0: a CJK font! 4 chars, use char codes as glyphs
[ "Return", "a", "PDF", "string", "enclosed", "in", "[]", "brackets", "suitable", "for", "the", "PDF", "TJ", "operator", "." ]
python
train
43.46875
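A minimal usage sketch for the getTJstr record above, exercising only the simple / glyphs=None branch; the import path mirrors the record's source file and assumes PyMuPDF is installed, and the sample strings are arbitrary.

from fitz.fitz import getTJstr  # assumed import, matching the record's path

# Each char below 256 becomes two hex digits of its code point: 'A' -> 41, 'B' -> 42, 'C' -> 43.
assert getTJstr("ABC", None, True, 0) == "[<414243>]"
# Code points >= 256 fall back to the "b7" placeholder in the simple branch.
assert getTJstr("\u20ac", None, True, 0) == "[<b7>]"
# Already-wrapped input is returned unchanged.
assert getTJstr("[<414243>]", None, True, 0) == "[<414243>]"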
peterbrittain/asciimatics
asciimatics/widgets.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/widgets.py#L3142-L3151
def clone(self, screen, scene): """ Create a clone of this Dialog into a new Screen. :param screen: The new Screen object to clone into. :param scene: The new Scene object to clone into. """ # Only clone the object if the function is safe to do so. if self._on_close is None or isfunction(self._on_close): scene.add_effect(PopUpDialog(screen, self._text, self._buttons, self._on_close))
[ "def", "clone", "(", "self", ",", "screen", ",", "scene", ")", ":", "# Only clone the object if the function is safe to do so.", "if", "self", ".", "_on_close", "is", "None", "or", "isfunction", "(", "self", ".", "_on_close", ")", ":", "scene", ".", "add_effect", "(", "PopUpDialog", "(", "screen", ",", "self", ".", "_text", ",", "self", ".", "_buttons", ",", "self", ".", "_on_close", ")", ")" ]
Create a clone of this Dialog into a new Screen. :param screen: The new Screen object to clone into. :param scene: The new Scene object to clone into.
[ "Create", "a", "clone", "of", "this", "Dialog", "into", "a", "new", "Screen", "." ]
python
train
44.6
guaix-ucm/numina
numina/core/recipes.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipes.py#L183-L188
def set_base_headers(self, hdr): """Set metadata in FITS headers.""" hdr['NUMXVER'] = (__version__, 'Numina package version') hdr['NUMRNAM'] = (self.__class__.__name__, 'Numina recipe name') hdr['NUMRVER'] = (self.__version__, 'Numina recipe version') return hdr
[ "def", "set_base_headers", "(", "self", ",", "hdr", ")", ":", "hdr", "[", "'NUMXVER'", "]", "=", "(", "__version__", ",", "'Numina package version'", ")", "hdr", "[", "'NUMRNAM'", "]", "=", "(", "self", ".", "__class__", ".", "__name__", ",", "'Numina recipe name'", ")", "hdr", "[", "'NUMRVER'", "]", "=", "(", "self", ".", "__version__", ",", "'Numina recipe version'", ")", "return", "hdr" ]
Set metadata in FITS headers.
[ "Set", "metadata", "in", "FITS", "headers", "." ]
python
train
49.5
sixty-north/cosmic-ray
src/cosmic_ray/work_db.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/work_db.py#L118-L125
def clear(self): """Clear all work items from the session. This removes any associated results as well. """ with self._conn: self._conn.execute('DELETE FROM results') self._conn.execute('DELETE FROM work_items')
[ "def", "clear", "(", "self", ")", ":", "with", "self", ".", "_conn", ":", "self", ".", "_conn", ".", "execute", "(", "'DELETE FROM results'", ")", "self", ".", "_conn", ".", "execute", "(", "'DELETE FROM work_items'", ")" ]
Clear all work items from the session. This removes any associated results as well.
[ "Clear", "all", "work", "items", "from", "the", "session", "." ]
python
train
32.625
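A small sqlite3 sketch of the transaction pattern clear() relies on above; the table names follow the record, but the one-column schema here is invented purely for illustration.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE work_items (id INTEGER)")
conn.execute("CREATE TABLE results (id INTEGER)")
conn.execute("INSERT INTO work_items VALUES (1)")
conn.execute("INSERT INTO results VALUES (1)")

# Using the connection as a context manager commits both DELETEs as a single transaction,
# which is what the `with self._conn:` block in the record achieves.
with conn:
    conn.execute("DELETE FROM results")
    conn.execute("DELETE FROM work_items")

assert conn.execute("SELECT COUNT(*) FROM work_items").fetchone()[0] == 0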
thiagopbueno/pyrddl
pyrddl/parser.py
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/parser.py#L383-L389
def p_intermfluent_def(self, p): '''intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI | IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI''' if len(p) == 16: p[0] = PVariable(name=p[1], fluent_type='interm-fluent', range_type=p[9], param_types=p[3], level=p[13]) else: p[0] = PVariable(name=p[1], fluent_type='interm-fluent', range_type=p[6], level=p[10])
[ "def", "p_intermfluent_def", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "16", ":", "p", "[", "0", "]", "=", "PVariable", "(", "name", "=", "p", "[", "1", "]", ",", "fluent_type", "=", "'interm-fluent'", ",", "range_type", "=", "p", "[", "9", "]", ",", "param_types", "=", "p", "[", "3", "]", ",", "level", "=", "p", "[", "13", "]", ")", "else", ":", "p", "[", "0", "]", "=", "PVariable", "(", "name", "=", "p", "[", "1", "]", ",", "fluent_type", "=", "'interm-fluent'", ",", "range_type", "=", "p", "[", "6", "]", ",", "level", "=", "p", "[", "10", "]", ")" ]
intermfluent_def : IDENT LPAREN param_list RPAREN COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI | IDENT COLON LCURLY INTERMEDIATE COMMA type_spec COMMA LEVEL ASSIGN_EQUAL range_const RCURLY SEMI
[ "intermfluent_def", ":", "IDENT", "LPAREN", "param_list", "RPAREN", "COLON", "LCURLY", "INTERMEDIATE", "COMMA", "type_spec", "COMMA", "LEVEL", "ASSIGN_EQUAL", "range_const", "RCURLY", "SEMI", "|", "IDENT", "COLON", "LCURLY", "INTERMEDIATE", "COMMA", "type_spec", "COMMA", "LEVEL", "ASSIGN_EQUAL", "range_const", "RCURLY", "SEMI" ]
python
train
80.428571
GemHQ/round-py
round/client.py
https://github.com/GemHQ/round-py/blob/d0838f849cd260b1eb5df67ed3c6f2fe56c91c21/round/client.py#L71-L106
def authenticate_device(self, api_token, device_token, email=None, user_url=None, override=False, fetch=True): """Set credentials for Device authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. device_token (str): Physical device identifier. You will receive this from a user.devices.create call or from users.create. email (str, optional): User's email address, required if user_url is not provided. user_url (str, optional): User's Gem url. override (boolean, optional): Replace existing Application credentials. fetch (boolean, optional): Return the authenticated User. Returns: An User object if `fetch` is True. """ if (self.context.has_auth_params('Gem-Device') and not override): raise OverrideError('Gem-Device') if (not api_token or not device_token or (not email and not user_url) or not self.context.authorize('Gem-Device', api_token=api_token, user_email=email, user_url=user_url, device_token=device_token)): raise AuthUsageError(self.context, 'Gem-Device') if fetch: user = self.user(email) if email else self.user() return user.refresh() else: return True
[ "def", "authenticate_device", "(", "self", ",", "api_token", ",", "device_token", ",", "email", "=", "None", ",", "user_url", "=", "None", ",", "override", "=", "False", ",", "fetch", "=", "True", ")", ":", "if", "(", "self", ".", "context", ".", "has_auth_params", "(", "'Gem-Device'", ")", "and", "not", "override", ")", ":", "raise", "OverrideError", "(", "'Gem-Device'", ")", "if", "(", "not", "api_token", "or", "not", "device_token", "or", "(", "not", "email", "and", "not", "user_url", ")", "or", "not", "self", ".", "context", ".", "authorize", "(", "'Gem-Device'", ",", "api_token", "=", "api_token", ",", "user_email", "=", "email", ",", "user_url", "=", "user_url", ",", "device_token", "=", "device_token", ")", ")", ":", "raise", "AuthUsageError", "(", "self", ".", "context", ",", "'Gem-Device'", ")", "if", "fetch", ":", "user", "=", "self", ".", "user", "(", "email", ")", "if", "email", "else", "self", ".", "user", "(", ")", "return", "user", ".", "refresh", "(", ")", "else", ":", "return", "True" ]
Set credentials for Device authentication. Args: api_token (str): Token issued to your Application through the Gem Developer Console. device_token (str): Physical device identifier. You will receive this from a user.devices.create call or from users.create. email (str, optional): User's email address, required if user_url is not provided. user_url (str, optional): User's Gem url. override (boolean, optional): Replace existing Application credentials. fetch (boolean, optional): Return the authenticated User. Returns: A User object if `fetch` is True.
[ "Set", "credentials", "for", "Device", "authentication", "." ]
python
train
42.861111
urinieto/msaf
msaf/algorithms/sf/segmenter.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/algorithms/sf/segmenter.py#L83-L90
def circular_shift(X): """Shifts circularly the X squre matrix in order to get a time-lag matrix.""" N = X.shape[0] L = np.zeros(X.shape) for i in range(N): L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)]) return L
[ "def", "circular_shift", "(", "X", ")", ":", "N", "=", "X", ".", "shape", "[", "0", "]", "L", "=", "np", ".", "zeros", "(", "X", ".", "shape", ")", "for", "i", "in", "range", "(", "N", ")", ":", "L", "[", "i", ",", ":", "]", "=", "np", ".", "asarray", "(", "[", "X", "[", "(", "i", "+", "j", ")", "%", "N", ",", "j", "]", "for", "j", "in", "range", "(", "N", ")", "]", ")", "return", "L" ]
Shifts circularly the X square matrix in order to get a time-lag matrix.
[ "Shifts", "circularly", "the", "X", "square", "matrix", "in", "order", "to", "get", "a", "time", "-", "lag", "matrix", "." ]
python
test
31.75
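A self-contained check of the time-lag construction in circular_shift above; the function body is copied from the record so the snippet runs on its own, and the 3x3 input is an arbitrary example.

import numpy as np

def circular_shift(X):
    """Shifts circularly the X square matrix in order to get a time-lag matrix."""
    N = X.shape[0]
    L = np.zeros(X.shape)
    for i in range(N):
        L[i, :] = np.asarray([X[(i + j) % N, j] for j in range(N)])
    return L

X = np.arange(9).reshape(3, 3)
L = circular_shift(X)
# Row i of the result reads the diagonal of X starting at row i: L[i, j] = X[(i + j) % N, j].
assert L[0].tolist() == [0.0, 4.0, 8.0]   # main diagonal
assert L[1].tolist() == [3.0, 7.0, 2.0]   # diagonal shifted down by one row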
pgjones/quart
quart/app.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/app.py#L1123-L1143
def after_serving(self, func: Callable) -> Callable: """Add a after serving function. This will allow the function provided to be called once after anything is served (after last byte is sent). This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_serving def func(): ... Arguments: func: The function itself. """ handler = ensure_coroutine(func) self.after_serving_funcs.append(handler) return func
[ "def", "after_serving", "(", "self", ",", "func", ":", "Callable", ")", "->", "Callable", ":", "handler", "=", "ensure_coroutine", "(", "func", ")", "self", ".", "after_serving_funcs", ".", "append", "(", "handler", ")", "return", "func" ]
Add an after serving function. This will allow the function provided to be called once after anything is served (after last byte is sent). This is designed to be used as a decorator. An example usage, .. code-block:: python @app.after_serving def func(): ... Arguments: func: The function itself.
[ "Add", "an", "after", "serving", "function", "." ]
python
train
26.619048
gitpython-developers/smmap
smmap/util.py
https://github.com/gitpython-developers/smmap/blob/48e9e30b0ef3c24ac7ed88e6e3bfa37dc945bf4c/smmap/util.py#L229-L238
def file_size(self): """:return: size of file we manager""" if self._file_size is None: if isinstance(self._path_or_fd, string_types()): self._file_size = os.stat(self._path_or_fd).st_size else: self._file_size = os.fstat(self._path_or_fd).st_size # END handle path type # END update file size return self._file_size
[ "def", "file_size", "(", "self", ")", ":", "if", "self", ".", "_file_size", "is", "None", ":", "if", "isinstance", "(", "self", ".", "_path_or_fd", ",", "string_types", "(", ")", ")", ":", "self", ".", "_file_size", "=", "os", ".", "stat", "(", "self", ".", "_path_or_fd", ")", ".", "st_size", "else", ":", "self", ".", "_file_size", "=", "os", ".", "fstat", "(", "self", ".", "_path_or_fd", ")", ".", "st_size", "# END handle path type", "# END update file size", "return", "self", ".", "_file_size" ]
:return: size of file we manage
[ ":", "return", ":", "size", "of", "file", "we", "manage" ]
python
train
40.7
pyviz/holoviews
holoviews/core/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/util.py#L1639-L1641
def iterkeys(obj): "Get key iterator from dictionary for Python 2 and 3" return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()
[ "def", "iterkeys", "(", "obj", ")", ":", "return", "iter", "(", "obj", ".", "keys", "(", ")", ")", "if", "sys", ".", "version_info", ".", "major", "==", "3", "else", "obj", ".", "iterkeys", "(", ")" ]
Get key iterator from dictionary for Python 2 and 3
[ "Get", "key", "iterator", "from", "dictionary", "for", "Python", "2", "and", "3" ]
python
train
51
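A trivial check of the iterkeys compatibility shim above; the definition is copied from the record so the lines run standalone.

import sys

def iterkeys(obj):
    "Get key iterator from dictionary for Python 2 and 3"
    return iter(obj.keys()) if sys.version_info.major == 3 else obj.iterkeys()

d = {"a": 1, "b": 2}
# On Python 3 this is iter(d.keys()); on Python 2 it would defer to d.iterkeys().
assert sorted(iterkeys(d)) == ["a", "b"]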
iterative/dvc
dvc/analytics.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/analytics.py#L196-L206
def dump(self): """Save analytics report to a temporary file. Returns: str: path to the temporary file that contains the analytics report. """ import tempfile with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj: json.dump(self.info, fobj) return fobj.name
[ "def", "dump", "(", "self", ")", ":", "import", "tempfile", "with", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "mode", "=", "\"w\"", ")", "as", "fobj", ":", "json", ".", "dump", "(", "self", ".", "info", ",", "fobj", ")", "return", "fobj", ".", "name" ]
Save analytics report to a temporary file. Returns: str: path to the temporary file that contains the analytics report.
[ "Save", "analytics", "report", "to", "a", "temporary", "file", "." ]
python
train
30.545455
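A hedged sketch of the tempfile-plus-json pattern that dump() uses above; the info dict stands in for self.info and is made up for illustration.

import json
import tempfile

info = {"event": "example"}   # stand-in for self.info in the record
with tempfile.NamedTemporaryFile(delete=False, mode="w") as fobj:
    json.dump(info, fobj)
report_path = fobj.name       # delete=False leaves the file for the caller to read later

with open(report_path) as fh:
    assert json.load(fh) == info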
romanvm/python-web-pdb
web_pdb/web_console.py
https://github.com/romanvm/python-web-pdb/blob/f2df2207e870dbf50a4bb30ca12a59cab39a809f/web_pdb/web_console.py#L176-L184
def flush(self): """ Wait until history is read but no more than 10 cycles in case a browser session is closed. """ i = 0 while self._frame_data.is_dirty and i < 10: i += 1 time.sleep(0.1)
[ "def", "flush", "(", "self", ")", ":", "i", "=", "0", "while", "self", ".", "_frame_data", ".", "is_dirty", "and", "i", "<", "10", ":", "i", "+=", "1", "time", ".", "sleep", "(", "0.1", ")" ]
Wait until history is read but no more than 10 cycles in case a browser session is closed.
[ "Wait", "until", "history", "is", "read", "but", "no", "more", "than", "10", "cycles", "in", "case", "a", "browser", "session", "is", "closed", "." ]
python
train
28
tarbell-project/tarbell
tarbell/cli.py
https://github.com/tarbell-project/tarbell/blob/818b3d3623dcda5a08a5bf45550219719b0f0365/tarbell/cli.py#L671-L687
def _clean_suffix(string, suffix): """ If string endswith the suffix, remove it. Else leave it alone. """ suffix_len = len(suffix) if len(string) < suffix_len: # the string param was shorter than the suffix raise ValueError("A suffix can not be bigger than string argument.") if string.endswith(suffix): # return from the beginning up to # but not including the first letter # in the suffix return string[0:-suffix_len] else: # leave unharmed return string
[ "def", "_clean_suffix", "(", "string", ",", "suffix", ")", ":", "suffix_len", "=", "len", "(", "suffix", ")", "if", "len", "(", "string", ")", "<", "suffix_len", ":", "# the string param was shorter than the suffix", "raise", "ValueError", "(", "\"A suffix can not be bigger than string argument.\"", ")", "if", "string", ".", "endswith", "(", "suffix", ")", ":", "# return from the beginning up to", "# but not including the first letter", "# in the suffix", "return", "string", "[", "0", ":", "-", "suffix_len", "]", "else", ":", "# leave unharmed", "return", "string" ]
If string endswith the suffix, remove it. Else leave it alone.
[ "If", "string", "endswith", "the", "suffix", "remove", "it", ".", "Else", "leave", "it", "alone", "." ]
python
train
31.352941
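A quick check of _clean_suffix above, assuming the private helper can be imported from tarbell.cli as in the record; the filenames are arbitrary examples.

from tarbell.cli import _clean_suffix  # assumed import, matching the record's path

assert _clean_suffix("readme.html", ".html") == "readme"
assert _clean_suffix("readme.html", ".txt") == "readme.html"   # unrelated suffix is left alone
# A suffix longer than the string raises ValueError, per the guard at the top of the function.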
hyperledger/sawtooth-core
rest_api/sawtooth_rest_api/route_handlers.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/rest_api/sawtooth_rest_api/route_handlers.py#L377-L407
async def list_batches(self, request): """Fetches list of batches from validator, optionally filtered by id. Request: query: - head: The id of the block to use as the head of the chain - id: Comma separated list of batch ids to include in results Response: data: JSON array of fully expanded Batch objects head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block paging: Paging info and nav, like total resources and a next link """ paging_controls = self._get_paging_controls(request) validator_query = client_batch_pb2.ClientBatchListRequest( head_id=self._get_head_id(request), batch_ids=self._get_filter_ids(request), sorting=self._get_sorting_message(request, "default"), paging=self._make_paging_message(paging_controls)) response = await self._query_validator( Message.CLIENT_BATCH_LIST_REQUEST, client_batch_pb2.ClientBatchListResponse, validator_query) return self._wrap_paginated_response( request=request, response=response, controls=paging_controls, data=[self._expand_batch(b) for b in response['batches']])
[ "async", "def", "list_batches", "(", "self", ",", "request", ")", ":", "paging_controls", "=", "self", ".", "_get_paging_controls", "(", "request", ")", "validator_query", "=", "client_batch_pb2", ".", "ClientBatchListRequest", "(", "head_id", "=", "self", ".", "_get_head_id", "(", "request", ")", ",", "batch_ids", "=", "self", ".", "_get_filter_ids", "(", "request", ")", ",", "sorting", "=", "self", ".", "_get_sorting_message", "(", "request", ",", "\"default\"", ")", ",", "paging", "=", "self", ".", "_make_paging_message", "(", "paging_controls", ")", ")", "response", "=", "await", "self", ".", "_query_validator", "(", "Message", ".", "CLIENT_BATCH_LIST_REQUEST", ",", "client_batch_pb2", ".", "ClientBatchListResponse", ",", "validator_query", ")", "return", "self", ".", "_wrap_paginated_response", "(", "request", "=", "request", ",", "response", "=", "response", ",", "controls", "=", "paging_controls", ",", "data", "=", "[", "self", ".", "_expand_batch", "(", "b", ")", "for", "b", "in", "response", "[", "'batches'", "]", "]", ")" ]
Fetches list of batches from validator, optionally filtered by id. Request: query: - head: The id of the block to use as the head of the chain - id: Comma separated list of batch ids to include in results Response: data: JSON array of fully expanded Batch objects head: The head used for this query (most recent if unspecified) link: The link to this exact query, including head block paging: Paging info and nav, like total resources and a next link
[ "Fetches", "list", "of", "batches", "from", "validator", "optionally", "filtered", "by", "id", "." ]
python
train
43.419355
etal/biofrills
biofrills/consensus.py
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/consensus.py#L215-L261
def supported(aln): """Get only the supported consensus residues in each column. Meaning: - Omit majority-gap columns - Omit columns where no residue type appears more than once - In case of a tie, return all the top-scoring residue types (no prioritization) Returns a *list* -- not a string! -- where elements are strings of the consensus character(s), potentially a gap ('-') or multiple chars ('KR'). """ def col_consensus(columns): """Calculate the consensus chars for an iterable of columns.""" for col in columns: if (# Majority gap chars (col.count('-') >= len(col)/2) or # Lowercase cols mean "don't include in consensus" all(c.islower() for c in col if c not in '.-') ): yield '-' continue # Validation - copied from consensus() above if any(c.islower() for c in col): logging.warn('Mixed lowercase and uppercase letters in a ' 'column: ' + ''.join(col)) col = map(str.upper, col) # Calculate the consensus character most_common = Counter( [c for c in col if c not in '-'] ).most_common() if not most_common: # XXX ever reached? logging.warn("Column is all gaps! How did that happen?") if most_common[0][1] == 1: # No char has frequency > 1; no consensus char yield '-' elif (len(most_common) > 1 and most_common[0][1] == most_common[1][1]): # Tie for most-common residue type ties = [x[0] for x in most_common if x[1] == most_common[0][1]] yield ''.join(ties) else: yield most_common[0][0] return list(col_consensus(zip(*aln)))
[ "def", "supported", "(", "aln", ")", ":", "def", "col_consensus", "(", "columns", ")", ":", "\"\"\"Calculate the consensus chars for an iterable of columns.\"\"\"", "for", "col", "in", "columns", ":", "if", "(", "# Majority gap chars", "(", "col", ".", "count", "(", "'-'", ")", ">=", "len", "(", "col", ")", "/", "2", ")", "or", "# Lowercase cols mean \"don't include in consensus\"", "all", "(", "c", ".", "islower", "(", ")", "for", "c", "in", "col", "if", "c", "not", "in", "'.-'", ")", ")", ":", "yield", "'-'", "continue", "# Validation - copied from consensus() above", "if", "any", "(", "c", ".", "islower", "(", ")", "for", "c", "in", "col", ")", ":", "logging", ".", "warn", "(", "'Mixed lowercase and uppercase letters in a '", "'column: '", "+", "''", ".", "join", "(", "col", ")", ")", "col", "=", "map", "(", "str", ".", "upper", ",", "col", ")", "# Calculate the consensus character", "most_common", "=", "Counter", "(", "[", "c", "for", "c", "in", "col", "if", "c", "not", "in", "'-'", "]", ")", ".", "most_common", "(", ")", "if", "not", "most_common", ":", "# XXX ever reached?", "logging", ".", "warn", "(", "\"Column is all gaps! How did that happen?\"", ")", "if", "most_common", "[", "0", "]", "[", "1", "]", "==", "1", ":", "# No char has frequency > 1; no consensus char", "yield", "'-'", "elif", "(", "len", "(", "most_common", ")", ">", "1", "and", "most_common", "[", "0", "]", "[", "1", "]", "==", "most_common", "[", "1", "]", "[", "1", "]", ")", ":", "# Tie for most-common residue type", "ties", "=", "[", "x", "[", "0", "]", "for", "x", "in", "most_common", "if", "x", "[", "1", "]", "==", "most_common", "[", "0", "]", "[", "1", "]", "]", "yield", "''", ".", "join", "(", "ties", ")", "else", ":", "yield", "most_common", "[", "0", "]", "[", "0", "]", "return", "list", "(", "col_consensus", "(", "zip", "(", "*", "aln", ")", ")", ")" ]
Get only the supported consensus residues in each column. Meaning: - Omit majority-gap columns - Omit columns where no residue type appears more than once - In case of a tie, return all the top-scoring residue types (no prioritization) Returns a *list* -- not a string! -- where elements are strings of the consensus character(s), potentially a gap ('-') or multiple chars ('KR').
[ "Get", "only", "the", "supported", "consensus", "residues", "in", "each", "column", "." ]
python
train
41.021277
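A small usage sketch for supported() above, assuming it can be imported from biofrills.consensus; the toy alignment is invented to hit the majority-gap and plain-majority branches.

from biofrills.consensus import supported  # assumed import, per the record's module path

aln = ["AAG-", "AAT-", "ACTT"]
# Column 4 is majority-gap and collapses to '-'; a tie between residue types would instead
# return all top-scoring characters joined, e.g. 'KR'.
assert supported(aln) == ['A', 'A', 'T', '-']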
houluy/chessboard
chessboard/__init__.py
https://github.com/houluy/chessboard/blob/b834819d93d71b492f27780a58dfbb3a107d7e85/chessboard/__init__.py#L361-L367
def get_not_num(self, seq, num=0): '''Find the index of first non num element''' ind = next((i for i, x in enumerate(seq) if x != num), None) if ind == None: return self.board_size else: return ind
[ "def", "get_not_num", "(", "self", ",", "seq", ",", "num", "=", "0", ")", ":", "ind", "=", "next", "(", "(", "i", "for", "i", ",", "x", "in", "enumerate", "(", "seq", ")", "if", "x", "!=", "num", ")", ",", "None", ")", "if", "ind", "==", "None", ":", "return", "self", ".", "board_size", "else", ":", "return", "ind" ]
Find the index of the first non-num element
[ "Find", "the", "index", "of", "the", "first", "non", "-", "num", "element" ]
python
train
35.285714
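The lookup idiom inside get_not_num above, shown outside the class as a hedged standalone sketch; the sequence is an arbitrary example.

seq = [0, 0, 5, 0]
# First index whose value differs from num (here 0); None when no such element exists.
ind = next((i for i, x in enumerate(seq) if x != 0), None)
assert ind == 2
# In the method, a None result falls back to self.board_size.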
pypa/setuptools
setuptools/msvc.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/msvc.py#L971-L1013
def OSLibpath(self): """ Microsoft Windows SDK Libraries Paths """ ref = os.path.join(self.si.WindowsSdkDir, 'References') libpath = [] if self.vc_ver <= 9.0: libpath += self.OSLibraries if self.vc_ver >= 11.0: libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')] if self.vc_ver >= 14.0: libpath += [ ref, os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'), os.path.join( ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0', ), os.path.join( ref, 'Windows.Foundation.FoundationContract', '1.0.0.0', ), os.path.join( ref, 'Windows.Networking.Connectivity.WwanContract', '1.0.0.0', ), os.path.join( self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs', '%0.1f' % self.vc_ver, 'References', 'CommonConfiguration', 'neutral', ), ] return libpath
[ "def", "OSLibpath", "(", "self", ")", ":", "ref", "=", "os", ".", "path", ".", "join", "(", "self", ".", "si", ".", "WindowsSdkDir", ",", "'References'", ")", "libpath", "=", "[", "]", "if", "self", ".", "vc_ver", "<=", "9.0", ":", "libpath", "+=", "self", ".", "OSLibraries", "if", "self", ".", "vc_ver", ">=", "11.0", ":", "libpath", "+=", "[", "os", ".", "path", ".", "join", "(", "ref", ",", "r'CommonConfiguration\\Neutral'", ")", "]", "if", "self", ".", "vc_ver", ">=", "14.0", ":", "libpath", "+=", "[", "ref", ",", "os", ".", "path", ".", "join", "(", "self", ".", "si", ".", "WindowsSdkDir", ",", "'UnionMetadata'", ")", ",", "os", ".", "path", ".", "join", "(", "ref", ",", "'Windows.Foundation.UniversalApiContract'", ",", "'1.0.0.0'", ",", ")", ",", "os", ".", "path", ".", "join", "(", "ref", ",", "'Windows.Foundation.FoundationContract'", ",", "'1.0.0.0'", ",", ")", ",", "os", ".", "path", ".", "join", "(", "ref", ",", "'Windows.Networking.Connectivity.WwanContract'", ",", "'1.0.0.0'", ",", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "si", ".", "WindowsSdkDir", ",", "'ExtensionSDKs'", ",", "'Microsoft.VCLibs'", ",", "'%0.1f'", "%", "self", ".", "vc_ver", ",", "'References'", ",", "'CommonConfiguration'", ",", "'neutral'", ",", ")", ",", "]", "return", "libpath" ]
Microsoft Windows SDK Libraries Paths
[ "Microsoft", "Windows", "SDK", "Libraries", "Paths" ]
python
train
30.837209
zabertech/python-swampyer
swampyer/__init__.py
https://github.com/zabertech/python-swampyer/blob/31b040e7570455718709a496d6d9faacfb372a00/swampyer/__init__.py#L559-L566
def start(self): """ Initialize websockets, say hello, and start listening for events """ self.connect() if not self.isAlive(): super(WAMPClient,self).start() self.hello() return self
[ "def", "start", "(", "self", ")", ":", "self", ".", "connect", "(", ")", "if", "not", "self", ".", "isAlive", "(", ")", ":", "super", "(", "WAMPClient", ",", "self", ")", ".", "start", "(", ")", "self", ".", "hello", "(", ")", "return", "self" ]
Initialize websockets, say hello, and start listening for events
[ "Initialize", "websockets", "say", "hello", "and", "start", "listening", "for", "events" ]
python
train
29.5
NLeSC/noodles
noodles/prov/workflow.py
https://github.com/NLeSC/noodles/blob/3759e24e6e54a3a1a364431309dbb1061f617c04/noodles/prov/workflow.py#L20-L68
def set_global_provenance(wf: Workflow, registry: Registry): """Compute a global provenance key for the entire workflow before evaluation. This key can be used to store and retrieve results in a database. The key computed in this stage is different from the (local) provenance key that can be computed for a node if all its arguments are known. In cases where a result derives from other results that were computed in child workflows, we can prevent the workflow system from reevaluating the results at each step to find that we already had the end-result somewhere. This is where the global prov-key comes in. Each node is assigned a `prov` attribute. If all arguments for this node are known, this key will be the same as the local prov-key. If some of the arguments are still empty, we add the global prov-keys of the dependent nodes to the hash. In this algorithm we traverse from the bottom of the DAG to the top and back using a stack. This allows us to compute the keys for each node without modifying the node other than setting the `prov` attribute with the resulting key.""" stack = [wf.root] while stack: i = stack.pop() n = wf.nodes[i] if n.prov: continue if is_node_ready(n): job_msg = registry.deep_encode(n) n.prov = prov_key(job_msg) continue deps = wf.inverse_links[i] todo = [j for j in deps if not wf.nodes[j].prov] if not todo: link_dict = dict(links(wf, i, deps)) link_prov = registry.deep_encode( [link_dict[arg] for arg in empty_args(n)]) job_msg = registry.deep_encode(n) n.prov = prov_key(job_msg, link_prov) continue stack.append(i) stack.extend(deps)
[ "def", "set_global_provenance", "(", "wf", ":", "Workflow", ",", "registry", ":", "Registry", ")", ":", "stack", "=", "[", "wf", ".", "root", "]", "while", "stack", ":", "i", "=", "stack", ".", "pop", "(", ")", "n", "=", "wf", ".", "nodes", "[", "i", "]", "if", "n", ".", "prov", ":", "continue", "if", "is_node_ready", "(", "n", ")", ":", "job_msg", "=", "registry", ".", "deep_encode", "(", "n", ")", "n", ".", "prov", "=", "prov_key", "(", "job_msg", ")", "continue", "deps", "=", "wf", ".", "inverse_links", "[", "i", "]", "todo", "=", "[", "j", "for", "j", "in", "deps", "if", "not", "wf", ".", "nodes", "[", "j", "]", ".", "prov", "]", "if", "not", "todo", ":", "link_dict", "=", "dict", "(", "links", "(", "wf", ",", "i", ",", "deps", ")", ")", "link_prov", "=", "registry", ".", "deep_encode", "(", "[", "link_dict", "[", "arg", "]", "for", "arg", "in", "empty_args", "(", "n", ")", "]", ")", "job_msg", "=", "registry", ".", "deep_encode", "(", "n", ")", "n", ".", "prov", "=", "prov_key", "(", "job_msg", ",", "link_prov", ")", "continue", "stack", ".", "append", "(", "i", ")", "stack", ".", "extend", "(", "deps", ")" ]
Compute a global provenance key for the entire workflow before evaluation. This key can be used to store and retrieve results in a database. The key computed in this stage is different from the (local) provenance key that can be computed for a node if all its arguments are known. In cases where a result derives from other results that were computed in child workflows, we can prevent the workflow system from reevaluating the results at each step to find that we already had the end-result somewhere. This is where the global prov-key comes in. Each node is assigned a `prov` attribute. If all arguments for this node are known, this key will be the same as the local prov-key. If some of the arguments are still empty, we add the global prov-keys of the dependent nodes to the hash. In this algorithm we traverse from the bottom of the DAG to the top and back using a stack. This allows us to compute the keys for each node without modifying the node other than setting the `prov` attribute with the resulting key.
[ "Compute", "a", "global", "provenance", "key", "for", "the", "entire", "workflow", "before", "evaluation", ".", "This", "key", "can", "be", "used", "to", "store", "and", "retrieve", "results", "in", "a", "database", ".", "The", "key", "computed", "in", "this", "stage", "is", "different", "from", "the", "(", "local", ")", "provenance", "key", "that", "can", "be", "computed", "for", "a", "node", "if", "all", "its", "arguments", "are", "known", "." ]
python
train
37.020408
rmorshea/spectate
spectate/mvc/base.py
https://github.com/rmorshea/spectate/blob/79bd84dd8d00889015ce1d1e190db865a02cdb93/spectate/mvc/base.py#L137-L149
def before(self, callback: Union[Callable, str]) -> "Control": """Register a control method that reacts before the trigger method is called. Parameters: callback: The control method. If given as a callable, then that function will be used as the callback. If given as a string, then the control will look up a method with that name when reacting (useful when subclassing). """ if isinstance(callback, Control): callback = callback._before self._before = callback return self
[ "def", "before", "(", "self", ",", "callback", ":", "Union", "[", "Callable", ",", "str", "]", ")", "->", "\"Control\"", ":", "if", "isinstance", "(", "callback", ",", "Control", ")", ":", "callback", "=", "callback", ".", "_before", "self", ".", "_before", "=", "callback", "return", "self" ]
Register a control method that reacts before the trigger method is called. Parameters: callback: The control method. If given as a callable, then that function will be used as the callback. If given as a string, then the control will look up a method with that name when reacting (useful when subclassing).
[ "Register", "a", "control", "method", "that", "reacts", "before", "the", "trigger", "method", "is", "called", "." ]
python
train
44.846154
dereneaton/ipyrad
ipyrad/assemble/cluster_across.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_across.py#L46-L287
def persistent_popen_align3(data, samples, chunk): """ notes """ ## data are already chunked, read in the whole thing with open(chunk, 'rb') as infile: clusts = infile.read().split("//\n//\n")[:-1] ## snames to ensure sorted order samples.sort(key=lambda x: x.name) snames = [sample.name for sample in samples] ## make a tmparr to store metadata (this can get huge, consider using h5) maxlen = data._hackersonly["max_fragment_length"] + 20 indels = np.zeros((len(samples), len(clusts), maxlen), dtype=np.bool_) duples = np.zeros(len(clusts), dtype=np.bool_) ## create a persistent shell for running muscle in. proc = sps.Popen(["bash"], stdin=sps.PIPE, stdout=sps.PIPE, universal_newlines=True) ## iterate over clusters until finished allstack = [] #istack = [] for ldx in xrange(len(clusts)): ## new alignment string for read1s and read2s aligned = [] istack = [] lines = clusts[ldx].strip().split("\n") names = lines[::2] seqs = lines[1::2] align1 = "" align2 = "" ## we don't allow seeds with no hits to make it here, currently #if len(names) == 1: # aligned.append(clusts[ldx].replace(">", "").strip()) ## find duplicates and skip aligning but keep it for downstream. if len(names) != len(set([x.rsplit("_", 1)[0] for x in names])): duples[ldx] = 1 istack = ["{}\n{}".format(i[1:], j) for i, j in zip(names, seqs)] #aligned.append(clusts[ldx].replace(">", "").strip()) else: ## append counter to names because muscle doesn't retain order names = [">{};*{}".format(j[1:], i) for i, j in enumerate(names)] try: ## try to split names on nnnn splitter clust1, clust2 = zip(*[i.split("nnnn") for i in seqs]) ## make back into strings cl1 = "\n".join(itertools.chain(*zip(names, clust1))) cl2 = "\n".join(itertools.chain(*zip(names, clust2))) ## store allele (lowercase) info shape = (len(seqs), max([len(i) for i in seqs])) arrseqs = np.zeros(shape, dtype="S1") for row in range(arrseqs.shape[0]): seqsrow = seqs[row] arrseqs[row, :len(seqsrow)] = list(seqsrow) amask = np.char.islower(arrseqs) save_alleles = np.any(amask) ## send align1 to the bash shell ## TODO: check for pipe-overflow here and use files for i/o cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\ .format(cl1, ipyrad.bins.muscle, "//") print(cmd1, file=proc.stdin) ## read the stdout by line until splitter is reached for line in iter(proc.stdout.readline, "//\n"): align1 += line ## send align2 to the bash shell ## TODO: check for pipe-overflow here and use files for i/o cmd2 = "echo -e '{}' | {} -quiet -in - ; echo {}"\ .format(cl2, ipyrad.bins.muscle, "//") print(cmd2, file=proc.stdin) ## read the stdout by line until splitter is reached for line in iter(proc.stdout.readline, "//\n"): align2 += line ## join the aligned read1 and read2 and ensure name order match la1 = align1[1:].split("\n>") la2 = align2[1:].split("\n>") dalign1 = dict([i.split("\n", 1) for i in la1]) dalign2 = dict([i.split("\n", 1) for i in la2]) keys = sorted(dalign1.keys(), key=DEREP) keys2 = sorted(dalign2.keys(), key=DEREP) ## Make sure R1 and R2 actually exist for each sample. If not ## bail out of this cluster. 
if not len(keys) == len(keys2): LOGGER.error("R1 and R2 results differ in length: "\ + "\nR1 - {}\nR2 - {}".format(keys, keys2)) continue ## impute allele (lowercase) info back into alignments for kidx, key in enumerate(keys): concatseq = dalign1[key].replace("\n", "")+\ "nnnn"+dalign2[key].replace("\n", "") ## impute alleles if save_alleles: newmask = np.zeros(len(concatseq), dtype=np.bool_) ## check for indels and impute to amask indidx = np.where(np.array(list(concatseq)) == "-")[0] if indidx.size: allrows = np.arange(amask.shape[1]) mask = np.ones(allrows.shape[0], dtype=np.bool_) for idx in indidx: if idx < mask.shape[0]: mask[idx] = False not_idx = allrows[mask == 1] ## fill in new data into all other spots newmask[not_idx] = amask[kidx, :not_idx.shape[0]] else: newmask = amask[kidx] ## lower the alleles concatarr = np.array(list(concatseq)) concatarr[newmask] = np.char.lower(concatarr[newmask]) concatseq = concatarr.tostring() #LOGGER.info(concatseq) ## fill list with aligned data aligned.append("{}\n{}".format(key, concatseq)) ## put into a dict for writing to file #aligned = [] #for key in keys: # aligned.append("\n".join( # [key, # dalign1[key].replace("\n", "")+"nnnn"+\ # dalign2[key].replace("\n", "")])) except IndexError as inst: LOGGER.debug("Error in PE - ldx: {}".format()) LOGGER.debug("Vars: {}".format(dict(globals(), **locals()))) raise except ValueError: ## make back into strings cl1 = "\n".join(["\n".join(i) for i in zip(names, seqs)]) ## store allele (lowercase) info shape = (len(seqs), max([len(i) for i in seqs])) arrseqs = np.zeros(shape, dtype="S1") for row in range(arrseqs.shape[0]): seqsrow = seqs[row] arrseqs[row, :len(seqsrow)] = list(seqsrow) amask = np.char.islower(arrseqs) save_alleles = np.any(amask) ## send align1 to the bash shell (TODO: check for pipe-overflow) cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\ .format(cl1, ipyrad.bins.muscle, "//") print(cmd1, file=proc.stdin) ## read the stdout by line until splitter is reached for line in iter(proc.stdout.readline, "//\n"): align1 += line ## ensure name order match la1 = align1[1:].split("\n>") dalign1 = dict([i.split("\n", 1) for i in la1]) keys = sorted(dalign1.keys(), key=DEREP) ## put into dict for writing to file for kidx, key in enumerate(keys): concatseq = dalign1[key].replace("\n", "") ## impute alleles if save_alleles: newmask = np.zeros(len(concatseq), dtype=np.bool_) ## check for indels and impute to amask indidx = np.where(np.array(list(concatseq)) == "-")[0] if indidx.size: allrows = np.arange(amask.shape[1]) mask = np.ones(allrows.shape[0], dtype=np.bool_) for idx in indidx: if idx < mask.shape[0]: mask[idx] = False not_idx = allrows[mask == 1] ## fill in new data into all other spots newmask[not_idx] = amask[kidx, :not_idx.shape[0]] else: newmask = amask[kidx] ## lower the alleles concatarr = np.array(list(concatseq)) concatarr[newmask] = np.char.lower(concatarr[newmask]) concatseq = concatarr.tostring() ## fill list with aligned data aligned.append("{}\n{}".format(key, concatseq)) ## put aligned locus in list #aligned.append("\n".join(inner_aligned)) ## enforce maxlen on aligned seqs aseqs = np.vstack([list(i.split("\n")[1]) for i in aligned]) LOGGER.info("\naseqs here: %s", aseqs) ## index names by snames order sidxs = [snames.index(key.rsplit("_", 1)[0]) for key in keys] thislen = min(maxlen, aseqs.shape[1]) for idx in xrange(aseqs.shape[0]): ## enter into stack newn = aligned[idx].split(";", 1)[0] #newn = key[idx].split(";", 1)[0] istack.append("{}\n{}".format(newn, aseqs[idx, 
:thislen].tostring())) ## name index in sorted list (indels order) sidx = sidxs[idx] indels[sidx, ldx, :thislen] = aseqs[idx, :thislen] == "-" if istack: allstack.append("\n".join(istack)) #LOGGER.debug("\n\nSTACK (%s)\n%s\n", duples[ldx], "\n".join(istack)) ## cleanup proc.stdout.close() if proc.stderr: proc.stderr.close() proc.stdin.close() proc.wait() #LOGGER.info("\n\nALLSTACK %s\n", "\n".join(i) for i in allstack[:5]]) ## write to file after odx = chunk.rsplit("_")[-1] alignfile = os.path.join(data.tmpdir, "align_{}.fa".format(odx)) with open(alignfile, 'wb') as outfile: outfile.write("\n//\n//\n".join(allstack)+"\n") os.remove(chunk) ## save indels array to tmp dir ifile = os.path.join(data.tmpdir, "indels_{}.tmp.npy".format(odx)) np.save(ifile, indels) dfile = os.path.join(data.tmpdir, "duples_{}.tmp.npy".format(odx)) np.save(dfile, duples)
[ "def", "persistent_popen_align3", "(", "data", ",", "samples", ",", "chunk", ")", ":", "## data are already chunked, read in the whole thing", "with", "open", "(", "chunk", ",", "'rb'", ")", "as", "infile", ":", "clusts", "=", "infile", ".", "read", "(", ")", ".", "split", "(", "\"//\\n//\\n\"", ")", "[", ":", "-", "1", "]", "## snames to ensure sorted order", "samples", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", ".", "name", ")", "snames", "=", "[", "sample", ".", "name", "for", "sample", "in", "samples", "]", "## make a tmparr to store metadata (this can get huge, consider using h5)", "maxlen", "=", "data", ".", "_hackersonly", "[", "\"max_fragment_length\"", "]", "+", "20", "indels", "=", "np", ".", "zeros", "(", "(", "len", "(", "samples", ")", ",", "len", "(", "clusts", ")", ",", "maxlen", ")", ",", "dtype", "=", "np", ".", "bool_", ")", "duples", "=", "np", ".", "zeros", "(", "len", "(", "clusts", ")", ",", "dtype", "=", "np", ".", "bool_", ")", "## create a persistent shell for running muscle in. ", "proc", "=", "sps", ".", "Popen", "(", "[", "\"bash\"", "]", ",", "stdin", "=", "sps", ".", "PIPE", ",", "stdout", "=", "sps", ".", "PIPE", ",", "universal_newlines", "=", "True", ")", "## iterate over clusters until finished", "allstack", "=", "[", "]", "#istack = [] ", "for", "ldx", "in", "xrange", "(", "len", "(", "clusts", ")", ")", ":", "## new alignment string for read1s and read2s", "aligned", "=", "[", "]", "istack", "=", "[", "]", "lines", "=", "clusts", "[", "ldx", "]", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "names", "=", "lines", "[", ":", ":", "2", "]", "seqs", "=", "lines", "[", "1", ":", ":", "2", "]", "align1", "=", "\"\"", "align2", "=", "\"\"", "## we don't allow seeds with no hits to make it here, currently", "#if len(names) == 1:", "# aligned.append(clusts[ldx].replace(\">\", \"\").strip())", "## find duplicates and skip aligning but keep it for downstream.", "if", "len", "(", "names", ")", "!=", "len", "(", "set", "(", "[", "x", ".", "rsplit", "(", "\"_\"", ",", "1", ")", "[", "0", "]", "for", "x", "in", "names", "]", ")", ")", ":", "duples", "[", "ldx", "]", "=", "1", "istack", "=", "[", "\"{}\\n{}\"", ".", "format", "(", "i", "[", "1", ":", "]", ",", "j", ")", "for", "i", ",", "j", "in", "zip", "(", "names", ",", "seqs", ")", "]", "#aligned.append(clusts[ldx].replace(\">\", \"\").strip())", "else", ":", "## append counter to names because muscle doesn't retain order", "names", "=", "[", "\">{};*{}\"", ".", "format", "(", "j", "[", "1", ":", "]", ",", "i", ")", "for", "i", ",", "j", "in", "enumerate", "(", "names", ")", "]", "try", ":", "## try to split names on nnnn splitter", "clust1", ",", "clust2", "=", "zip", "(", "*", "[", "i", ".", "split", "(", "\"nnnn\"", ")", "for", "i", "in", "seqs", "]", ")", "## make back into strings", "cl1", "=", "\"\\n\"", ".", "join", "(", "itertools", ".", "chain", "(", "*", "zip", "(", "names", ",", "clust1", ")", ")", ")", "cl2", "=", "\"\\n\"", ".", "join", "(", "itertools", ".", "chain", "(", "*", "zip", "(", "names", ",", "clust2", ")", ")", ")", "## store allele (lowercase) info", "shape", "=", "(", "len", "(", "seqs", ")", ",", "max", "(", "[", "len", "(", "i", ")", "for", "i", "in", "seqs", "]", ")", ")", "arrseqs", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "\"S1\"", ")", "for", "row", "in", "range", "(", "arrseqs", ".", "shape", "[", "0", "]", ")", ":", "seqsrow", "=", "seqs", "[", "row", "]", "arrseqs", "[", "row", ",", ":", "len", "(", "seqsrow", ")", "]", "=", "list", 
"(", "seqsrow", ")", "amask", "=", "np", ".", "char", ".", "islower", "(", "arrseqs", ")", "save_alleles", "=", "np", ".", "any", "(", "amask", ")", "## send align1 to the bash shell", "## TODO: check for pipe-overflow here and use files for i/o ", "cmd1", "=", "\"echo -e '{}' | {} -quiet -in - ; echo {}\"", ".", "format", "(", "cl1", ",", "ipyrad", ".", "bins", ".", "muscle", ",", "\"//\"", ")", "print", "(", "cmd1", ",", "file", "=", "proc", ".", "stdin", ")", "## read the stdout by line until splitter is reached", "for", "line", "in", "iter", "(", "proc", ".", "stdout", ".", "readline", ",", "\"//\\n\"", ")", ":", "align1", "+=", "line", "## send align2 to the bash shell", "## TODO: check for pipe-overflow here and use files for i/o ", "cmd2", "=", "\"echo -e '{}' | {} -quiet -in - ; echo {}\"", ".", "format", "(", "cl2", ",", "ipyrad", ".", "bins", ".", "muscle", ",", "\"//\"", ")", "print", "(", "cmd2", ",", "file", "=", "proc", ".", "stdin", ")", "## read the stdout by line until splitter is reached", "for", "line", "in", "iter", "(", "proc", ".", "stdout", ".", "readline", ",", "\"//\\n\"", ")", ":", "align2", "+=", "line", "## join the aligned read1 and read2 and ensure name order match", "la1", "=", "align1", "[", "1", ":", "]", ".", "split", "(", "\"\\n>\"", ")", "la2", "=", "align2", "[", "1", ":", "]", ".", "split", "(", "\"\\n>\"", ")", "dalign1", "=", "dict", "(", "[", "i", ".", "split", "(", "\"\\n\"", ",", "1", ")", "for", "i", "in", "la1", "]", ")", "dalign2", "=", "dict", "(", "[", "i", ".", "split", "(", "\"\\n\"", ",", "1", ")", "for", "i", "in", "la2", "]", ")", "keys", "=", "sorted", "(", "dalign1", ".", "keys", "(", ")", ",", "key", "=", "DEREP", ")", "keys2", "=", "sorted", "(", "dalign2", ".", "keys", "(", ")", ",", "key", "=", "DEREP", ")", "## Make sure R1 and R2 actually exist for each sample. 
If not", "## bail out of this cluster.", "if", "not", "len", "(", "keys", ")", "==", "len", "(", "keys2", ")", ":", "LOGGER", ".", "error", "(", "\"R1 and R2 results differ in length: \"", "+", "\"\\nR1 - {}\\nR2 - {}\"", ".", "format", "(", "keys", ",", "keys2", ")", ")", "continue", "## impute allele (lowercase) info back into alignments", "for", "kidx", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "concatseq", "=", "dalign1", "[", "key", "]", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "+", "\"nnnn\"", "+", "dalign2", "[", "key", "]", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "## impute alleles", "if", "save_alleles", ":", "newmask", "=", "np", ".", "zeros", "(", "len", "(", "concatseq", ")", ",", "dtype", "=", "np", ".", "bool_", ")", "## check for indels and impute to amask", "indidx", "=", "np", ".", "where", "(", "np", ".", "array", "(", "list", "(", "concatseq", ")", ")", "==", "\"-\"", ")", "[", "0", "]", "if", "indidx", ".", "size", ":", "allrows", "=", "np", ".", "arange", "(", "amask", ".", "shape", "[", "1", "]", ")", "mask", "=", "np", ".", "ones", "(", "allrows", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "bool_", ")", "for", "idx", "in", "indidx", ":", "if", "idx", "<", "mask", ".", "shape", "[", "0", "]", ":", "mask", "[", "idx", "]", "=", "False", "not_idx", "=", "allrows", "[", "mask", "==", "1", "]", "## fill in new data into all other spots", "newmask", "[", "not_idx", "]", "=", "amask", "[", "kidx", ",", ":", "not_idx", ".", "shape", "[", "0", "]", "]", "else", ":", "newmask", "=", "amask", "[", "kidx", "]", "## lower the alleles", "concatarr", "=", "np", ".", "array", "(", "list", "(", "concatseq", ")", ")", "concatarr", "[", "newmask", "]", "=", "np", ".", "char", ".", "lower", "(", "concatarr", "[", "newmask", "]", ")", "concatseq", "=", "concatarr", ".", "tostring", "(", ")", "#LOGGER.info(concatseq)", "## fill list with aligned data", "aligned", ".", "append", "(", "\"{}\\n{}\"", ".", "format", "(", "key", ",", "concatseq", ")", ")", "## put into a dict for writing to file", "#aligned = []", "#for key in keys:", "# aligned.append(\"\\n\".join(", "# [key, ", "# dalign1[key].replace(\"\\n\", \"\")+\"nnnn\"+\\", "# dalign2[key].replace(\"\\n\", \"\")]))", "except", "IndexError", "as", "inst", ":", "LOGGER", ".", "debug", "(", "\"Error in PE - ldx: {}\"", ".", "format", "(", ")", ")", "LOGGER", ".", "debug", "(", "\"Vars: {}\"", ".", "format", "(", "dict", "(", "globals", "(", ")", ",", "*", "*", "locals", "(", ")", ")", ")", ")", "raise", "except", "ValueError", ":", "## make back into strings", "cl1", "=", "\"\\n\"", ".", "join", "(", "[", "\"\\n\"", ".", "join", "(", "i", ")", "for", "i", "in", "zip", "(", "names", ",", "seqs", ")", "]", ")", "## store allele (lowercase) info", "shape", "=", "(", "len", "(", "seqs", ")", ",", "max", "(", "[", "len", "(", "i", ")", "for", "i", "in", "seqs", "]", ")", ")", "arrseqs", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "\"S1\"", ")", "for", "row", "in", "range", "(", "arrseqs", ".", "shape", "[", "0", "]", ")", ":", "seqsrow", "=", "seqs", "[", "row", "]", "arrseqs", "[", "row", ",", ":", "len", "(", "seqsrow", ")", "]", "=", "list", "(", "seqsrow", ")", "amask", "=", "np", ".", "char", ".", "islower", "(", "arrseqs", ")", "save_alleles", "=", "np", ".", "any", "(", "amask", ")", "## send align1 to the bash shell (TODO: check for pipe-overflow)", "cmd1", "=", "\"echo -e '{}' | {} -quiet -in - ; echo {}\"", ".", "format", "(", "cl1", ",", "ipyrad", ".", "bins", ".", 
"muscle", ",", "\"//\"", ")", "print", "(", "cmd1", ",", "file", "=", "proc", ".", "stdin", ")", "## read the stdout by line until splitter is reached", "for", "line", "in", "iter", "(", "proc", ".", "stdout", ".", "readline", ",", "\"//\\n\"", ")", ":", "align1", "+=", "line", "## ensure name order match", "la1", "=", "align1", "[", "1", ":", "]", ".", "split", "(", "\"\\n>\"", ")", "dalign1", "=", "dict", "(", "[", "i", ".", "split", "(", "\"\\n\"", ",", "1", ")", "for", "i", "in", "la1", "]", ")", "keys", "=", "sorted", "(", "dalign1", ".", "keys", "(", ")", ",", "key", "=", "DEREP", ")", "## put into dict for writing to file", "for", "kidx", ",", "key", "in", "enumerate", "(", "keys", ")", ":", "concatseq", "=", "dalign1", "[", "key", "]", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "## impute alleles", "if", "save_alleles", ":", "newmask", "=", "np", ".", "zeros", "(", "len", "(", "concatseq", ")", ",", "dtype", "=", "np", ".", "bool_", ")", "## check for indels and impute to amask", "indidx", "=", "np", ".", "where", "(", "np", ".", "array", "(", "list", "(", "concatseq", ")", ")", "==", "\"-\"", ")", "[", "0", "]", "if", "indidx", ".", "size", ":", "allrows", "=", "np", ".", "arange", "(", "amask", ".", "shape", "[", "1", "]", ")", "mask", "=", "np", ".", "ones", "(", "allrows", ".", "shape", "[", "0", "]", ",", "dtype", "=", "np", ".", "bool_", ")", "for", "idx", "in", "indidx", ":", "if", "idx", "<", "mask", ".", "shape", "[", "0", "]", ":", "mask", "[", "idx", "]", "=", "False", "not_idx", "=", "allrows", "[", "mask", "==", "1", "]", "## fill in new data into all other spots", "newmask", "[", "not_idx", "]", "=", "amask", "[", "kidx", ",", ":", "not_idx", ".", "shape", "[", "0", "]", "]", "else", ":", "newmask", "=", "amask", "[", "kidx", "]", "## lower the alleles", "concatarr", "=", "np", ".", "array", "(", "list", "(", "concatseq", ")", ")", "concatarr", "[", "newmask", "]", "=", "np", ".", "char", ".", "lower", "(", "concatarr", "[", "newmask", "]", ")", "concatseq", "=", "concatarr", ".", "tostring", "(", ")", "## fill list with aligned data", "aligned", ".", "append", "(", "\"{}\\n{}\"", ".", "format", "(", "key", ",", "concatseq", ")", ")", "## put aligned locus in list", "#aligned.append(\"\\n\".join(inner_aligned))", "## enforce maxlen on aligned seqs", "aseqs", "=", "np", ".", "vstack", "(", "[", "list", "(", "i", ".", "split", "(", "\"\\n\"", ")", "[", "1", "]", ")", "for", "i", "in", "aligned", "]", ")", "LOGGER", ".", "info", "(", "\"\\naseqs here: %s\"", ",", "aseqs", ")", "## index names by snames order", "sidxs", "=", "[", "snames", ".", "index", "(", "key", ".", "rsplit", "(", "\"_\"", ",", "1", ")", "[", "0", "]", ")", "for", "key", "in", "keys", "]", "thislen", "=", "min", "(", "maxlen", ",", "aseqs", ".", "shape", "[", "1", "]", ")", "for", "idx", "in", "xrange", "(", "aseqs", ".", "shape", "[", "0", "]", ")", ":", "## enter into stack", "newn", "=", "aligned", "[", "idx", "]", ".", "split", "(", "\";\"", ",", "1", ")", "[", "0", "]", "#newn = key[idx].split(\";\", 1)[0]", "istack", ".", "append", "(", "\"{}\\n{}\"", ".", "format", "(", "newn", ",", "aseqs", "[", "idx", ",", ":", "thislen", "]", ".", "tostring", "(", ")", ")", ")", "## name index in sorted list (indels order)", "sidx", "=", "sidxs", "[", "idx", "]", "indels", "[", "sidx", ",", "ldx", ",", ":", "thislen", "]", "=", "aseqs", "[", "idx", ",", ":", "thislen", "]", "==", "\"-\"", "if", "istack", ":", "allstack", ".", "append", "(", "\"\\n\"", ".", "join", "(", "istack", ")", ")", 
"#LOGGER.debug(\"\\n\\nSTACK (%s)\\n%s\\n\", duples[ldx], \"\\n\".join(istack))", "## cleanup", "proc", ".", "stdout", ".", "close", "(", ")", "if", "proc", ".", "stderr", ":", "proc", ".", "stderr", ".", "close", "(", ")", "proc", ".", "stdin", ".", "close", "(", ")", "proc", ".", "wait", "(", ")", "#LOGGER.info(\"\\n\\nALLSTACK %s\\n\", \"\\n\".join(i) for i in allstack[:5]])", "## write to file after", "odx", "=", "chunk", ".", "rsplit", "(", "\"_\"", ")", "[", "-", "1", "]", "alignfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "tmpdir", ",", "\"align_{}.fa\"", ".", "format", "(", "odx", ")", ")", "with", "open", "(", "alignfile", ",", "'wb'", ")", "as", "outfile", ":", "outfile", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "allstack", ")", "+", "\"\\n\"", ")", "os", ".", "remove", "(", "chunk", ")", "## save indels array to tmp dir", "ifile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "tmpdir", ",", "\"indels_{}.tmp.npy\"", ".", "format", "(", "odx", ")", ")", "np", ".", "save", "(", "ifile", ",", "indels", ")", "dfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "tmpdir", ",", "\"duples_{}.tmp.npy\"", ".", "format", "(", "odx", ")", ")", "np", ".", "save", "(", "dfile", ",", "duples", ")" ]
notes
[ "notes" ]
python
valid
44.264463
nuagenetworks/bambou
bambou/nurest_object.py
https://github.com/nuagenetworks/bambou/blob/d334fea23e384d3df8e552fe1849ad707941c666/bambou/nurest_object.py#L853-L865
def _did_receive_response(self, connection): """ Receive a response from the connection """ if connection.has_timeouted: bambou_logger.info("NURESTConnection has timeout.") return has_callbacks = connection.has_callbacks() should_post = not has_callbacks if connection.handle_response_for_connection(should_post=should_post) and has_callbacks: callback = connection.callbacks['local'] callback(connection)
[ "def", "_did_receive_response", "(", "self", ",", "connection", ")", ":", "if", "connection", ".", "has_timeouted", ":", "bambou_logger", ".", "info", "(", "\"NURESTConnection has timeout.\"", ")", "return", "has_callbacks", "=", "connection", ".", "has_callbacks", "(", ")", "should_post", "=", "not", "has_callbacks", "if", "connection", ".", "handle_response_for_connection", "(", "should_post", "=", "should_post", ")", "and", "has_callbacks", ":", "callback", "=", "connection", ".", "callbacks", "[", "'local'", "]", "callback", "(", "connection", ")" ]
Receive a response from the connection
[ "Receive", "a", "response", "from", "the", "connection" ]
python
train
37.230769
earlzo/hfut
hfut/log.py
https://github.com/earlzo/hfut/blob/09270a9647fba79f26fd1a8a3c53c0678b5257a1/hfut/log.py#L20-L67
def report_response(response, request_headers=True, request_body=True, response_headers=False, response_body=False, redirection=False): """ 生成响应报告 :param response: ``requests.models.Response`` 对象 :param request_headers: 是否加入请求头 :param request_body: 是否加入请求体 :param response_headers: 是否加入响应头 :param response_body: 是否加入响应体 :param redirection: 是否加入重定向响应 :return: str """ # https://docs.python.org/3/library/string.html#formatstrings url = 'Url: [{method}]{url} {status} {elapsed:.2f}ms'.format( method=response.request.method, url=response.url, status=response.status_code, elapsed=response.elapsed.total_seconds() * 1000 ) pieces = [url] if request_headers: request_headers = 'Request headers: {request_headers}'.format(request_headers=response.request.headers) pieces.append(request_headers) if request_body: request_body = 'Request body: {request_body}'.format(request_body=response.request.body) pieces.append(request_body) if response_headers: response_headers = 'Response headers: {response_headers}'.format(response_headers=response.headers) pieces.append(response_headers) if response_body: response_body = 'Response body: {response_body}'.format(response_body=response.text) pieces.append(response_body) reporter = '\n'.join(pieces) if redirection and response.history: for h in response.history[::-1]: redirect_reporter = report_response( h, request_headers, request_body, response_headers, response_body, redirection=False ) reporter = '\n'.join([redirect_reporter, ' Redirect ↓ '.center(72, '-'), reporter]) return reporter
[ "def", "report_response", "(", "response", ",", "request_headers", "=", "True", ",", "request_body", "=", "True", ",", "response_headers", "=", "False", ",", "response_body", "=", "False", ",", "redirection", "=", "False", ")", ":", "# https://docs.python.org/3/library/string.html#formatstrings", "url", "=", "'Url: [{method}]{url} {status} {elapsed:.2f}ms'", ".", "format", "(", "method", "=", "response", ".", "request", ".", "method", ",", "url", "=", "response", ".", "url", ",", "status", "=", "response", ".", "status_code", ",", "elapsed", "=", "response", ".", "elapsed", ".", "total_seconds", "(", ")", "*", "1000", ")", "pieces", "=", "[", "url", "]", "if", "request_headers", ":", "request_headers", "=", "'Request headers: {request_headers}'", ".", "format", "(", "request_headers", "=", "response", ".", "request", ".", "headers", ")", "pieces", ".", "append", "(", "request_headers", ")", "if", "request_body", ":", "request_body", "=", "'Request body: {request_body}'", ".", "format", "(", "request_body", "=", "response", ".", "request", ".", "body", ")", "pieces", ".", "append", "(", "request_body", ")", "if", "response_headers", ":", "response_headers", "=", "'Response headers: {response_headers}'", ".", "format", "(", "response_headers", "=", "response", ".", "headers", ")", "pieces", ".", "append", "(", "response_headers", ")", "if", "response_body", ":", "response_body", "=", "'Response body: {response_body}'", ".", "format", "(", "response_body", "=", "response", ".", "text", ")", "pieces", ".", "append", "(", "response_body", ")", "reporter", "=", "'\\n'", ".", "join", "(", "pieces", ")", "if", "redirection", "and", "response", ".", "history", ":", "for", "h", "in", "response", ".", "history", "[", ":", ":", "-", "1", "]", ":", "redirect_reporter", "=", "report_response", "(", "h", ",", "request_headers", ",", "request_body", ",", "response_headers", ",", "response_body", ",", "redirection", "=", "False", ")", "reporter", "=", "'\\n'", ".", "join", "(", "[", "redirect_reporter", ",", "' Redirect ↓ '.c", "e", "nter(7", "2", ", ", "'", "'),", " ", "r", "porter])", "", "", "return", "reporter" ]
Generate a response report :param response: a ``requests.models.Response`` object :param request_headers: whether to include the request headers :param request_body: whether to include the request body :param response_headers: whether to include the response headers :param response_body: whether to include the response body :param redirection: whether to include redirected responses :return: str
[ "生成响应报告" ]
python
train
37.9375
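A minimal usage sketch for the report_response helper above, assuming the requests package is installed and the function has been imported from its module (the import path here is illustrative, not taken from the record):

import requests
from report_utils import report_response  # hypothetical module name for the snippet above

# Fetch a page that redirects so the redirection branch is exercised.
resp = requests.get('http://example.com', allow_redirects=True)

# Build a report that also includes response headers and every redirect hop.
print(report_response(resp, response_headers=True, redirection=True))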
google/grr
grr/client_builder/grr_response_client_builder/client_build.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/client_build.py#L296-L384
def RepackTemplates(self, repack_configs, templates, output_dir, config=None, sign=False, signed_template=False): """Call repacker in a subprocess.""" pool = multiprocessing.Pool(processes=10) results = [] bulk_sign_installers = False for repack_config in repack_configs: for template in templates: repack_args = ["grr_client_build"] if config: repack_args.extend(["--config", config]) repack_args.extend([ "--secondary_configs", repack_config, "repack", "--template", template, "--output_dir", self.GetOutputDir(output_dir, repack_config) ]) # We only sign exes and rpms at the moment. The others will raise if we # try to ask for signing. passwd = None if sign: if template.endswith(".exe.zip"): # This is for osslsigncode only. if platform.system() != "Windows": passwd = self.GetWindowsPassphrase() repack_args.append("--sign") else: bulk_sign_installers = True if signed_template: repack_args.append("--signed_template") elif template.endswith(".rpm.zip"): bulk_sign_installers = True print("Calling %s" % " ".join(repack_args)) results.append( pool.apply_async(SpawnProcess, (repack_args,), dict(passwd=passwd))) # Also build debug if it's windows. if template.endswith(".exe.zip"): debug_args = [] debug_args.extend(repack_args) debug_args.append("--debug_build") print("Calling %s" % " ".join(debug_args)) results.append( pool.apply_async(SpawnProcess, (debug_args,), dict(passwd=passwd))) try: pool.close() # Workaround to handle keyboard kills # http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool # get will raise if the child raises. for result_obj in results: result_obj.get(9999) pool.join() except KeyboardInterrupt: print("parent received control-c") pool.terminate() except ErrorDuringRepacking: pool.terminate() raise if bulk_sign_installers: to_sign = {} for root, _, files in os.walk(output_dir): for f in files: if f.endswith(".exe"): to_sign.setdefault("windows", []).append(os.path.join(root, f)) elif f.endswith(".rpm"): to_sign.setdefault("rpm", []).append(os.path.join(root, f)) if to_sign.get("windows"): signer = repacking.TemplateRepacker().GetSigner([ "ClientBuilder Context", "Platform:%s" % platform.system(), "Target:Windows" ]) signer.SignFiles(to_sign.get("windows")) if to_sign.get("rpm"): signer = repacking.TemplateRepacker().GetSigner([ "ClientBuilder Context", "Platform:%s" % platform.system(), "Target:Linux", "Target:LinuxRpm" ]) signer.AddSignatureToRPMs(to_sign.get("rpm"))
[ "def", "RepackTemplates", "(", "self", ",", "repack_configs", ",", "templates", ",", "output_dir", ",", "config", "=", "None", ",", "sign", "=", "False", ",", "signed_template", "=", "False", ")", ":", "pool", "=", "multiprocessing", ".", "Pool", "(", "processes", "=", "10", ")", "results", "=", "[", "]", "bulk_sign_installers", "=", "False", "for", "repack_config", "in", "repack_configs", ":", "for", "template", "in", "templates", ":", "repack_args", "=", "[", "\"grr_client_build\"", "]", "if", "config", ":", "repack_args", ".", "extend", "(", "[", "\"--config\"", ",", "config", "]", ")", "repack_args", ".", "extend", "(", "[", "\"--secondary_configs\"", ",", "repack_config", ",", "\"repack\"", ",", "\"--template\"", ",", "template", ",", "\"--output_dir\"", ",", "self", ".", "GetOutputDir", "(", "output_dir", ",", "repack_config", ")", "]", ")", "# We only sign exes and rpms at the moment. The others will raise if we", "# try to ask for signing.", "passwd", "=", "None", "if", "sign", ":", "if", "template", ".", "endswith", "(", "\".exe.zip\"", ")", ":", "# This is for osslsigncode only.", "if", "platform", ".", "system", "(", ")", "!=", "\"Windows\"", ":", "passwd", "=", "self", ".", "GetWindowsPassphrase", "(", ")", "repack_args", ".", "append", "(", "\"--sign\"", ")", "else", ":", "bulk_sign_installers", "=", "True", "if", "signed_template", ":", "repack_args", ".", "append", "(", "\"--signed_template\"", ")", "elif", "template", ".", "endswith", "(", "\".rpm.zip\"", ")", ":", "bulk_sign_installers", "=", "True", "print", "(", "\"Calling %s\"", "%", "\" \"", ".", "join", "(", "repack_args", ")", ")", "results", ".", "append", "(", "pool", ".", "apply_async", "(", "SpawnProcess", ",", "(", "repack_args", ",", ")", ",", "dict", "(", "passwd", "=", "passwd", ")", ")", ")", "# Also build debug if it's windows.", "if", "template", ".", "endswith", "(", "\".exe.zip\"", ")", ":", "debug_args", "=", "[", "]", "debug_args", ".", "extend", "(", "repack_args", ")", "debug_args", ".", "append", "(", "\"--debug_build\"", ")", "print", "(", "\"Calling %s\"", "%", "\" \"", ".", "join", "(", "debug_args", ")", ")", "results", ".", "append", "(", "pool", ".", "apply_async", "(", "SpawnProcess", ",", "(", "debug_args", ",", ")", ",", "dict", "(", "passwd", "=", "passwd", ")", ")", ")", "try", ":", "pool", ".", "close", "(", ")", "# Workaround to handle keyboard kills", "# http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool", "# get will raise if the child raises.", "for", "result_obj", "in", "results", ":", "result_obj", ".", "get", "(", "9999", ")", "pool", ".", "join", "(", ")", "except", "KeyboardInterrupt", ":", "print", "(", "\"parent received control-c\"", ")", "pool", ".", "terminate", "(", ")", "except", "ErrorDuringRepacking", ":", "pool", ".", "terminate", "(", ")", "raise", "if", "bulk_sign_installers", ":", "to_sign", "=", "{", "}", "for", "root", ",", "_", ",", "files", "in", "os", ".", "walk", "(", "output_dir", ")", ":", "for", "f", "in", "files", ":", "if", "f", ".", "endswith", "(", "\".exe\"", ")", ":", "to_sign", ".", "setdefault", "(", "\"windows\"", ",", "[", "]", ")", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", "elif", "f", ".", "endswith", "(", "\".rpm\"", ")", ":", "to_sign", ".", "setdefault", "(", "\"rpm\"", ",", "[", "]", ")", ".", "append", "(", "os", ".", "path", ".", "join", "(", "root", ",", "f", ")", ")", "if", "to_sign", ".", "get", "(", "\"windows\"", ")", ":", "signer", "=", 
"repacking", ".", "TemplateRepacker", "(", ")", ".", "GetSigner", "(", "[", "\"ClientBuilder Context\"", ",", "\"Platform:%s\"", "%", "platform", ".", "system", "(", ")", ",", "\"Target:Windows\"", "]", ")", "signer", ".", "SignFiles", "(", "to_sign", ".", "get", "(", "\"windows\"", ")", ")", "if", "to_sign", ".", "get", "(", "\"rpm\"", ")", ":", "signer", "=", "repacking", ".", "TemplateRepacker", "(", ")", ".", "GetSigner", "(", "[", "\"ClientBuilder Context\"", ",", "\"Platform:%s\"", "%", "platform", ".", "system", "(", ")", ",", "\"Target:Linux\"", ",", "\"Target:LinuxRpm\"", "]", ")", "signer", ".", "AddSignatureToRPMs", "(", "to_sign", ".", "get", "(", "\"rpm\"", ")", ")" ]
Call repacker in a subprocess.
[ "Call", "repacker", "in", "a", "subprocess", "." ]
python
train
35.741573
fastai/fastai
fastai/callbacks/loss_metrics.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callbacks/loss_metrics.py#L22-L28
def on_batch_end(self, last_target, train, **kwargs): "Update the metrics if not `train`" if train: return bs = last_target.size(0) for name in self.names: self.metrics[name] += bs * self.learn.loss_func.metrics[name].detach().cpu() self.nums += bs
[ "def", "on_batch_end", "(", "self", ",", "last_target", ",", "train", ",", "*", "*", "kwargs", ")", ":", "if", "train", ":", "return", "bs", "=", "last_target", ".", "size", "(", "0", ")", "for", "name", "in", "self", ".", "names", ":", "self", ".", "metrics", "[", "name", "]", "+=", "bs", "*", "self", ".", "learn", ".", "loss_func", ".", "metrics", "[", "name", "]", ".", "detach", "(", ")", ".", "cpu", "(", ")", "self", ".", "nums", "+=", "bs" ]
Update the metrics if not `train`
[ "Update", "the", "metrics", "if", "not", "train" ]
python
train
42
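The callback above keeps a batch-size-weighted running sum per metric; dividing by the accumulated sample count gives the epoch average. A framework-free sketch of the same bookkeeping (metric names and values below are made up):

metrics = {'kl': 0.0, 'recon': 0.0}
nums = 0
for bs, batch_metrics in [(32, {'kl': 0.10, 'recon': 0.50}), (16, {'kl': 0.20, 'recon': 0.40})]:
    for name, value in batch_metrics.items():
        metrics[name] += bs * value      # weight each batch by its size, as on_batch_end does
    nums += bs
averages = {name: total / nums for name, total in metrics.items()}
print(averages)                          # per-metric means over the accumulated samples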
JukeboxPipeline/jukeboxmaya
src/jukeboxmaya/menu.py
https://github.com/JukeboxPipeline/jukeboxmaya/blob/c8d6318d53cdb5493453c4a6b65ef75bdb2d5f2c/src/jukeboxmaya/menu.py#L208-L219
def delete_menu(self, menu): """ Delete the specified menu :param menu: :type menu: :returns: :rtype: :raises: """ if menu.parent is None: del self.menus[menu.name()] menu._delete()
[ "def", "delete_menu", "(", "self", ",", "menu", ")", ":", "if", "menu", ".", "parent", "is", "None", ":", "del", "self", ".", "menus", "[", "menu", ".", "name", "(", ")", "]", "menu", ".", "_delete", "(", ")" ]
Delete the specified menu :param menu: :type menu: :returns: :rtype: :raises:
[ "Delete", "the", "specified", "menu" ]
python
train
21.25
apache/incubator-heron
heron/tools/explorer/src/python/args.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/explorer/src/python/args.py#L30-L40
def add_config(parser): """ add config """ # the default config path default_config_path = config.get_heron_conf_dir() parser.add_argument( '--config-path', metavar='(a string; path to cluster config; default: "' + default_config_path + '")', default=os.path.join(config.get_heron_dir(), default_config_path)) return parser
[ "def", "add_config", "(", "parser", ")", ":", "# the default config path", "default_config_path", "=", "config", ".", "get_heron_conf_dir", "(", ")", "parser", ".", "add_argument", "(", "'--config-path'", ",", "metavar", "=", "'(a string; path to cluster config; default: \"'", "+", "default_config_path", "+", "'\")'", ",", "default", "=", "os", ".", "path", ".", "join", "(", "config", ".", "get_heron_dir", "(", ")", ",", "default_config_path", ")", ")", "return", "parser" ]
add config
[ "add", "config" ]
python
valid
31.181818
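A short sketch of wiring the helper above into an argparse parser, with add_config imported from the explorer args module shown in the record; it assumes a working Heron install so that config.get_heron_conf_dir() and config.get_heron_dir() resolve:

import argparse

parser = argparse.ArgumentParser(prog='heron-explorer')
add_config(parser)                                     # registers the --config-path option
args = parser.parse_args(['--config-path', '/tmp/heron/conf'])
print(args.config_path)                                # -> /tmp/heron/conf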
MultipedRobotics/pyxl320
pyxl320/Packet.py
https://github.com/MultipedRobotics/pyxl320/blob/1a56540e208b028ee47d5fa0a7c7babcee0d9214/pyxl320/Packet.py#L176-L188
def makeResetPacket(ID, param): """ Resets a servo to one of 3 reset states: XL320_RESET_ALL = 0xFF XL320_RESET_ALL_BUT_ID = 0x01 XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02 """ if param not in [0x01, 0x02, 0xff]: raise Exception('Packet.makeResetPacket invalide parameter {}'.format(param)) # pkt = makePacket(ID, xl320.XL320_RESET, None, [param]) pkt = makePacket(ID, xl320.XL320_RESET, None, [1]) return pkt
[ "def", "makeResetPacket", "(", "ID", ",", "param", ")", ":", "if", "param", "not", "in", "[", "0x01", ",", "0x02", ",", "0xff", "]", ":", "raise", "Exception", "(", "'Packet.makeResetPacket invalide parameter {}'", ".", "format", "(", "param", ")", ")", "# pkt = makePacket(ID, xl320.XL320_RESET, None, [param])", "pkt", "=", "makePacket", "(", "ID", ",", "xl320", ".", "XL320_RESET", ",", "None", ",", "[", "1", "]", ")", "return", "pkt" ]
Resets a servo to one of 3 reset states: XL320_RESET_ALL = 0xFF XL320_RESET_ALL_BUT_ID = 0x01 XL320_RESET_ALL_BUT_ID_BAUD_RATE = 0x02
[ "Resets", "a", "servo", "to", "one", "of", "3", "reset", "states", ":" ]
python
train
33.384615
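A usage sketch for makeResetPacket, assuming pyxl320 is installed; the constant name mirrors the reset states listed in the docstring above, and actually writing the packet to a serial link is out of scope here:

from pyxl320 import Packet, xl320

# Reset servo 1 but keep its ID (0x01, per the docstring above).
pkt = Packet.makeResetPacket(1, xl320.XL320_RESET_ALL_BUT_ID)
print(pkt)   # a list of raw packet bytes ready to be written to the bus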
DataDog/integrations-core
kubernetes/datadog_checks/kubernetes/kubernetes.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/kubernetes/datadog_checks/kubernetes/kubernetes.py#L514-L577
def _update_kube_events(self, instance, pods_list, event_items): """ Process kube events and send ddog events The namespace filtering is done here instead of KubeEventRetriever to avoid interfering with service discovery """ node_ip, node_name = self.kubeutil.get_node_info() self.log.debug('Processing events on {} [{}]'.format(node_name, node_ip)) k8s_namespaces = instance.get('namespaces', DEFAULT_NAMESPACES) if not isinstance(k8s_namespaces, list): self.log.warning('Configuration key "namespaces" is not a list: fallback to the default value') k8s_namespaces = DEFAULT_NAMESPACES # handle old config value if 'namespace' in instance and instance.get('namespace') not in (None, 'default'): self.log.warning('''The 'namespace' parameter is deprecated and will stop being supported starting ''' '''from 5.13. Please use 'namespaces' and/or 'namespace_name_regexp' instead.''') k8s_namespaces.append(instance.get('namespace')) if self.k8s_namespace_regexp: namespaces_endpoint = '{}/namespaces'.format(self.kubeutil.kubernetes_api_url) self.log.debug('Kubernetes API endpoint to query namespaces: %s' % namespaces_endpoint) namespaces = self.kubeutil.retrieve_json_auth(namespaces_endpoint).json() for namespace in namespaces.get('items', []): name = namespace.get('metadata', {}).get('name', None) if name and self.k8s_namespace_regexp.match(name): k8s_namespaces.append(name) k8s_namespaces = set(k8s_namespaces) for event in event_items: event_ts = calendar.timegm(time.strptime(event.get('lastTimestamp'), '%Y-%m-%dT%H:%M:%SZ')) involved_obj = event.get('involvedObject', {}) # filter events by white listed namespaces (empty namespace belong to the 'default' one) if involved_obj.get('namespace', 'default') not in k8s_namespaces: continue tags = self.kubeutil.extract_event_tags(event) tags.extend(instance.get('tags', [])) title = '{} {} on {}'.format(involved_obj.get('name'), event.get('reason'), node_name) message = event.get('message') source = event.get('source') k8s_event_type = event.get('type') alert_type = K8S_ALERT_MAP.get(k8s_event_type, 'info') if source: message += '\nSource: {} {}\n'.format(source.get('component', ''), source.get('host', '')) msg_body = "%%%\n{}\n```\n{}\n```\n%%%".format(title, message) dd_event = { 'timestamp': event_ts, 'host': node_ip, 'event_type': EVENT_TYPE, 'msg_title': title, 'msg_text': msg_body, 'source_type_name': EVENT_TYPE, 'alert_type': alert_type, 'event_object': 'kubernetes:{}'.format(involved_obj.get('name')), 'tags': tags, } self.event(dd_event)
[ "def", "_update_kube_events", "(", "self", ",", "instance", ",", "pods_list", ",", "event_items", ")", ":", "node_ip", ",", "node_name", "=", "self", ".", "kubeutil", ".", "get_node_info", "(", ")", "self", ".", "log", ".", "debug", "(", "'Processing events on {} [{}]'", ".", "format", "(", "node_name", ",", "node_ip", ")", ")", "k8s_namespaces", "=", "instance", ".", "get", "(", "'namespaces'", ",", "DEFAULT_NAMESPACES", ")", "if", "not", "isinstance", "(", "k8s_namespaces", ",", "list", ")", ":", "self", ".", "log", ".", "warning", "(", "'Configuration key \"namespaces\" is not a list: fallback to the default value'", ")", "k8s_namespaces", "=", "DEFAULT_NAMESPACES", "# handle old config value", "if", "'namespace'", "in", "instance", "and", "instance", ".", "get", "(", "'namespace'", ")", "not", "in", "(", "None", ",", "'default'", ")", ":", "self", ".", "log", ".", "warning", "(", "'''The 'namespace' parameter is deprecated and will stop being supported starting '''", "'''from 5.13. Please use 'namespaces' and/or 'namespace_name_regexp' instead.'''", ")", "k8s_namespaces", ".", "append", "(", "instance", ".", "get", "(", "'namespace'", ")", ")", "if", "self", ".", "k8s_namespace_regexp", ":", "namespaces_endpoint", "=", "'{}/namespaces'", ".", "format", "(", "self", ".", "kubeutil", ".", "kubernetes_api_url", ")", "self", ".", "log", ".", "debug", "(", "'Kubernetes API endpoint to query namespaces: %s'", "%", "namespaces_endpoint", ")", "namespaces", "=", "self", ".", "kubeutil", ".", "retrieve_json_auth", "(", "namespaces_endpoint", ")", ".", "json", "(", ")", "for", "namespace", "in", "namespaces", ".", "get", "(", "'items'", ",", "[", "]", ")", ":", "name", "=", "namespace", ".", "get", "(", "'metadata'", ",", "{", "}", ")", ".", "get", "(", "'name'", ",", "None", ")", "if", "name", "and", "self", ".", "k8s_namespace_regexp", ".", "match", "(", "name", ")", ":", "k8s_namespaces", ".", "append", "(", "name", ")", "k8s_namespaces", "=", "set", "(", "k8s_namespaces", ")", "for", "event", "in", "event_items", ":", "event_ts", "=", "calendar", ".", "timegm", "(", "time", ".", "strptime", "(", "event", ".", "get", "(", "'lastTimestamp'", ")", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", ")", "involved_obj", "=", "event", ".", "get", "(", "'involvedObject'", ",", "{", "}", ")", "# filter events by white listed namespaces (empty namespace belong to the 'default' one)", "if", "involved_obj", ".", "get", "(", "'namespace'", ",", "'default'", ")", "not", "in", "k8s_namespaces", ":", "continue", "tags", "=", "self", ".", "kubeutil", ".", "extract_event_tags", "(", "event", ")", "tags", ".", "extend", "(", "instance", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "title", "=", "'{} {} on {}'", ".", "format", "(", "involved_obj", ".", "get", "(", "'name'", ")", ",", "event", ".", "get", "(", "'reason'", ")", ",", "node_name", ")", "message", "=", "event", ".", "get", "(", "'message'", ")", "source", "=", "event", ".", "get", "(", "'source'", ")", "k8s_event_type", "=", "event", ".", "get", "(", "'type'", ")", "alert_type", "=", "K8S_ALERT_MAP", ".", "get", "(", "k8s_event_type", ",", "'info'", ")", "if", "source", ":", "message", "+=", "'\\nSource: {} {}\\n'", ".", "format", "(", "source", ".", "get", "(", "'component'", ",", "''", ")", ",", "source", ".", "get", "(", "'host'", ",", "''", ")", ")", "msg_body", "=", "\"%%%\\n{}\\n```\\n{}\\n```\\n%%%\"", ".", "format", "(", "title", ",", "message", ")", "dd_event", "=", "{", "'timestamp'", ":", "event_ts", ",", "'host'", ":", "node_ip", ",", 
"'event_type'", ":", "EVENT_TYPE", ",", "'msg_title'", ":", "title", ",", "'msg_text'", ":", "msg_body", ",", "'source_type_name'", ":", "EVENT_TYPE", ",", "'alert_type'", ":", "alert_type", ",", "'event_object'", ":", "'kubernetes:{}'", ".", "format", "(", "involved_obj", ".", "get", "(", "'name'", ")", ")", ",", "'tags'", ":", "tags", ",", "}", "self", ".", "event", "(", "dd_event", ")" ]
Process kube events and send ddog events. The namespace filtering is done here instead of KubeEventRetriever to avoid interfering with service discovery
[ "Process", "kube", "events", "and", "send", "ddog", "events", "The", "namespace", "filtering", "is", "done", "here", "instead", "of", "KubeEventRetriever", "to", "avoid", "interfering", "with", "service", "discovery" ]
python
train
48.75
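The namespace whitelist check at the heart of the method above can be shown standalone; the sample events below are made up but follow the Kubernetes event shape the code expects:

import calendar
import time

k8s_namespaces = {'default', 'kube-system'}
events = [
    {'lastTimestamp': '2023-01-01T00:00:00Z', 'involvedObject': {'namespace': 'default', 'name': 'pod-a'}},
    {'lastTimestamp': '2023-01-01T00:01:00Z', 'involvedObject': {'namespace': 'staging', 'name': 'pod-b'}},
]
for event in events:
    # Events whose namespace is not whitelisted are skipped (an empty namespace means 'default').
    if event['involvedObject'].get('namespace', 'default') not in k8s_namespaces:
        continue
    ts = calendar.timegm(time.strptime(event['lastTimestamp'], '%Y-%m-%dT%H:%M:%SZ'))
    print(event['involvedObject']['name'], ts)    # only pod-a is reported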
LPgenerator/django-db-mailer
dbmail/providers/parse_com/push.py
https://github.com/LPgenerator/django-db-mailer/blob/217a73c21ba5c6b68738f74b2c55a6dd2c1afe35/dbmail/providers/parse_com/push.py#L19-L59
def send(device_id, description, **kwargs): """ Site: http://parse.com API: https://www.parse.com/docs/push_guide#scheduled/REST Desc: Best app for system administrators """ headers = { "X-Parse-Application-Id": settings.PARSE_APP_ID, "X-Parse-REST-API-Key": settings.PARSE_API_KEY, "User-Agent": "DBMail/%s" % get_version(), "Content-type": "application/json", } data = { "where": { "user_id": device_id, }, "data": { "alert": description, "title": kwargs.pop("event") } } _data = kwargs.pop('data', None) if _data is not None: data.update(_data) http = HTTPSConnection(kwargs.pop("api_url", "api.parse.com")) http.request( "POST", "/1/push", headers=headers, body=dumps(data)) response = http.getresponse() if response.status != 200: raise ParseComError(response.reason) body = loads(response.read()) if body['error']: raise ParseComError(body['error']) return True
[ "def", "send", "(", "device_id", ",", "description", ",", "*", "*", "kwargs", ")", ":", "headers", "=", "{", "\"X-Parse-Application-Id\"", ":", "settings", ".", "PARSE_APP_ID", ",", "\"X-Parse-REST-API-Key\"", ":", "settings", ".", "PARSE_API_KEY", ",", "\"User-Agent\"", ":", "\"DBMail/%s\"", "%", "get_version", "(", ")", ",", "\"Content-type\"", ":", "\"application/json\"", ",", "}", "data", "=", "{", "\"where\"", ":", "{", "\"user_id\"", ":", "device_id", ",", "}", ",", "\"data\"", ":", "{", "\"alert\"", ":", "description", ",", "\"title\"", ":", "kwargs", ".", "pop", "(", "\"event\"", ")", "}", "}", "_data", "=", "kwargs", ".", "pop", "(", "'data'", ",", "None", ")", "if", "_data", "is", "not", "None", ":", "data", ".", "update", "(", "_data", ")", "http", "=", "HTTPSConnection", "(", "kwargs", ".", "pop", "(", "\"api_url\"", ",", "\"api.parse.com\"", ")", ")", "http", ".", "request", "(", "\"POST\"", ",", "\"/1/push\"", ",", "headers", "=", "headers", ",", "body", "=", "dumps", "(", "data", ")", ")", "response", "=", "http", ".", "getresponse", "(", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "ParseComError", "(", "response", ".", "reason", ")", "body", "=", "loads", "(", "response", ".", "read", "(", ")", ")", "if", "body", "[", "'error'", "]", ":", "raise", "ParseComError", "(", "body", "[", "'error'", "]", ")", "return", "True" ]
Site: http://parse.com API: https://www.parse.com/docs/push_guide#scheduled/REST Desc: Best app for system administrators
[ "Site", ":", "http", ":", "//", "parse", ".", "com", "API", ":", "https", ":", "//", "www", ".", "parse", ".", "com", "/", "docs", "/", "push_guide#scheduled", "/", "REST", "Desc", ":", "Best", "app", "for", "system", "administrators" ]
python
train
25.658537
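A hedged usage sketch for the Parse push helper above; it assumes the Django settings provide PARSE_APP_ID and PARSE_API_KEY and that the device id below was registered on the Parse side:

from dbmail.providers.parse_com.push import send   # module path taken from the record above

# 'event' becomes the push title, the second argument becomes the alert text.
send('device-user-42', 'Disk usage above 90%', event='Monitoring alert')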
CygnusNetworks/pypureomapi
pypureomapi.py
https://github.com/CygnusNetworks/pypureomapi/blob/ff4459678ec023fd56e64ce518a86860efec26bf/pypureomapi.py#L1354-L1366
def add_group(self, groupname, statements): """ Adds a group @type groupname: bytes @type statements: str """ msg = OmapiMessage.open(b"group") msg.message.append(("create", struct.pack("!I", 1))) msg.obj.append(("name", groupname)) msg.obj.append(("statements", statements)) response = self.query_server(msg) if response.opcode != OMAPI_OP_UPDATE: raise OmapiError("add group failed")
[ "def", "add_group", "(", "self", ",", "groupname", ",", "statements", ")", ":", "msg", "=", "OmapiMessage", ".", "open", "(", "b\"group\"", ")", "msg", ".", "message", ".", "append", "(", "(", "\"create\"", ",", "struct", ".", "pack", "(", "\"!I\"", ",", "1", ")", ")", ")", "msg", ".", "obj", ".", "append", "(", "(", "\"name\"", ",", "groupname", ")", ")", "msg", ".", "obj", ".", "append", "(", "(", "\"statements\"", ",", "statements", ")", ")", "response", "=", "self", ".", "query_server", "(", "msg", ")", "if", "response", ".", "opcode", "!=", "OMAPI_OP_UPDATE", ":", "raise", "OmapiError", "(", "\"add group failed\"", ")" ]
Adds a group @type groupname: bytes @type statements: str
[ "Adds", "a", "group" ]
python
train
30.615385
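A sketch of calling add_group against a dhcpd OMAPI port, assuming pypureomapi is installed and the key name and secret match the server's omapi-key configuration (the values below are placeholders):

from pypureomapi import Omapi

omapi = Omapi('127.0.0.1', 7911, b'defomapi', b'+bFQtBCta6j2vWkjPkNFVgA==')  # placeholder key
# Group names are bytes; the statements use plain dhcpd.conf syntax.
omapi.add_group(b'routers', 'option routers 10.0.0.1;')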
Chilipp/psyplot
psyplot/project.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/project.py#L346-L373
def _register_plotter(cls, identifier, module, plotter_name, plotter_cls=None): """ Register a plotter in the :class:`Project` class to easy access it Parameters ---------- identifier: str Name of the attribute that is used to filter for the instances belonging to this plotter module: str The module from where to import the `plotter_name` plotter_name: str The name of the plotter class in `module` plotter_cls: type The imported class of `plotter_name`. If None, it will be imported when it is needed """ if plotter_cls is not None: # plotter has already been imported def get_x(self): return self(plotter_cls) else: def get_x(self): return self(getattr(import_module(module), plotter_name)) setattr(cls, identifier, property(get_x, doc=( "List of data arrays that are plotted by :class:`%s.%s`" " plotters") % (module, plotter_name))) cls._registered_plotters[identifier] = (module, plotter_name)
[ "def", "_register_plotter", "(", "cls", ",", "identifier", ",", "module", ",", "plotter_name", ",", "plotter_cls", "=", "None", ")", ":", "if", "plotter_cls", "is", "not", "None", ":", "# plotter has already been imported", "def", "get_x", "(", "self", ")", ":", "return", "self", "(", "plotter_cls", ")", "else", ":", "def", "get_x", "(", "self", ")", ":", "return", "self", "(", "getattr", "(", "import_module", "(", "module", ")", ",", "plotter_name", ")", ")", "setattr", "(", "cls", ",", "identifier", ",", "property", "(", "get_x", ",", "doc", "=", "(", "\"List of data arrays that are plotted by :class:`%s.%s`\"", "\" plotters\"", ")", "%", "(", "module", ",", "plotter_name", ")", ")", ")", "cls", ".", "_registered_plotters", "[", "identifier", "]", "=", "(", "module", ",", "plotter_name", ")" ]
Register a plotter in the :class:`Project` class to easy access it Parameters ---------- identifier: str Name of the attribute that is used to filter for the instances belonging to this plotter module: str The module from where to import the `plotter_name` plotter_name: str The name of the plotter class in `module` plotter_cls: type The imported class of `plotter_name`. If None, it will be imported when it is needed
[ "Register", "a", "plotter", "in", "the", ":", "class", ":", "Project", "class", "to", "easy", "access", "it" ]
python
train
41.321429
pyQode/pyqode.core
pyqode/core/panels/search_and_replace.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/search_and_replace.py#L443-L487
def replace(self, text=None): """ Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replace properly, False if there is no more occurrences to replace. """ if text is None or isinstance(text, bool): text = self.lineEditReplace.text() current_occurences = self._current_occurrence() occurrences = self.get_occurences() if current_occurences == -1: self.select_next() current_occurences = self._current_occurrence() try: # prevent search request due to editor textChanged try: self.editor.textChanged.disconnect(self.request_search) except (RuntimeError, TypeError): # already disconnected pass occ = occurrences[current_occurences] cursor = self.editor.textCursor() cursor.setPosition(occ[0]) cursor.setPosition(occ[1], cursor.KeepAnchor) len_to_replace = len(cursor.selectedText()) len_replacement = len(text) offset = len_replacement - len_to_replace cursor.insertText(text) self.editor.setTextCursor(cursor) self._remove_occurrence(current_occurences, offset) current_occurences -= 1 self._set_current_occurrence(current_occurences) self.select_next() self.cpt_occurences = len(self.get_occurences()) self._update_label_matches() self._update_buttons() return True except IndexError: return False finally: self.editor.textChanged.connect(self.request_search)
[ "def", "replace", "(", "self", ",", "text", "=", "None", ")", ":", "if", "text", "is", "None", "or", "isinstance", "(", "text", ",", "bool", ")", ":", "text", "=", "self", ".", "lineEditReplace", ".", "text", "(", ")", "current_occurences", "=", "self", ".", "_current_occurrence", "(", ")", "occurrences", "=", "self", ".", "get_occurences", "(", ")", "if", "current_occurences", "==", "-", "1", ":", "self", ".", "select_next", "(", ")", "current_occurences", "=", "self", ".", "_current_occurrence", "(", ")", "try", ":", "# prevent search request due to editor textChanged", "try", ":", "self", ".", "editor", ".", "textChanged", ".", "disconnect", "(", "self", ".", "request_search", ")", "except", "(", "RuntimeError", ",", "TypeError", ")", ":", "# already disconnected", "pass", "occ", "=", "occurrences", "[", "current_occurences", "]", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "cursor", ".", "setPosition", "(", "occ", "[", "0", "]", ")", "cursor", ".", "setPosition", "(", "occ", "[", "1", "]", ",", "cursor", ".", "KeepAnchor", ")", "len_to_replace", "=", "len", "(", "cursor", ".", "selectedText", "(", ")", ")", "len_replacement", "=", "len", "(", "text", ")", "offset", "=", "len_replacement", "-", "len_to_replace", "cursor", ".", "insertText", "(", "text", ")", "self", ".", "editor", ".", "setTextCursor", "(", "cursor", ")", "self", ".", "_remove_occurrence", "(", "current_occurences", ",", "offset", ")", "current_occurences", "-=", "1", "self", ".", "_set_current_occurrence", "(", "current_occurences", ")", "self", ".", "select_next", "(", ")", "self", ".", "cpt_occurences", "=", "len", "(", "self", ".", "get_occurences", "(", ")", ")", "self", ".", "_update_label_matches", "(", ")", "self", ".", "_update_buttons", "(", ")", "return", "True", "except", "IndexError", ":", "return", "False", "finally", ":", "self", ".", "editor", ".", "textChanged", ".", "connect", "(", "self", ".", "request_search", ")" ]
Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replaced properly, False if there are no more occurrences to replace.
[ "Replaces", "the", "selected", "occurrence", "." ]
python
train
40.244444
pandas-dev/pandas
pandas/core/indexes/numeric.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/indexes/numeric.py#L364-L373
def get_value(self, series, key): """ we always want to get an index value, never a value """ if not is_scalar(key): raise InvalidIndexError k = com.values_from_object(key) loc = self.get_loc(k) new_values = com.values_from_object(series)[loc] return new_values
[ "def", "get_value", "(", "self", ",", "series", ",", "key", ")", ":", "if", "not", "is_scalar", "(", "key", ")", ":", "raise", "InvalidIndexError", "k", "=", "com", ".", "values_from_object", "(", "key", ")", "loc", "=", "self", ".", "get_loc", "(", "k", ")", "new_values", "=", "com", ".", "values_from_object", "(", "series", ")", "[", "loc", "]", "return", "new_values" ]
we always want to get an index value, never a value
[ "we", "always", "want", "to", "get", "an", "index", "value", "never", "a", "value" ]
python
train
31.4
opinkerfi/nago
nago/core/__init__.py
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/core/__init__.py#L66-L81
def get_nodes(): """ Returns all nodes in a list of dicts format """ cfg_file = "/etc/nago/nago.ini" config = ConfigParser.ConfigParser() config.read(cfg_file) result = {} for section in config.sections(): if section in ['main']: continue token = section node = Node(token) for key, value in config.items(token): node[key] = value result[token] = node return result
[ "def", "get_nodes", "(", ")", ":", "cfg_file", "=", "\"/etc/nago/nago.ini\"", "config", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "cfg_file", ")", "result", "=", "{", "}", "for", "section", "in", "config", ".", "sections", "(", ")", ":", "if", "section", "in", "[", "'main'", "]", ":", "continue", "token", "=", "section", "node", "=", "Node", "(", "token", ")", "for", "key", ",", "value", "in", "config", ".", "items", "(", "token", ")", ":", "node", "[", "key", "]", "=", "value", "result", "[", "token", "]", "=", "node", "return", "result" ]
Returns all nodes in a list of dicts format
[ "Returns", "all", "nodes", "in", "a", "list", "of", "dicts", "format" ]
python
train
27.875
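A short consumer sketch for get_nodes, assuming /etc/nago/nago.ini exists and contains one section per node token:

nodes = get_nodes()                 # {token: Node} built from the config file
for token, node in nodes.items():
    # Node objects are filled with the key/value pairs of their config section.
    print(token, node)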
schettino72/import-deps
import_deps/__init__.py
https://github.com/schettino72/import-deps/blob/311f2badd2c93f743d09664397f21e7eaa16e1f1/import_deps/__init__.py#L119-L145
def get_imports(self, module, return_fqn=False): """return set of imported modules that are in self :param module: PyModule :return: (set - str) of path names """ # print('####', module.fqn) # print(self.by_name.keys(), '\n\n') imports = set() raw_imports = ast_imports(module.path) for import_entry in raw_imports: # join 'from' and 'import' part of import statement full = ".".join(s for s in import_entry[:2] if s) import_level = import_entry[3] if import_level: # intra package imports intra = '.'.join(module.fqn[:-import_level] + [full]) imported = self._get_imported_module(intra) else: imported = self._get_imported_module(full) if imported: if return_fqn: imports.add('.'.join(imported.fqn)) else: imports.add(imported.path) return imports
[ "def", "get_imports", "(", "self", ",", "module", ",", "return_fqn", "=", "False", ")", ":", "# print('####', module.fqn)", "# print(self.by_name.keys(), '\\n\\n')", "imports", "=", "set", "(", ")", "raw_imports", "=", "ast_imports", "(", "module", ".", "path", ")", "for", "import_entry", "in", "raw_imports", ":", "# join 'from' and 'import' part of import statement", "full", "=", "\".\"", ".", "join", "(", "s", "for", "s", "in", "import_entry", "[", ":", "2", "]", "if", "s", ")", "import_level", "=", "import_entry", "[", "3", "]", "if", "import_level", ":", "# intra package imports", "intra", "=", "'.'", ".", "join", "(", "module", ".", "fqn", "[", ":", "-", "import_level", "]", "+", "[", "full", "]", ")", "imported", "=", "self", ".", "_get_imported_module", "(", "intra", ")", "else", ":", "imported", "=", "self", ".", "_get_imported_module", "(", "full", ")", "if", "imported", ":", "if", "return_fqn", ":", "imports", ".", "add", "(", "'.'", ".", "join", "(", "imported", ".", "fqn", ")", ")", "else", ":", "imports", ".", "add", "(", "imported", ".", "path", ")", "return", "imports" ]
return set of imported modules that are in self :param module: PyModule :return: (set - str) of path names
[ "return", "set", "of", "imported", "modules", "that", "are", "in", "self", ":", "param", "module", ":", "PyModule", ":", "return", ":", "(", "set", "-", "str", ")", "of", "path", "names" ]
python
train
37.481481
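A sketch of querying imports for a package on disk; the class name ModuleSet is assumed here to be the public container this method belongs to (the record only shows the method), and by_name is referenced in the code above:

import glob
from import_deps import ModuleSet   # assumed public entry point of the package

modset = ModuleSet(glob.glob('mypkg/**/*.py', recursive=True))
for name, mod in sorted(modset.by_name.items()):
    print(name, modset.get_imports(mod, return_fqn=True))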
materialsvirtuallab/monty
monty/functools.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/functools.py#L81-L225
def lru_cache(maxsize=128, typed=False): """ Least-recently-used cache decorator, which is a backport of the same function in Python >= 3.2. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used """ # Users should only access the lru_cache through its public API: # cache_info, cache_clear, and f.__wrapped__ # The internals of the lru_cache are encapsulated for thread safety and # to allow the implementation to change (including a possible C version). # Early detection of an erroneous call to @lru_cache without any arguments # resulting in the inner function being passed to maxsize instead of an # integer or None. if maxsize is not None and not isinstance(maxsize, int): raise TypeError('Expected maxsize to be an integer or None') # Constants shared by all lru cache instances: sentinel = object() # unique object used to signal cache misses make_key = _make_key # build a key from the function arguments PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields def decorating_function(user_function): cache = {} hits = [0] misses = [0] full = [False] cache_get = cache.get # bound method to lookup a key or return None lock = RLock() # because linkedlist updates aren't threadsafe root = [] # root of the circular doubly linked list root[:] = [root, root, None, None] # initialize by pointing to self r = [root] if maxsize == 0: def wrapper(*args, **kwds): # No caching -- just a statistics update after a successful call result = user_function(*args, **kwds) misses[0] += 1 return result elif maxsize is None: def wrapper(*args, **kwds): # Simple caching without ordering or size limit key = make_key(args, kwds, typed) result = cache_get(key, sentinel) if result is not sentinel: hits[0] += 1 return result result = user_function(*args, **kwds) cache[key] = result misses[0] += 1 return result else: def wrapper(*args, **kwds): # Size limited caching that tracks accesses by recency key = make_key(args, kwds, typed) with lock: link = cache_get(key) if link is not None: # Move the link to the front of the circular queue link_prev, link_next, _key, result = link link_prev[NEXT] = link_next link_next[PREV] = link_prev last = r[0][PREV] last[NEXT] = r[0][PREV] = link link[PREV] = last link[NEXT] = r[0] hits[0] += 1 return result result = user_function(*args, **kwds) with lock: if key in cache: # Getting here means that this same key was added to the # cache while the lock was released. Since the link # update is already done, we need only return the # computed result and update the count of misses. pass elif full[0]: # Use the old root to store the new key and result. oldroot = r[0] oldroot[KEY] = key oldroot[RESULT] = result # Empty the oldest link and make it the new root. # Keep a reference to the old key and old result to # prevent their ref counts from going to zero during the # update. That will prevent potentially arbitrary object # clean-up code (i.e. __del__) from running while we're # still adjusting the links. 
r[0] = oldroot[NEXT] oldkey = r[0][KEY] oldresult = r[0][RESULT] r[0][KEY] = r[0][RESULT] = None # Now update the cache dictionary. del cache[oldkey] # Save the potentially reentrant cache[key] assignment # for last, after the root and links have been put in # a consistent state. cache[key] = oldroot else: # Put result in a new link at the front of the queue. last = r[0][PREV] link = [last, r[0], key, result] last[NEXT] = r[0][PREV] = cache[key] = link full[0] = (len(cache) >= maxsize) misses[0] += 1 return result def cache_info(): """Report cache statistics""" with lock: return _CacheInfo(hits[0], misses[0], maxsize, len(cache)) def cache_clear(): """Clear the cache and cache statistics""" with lock: cache.clear() root[:] = [root, root, None, None] r[0] = root hits[0] = 0 misses[0] = 0 full[0] = False wrapper.cache_info = cache_info wrapper.cache_clear = cache_clear return update_wrapper(wrapper, user_function) return decorating_function
[ "def", "lru_cache", "(", "maxsize", "=", "128", ",", "typed", "=", "False", ")", ":", "# Users should only access the lru_cache through its public API:", "# cache_info, cache_clear, and f.__wrapped__", "# The internals of the lru_cache are encapsulated for thread safety and", "# to allow the implementation to change (including a possible C version).", "# Early detection of an erroneous call to @lru_cache without any arguments", "# resulting in the inner function being passed to maxsize instead of an", "# integer or None.", "if", "maxsize", "is", "not", "None", "and", "not", "isinstance", "(", "maxsize", ",", "int", ")", ":", "raise", "TypeError", "(", "'Expected maxsize to be an integer or None'", ")", "# Constants shared by all lru cache instances:", "sentinel", "=", "object", "(", ")", "# unique object used to signal cache misses", "make_key", "=", "_make_key", "# build a key from the function arguments", "PREV", ",", "NEXT", ",", "KEY", ",", "RESULT", "=", "0", ",", "1", ",", "2", ",", "3", "# names for the link fields", "def", "decorating_function", "(", "user_function", ")", ":", "cache", "=", "{", "}", "hits", "=", "[", "0", "]", "misses", "=", "[", "0", "]", "full", "=", "[", "False", "]", "cache_get", "=", "cache", ".", "get", "# bound method to lookup a key or return None", "lock", "=", "RLock", "(", ")", "# because linkedlist updates aren't threadsafe", "root", "=", "[", "]", "# root of the circular doubly linked list", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", ",", "None", "]", "# initialize by pointing to self", "r", "=", "[", "root", "]", "if", "maxsize", "==", "0", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# No caching -- just a statistics update after a successful call", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "misses", "[", "0", "]", "+=", "1", "return", "result", "elif", "maxsize", "is", "None", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# Simple caching without ordering or size limit", "key", "=", "make_key", "(", "args", ",", "kwds", ",", "typed", ")", "result", "=", "cache_get", "(", "key", ",", "sentinel", ")", "if", "result", "is", "not", "sentinel", ":", "hits", "[", "0", "]", "+=", "1", "return", "result", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "cache", "[", "key", "]", "=", "result", "misses", "[", "0", "]", "+=", "1", "return", "result", "else", ":", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "# Size limited caching that tracks accesses by recency", "key", "=", "make_key", "(", "args", ",", "kwds", ",", "typed", ")", "with", "lock", ":", "link", "=", "cache_get", "(", "key", ")", "if", "link", "is", "not", "None", ":", "# Move the link to the front of the circular queue", "link_prev", ",", "link_next", ",", "_key", ",", "result", "=", "link", "link_prev", "[", "NEXT", "]", "=", "link_next", "link_next", "[", "PREV", "]", "=", "link_prev", "last", "=", "r", "[", "0", "]", "[", "PREV", "]", "last", "[", "NEXT", "]", "=", "r", "[", "0", "]", "[", "PREV", "]", "=", "link", "link", "[", "PREV", "]", "=", "last", "link", "[", "NEXT", "]", "=", "r", "[", "0", "]", "hits", "[", "0", "]", "+=", "1", "return", "result", "result", "=", "user_function", "(", "*", "args", ",", "*", "*", "kwds", ")", "with", "lock", ":", "if", "key", "in", "cache", ":", "# Getting here means that this same key was added to the", "# cache while the lock was released. 
Since the link", "# update is already done, we need only return the", "# computed result and update the count of misses.", "pass", "elif", "full", "[", "0", "]", ":", "# Use the old root to store the new key and result.", "oldroot", "=", "r", "[", "0", "]", "oldroot", "[", "KEY", "]", "=", "key", "oldroot", "[", "RESULT", "]", "=", "result", "# Empty the oldest link and make it the new root.", "# Keep a reference to the old key and old result to", "# prevent their ref counts from going to zero during the", "# update. That will prevent potentially arbitrary object", "# clean-up code (i.e. __del__) from running while we're", "# still adjusting the links.", "r", "[", "0", "]", "=", "oldroot", "[", "NEXT", "]", "oldkey", "=", "r", "[", "0", "]", "[", "KEY", "]", "oldresult", "=", "r", "[", "0", "]", "[", "RESULT", "]", "r", "[", "0", "]", "[", "KEY", "]", "=", "r", "[", "0", "]", "[", "RESULT", "]", "=", "None", "# Now update the cache dictionary.", "del", "cache", "[", "oldkey", "]", "# Save the potentially reentrant cache[key] assignment", "# for last, after the root and links have been put in", "# a consistent state.", "cache", "[", "key", "]", "=", "oldroot", "else", ":", "# Put result in a new link at the front of the queue.", "last", "=", "r", "[", "0", "]", "[", "PREV", "]", "link", "=", "[", "last", ",", "r", "[", "0", "]", ",", "key", ",", "result", "]", "last", "[", "NEXT", "]", "=", "r", "[", "0", "]", "[", "PREV", "]", "=", "cache", "[", "key", "]", "=", "link", "full", "[", "0", "]", "=", "(", "len", "(", "cache", ")", ">=", "maxsize", ")", "misses", "[", "0", "]", "+=", "1", "return", "result", "def", "cache_info", "(", ")", ":", "\"\"\"Report cache statistics\"\"\"", "with", "lock", ":", "return", "_CacheInfo", "(", "hits", "[", "0", "]", ",", "misses", "[", "0", "]", ",", "maxsize", ",", "len", "(", "cache", ")", ")", "def", "cache_clear", "(", ")", ":", "\"\"\"Clear the cache and cache statistics\"\"\"", "with", "lock", ":", "cache", ".", "clear", "(", ")", "root", "[", ":", "]", "=", "[", "root", ",", "root", ",", "None", ",", "None", "]", "r", "[", "0", "]", "=", "root", "hits", "[", "0", "]", "=", "0", "misses", "[", "0", "]", "=", "0", "full", "[", "0", "]", "=", "False", "wrapper", ".", "cache_info", "=", "cache_info", "wrapper", ".", "cache_clear", "=", "cache_clear", "return", "update_wrapper", "(", "wrapper", ",", "user_function", ")", "return", "decorating_function" ]
Least-recently-used cache decorator, which is a backport of the same function in Python >= 3.2. If *maxsize* is set to None, the LRU features are disabled and the cache can grow without bound. If *typed* is True, arguments of different types will be cached separately. For example, f(3.0) and f(3) will be treated as distinct calls with distinct results. Arguments to the cached function must be hashable. View the cache statistics named tuple (hits, misses, maxsize, currsize) with f.cache_info(). Clear the cache and statistics with f.cache_clear(). Access the underlying function with f.__wrapped__. See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
[ "Least", "-", "recently", "-", "used", "cache", "decorator", "which", "is", "a", "backport", "of", "the", "same", "function", "in", "Python", ">", "=", "3", ".", "2", "." ]
python
train
42.944828
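The backport is used exactly like functools.lru_cache; a small self-contained example using the import path from the record above:

from monty.functools import lru_cache

@lru_cache(maxsize=128)
def fib(n):
    # Naive recursion becomes cheap once intermediate results are memoized.
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))            # 832040
print(fib.cache_info())   # CacheInfo(hits=..., misses=..., maxsize=128, currsize=...)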
tensorflow/tensorboard
tensorboard/plugins/core/core_plugin.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/core/core_plugin.py#L467-L485
def fix_flags(self, flags): """Fixes standard TensorBoard CLI flags to parser.""" FlagsError = base_plugin.FlagsError if flags.version_tb: pass elif flags.inspect: if flags.logdir and flags.event_file: raise FlagsError( 'Must specify either --logdir or --event_file, but not both.') if not (flags.logdir or flags.event_file): raise FlagsError('Must specify either --logdir or --event_file.') elif not flags.db and not flags.logdir: raise FlagsError('A logdir or db must be specified. ' 'For example `tensorboard --logdir mylogdir` ' 'or `tensorboard --db sqlite:~/.tensorboard.db`. ' 'Run `tensorboard --helpfull` for details and examples.') if flags.path_prefix.endswith('/'): flags.path_prefix = flags.path_prefix[:-1]
[ "def", "fix_flags", "(", "self", ",", "flags", ")", ":", "FlagsError", "=", "base_plugin", ".", "FlagsError", "if", "flags", ".", "version_tb", ":", "pass", "elif", "flags", ".", "inspect", ":", "if", "flags", ".", "logdir", "and", "flags", ".", "event_file", ":", "raise", "FlagsError", "(", "'Must specify either --logdir or --event_file, but not both.'", ")", "if", "not", "(", "flags", ".", "logdir", "or", "flags", ".", "event_file", ")", ":", "raise", "FlagsError", "(", "'Must specify either --logdir or --event_file.'", ")", "elif", "not", "flags", ".", "db", "and", "not", "flags", ".", "logdir", ":", "raise", "FlagsError", "(", "'A logdir or db must be specified. '", "'For example `tensorboard --logdir mylogdir` '", "'or `tensorboard --db sqlite:~/.tensorboard.db`. '", "'Run `tensorboard --helpfull` for details and examples.'", ")", "if", "flags", ".", "path_prefix", ".", "endswith", "(", "'/'", ")", ":", "flags", ".", "path_prefix", "=", "flags", ".", "path_prefix", "[", ":", "-", "1", "]" ]
Fixes standard TensorBoard CLI flags to parser.
[ "Fixes", "standard", "TensorBoard", "CLI", "flags", "to", "parser", "." ]
python
train
44.947368
Kronuz/pyScss
yapps2.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/yapps2.py#L1098-L1157
def generate(inputfilename, outputfilename='', dump=0, **flags): """Generate a grammar, given an input filename (X.g) and an output filename (defaulting to X.py).""" if not outputfilename: if inputfilename[-2:] == '.g': outputfilename = inputfilename[:-2] + '.py' else: raise Exception("Missing output filename") print 'Input Grammar:', inputfilename print 'Output File:', outputfilename DIVIDER = '\n%%\n' # This pattern separates the pre/post parsers preparser, postparser = None, None # Code before and after the parser desc # Read the entire file s = open(inputfilename, 'r').read() # See if there's a separation between the pre-parser and parser f = find(s, DIVIDER) if f >= 0: preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):] # See if there's a separation between the parser and post-parser f = find(s, DIVIDER) if f >= 0: s, postparser = s[:f], '\n\n' + s[f + len(DIVIDER):] # Create the parser and scanner p = ParserDescription(ParserDescriptionScanner(s)) if not p: return # Now parse the file t = wrap_error_reporter(p, 'Parser') if not t: return # Error if preparser is not None: t.preparser = preparser if postparser is not None: t.postparser = postparser # Check the options for f in t.options.keys(): for opt, _, _ in yapps_options: if f == opt: break else: print 'Warning: unrecognized option', f # Add command line options to the set for f in flags.keys(): t.options[f] = flags[f] # Generate the output if dump: t.dump_information() else: t.output = open(outputfilename, 'w') t.generate_output()
[ "def", "generate", "(", "inputfilename", ",", "outputfilename", "=", "''", ",", "dump", "=", "0", ",", "*", "*", "flags", ")", ":", "if", "not", "outputfilename", ":", "if", "inputfilename", "[", "-", "2", ":", "]", "==", "'.g'", ":", "outputfilename", "=", "inputfilename", "[", ":", "-", "2", "]", "+", "'.py'", "else", ":", "raise", "Exception", "(", "\"Missing output filename\"", ")", "print", "'Input Grammar:'", ",", "inputfilename", "print", "'Output File:'", ",", "outputfilename", "DIVIDER", "=", "'\\n%%\\n'", "# This pattern separates the pre/post parsers", "preparser", ",", "postparser", "=", "None", ",", "None", "# Code before and after the parser desc", "# Read the entire file", "s", "=", "open", "(", "inputfilename", ",", "'r'", ")", ".", "read", "(", ")", "# See if there's a separation between the pre-parser and parser", "f", "=", "find", "(", "s", ",", "DIVIDER", ")", "if", "f", ">=", "0", ":", "preparser", ",", "s", "=", "s", "[", ":", "f", "]", "+", "'\\n\\n'", ",", "s", "[", "f", "+", "len", "(", "DIVIDER", ")", ":", "]", "# See if there's a separation between the parser and post-parser", "f", "=", "find", "(", "s", ",", "DIVIDER", ")", "if", "f", ">=", "0", ":", "s", ",", "postparser", "=", "s", "[", ":", "f", "]", ",", "'\\n\\n'", "+", "s", "[", "f", "+", "len", "(", "DIVIDER", ")", ":", "]", "# Create the parser and scanner", "p", "=", "ParserDescription", "(", "ParserDescriptionScanner", "(", "s", ")", ")", "if", "not", "p", ":", "return", "# Now parse the file", "t", "=", "wrap_error_reporter", "(", "p", ",", "'Parser'", ")", "if", "not", "t", ":", "return", "# Error", "if", "preparser", "is", "not", "None", ":", "t", ".", "preparser", "=", "preparser", "if", "postparser", "is", "not", "None", ":", "t", ".", "postparser", "=", "postparser", "# Check the options", "for", "f", "in", "t", ".", "options", ".", "keys", "(", ")", ":", "for", "opt", ",", "_", ",", "_", "in", "yapps_options", ":", "if", "f", "==", "opt", ":", "break", "else", ":", "print", "'Warning: unrecognized option'", ",", "f", "# Add command line options to the set", "for", "f", "in", "flags", ".", "keys", "(", ")", ":", "t", ".", "options", "[", "f", "]", "=", "flags", "[", "f", "]", "# Generate the output", "if", "dump", ":", "t", ".", "dump_information", "(", ")", "else", ":", "t", ".", "output", "=", "open", "(", "outputfilename", ",", "'w'", ")", "t", ".", "generate_output", "(", ")" ]
Generate a grammar, given an input filename (X.g) and an output filename (defaulting to X.py).
[ "Generate", "a", "grammar", "given", "an", "input", "filename", "(", "X", ".", "g", ")", "and", "an", "output", "filename", "(", "defaulting", "to", "X", ".", "py", ")", "." ]
python
train
29.4
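A sketch of driving the yapps generator above programmatically; note the module is Python 2 code (print statements), so this assumes a Python 2 interpreter and a grammar file on disk:

from yapps2 import generate    # module name taken from the record's path

generate('calc.g')             # writes calc.py next to the grammar
generate('calc.g', dump=1)     # or just dump the parsed grammar information instead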
andsor/pypercolate
percolate/hpc.py
https://github.com/andsor/pypercolate/blob/92478c1fc4d4ff5ae157f7607fd74f6f9ec360ac/percolate/hpc.py#L561-L635
def bond_initialize_canonical_averages( canonical_statistics, **kwargs ): """ Initialize the canonical averages from a single-run cluster statistics Parameters ---------- canonical_statistics : 1-D structured ndarray Typically contains the canonical statistics for a range of values of the occupation probability ``p``. The dtype is the result of `canonical_statistics_dtype`. Returns ------- ret : structured ndarray The dype is the result of `canonical_averages_dtype`. ret['number_of_runs'] : 1-D ndarray of int Equals ``1`` (initial run). ret['percolation_probability_mean'] : 1-D array of float Equals ``canonical_statistics['percolation_probability']`` (if ``percolation_probability`` is present) ret['percolation_probability_m2'] : 1-D array of float Each entry is ``0.0`` ret['max_cluster_size_mean'] : 1-D array of float Equals ``canonical_statistics['max_cluster_size']`` ret['max_cluster_size_m2'] : 1-D array of float Each entry is ``0.0`` ret['moments_mean'] : 2-D array of float Equals ``canonical_statistics['moments']`` ret['moments_m2'] : 2-D array of float Each entry is ``0.0`` See Also -------- canonical_averages_dtype bond_canonical_statistics """ # initialize return array spanning_cluster = ( 'percolation_probability' in canonical_statistics.dtype.names ) # array should have the same size as the input array ret = np.empty_like( canonical_statistics, dtype=canonical_averages_dtype(spanning_cluster=spanning_cluster), ) ret['number_of_runs'] = 1 # initialize percolation probability mean and sum of squared differences if spanning_cluster: ret['percolation_probability_mean'] = ( canonical_statistics['percolation_probability'] ) ret['percolation_probability_m2'] = 0.0 # initialize maximum cluster size mean and sum of squared differences ret['max_cluster_size_mean'] = ( canonical_statistics['max_cluster_size'] ) ret['max_cluster_size_m2'] = 0.0 # initialize moments means and sums of squared differences ret['moments_mean'] = canonical_statistics['moments'] ret['moments_m2'] = 0.0 return ret
[ "def", "bond_initialize_canonical_averages", "(", "canonical_statistics", ",", "*", "*", "kwargs", ")", ":", "# initialize return array", "spanning_cluster", "=", "(", "'percolation_probability'", "in", "canonical_statistics", ".", "dtype", ".", "names", ")", "# array should have the same size as the input array", "ret", "=", "np", ".", "empty_like", "(", "canonical_statistics", ",", "dtype", "=", "canonical_averages_dtype", "(", "spanning_cluster", "=", "spanning_cluster", ")", ",", ")", "ret", "[", "'number_of_runs'", "]", "=", "1", "# initialize percolation probability mean and sum of squared differences", "if", "spanning_cluster", ":", "ret", "[", "'percolation_probability_mean'", "]", "=", "(", "canonical_statistics", "[", "'percolation_probability'", "]", ")", "ret", "[", "'percolation_probability_m2'", "]", "=", "0.0", "# initialize maximum cluster size mean and sum of squared differences", "ret", "[", "'max_cluster_size_mean'", "]", "=", "(", "canonical_statistics", "[", "'max_cluster_size'", "]", ")", "ret", "[", "'max_cluster_size_m2'", "]", "=", "0.0", "# initialize moments means and sums of squared differences", "ret", "[", "'moments_mean'", "]", "=", "canonical_statistics", "[", "'moments'", "]", "ret", "[", "'moments_m2'", "]", "=", "0.0", "return", "ret" ]
Initialize the canonical averages from a single-run cluster statistics Parameters ---------- canonical_statistics : 1-D structured ndarray Typically contains the canonical statistics for a range of values of the occupation probability ``p``. The dtype is the result of `canonical_statistics_dtype`. Returns ------- ret : structured ndarray The dtype is the result of `canonical_averages_dtype`. ret['number_of_runs'] : 1-D ndarray of int Equals ``1`` (initial run). ret['percolation_probability_mean'] : 1-D array of float Equals ``canonical_statistics['percolation_probability']`` (if ``percolation_probability`` is present) ret['percolation_probability_m2'] : 1-D array of float Each entry is ``0.0`` ret['max_cluster_size_mean'] : 1-D array of float Equals ``canonical_statistics['max_cluster_size']`` ret['max_cluster_size_m2'] : 1-D array of float Each entry is ``0.0`` ret['moments_mean'] : 2-D array of float Equals ``canonical_statistics['moments']`` ret['moments_m2'] : 2-D array of float Each entry is ``0.0`` See Also -------- canonical_averages_dtype bond_canonical_statistics
[ "Initialize", "the", "canonical", "averages", "from", "a", "single", "-", "run", "cluster", "statistics" ]
python
valid
30.386667
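The *_mean / *_m2 field pairs initialized above are the standard bookkeeping for online (Welford-style) averaging over repeated runs; a scalar analogue of how such pairs are typically updated (this is an illustration, not code from the package):

mean, m2, n = 0.0, 0.0, 0
for x in [0.42, 0.47, 0.45]:       # e.g. max_cluster_size from successive runs
    n += 1
    delta = x - mean
    mean += delta / n
    m2 += delta * (x - mean)       # sum of squared differences from the running mean
variance = m2 / (n - 1)            # sample variance once n > 1
print(mean, variance)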
HazyResearch/pdftotree
pdftotree/utils/pdf/node.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/node.py#L118-L128
def get_grid(self): """ Standardize the layout of the table into grids """ mentions, lines = _split_text_n_lines(self.elems) # Sort mentions in reading order where y values are snapped to half # height-sized grid mentions.sort(key=lambda m: (m.yc_grid, m.xc)) grid = Grid(mentions, lines, self) return grid
[ "def", "get_grid", "(", "self", ")", ":", "mentions", ",", "lines", "=", "_split_text_n_lines", "(", "self", ".", "elems", ")", "# Sort mentions in reading order where y values are snapped to half", "# height-sized grid", "mentions", ".", "sort", "(", "key", "=", "lambda", "m", ":", "(", "m", ".", "yc_grid", ",", "m", ".", "xc", ")", ")", "grid", "=", "Grid", "(", "mentions", ",", "lines", ",", "self", ")", "return", "grid" ]
Standardize the layout of the table into grids
[ "Standardize", "the", "layout", "of", "the", "table", "into", "grids" ]
python
train
33.545455
jonathf/chaospy
chaospy/poly/constructor/identifier.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/poly/constructor/identifier.py#L46-L56
def _identify_dict(core): """Specification for a dictionary.""" if not core: return {}, 1, (), int core = core.copy() key = sorted(core.keys(), key=chaospy.poly.base.sort_key)[0] shape = numpy.array(core[key]).shape dtype = numpy.array(core[key]).dtype dim = len(key) return core, dim, shape, dtype
[ "def", "_identify_dict", "(", "core", ")", ":", "if", "not", "core", ":", "return", "{", "}", ",", "1", ",", "(", ")", ",", "int", "core", "=", "core", ".", "copy", "(", ")", "key", "=", "sorted", "(", "core", ".", "keys", "(", ")", ",", "key", "=", "chaospy", ".", "poly", ".", "base", ".", "sort_key", ")", "[", "0", "]", "shape", "=", "numpy", ".", "array", "(", "core", "[", "key", "]", ")", ".", "shape", "dtype", "=", "numpy", ".", "array", "(", "core", "[", "key", "]", ")", ".", "dtype", "dim", "=", "len", "(", "key", ")", "return", "core", ",", "dim", ",", "shape", ",", "dtype" ]
Specification for a dictionary.
[ "Specification", "for", "a", "dictionary", "." ]
python
train
29.909091
collectiveacuity/labPack
labpack/storage/dropbox.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/storage/dropbox.py#L422-L462
def load(self, record_key, secret_key=''): ''' a method to retrieve byte data of appdata record :param record_key: string with name of record :param secret_key: [optional] string used to decrypt data :return: byte data for record body ''' title = '%s.load' % self.__class__.__name__ # validate inputs input_fields = { 'record_key': record_key, 'secret_key': secret_key } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct file path file_path = '/%s' % record_key # request file data try: metadata, response = self.dropbox.files_download(file_path) except Exception as err: if str(err).find("LookupError('not_found'") > -1: raise Exception('%s(record_key=%s) does not exist.' % (title, record_key)) else: raise DropboxConnectionError(title) record_data = response.content # decrypt (if necessary) if secret_key: from labpack.encryption import cryptolab record_data = cryptolab.decrypt(record_data, secret_key) return record_data
[ "def", "load", "(", "self", ",", "record_key", ",", "secret_key", "=", "''", ")", ":", "title", "=", "'%s.load'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs\r", "input_fields", "=", "{", "'record_key'", ":", "record_key", ",", "'secret_key'", ":", "secret_key", "}", "for", "key", ",", "value", "in", "input_fields", ".", "items", "(", ")", ":", "if", "value", ":", "object_title", "=", "'%s(%s=%s)'", "%", "(", "title", ",", "key", ",", "str", "(", "value", ")", ")", "self", ".", "fields", ".", "validate", "(", "value", ",", "'.%s'", "%", "key", ",", "object_title", ")", "# construct file path\r", "file_path", "=", "'/%s'", "%", "record_key", "# request file data\r", "try", ":", "metadata", ",", "response", "=", "self", ".", "dropbox", ".", "files_download", "(", "file_path", ")", "except", "Exception", "as", "err", ":", "if", "str", "(", "err", ")", ".", "find", "(", "\"LookupError('not_found'\"", ")", ">", "-", "1", ":", "raise", "Exception", "(", "'%s(record_key=%s) does not exist.'", "%", "(", "title", ",", "record_key", ")", ")", "else", ":", "raise", "DropboxConnectionError", "(", "title", ")", "record_data", "=", "response", ".", "content", "# decrypt (if necessary)\r", "if", "secret_key", ":", "from", "labpack", ".", "encryption", "import", "cryptolab", "record_data", "=", "cryptolab", ".", "decrypt", "(", "record_data", ",", "secret_key", ")", "return", "record_data" ]
a method to retrieve byte data of appdata record :param record_key: string with name of record :param secret_key: [optional] string used to decrypt data :return: byte data for record body
[ "a", "method", "to", "retrieve", "byte", "data", "of", "appdata", "record", ":", "param", "record_key", ":", "string", "with", "name", "of", "record", ":", "param", "secret_key", ":", "[", "optional", "]", "string", "used", "to", "decrypt", "data", ":", "return", ":", "byte", "data", "for", "record", "body" ]
python
train
33.658537
ihgazni2/elist
elist/elist.py
https://github.com/ihgazni2/elist/blob/8c07b5029bda34ead60ce10335ceb145f209263c/elist/elist.py#L5492-L5508
def broken_seqs(ol,break_points): ''' ol = initRange(0,20,1) ol break_points = [1,6,14,9] secs = broken_seqs(ol,break_points) forEach(secs,print) ''' bps = list(break_points) length = ol.__len__() rgs = rangize(bps,length) rslt = [] for i in range(0,rgs.__len__()): si,ei = rgs[i] sec = ol[si:ei] rslt.append(sec) return(rslt)
[ "def", "broken_seqs", "(", "ol", ",", "break_points", ")", ":", "bps", "=", "list", "(", "break_points", ")", "length", "=", "ol", ".", "__len__", "(", ")", "rgs", "=", "rangize", "(", "bps", ",", "length", ")", "rslt", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "rgs", ".", "__len__", "(", ")", ")", ":", "si", ",", "ei", "=", "rgs", "[", "i", "]", "sec", "=", "ol", "[", "si", ":", "ei", "]", "rslt", ".", "append", "(", "sec", ")", "return", "(", "rslt", ")" ]
ol = initRange(0,20,1) ol break_points = [1,6,14,9] secs = broken_seqs(ol,break_points) forEach(secs,print)
[ "ol", "=", "initRange", "(", "0", "20", "1", ")", "ol", "break_points", "=", "[", "1", "6", "14", "9", "]", "secs", "=", "broken_seqs", "(", "ol", "break_points", ")", "forEach", "(", "secs", "print", ")" ]
python
valid
23.882353
google/grr
grr/core/grr_response_core/lib/util/compatibility.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/util/compatibility.py#L152-L179
def MakeType(name, base_classes, namespace): """A compatibility wrapper for the `type` built-in function. In Python 2 `type` (used as a type constructor) requires the name argument to be a `bytes` object whereas in Python 3 it is required to be an `unicode` object. Since class name is human readable text rather than arbitrary stream of bytes, the Python 3 behaviour is considered to be the sane one. Once support for Python 2 is dropped all invocations of this call can be replaced with the `type` built-in. Args: name: A name of the type to create. base_classes: A tuple of base classes that the returned type is supposed to derive from. namespace: A dictionary of methods and fields that the returned type is supposed to contain. Returns: A new type with specified parameters. """ precondition.AssertType(name, str) if PY2: name = name.encode("ascii") return type(name, base_classes, namespace)
[ "def", "MakeType", "(", "name", ",", "base_classes", ",", "namespace", ")", ":", "precondition", ".", "AssertType", "(", "name", ",", "str", ")", "if", "PY2", ":", "name", "=", "name", ".", "encode", "(", "\"ascii\"", ")", "return", "type", "(", "name", ",", "base_classes", ",", "namespace", ")" ]
A compatibility wrapper for the `type` built-in function. In Python 2 `type` (used as a type constructor) requires the name argument to be a `bytes` object whereas in Python 3 it is required to be an `unicode` object. Since class name is human readable text rather than arbitrary stream of bytes, the Python 3 behaviour is considered to be the sane one. Once support for Python 2 is dropped all invocations of this call can be replaced with the `type` built-in. Args: name: A name of the type to create. base_classes: A tuple of base classes that the returned type is supposed to derive from. namespace: A dictionary of methods and fields that the returned type is supposed to contain. Returns: A new type with specified parameters.
[ "A", "compatibility", "wrapper", "for", "the", "type", "built", "-", "in", "function", "." ]
python
train
33.857143
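Since on Python 3 the wrapper above reduces to the built-in `type`, a minimal illustration of what it produces can use `type` directly; the class name and namespace below are invented for the example.

# Build a class dynamically from a name, a tuple of bases, and a namespace dict.
Point = type("Point", (object,), {"x": 0, "y": 0, "coords": lambda self: (self.x, self.y)})

p = Point()
print(Point.__name__, p.coords())   # Point (0, 0)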
mwickert/scikit-dsp-comm
sk_dsp_comm/sigsys.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/sigsys.py#L474-L531
def OS_filter(x,h,N,mode=0): """ Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1) """ P = len(h) # zero pad start of x so first frame can recover first true samples of x x = np.hstack((np.zeros(P-1),x)) L = N - P + 1 Nx = len(x) Nframe = int(np.ceil(Nx/float(L))) # zero pad end of x to full number of frames needed x = np.hstack((x,np.zeros(Nframe*L-Nx))) y = np.zeros(int(Nframe*N)) # create an instrumentation matrix to observe the overlap and save behavior y_mat = np.zeros((Nframe,int(Nframe*N))) H = fft.fft(h,N) # begin the filtering operation for k in range(Nframe): xk = x[k*L:k*L+N] Xk = fft.fft(xk,N) Yk = H*Xk yk = np.real(fft.ifft(Yk)) # imag part should be zero y[k*L+P-1:k*L+N] = yk[P-1:] y_mat[k,k*L:k*L+N] = yk if mode == 1: return y[P-1:Nx], y_mat[:,P-1:Nx] else: return y[P-1:Nx]
[ "def", "OS_filter", "(", "x", ",", "h", ",", "N", ",", "mode", "=", "0", ")", ":", "P", "=", "len", "(", "h", ")", "# zero pad start of x so first frame can recover first true samples of x", "x", "=", "np", ".", "hstack", "(", "(", "np", ".", "zeros", "(", "P", "-", "1", ")", ",", "x", ")", ")", "L", "=", "N", "-", "P", "+", "1", "Nx", "=", "len", "(", "x", ")", "Nframe", "=", "int", "(", "np", ".", "ceil", "(", "Nx", "/", "float", "(", "L", ")", ")", ")", "# zero pad end of x to full number of frames needed", "x", "=", "np", ".", "hstack", "(", "(", "x", ",", "np", ".", "zeros", "(", "Nframe", "*", "L", "-", "Nx", ")", ")", ")", "y", "=", "np", ".", "zeros", "(", "int", "(", "Nframe", "*", "N", ")", ")", "# create an instrumentation matrix to observe the overlap and save behavior", "y_mat", "=", "np", ".", "zeros", "(", "(", "Nframe", ",", "int", "(", "Nframe", "*", "N", ")", ")", ")", "H", "=", "fft", ".", "fft", "(", "h", ",", "N", ")", "# begin the filtering operation", "for", "k", "in", "range", "(", "Nframe", ")", ":", "xk", "=", "x", "[", "k", "*", "L", ":", "k", "*", "L", "+", "N", "]", "Xk", "=", "fft", ".", "fft", "(", "xk", ",", "N", ")", "Yk", "=", "H", "*", "Xk", "yk", "=", "np", ".", "real", "(", "fft", ".", "ifft", "(", "Yk", ")", ")", "# imag part should be zero", "y", "[", "k", "*", "L", "+", "P", "-", "1", ":", "k", "*", "L", "+", "N", "]", "=", "yk", "[", "P", "-", "1", ":", "]", "y_mat", "[", "k", ",", "k", "*", "L", ":", "k", "*", "L", "+", "N", "]", "=", "yk", "if", "mode", "==", "1", ":", "return", "y", "[", "P", "-", "1", ":", "Nx", "]", ",", "y_mat", "[", ":", ",", "P", "-", "1", ":", "Nx", "]", "else", ":", "return", "y", "[", "P", "-", "1", ":", "Nx", "]" ]
Overlap and save transform domain FIR filtering. This function implements the classical overlap and save method of transform domain filtering using a length P FIR filter. Parameters ---------- x : input signal to be filtered as an ndarray h : FIR filter coefficients as an ndarray of length P N : FFT size > P, typically a power of two mode : 0 or 1, when 1 returns a diagnostic matrix Returns ------- y : the filtered output as an ndarray y_mat : an ndarray whose rows are the individual overlap outputs. Notes ----- y_mat is used for diagnostics and to gain understanding of the algorithm. Examples -------- >>> n = arange(0,100) >>> x = cos(2*pi*0.05*n) >>> b = ones(10) >>> y = OS_filter(x,h,N) >>> # set mode = 1 >>> y, y_mat = OS_filter(x,h,N,1)
[ "Overlap", "and", "save", "transform", "domain", "FIR", "filtering", ".", "This", "function", "implements", "the", "classical", "overlap", "and", "save", "method", "of", "transform", "domain", "filtering", "using", "a", "length", "P", "FIR", "filter", ".", "Parameters", "----------", "x", ":", "input", "signal", "to", "be", "filtered", "as", "an", "ndarray", "h", ":", "FIR", "filter", "coefficients", "as", "an", "ndarray", "of", "length", "P", "N", ":", "FFT", "size", ">", "P", "typically", "a", "power", "of", "two", "mode", ":", "0", "or", "1", "when", "1", "returns", "a", "diagnostic", "matrix", "Returns", "-------", "y", ":", "the", "filtered", "output", "as", "an", "ndarray", "y_mat", ":", "an", "ndarray", "whose", "rows", "are", "the", "individual", "overlap", "outputs", ".", "Notes", "-----", "y_mat", "is", "used", "for", "diagnostics", "and", "to", "gain", "understanding", "of", "the", "algorithm", ".", "Examples", "--------", ">>>", "n", "=", "arange", "(", "0", "100", ")", ">>>", "x", "=", "cos", "(", "2", "*", "pi", "*", "0", ".", "05", "*", "n", ")", ">>>", "b", "=", "ones", "(", "10", ")", ">>>", "y", "=", "OS_filter", "(", "x", "h", "N", ")", ">>>", "#", "set", "mode", "=", "1", ">>>", "y", "y_mat", "=", "OS_filter", "(", "x", "h", "N", "1", ")" ]
python
valid
29.431034
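A usage sketch for the record above, following its own docstring example; the variable names and the FFT size N = 32 are choices made here, and the import assumes sk_dsp_comm (plus scipy for the reference filter) is installed. The overlap-and-save output should agree with direct-form FIR filtering.

import numpy as np
from scipy import signal
from sk_dsp_comm.sigsys import OS_filter   # module path taken from this record

n = np.arange(0, 100)
x = np.cos(2 * np.pi * 0.05 * n)   # test signal
h = np.ones(10)                    # length-10 moving-average FIR filter
N = 32                             # FFT size, must exceed len(h)

y = OS_filter(x, h, N)             # overlap-and-save filtering
y_ref = signal.lfilter(h, 1, x)    # direct-form reference
print(np.allclose(y, y_ref))       # expected: True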
cogniteev/docido-python-sdk
docido_sdk/toolbox/date_ext.py
https://github.com/cogniteev/docido-python-sdk/blob/58ecb6c6f5757fd40c0601657ab18368da7ddf33/docido_sdk/toolbox/date_ext.py#L38-L51
def feeling_lucky(cls, obj): """Tries to convert given object to an UTC timestamp is ms, based on its type. """ if isinstance(obj, six.string_types): return cls.from_str(obj) elif isinstance(obj, six.integer_types) and obj <= MAX_POSIX_TIMESTAMP: return cls.from_posix_timestamp(obj) elif isinstance(obj, datetime): return cls.from_datetime(obj) else: raise ValueError( u"Don't know how to get timestamp from '{}'".format(obj) )
[ "def", "feeling_lucky", "(", "cls", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "six", ".", "string_types", ")", ":", "return", "cls", ".", "from_str", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "six", ".", "integer_types", ")", "and", "obj", "<=", "MAX_POSIX_TIMESTAMP", ":", "return", "cls", ".", "from_posix_timestamp", "(", "obj", ")", "elif", "isinstance", "(", "obj", ",", "datetime", ")", ":", "return", "cls", ".", "from_datetime", "(", "obj", ")", "else", ":", "raise", "ValueError", "(", "u\"Don't know how to get timestamp from '{}'\"", ".", "format", "(", "obj", ")", ")" ]
Tries to convert given object to a UTC timestamp in ms, based on its type.
[ "Tries", "to", "convert", "given", "object", "to", "an", "UTC", "timestamp", "is", "ms", "based", "on", "its", "type", "." ]
python
train
39.071429
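The method above dispatches on the input type (string, integer seconds, or datetime) to produce a UTC timestamp in ms. The snippet below is a plain-Python sketch of that dispatch idea only; it does not use docido_sdk, and `fromisoformat` merely stands in for the class's own `from_str` parser.

from datetime import datetime, timezone

def to_utc_ms(obj):
    if isinstance(obj, str):
        dt = datetime.fromisoformat(obj)                 # stand-in for from_str()
        return int(dt.replace(tzinfo=timezone.utc).timestamp() * 1000)
    elif isinstance(obj, int):
        return obj * 1000                                # POSIX seconds -> milliseconds
    elif isinstance(obj, datetime):
        return int(obj.replace(tzinfo=timezone.utc).timestamp() * 1000)
    raise ValueError("Don't know how to get timestamp from {!r}".format(obj))

print(to_utc_ms(datetime(2020, 1, 1)))   # 1577836800000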
datajoint/datajoint-python
datajoint/expression.py
https://github.com/datajoint/datajoint-python/blob/4f29bb154a7ed2b8b64b4d3a9c8be4c16b39621c/datajoint/expression.py#L606-L610
def make_argument_subquery(arg): """ Decide when a Join argument needs to be wrapped in a subquery """ return Subquery.create(arg) if isinstance(arg, (GroupBy, Projection)) or arg.restriction else arg
[ "def", "make_argument_subquery", "(", "arg", ")", ":", "return", "Subquery", ".", "create", "(", "arg", ")", "if", "isinstance", "(", "arg", ",", "(", "GroupBy", ",", "Projection", ")", ")", "or", "arg", ".", "restriction", "else", "arg" ]
Decide when a Join argument needs to be wrapped in a subquery
[ "Decide", "when", "a", "Join", "argument", "needs", "to", "be", "wrapped", "in", "a", "subquery" ]
python
train
45.6
acutesoftware/virtual-AI-simulator
vais/worlds.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/worlds.py#L304-L332
def highlight_cell_surroundings(self, target_y, target_x): """ highlights the cells around a target to make it simpler to see on a grid. Currently assumes the target is within the boundary by 1 on all sides """ #print('SELF_WORLD', self.world) #print('target_y, target_x, self.world.grd.grid_height, self.world.grd.grid_width ', target_y, target_x, self.#world.grd.grid_height, self.world.grd.grid_width ) #exit(0) if target_y < 1: print("target too close to top") if target_y > self.world.grd.grid_height - 1: print("target too close to bottom") if target_x < 1: print("target too close to left") if target_x < self.world.grd.grid_width: print("target too close to right") #valid_cells = ['\\', '-', '|', '/'] self.world.grd.set_tile(target_y - 1, target_x - 1, '\\') self.world.grd.set_tile(target_y - 0, target_x - 1, '-') self.world.grd.set_tile(target_y + 1, target_x - 1, '/') self.world.grd.set_tile(target_y - 1, target_x - 0, '|') self.world.grd.set_tile(target_y + 1, target_x - 0, '|') self.world.grd.set_tile(target_y - 1, target_x + 1, '/') self.world.grd.set_tile(target_y - 0, target_x + 1, '-') self.world.grd.set_tile(target_y + 1, target_x + 1, '\\')
[ "def", "highlight_cell_surroundings", "(", "self", ",", "target_y", ",", "target_x", ")", ":", "#print('SELF_WORLD', self.world)", "#print('target_y, target_x, self.world.grd.grid_height, self.world.grd.grid_width ', target_y, target_x, self.#world.grd.grid_height, self.world.grd.grid_width )", "#exit(0)", "if", "target_y", "<", "1", ":", "print", "(", "\"target too close to top\"", ")", "if", "target_y", ">", "self", ".", "world", ".", "grd", ".", "grid_height", "-", "1", ":", "print", "(", "\"target too close to bottom\"", ")", "if", "target_x", "<", "1", ":", "print", "(", "\"target too close to left\"", ")", "if", "target_x", "<", "self", ".", "world", ".", "grd", ".", "grid_width", ":", "print", "(", "\"target too close to right\"", ")", "#valid_cells = ['\\\\', '-', '|', '/'] ", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "-", "1", ",", "target_x", "-", "1", ",", "'\\\\'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "-", "0", ",", "target_x", "-", "1", ",", "'-'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "+", "1", ",", "target_x", "-", "1", ",", "'/'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "-", "1", ",", "target_x", "-", "0", ",", "'|'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "+", "1", ",", "target_x", "-", "0", ",", "'|'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "-", "1", ",", "target_x", "+", "1", ",", "'/'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "-", "0", ",", "target_x", "+", "1", ",", "'-'", ")", "self", ".", "world", ".", "grd", ".", "set_tile", "(", "target_y", "+", "1", ",", "target_x", "+", "1", ",", "'\\\\'", ")" ]
highlights the cells around a target to make it simpler to see on a grid. Currently assumes the target is within the boundary by 1 on all sides
[ "highlights", "the", "cells", "around", "a", "target", "to", "make", "it", "simpler", "to", "see", "on", "a", "grid", ".", "Currently", "assumes", "the", "target", "is", "within", "the", "boundary", "by", "1", "on", "all", "sides" ]
python
train
47.689655
mitsei/dlkit
dlkit/json_/learning/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/learning/managers.py#L1673-L1690
def get_objective_hierarchy_design_session(self, proxy): """Gets the session for designing objective hierarchies. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveHierarchyDesignSession) - an ``ObjectiveHierarchyDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_hierarchy_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_hierarchy_design()`` is ``true``.* """ if not self.supports_objective_hierarchy_design(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.ObjectiveHierarchyDesignSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_objective_hierarchy_design_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_objective_hierarchy_design", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", "ObjectiveHierarchyDesignSession", "(", "proxy", "=", "proxy", ",", "runtime", "=", "self", ".", "_runtime", ")" ]
Gets the session for designing objective hierarchies. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.learning.ObjectiveHierarchyDesignSession) - an ``ObjectiveHierarchyDesignSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_objective_hierarchy_design()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_objective_hierarchy_design()`` is ``true``.*
[ "Gets", "the", "session", "for", "designing", "objective", "hierarchies", "." ]
python
train
48.111111
peopledoc/django-agnocomplete
agnocomplete/views.py
https://github.com/peopledoc/django-agnocomplete/blob/9bf21db2f2036ba5059b843acd32902a09192053/agnocomplete/views.py#L20-L46
def get_error(exc): """ Return the appropriate HTTP status code according to the Exception/Error. """ if isinstance(exc, HTTPError): # Returning the HTTP Error code coming from requests module return exc.response.status_code, text(exc.response.content) if isinstance(exc, Timeout): # A timeout is a 408, and it's not a HTTPError (why? dunno). return 408, exc if isinstance(exc, Http404): # 404 is 404 return 404, exc if isinstance(exc, PermissionDenied): # Permission denied is 403 return 403, exc if isinstance(exc, SuspiciousOperation): # Shouldn't happen, but you never know return 400, exc # The default error code is 500 return 500, exc
[ "def", "get_error", "(", "exc", ")", ":", "if", "isinstance", "(", "exc", ",", "HTTPError", ")", ":", "# Returning the HTTP Error code coming from requests module", "return", "exc", ".", "response", ".", "status_code", ",", "text", "(", "exc", ".", "response", ".", "content", ")", "if", "isinstance", "(", "exc", ",", "Timeout", ")", ":", "# A timeout is a 408, and it's not a HTTPError (why? dunno).", "return", "408", ",", "exc", "if", "isinstance", "(", "exc", ",", "Http404", ")", ":", "# 404 is 404", "return", "404", ",", "exc", "if", "isinstance", "(", "exc", ",", "PermissionDenied", ")", ":", "# Permission denied is 403", "return", "403", ",", "exc", "if", "isinstance", "(", "exc", ",", "SuspiciousOperation", ")", ":", "# Shouldn't happen, but you never know", "return", "400", ",", "exc", "# The default error code is 500", "return", "500", ",", "exc" ]
Return the appropriate HTTP status code according to the Exception/Error.
[ "Return", "the", "appropriate", "HTTP", "status", "code", "according", "to", "the", "Exception", "/", "Error", "." ]
python
train
27.444444
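A hypothetical way a view could use this helper; the wrapper function, its arguments, and the JSON payload shape are invented for illustration, while `get_error` and its (status code, exception) return value come from the record above. Django must be installed for `JsonResponse`.

from django.http import JsonResponse
from agnocomplete.views import get_error

def guarded(request, fetch):
    # fetch is any callable that may raise requests/Django exceptions
    try:
        return JsonResponse({"data": fetch()})
    except Exception as exc:
        status, payload = get_error(exc)
        return JsonResponse({"errors": [str(payload)]}, status=status)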
MartinThoma/hwrt
hwrt/partitions.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/partitions.py#L160-L177
def get_top_segmentations(table, n): """ Parameters ---------- table : matrix of probabilities Each cell (i, j) of `table` gives the probability that i and j are in the same symbol. n : int Number of best segmentations which get returned """ stroke_count = list(range(len(table))) topf = TopFinder(n) for curr_segmentation in all_segmentations(stroke_count): curr_seg_score = score_segmentation(curr_segmentation, table) topf.push(curr_segmentation, curr_seg_score) for el, score in topf: yield [normalize_segmentation(el), score]
[ "def", "get_top_segmentations", "(", "table", ",", "n", ")", ":", "stroke_count", "=", "list", "(", "range", "(", "len", "(", "table", ")", ")", ")", "topf", "=", "TopFinder", "(", "n", ")", "for", "curr_segmentation", "in", "all_segmentations", "(", "stroke_count", ")", ":", "curr_seg_score", "=", "score_segmentation", "(", "curr_segmentation", ",", "table", ")", "topf", ".", "push", "(", "curr_segmentation", ",", "curr_seg_score", ")", "for", "el", ",", "score", "in", "topf", ":", "yield", "[", "normalize_segmentation", "(", "el", ")", ",", "score", "]" ]
Parameters ---------- table : matrix of probabilities Each cell (i, j) of `table` gives the probability that i and j are in the same symbol. n : int Number of best segmentations which get returned
[ "Parameters", "----------", "table", ":", "matrix", "of", "probabilities", "Each", "cell", "(", "i", "j", ")", "of", "table", "gives", "the", "probability", "that", "i", "and", "j", "are", "in", "the", "same", "symbol", ".", "n", ":", "int", "Number", "of", "best", "segmentations", "which", "get", "returned" ]
python
train
33.444444
mitsei/dlkit
dlkit/authz_adapter/assessment_authoring/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/authz_adapter/assessment_authoring/sessions.py#L2039-L2045
def get_sequence_rule_enablers_by_search(self, sequence_rule_enabler_query, sequence_rule_enabler_search): """Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search""" # Implemented from azosid template for - # osid.resource.ResourceSearchSession.get_resources_by_search_template if not self._can('search'): raise PermissionDenied() return self._provider_session.get_sequence_rule_enablers_by_search(sequence_rule_enabler_query, sequence_rule_enabler_search)
[ "def", "get_sequence_rule_enablers_by_search", "(", "self", ",", "sequence_rule_enabler_query", ",", "sequence_rule_enabler_search", ")", ":", "# Implemented from azosid template for -", "# osid.resource.ResourceSearchSession.get_resources_by_search_template", "if", "not", "self", ".", "_can", "(", "'search'", ")", ":", "raise", "PermissionDenied", "(", ")", "return", "self", ".", "_provider_session", ".", "get_sequence_rule_enablers_by_search", "(", "sequence_rule_enabler_query", ",", "sequence_rule_enabler_search", ")" ]
Pass through to provider SequenceRuleEnablerSearchSession.get_sequence_rule_enablers_by_search
[ "Pass", "through", "to", "provider", "SequenceRuleEnablerSearchSession", ".", "get_sequence_rule_enablers_by_search" ]
python
train
77.714286
coleifer/peewee
examples/analytics/reports.py
https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/examples/analytics/reports.py#L40-L47
def cookies(self): """ Retrieve the cookies header from all the users who visited. """ return (self.get_query() .select(PageView.ip, PageView.headers['Cookie']) .where(PageView.headers['Cookie'].is_null(False)) .tuples())
[ "def", "cookies", "(", "self", ")", ":", "return", "(", "self", ".", "get_query", "(", ")", ".", "select", "(", "PageView", ".", "ip", ",", "PageView", ".", "headers", "[", "'Cookie'", "]", ")", ".", "where", "(", "PageView", ".", "headers", "[", "'Cookie'", "]", ".", "is_null", "(", "False", ")", ")", ".", "tuples", "(", ")", ")" ]
Retrieve the cookies header from all the users who visited.
[ "Retrieve", "the", "cookies", "header", "from", "all", "the", "users", "who", "visited", "." ]
python
train
36.75
secdev/scapy
scapy/layers/tls/record.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/tls/record.py#L83-L108
def m2i(self, pkt, m): """ Try to parse one of the TLS subprotocols (ccs, alert, handshake or application_data). This is used inside a loop managed by .getfield(). """ cls = Raw if pkt.type == 22: if len(m) >= 1: msgtype = orb(m[0]) cls = _tls_handshake_cls.get(msgtype, Raw) elif pkt.type == 20: cls = TLSChangeCipherSpec elif pkt.type == 21: cls = TLSAlert elif pkt.type == 23: cls = TLSApplicationData if cls is Raw: return Raw(m) else: try: return cls(m, tls_session=pkt.tls_session) except Exception: if conf.debug_dissector: raise return Raw(m)
[ "def", "m2i", "(", "self", ",", "pkt", ",", "m", ")", ":", "cls", "=", "Raw", "if", "pkt", ".", "type", "==", "22", ":", "if", "len", "(", "m", ")", ">=", "1", ":", "msgtype", "=", "orb", "(", "m", "[", "0", "]", ")", "cls", "=", "_tls_handshake_cls", ".", "get", "(", "msgtype", ",", "Raw", ")", "elif", "pkt", ".", "type", "==", "20", ":", "cls", "=", "TLSChangeCipherSpec", "elif", "pkt", ".", "type", "==", "21", ":", "cls", "=", "TLSAlert", "elif", "pkt", ".", "type", "==", "23", ":", "cls", "=", "TLSApplicationData", "if", "cls", "is", "Raw", ":", "return", "Raw", "(", "m", ")", "else", ":", "try", ":", "return", "cls", "(", "m", ",", "tls_session", "=", "pkt", ".", "tls_session", ")", "except", "Exception", ":", "if", "conf", ".", "debug_dissector", ":", "raise", "return", "Raw", "(", "m", ")" ]
Try to parse one of the TLS subprotocols (ccs, alert, handshake or application_data). This is used inside a loop managed by .getfield().
[ "Try", "to", "parse", "one", "of", "the", "TLS", "subprotocols", "(", "ccs", "alert", "handshake", "or", "application_data", ")", ".", "This", "is", "used", "inside", "a", "loop", "managed", "by", ".", "getfield", "()", "." ]
python
train
30.692308
jplusplus/statscraper
statscraper/scrapers/VantetiderScraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/scrapers/VantetiderScraper.py#L550-L569
def as_dictlist(self): """ Returns a dictlist with values [ { "row": "row_a", "col": "col_a", "value": 1, } ] """ data = [] for row_i, row in enumerate(self.row_index): for col_i, col in enumerate(self.col_index): value = self.values_by_row[row_i][col_i] data.append({ "row": row, "col": col, "value": value, }) return data
[ "def", "as_dictlist", "(", "self", ")", ":", "data", "=", "[", "]", "for", "row_i", ",", "row", "in", "enumerate", "(", "self", ".", "row_index", ")", ":", "for", "col_i", ",", "col", "in", "enumerate", "(", "self", ".", "col_index", ")", ":", "value", "=", "self", ".", "values_by_row", "[", "row_i", "]", "[", "col_i", "]", "data", ".", "append", "(", "{", "\"row\"", ":", "row", ",", "\"col\"", ":", "col", ",", "\"value\"", ":", "value", ",", "}", ")", "return", "data" ]
Returns a dictlist with values [ { "row": "row_a", "col": "col_a", "value": 1, } ]
[ "Returns", "a", "dictlist", "with", "values", "[", "{", "row", ":", "row_a", "col", ":", "col_a", "value", ":", "1", "}", "]" ]
python
train
29.2
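A standalone illustration of the wide-to-long reshaping the method performs; the toy `row_index`, `col_index`, and `values_by_row` values are invented here, whereas in the scraper they come from the parsed page.

row_index = ["row_a", "row_b"]
col_index = ["col_a", "col_b"]
values_by_row = [[1, 2], [3, 4]]

# one dict per (row, col) cell, matching the docstring's output shape
data = [
    {"row": row, "col": col, "value": values_by_row[i][j]}
    for i, row in enumerate(row_index)
    for j, col in enumerate(col_index)
]
print(data[0])   # {'row': 'row_a', 'col': 'col_a', 'value': 1}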
pvlib/pvlib-python
pvlib/pvsystem.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L490-L522
def sapm_effective_irradiance(self, poa_direct, poa_diffuse, airmass_absolute, aoi, reference_irradiance=1000): """ Use the :py:func:`sapm_effective_irradiance` function, the input parameters, and ``self.module_parameters`` to calculate effective irradiance. Parameters ---------- poa_direct : numeric The direct irradiance incident upon the module. poa_diffuse : numeric The diffuse irradiance incident on module. airmass_absolute : numeric Absolute airmass. aoi : numeric Angle of incidence in degrees. reference_irradiance : numeric, default 1000 Reference irradiance by which to divide the input irradiance. Returns ------- effective_irradiance : numeric The SAPM effective irradiance. """ return sapm_effective_irradiance( poa_direct, poa_diffuse, airmass_absolute, aoi, self.module_parameters, reference_irradiance=reference_irradiance)
[ "def", "sapm_effective_irradiance", "(", "self", ",", "poa_direct", ",", "poa_diffuse", ",", "airmass_absolute", ",", "aoi", ",", "reference_irradiance", "=", "1000", ")", ":", "return", "sapm_effective_irradiance", "(", "poa_direct", ",", "poa_diffuse", ",", "airmass_absolute", ",", "aoi", ",", "self", ".", "module_parameters", ",", "reference_irradiance", "=", "reference_irradiance", ")" ]
Use the :py:func:`sapm_effective_irradiance` function, the input parameters, and ``self.module_parameters`` to calculate effective irradiance. Parameters ---------- poa_direct : numeric The direct irradiance incident upon the module. poa_diffuse : numeric The diffuse irradiance incident on module. airmass_absolute : numeric Absolute airmass. aoi : numeric Angle of incidence in degrees. reference_irradiance : numeric, default 1000 Reference irradiance by which to divide the input irradiance. Returns ------- effective_irradiance : numeric The SAPM effective irradiance.
[ "Use", "the", ":", "py", ":", "func", ":", "sapm_effective_irradiance", "function", "the", "input", "parameters", "and", "self", ".", "module_parameters", "to", "calculate", "effective", "irradiance", "." ]
python
train
33.575758
czielinski/portfolioopt
example.py
https://github.com/czielinski/portfolioopt/blob/96ac25daab0c0dbc8933330a92ff31fb898112f2/example.py#L36-L46
def print_portfolio_info(returns, avg_rets, weights): """ Print information on expected portfolio performance. """ ret = (weights * avg_rets).sum() std = (weights * returns).sum(1).std() sharpe = ret / std print("Optimal weights:\n{}\n".format(weights)) print("Expected return: {}".format(ret)) print("Expected variance: {}".format(std**2)) print("Expected Sharpe: {}".format(sharpe))
[ "def", "print_portfolio_info", "(", "returns", ",", "avg_rets", ",", "weights", ")", ":", "ret", "=", "(", "weights", "*", "avg_rets", ")", ".", "sum", "(", ")", "std", "=", "(", "weights", "*", "returns", ")", ".", "sum", "(", "1", ")", ".", "std", "(", ")", "sharpe", "=", "ret", "/", "std", "print", "(", "\"Optimal weights:\\n{}\\n\"", ".", "format", "(", "weights", ")", ")", "print", "(", "\"Expected return: {}\"", ".", "format", "(", "ret", ")", ")", "print", "(", "\"Expected variance: {}\"", ".", "format", "(", "std", "**", "2", ")", ")", "print", "(", "\"Expected Sharpe: {}\"", ".", "format", "(", "sharpe", ")", ")" ]
Print information on expected portfolio performance.
[ "Print", "information", "on", "expected", "portfolio", "performance", "." ]
python
train
38
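A small, self-contained sketch of the statistics printed by the example above, using made-up random daily returns and equal weights; only pandas and numpy are needed, and the three expressions mirror the function body.

import numpy as np
import pandas as pd

np.random.seed(0)
returns = pd.DataFrame(np.random.randn(250, 4) * 0.01,
                       columns=["A", "B", "C", "D"])    # daily return history
avg_rets = returns.mean()
weights = pd.Series(0.25, index=returns.columns)        # equal weighting

ret = (weights * avg_rets).sum()            # expected portfolio return
std = (weights * returns).sum(1).std()      # portfolio return std deviation
print("Expected return:", ret)
print("Expected Sharpe:", ret / std)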
xtrinch/fcm-django
fcm_django/fcm.py
https://github.com/xtrinch/fcm-django/blob/8480d1cf935bfb28e2ad6d86a0abf923c2ecb266/fcm_django/fcm.py#L65-L168
def fcm_send_message( registration_id, title=None, body=None, icon=None, data=None, sound=None, badge=None, low_priority=False, condition=None, time_to_live=None, click_action=None, collapse_key=None, delay_while_idle=False, restricted_package_name=None, dry_run=False, color=None, tag=None, body_loc_key=None, body_loc_args=None, title_loc_key=None, title_loc_args=None, content_available=None, extra_kwargs={}, api_key=None, json_encoder=None, **kwargs): """ Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py: Send push notification to a single device Args: registration_id (str): FCM device registration IDs. body (str): Message string to display in the notification tray data (dict): Data message payload to send alone or with the notification message sound (str): The sound file name to play. Specify "Default" for device default sound. Keyword Args: collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to ``None``. delay_while_idle (bool, optional): If ``True`` indicates that the message should not be sent until the device becomes active. time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to ``None`` which uses the FCM default of 4 weeks. low_priority (boolean, optional): Whether to send notification with the low priority flag. Defaults to ``False``. restricted_package_name (str, optional): Package name of the application where the registration IDs must match in order to receive the message. Defaults to ``None``. dry_run (bool, optional): If ``True`` no message will be sent but request will be tested. Returns: :tuple:`multicast_id(long), success(int), failure(int), canonical_ids(int), results(list)`: Response from FCM server. Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it. """ if api_key is None: api_key = SETTINGS.get("FCM_SERVER_KEY") push_service = FCMNotification(api_key=api_key, json_encoder=json_encoder) result = push_service.notify_single_device( registration_id=registration_id, message_title=title, message_body=body, message_icon=icon, data_message=data, sound=sound, badge=badge, collapse_key=collapse_key, low_priority=low_priority, condition=condition, time_to_live=time_to_live, click_action=click_action, delay_while_idle=delay_while_idle, restricted_package_name=restricted_package_name, dry_run=dry_run, color=color, tag=tag, body_loc_key=body_loc_key, body_loc_args=body_loc_args, title_loc_key=title_loc_key, title_loc_args=title_loc_args, content_available=content_available, extra_kwargs=extra_kwargs, **kwargs ) # do not raise errors, pyfcm will raise exceptions if response status will # be anything but 200 return result
[ "def", "fcm_send_message", "(", "registration_id", ",", "title", "=", "None", ",", "body", "=", "None", ",", "icon", "=", "None", ",", "data", "=", "None", ",", "sound", "=", "None", ",", "badge", "=", "None", ",", "low_priority", "=", "False", ",", "condition", "=", "None", ",", "time_to_live", "=", "None", ",", "click_action", "=", "None", ",", "collapse_key", "=", "None", ",", "delay_while_idle", "=", "False", ",", "restricted_package_name", "=", "None", ",", "dry_run", "=", "False", ",", "color", "=", "None", ",", "tag", "=", "None", ",", "body_loc_key", "=", "None", ",", "body_loc_args", "=", "None", ",", "title_loc_key", "=", "None", ",", "title_loc_args", "=", "None", ",", "content_available", "=", "None", ",", "extra_kwargs", "=", "{", "}", ",", "api_key", "=", "None", ",", "json_encoder", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "api_key", "is", "None", ":", "api_key", "=", "SETTINGS", ".", "get", "(", "\"FCM_SERVER_KEY\"", ")", "push_service", "=", "FCMNotification", "(", "api_key", "=", "api_key", ",", "json_encoder", "=", "json_encoder", ")", "result", "=", "push_service", ".", "notify_single_device", "(", "registration_id", "=", "registration_id", ",", "message_title", "=", "title", ",", "message_body", "=", "body", ",", "message_icon", "=", "icon", ",", "data_message", "=", "data", ",", "sound", "=", "sound", ",", "badge", "=", "badge", ",", "collapse_key", "=", "collapse_key", ",", "low_priority", "=", "low_priority", ",", "condition", "=", "condition", ",", "time_to_live", "=", "time_to_live", ",", "click_action", "=", "click_action", ",", "delay_while_idle", "=", "delay_while_idle", ",", "restricted_package_name", "=", "restricted_package_name", ",", "dry_run", "=", "dry_run", ",", "color", "=", "color", ",", "tag", "=", "tag", ",", "body_loc_key", "=", "body_loc_key", ",", "body_loc_args", "=", "body_loc_args", ",", "title_loc_key", "=", "title_loc_key", ",", "title_loc_args", "=", "title_loc_args", ",", "content_available", "=", "content_available", ",", "extra_kwargs", "=", "extra_kwargs", ",", "*", "*", "kwargs", ")", "# do not raise errors, pyfcm will raise exceptions if response status will", "# be anything but 200", "return", "result" ]
Copied from https://github.com/olucurious/PyFCM/blob/master/pyfcm/fcm.py: Send push notification to a single device Args: registration_id (str): FCM device registration IDs. body (str): Message string to display in the notification tray data (dict): Data message payload to send alone or with the notification message sound (str): The sound file name to play. Specify "Default" for device default sound. Keyword Args: collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to ``None``. delay_while_idle (bool, optional): If ``True`` indicates that the message should not be sent until the device becomes active. time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to ``None`` which uses the FCM default of 4 weeks. low_priority (boolean, optional): Whether to send notification with the low priority flag. Defaults to ``False``. restricted_package_name (str, optional): Package name of the application where the registration IDs must match in order to receive the message. Defaults to ``None``. dry_run (bool, optional): If ``True`` no message will be sent but request will be tested. Returns: :tuple:`multicast_id(long), success(int), failure(int), canonical_ids(int), results(list)`: Response from FCM server. Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: JSON parsing error, mostly from changes in the response of FCM, create a new github issue to resolve it.
[ "Copied", "from", "https", ":", "//", "github", ".", "com", "/", "olucurious", "/", "PyFCM", "/", "blob", "/", "master", "/", "pyfcm", "/", "fcm", ".", "py", ":" ]
python
train
37
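A hedged usage sketch for the helper above; the import path follows this record's module path, the registration token and message contents are placeholders, and `api_key` may be omitted when the FCM_SERVER_KEY setting referenced in the code is configured.

from fcm_django.fcm import fcm_send_message   # import path follows this record

result = fcm_send_message(
    registration_id="device-registration-token",   # placeholder token
    title="Build finished",
    body="Pipeline completed successfully",
    data={"build_id": "42"},
    sound="default",
    api_key="server-key",   # optional if FCM_SERVER_KEY is configured in settings
)
print(result)   # response dict from pyfcm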
proycon/clam
clam/common/data.py
https://github.com/proycon/clam/blob/09d15cfc26d7cbe0f5976cdd5424dc446d10dbf3/clam/common/data.py#L676-L682
def parameter(self, parameter_id): """Return the specified global parameter (the entire object, not just the value)""" for parametergroup, parameters in self.parameters: #pylint: disable=unused-variable for parameter in parameters: if parameter.id == parameter_id: return parameter raise KeyError("No such parameter exists: " + parameter_id )
[ "def", "parameter", "(", "self", ",", "parameter_id", ")", ":", "for", "parametergroup", ",", "parameters", "in", "self", ".", "parameters", ":", "#pylint: disable=unused-variable", "for", "parameter", "in", "parameters", ":", "if", "parameter", ".", "id", "==", "parameter_id", ":", "return", "parameter", "raise", "KeyError", "(", "\"No such parameter exists: \"", "+", "parameter_id", ")" ]
Return the specified global parameter (the entire object, not just the value)
[ "Return", "the", "specified", "global", "parameter", "(", "the", "entire", "object", "not", "just", "the", "value", ")" ]
python
train
58.285714
ARMmbed/icetea
icetea_lib/Plugin/plugins/LocalAllocator/DutSerial.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/Plugin/plugins/LocalAllocator/DutSerial.py#L316-L327
def close_connection(self): # pylint: disable=C0103 """ Closes serial port connection. :return: Nothing """ if self.port: self.stop() self.logger.debug("Close port '%s'" % self.comport, extra={'type': '<->'}) self.port.close() self.port = False
[ "def", "close_connection", "(", "self", ")", ":", "# pylint: disable=C0103", "if", "self", ".", "port", ":", "self", ".", "stop", "(", ")", "self", ".", "logger", ".", "debug", "(", "\"Close port '%s'\"", "%", "self", ".", "comport", ",", "extra", "=", "{", "'type'", ":", "'<->'", "}", ")", "self", ".", "port", ".", "close", "(", ")", "self", ".", "port", "=", "False" ]
Closes serial port connection. :return: Nothing
[ "Closes", "serial", "port", "connection", "." ]
python
train
29.416667
bjodah/pyneqsys
pyneqsys/core.py
https://github.com/bjodah/pyneqsys/blob/1c8f2fe1ab2b6cc6cb55b7a1328aca2e3a3c5c77/pyneqsys/core.py#L195-L204
def plot_series_residuals(self, xres, varied_data, varied_idx, params, **kwargs): """ Analogous to :meth:`plot_series` but will plot residuals. """ nf = len(self.f_cb(*self.pre_process(xres[0], params))) xerr = np.empty((xres.shape[0], nf)) new_params = np.array(params) for idx, row in enumerate(xres): new_params[varied_idx] = varied_data[idx] xerr[idx, :] = self.f_cb(*self.pre_process(row, params)) return self.plot_series(xerr, varied_data, varied_idx, **kwargs)
[ "def", "plot_series_residuals", "(", "self", ",", "xres", ",", "varied_data", ",", "varied_idx", ",", "params", ",", "*", "*", "kwargs", ")", ":", "nf", "=", "len", "(", "self", ".", "f_cb", "(", "*", "self", ".", "pre_process", "(", "xres", "[", "0", "]", ",", "params", ")", ")", ")", "xerr", "=", "np", ".", "empty", "(", "(", "xres", ".", "shape", "[", "0", "]", ",", "nf", ")", ")", "new_params", "=", "np", ".", "array", "(", "params", ")", "for", "idx", ",", "row", "in", "enumerate", "(", "xres", ")", ":", "new_params", "[", "varied_idx", "]", "=", "varied_data", "[", "idx", "]", "xerr", "[", "idx", ",", ":", "]", "=", "self", ".", "f_cb", "(", "*", "self", ".", "pre_process", "(", "row", ",", "params", ")", ")", "return", "self", ".", "plot_series", "(", "xerr", ",", "varied_data", ",", "varied_idx", ",", "*", "*", "kwargs", ")" ]
Analogous to :meth:`plot_series` but will plot residuals.
[ "Analogous", "to", ":", "meth", ":", "plot_series", "but", "will", "plot", "residuals", "." ]
python
train
53.1
oscarbranson/latools
latools/filtering/signal_optimiser.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/signal_optimiser.py#L132-L389
def signal_optimiser(d, analytes, min_points=5, threshold_mode='kde_first_max', threshold_mult=1., x_bias=0, weights=None, ind=None, mode='minimise'): """ Optimise data selection based on specified analytes. Identifies the longest possible contiguous data region in the signal where the relative standard deviation (std) and concentration of all analytes is minimised. Optimisation is performed via a grid search of all possible contiguous data regions. For each region, the mean std and mean scaled analyte concentration ('amplitude') are calculated. The size and position of the optimal data region are identified using threshold std and amplitude values. Thresholds are derived from all calculated stds and amplitudes using the method specified by `threshold_mode`. For example, using the 'kde_max' method, a probability density function (PDF) is calculated for std and amplitude values, and the threshold is set as the maximum of the PDF. These thresholds are then used to identify the size and position of the longest contiguous region where the std is below the threshold, and the amplitude is either below the threshold. All possible regions of the data that have at least `min_points` are considered. For a graphical demonstration of the action of signal_optimiser, use `optimisation_plot`. Parameters ---------- d : latools.D object An latools data object. analytes : str or array-like Which analytes to consider. min_points : int The minimum number of contiguous points to consider. threshold_mode : str The method used to calculate the optimisation thresholds. Can be 'mean', 'median', 'kde_max' or 'bayes_mvs', or a custom function. If a function, must take a 1D array, and return a single, real number. threshood_mult : float or tuple A multiplier applied to the calculated threshold before use. If a tuple, the first value is applied to the mean threshold, and the second is applied to the standard deviation threshold. Reduce this to make data selection more stringent. x_bias : float If non-zero, a bias is applied to the calculated statistics to prefer the beginning (if > 0) or end (if < 0) of the signal. Should be between zero and 1. weights : array-like of length len(analytes) An array of numbers specifying the importance of each analyte considered. Larger number makes the analyte have a greater effect on the optimisation. Default is None. ind : boolean array A boolean array the same length as the data. Where false, data will not be included. mode : str Whether to 'minimise' or 'maximise' the concentration of the elements. Returns ------- dict, str : optimisation result, error message """ errmsg = '' if isinstance(analytes, str): analytes = [analytes] if ind is None: ind = np.full(len(d.Time), True) # initial catch if not any(ind) or (np.diff(bool_2_indices(ind)).max() < min_points): errmsg = 'Optmisation failed. No contiguous data regions longer than {:.0f} points.'.format(min_points) return Bunch({'means': np.nan, 'stds': np.nan, 'mean_threshold': np.nan, 'std_threshold': np.nan, 'lims': np.nan, 'filt': ind, 'threshold_mode': threshold_mode, 'min_points': min_points, 'analytes': analytes, 'opt_centre': np.nan, 'opt_n_points': np.nan, 'weights': weights, 'optimisation_success': False, 'errmsg': errmsg}), errmsg msmeans, msstds = calculate_optimisation_stats(d, analytes, min_points, weights, ind, x_bias) # second catch if all(np.isnan(msmeans).flat) or all(np.isnan(msmeans).flat): errmsg = 'Optmisation failed. 
No contiguous data regions longer than {:.0f} points.'.format(min_points) return Bunch({'means': np.nan, 'stds': np.nan, 'mean_threshold': np.nan, 'std_threshold': np.nan, 'lims': np.nan, 'filt': ind, 'threshold_mode': threshold_mode, 'min_points': min_points, 'analytes': analytes, 'opt_centre': np.nan, 'opt_n_points': np.nan, 'weights': weights, 'optimisation_success': False, 'errmsg': errmsg}), errmsg # define thresholds valid = ['kde_first_max', 'kde_max', 'median', 'bayes_mvs', 'mean'] n_under = 0 i = np.argwhere(np.array(valid) == threshold_mode)[0, 0] o_threshold_mode = threshold_mode while (n_under <= 0) & (i < len(valid)): if threshold_mode == 'median': # median - OK, but best? std_threshold = np.nanmedian(msstds) mean_threshold = np.nanmedian(msmeans) elif threshold_mode == 'mean': # mean std_threshold = np.nanmean(msstds) mean_threshold = np.nanmean(msmeans) elif threshold_mode == 'kde_max': # maximum of gaussian kernel density estimator mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat) xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100) mdf = mkd.pdf(xm) mean_threshold = xm[np.argmax(mdf)] rkd = gaussian_kde(msstds[~np.isnan(msstds)]) xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100) rdf = rkd.pdf(xr) std_threshold = xr[np.argmax(rdf)] elif threshold_mode == 'kde_first_max': # first local maximum of gaussian kernel density estimator mkd = gaussian_kde(msmeans[~np.isnan(msmeans)].flat) xm = np.linspace(*np.percentile(msmeans.flatten()[~np.isnan(msmeans.flatten())], (1, 99)), 100) mdf = mkd.pdf(xm) inds = np.argwhere(np.r_[False, mdf[1:] > mdf[:-1]] & np.r_[mdf[:-1] > mdf[1:], False] & (mdf > 0.25 * mdf.max())) mean_threshold = xm[np.min(inds)] rkd = gaussian_kde(msstds[~np.isnan(msstds)]) xr = np.linspace(*np.percentile(msstds.flatten()[~np.isnan(msstds.flatten())], (1, 99)), 100) rdf = rkd.pdf(xr) inds = np.argwhere(np.r_[False, rdf[1:] > rdf[:-1]] & np.r_[rdf[:-1] > rdf[1:], False] & (rdf > 0.25 * rdf.max())) std_threshold = xr[np.min(inds)] elif threshold_mode == 'bayes_mvs': # bayesian mvs. bm, _, bs = bayes_mvs(msstds[~np.isnan(msstds)]) std_threshold = bm.statistic bm, _, bs = bayes_mvs(msmeans[~np.isnan(msmeans)]) mean_threshold = bm.statistic elif callable(threshold_mode): std_threshold = threshold_mode(msstds[~np.isnan(msstds)].flatten()) mean_threshold = threshold_mode(msmeans[~np.isnan(msmeans)].flatten()) else: try: mean_threshold, std_threshold = threshold_mode except: raise ValueError('\nthreshold_mode must be one of:\n ' + ', '.join(valid) + ',\na custom function, or a \n(mean_threshold, std_threshold) tuple.') # apply threshold_mult if isinstance(threshold_mult, (int, float)): std_threshold *= threshold_mult mean_threshold *= threshold_mult elif len(threshold_mult) == 2: mean_threshold *= threshold_mult[0] std_threshold *= threshold_mult[1] else: raise ValueError('\nthreshold_mult must be a float, int or tuple of length 2.') rind = (msstds < std_threshold) if mode == 'minimise': mind = (msmeans < mean_threshold) else: mind = (msmeans > mean_threshold) ind = rind & mind n_under = ind.sum() if n_under == 0: i += 1 if i <= len(valid) - 1: threshold_mode = valid[i] else: errmsg = 'Optimisation failed. No of the threshold_mode would work. Try reducting min_points.' 
return Bunch({'means': np.nan, 'stds': np.nan, 'mean_threshold': np.nan, 'std_threshold': np.nan, 'lims': np.nan, 'filt': ind, 'threshold_mode': threshold_mode, 'min_points': min_points, 'analytes': analytes, 'opt_centre': np.nan, 'opt_n_points': np.nan, 'weights': weights, 'optimisation_success': False, 'errmsg': errmsg}), errmsg if i > 0: errmsg = "optimisation failed using threshold_mode='{:}', falling back to '{:}'".format(o_threshold_mode, threshold_mode) # identify max number of points within thresholds passing = np.argwhere(ind) opt_n_points = passing[:, 0].max() opt_centre = passing[passing[:, 0] == opt_n_points, 1].min() opt_n_points += min_points # centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]), # np.arange(min_points, min_points + msmeans.shape[0])) # opt_n_points = npoints[ind].max() # plus/minus one point to allow some freedom to shift selection window. # cind = ind & (npoints == opt_n_points) # opt_centre = centres[cind].min() if opt_n_points % 2 == 0: lims = (opt_centre - opt_n_points // 2, opt_centre + opt_n_points // 2) else: lims = (opt_centre - opt_n_points // 2, opt_centre + opt_n_points // 2 + 1) filt = np.zeros(d.Time.shape, dtype=bool) filt[lims[0]:lims[1]] = True return Bunch({'means': msmeans, 'stds': msstds, 'mean_threshold': mean_threshold, 'std_threshold': std_threshold, 'lims': lims, 'filt': filt, 'threshold_mode': threshold_mode, 'min_points': min_points, 'analytes': analytes, 'opt_centre': opt_centre, 'opt_n_points': opt_n_points, 'weights': weights, 'optimisation_success': True, 'errmsg': errmsg}), errmsg
[ "def", "signal_optimiser", "(", "d", ",", "analytes", ",", "min_points", "=", "5", ",", "threshold_mode", "=", "'kde_first_max'", ",", "threshold_mult", "=", "1.", ",", "x_bias", "=", "0", ",", "weights", "=", "None", ",", "ind", "=", "None", ",", "mode", "=", "'minimise'", ")", ":", "errmsg", "=", "''", "if", "isinstance", "(", "analytes", ",", "str", ")", ":", "analytes", "=", "[", "analytes", "]", "if", "ind", "is", "None", ":", "ind", "=", "np", ".", "full", "(", "len", "(", "d", ".", "Time", ")", ",", "True", ")", "# initial catch", "if", "not", "any", "(", "ind", ")", "or", "(", "np", ".", "diff", "(", "bool_2_indices", "(", "ind", ")", ")", ".", "max", "(", ")", "<", "min_points", ")", ":", "errmsg", "=", "'Optmisation failed. No contiguous data regions longer than {:.0f} points.'", ".", "format", "(", "min_points", ")", "return", "Bunch", "(", "{", "'means'", ":", "np", ".", "nan", ",", "'stds'", ":", "np", ".", "nan", ",", "'mean_threshold'", ":", "np", ".", "nan", ",", "'std_threshold'", ":", "np", ".", "nan", ",", "'lims'", ":", "np", ".", "nan", ",", "'filt'", ":", "ind", ",", "'threshold_mode'", ":", "threshold_mode", ",", "'min_points'", ":", "min_points", ",", "'analytes'", ":", "analytes", ",", "'opt_centre'", ":", "np", ".", "nan", ",", "'opt_n_points'", ":", "np", ".", "nan", ",", "'weights'", ":", "weights", ",", "'optimisation_success'", ":", "False", ",", "'errmsg'", ":", "errmsg", "}", ")", ",", "errmsg", "msmeans", ",", "msstds", "=", "calculate_optimisation_stats", "(", "d", ",", "analytes", ",", "min_points", ",", "weights", ",", "ind", ",", "x_bias", ")", "# second catch", "if", "all", "(", "np", ".", "isnan", "(", "msmeans", ")", ".", "flat", ")", "or", "all", "(", "np", ".", "isnan", "(", "msmeans", ")", ".", "flat", ")", ":", "errmsg", "=", "'Optmisation failed. 
No contiguous data regions longer than {:.0f} points.'", ".", "format", "(", "min_points", ")", "return", "Bunch", "(", "{", "'means'", ":", "np", ".", "nan", ",", "'stds'", ":", "np", ".", "nan", ",", "'mean_threshold'", ":", "np", ".", "nan", ",", "'std_threshold'", ":", "np", ".", "nan", ",", "'lims'", ":", "np", ".", "nan", ",", "'filt'", ":", "ind", ",", "'threshold_mode'", ":", "threshold_mode", ",", "'min_points'", ":", "min_points", ",", "'analytes'", ":", "analytes", ",", "'opt_centre'", ":", "np", ".", "nan", ",", "'opt_n_points'", ":", "np", ".", "nan", ",", "'weights'", ":", "weights", ",", "'optimisation_success'", ":", "False", ",", "'errmsg'", ":", "errmsg", "}", ")", ",", "errmsg", "# define thresholds", "valid", "=", "[", "'kde_first_max'", ",", "'kde_max'", ",", "'median'", ",", "'bayes_mvs'", ",", "'mean'", "]", "n_under", "=", "0", "i", "=", "np", ".", "argwhere", "(", "np", ".", "array", "(", "valid", ")", "==", "threshold_mode", ")", "[", "0", ",", "0", "]", "o_threshold_mode", "=", "threshold_mode", "while", "(", "n_under", "<=", "0", ")", "&", "(", "i", "<", "len", "(", "valid", ")", ")", ":", "if", "threshold_mode", "==", "'median'", ":", "# median - OK, but best?", "std_threshold", "=", "np", ".", "nanmedian", "(", "msstds", ")", "mean_threshold", "=", "np", ".", "nanmedian", "(", "msmeans", ")", "elif", "threshold_mode", "==", "'mean'", ":", "# mean", "std_threshold", "=", "np", ".", "nanmean", "(", "msstds", ")", "mean_threshold", "=", "np", ".", "nanmean", "(", "msmeans", ")", "elif", "threshold_mode", "==", "'kde_max'", ":", "# maximum of gaussian kernel density estimator", "mkd", "=", "gaussian_kde", "(", "msmeans", "[", "~", "np", ".", "isnan", "(", "msmeans", ")", "]", ".", "flat", ")", "xm", "=", "np", ".", "linspace", "(", "*", "np", ".", "percentile", "(", "msmeans", ".", "flatten", "(", ")", "[", "~", "np", ".", "isnan", "(", "msmeans", ".", "flatten", "(", ")", ")", "]", ",", "(", "1", ",", "99", ")", ")", ",", "100", ")", "mdf", "=", "mkd", ".", "pdf", "(", "xm", ")", "mean_threshold", "=", "xm", "[", "np", ".", "argmax", "(", "mdf", ")", "]", "rkd", "=", "gaussian_kde", "(", "msstds", "[", "~", "np", ".", "isnan", "(", "msstds", ")", "]", ")", "xr", "=", "np", ".", "linspace", "(", "*", "np", ".", "percentile", "(", "msstds", ".", "flatten", "(", ")", "[", "~", "np", ".", "isnan", "(", "msstds", ".", "flatten", "(", ")", ")", "]", ",", "(", "1", ",", "99", ")", ")", ",", "100", ")", "rdf", "=", "rkd", ".", "pdf", "(", "xr", ")", "std_threshold", "=", "xr", "[", "np", ".", "argmax", "(", "rdf", ")", "]", "elif", "threshold_mode", "==", "'kde_first_max'", ":", "# first local maximum of gaussian kernel density estimator", "mkd", "=", "gaussian_kde", "(", "msmeans", "[", "~", "np", ".", "isnan", "(", "msmeans", ")", "]", ".", "flat", ")", "xm", "=", "np", ".", "linspace", "(", "*", "np", ".", "percentile", "(", "msmeans", ".", "flatten", "(", ")", "[", "~", "np", ".", "isnan", "(", "msmeans", ".", "flatten", "(", ")", ")", "]", ",", "(", "1", ",", "99", ")", ")", ",", "100", ")", "mdf", "=", "mkd", ".", "pdf", "(", "xm", ")", "inds", "=", "np", ".", "argwhere", "(", "np", ".", "r_", "[", "False", ",", "mdf", "[", "1", ":", "]", ">", "mdf", "[", ":", "-", "1", "]", "]", "&", "np", ".", "r_", "[", "mdf", "[", ":", "-", "1", "]", ">", "mdf", "[", "1", ":", "]", ",", "False", "]", "&", "(", "mdf", ">", "0.25", "*", "mdf", ".", "max", "(", ")", ")", ")", "mean_threshold", "=", "xm", "[", "np", ".", "min", "(", "inds", ")", "]", "rkd", "=", "gaussian_kde", 
"(", "msstds", "[", "~", "np", ".", "isnan", "(", "msstds", ")", "]", ")", "xr", "=", "np", ".", "linspace", "(", "*", "np", ".", "percentile", "(", "msstds", ".", "flatten", "(", ")", "[", "~", "np", ".", "isnan", "(", "msstds", ".", "flatten", "(", ")", ")", "]", ",", "(", "1", ",", "99", ")", ")", ",", "100", ")", "rdf", "=", "rkd", ".", "pdf", "(", "xr", ")", "inds", "=", "np", ".", "argwhere", "(", "np", ".", "r_", "[", "False", ",", "rdf", "[", "1", ":", "]", ">", "rdf", "[", ":", "-", "1", "]", "]", "&", "np", ".", "r_", "[", "rdf", "[", ":", "-", "1", "]", ">", "rdf", "[", "1", ":", "]", ",", "False", "]", "&", "(", "rdf", ">", "0.25", "*", "rdf", ".", "max", "(", ")", ")", ")", "std_threshold", "=", "xr", "[", "np", ".", "min", "(", "inds", ")", "]", "elif", "threshold_mode", "==", "'bayes_mvs'", ":", "# bayesian mvs.", "bm", ",", "_", ",", "bs", "=", "bayes_mvs", "(", "msstds", "[", "~", "np", ".", "isnan", "(", "msstds", ")", "]", ")", "std_threshold", "=", "bm", ".", "statistic", "bm", ",", "_", ",", "bs", "=", "bayes_mvs", "(", "msmeans", "[", "~", "np", ".", "isnan", "(", "msmeans", ")", "]", ")", "mean_threshold", "=", "bm", ".", "statistic", "elif", "callable", "(", "threshold_mode", ")", ":", "std_threshold", "=", "threshold_mode", "(", "msstds", "[", "~", "np", ".", "isnan", "(", "msstds", ")", "]", ".", "flatten", "(", ")", ")", "mean_threshold", "=", "threshold_mode", "(", "msmeans", "[", "~", "np", ".", "isnan", "(", "msmeans", ")", "]", ".", "flatten", "(", ")", ")", "else", ":", "try", ":", "mean_threshold", ",", "std_threshold", "=", "threshold_mode", "except", ":", "raise", "ValueError", "(", "'\\nthreshold_mode must be one of:\\n '", "+", "', '", ".", "join", "(", "valid", ")", "+", "',\\na custom function, or a \\n(mean_threshold, std_threshold) tuple.'", ")", "# apply threshold_mult", "if", "isinstance", "(", "threshold_mult", ",", "(", "int", ",", "float", ")", ")", ":", "std_threshold", "*=", "threshold_mult", "mean_threshold", "*=", "threshold_mult", "elif", "len", "(", "threshold_mult", ")", "==", "2", ":", "mean_threshold", "*=", "threshold_mult", "[", "0", "]", "std_threshold", "*=", "threshold_mult", "[", "1", "]", "else", ":", "raise", "ValueError", "(", "'\\nthreshold_mult must be a float, int or tuple of length 2.'", ")", "rind", "=", "(", "msstds", "<", "std_threshold", ")", "if", "mode", "==", "'minimise'", ":", "mind", "=", "(", "msmeans", "<", "mean_threshold", ")", "else", ":", "mind", "=", "(", "msmeans", ">", "mean_threshold", ")", "ind", "=", "rind", "&", "mind", "n_under", "=", "ind", ".", "sum", "(", ")", "if", "n_under", "==", "0", ":", "i", "+=", "1", "if", "i", "<=", "len", "(", "valid", ")", "-", "1", ":", "threshold_mode", "=", "valid", "[", "i", "]", "else", ":", "errmsg", "=", "'Optimisation failed. No of the threshold_mode would work. 
Try reducting min_points.'", "return", "Bunch", "(", "{", "'means'", ":", "np", ".", "nan", ",", "'stds'", ":", "np", ".", "nan", ",", "'mean_threshold'", ":", "np", ".", "nan", ",", "'std_threshold'", ":", "np", ".", "nan", ",", "'lims'", ":", "np", ".", "nan", ",", "'filt'", ":", "ind", ",", "'threshold_mode'", ":", "threshold_mode", ",", "'min_points'", ":", "min_points", ",", "'analytes'", ":", "analytes", ",", "'opt_centre'", ":", "np", ".", "nan", ",", "'opt_n_points'", ":", "np", ".", "nan", ",", "'weights'", ":", "weights", ",", "'optimisation_success'", ":", "False", ",", "'errmsg'", ":", "errmsg", "}", ")", ",", "errmsg", "if", "i", ">", "0", ":", "errmsg", "=", "\"optimisation failed using threshold_mode='{:}', falling back to '{:}'\"", ".", "format", "(", "o_threshold_mode", ",", "threshold_mode", ")", "# identify max number of points within thresholds", "passing", "=", "np", ".", "argwhere", "(", "ind", ")", "opt_n_points", "=", "passing", "[", ":", ",", "0", "]", ".", "max", "(", ")", "opt_centre", "=", "passing", "[", "passing", "[", ":", ",", "0", "]", "==", "opt_n_points", ",", "1", "]", ".", "min", "(", ")", "opt_n_points", "+=", "min_points", "# centres, npoints = np.meshgrid(np.arange(msmeans.shape[1]),", "# np.arange(min_points, min_points + msmeans.shape[0]))", "# opt_n_points = npoints[ind].max()", "# plus/minus one point to allow some freedom to shift selection window.", "# cind = ind & (npoints == opt_n_points)", "# opt_centre = centres[cind].min()", "if", "opt_n_points", "%", "2", "==", "0", ":", "lims", "=", "(", "opt_centre", "-", "opt_n_points", "//", "2", ",", "opt_centre", "+", "opt_n_points", "//", "2", ")", "else", ":", "lims", "=", "(", "opt_centre", "-", "opt_n_points", "//", "2", ",", "opt_centre", "+", "opt_n_points", "//", "2", "+", "1", ")", "filt", "=", "np", ".", "zeros", "(", "d", ".", "Time", ".", "shape", ",", "dtype", "=", "bool", ")", "filt", "[", "lims", "[", "0", "]", ":", "lims", "[", "1", "]", "]", "=", "True", "return", "Bunch", "(", "{", "'means'", ":", "msmeans", ",", "'stds'", ":", "msstds", ",", "'mean_threshold'", ":", "mean_threshold", ",", "'std_threshold'", ":", "std_threshold", ",", "'lims'", ":", "lims", ",", "'filt'", ":", "filt", ",", "'threshold_mode'", ":", "threshold_mode", ",", "'min_points'", ":", "min_points", ",", "'analytes'", ":", "analytes", ",", "'opt_centre'", ":", "opt_centre", ",", "'opt_n_points'", ":", "opt_n_points", ",", "'weights'", ":", "weights", ",", "'optimisation_success'", ":", "True", ",", "'errmsg'", ":", "errmsg", "}", ")", ",", "errmsg" ]
Optimise data selection based on specified analytes. Identifies the longest possible contiguous data region in the signal where the relative standard deviation (std) and concentration of all analytes are minimised. Optimisation is performed via a grid search of all possible contiguous data regions. For each region, the mean std and mean scaled analyte concentration ('amplitude') are calculated. The size and position of the optimal data region are identified using threshold std and amplitude values. Thresholds are derived from all calculated stds and amplitudes using the method specified by `threshold_mode`. For example, using the 'kde_max' method, a probability density function (PDF) is calculated for std and amplitude values, and the threshold is set as the maximum of the PDF. These thresholds are then used to identify the size and position of the longest contiguous region where the std is below the threshold, and the amplitude is either below (mode='minimise') or above (mode='maximise') the threshold. All possible regions of the data that contain at least `min_points` points are considered. For a graphical demonstration of the action of signal_optimiser, use `optimisation_plot`. Parameters ---------- d : latools.D object A latools data object. analytes : str or array-like Which analytes to consider. min_points : int The minimum number of contiguous points to consider. threshold_mode : str The method used to calculate the optimisation thresholds. Can be 'kde_first_max' (the default), 'kde_max', 'mean', 'median' or 'bayes_mvs', or a custom function. If a function, must take a 1D array, and return a single, real number. threshold_mult : float or tuple A multiplier applied to the calculated threshold before use. If a tuple, the first value is applied to the mean threshold, and the second is applied to the standard deviation threshold. Reduce this to make data selection more stringent. x_bias : float If non-zero, a bias is applied to the calculated statistics to prefer the beginning (if > 0) or end (if < 0) of the signal. Should be between zero and 1. weights : array-like of length len(analytes) An array of numbers specifying the importance of each analyte considered. A larger number makes the analyte have a greater effect on the optimisation. Default is None. ind : boolean array A boolean array the same length as the data. Where false, data will not be included. mode : str Whether to 'minimise' or 'maximise' the concentration of the elements. Returns ------- dict, str : optimisation result, error message
[ "Optimise", "data", "selection", "based", "on", "specified", "analytes", "." ]
python
test
42.232558
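To make the grid search described in the signal_optimiser docstring above concrete, here is a small self-contained numpy sketch: it scores every contiguous window of a synthetic signal by its mean and standard deviation, derives median thresholds (analogous to threshold_mode='median'), and keeps the longest window that passes both. This illustrates the approach only; it is not the latools API, and the synthetic signal is an assumption.

import numpy as np

# Synthetic signal: a noisy plateau flanked by ramps (illustrative only).
rng = np.random.default_rng(0)
sig = np.concatenate([np.linspace(0, 1, 20), np.full(60, 1.0), np.linspace(1, 0, 20)])
sig = sig + rng.normal(0, 0.02, sig.size)

min_points = 5
stats = []
# Grid search over every contiguous window of at least min_points samples.
for n in range(min_points, sig.size + 1):
    for start in range(sig.size - n + 1):
        w = sig[start:start + n]
        stats.append((n, start, w.mean(), w.std()))
stats = np.array(stats)

# Median thresholds on the window means and stds (like threshold_mode='median').
mean_thr = np.median(stats[:, 2])
std_thr = np.median(stats[:, 3])
passing = stats[(stats[:, 3] < std_thr) & (stats[:, 2] > mean_thr)]  # 'maximise' mode

n_opt = int(passing[:, 0].max())            # longest window below the std threshold
start_opt = int(passing[passing[:, 0] == n_opt, 1].min())
print(n_opt, start_opt)                     # size and start of the selected region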
tensorflow/tensor2tensor
tensor2tensor/utils/data_reader.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/data_reader.py#L292-L307
def pad_batch(features, batch_multiple): """Pad batch dim of features to nearest multiple of batch_multiple.""" feature = list(features.items())[0][1] batch_size = tf.shape(feature)[0] mod = batch_size % batch_multiple has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32) batch_padding = batch_multiple * has_mod - mod padded_features = {} for k, feature in features.items(): rank = len(feature.shape) paddings = [[0, 0] for _ in range(rank)] paddings[0][1] = batch_padding padded_feature = tf.pad(feature, paddings) padded_features[k] = padded_feature return padded_features
[ "def", "pad_batch", "(", "features", ",", "batch_multiple", ")", ":", "feature", "=", "list", "(", "features", ".", "items", "(", ")", ")", "[", "0", "]", "[", "1", "]", "batch_size", "=", "tf", ".", "shape", "(", "feature", ")", "[", "0", "]", "mod", "=", "batch_size", "%", "batch_multiple", "has_mod", "=", "tf", ".", "cast", "(", "tf", ".", "cast", "(", "mod", ",", "tf", ".", "bool", ")", ",", "tf", ".", "int32", ")", "batch_padding", "=", "batch_multiple", "*", "has_mod", "-", "mod", "padded_features", "=", "{", "}", "for", "k", ",", "feature", "in", "features", ".", "items", "(", ")", ":", "rank", "=", "len", "(", "feature", ".", "shape", ")", "paddings", "=", "[", "[", "0", ",", "0", "]", "for", "_", "in", "range", "(", "rank", ")", "]", "paddings", "[", "0", "]", "[", "1", "]", "=", "batch_padding", "padded_feature", "=", "tf", ".", "pad", "(", "feature", ",", "paddings", ")", "padded_features", "[", "k", "]", "=", "padded_feature", "return", "padded_features" ]
Pad batch dim of features to nearest multiple of batch_multiple.
[ "Pad", "batch", "dim", "of", "features", "to", "nearest", "multiple", "of", "batch_multiple", "." ]
python
train
37.3125
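A plain-numpy mirror of the pad_batch logic above, useful for checking the arithmetic without a TensorFlow session; the feature names and shapes are made up, and this is an illustration rather than the tensor2tensor API:

import numpy as np

def pad_batch_np(features, batch_multiple):
    # Same arithmetic as the TF version: pad the batch dim up to the next multiple.
    batch_size = next(iter(features.values())).shape[0]
    mod = batch_size % batch_multiple
    batch_padding = batch_multiple * int(bool(mod)) - mod
    return {k: np.pad(v, [(0, batch_padding)] + [(0, 0)] * (v.ndim - 1))
            for k, v in features.items()}

feats = {"inputs": np.ones((5, 3)), "targets": np.ones((5, 7))}
padded = pad_batch_np(feats, 4)   # 5 % 4 == 1, so 3 zero rows are appended
print(padded["inputs"].shape, padded["targets"].shape)   # (8, 3) (8, 7)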
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/lib/deepreload.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/lib/deepreload.py#L262-L311
def deep_reload_hook(m): """Replacement for reload().""" if not isinstance(m, ModuleType): raise TypeError("reload() argument must be module") name = m.__name__ if name not in sys.modules: raise ImportError("reload(): module %.200s not in sys.modules" % name) global modules_reloading try: return modules_reloading[name] except: modules_reloading[name] = m dot = name.rfind('.') if dot < 0: subname = name path = None else: try: parent = sys.modules[name[:dot]] except KeyError: modules_reloading.clear() raise ImportError("reload(): parent %.200s not in sys.modules" % name[:dot]) subname = name[dot+1:] path = getattr(parent, "__path__", None) try: # This appears to be necessary on Python 3, because imp.find_module() # tries to import standard libraries (like io) itself, and we don't # want them to be processed by our deep_import_hook. with replace_import_hook(original_import): fp, filename, stuff = imp.find_module(subname, path) finally: modules_reloading.clear() try: newm = imp.load_module(name, fp, filename, stuff) except: # load_module probably removed name from modules because of # the error. Put back the original module object. sys.modules[name] = m raise finally: if fp: fp.close() modules_reloading.clear() return newm
[ "def", "deep_reload_hook", "(", "m", ")", ":", "if", "not", "isinstance", "(", "m", ",", "ModuleType", ")", ":", "raise", "TypeError", "(", "\"reload() argument must be module\"", ")", "name", "=", "m", ".", "__name__", "if", "name", "not", "in", "sys", ".", "modules", ":", "raise", "ImportError", "(", "\"reload(): module %.200s not in sys.modules\"", "%", "name", ")", "global", "modules_reloading", "try", ":", "return", "modules_reloading", "[", "name", "]", "except", ":", "modules_reloading", "[", "name", "]", "=", "m", "dot", "=", "name", ".", "rfind", "(", "'.'", ")", "if", "dot", "<", "0", ":", "subname", "=", "name", "path", "=", "None", "else", ":", "try", ":", "parent", "=", "sys", ".", "modules", "[", "name", "[", ":", "dot", "]", "]", "except", "KeyError", ":", "modules_reloading", ".", "clear", "(", ")", "raise", "ImportError", "(", "\"reload(): parent %.200s not in sys.modules\"", "%", "name", "[", ":", "dot", "]", ")", "subname", "=", "name", "[", "dot", "+", "1", ":", "]", "path", "=", "getattr", "(", "parent", ",", "\"__path__\"", ",", "None", ")", "try", ":", "# This appears to be necessary on Python 3, because imp.find_module()", "# tries to import standard libraries (like io) itself, and we don't", "# want them to be processed by our deep_import_hook.", "with", "replace_import_hook", "(", "original_import", ")", ":", "fp", ",", "filename", ",", "stuff", "=", "imp", ".", "find_module", "(", "subname", ",", "path", ")", "finally", ":", "modules_reloading", ".", "clear", "(", ")", "try", ":", "newm", "=", "imp", ".", "load_module", "(", "name", ",", "fp", ",", "filename", ",", "stuff", ")", "except", ":", "# load_module probably removed name from modules because of", "# the error. Put back the original module object.", "sys", ".", "modules", "[", "name", "]", "=", "m", "raise", "finally", ":", "if", "fp", ":", "fp", ".", "close", "(", ")", "modules_reloading", ".", "clear", "(", ")", "return", "newm" ]
Replacement for reload().
[ "Replacement", "for", "reload", "()", "." ]
python
test
29.68
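This hook is normally reached through IPython's public wrapper rather than called directly; a minimal usage sketch, where the module name is a hypothetical placeholder:

from IPython.lib.deepreload import reload as dreload
import mypackage.utils   # hypothetical module used for illustration

# Unlike the builtin reload(), this recursively reloads mypackage.utils
# and every module it imports.
dreload(mypackage.utils)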
SmartTeleMax/iktomi
iktomi/auth.py
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/auth.py#L33-L40
def check_password(raw_password, enc_password): """ Returns a boolean of whether the raw_password was correct. Handles encryption formats behind the scenes. """ algo, salt, hsh = enc_password.split('$') return enc_password == encrypt_password(raw_password, algorithm=algo, salt=salt)
[ "def", "check_password", "(", "raw_password", ",", "enc_password", ")", ":", "algo", ",", "salt", ",", "hsh", "=", "enc_password", ".", "split", "(", "'$'", ")", "return", "enc_password", "==", "encrypt_password", "(", "raw_password", ",", "algorithm", "=", "algo", ",", "salt", "=", "salt", ")" ]
Returns a boolean of whether the raw_password was correct. Handles encryption formats behind the scenes.
[ "Returns", "a", "boolean", "of", "whether", "the", "raw_password", "was", "correct", ".", "Handles", "encryption", "formats", "behind", "the", "scenes", "." ]
python
train
43
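A usage sketch for check_password above; it assumes the companion encrypt_password from the same iktomi.auth module, and the algorithm and salt shown are illustrative. Stored values follow the 'algorithm$salt$hash' layout that the split('$') relies on:

from iktomi.auth import check_password, encrypt_password

stored = encrypt_password('s3cret', algorithm='sha1', salt='abc123')
# stored looks like 'sha1$abc123$<hexdigest>'
print(check_password('s3cret', stored))   # True
print(check_password('wrong', stored))    # False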
spatialaudio/python-pa-ringbuffer
src/pa_ringbuffer.py
https://github.com/spatialaudio/python-pa-ringbuffer/blob/b4a5eaa9b53a437c05d196ed59e1791db159e4b0/src/pa_ringbuffer.py#L177-L198
def readinto(self, data): """Read data from the ring buffer into a user-provided buffer. This advances the read index after reading; calling :meth:`advance_read_index` is *not* necessary. :param data: The memory where the data should be stored. :type data: CData pointer or buffer :returns: The number of elements read, which may be less than the size of *data*. :rtype: int """ try: data = self._ffi.from_buffer(data) except TypeError: pass # input is not a buffer size, rest = divmod(self._ffi.sizeof(data), self.elementsize) if rest: raise ValueError('data size must be multiple of elementsize') return self._lib.PaUtil_ReadRingBuffer(self._ptr, data, size)
[ "def", "readinto", "(", "self", ",", "data", ")", ":", "try", ":", "data", "=", "self", ".", "_ffi", ".", "from_buffer", "(", "data", ")", "except", "TypeError", ":", "pass", "# input is not a buffer", "size", ",", "rest", "=", "divmod", "(", "self", ".", "_ffi", ".", "sizeof", "(", "data", ")", ",", "self", ".", "elementsize", ")", "if", "rest", ":", "raise", "ValueError", "(", "'data size must be multiple of elementsize'", ")", "return", "self", ".", "_lib", ".", "PaUtil_ReadRingBuffer", "(", "self", ".", "_ptr", ",", "data", ",", "size", ")" ]
Read data from the ring buffer into a user-provided buffer. This advances the read index after reading; calling :meth:`advance_read_index` is *not* necessary. :param data: The memory where the data should be stored. :type data: CData pointer or buffer :returns: The number of elements read, which may be less than the size of *data*. :rtype: int
[ "Read", "data", "from", "the", "ring", "buffer", "into", "a", "user", "-", "provided", "buffer", "." ]
python
train
36.181818
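A sketch of reading from the ring buffer into a numpy array. Constructing the ring buffer is CFFI- and application-specific and is assumed to have happened elsewhere (rb below is such an object with elementsize=4); the destination must span a whole number of elements or ValueError is raised:

import numpy as np

# rb: an existing ring buffer with 4-byte elements (e.g. float32 samples).
out = np.zeros(256, dtype='float32')   # 256 * 4 bytes -> exactly 256 elements
n = rb.readinto(out)                   # the read index is advanced automatically
print(n, 'elements read; valid data is out[:n]')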
RaRe-Technologies/smart_open
smart_open/smart_open_lib.py
https://github.com/RaRe-Technologies/smart_open/blob/2dc8d60f223fc7b00a2000c56362a7bd6cd0850e/smart_open/smart_open_lib.py#L658-L719
def _parse_uri(uri_as_string): """ Parse the given URI from a string. Supported URI schemes are: * file * hdfs * http * https * s3 * s3a * s3n * s3u * webhdfs .s3, s3a and s3n are treated the same way. s3u is s3 but without SSL. Valid URI examples:: * s3://my_bucket/my_key * s3://my_key:my_secret@my_bucket/my_key * s3://my_key:my_secret@my_server:my_port@my_bucket/my_key * hdfs:///path/file * hdfs://path/file * webhdfs://host:port/path/file * ./local/path/file * ~/local/path/file * local/path/file * ./local/path/file.gz * file:///home/user/file * file:///home/user/file.bz2 * [ssh|scp|sftp]://username@host//path/file * [ssh|scp|sftp]://username@host/path/file """ if os.name == 'nt': # urlsplit doesn't work on Windows -- it parses the drive as the scheme... if '://' not in uri_as_string: # no protocol given => assume a local file uri_as_string = 'file://' + uri_as_string parsed_uri = _my_urlsplit(uri_as_string) if parsed_uri.scheme == "hdfs": return _parse_uri_hdfs(parsed_uri) elif parsed_uri.scheme == "webhdfs": return _parse_uri_webhdfs(parsed_uri) elif parsed_uri.scheme in smart_open_s3.SUPPORTED_SCHEMES: return _parse_uri_s3x(parsed_uri) elif parsed_uri.scheme == 'file': return _parse_uri_file(parsed_uri.netloc + parsed_uri.path) elif parsed_uri.scheme in ('', None): return _parse_uri_file(uri_as_string) elif parsed_uri.scheme.startswith('http'): return Uri(scheme=parsed_uri.scheme, uri_path=uri_as_string) elif parsed_uri.scheme in smart_open_ssh.SCHEMES: return _parse_uri_ssh(parsed_uri) else: raise NotImplementedError( "unknown URI scheme %r in %r" % (parsed_uri.scheme, uri_as_string) )
[ "def", "_parse_uri", "(", "uri_as_string", ")", ":", "if", "os", ".", "name", "==", "'nt'", ":", "# urlsplit doesn't work on Windows -- it parses the drive as the scheme...", "if", "'://'", "not", "in", "uri_as_string", ":", "# no protocol given => assume a local file", "uri_as_string", "=", "'file://'", "+", "uri_as_string", "parsed_uri", "=", "_my_urlsplit", "(", "uri_as_string", ")", "if", "parsed_uri", ".", "scheme", "==", "\"hdfs\"", ":", "return", "_parse_uri_hdfs", "(", "parsed_uri", ")", "elif", "parsed_uri", ".", "scheme", "==", "\"webhdfs\"", ":", "return", "_parse_uri_webhdfs", "(", "parsed_uri", ")", "elif", "parsed_uri", ".", "scheme", "in", "smart_open_s3", ".", "SUPPORTED_SCHEMES", ":", "return", "_parse_uri_s3x", "(", "parsed_uri", ")", "elif", "parsed_uri", ".", "scheme", "==", "'file'", ":", "return", "_parse_uri_file", "(", "parsed_uri", ".", "netloc", "+", "parsed_uri", ".", "path", ")", "elif", "parsed_uri", ".", "scheme", "in", "(", "''", ",", "None", ")", ":", "return", "_parse_uri_file", "(", "uri_as_string", ")", "elif", "parsed_uri", ".", "scheme", ".", "startswith", "(", "'http'", ")", ":", "return", "Uri", "(", "scheme", "=", "parsed_uri", ".", "scheme", ",", "uri_path", "=", "uri_as_string", ")", "elif", "parsed_uri", ".", "scheme", "in", "smart_open_ssh", ".", "SCHEMES", ":", "return", "_parse_uri_ssh", "(", "parsed_uri", ")", "else", ":", "raise", "NotImplementedError", "(", "\"unknown URI scheme %r in %r\"", "%", "(", "parsed_uri", ".", "scheme", ",", "uri_as_string", ")", ")" ]
Parse the given URI from a string. Supported URI schemes are: * file * hdfs * http * https * s3 * s3a * s3n * s3u * webhdfs * ssh * scp * sftp s3, s3a and s3n are treated the same way. s3u is s3 but without SSL. Valid URI examples:: * s3://my_bucket/my_key * s3://my_key:my_secret@my_bucket/my_key * s3://my_key:my_secret@my_server:my_port@my_bucket/my_key * hdfs:///path/file * hdfs://path/file * webhdfs://host:port/path/file * ./local/path/file * ~/local/path/file * local/path/file * ./local/path/file.gz * file:///home/user/file * file:///home/user/file.bz2 * [ssh|scp|sftp]://username@host//path/file * [ssh|scp|sftp]://username@host/path/file
[ "Parse", "the", "given", "URI", "from", "a", "string", "." ]
python
train
30.5
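The scheme dispatch performed by _parse_uri can be illustrated with the standard library alone. The sketch below reproduces only the routing decision, not smart_open's Uri objects or the Windows drive-letter special case:

from urllib.parse import urlsplit

def classify(uri):
    # Mirrors the dispatch order of _parse_uri (illustration only).
    scheme = urlsplit(uri).scheme
    if scheme == 'hdfs':
        return 'hdfs'
    if scheme == 'webhdfs':
        return 'webhdfs'
    if scheme in ('s3', 's3a', 's3n', 's3u'):
        return 's3'
    if scheme in ('', 'file'):
        return 'local file'
    if scheme.startswith('http'):
        return 'http(s)'
    if scheme in ('ssh', 'scp', 'sftp'):
        return 'ssh'
    raise NotImplementedError('unknown URI scheme %r' % scheme)

for uri in ('s3://my_bucket/my_key', '~/local/path/file.gz', 'webhdfs://host:50070/path/file'):
    print(uri, '->', classify(uri))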
rene-aguirre/pywinusb
pywinusb/hid/winapi.py
https://github.com/rene-aguirre/pywinusb/blob/954c4b2105d9f01cb0c50e24500bb747d4ecdc43/pywinusb/hid/winapi.py#L443-L454
def open(self): """ Calls SetupDiGetClassDevs to obtain a handle to an opaque device information set that describes the device interfaces supported by all the USB collections currently installed in the system. The application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE in the Flags parameter passed to SetupDiGetClassDevs. """ self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None, (DIGCF.PRESENT | DIGCF.DEVICEINTERFACE) ) return self.h_info
[ "def", "open", "(", "self", ")", ":", "self", ".", "h_info", "=", "SetupDiGetClassDevs", "(", "byref", "(", "self", ".", "guid", ")", ",", "None", ",", "None", ",", "(", "DIGCF", ".", "PRESENT", "|", "DIGCF", ".", "DEVICEINTERFACE", ")", ")", "return", "self", ".", "h_info" ]
Calls SetupDiGetClassDevs to obtain a handle to an opaque device information set that describes the device interfaces supported by all the USB collections currently installed in the system. The application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE in the Flags parameter passed to SetupDiGetClassDevs.
[ "Calls", "SetupDiGetClassDevs", "to", "obtain", "a", "handle", "to", "an", "opaque", "device", "information", "set", "that", "describes", "the", "device", "interfaces", "supported", "by", "all", "the", "USB", "collections", "currently", "installed", "in", "the", "system", ".", "The", "application", "should", "specify", "DIGCF", ".", "PRESENT", "and", "DIGCF", ".", "INTERFACEDEVICE", "in", "the", "Flags", "parameter", "passed", "to", "SetupDiGetClassDevs", "." ]
python
train
46
dcramer/peek
peek/tracer.py
https://github.com/dcramer/peek/blob/da7c086660fc870c6632c4dc5ccb2ff9bfbee52e/peek/tracer.py#L237-L248
def start(self, origin): """ Start this Tracer. Return a Python function suitable for use with sys.settrace(). """ self.start_time = time.time() self.pause_until = None self.data.update(self._get_struct(origin, 'origin')) self.data_stack.append(self.data) sys.settrace(self._trace) return self._trace
[ "def", "start", "(", "self", ",", "origin", ")", ":", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "pause_until", "=", "None", "self", ".", "data", ".", "update", "(", "self", ".", "_get_struct", "(", "origin", ",", "'origin'", ")", ")", "self", ".", "data_stack", ".", "append", "(", "self", ".", "data", ")", "sys", ".", "settrace", "(", "self", ".", "_trace", ")", "return", "self", ".", "_trace" ]
Start this Tracer. Return a Python function suitable for use with sys.settrace().
[ "Start", "this", "Tracer", "." ]
python
train
30.833333
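Tracer.start is a thin wrapper around the standard sys.settrace mechanism; the following self-contained sketch shows that underlying pattern (it is not peek's Tracer, and records only function-call events):

import sys

events = []

def _trace(frame, event, arg):
    # Record each Python-level function entry; returning _trace keeps tracing nested calls.
    if event == 'call':
        events.append(frame.f_code.co_name)
    return _trace

def work():
    return sum(range(10))

sys.settrace(_trace)   # equivalent to what Tracer.start() does with its own _trace
work()
sys.settrace(None)     # always uninstall the hook when finished
print(events)          # ['work'] -- builtins like sum() do not emit 'call' events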
rgalanakis/goless
write_benchresults.py
https://github.com/rgalanakis/goless/blob/286cd69482ae5a56c899a0c0d5d895772d96e83d/write_benchresults.py#L86-L98
def insert_seperator_results(results): """Given a sequence of BenchmarkResults, return a new sequence where a "seperator" BenchmarkResult has been placed between differing benchmarks to provide a visual difference.""" sepbench = BenchmarkResult(*[' ' * w for w in COLUMN_WIDTHS]) last_bm = None for r in results: if last_bm is None: last_bm = r.benchmark elif last_bm != r.benchmark: yield sepbench last_bm = r.benchmark yield r
[ "def", "insert_seperator_results", "(", "results", ")", ":", "sepbench", "=", "BenchmarkResult", "(", "*", "[", "' '", "*", "w", "for", "w", "in", "COLUMN_WIDTHS", "]", ")", "last_bm", "=", "None", "for", "r", "in", "results", ":", "if", "last_bm", "is", "None", ":", "last_bm", "=", "r", ".", "benchmark", "elif", "last_bm", "!=", "r", ".", "benchmark", ":", "yield", "sepbench", "last_bm", "=", "r", ".", "benchmark", "yield", "r" ]
Given a sequence of BenchmarkResults, return a new sequence where a "seperator" BenchmarkResult has been placed between differing benchmarks to provide a visual difference.
[ "Given", "a", "sequence", "of", "BenchmarkResults", "return", "a", "new", "sequence", "where", "a", "seperator", "BenchmarkResult", "has", "been", "placed", "between", "differing", "benchmarks", "to", "provide", "a", "visual", "difference", "." ]
python
train
38.461538
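A self-contained illustration of the separator-insertion generator above. The two-field Result tuple and the fixed-width blank row are simplified stand-ins for the module's BenchmarkResult and COLUMN_WIDTHS:

from collections import namedtuple

Result = namedtuple('Result', 'benchmark time')          # simplified stand-in

def insert_separators(results, blank=Result(' ' * 10, ' ' * 8)):
    last = None
    for r in results:
        if last is not None and last != r.benchmark:
            yield blank                                  # visual break between groups
        last = r.benchmark
        yield r

rows = [Result('chan', '1.2'), Result('chan', '1.3'), Result('select', '0.9')]
for row in insert_separators(rows):
    print(row)
# The blank row is emitted once, between the 'chan' group and the 'select' group.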
radjkarl/imgProcessor
imgProcessor/camera/CameraCalibration.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/CameraCalibration.py#L590-L611
def getCoeff(self, name, light=None, date=None): ''' try to get calibration for right light source, but use another if they is none existent ''' d = self.coeffs[name] try: c = d[light] except KeyError: try: k, i = next(iter(d.items())) if light is not None: print( 'no calibration found for [%s] - using [%s] instead' % (light, k)) except StopIteration: return None c = i except TypeError: # coeff not dependent on light source c = d return _getFromDate(c, date)
[ "def", "getCoeff", "(", "self", ",", "name", ",", "light", "=", "None", ",", "date", "=", "None", ")", ":", "d", "=", "self", ".", "coeffs", "[", "name", "]", "try", ":", "c", "=", "d", "[", "light", "]", "except", "KeyError", ":", "try", ":", "k", ",", "i", "=", "next", "(", "iter", "(", "d", ".", "items", "(", ")", ")", ")", "if", "light", "is", "not", "None", ":", "print", "(", "'no calibration found for [%s] - using [%s] instead'", "%", "(", "light", ",", "k", ")", ")", "except", "StopIteration", ":", "return", "None", "c", "=", "i", "except", "TypeError", ":", "# coeff not dependent on light source\r", "c", "=", "d", "return", "_getFromDate", "(", "c", ",", "date", ")" ]
Try to get the calibration for the right light source, but use another one if it does not exist.
[ "Try", "to", "get", "the", "calibration", "for", "the", "right", "light", "source", "but", "use", "another", "one", "if", "it", "does", "not", "exist", "." ]
python
train
31.772727
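The fallback behaviour of getCoeff above reduces to a dictionary lookup that degrades gracefully when the requested light source has no calibration; this sketch shows the same pattern with plain dicts and made-up coefficient values:

coeffs = {'flatField': {'led': 1.02, 'halogen': 0.97}}   # illustrative values

def get_coeff(d, light):
    try:
        return d[light]
    except KeyError:
        # No entry for this light source: fall back to whichever calibration exists.
        k, v = next(iter(d.items()))
        print('no calibration found for [%s] - using [%s] instead' % (light, k))
        return v
    except TypeError:
        return d                                         # not light-source dependent

print(get_coeff(coeffs['flatField'], 'led'))             # 1.02
print(get_coeff(coeffs['flatField'], 'xenon'))           # falls back with a warning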
cqparts/cqparts
src/cqparts/codec/gltf.py
https://github.com/cqparts/cqparts/blob/018e87e14c2c4d1d40b4bfe6a7e22bcf9baf0a53/src/cqparts/codec/gltf.py#L223-L245
def read(self): """ Read buffer out as a single stream. .. warning:: Avoid using this function! **Why?** This is a *convenience* function; it doesn't encourage good memory management. All memory required for a mesh is duplicated, and returned as a single :class:`str`. So at best, using this function will double the memory required for a single model. **Instead:** Wherever possible, please use :meth:`buffer_iter`. """ buffer = BytesIO() for chunk in self.buffer_iter(): log.debug('buffer.write(%r)', chunk) buffer.write(chunk) buffer.seek(0) return buffer.read()
[ "def", "read", "(", "self", ")", ":", "buffer", "=", "BytesIO", "(", ")", "for", "chunk", "in", "self", ".", "buffer_iter", "(", ")", ":", "log", ".", "debug", "(", "'buffer.write(%r)'", ",", "chunk", ")", "buffer", ".", "write", "(", "chunk", ")", "buffer", ".", "seek", "(", "0", ")", "return", "buffer", ".", "read", "(", ")" ]
Read buffer out as a single stream. .. warning:: Avoid using this function! **Why?** This is a *convenience* function; it doesn't encourage good memory management. All memory required for a mesh is duplicated, and returned as a single :class:`str`. So at best, using this function will double the memory required for a single model. **Instead:** Wherever possible, please use :meth:`buffer_iter`.
[ "Read", "buffer", "out", "as", "a", "single", "stream", "." ]
python
train
31.304348
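The memory trade-off described in the docstring above is easiest to see side by side. In this sketch, buf is assumed to be one of these buffer objects exposing both read() and buffer_iter(), and 'model.bin' is an illustrative output path:

# Convenience form: every chunk is copied into one in-memory blob first.
blob = buf.read()
with open('model.bin', 'wb') as f:
    f.write(blob)

# Preferred form: stream chunk by chunk, holding only one chunk at a time.
with open('model.bin', 'wb') as f:
    for chunk in buf.buffer_iter():
        f.write(chunk)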
soldag/python-pwmled
pwmled/led/__init__.py
https://github.com/soldag/python-pwmled/blob/09cde36ecc0153fa81dc2a1b9bb07d1c0e418c8c/pwmled/led/__init__.py#L78-L85
def _update_pwm(self): """Update the pwm values of the driver regarding the current state.""" if self._is_on: values = self._get_pwm_values() else: values = [0] * len(self._driver.pins) self._driver.set_pwm(values)
[ "def", "_update_pwm", "(", "self", ")", ":", "if", "self", ".", "_is_on", ":", "values", "=", "self", ".", "_get_pwm_values", "(", ")", "else", ":", "values", "=", "[", "0", "]", "*", "len", "(", "self", ".", "_driver", ".", "pins", ")", "self", ".", "_driver", ".", "set_pwm", "(", "values", ")" ]
Update the pwm values of the driver according to the current state.
[ "Update", "the", "pwm", "values", "of", "the", "driver", "according", "to", "the", "current", "state", "." ]
python
train
33
PMEAL/OpenPNM
openpnm/models/phases/surface_tension.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/models/phases/surface_tension.py#L58-L91
def eotvos(target, k, temperature='pore.temperature', critical_temperature='pore.critical_temperature', molar_density='pore.molar_density'): r""" Missing description Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. k : float Constant parameter specific to fluid temperature : string The dictionary key containing the temperature values (K) critical_temperature : string The dictionary key containing the critical temperature values (K) molar_density : string The dictionary key containing the molar density values (K) TODO: Needs description, and improve definition of k """ Tc = target[critical_temperature] T = target[temperature] Vm = 1/target[molar_density] value = k*(Tc-T)/(Vm**(2/3)) return value
[ "def", "eotvos", "(", "target", ",", "k", ",", "temperature", "=", "'pore.temperature'", ",", "critical_temperature", "=", "'pore.critical_temperature'", ",", "molar_density", "=", "'pore.molar_density'", ")", ":", "Tc", "=", "target", "[", "critical_temperature", "]", "T", "=", "target", "[", "temperature", "]", "Vm", "=", "1", "/", "target", "[", "molar_density", "]", "value", "=", "k", "*", "(", "Tc", "-", "T", ")", "/", "(", "Vm", "**", "(", "2", "/", "3", ")", ")", "return", "value" ]
r""" Estimates surface tension from the Eotvos relation. The returned value is sigma = k*(Tc - T)/Vm**(2/3), where Tc is the critical temperature, T the temperature and Vm the molar volume (the reciprocal of the molar density). Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. k : float Constant parameter specific to fluid temperature : string The dictionary key containing the temperature values (K) critical_temperature : string The dictionary key containing the critical temperature values (K) molar_density : string The dictionary key containing the molar density values TODO: Improve the definition of k
[ "r", "Estimates", "surface", "tension", "from", "the", "Eotvos", "relation", "." ]
python
train
29.176471
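A worked evaluation of the relation computed by eotvos above, sigma = k*(Tc - T)/Vm**(2/3). The constant and fluid properties below are illustrative stand-ins rather than calibrated values:

k = 2.1e-7                # Eotvos-style constant, J / (K * mol**(2/3)) -- illustrative
Tc = 647.0                # critical temperature, K
T = 298.0                 # temperature, K
molar_density = 5.5e4     # mol / m**3, so the molar volume Vm = 1 / molar_density
Vm = 1.0 / molar_density

sigma = k * (Tc - T) / Vm ** (2 / 3)
print(sigma)              # ~0.1 N/m for these illustrative numbers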
vaexio/vaex
packages/vaex-arrow/vaex_arrow/convert.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-arrow/vaex_arrow/convert.py#L81-L88
def arrow_table_from_vaex_df(ds, column_names=None, selection=None, strings=True, virtual=False): """Implementation of Dataset.to_arrow_table""" names = [] arrays = [] for name, array in ds.to_items(column_names=column_names, selection=selection, strings=strings, virtual=virtual): names.append(name) arrays.append(arrow_array_from_numpy_array(array)) return pyarrow.Table.from_arrays(arrays, names)
[ "def", "arrow_table_from_vaex_df", "(", "ds", ",", "column_names", "=", "None", ",", "selection", "=", "None", ",", "strings", "=", "True", ",", "virtual", "=", "False", ")", ":", "names", "=", "[", "]", "arrays", "=", "[", "]", "for", "name", ",", "array", "in", "ds", ".", "to_items", "(", "column_names", "=", "column_names", ",", "selection", "=", "selection", ",", "strings", "=", "strings", ",", "virtual", "=", "virtual", ")", ":", "names", ".", "append", "(", "name", ")", "arrays", ".", "append", "(", "arrow_array_from_numpy_array", "(", "array", ")", ")", "return", "pyarrow", ".", "Table", ".", "from_arrays", "(", "arrays", ",", "names", ")" ]
Implementation of Dataset.to_arrow_table
[ "Implementation", "of", "Dataset", ".", "to_arrow_table" ]
python
test
53.5
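The conversion above ends by handing arrow arrays and column names to pyarrow.Table.from_arrays; the same call can be exercised without vaex, as in this sketch with made-up columns:

import numpy as np
import pyarrow as pa

names = ['x', 'y']
arrays = [pa.array(np.arange(5)), pa.array(np.linspace(0.0, 1.0, 5))]
table = pa.Table.from_arrays(arrays, names)   # the call the function finishes with
print(table.num_rows, table.column_names)     # 5 ['x', 'y']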
sentinel-hub/eo-learn
ml_tools/eolearn/ml_tools/classifier.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/ml_tools/eolearn/ml_tools/classifier.py#L418-L447
def image_predict_proba(self, X): """ Predicts class probabilities for the entire image. Parameters: ----------- X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands] Array of training images y: array, shape = [n_samples] or [n_samples, n_pixels_x, n_pixels_y, n_classes] Target probabilities """ self._check_image(X) probabilities = self.pixel_classifier.image_predict_proba(X) patches, _ = self._to_patches(probabilities) row_steps = self._image_size[0] // self.patch_size[0] col_steps = self._image_size[1] // self.patch_size[1] ps = self.patch_size[0] * self.patch_size[1] # how can this be optimised? for i, j, k in itertools.product(range(row_steps), range(col_steps), range(self._samples)): patches[k, i, j, 0] = np.sum(patches[k, i, j, 0]) / ps patches[k, i, j, 1] = np.sum(patches[k, i, j, 1]) / ps return probabilities
[ "def", "image_predict_proba", "(", "self", ",", "X", ")", ":", "self", ".", "_check_image", "(", "X", ")", "probabilities", "=", "self", ".", "pixel_classifier", ".", "image_predict_proba", "(", "X", ")", "patches", ",", "_", "=", "self", ".", "_to_patches", "(", "probabilities", ")", "row_steps", "=", "self", ".", "_image_size", "[", "0", "]", "//", "self", ".", "patch_size", "[", "0", "]", "col_steps", "=", "self", ".", "_image_size", "[", "1", "]", "//", "self", ".", "patch_size", "[", "1", "]", "ps", "=", "self", ".", "patch_size", "[", "0", "]", "*", "self", ".", "patch_size", "[", "1", "]", "# how can this be optimised?\r", "for", "i", ",", "j", ",", "k", "in", "itertools", ".", "product", "(", "range", "(", "row_steps", ")", ",", "range", "(", "col_steps", ")", ",", "range", "(", "self", ".", "_samples", ")", ")", ":", "patches", "[", "k", ",", "i", ",", "j", ",", "0", "]", "=", "np", ".", "sum", "(", "patches", "[", "k", ",", "i", ",", "j", ",", "0", "]", ")", "/", "ps", "patches", "[", "k", ",", "i", ",", "j", ",", "1", "]", "=", "np", ".", "sum", "(", "patches", "[", "k", ",", "i", ",", "j", ",", "1", "]", ")", "/", "ps", "return", "probabilities" ]
Predicts class probabilities for the entire image. Parameters: ----------- X: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_bands] Array of images to classify Returns: -------- probabilities: array, shape = [n_samples, n_pixels_x, n_pixels_y, n_classes] Predicted class probabilities
[ "Predicts", "class", "probabilities", "for", "the", "entire", "image", ".", "Parameters", ":", "-----------", "X", ":", "array", "shape", "=", "[", "n_samples", "n_pixels_x", "n_pixels_y", "n_bands", "]", "Array", "of", "images", "to", "classify", "Returns", ":", "--------", "probabilities", ":", "array", "shape", "=", "[", "n_samples", "n_pixels_x", "n_pixels_y", "n_classes", "]", "Predicted", "class", "probabilities" ]
python
train
34.066667
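The per-patch averaging loop in image_predict_proba above is equivalent to mean-pooling the per-pixel class probabilities over each patch; this numpy sketch shows a vectorised form for made-up shapes (4x4 images, 2x2 patches, 2 classes):

import numpy as np

rng = np.random.default_rng(0)
probs = rng.random((3, 4, 4, 2))              # [n_samples, n_pixels_x, n_pixels_y, n_classes]
probs /= probs.sum(axis=-1, keepdims=True)    # make the two class probabilities sum to 1

ph, pw = 2, 2                                 # patch size
n, H, W, C = probs.shape
patched = probs.reshape(n, H // ph, ph, W // pw, pw, C)
patch_means = patched.mean(axis=(2, 4))       # mean probability per patch
print(patch_means.shape)                      # (3, 2, 2, 2)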