Columns:
  text          string   (lengths 89 to 104k)
  avg_line_len  float64  (7.91 to 980)
  score         float64  (0 to 630)
Each record below is the code snippet (text) followed by its avg_line_len and score.
def update_subscription(self, update_parameters, subscription_id):
    """UpdateSubscription.
    [Preview API] Update an existing subscription. Depending on the type of subscription and permissions,
    the caller can update the description, filter settings, channel (delivery) settings and more.
    :param :class:`<NotificationSubscriptionUpdateParameters> <azure.devops.v5_0.notification.models.NotificationSubscriptionUpdateParameters>` update_parameters:
    :param str subscription_id:
    :rtype: :class:`<NotificationSubscription> <azure.devops.v5_0.notification.models.NotificationSubscription>`
    """
    route_values = {}
    if subscription_id is not None:
        route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
    content = self._serialize.body(update_parameters, 'NotificationSubscriptionUpdateParameters')
    response = self._send(http_method='PATCH',
                          location_id='70f911d6-abac-488c-85b3-a206bf57e165',
                          version='5.0-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('NotificationSubscription', response)
[ "def", "update_subscription", "(", "self", ",", "update_parameters", ",", "subscription_id", ")", ":", "route_values", "=", "{", "}", "if", "subscription_id", "is", "not", "None", ":", "route_values", "[", "'subscriptionId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'subscription_id'", ",", "subscription_id", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "update_parameters", ",", "'NotificationSubscriptionUpdateParameters'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'70f911d6-abac-488c-85b3-a206bf57e165'", ",", "version", "=", "'5.0-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'NotificationSubscription'", ",", "response", ")" ]
avg_line_len: 73.882353 | score: 38.647059
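A hypothetical invocation of this client method, assuming the azure-devops package's connection and client factory; the organization URL, personal access token, and subscription id below are placeholders:

from azure.devops.connection import Connection
from azure.devops.v5_0.notification.models import NotificationSubscriptionUpdateParameters
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', 'my-personal-access-token'))
client = connection.clients.get_notification_client()

# PATCH only the fields set on the update-parameters object.
params = NotificationSubscriptionUpdateParameters(description='Nightly build alerts')
updated = client.update_subscription(params, 'my-subscription-id')
print(updated.id, updated.description)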
def _subArrayShape(self):
    """ Returns the shape of the sub-array.
        An empty tuple is returned for regular fields, which have no sub array.
    """
    fieldName = self.nodeName
    fieldDtype = self._array.dtype.fields[fieldName][0]
    return fieldDtype.shape
[ "def", "_subArrayShape", "(", "self", ")", ":", "fieldName", "=", "self", ".", "nodeName", "fieldDtype", "=", "self", ".", "_array", ".", "dtype", ".", "fields", "[", "fieldName", "]", "[", "0", "]", "return", "fieldDtype", ".", "shape" ]
avg_line_len: 41.285714 | score: 13.285714
def get_backend_setting(cls, name, default=None):
    """
    Reads ``name`` setting from backend settings dictionary.

    If `default` value is omitted, raises ``ImproperlyConfigured`` when
    setting ``name`` is not available.
    """
    backend_settings = get_backend_settings(cls.BACKEND)
    if default is not None:
        return backend_settings.get(name, default)
    else:
        try:
            return backend_settings[name]
        except KeyError:
            raise ImproperlyConfigured("getpaid '%s' requires backend '%s' setting" % (cls.BACKEND, name))
[ "def", "get_backend_setting", "(", "cls", ",", "name", ",", "default", "=", "None", ")", ":", "backend_settings", "=", "get_backend_settings", "(", "cls", ".", "BACKEND", ")", "if", "default", "is", "not", "None", ":", "return", "backend_settings", ".", "get", "(", "name", ",", "default", ")", "else", ":", "try", ":", "return", "backend_settings", "[", "name", "]", "except", "KeyError", ":", "raise", "ImproperlyConfigured", "(", "\"getpaid '%s' requires backend '%s' setting\"", "%", "(", "cls", ".", "BACKEND", ",", "name", ")", ")" ]
avg_line_len: 40.6 | score: 19.4
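A minimal, self-contained sketch of how this accessor behaves; the backend name, settings values, and the stand-in get_backend_settings/ImproperlyConfigured below are hypothetical, replacing django-getpaid's real ones:

class ImproperlyConfigured(Exception):
    pass

def get_backend_settings(backend):
    # hypothetical per-backend settings, normally read from Django settings
    return {'getpaid.backends.payu': {'pos_id': 123}}.get(backend, {})

class PayUBackend:
    BACKEND = 'getpaid.backends.payu'

    @classmethod
    def get_backend_setting(cls, name, default=None):
        backend_settings = get_backend_settings(cls.BACKEND)
        if default is not None:
            return backend_settings.get(name, default)
        try:
            return backend_settings[name]
        except KeyError:
            raise ImproperlyConfigured(
                "getpaid '%s' requires backend '%s' setting" % (cls.BACKEND, name))

print(PayUBackend.get_backend_setting('pos_id'))      # 123
print(PayUBackend.get_backend_setting('missing', 0))  # 0 (default wins)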
def _set_route_distinguisher(self, v, load=False):
    """
    Setter method for route_distinguisher, mapped from YANG variable /rbridge_id/evpn_instance/route_distinguisher (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_route_distinguisher is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_route_distinguisher() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name="route-distinguisher", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """route_distinguisher must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name="route-distinguisher", rest_name="rd", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""",
        })

    self.__route_distinguisher = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_route_distinguisher", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "route_distinguisher", ".", "route_distinguisher", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"route-distinguisher\"", ",", "rest_name", "=", "\"rd\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Configure Route distinguisher'", ",", "u'cli-compact-syntax'", ":", "None", ",", "u'alt-name'", ":", "u'rd'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'cli-incomplete-command'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-bgp'", ",", "defining_module", "=", "'brocade-bgp'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"route_distinguisher must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=route_distinguisher.route_distinguisher, is_container='container', presence=False, yang_name=\"route-distinguisher\", rest_name=\"rd\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route distinguisher', u'cli-compact-syntax': None, u'alt-name': u'rd', u'cli-incomplete-no': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__route_distinguisher", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
avg_line_len: 87.545455 | score: 41.318182
def singleton(the_class):
    """ Decorator for a class to make a singleton out of it.

    @type the_class: class
    @param the_class: the class that should work as a singleton
    @rtype: decorator
    @return: decorator
    """
    class_instances = {}

    def get_instance(*args, **kwargs):
        """ Create or just return the one and only class instance.

        The singleton depends on the parameters used in __init__
        @type args: list
        @param args: positional arguments of the constructor.
        @type kwargs: dict
        @param kwargs: named parameters of the constructor.
        @rtype: decorated class type
        @return: singleton instance of decorated class.
        """
        key = (the_class, args, str(kwargs))
        if key not in class_instances:
            class_instances[key] = the_class(*args, **kwargs)
        return class_instances[key]

    return get_instance
[ "def", "singleton", "(", "the_class", ")", ":", "class_instances", "=", "{", "}", "def", "get_instance", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"\n Creating or just return the one and only class instance.\n\n The singleton depends on the parameters used in __init__\n @type args: list\n @param args: positional arguments of the constructor.\n @type kwargs: dict\n @param kwargs: named parameters of the constructor.\n @rtype: decorated class type\n @return: singleton instance of decorated class.\n \"\"\"", "key", "=", "(", "the_class", ",", "args", ",", "str", "(", "kwargs", ")", ")", "if", "key", "not", "in", "class_instances", ":", "class_instances", "[", "key", "]", "=", "the_class", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "class_instances", "[", "key", "]", "return", "get_instance" ]
avg_line_len: 31.068966 | score: 16.172414
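A short usage check for the decorator above; note that instances are keyed on the constructor arguments, so different arguments yield different "singletons" (assuming singleton is in scope):

@singleton
class Config:
    def __init__(self, path):
        self.path = path

a = Config('/etc/app.conf')
b = Config('/etc/app.conf')
c = Config('/tmp/other.conf')
assert a is b       # same args -> same cached instance
assert a is not c   # different args -> a separate instance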
def _write_function(schema):
    """Add a write method for named schema to a class.
    """
    def func(
            data,
            filename=None,
            schema=schema,
            id_col='uid',
            sequence_col='sequence',
            extra_data=None,
            alphabet=None,
            **kwargs):
        # Use generic write class to write data.
        return _write(
            data,
            filename=filename,
            schema=schema,
            id_col=id_col,
            sequence_col=sequence_col,
            extra_data=extra_data,
            alphabet=alphabet,
            **kwargs
        )
    # Update docs
    func.__doc__ = _write_doc_template(schema)
    return func
[ "def", "_write_function", "(", "schema", ")", ":", "def", "func", "(", "data", ",", "filename", "=", "None", ",", "schema", "=", "schema", ",", "id_col", "=", "'uid'", ",", "sequence_col", "=", "'sequence'", ",", "extra_data", "=", "None", ",", "alphabet", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# Use generic write class to write data.", "return", "_write", "(", "data", ",", "filename", "=", "filename", ",", "schema", "=", "schema", ",", "id_col", "=", "id_col", ",", "sequence_col", "=", "sequence_col", ",", "extra_data", "=", "extra_data", ",", "alphabet", "=", "alphabet", ",", "*", "*", "kwargs", ")", "# Update docs", "func", ".", "__doc__", "=", "_write_doc_template", "(", "schema", ")", "return", "func" ]
avg_line_len: 25.153846 | score: 15.192308
def contains_all(self, other):
    """Return ``True`` if all strings in ``other`` have size `length`."""
    dtype = getattr(other, 'dtype', None)
    if dtype is None:
        dtype = np.result_type(*other)
    dtype_str = np.dtype('S{}'.format(self.length))
    dtype_uni = np.dtype('<U{}'.format(self.length))
    return dtype in (dtype_str, dtype_uni)
[ "def", "contains_all", "(", "self", ",", "other", ")", ":", "dtype", "=", "getattr", "(", "other", ",", "'dtype'", ",", "None", ")", "if", "dtype", "is", "None", ":", "dtype", "=", "np", ".", "result_type", "(", "*", "other", ")", "dtype_str", "=", "np", ".", "dtype", "(", "'S{}'", ".", "format", "(", "self", ".", "length", ")", ")", "dtype_uni", "=", "np", ".", "dtype", "(", "'<U{}'", ".", "format", "(", "self", ".", "length", ")", ")", "return", "dtype", "in", "(", "dtype_str", ",", "dtype_uni", ")" ]
avg_line_len: 47 | score: 8.625
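A self-contained sketch of the dtype check, with a minimal stand-in class holding the length attribute (numpy is the only dependency):

import numpy as np

class Strings:
    # minimal stand-in for the class the method above belongs to
    def __init__(self, length):
        self.length = length

    def contains_all(self, other):
        dtype = getattr(other, 'dtype', None)
        if dtype is None:
            dtype = np.result_type(*other)
        dtype_str = np.dtype('S{}'.format(self.length))
        dtype_uni = np.dtype('<U{}'.format(self.length))
        return dtype in (dtype_str, dtype_uni)

s3 = Strings(3)
print(s3.contains_all(np.array(['abc', 'def'])))  # True, dtype is '<U3'
print(s3.contains_all(np.array(['abcd'])))        # False, dtype is '<U4'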
def colors_json_ids(self):
    """ A proxy property based access to vertices in current edge

    When edge is serialized to JSON object, no explicit object for its multicolor is created,
    but rather all colors, taking into account their multiplicity, are referenced by their json_ids.
    """
    return [color.json_id if hasattr(color, "json_id") else hash(color)
            for color in self.multicolor.multicolors.elements()]
[ "def", "colors_json_ids", "(", "self", ")", ":", "return", "[", "color", ".", "json_id", "if", "hasattr", "(", "color", ",", "\"json_id\"", ")", "else", "hash", "(", "color", ")", "for", "color", "in", "self", ".", "multicolor", ".", "multicolors", ".", "elements", "(", ")", "]" ]
avg_line_len: 62.142857 | score: 37.571429
def _request(self):
    """
    retrieve the caller frame, extract the parameters from the caller
    function, find the matching function, and fire the request
    """
    caller_frame = inspect.getouterframes(inspect.currentframe())[1]
    args, _, _, values = inspect.getargvalues(caller_frame[0])
    caller_name = caller_frame[3]
    kwargs = {arg: values[arg] for arg in args if arg != 'self'}
    func = reduce(
        lambda resource, name: resource.__getattr__(name),
        self.mappings[caller_name].split('.'), self)
    return func(**kwargs)
[ "def", "_request", "(", "self", ")", ":", "caller_frame", "=", "inspect", ".", "getouterframes", "(", "inspect", ".", "currentframe", "(", ")", ")", "[", "1", "]", "args", ",", "_", ",", "_", ",", "values", "=", "inspect", ".", "getargvalues", "(", "caller_frame", "[", "0", "]", ")", "caller_name", "=", "caller_frame", "[", "3", "]", "kwargs", "=", "{", "arg", ":", "values", "[", "arg", "]", "for", "arg", "in", "args", "if", "arg", "!=", "'self'", "}", "func", "=", "reduce", "(", "lambda", "resource", ",", "name", ":", "resource", ".", "__getattr__", "(", "name", ")", ",", "self", ".", "mappings", "[", "caller_name", "]", ".", "split", "(", "'.'", ")", ",", "self", ")", "return", "func", "(", "*", "*", "kwargs", ")" ]
avg_line_len: 42.785714 | score: 16.214286
def get_context_data(self, **kwargs):
    """ Get the context for this view. """
    #max_columns, max_rows = self.get_max_dimension()
    context = {
        'gadgets': self._registry,
        'columns': self.columns,
        'rows': self.rows,
        'column_ratio': 100 - self.columns * 2,
        'row_ratio': 100 - self.rows * 2,
    }
    context.update(kwargs)
    return context
[ "def", "get_context_data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "#max_columns, max_rows = self.get_max_dimension()", "context", "=", "{", "'gadgets'", ":", "self", ".", "_registry", ",", "'columns'", ":", "self", ".", "columns", ",", "'rows'", ":", "self", ".", "rows", ",", "'column_ratio'", ":", "100", "-", "self", ".", "columns", "*", "2", ",", "'row_ratio'", ":", "100", "-", "self", ".", "rows", "*", "2", ",", "}", "context", ".", "update", "(", "kwargs", ")", "return", "context" ]
avg_line_len: 31 | score: 9.571429
def _add_updated_at_column(self, values):
    """
    Add the "updated_at" column to a dictionary of values.

    :param values: The values to update
    :type values: dict

    :return: The new dictionary of values
    :rtype: dict
    """
    if not self._model.uses_timestamps():
        return values

    column = self._model.get_updated_at_column()

    if "updated_at" not in values:
        values.update({column: self._model.fresh_timestamp_string()})

    return values
[ "def", "_add_updated_at_column", "(", "self", ",", "values", ")", ":", "if", "not", "self", ".", "_model", ".", "uses_timestamps", "(", ")", ":", "return", "values", "column", "=", "self", ".", "_model", ".", "get_updated_at_column", "(", ")", "if", "\"updated_at\"", "not", "in", "values", ":", "values", ".", "update", "(", "{", "column", ":", "self", ".", "_model", ".", "fresh_timestamp_string", "(", ")", "}", ")", "return", "values" ]
avg_line_len: 27 | score: 18.473684
def default_listener(col_attr, default):
    """Establish a default-setting listener."""

    @event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
    def init_scalar(target, value, dict_):
        if default.is_callable:
            # the callable of ColumnDefault always accepts a context argument
            value = default.arg(None)
        elif default.is_scalar:
            value = default.arg
        else:
            raise NotImplementedError(
                "Can't invoke pre-default for a SQL-level column default")

        dict_[col_attr.key] = value
        return value
[ "def", "default_listener", "(", "col_attr", ",", "default", ")", ":", "@", "event", ".", "listens_for", "(", "col_attr", ",", "\"init_scalar\"", ",", "retval", "=", "True", ",", "propagate", "=", "True", ")", "def", "init_scalar", "(", "target", ",", "value", ",", "dict_", ")", ":", "if", "default", ".", "is_callable", ":", "# the callable of ColumnDefault always accepts a context argument", "value", "=", "default", ".", "arg", "(", "None", ")", "elif", "default", ".", "is_scalar", ":", "value", "=", "default", ".", "arg", "else", ":", "raise", "NotImplementedError", "(", "\"Can't invoke pre-default for a SQL-level column default\"", ")", "dict_", "[", "col_attr", ".", "key", "]", "=", "value", "return", "value" ]
avg_line_len: 32.888889 | score: 19.611111
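A sketch of wiring the listener to a declarative column so the Python-side default fires on first attribute access instead of at INSERT time; this follows SQLAlchemy's documented "init_scalar" recipe and assumes default_listener above (and its event import) is in scope:

from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Item(Base):
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    count = Column(Integer, default=0)

# Wire the scalar ColumnDefault of 'count' to the attribute event.
default_listener(Item.count, Item.__table__.c.count.default)

item = Item()
print(item.count)  # 0: populated by init_scalar instead of staying None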
def check_aux_coordinates(self, ds):
    '''
    Chapter 5 paragraph 3

    The dimensions of an auxiliary coordinate variable must be a subset of
    the dimensions of the variable with which the coordinate is associated,
    with two exceptions. First, string-valued coordinates (Section 6.1,
    "Labels") have a dimension for maximum string length. Second, in the
    ragged array representations of data (Chapter 9, Discrete Sampling
    Geometries), special methods are needed to connect the data and
    coordinates.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    '''
    ret_val = []
    geophysical_variables = self._find_geophysical_vars(ds)
    for name in geophysical_variables:
        variable = ds.variables[name]
        coordinates = getattr(variable, 'coordinates', None)
        # We use a set so we can assert
        dim_set = set(variable.dimensions)
        # No auxiliary coordinates, no check
        if not isinstance(coordinates, basestring) or coordinates == '':
            continue

        valid_aux_coords = TestCtx(BaseCheck.HIGH, self.section_titles["5"])

        for aux_coord in coordinates.split():
            valid_aux_coords.assert_true(
                aux_coord in ds.variables,
                "{}'s auxiliary coordinate specified by the coordinates attribute, {}, "
                "is not a variable in this dataset"
                "".format(name, aux_coord))
            if aux_coord not in ds.variables:
                continue

            # §6.1 Allows for "labels" to be referenced as coordinates
            if ds.variables[aux_coord].dtype.char == 'S':
                continue

            aux_coord_dims = set(ds.variables[aux_coord].dimensions)
            valid_aux_coords.assert_true(
                aux_coord_dims.issubset(dim_set),
                "dimensions for auxiliary coordinate variable {} ({}) "
                "are not a subset of dimensions for variable {} ({})"
                "".format(aux_coord, ', '.join(aux_coord_dims),
                          name, ', '.join(dim_set)))

        ret_val.append(valid_aux_coords.to_result())

    return ret_val
[ "def", "check_aux_coordinates", "(", "self", ",", "ds", ")", ":", "ret_val", "=", "[", "]", "geophysical_variables", "=", "self", ".", "_find_geophysical_vars", "(", "ds", ")", "for", "name", "in", "geophysical_variables", ":", "variable", "=", "ds", ".", "variables", "[", "name", "]", "coordinates", "=", "getattr", "(", "variable", ",", "'coordinates'", ",", "None", ")", "# We use a set so we can assert", "dim_set", "=", "set", "(", "variable", ".", "dimensions", ")", "# No auxiliary coordinates, no check", "if", "not", "isinstance", "(", "coordinates", ",", "basestring", ")", "or", "coordinates", "==", "''", ":", "continue", "valid_aux_coords", "=", "TestCtx", "(", "BaseCheck", ".", "HIGH", ",", "self", ".", "section_titles", "[", "\"5\"", "]", ")", "for", "aux_coord", "in", "coordinates", ".", "split", "(", ")", ":", "valid_aux_coords", ".", "assert_true", "(", "aux_coord", "in", "ds", ".", "variables", ",", "\"{}'s auxiliary coordinate specified by the coordinates attribute, {}, \"", "\"is not a variable in this dataset\"", "\"\"", ".", "format", "(", "name", ",", "aux_coord", ")", ")", "if", "aux_coord", "not", "in", "ds", ".", "variables", ":", "continue", "# §6.1 Allows for \"labels\" to be referenced as coordinates", "if", "ds", ".", "variables", "[", "aux_coord", "]", ".", "dtype", ".", "char", "==", "'S'", ":", "continue", "aux_coord_dims", "=", "set", "(", "ds", ".", "variables", "[", "aux_coord", "]", ".", "dimensions", ")", "valid_aux_coords", ".", "assert_true", "(", "aux_coord_dims", ".", "issubset", "(", "dim_set", ")", ",", "\"dimensions for auxiliary coordinate variable {} ({}) \"", "\"are not a subset of dimensions for variable {} ({})\"", "\"\"", ".", "format", "(", "aux_coord", ",", "', '", ".", "join", "(", "aux_coord_dims", ")", ",", "name", ",", "', '", ".", "join", "(", "dim_set", ")", ")", ")", "ret_val", ".", "append", "(", "valid_aux_coords", ".", "to_result", "(", ")", ")", "return", "ret_val" ]
avg_line_len: 48.849057 | score: 26.924528
def get_environments(self):
    """ Returns the environments """
    response = self.ebs.describe_environments(application_name=self.app_name,
                                              include_deleted=False)
    return response['DescribeEnvironmentsResponse']['DescribeEnvironmentsResult']['Environments']
[ "def", "get_environments", "(", "self", ")", ":", "response", "=", "self", ".", "ebs", ".", "describe_environments", "(", "application_name", "=", "self", ".", "app_name", ",", "include_deleted", "=", "False", ")", "return", "response", "[", "'DescribeEnvironmentsResponse'", "]", "[", "'DescribeEnvironmentsResult'", "]", "[", "'Environments'", "]" ]
avg_line_len: 47.666667 | score: 24.333333
def _check_leading(*char_lists):
    """Remove any non-alphanumeric or non-~ leading characters

    Checks the beginning of any provided lists for non-alphanumeric or
    non-~ (tilde) leading characters and removes them if found.
    Operates on (and possibly alters) the passed lists.

    :param char_lists: one or more lists of characters
    :return: None
    :rtype: None
    """
    logger.debug('_check_leading(%s)', char_lists)
    for char_list in char_lists:
        while (len(char_list) != 0 and
               not char_list[0].isalnum() and
               not char_list[0] == '~'):
            char_list.pop(0)
        logger.debug('updated list: %s', char_list)
[ "def", "_check_leading", "(", "*", "char_lists", ")", ":", "logger", ".", "debug", "(", "'_check_leading(%s)'", ",", "char_lists", ")", "for", "char_list", "in", "char_lists", ":", "while", "(", "len", "(", "char_list", ")", "!=", "0", "and", "not", "char_list", "[", "0", "]", ".", "isalnum", "(", ")", "and", "not", "char_list", "[", "0", "]", "==", "'~'", ")", ":", "char_list", ".", "pop", "(", "0", ")", "logger", ".", "debug", "(", "'updated list: %s'", ",", "char_list", ")" ]
avg_line_len: 38.117647 | score: 17
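A quick demonstration of the in-place trimming; the module logger is stubbed so the snippet runs standalone with the function above:

import logging
logger = logging.getLogger(__name__)  # stand-in for the module logger

a = list('--1.2.3')
b = list('~beta')
_check_leading(a, b)
print(a)  # ['1', '.', '2', '.', '3'] -- leading '-' characters removed
print(b)  # ['~', 'b', 'e', 't', 'a'] -- '~' is explicitly allowed and kept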
def standard_backtrack(self):
    """
    Estimate step size L by computing a linesearch that guarantees
    that F <= Q according to the standard FISTA backtracking
    strategy in :cite:`beck-2009-fast`. This also updates variable Y.
    """

    gradY = self.eval_grad()  # Given Y(f), this computes gradY(f)

    maxiter = self.L_maxiter

    iterBTrack = 0
    linesearch = 1
    while linesearch and iterBTrack < maxiter:
        self.proximal_step(gradY)  # Given gradY(f), L, this updates X(f)

        f = self.obfn_f(self.var_x())
        Dxy = self.eval_Dxy()
        Q = self.obfn_f(self.var_y()) + \
            self.eval_linear_approx(Dxy, gradY) + \
            (self.L / 2.) * np.linalg.norm(Dxy.flatten(), 2)**2

        if f <= Q:
            linesearch = 0
        else:
            self.L *= self.L_gamma_u

        iterBTrack += 1

    self.F = f
    self.Q = Q
    self.iterBTrack = iterBTrack

    # Update auxiliary sequence
    self.combination_step()
[ "def", "standard_backtrack", "(", "self", ")", ":", "gradY", "=", "self", ".", "eval_grad", "(", ")", "# Given Y(f), this updates computes gradY(f)", "maxiter", "=", "self", ".", "L_maxiter", "iterBTrack", "=", "0", "linesearch", "=", "1", "while", "linesearch", "and", "iterBTrack", "<", "maxiter", ":", "self", ".", "proximal_step", "(", "gradY", ")", "# Given gradY(f), L, this updates X(f)", "f", "=", "self", ".", "obfn_f", "(", "self", ".", "var_x", "(", ")", ")", "Dxy", "=", "self", ".", "eval_Dxy", "(", ")", "Q", "=", "self", ".", "obfn_f", "(", "self", ".", "var_y", "(", ")", ")", "+", "self", ".", "eval_linear_approx", "(", "Dxy", ",", "gradY", ")", "+", "(", "self", ".", "L", "/", "2.", ")", "*", "np", ".", "linalg", ".", "norm", "(", "Dxy", ".", "flatten", "(", ")", ",", "2", ")", "**", "2", "if", "f", "<=", "Q", ":", "linesearch", "=", "0", "else", ":", "self", ".", "L", "*=", "self", ".", "L_gamma_u", "iterBTrack", "+=", "1", "self", ".", "F", "=", "f", "self", ".", "Q", "=", "Q", "self", ".", "iterBTrack", "=", "iterBTrack", "# Update auxiliary sequence", "self", ".", "combination_step", "(", ")" ]
avg_line_len: 30.085714 | score: 19.485714
def drop_invalid_columns(feed: "Feed") -> "Feed":
    """
    Drop all DataFrame columns of the given "Feed" that are not
    listed in the GTFS. Return the resulting new "Feed".
    """
    feed = feed.copy()
    for table, group in cs.GTFS_REF.groupby("table"):
        f = getattr(feed, table)
        if f is None:
            continue
        valid_columns = group["column"].values
        for col in f.columns:
            if col not in valid_columns:
                print(f"{table}: dropping invalid column {col}")
                del f[col]
        setattr(feed, table, f)

    return feed
[ "def", "drop_invalid_columns", "(", "feed", ":", "\"Feed\"", ")", "->", "\"Feed\"", ":", "feed", "=", "feed", ".", "copy", "(", ")", "for", "table", ",", "group", "in", "cs", ".", "GTFS_REF", ".", "groupby", "(", "\"table\"", ")", ":", "f", "=", "getattr", "(", "feed", ",", "table", ")", "if", "f", "is", "None", ":", "continue", "valid_columns", "=", "group", "[", "\"column\"", "]", ".", "values", "for", "col", "in", "f", ".", "columns", ":", "if", "col", "not", "in", "valid_columns", ":", "print", "(", "f\"{table}: dropping invalid column {col}\"", ")", "del", "f", "[", "col", "]", "setattr", "(", "feed", ",", "table", ",", "f", ")", "return", "feed" ]
avg_line_len: 30.736842 | score: 13.684211
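A hypothetical invocation, assuming this cleaner is importable alongside gtfs_kit's feed reader (the feed path is a placeholder):

import gtfs_kit as gk

feed = gk.read_feed('gtfs.zip', dist_units='km')
feed = drop_invalid_columns(feed)  # prints a line for every dropped column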
def _check_command_response(response, msg=None, allowable_errors=None,
                            parse_write_concern_error=False):
    """Check the response to a command for errors.
    """
    if "ok" not in response:
        # Server didn't recognize our message as a command.
        raise OperationFailure(response.get("$err"),
                               response.get("code"),
                               response)

    # TODO: remove, this is moving to _check_gle_response
    if response.get("wtimeout", False):
        # MongoDB versions before 1.8.0 return the error message in an "errmsg"
        # field. If "errmsg" exists "err" will also exist set to None, so we
        # have to check for "errmsg" first.
        raise WTimeoutError(response.get("errmsg", response.get("err")),
                            response.get("code"),
                            response)

    if parse_write_concern_error and 'writeConcernError' in response:
        wce = response['writeConcernError']
        raise WriteConcernError(wce['errmsg'], wce['code'], wce)

    if not response["ok"]:
        details = response
        # Mongos returns the error details in a 'raw' object
        # for some errors.
        if "raw" in response:
            for shard in itervalues(response["raw"]):
                # Grab the first non-empty raw error from a shard.
                if shard.get("errmsg") and not shard.get("ok"):
                    details = shard
                    break

        errmsg = details["errmsg"]
        if allowable_errors is None or errmsg not in allowable_errors:

            # Server is "not master" or "recovering"
            if (errmsg.startswith("not master")
                    or errmsg.startswith("node is recovering")):
                raise NotMasterError(errmsg, response)

            # Server assertion failures
            if errmsg == "db assertion failure":
                errmsg = ("db assertion failure, assertion: '%s'" %
                          details.get("assertion", ""))
                raise OperationFailure(errmsg,
                                       details.get("assertionCode"),
                                       response)

            # Other errors
            code = details.get("code")
            # findAndModify with upsert can raise duplicate key error
            if code in (11000, 11001, 12582):
                raise DuplicateKeyError(errmsg, code, response)
            elif code == 50:
                raise ExecutionTimeout(errmsg, code, response)
            elif code == 43:
                raise CursorNotFound(errmsg, code, response)

            msg = msg or "%s"
            raise OperationFailure(msg % errmsg, code, response)
[ "def", "_check_command_response", "(", "response", ",", "msg", "=", "None", ",", "allowable_errors", "=", "None", ",", "parse_write_concern_error", "=", "False", ")", ":", "if", "\"ok\"", "not", "in", "response", ":", "# Server didn't recognize our message as a command.", "raise", "OperationFailure", "(", "response", ".", "get", "(", "\"$err\"", ")", ",", "response", ".", "get", "(", "\"code\"", ")", ",", "response", ")", "# TODO: remove, this is moving to _check_gle_response", "if", "response", ".", "get", "(", "\"wtimeout\"", ",", "False", ")", ":", "# MongoDB versions before 1.8.0 return the error message in an \"errmsg\"", "# field. If \"errmsg\" exists \"err\" will also exist set to None, so we", "# have to check for \"errmsg\" first.", "raise", "WTimeoutError", "(", "response", ".", "get", "(", "\"errmsg\"", ",", "response", ".", "get", "(", "\"err\"", ")", ")", ",", "response", ".", "get", "(", "\"code\"", ")", ",", "response", ")", "if", "parse_write_concern_error", "and", "'writeConcernError'", "in", "response", ":", "wce", "=", "response", "[", "'writeConcernError'", "]", "raise", "WriteConcernError", "(", "wce", "[", "'errmsg'", "]", ",", "wce", "[", "'code'", "]", ",", "wce", ")", "if", "not", "response", "[", "\"ok\"", "]", ":", "details", "=", "response", "# Mongos returns the error details in a 'raw' object", "# for some errors.", "if", "\"raw\"", "in", "response", ":", "for", "shard", "in", "itervalues", "(", "response", "[", "\"raw\"", "]", ")", ":", "# Grab the first non-empty raw error from a shard.", "if", "shard", ".", "get", "(", "\"errmsg\"", ")", "and", "not", "shard", ".", "get", "(", "\"ok\"", ")", ":", "details", "=", "shard", "break", "errmsg", "=", "details", "[", "\"errmsg\"", "]", "if", "allowable_errors", "is", "None", "or", "errmsg", "not", "in", "allowable_errors", ":", "# Server is \"not master\" or \"recovering\"", "if", "(", "errmsg", ".", "startswith", "(", "\"not master\"", ")", "or", "errmsg", ".", "startswith", "(", "\"node is recovering\"", ")", ")", ":", "raise", "NotMasterError", "(", "errmsg", ",", "response", ")", "# Server assertion failures", "if", "errmsg", "==", "\"db assertion failure\"", ":", "errmsg", "=", "(", "\"db assertion failure, assertion: '%s'\"", "%", "details", ".", "get", "(", "\"assertion\"", ",", "\"\"", ")", ")", "raise", "OperationFailure", "(", "errmsg", ",", "details", ".", "get", "(", "\"assertionCode\"", ")", ",", "response", ")", "# Other errors", "code", "=", "details", ".", "get", "(", "\"code\"", ")", "# findAndModify with upsert can raise duplicate key error", "if", "code", "in", "(", "11000", ",", "11001", ",", "12582", ")", ":", "raise", "DuplicateKeyError", "(", "errmsg", ",", "code", ",", "response", ")", "elif", "code", "==", "50", ":", "raise", "ExecutionTimeout", "(", "errmsg", ",", "code", ",", "response", ")", "elif", "code", "==", "43", ":", "raise", "CursorNotFound", "(", "errmsg", ",", "code", ",", "response", ")", "msg", "=", "msg", "or", "\"%s\"", "raise", "OperationFailure", "(", "msg", "%", "errmsg", ",", "code", ",", "response", ")" ]
avg_line_len: 42.222222 | score: 18.68254
def remove_option(self, section, option):
    """Remove an option."""
    if not section or section == self.default_section:
        sectdict = self._defaults
    else:
        try:
            sectdict = self._sections[section]
        except KeyError:
            raise from_none(NoSectionError(section))
    option = self.optionxform(option)
    existed = option in sectdict
    if existed:
        del sectdict[option]
    return existed
[ "def", "remove_option", "(", "self", ",", "section", ",", "option", ")", ":", "if", "not", "section", "or", "section", "==", "self", ".", "default_section", ":", "sectdict", "=", "self", ".", "_defaults", "else", ":", "try", ":", "sectdict", "=", "self", ".", "_sections", "[", "section", "]", "except", "KeyError", ":", "raise", "from_none", "(", "NoSectionError", "(", "section", ")", ")", "option", "=", "self", ".", "optionxform", "(", "option", ")", "existed", "=", "option", "in", "sectdict", "if", "existed", ":", "del", "sectdict", "[", "option", "]", "return", "existed" ]
avg_line_len: 34.285714 | score: 11.642857
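The stdlib configparser exhibits the same documented behavior as this backport, returning whether the option existed:

import configparser

cp = configparser.ConfigParser()
cp.read_string('[db]\nhost = localhost\n')
print(cp.remove_option('db', 'host'))  # True, option removed
print(cp.remove_option('db', 'host'))  # False, already gone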
def get_training_image_text_data_iters(source_root: str,
                                       source: str, target: str,
                                       validation_source_root: str,
                                       validation_source: str, validation_target: str,
                                       vocab_target: vocab.Vocab,
                                       vocab_target_path: Optional[str],
                                       batch_size: int,
                                       batch_by_words: bool,
                                       batch_num_devices: int,
                                       source_image_size: tuple,
                                       max_seq_len_target: int,
                                       bucketing: bool,
                                       bucket_width: int,
                                       use_feature_loader: bool = False,
                                       preload_features: bool = False) -> Tuple['ParallelSampleIter',
                                                                                'ParallelSampleIter',
                                                                                'DataConfig', 'DataInfo']:
    """
    Returns data iterators for training and validation data.

    :param source_root: Path to source images since the file in source contains relative paths.
    :param source: Path to source training data.
    :param target: Path to target training data.
    :param validation_source_root: Path to validation source images since the file in
           validation_source contains relative paths.
    :param validation_source: Path to source validation data.
    :param validation_target: Path to target validation data.
    :param vocab_target: Target vocabulary.
    :param vocab_target_path: Path to target vocabulary.
    :param batch_size: Batch size.
    :param batch_by_words: Size batches by words rather than sentences.
    :param batch_num_devices: Number of devices batches will be parallelized across.
    :param source_image_size: size to resize the image to (for iterator)
    :param max_seq_len_target: Maximum target sequence length.
    :param bucketing: Whether to use bucketing.
    :param bucket_width: Size of buckets.
    :param use_feature_loader: If True, features are loaded instead of images.
    :param preload_features: If use_feature_loader is True, this loads all the features into memory.
    :return: Tuple of (training data iterator, validation data iterator, data config).
    """
    logger.info("===============================")
    logger.info("Creating training data iterator")
    logger.info("===============================")

    # define buckets
    buckets = define_empty_source_parallel_buckets(max_seq_len_target, bucket_width) if bucketing else [
        (0, max_seq_len_target)]

    source_images = [FileListReader(source, source_root)]
    target_sentences = SequenceReader(target, vocab_target, add_bos=True)

    # 2. pass: Get data statistics only on target (source not considered)
    data_statistics = get_data_statistics(source_readers=None,
                                          target_reader=target_sentences,
                                          buckets=buckets,
                                          length_ratio_mean=1.0,
                                          length_ratio_std=1.0,
                                          source_vocabs=None,
                                          target_vocab=vocab_target)

    bucket_batch_sizes = define_bucket_batch_sizes(buckets,
                                                   batch_size,
                                                   batch_by_words,
                                                   batch_num_devices,
                                                   data_statistics.average_len_target_per_bucket)

    data_statistics.log(bucket_batch_sizes)

    data_loader = RawListTextDatasetLoader(buckets=buckets,
                                           eos_id=vocab_target[C.EOS_SYMBOL],
                                           pad_id=C.PAD_ID)

    training_data = data_loader.load(source_images[0], target_sentences,
                                     data_statistics.num_sents_per_bucket).fill_up(bucket_batch_sizes)

    data_info = DataInfo(sources=source_images,
                         target=target,
                         source_vocabs=None,
                         target_vocab=vocab_target_path,
                         shared_vocab=False,
                         num_shards=1)

    config_data = DataConfig(data_statistics=data_statistics,
                             max_seq_len_source=0,
                             max_seq_len_target=max_seq_len_target,
                             num_source_factors=len(source_images))

    # Add useful stuff to config_data
    config_data.source_root = source_root
    config_data.validation_source_root = validation_source_root
    config_data.use_feature_loader = use_feature_loader

    train_iter = ImageTextSampleIter(data=training_data,
                                     buckets=buckets,
                                     batch_size=batch_size,
                                     bucket_batch_sizes=bucket_batch_sizes,
                                     image_size=source_image_size,
                                     use_feature_loader=use_feature_loader,
                                     preload_features=preload_features)

    validation_iter = get_validation_image_text_data_iter(data_loader=data_loader,
                                                          validation_source_root=validation_source_root,
                                                          validation_source=validation_source,
                                                          validation_target=validation_target,
                                                          buckets=buckets,
                                                          bucket_batch_sizes=bucket_batch_sizes,
                                                          source_image_size=source_image_size,
                                                          vocab_target=vocab_target,
                                                          max_seq_len_target=max_seq_len_target,
                                                          batch_size=batch_size,
                                                          use_feature_loader=use_feature_loader,
                                                          preload_features=preload_features)

    return train_iter, validation_iter, config_data, data_info
[ "def", "get_training_image_text_data_iters", "(", "source_root", ":", "str", ",", "source", ":", "str", ",", "target", ":", "str", ",", "validation_source_root", ":", "str", ",", "validation_source", ":", "str", ",", "validation_target", ":", "str", ",", "vocab_target", ":", "vocab", ".", "Vocab", ",", "vocab_target_path", ":", "Optional", "[", "str", "]", ",", "batch_size", ":", "int", ",", "batch_by_words", ":", "bool", ",", "batch_num_devices", ":", "int", ",", "source_image_size", ":", "tuple", ",", "max_seq_len_target", ":", "int", ",", "bucketing", ":", "bool", ",", "bucket_width", ":", "int", ",", "use_feature_loader", ":", "bool", "=", "False", ",", "preload_features", ":", "bool", "=", "False", ")", "->", "Tuple", "[", "'ParallelSampleIter'", ",", "'ParallelSampleIter'", ",", "'DataConfig'", ",", "'DataInfo'", "]", ":", "logger", ".", "info", "(", "\"===============================\"", ")", "logger", ".", "info", "(", "\"Creating training data iterator\"", ")", "logger", ".", "info", "(", "\"===============================\"", ")", "# define buckets", "buckets", "=", "define_empty_source_parallel_buckets", "(", "max_seq_len_target", ",", "bucket_width", ")", "if", "bucketing", "else", "[", "(", "0", ",", "max_seq_len_target", ")", "]", "source_images", "=", "[", "FileListReader", "(", "source", ",", "source_root", ")", "]", "target_sentences", "=", "SequenceReader", "(", "target", ",", "vocab_target", ",", "add_bos", "=", "True", ")", "# 2. pass: Get data statistics only on target (source not considered)", "data_statistics", "=", "get_data_statistics", "(", "source_readers", "=", "None", ",", "target_reader", "=", "target_sentences", ",", "buckets", "=", "buckets", ",", "length_ratio_mean", "=", "1.0", ",", "length_ratio_std", "=", "1.0", ",", "source_vocabs", "=", "None", ",", "target_vocab", "=", "vocab_target", ")", "bucket_batch_sizes", "=", "define_bucket_batch_sizes", "(", "buckets", ",", "batch_size", ",", "batch_by_words", ",", "batch_num_devices", ",", "data_statistics", ".", "average_len_target_per_bucket", ")", "data_statistics", ".", "log", "(", "bucket_batch_sizes", ")", "data_loader", "=", "RawListTextDatasetLoader", "(", "buckets", "=", "buckets", ",", "eos_id", "=", "vocab_target", "[", "C", ".", "EOS_SYMBOL", "]", ",", "pad_id", "=", "C", ".", "PAD_ID", ")", "training_data", "=", "data_loader", ".", "load", "(", "source_images", "[", "0", "]", ",", "target_sentences", ",", "data_statistics", ".", "num_sents_per_bucket", ")", ".", "fill_up", "(", "bucket_batch_sizes", ")", "data_info", "=", "DataInfo", "(", "sources", "=", "source_images", ",", "target", "=", "target", ",", "source_vocabs", "=", "None", ",", "target_vocab", "=", "vocab_target_path", ",", "shared_vocab", "=", "False", ",", "num_shards", "=", "1", ")", "config_data", "=", "DataConfig", "(", "data_statistics", "=", "data_statistics", ",", "max_seq_len_source", "=", "0", ",", "max_seq_len_target", "=", "max_seq_len_target", ",", "num_source_factors", "=", "len", "(", "source_images", ")", ")", "# Add useful stuff to config_data", "config_data", ".", "source_root", "=", "source_root", "config_data", ".", "validation_source_root", "=", "validation_source_root", "config_data", ".", "use_feature_loader", "=", "use_feature_loader", "train_iter", "=", "ImageTextSampleIter", "(", "data", "=", "training_data", ",", "buckets", "=", "buckets", ",", "batch_size", "=", "batch_size", ",", "bucket_batch_sizes", "=", "bucket_batch_sizes", ",", "image_size", "=", "source_image_size", ",", "use_feature_loader", "=", 
"use_feature_loader", ",", "preload_features", "=", "preload_features", ")", "validation_iter", "=", "get_validation_image_text_data_iter", "(", "data_loader", "=", "data_loader", ",", "validation_source_root", "=", "validation_source_root", ",", "validation_source", "=", "validation_source", ",", "validation_target", "=", "validation_target", ",", "buckets", "=", "buckets", ",", "bucket_batch_sizes", "=", "bucket_batch_sizes", ",", "source_image_size", "=", "source_image_size", ",", "vocab_target", "=", "vocab_target", ",", "max_seq_len_target", "=", "max_seq_len_target", ",", "batch_size", "=", "batch_size", ",", "use_feature_loader", "=", "use_feature_loader", ",", "preload_features", "=", "preload_features", ")", "return", "train_iter", ",", "validation_iter", ",", "config_data", ",", "data_info" ]
avg_line_len: 57.814159 | score: 29.017699
def create_hosted_zone(self, name, caller_reference=None, comment=None):
    """
    Creates and returns a new hosted zone. Once a hosted zone is created,
    its details can't be changed.

    :param str name: The name of the hosted zone to create.
    :keyword str caller_reference: A unique string that identifies the
        request and that allows failed create_hosted_zone requests to be
        retried without the risk of executing the operation twice. If no
        value is given, we'll generate a Type 4 UUID for you.
    :keyword str comment: An optional comment to attach to the zone.
    :rtype: tuple
    :returns: A tuple in the form of ``(hosted_zone, change_info)``.
        The ``hosted_zone`` variable contains a
        :py:class:`HostedZone <route53.hosted_zone.HostedZone>`
        instance matching the newly created zone, and ``change_info``
        is a dict with some details about the API request.
    """
    body = xml_generators.create_hosted_zone_writer(
        connection=self,
        name=name,
        caller_reference=caller_reference,
        comment=comment
    )

    root = self._send_request(
        path='hostedzone',
        data=body,
        method='POST',
    )

    return xml_parsers.created_hosted_zone_parser(
        root=root,
        connection=self
    )
[ "def", "create_hosted_zone", "(", "self", ",", "name", ",", "caller_reference", "=", "None", ",", "comment", "=", "None", ")", ":", "body", "=", "xml_generators", ".", "create_hosted_zone_writer", "(", "connection", "=", "self", ",", "name", "=", "name", ",", "caller_reference", "=", "caller_reference", ",", "comment", "=", "comment", ")", "root", "=", "self", ".", "_send_request", "(", "path", "=", "'hostedzone'", ",", "data", "=", "body", ",", "method", "=", "'POST'", ",", ")", "return", "xml_parsers", ".", "created_hosted_zone_parser", "(", "root", "=", "root", ",", "connection", "=", "self", ")" ]
avg_line_len: 38.916667 | score: 22.583333
def get_new(modules, min_major_version, min_minor_version):
    """
    Get list of migrations that haven't been run yet

    :param modules: iterable containing module names
    :param min_major_version: minimum major version
    :param min_minor_version: minimum minor version
    :returns: return an iterator that yields only items which versions
        are >= min_ver
    """
    for mod_data in modules:
        (modname, mod_major_version, mod_minor_version) = mod_data
        if (mod_major_version > min_major_version or
                (mod_major_version == min_major_version and
                 mod_minor_version >= min_minor_version)):
            yield mod_data
[ "def", "get_new", "(", "modules", ",", "min_major_version", ",", "min_minor_version", ")", ":", "for", "mod_data", "in", "modules", ":", "(", "modname", ",", "mod_major_version", ",", "mod_minor_version", ")", "=", "mod_data", "if", "(", "mod_major_version", ">", "min_major_version", "or", "(", "mod_major_version", "==", "min_major_version", "and", "mod_minor_version", ">=", "min_minor_version", ")", ")", ":", "yield", "mod_data" ]
avg_line_len: 47.2 | score: 17.133333
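A worked example of the version filter with hypothetical module tuples:

migrations = [
    ('m_base', 1, 0),
    ('m_users', 1, 5),
    ('m_orders', 2, 0),
]
print(list(get_new(migrations, 1, 5)))
# [('m_users', 1, 5), ('m_orders', 2, 0)] -- (1, 0) is below the minimum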
def i3():
    '''Install and customize the tiling window manager i3.'''
    install_package('i3')
    install_file_legacy(path='~/.i3/config', username=env.user, repos_dir='repos')

    # setup: hide the mouse if not in use
    # in ~/.i3/config: 'exec /home/<USERNAME>/repos/hhpc/hhpc -i 10 &'
    install_packages(['make', 'pkg-config', 'gcc', 'libc6-dev', 'libx11-dev'])
    checkup_git_repo_legacy(url='https://github.com/aktau/hhpc.git')
    run('cd ~/repos/hhpc && make')
[ "def", "i3", "(", ")", ":", "install_package", "(", "'i3'", ")", "install_file_legacy", "(", "path", "=", "'~/.i3/config'", ",", "username", "=", "env", ".", "user", ",", "repos_dir", "=", "'repos'", ")", "# setup: hide the mouse if not in use", "# in ~/.i3/config: 'exec /home/<USERNAME>/repos/hhpc/hhpc -i 10 &'", "install_packages", "(", "[", "'make'", ",", "'pkg-config'", ",", "'gcc'", ",", "'libc6-dev'", ",", "'libx11-dev'", "]", ")", "checkup_git_repo_legacy", "(", "url", "=", "'https://github.com/aktau/hhpc.git'", ")", "run", "(", "'cd ~/repos/hhpc && make'", ")" ]
avg_line_len: 47 | score: 25
def vector_generate(start_pt, end_pt, normalize=False):
    """ Generates a vector from 2 input points.

    :param start_pt: start point of the vector
    :type start_pt: list, tuple
    :param end_pt: end point of the vector
    :type end_pt: list, tuple
    :param normalize: if True, the generated vector is normalized
    :type normalize: bool
    :return: a vector from start_pt to end_pt
    :rtype: list
    """
    try:
        if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0:
            raise ValueError("Input points cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise

    ret_vec = []
    for sp, ep in zip(start_pt, end_pt):
        ret_vec.append(ep - sp)

    if normalize:
        ret_vec = vector_normalize(ret_vec)
    return ret_vec
[ "def", "vector_generate", "(", "start_pt", ",", "end_pt", ",", "normalize", "=", "False", ")", ":", "try", ":", "if", "start_pt", "is", "None", "or", "len", "(", "start_pt", ")", "==", "0", "or", "end_pt", "is", "None", "or", "len", "(", "end_pt", ")", "==", "0", ":", "raise", "ValueError", "(", "\"Input points cannot be empty\"", ")", "except", "TypeError", "as", "e", ":", "print", "(", "\"An error occurred: {}\"", ".", "format", "(", "e", ".", "args", "[", "-", "1", "]", ")", ")", "raise", "TypeError", "(", "\"Input must be a list or tuple\"", ")", "except", "Exception", ":", "raise", "ret_vec", "=", "[", "]", "for", "sp", ",", "ep", "in", "zip", "(", "start_pt", ",", "end_pt", ")", ":", "ret_vec", ".", "append", "(", "ep", "-", "sp", ")", "if", "normalize", ":", "ret_vec", "=", "vector_normalize", "(", "ret_vec", ")", "return", "ret_vec" ]
avg_line_len: 32.214286 | score: 18.071429
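A small usage sketch; the geomdl.linalg module location (and its vector_normalize helper used by normalize=True) is an assumption:

from geomdl import linalg  # assumed module for these helpers

v = linalg.vector_generate((0.0, 0.0, 0.0), (3.0, 4.0, 0.0))
print(v)   # [3.0, 4.0, 0.0]
vn = linalg.vector_generate((0.0, 0.0, 0.0), (3.0, 4.0, 0.0), normalize=True)
print(vn)  # [0.6, 0.8, 0.0]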
def strip_qos_cntrl(self, idx, prot_type):
    """strip(2 byte) wlan.qos

    :idx: int
    :prot_type: string
        802.11 protocol type(.11ac, .11a, .11n, etc)
    :return: int
        number of processed bytes
    :return: int
        qos priority
    :return: int
        qos bit
    :return: int
        qos acknowledgement
    :return: int
        amsdupresent(aggregated mac service data unit)
    """
    qos_cntrl, = struct.unpack('H', self._packet[idx:idx + 2])
    qos_cntrl_bits = format(qos_cntrl, '016b')[::-1]
    qos_pri = qos_cntrl & 0x000f
    qos_bit = int(qos_cntrl_bits[5])
    qos_ack = int(qos_cntrl_bits[6:8], 2)
    amsdupresent = 0
    if prot_type == '.11ac':
        amsdupresent = int(qos_cntrl_bits[7])
    return 2, qos_pri, qos_bit, qos_ack, amsdupresent
[ "def", "strip_qos_cntrl", "(", "self", ",", "idx", ",", "prot_type", ")", ":", "qos_cntrl", ",", "=", "struct", ".", "unpack", "(", "'H'", ",", "self", ".", "_packet", "[", "idx", ":", "idx", "+", "2", "]", ")", "qos_cntrl_bits", "=", "format", "(", "qos_cntrl", ",", "'016b'", ")", "[", ":", ":", "-", "1", "]", "qos_pri", "=", "qos_cntrl", "&", "0x000f", "qos_bit", "=", "int", "(", "qos_cntrl_bits", "[", "5", "]", ")", "qos_ack", "=", "int", "(", "qos_cntrl_bits", "[", "6", ":", "8", "]", ",", "2", ")", "amsdupresent", "=", "0", "if", "prot_type", "==", "'.11ac'", ":", "amsdupresent", "=", "int", "(", "qos_cntrl_bits", "[", "7", "]", ")", "return", "2", ",", "qos_pri", ",", "qos_bit", ",", "qos_ack", ",", "amsdupresent" ]
avg_line_len: 34.36 | score: 12.92
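The bit layout is easier to follow on a concrete value; this standalone sketch decodes the same fields from a hand-packed QoS Control word, using the same LSB-first bit string as the method above:

import struct

raw = struct.pack('H', 0b10100101)      # priority 5, bits 5 and 7 set
qos_cntrl, = struct.unpack('H', raw)
bits = format(qos_cntrl, '016b')[::-1]  # reversed, so bits[i] is bit i
print(qos_cntrl & 0x000f)  # 5 -> qos_pri (low four bits)
print(int(bits[5]))        # 1 -> qos_bit
print(int(bits[6:8], 2))   # 1 -> qos_ack (bits 6-7)
print(int(bits[7]))        # 1 -> amsdupresent when prot_type is '.11ac'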
def Beachball(fm, linewidth=2, facecolor='b', bgcolor='w', edgecolor='k',
              alpha=1.0, xy=(0, 0), width=200, size=100, nofill=False,
              zorder=100, outfile=None, format=None, fig=None):
    """
    Draws a beach ball diagram of an earthquake focal mechanism.

    S1, D1, and R1, the strike, dip and rake of one of the focal planes, can
    be vectors of multiple focal mechanisms.

    :param fm: Focal mechanism that is either number of mechanisms (NM) by 3
        (strike, dip, and rake) or NM x 6 (M11, M22, M33, M12, M13, M23 - the
        six independent components of the moment tensor, where the coordinate
        system is 1,2,3 = Up,South,East which equals r,theta,phi). The strike
        is of the first plane, clockwise relative to north. The dip is of the
        first plane, defined clockwise and perpendicular to strike, relative
        to horizontal such that 0 is horizontal and 90 is vertical. The rake
        is of the first focal plane solution. 90 moves the hanging wall up-dip
        (thrust), 0 moves it in the strike direction (left-lateral), -90 moves
        it down-dip (normal), and 180 moves it opposite to strike
        (right-lateral).
    :param facecolor: Color to use for quadrants of tension; can be a string,
        e.g. ``'r'``, ``'b'`` or three component color vector, [R G B].
        Defaults to ``'b'`` (blue).
    :param bgcolor: The background color. Defaults to ``'w'`` (white).
    :param edgecolor: Color of the edges. Defaults to ``'k'`` (black).
    :param alpha: The alpha level of the beach ball. Defaults to ``1.0``
        (opaque).
    :param xy: Origin position of the beach ball as tuple. Defaults to
        ``(0, 0)``.
    :type width: int
    :param width: Symbol size of beach ball. Defaults to ``200``.
    :param size: Controls the number of interpolation points for the curves.
        Minimum is automatically set to ``100``.
    :param nofill: Do not fill the beach ball, but only plot the planes.
    :param zorder: Set zorder. Artists with lower zorder values are drawn
        first.
    :param outfile: Output file string. Also used to automatically determine
        the output format. Supported file formats depend on your matplotlib
        backend. Most backends support png, pdf, ps, eps and svg. Defaults to
        ``None``.
    :param format: Format of the graph picture. If no format is given the
        outfile parameter will be used to try to automatically determine the
        output format. If no format is found it defaults to png output. If no
        outfile is specified but a format is, then a binary imagestring will
        be returned. Defaults to ``None``.
    :param fig: Give an existing figure instance to plot into. New Figure if
        set to ``None``.
    """
    plot_width = width * 0.95

    # plot the figure
    if not fig:
        fig = plt.figure(figsize=(3, 3), dpi=100)
        fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
        fig.set_figheight(width // 100)
        fig.set_figwidth(width // 100)
    ax = fig.add_subplot(111, aspect='equal')

    # hide axes + ticks
    ax.axison = False

    # plot the collection
    collection = Beach(fm, linewidth=linewidth, facecolor=facecolor,
                       edgecolor=edgecolor, bgcolor=bgcolor,
                       alpha=alpha, nofill=nofill, xy=xy,
                       width=plot_width, size=size, zorder=zorder)
    ax.add_collection(collection)

    ax.autoscale_view(tight=False, scalex=True, scaley=True)
    # export
    if outfile:
        if format:
            fig.savefig(outfile, dpi=100, transparent=True, format=format)
        else:
            fig.savefig(outfile, dpi=100, transparent=True)
    elif format and not outfile:
        imgdata = compatibility.BytesIO()
        fig.savefig(imgdata, format=format, dpi=100, transparent=True)
        imgdata.seek(0)
        return imgdata.read()
    else:
        plt.show()
        return fig
[ "def", "Beachball", "(", "fm", ",", "linewidth", "=", "2", ",", "facecolor", "=", "'b'", ",", "bgcolor", "=", "'w'", ",", "edgecolor", "=", "'k'", ",", "alpha", "=", "1.0", ",", "xy", "=", "(", "0", ",", "0", ")", ",", "width", "=", "200", ",", "size", "=", "100", ",", "nofill", "=", "False", ",", "zorder", "=", "100", ",", "outfile", "=", "None", ",", "format", "=", "None", ",", "fig", "=", "None", ")", ":", "plot_width", "=", "width", "*", "0.95", "# plot the figure", "if", "not", "fig", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "3", ",", "3", ")", ",", "dpi", "=", "100", ")", "fig", ".", "subplots_adjust", "(", "left", "=", "0", ",", "bottom", "=", "0", ",", "right", "=", "1", ",", "top", "=", "1", ")", "fig", ".", "set_figheight", "(", "width", "//", "100", ")", "fig", ".", "set_figwidth", "(", "width", "//", "100", ")", "ax", "=", "fig", ".", "add_subplot", "(", "111", ",", "aspect", "=", "'equal'", ")", "# hide axes + ticks", "ax", ".", "axison", "=", "False", "# plot the collection", "collection", "=", "Beach", "(", "fm", ",", "linewidth", "=", "linewidth", ",", "facecolor", "=", "facecolor", ",", "edgecolor", "=", "edgecolor", ",", "bgcolor", "=", "bgcolor", ",", "alpha", "=", "alpha", ",", "nofill", "=", "nofill", ",", "xy", "=", "xy", ",", "width", "=", "plot_width", ",", "size", "=", "size", ",", "zorder", "=", "zorder", ")", "ax", ".", "add_collection", "(", "collection", ")", "ax", ".", "autoscale_view", "(", "tight", "=", "False", ",", "scalex", "=", "True", ",", "scaley", "=", "True", ")", "# export", "if", "outfile", ":", "if", "format", ":", "fig", ".", "savefig", "(", "outfile", ",", "dpi", "=", "100", ",", "transparent", "=", "True", ",", "format", "=", "format", ")", "else", ":", "fig", ".", "savefig", "(", "outfile", ",", "dpi", "=", "100", ",", "transparent", "=", "True", ")", "elif", "format", "and", "not", "outfile", ":", "imgdata", "=", "compatibility", ".", "BytesIO", "(", ")", "fig", ".", "savefig", "(", "imgdata", ",", "format", "=", "format", ",", "dpi", "=", "100", ",", "transparent", "=", "True", ")", "imgdata", ".", "seek", "(", "0", ")", "return", "imgdata", ".", "read", "(", ")", "else", ":", "plt", ".", "show", "(", ")", "return", "fig" ]
avg_line_len: 46.321429 | score: 23.77381
def copy_ecu_with_frames(ecu_or_glob, source_db, target_db):
    # type: (typing.Union[cm.Ecu, str], cm.CanMatrix, cm.CanMatrix) -> None
    """
    Copy ECU(s) identified by Name or as Object from source CAN matrix to target CAN matrix.
    This function additionally copies all relevant Frames and Defines.

    :param ecu_or_glob: Ecu instance or glob pattern for Ecu name
    :param source_db: Source CAN matrix
    :param target_db: Destination CAN matrix
    """
    # check whether ecu_or_glob is object or symbolic name
    if isinstance(ecu_or_glob, cm.Ecu):
        ecu_list = [ecu_or_glob]
    else:
        ecu_list = source_db.glob_ecus(ecu_or_glob)

    for ecu in ecu_list:
        logger.info("Copying ECU " + ecu.name)

        target_db.add_ecu(copy.deepcopy(ecu))

        # copy tx-frames
        for frame in source_db.frames:
            if ecu.name in frame.transmitters:
                copy_frame(frame.arbitration_id, source_db, target_db)

        # copy rx-frames
        for frame in source_db.frames:
            for signal in frame.signals:
                if ecu.name in signal.receivers:
                    copy_frame(frame.arbitration_id, source_db, target_db)
                    break

        # copy all ECU defines
        for attribute in ecu.attributes:
            if attribute not in target_db.ecu_defines:
                target_db.add_ecu_defines(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.ecu_defines[attribute].definition))
                target_db.add_define_default(
                    copy.deepcopy(attribute),
                    copy.deepcopy(source_db.ecu_defines[attribute].defaultValue))
            # update enum-data types if needed:
            if source_db.ecu_defines[attribute].type == 'ENUM':
                temp_attr = ecu.attribute(attribute, db=source_db)
                if temp_attr not in target_db.ecu_defines[attribute].values:
                    target_db.ecu_defines[attribute].values.append(copy.deepcopy(temp_attr))
                    target_db.ecu_defines[attribute].update()
[ "def", "copy_ecu_with_frames", "(", "ecu_or_glob", ",", "source_db", ",", "target_db", ")", ":", "# type: (typing.Union[cm.Ecu, str], cm.CanMatrix, cm.CanMatrix) -> None", "# check whether ecu_or_glob is object or symbolic name", "if", "isinstance", "(", "ecu_or_glob", ",", "cm", ".", "Ecu", ")", ":", "ecu_list", "=", "[", "ecu_or_glob", "]", "else", ":", "ecu_list", "=", "source_db", ".", "glob_ecus", "(", "ecu_or_glob", ")", "for", "ecu", "in", "ecu_list", ":", "logger", ".", "info", "(", "\"Copying ECU \"", "+", "ecu", ".", "name", ")", "target_db", ".", "add_ecu", "(", "copy", ".", "deepcopy", "(", "ecu", ")", ")", "# copy tx-frames", "for", "frame", "in", "source_db", ".", "frames", ":", "if", "ecu", ".", "name", "in", "frame", ".", "transmitters", ":", "copy_frame", "(", "frame", ".", "arbitration_id", ",", "source_db", ",", "target_db", ")", "# copy rx-frames", "for", "frame", "in", "source_db", ".", "frames", ":", "for", "signal", "in", "frame", ".", "signals", ":", "if", "ecu", ".", "name", "in", "signal", ".", "receivers", ":", "copy_frame", "(", "frame", ".", "arbitration_id", ",", "source_db", ",", "target_db", ")", "break", "# copy all ECU defines", "for", "attribute", "in", "ecu", ".", "attributes", ":", "if", "attribute", "not", "in", "target_db", ".", "ecu_defines", ":", "target_db", ".", "add_ecu_defines", "(", "copy", ".", "deepcopy", "(", "attribute", ")", ",", "copy", ".", "deepcopy", "(", "source_db", ".", "ecu_defines", "[", "attribute", "]", ".", "definition", ")", ")", "target_db", ".", "add_define_default", "(", "copy", ".", "deepcopy", "(", "attribute", ")", ",", "copy", ".", "deepcopy", "(", "source_db", ".", "ecu_defines", "[", "attribute", "]", ".", "defaultValue", ")", ")", "# update enum-data types if needed:", "if", "source_db", ".", "ecu_defines", "[", "attribute", "]", ".", "type", "==", "'ENUM'", ":", "temp_attr", "=", "ecu", ".", "attribute", "(", "attribute", ",", "db", "=", "source_db", ")", "if", "temp_attr", "not", "in", "target_db", ".", "ecu_defines", "[", "attribute", "]", ".", "values", ":", "target_db", ".", "ecu_defines", "[", "attribute", "]", ".", "values", ".", "append", "(", "copy", ".", "deepcopy", "(", "temp_attr", ")", ")", "target_db", ".", "ecu_defines", "[", "attribute", "]", ".", "update", "(", ")" ]
avg_line_len: 43.826087 | score: 20.826087
def get_params(self, name, param_list):
    '''Use lctl get_param to collect a selection of parameters into a
    file.
    '''
    self.add_cmd_output("lctl get_param %s" % " ".join(param_list),
                        suggest_filename="params-%s" % name,
                        stderr=False)
[ "def", "get_params", "(", "self", ",", "name", ",", "param_list", ")", ":", "self", ".", "add_cmd_output", "(", "\"lctl get_param %s\"", "%", "\" \"", ".", "join", "(", "param_list", ")", ",", "suggest_filename", "=", "\"params-%s\"", "%", "name", ",", "stderr", "=", "False", ")" ]
avg_line_len: 39.5 | score: 22.75
def osmlem(op, x, data, niter, callback=None, **kwargs):
    r"""Ordered Subsets Maximum Likelihood Expectation Maximization algorithm.

    This solver attempts to solve::

        max_x L(x | data)

    where ``L(x | data)`` is the likelihood of ``x`` given ``data``. The
    likelihood depends on the forward operators ``op[0], ..., op[n-1]`` such
    that (approximately)::

        op[i](x) = data[i]

    Parameters
    ----------
    op : sequence of `Operator`
        Forward operators in the inverse problem.
    x : ``op.domain`` element
        Vector to which the result is written. Its initial value is used as
        starting point of the iteration, and its values are updated in each
        iteration step. The initial value of ``x`` should be non-negative.
    data : sequence of ``op.range`` `element-like`
        Right-hand sides of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Other Parameters
    ----------------
    sensitivities : float or ``op.domain`` `element-like`, optional
        The algorithm contains an ``A^T 1`` term, if this parameter is
        given, it is replaced by it.
        Default: ``op[i].adjoint(op[i].range.one())``

    Notes
    -----
    Given forward models :math:`A_i`, and data :math:`g_i`,
    :math:`i = 1, ..., M`, the algorithm attempts to find an :math:`x` that
    maximizes:

    .. math::
        \prod_{i=1}^M P(g_i | g_i \text{ is } Poisson(A_i(x))
        \text{ distributed}).

    The algorithm is explicitly given by partial updates:

    .. math::
       x_{n + m/M} = \frac{x_{n + (m - 1)/M}}{A_i^* 1}
       A_i^* (g_i / A_i(x_{n + (m - 1)/M}))

    for :math:`m = 1, ..., M` and :math:`x_{n+1} = x_{n + M/M}`.

    The algorithm is not guaranteed to converge, but works for many practical
    problems.

    References
    ----------
    Natterer, F. Mathematical Methods in Image Reconstruction, section 5.3.2.

    See Also
    --------
    mlem : Ordinary MLEM algorithm without subsets.
    loglikelihood : Function for calculating the logarithm of the likelihood
    """
    n_ops = len(op)
    if len(data) != n_ops:
        raise ValueError('number of data ({}) does not match number of '
                         'operators ({})'.format(len(data), n_ops))
    if not all(x in opi.domain for opi in op):
        raise ValueError('`x` not an element in the domains of all operators')

    # Convert data to range elements
    data = [op[i].range.element(data[i]) for i in range(len(op))]

    # Parameter used to enforce positivity.
    # TODO: let users give this.
    eps = 1e-8

    if np.any(np.less(x, 0)):
        raise ValueError('`x` must be non-negative')

    # Extract the sensitivities parameter
    sensitivities = kwargs.pop('sensitivities', None)
    if sensitivities is None:
        sensitivities = [np.maximum(opi.adjoint(opi.range.one()), eps)
                         for opi in op]
    else:
        # Make sure the sensitivities is a list of the correct size.
        try:
            list(sensitivities)
        except TypeError:
            sensitivities = [sensitivities] * n_ops

    tmp_dom = op[0].domain.element()
    tmp_ran = [opi.range.element() for opi in op]

    for _ in range(niter):
        for i in range(n_ops):
            op[i](x, out=tmp_ran[i])
            tmp_ran[i].ufuncs.maximum(eps, out=tmp_ran[i])
            data[i].divide(tmp_ran[i], out=tmp_ran[i])

            op[i].adjoint(tmp_ran[i], out=tmp_dom)
            tmp_dom /= sensitivities[i]

            x *= tmp_dom

            if callback is not None:
                callback(x)
[ "def", "osmlem", "(", "op", ",", "x", ",", "data", ",", "niter", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "n_ops", "=", "len", "(", "op", ")", "if", "len", "(", "data", ")", "!=", "n_ops", ":", "raise", "ValueError", "(", "'number of data ({}) does not match number of '", "'operators ({})'", ".", "format", "(", "len", "(", "data", ")", ",", "n_ops", ")", ")", "if", "not", "all", "(", "x", "in", "opi", ".", "domain", "for", "opi", "in", "op", ")", ":", "raise", "ValueError", "(", "'`x` not an element in the domains of all operators'", ")", "# Convert data to range elements", "data", "=", "[", "op", "[", "i", "]", ".", "range", ".", "element", "(", "data", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "op", ")", ")", "]", "# Parameter used to enforce positivity.", "# TODO: let users give this.", "eps", "=", "1e-8", "if", "np", ".", "any", "(", "np", ".", "less", "(", "x", ",", "0", ")", ")", ":", "raise", "ValueError", "(", "'`x` must be non-negative'", ")", "# Extract the sensitivites parameter", "sensitivities", "=", "kwargs", ".", "pop", "(", "'sensitivities'", ",", "None", ")", "if", "sensitivities", "is", "None", ":", "sensitivities", "=", "[", "np", ".", "maximum", "(", "opi", ".", "adjoint", "(", "opi", ".", "range", ".", "one", "(", ")", ")", ",", "eps", ")", "for", "opi", "in", "op", "]", "else", ":", "# Make sure the sensitivities is a list of the correct size.", "try", ":", "list", "(", "sensitivities", ")", "except", "TypeError", ":", "sensitivities", "=", "[", "sensitivities", "]", "*", "n_ops", "tmp_dom", "=", "op", "[", "0", "]", ".", "domain", ".", "element", "(", ")", "tmp_ran", "=", "[", "opi", ".", "range", ".", "element", "(", ")", "for", "opi", "in", "op", "]", "for", "_", "in", "range", "(", "niter", ")", ":", "for", "i", "in", "range", "(", "n_ops", ")", ":", "op", "[", "i", "]", "(", "x", ",", "out", "=", "tmp_ran", "[", "i", "]", ")", "tmp_ran", "[", "i", "]", ".", "ufuncs", ".", "maximum", "(", "eps", ",", "out", "=", "tmp_ran", "[", "i", "]", ")", "data", "[", "i", "]", ".", "divide", "(", "tmp_ran", "[", "i", "]", ",", "out", "=", "tmp_ran", "[", "i", "]", ")", "op", "[", "i", "]", ".", "adjoint", "(", "tmp_ran", "[", "i", "]", ",", "out", "=", "tmp_dom", ")", "tmp_dom", "/=", "sensitivities", "[", "i", "]", "x", "*=", "tmp_dom", "if", "callback", "is", "not", "None", ":", "callback", "(", "x", ")" ]
32.061947
22.477876
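The partial-update formula in the `osmlem` docstring above can be checked outside the library. A minimal sketch, assuming nothing beyond numpy: dense matrices stand in for the abstract forward operators `op[i]`, so this illustrates the ordered-subsets update itself, not the library's Operator interface.

import numpy as np

# Each A_i is a dense matrix standing in for op[i]; the inner loop mirrors
# x <- (x / A_i^T 1) * A_i^T (g_i / A_i(x)) from the docstring above.
rng = np.random.default_rng(0)
A = [np.abs(rng.normal(size=(8, 5))) for _ in range(3)]  # forward models
x_true = np.abs(rng.normal(size=5))
g = [Ai @ x_true for Ai in A]                            # noiseless data

x = np.ones(5)
eps = 1e-8
sens = [np.maximum(Ai.T @ np.ones(Ai.shape[0]), eps) for Ai in A]
for _ in range(200):
    for Ai, gi, si in zip(A, g, sens):
        x *= (Ai.T @ (gi / np.maximum(Ai @ x, eps))) / si

print(x_true)
print(x)  # approaches x_true on this noiseless toy problem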
def kill(self, sig): """Send the given signal to the child application. In keeping with UNIX tradition it has a misleading name. It does not necessarily kill the child unless you send the right signal. See the :mod:`signal` module for constants representing signal numbers. """ # Same as os.kill, but the pid is given for you. if self.isalive(): os.kill(self.pid, sig)
[ "def", "kill", "(", "self", ",", "sig", ")", ":", "# Same as os.kill, but the pid is given for you.", "if", "self", ".", "isalive", "(", ")", ":", "os", ".", "kill", "(", "self", ".", "pid", ",", "sig", ")" ]
38.909091
21.727273
def init_argparser(self, argparser): """ This should not be called with an external argparser as it will corrupt tracking data if forced. """ def prepare_argparser(): if argparser in self.argparser_details: return False result = self.argparser_details[argparser] = ArgumentParserDetails( {}, {}, {}) return result def to_module_attr(ep): return '%s:%s' % (ep.module_name, '.'.join(ep.attrs)) def register(name, runtime, entry_point): subparser = commands.add_parser( name, help=inst.description, ) # Have to specify this separately because otherwise the # subparser will not have a proper description when it is # invoked as the root. subparser.description = inst.description # Assign values for version reporting system setattr(subparser, ATTR_ROOT_PKG, getattr( argparser, ATTR_ROOT_PKG, self.package_name)) subp_info = [] subp_info.extend(getattr(argparser, ATTR_INFO, [])) subp_info.append((subparser.prog, entry_point.dist)) setattr(subparser, ATTR_INFO, subp_info) try: try: runtime.init_argparser(subparser) except RuntimeError as e: # first attempt to filter out recursion errors; also if # the stack frame isn't available the complaint about # bad validation doesn't apply anyway. frame = currentframe() if (not frame or 'maximum recursion depth' not in str( e.args)): raise if (not isinstance(runtime, Runtime) or (type( runtime).entry_point_load_validated.__code__ is Runtime.entry_point_load_validated.__code__)): # welp, guess some other thing blew up then, or # that the problem is definitely not caused by # this runtime implementation. # TODO figure out how to log this nicer via the # self.log_debug_error without exploding the # console like Megumin would have done. raise # assume the overridden method didn't do everything # correctly then; would be great if there is a way # to ensure that our thing would have been called. cls = type(runtime) logger.critical( "Runtime subclass at entry_point '%s' has override " "'entry_point_load_validated' without filtering out " "its parent classes; this can be addressed by calling " "super(%s.%s, self).entry_point_load_validated(" "entry_point) in its implementation, or simply don't " "override that method to avoid infinite recursion.", entry_point, cls.__module__, cls.__name__, ) exc = RuntimeError( "%r has an invalid 'entry_point_load_validated' " "implementation: insufficient protection against " "infinite recursion into self not provided" % runtime ) # for Python 3 to not blow it up. exc.__suppress_context__ = True raise exc except Exception as e: self.log_debug_error( "cannot register entry_point '%s' from '%s' as a " "subcommand to '%s': %s: %s", entry_point, entry_point.dist, argparser.prog, e.__class__.__name__, e ) # this is where naughty things happen: will be poking at # the parser internals to undo the damage that was done # first, pop the choices_actions as a help was provided commands._choices_actions.pop() # then pop the name that was mapped. commands._name_parser_map.pop(name) else: # finally record the completely initialized subparser # into the structure here if successful. 
subparsers[name] = subparser runtimes[name] = runtime entry_points[name] = entry_point details = prepare_argparser() if not details: logger.debug( 'argparser %r has already been initialized against runner %r', argparser, self, ) return subparsers, runtimes, entry_points = details super(Runtime, self).init_argparser(argparser) commands = argparser.add_subparsers( dest=self.action_key, metavar='<command>') # Python 3.7 has required set to True, which is correct in most # cases but this disables the manual handling for cases where a # command was not provided; also this generates a useless error # message that simply states "<command> is required" and forces # the program to exit. As the goal of this suite of classes is # to act as a helpful CLI front end, force required to be False # to keep our manual handling and management of subcommands. # Setting this as a property for compatibility with Python<3.7, # as only in Python>=3.7 the add_subparsers can accept required # as an argument. commands.required = False for entry_point in self.iter_entry_points(): inst = self.entry_point_load_validated(entry_point) if not inst: continue if entry_point.name in runtimes: reg_ep = entry_points[entry_point.name] reg_rt = runtimes[entry_point.name] if reg_rt is inst: # this is fine, multiple packages declared the same # thing with the same name. logger.debug( "duplicated registration of command '%s' via entry " "point '%s' ignored; registered '%s', conflict '%s'", entry_point.name, entry_point, reg_ep.dist, entry_point.dist, ) continue logger.error( "a calmjs runtime command named '%s' already registered.", entry_point.name ) logger.info("conflicting entry points are:") logger.info( "'%s' from '%s' (registered)", reg_ep, reg_ep.dist) logger.info( "'%s' from '%s' (conflict)", entry_point, entry_point.dist) # Fall back name should work if the class/instances are # stable. name = to_module_attr(entry_point) if name in runtimes: # Maybe this is the third time this module is # registered. Test for its identity. if runtimes[name] is not inst: # Okay someone is having a fun time here mucking # with data structures internal to here, likely # (read hopefully) due to testing or random # monkey patching (or module level reload). logger.critical( "'%s' is already registered but points to a " "completely different instance; please try again " "with verbose logging and note which packages are " "reported as conflicted; alternatively this is a " "forced situation where this Runtime instance has " "been used or initialized improperly.", name ) else: logger.debug( "fallback command '%s' is already registered.", name ) continue logger.error( "falling back to using full instance path '%s' as command " "name, also registering alias for registered command", name ) register(to_module_attr(reg_ep), reg_rt, reg_ep) else: name = entry_point.name register(name, inst, entry_point)
[ "def", "init_argparser", "(", "self", ",", "argparser", ")", ":", "def", "prepare_argparser", "(", ")", ":", "if", "argparser", "in", "self", ".", "argparser_details", ":", "return", "False", "result", "=", "self", ".", "argparser_details", "[", "argparser", "]", "=", "ArgumentParserDetails", "(", "{", "}", ",", "{", "}", ",", "{", "}", ")", "return", "result", "def", "to_module_attr", "(", "ep", ")", ":", "return", "'%s:%s'", "%", "(", "ep", ".", "module_name", ",", "'.'", ".", "join", "(", "ep", ".", "attrs", ")", ")", "def", "register", "(", "name", ",", "runtime", ",", "entry_point", ")", ":", "subparser", "=", "commands", ".", "add_parser", "(", "name", ",", "help", "=", "inst", ".", "description", ",", ")", "# Have to specify this separately because otherwise the", "# subparser will not have a proper description when it is", "# invoked as the root.", "subparser", ".", "description", "=", "inst", ".", "description", "# Assign values for version reporting system", "setattr", "(", "subparser", ",", "ATTR_ROOT_PKG", ",", "getattr", "(", "argparser", ",", "ATTR_ROOT_PKG", ",", "self", ".", "package_name", ")", ")", "subp_info", "=", "[", "]", "subp_info", ".", "extend", "(", "getattr", "(", "argparser", ",", "ATTR_INFO", ",", "[", "]", ")", ")", "subp_info", ".", "append", "(", "(", "subparser", ".", "prog", ",", "entry_point", ".", "dist", ")", ")", "setattr", "(", "subparser", ",", "ATTR_INFO", ",", "subp_info", ")", "try", ":", "try", ":", "runtime", ".", "init_argparser", "(", "subparser", ")", "except", "RuntimeError", "as", "e", ":", "# first attempt to filter out recursion errors; also if", "# the stack frame isn't available the complaint about", "# bad validation doesn't apply anyway.", "frame", "=", "currentframe", "(", ")", "if", "(", "not", "frame", "or", "'maximum recursion depth'", "not", "in", "str", "(", "e", ".", "args", ")", ")", ":", "raise", "if", "(", "not", "isinstance", "(", "runtime", ",", "Runtime", ")", "or", "(", "type", "(", "runtime", ")", ".", "entry_point_load_validated", ".", "__code__", "is", "Runtime", ".", "entry_point_load_validated", ".", "__code__", ")", ")", ":", "# welp, guess some other thing blew up then, or", "# that the problem is definitely not caused by", "# this runtime implementation.", "# TODO figure out how to log this nicer via the", "# self.log_debug_error without exploding the", "# console like Megumin would have done.", "raise", "# assume the overridden method didn't do everything", "# correctly then; would be great if there is a way", "# to ensure that our thing would have been called.", "cls", "=", "type", "(", "runtime", ")", "logger", ".", "critical", "(", "\"Runtime subclass at entry_point '%s' has override \"", "\"'entry_point_load_validated' without filtering out \"", "\"its parent classes; this can be addressed by calling \"", "\"super(%s.%s, self).entry_point_load_validated(\"", "\"entry_point) in its implementation, or simply don't \"", "\"override that method to avoid infinite recursion.\"", ",", "entry_point", ",", "cls", ".", "__module__", ",", "cls", ".", "__name__", ",", ")", "exc", "=", "RuntimeError", "(", "\"%r has an invalid 'entry_point_load_validated' \"", "\"implementation: insufficient protection against \"", "\"infinite recursion into self not provided\"", "%", "runtime", ")", "# for Python 3 to not blow it up.", "exc", ".", "__suppress_context__", "=", "True", "raise", "exc", "except", "Exception", "as", "e", ":", "self", ".", "log_debug_error", "(", "\"cannot register entry_point '%s' from '%s' as a \"", 
"\"subcommand to '%s': %s: %s\"", ",", "entry_point", ",", "entry_point", ".", "dist", ",", "argparser", ".", "prog", ",", "e", ".", "__class__", ".", "__name__", ",", "e", ")", "# this is where naughty things happen: will be poking at", "# the parser internals to undo the damage that was done", "# first, pop the choices_actions as a help was provided", "commands", ".", "_choices_actions", ".", "pop", "(", ")", "# then pop the name that was mapped.", "commands", ".", "_name_parser_map", ".", "pop", "(", "name", ")", "else", ":", "# finally record the completely initialized subparser", "# into the structure here if successful.", "subparsers", "[", "name", "]", "=", "subparser", "runtimes", "[", "name", "]", "=", "runtime", "entry_points", "[", "name", "]", "=", "entry_point", "details", "=", "prepare_argparser", "(", ")", "if", "not", "details", ":", "logger", ".", "debug", "(", "'argparser %r has already been initialized against runner %r'", ",", "argparser", ",", "self", ",", ")", "return", "subparsers", ",", "runtimes", ",", "entry_points", "=", "details", "super", "(", "Runtime", ",", "self", ")", ".", "init_argparser", "(", "argparser", ")", "commands", "=", "argparser", ".", "add_subparsers", "(", "dest", "=", "self", ".", "action_key", ",", "metavar", "=", "'<command>'", ")", "# Python 3.7 has required set to True, which is correct in most", "# cases but this disables the manual handling for cases where a", "# command was not provided; also this generates a useless error", "# message that simply states \"<command> is required\" and forces", "# the program to exit. As the goal of this suite of classes is", "# to act as a helpful CLI front end, force required to be False", "# to keep our manual handling and management of subcommands.", "# Setting this as a property for compatibility with Python<3.7,", "# as only in Python>=3.7 the add_subparsers can accept required", "# as an argument.", "commands", ".", "required", "=", "False", "for", "entry_point", "in", "self", ".", "iter_entry_points", "(", ")", ":", "inst", "=", "self", ".", "entry_point_load_validated", "(", "entry_point", ")", "if", "not", "inst", ":", "continue", "if", "entry_point", ".", "name", "in", "runtimes", ":", "reg_ep", "=", "entry_points", "[", "entry_point", ".", "name", "]", "reg_rt", "=", "runtimes", "[", "entry_point", ".", "name", "]", "if", "reg_rt", "is", "inst", ":", "# this is fine, multiple packages declared the same", "# thing with the same name.", "logger", ".", "debug", "(", "\"duplicated registration of command '%s' via entry \"", "\"point '%s' ignored; registered '%s', confict '%s'\"", ",", "entry_point", ".", "name", ",", "entry_point", ",", "reg_ep", ".", "dist", ",", "entry_point", ".", "dist", ",", ")", "continue", "logger", ".", "error", "(", "\"a calmjs runtime command named '%s' already registered.\"", ",", "entry_point", ".", "name", ")", "logger", ".", "info", "(", "\"conflicting entry points are:\"", ")", "logger", ".", "info", "(", "\"'%s' from '%s' (registered)\"", ",", "reg_ep", ",", "reg_ep", ".", "dist", ")", "logger", ".", "info", "(", "\"'%s' from '%s' (conflict)\"", ",", "entry_point", ",", "entry_point", ".", "dist", ")", "# Fall back name should work if the class/instances are", "# stable.", "name", "=", "to_module_attr", "(", "entry_point", ")", "if", "name", "in", "runtimes", ":", "# Maybe this is the third time this module is", "# registered. 
Test for its identity.", "if", "runtimes", "[", "name", "]", "is", "not", "inst", ":", "# Okay someone is having a fun time here mucking", "# with data structures internal to here, likely", "# (read hopefully) due to testing or random", "# monkey patching (or module level reload).", "logger", ".", "critical", "(", "\"'%s' is already registered but points to a \"", "\"completely different instance; please try again \"", "\"with verbose logging and note which packages are \"", "\"reported as conflicted; alternatively this is a \"", "\"forced situation where this Runtime instance has \"", "\"been used or initialized improperly.\"", ",", "name", ")", "else", ":", "logger", ".", "debug", "(", "\"fallback command '%s' is already registered.\"", ",", "name", ")", "continue", "logger", ".", "error", "(", "\"falling back to using full instance path '%s' as command \"", "\"name, also registering alias for registered command\"", ",", "name", ")", "register", "(", "to_module_attr", "(", "reg_ep", ")", ",", "reg_rt", ",", "reg_ep", ")", "else", ":", "name", "=", "entry_point", ".", "name", "register", "(", "name", ",", "inst", ",", "entry_point", ")" ]
46.521277
21.425532
def p_identifier_group_op(self, p): """ identifier_group : identifier_group child_selector ident_parts | identifier_group '+' ident_parts | identifier_group general_sibling_selector ident_parts | identifier_group '*' """ p[1].extend([p[2]]) if len(p) > 3: p[1].extend(p[3]) p[0] = p[1]
[ "def", "p_identifier_group_op", "(", "self", ",", "p", ")", ":", "p", "[", "1", "]", ".", "extend", "(", "[", "p", "[", "2", "]", "]", ")", "if", "len", "(", "p", ")", ">", "3", ":", "p", "[", "1", "]", ".", "extend", "(", "p", "[", "3", "]", ")", "p", "[", "0", "]", "=", "p", "[", "1", "]" ]
45.1
17.3
def write(self, data): """Wrap the data in a JSONP callback invocation before sending it out""" args = parse_qs(self.handler.environ.get("QUERY_STRING")) if "i" in args: i = args["i"] else: i = "0" # TODO: don't we need to quote this data in here ? super(JSONPolling, self).write("io.j[%s]('%s');" % (i, data))
[ "def", "write", "(", "self", ",", "data", ")", ":", "args", "=", "parse_qs", "(", "self", ".", "handler", ".", "environ", ".", "get", "(", "\"QUERY_STRING\"", ")", ")", "if", "\"i\"", "in", "args", ":", "i", "=", "args", "[", "\"i\"", "]", "else", ":", "i", "=", "\"0\"", "# TODO: don't we need to quote this data in here ?", "super", "(", "JSONPolling", ",", "self", ")", ".", "write", "(", "\"io.j[%s]('%s');\"", "%", "(", "i", ",", "data", ")", ")" ]
38.888889
18.888889
def get_regulate_amounts(self): """Extract Increase/DecreaseAmount Statements.""" pos_events = [] neg_events = [] pattern = "EVENT/[type='ONT::STIMULATE']/arg2/[type='ONT::TRANSCRIBE']/.." pos_events += self.tree.findall(pattern) pattern = "EVENT/[type='ONT::INCREASE']/arg2/[type='ONT::TRANSCRIBE']/.." pos_events += self.tree.findall(pattern) pattern = "EVENT/[type='ONT::INHIBIT']/arg2/[type='ONT::TRANSCRIBE']/.." neg_events += self.tree.findall(pattern) pattern = "EVENT/[type='ONT::DECREASE']/arg2/[type='ONT::TRANSCRIBE']/.." neg_events += self.tree.findall(pattern) # Look at polarity pattern = "EVENT/[type='ONT::MODULATE']/arg2/[type='ONT::TRANSCRIBE']/.." mod_events = self.tree.findall(pattern) for event in mod_events: pol = event.find('polarity') if pol is not None: if pol.text == 'ONT::POSITIVE': pos_events.append(event) elif pol.text == 'ONT::NEGATIVE': neg_events.append(event) combs = zip([pos_events, neg_events], [IncreaseAmount, DecreaseAmount]) for events, cls in combs: for event in events: if event.attrib['id'] in self._static_events: continue if event.attrib['id'] in self._subsumed_events: continue # The agent has to exist and be a protein type agent = event.find(".//*[@role=':AGENT']") if agent is None: continue if agent.find('type') is None or \ (agent.find('type').text not in protein_types): continue agent_id = agent.attrib.get('id') if agent_id is None: continue agent_agent = self._get_agent_by_id(agent_id, event.attrib['id']) # The affected, we already know is ONT::TRANSCRIPTION affected_arg = event.find(".//*[@role=':AFFECTED']") if affected_arg is None: continue affected_id = affected_arg.attrib.get('id') affected_event = self.tree.find("EVENT/[@id='%s']" % affected_id) if affected_event is None: continue affected = \ affected_event.find(".//*[@role=':AFFECTED-RESULT']") if affected is None: affected = \ affected_event.find(".//*[@role=':AFFECTED']") if affected is None: continue affected_id = affected.attrib.get('id') if affected_id is None: continue affected_agent = \ self._get_agent_by_id(affected_id, affected_event.attrib['id']) ev = self._get_evidence(event) location = self._get_event_location(event) for subj, obj in \ _agent_list_product((agent_agent, affected_agent)): if obj is None: continue st = cls(subj, obj, evidence=deepcopy(ev)) _stmt_location_to_agents(st, location) self.statements.append(st) self._add_extracted(_get_type(event), event.attrib['id']) self._subsumed_events.append(affected_event.attrib['id'])
[ "def", "get_regulate_amounts", "(", "self", ")", ":", "pos_events", "=", "[", "]", "neg_events", "=", "[", "]", "pattern", "=", "\"EVENT/[type='ONT::STIMULATE']/arg2/[type='ONT::TRANSCRIBE']/..\"", "pos_events", "+=", "self", ".", "tree", ".", "findall", "(", "pattern", ")", "pattern", "=", "\"EVENT/[type='ONT::INCREASE']/arg2/[type='ONT::TRANSCRIBE']/..\"", "pos_events", "+=", "self", ".", "tree", ".", "findall", "(", "pattern", ")", "pattern", "=", "\"EVENT/[type='ONT::INHIBIT']/arg2/[type='ONT::TRANSCRIBE']/..\"", "neg_events", "+=", "self", ".", "tree", ".", "findall", "(", "pattern", ")", "pattern", "=", "\"EVENT/[type='ONT::DECREASE']/arg2/[type='ONT::TRANSCRIBE']/..\"", "neg_events", "+=", "self", ".", "tree", ".", "findall", "(", "pattern", ")", "# Look at polarity", "pattern", "=", "\"EVENT/[type='ONT::MODULATE']/arg2/[type='ONT::TRANSCRIBE']/..\"", "mod_events", "=", "self", ".", "tree", ".", "findall", "(", "pattern", ")", "for", "event", "in", "mod_events", ":", "pol", "=", "event", ".", "find", "(", "'polarity'", ")", "if", "pol", "is", "not", "None", ":", "if", "pol", ".", "text", "==", "'ONT::POSITIVE'", ":", "pos_events", ".", "append", "(", "event", ")", "elif", "pol", ".", "text", "==", "'ONT::NEGATIVE'", ":", "neg_events", ".", "append", "(", "event", ")", "combs", "=", "zip", "(", "[", "pos_events", ",", "neg_events", "]", ",", "[", "IncreaseAmount", ",", "DecreaseAmount", "]", ")", "for", "events", ",", "cls", "in", "combs", ":", "for", "event", "in", "events", ":", "if", "event", ".", "attrib", "[", "'id'", "]", "in", "self", ".", "_static_events", ":", "continue", "if", "event", ".", "attrib", "[", "'id'", "]", "in", "self", ".", "_subsumed_events", ":", "continue", "# The agent has to exist and be a protein type", "agent", "=", "event", ".", "find", "(", "\".//*[@role=':AGENT']\"", ")", "if", "agent", "is", "None", ":", "continue", "if", "agent", ".", "find", "(", "'type'", ")", "is", "None", "or", "(", "agent", ".", "find", "(", "'type'", ")", ".", "text", "not", "in", "protein_types", ")", ":", "continue", "agent_id", "=", "agent", ".", "attrib", ".", "get", "(", "'id'", ")", "if", "agent_id", "is", "None", ":", "continue", "agent_agent", "=", "self", ".", "_get_agent_by_id", "(", "agent_id", ",", "event", ".", "attrib", "[", "'id'", "]", ")", "# The affected, we already know is ONT::TRANSCRIPTION", "affected_arg", "=", "event", ".", "find", "(", "\".//*[@role=':AFFECTED']\"", ")", "if", "affected_arg", "is", "None", ":", "continue", "affected_id", "=", "affected_arg", ".", "attrib", ".", "get", "(", "'id'", ")", "affected_event", "=", "self", ".", "tree", ".", "find", "(", "\"EVENT/[@id='%s']\"", "%", "affected_id", ")", "if", "affected_event", "is", "None", ":", "continue", "affected", "=", "affected_event", ".", "find", "(", "\".//*[@role=':AFFECTED-RESULT']\"", ")", "if", "affected", "is", "None", ":", "affected", "=", "affected_event", ".", "find", "(", "\".//*[@role=':AFFECTED']\"", ")", "if", "affected", "is", "None", ":", "continue", "affected_id", "=", "affected", ".", "attrib", ".", "get", "(", "'id'", ")", "if", "affected_id", "is", "None", ":", "continue", "affected_agent", "=", "self", ".", "_get_agent_by_id", "(", "affected_id", ",", "affected_event", ".", "attrib", "[", "'id'", "]", ")", "ev", "=", "self", ".", "_get_evidence", "(", "event", ")", "location", "=", "self", ".", "_get_event_location", "(", "event", ")", "for", "subj", ",", "obj", "in", "_agent_list_product", "(", "(", "agent_agent", ",", "affected_agent", ")", ")", ":", "if", "obj", 
"is", "None", ":", "continue", "st", "=", "cls", "(", "subj", ",", "obj", ",", "evidence", "=", "deepcopy", "(", "ev", ")", ")", "_stmt_location_to_agents", "(", "st", ",", "location", ")", "self", ".", "statements", ".", "append", "(", "st", ")", "self", ".", "_add_extracted", "(", "_get_type", "(", "event", ")", ",", "event", ".", "attrib", "[", "'id'", "]", ")", "self", ".", "_subsumed_events", ".", "append", "(", "affected_event", ".", "attrib", "[", "'id'", "]", ")" ]
48.52
16.133333
def c_member_funcs(self, for_struct=False): """Get the decls of the module.""" decls = [ '{} *{};'.format(self._c_type_name(name), name) for name, dummy_args in self.funcs ] if for_struct: return decls return [self._c_mod_decl()] + decls
[ "def", "c_member_funcs", "(", "self", ",", "for_struct", "=", "False", ")", ":", "decls", "=", "[", "'{} *{};'", ".", "format", "(", "self", ".", "_c_type_name", "(", "name", ")", ",", "name", ")", "for", "name", ",", "dummy_args", "in", "self", ".", "funcs", "]", "if", "for_struct", ":", "return", "decls", "return", "[", "self", ".", "_c_mod_decl", "(", ")", "]", "+", "decls" ]
33.888889
13.222222
def get_component_settings(prefixes=None): """ Returns a subset of the env dictionary containing only those keys with the name prefix. """ prefixes = prefixes or [] assert isinstance(prefixes, (tuple, list)), 'Prefixes must be a sequence type, not %s.' % type(prefixes) data = {} for name in prefixes: name = name.lower().strip() for k in sorted(env): if k.startswith('%s_' % name): new_k = k[len(name)+1:] data[new_k] = env[k] return data
[ "def", "get_component_settings", "(", "prefixes", "=", "None", ")", ":", "prefixes", "=", "prefixes", "or", "[", "]", "assert", "isinstance", "(", "prefixes", ",", "(", "tuple", ",", "list", ")", ")", ",", "'Prefixes must be a sequence type, not %s.'", "%", "type", "(", "prefixes", ")", "data", "=", "{", "}", "for", "name", "in", "prefixes", ":", "name", "=", "name", ".", "lower", "(", ")", ".", "strip", "(", ")", "for", "k", "in", "sorted", "(", "env", ")", ":", "if", "k", ".", "startswith", "(", "'%s_'", "%", "name", ")", ":", "new_k", "=", "k", "[", "len", "(", "name", ")", "+", "1", ":", "]", "data", "[", "new_k", "]", "=", "env", "[", "k", "]", "return", "data" ]
34.733333
12.333333
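A standalone illustration of the prefix-stripping behavior in `get_component_settings` above; a plain dict stands in for the fabric-style global `env`, and the helper name and sample keys are illustrative only.

env = {'db_host': 'localhost', 'db_port': 5432, 'cache_ttl': 60}

def get_prefixed(env, prefixes):
    # Same logic as above, but reading from an explicit dict.
    data = {}
    for name in prefixes:
        name = name.lower().strip()
        for k in sorted(env):
            if k.startswith('%s_' % name):
                data[k[len(name) + 1:]] = env[k]
    return data

print(get_prefixed(env, ['db']))  # {'host': 'localhost', 'port': 5432}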
def residual_norm(A, x, b): """Compute ||b - A*x||.""" return norm(np.ravel(b) - A*np.ravel(x))
[ "def", "residual_norm", "(", "A", ",", "x", ",", "b", ")", ":", "return", "norm", "(", "np", ".", "ravel", "(", "b", ")", "-", "A", "*", "np", ".", "ravel", "(", "x", ")", ")" ]
33.666667
5.666667
def cns_vwl_str(self): """ Return a new IPAString, containing only: 1. the consonants, 2. the vowels, and 3. the stress diacritics in the current string. :rtype: IPAString """ return IPAString(ipa_chars=[c for c in self.ipa_chars if (c.is_letter) or (c.is_suprasegmental and c.is_stress)])
[ "def", "cns_vwl_str", "(", "self", ")", ":", "return", "IPAString", "(", "ipa_chars", "=", "[", "c", "for", "c", "in", "self", ".", "ipa_chars", "if", "(", "c", ".", "is_letter", ")", "or", "(", "c", ".", "is_suprasegmental", "and", "c", ".", "is_stress", ")", "]", ")" ]
27.692308
21.538462
def _append_funcs(target, items): """ Helper function that appends function and method objects to a given list. Arguments: target (list): destination list that receives the functions. items (iterable): iterable that yields candidate elements to append. """ [target.append(item) for item in items if isfunction(item) or ismethod(item)]
[ "def", "_append_funcs", "(", "target", ",", "items", ")", ":", "[", "target", ".", "append", "(", "item", ")", "for", "item", "in", "items", "if", "isfunction", "(", "item", ")", "or", "ismethod", "(", "item", ")", "]" ]
32.7
13.9
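A quick check of the filtering in `_append_funcs`: only function and method objects survive. This assumes `_append_funcs` and its `inspect` imports (`isfunction`, `ismethod`) are in scope.

def f():
    pass

class C(object):
    def m(self):
        pass

out = []
_append_funcs(out, [f, C().m, 42, 'x'])
print(out)  # contains f and the bound method C.m; 42 and 'x' are filtered out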
def _parse_include_section(cfg, parser, section): ''' Example of include section: [include] flt: /etc/feat/flt.ini ducksboard: /etc/feat/ducksboard.ini ''' for _name, pattern in cfg.items(section): if not os.path.isabs(pattern): pattern = os.path.join(configure.confdir, pattern) matches = glob.glob(pattern) matches.sort() for filename in matches: f = open(filename, 'r') parse_file(parser, f)
[ "def", "_parse_include_section", "(", "cfg", ",", "parser", ",", "section", ")", ":", "for", "_name", ",", "pattern", "in", "cfg", ".", "items", "(", "section", ")", ":", "if", "not", "os", ".", "path", ".", "isabs", "(", "pattern", ")", ":", "pattern", "=", "os", ".", "path", ".", "join", "(", "configure", ".", "confdir", ",", "pattern", ")", "matches", "=", "glob", ".", "glob", "(", "pattern", ")", "matches", ".", "sort", "(", ")", "for", "filename", "in", "matches", ":", "f", "=", "open", "(", "filename", ",", "'r'", ")", "parse_file", "(", "parser", ",", "f", ")" ]
31.733333
13.066667
def _handle_return(state: GlobalState) -> None: """ Adds to the state all annotations that correspond to the memory locations returned by the RETURN opcode. :param state: The Global State """ stack = state.mstate.stack try: offset, length = get_concrete_int(stack[-1]), get_concrete_int(stack[-2]) except TypeError: return for element in state.mstate.memory[offset : offset + length]: if not isinstance(element, Expression): continue for annotation in element.annotations: if isinstance(annotation, OverUnderflowAnnotation): state.annotate( OverUnderflowStateAnnotation( annotation.overflowing_state, annotation.operator, annotation.constraint, ) )
[ "def", "_handle_return", "(", "state", ":", "GlobalState", ")", "->", "None", ":", "stack", "=", "state", ".", "mstate", ".", "stack", "try", ":", "offset", ",", "length", "=", "get_concrete_int", "(", "stack", "[", "-", "1", "]", ")", ",", "get_concrete_int", "(", "stack", "[", "-", "2", "]", ")", "except", "TypeError", ":", "return", "for", "element", "in", "state", ".", "mstate", ".", "memory", "[", "offset", ":", "offset", "+", "length", "]", ":", "if", "not", "isinstance", "(", "element", ",", "Expression", ")", ":", "continue", "for", "annotation", "in", "element", ".", "annotations", ":", "if", "isinstance", "(", "annotation", ",", "OverUnderflowAnnotation", ")", ":", "state", ".", "annotate", "(", "OverUnderflowStateAnnotation", "(", "annotation", ".", "overflowing_state", ",", "annotation", ".", "operator", ",", "annotation", ".", "constraint", ",", ")", ")" ]
41.73913
15.391304
def execute(self, eopatch): """ Mask values of `feature` according to the `mask_values` in `mask_feature` :param eopatch: `eopatch` to be processed :return: Same `eopatch` instance with masked `feature` """ feature_type, feature_name, new_feature_name = next(self.feature(eopatch)) mask_feature_type, mask_feature_name = next(self.mask_feature(eopatch)) data = np.copy(eopatch[feature_type][feature_name]) mask = eopatch[mask_feature_type][mask_feature_name] if not isinstance(self.mask_values, list): raise ValueError('Incorrect format or values of argument `mask_values`') for value in self.mask_values: data[mask.squeeze() == value] = self.no_data_value eopatch.add_feature(feature_type, new_feature_name, data) return eopatch
[ "def", "execute", "(", "self", ",", "eopatch", ")", ":", "feature_type", ",", "feature_name", ",", "new_feature_name", "=", "next", "(", "self", ".", "feature", "(", "eopatch", ")", ")", "mask_feature_type", ",", "mask_feature_name", "=", "next", "(", "self", ".", "mask_feature", "(", "eopatch", ")", ")", "data", "=", "np", ".", "copy", "(", "eopatch", "[", "feature_type", "]", "[", "feature_name", "]", ")", "mask", "=", "eopatch", "[", "mask_feature_type", "]", "[", "mask_feature_name", "]", "if", "not", "isinstance", "(", "self", ".", "mask_values", ",", "list", ")", ":", "raise", "ValueError", "(", "'Incorrect format or values of argument `mask_values`'", ")", "for", "value", "in", "self", ".", "mask_values", ":", "data", "[", "mask", ".", "squeeze", "(", ")", "==", "value", "]", "=", "self", ".", "no_data_value", "eopatch", ".", "add_feature", "(", "feature_type", ",", "new_feature_name", ",", "data", ")", "return", "eopatch" ]
39.761905
25
def aliases(self): """ Returns symbol instances corresponding to aliased vars. """ return [x for x in self[self.current_scope].values() if x.is_aliased]
[ "def", "aliases", "(", "self", ")", ":", "return", "[", "x", "for", "x", "in", "self", "[", "self", ".", "current_scope", "]", ".", "values", "(", ")", "if", "x", ".", "is_aliased", "]" ]
43.25
14.75
def get_negation(event): """Return negation attached to an event. Example: "states": [{"@type": "State", "type": "NEGATION", "text": "n't"}] """ states = event.get('states', []) if not states: return [] negs = [state for state in states if state.get('type') == 'NEGATION'] neg_texts = [neg['text'] for neg in negs] return neg_texts
[ "def", "get_negation", "(", "event", ")", ":", "states", "=", "event", ".", "get", "(", "'states'", ",", "[", "]", ")", "if", "not", "states", ":", "return", "[", "]", "negs", "=", "[", "state", "for", "state", "in", "states", "if", "state", ".", "get", "(", "'type'", ")", "==", "'NEGATION'", "]", "neg_texts", "=", "[", "neg", "[", "'text'", "]", "for", "neg", "in", "negs", "]", "return", "neg_texts" ]
33.923077
12.307692
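Since `get_negation` only reads plain dicts, the docstring's own example can be run directly; the event below is the minimal shape it expects.

event = {'states': [{'@type': 'State', 'type': 'NEGATION', 'text': "n't"}]}
print(get_negation(event))  # ["n't"]
print(get_negation({}))     # [] (no states present)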
def _purge_children(self): """ Find dead children and put a response on the result queue. :return: """ for task_id, p in six.iteritems(self._running_tasks): if not p.is_alive() and p.exitcode: error_msg = 'Task {} died unexpectedly with exit code {}'.format(task_id, p.exitcode) p.task.trigger_event(Event.PROCESS_FAILURE, p.task, error_msg) elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive(): p.terminate() error_msg = 'Task {} timed out after {} seconds and was terminated.'.format(task_id, p.worker_timeout) p.task.trigger_event(Event.TIMEOUT, p.task, error_msg) else: continue logger.info(error_msg) self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
[ "def", "_purge_children", "(", "self", ")", ":", "for", "task_id", ",", "p", "in", "six", ".", "iteritems", "(", "self", ".", "_running_tasks", ")", ":", "if", "not", "p", ".", "is_alive", "(", ")", "and", "p", ".", "exitcode", ":", "error_msg", "=", "'Task {} died unexpectedly with exit code {}'", ".", "format", "(", "task_id", ",", "p", ".", "exitcode", ")", "p", ".", "task", ".", "trigger_event", "(", "Event", ".", "PROCESS_FAILURE", ",", "p", ".", "task", ",", "error_msg", ")", "elif", "p", ".", "timeout_time", "is", "not", "None", "and", "time", ".", "time", "(", ")", ">", "float", "(", "p", ".", "timeout_time", ")", "and", "p", ".", "is_alive", "(", ")", ":", "p", ".", "terminate", "(", ")", "error_msg", "=", "'Task {} timed out after {} seconds and was terminated.'", ".", "format", "(", "task_id", ",", "p", ".", "worker_timeout", ")", "p", ".", "task", ".", "trigger_event", "(", "Event", ".", "TIMEOUT", ",", "p", ".", "task", ",", "error_msg", ")", "else", ":", "continue", "logger", ".", "info", "(", "error_msg", ")", "self", ".", "_task_result_queue", ".", "put", "(", "(", "task_id", ",", "FAILED", ",", "error_msg", ",", "[", "]", ",", "[", "]", ")", ")" ]
46.684211
28.052632
def populate_display_n(self): """Get the number of displays and populate this drop-down box with them all. Prepend the "always on primary" option. """ cb = self.get_widget('display_n') screen = self.get_widget('config-window').get_screen() cb.append_text("always on primary") for m in range(0, int(screen.get_n_monitors())): if m == int(screen.get_primary_monitor()): # TODO l10n cb.append_text(str(m) + ' ' + '(primary)') else: cb.append_text(str(m))
[ "def", "populate_display_n", "(", "self", ")", ":", "cb", "=", "self", ".", "get_widget", "(", "'display_n'", ")", "screen", "=", "self", ".", "get_widget", "(", "'config-window'", ")", ".", "get_screen", "(", ")", "cb", ".", "append_text", "(", "\"always on primary\"", ")", "for", "m", "in", "range", "(", "0", ",", "int", "(", "screen", ".", "get_n_monitors", "(", ")", ")", ")", ":", "if", "m", "==", "int", "(", "screen", ".", "get_primary_monitor", "(", ")", ")", ":", "# TODO l10n", "cb", ".", "append_text", "(", "str", "(", "m", ")", "+", "' '", "+", "'(primary)'", ")", "else", ":", "cb", ".", "append_text", "(", "str", "(", "m", ")", ")" ]
37.8
15
def validate_settings(settings): """ `settings` is either a dictionary or an object containing Kronos settings (e.g., the contents of conf/settings.py). This function checks that all required settings are present and valid. """ # Validate `storage` storage = _validate_and_get_value(settings, 'settings', 'storage', dict) for name, options in storage.iteritems(): if 'backend' not in options: raise ImproperlyConfigured( '`storage[\'{}\'] must contain a `backend` key'.format(name)) path = options['backend'] module, cls = path.rsplit('.', 1) module = import_module(module) if not hasattr(module, cls): raise NotImplementedError('`{}` not implemented.'.format(cls)) validate_storage_settings(getattr(module, cls), options) # Validate `streams_to_backends` namespace_to_streams_configuration = _validate_and_get_value( settings, 'settings', 'namespace_to_streams_configuration', dict) for namespace, prefix_confs in namespace_to_streams_configuration.iteritems(): if '' not in prefix_confs: raise ImproperlyConfigured( 'Must specify backends for the null prefix') for prefix, options in prefix_confs.iteritems(): if prefix != '': # Validate stream prefix. validate_stream(prefix) backends = _validate_and_get_value( options, "namespace_to_streams_configuration['{}']['{}']".format(namespace, prefix), 'backends', dict) for backend in backends.keys(): if backend not in storage: raise ImproperlyConfigured( "`{}` backend for `namespace_to_streams_configuration['{}']" "['{}']` is not configured in `storage`" .format(backend, namespace, prefix)) read_backend = _validate_and_get_value( options, "namespace_to_streams_configuration['{}']['{}']".format(namespace, prefix), 'read_backend', str) if read_backend not in storage: raise ImproperlyConfigured( "`{}` backend for `namespace_to_streams_configuration['{}']" "['{}']` is not configured in `storage`" .format(read_backend, namespace, prefix)) # Validate `stream` stream = getattr(settings, 'stream', dict) _validate_and_get_value(stream, 'stream', 'format', re._pattern_type) # Validate `node` node = getattr(settings, 'node', dict) _validate_and_get_value(node, 'node', 'greenlet_pool_size', int) _validate_and_get_value(node, 'node', 'id', str)
[ "def", "validate_settings", "(", "settings", ")", ":", "# Validate `storage`", "storage", "=", "_validate_and_get_value", "(", "settings", ",", "'settings'", ",", "'storage'", ",", "dict", ")", "for", "name", ",", "options", "in", "storage", ".", "iteritems", "(", ")", ":", "if", "'backend'", "not", "in", "options", ":", "raise", "ImproperlyConfigured", "(", "'`storage[\\'{}\\'] must contain a `backend` key'", ".", "format", "(", "name", ")", ")", "path", "=", "options", "[", "'backend'", "]", "module", ",", "cls", "=", "path", ".", "rsplit", "(", "'.'", ",", "1", ")", "module", "=", "import_module", "(", "module", ")", "if", "not", "hasattr", "(", "module", ",", "cls", ")", ":", "raise", "NotImplementedError", "(", "'`{}` not implemented.'", ".", "format", "(", "cls", ")", ")", "validate_storage_settings", "(", "getattr", "(", "module", ",", "cls", ")", ",", "options", ")", "# Validate `streams_to_backends`", "namespace_to_streams_configuration", "=", "_validate_and_get_value", "(", "settings", ",", "'settings'", ",", "'namespace_to_streams_configuration'", ",", "dict", ")", "for", "namespace", ",", "prefix_confs", "in", "namespace_to_streams_configuration", ".", "iteritems", "(", ")", ":", "if", "''", "not", "in", "prefix_confs", ":", "raise", "ImproperlyConfigured", "(", "'Must specify backends for the null prefix'", ")", "for", "prefix", ",", "options", "in", "prefix_confs", ".", "iteritems", "(", ")", ":", "if", "prefix", "!=", "''", ":", "# Validate stream prefix.", "validate_stream", "(", "prefix", ")", "backends", "=", "_validate_and_get_value", "(", "options", ",", "\"namespace_to_streams_configuration['{}']['{}']\"", ".", "format", "(", "namespace", ",", "prefix", ")", ",", "'backends'", ",", "dict", ")", "for", "backend", "in", "backends", ".", "keys", "(", ")", ":", "if", "backend", "not", "in", "storage", ":", "raise", "ImproperlyConfigured", "(", "\"`{}` backend for `namespace_to_streams_configuration['{}']\"", "\"['{}']` is not configured in `storage`\"", ".", "format", "(", "backend", ",", "namespace", ",", "prefix", ")", ")", "read_backend", "=", "_validate_and_get_value", "(", "options", ",", "\"namespace_to_streams_configuration['{}']['{}']\"", ".", "format", "(", "namespace", ",", "prefix", ")", ",", "'read_backend'", ",", "str", ")", "if", "read_backend", "not", "in", "storage", ":", "raise", "ImproperlyConfigured", "(", "\"`{}` backend for `namespace_to_streams_configuration['{}']\"", "\"['{}']` is not configured in `storage`\"", ".", "format", "(", "read_backend", ",", "namespace", ",", "prefix", ")", ")", "# Validate `stream`", "stream", "=", "getattr", "(", "settings", ",", "'stream'", ",", "dict", ")", "_validate_and_get_value", "(", "stream", ",", "'stream'", ",", "'format'", ",", "re", ".", "_pattern_type", ")", "# Validate `node`", "node", "=", "getattr", "(", "settings", ",", "'node'", ",", "dict", ")", "_validate_and_get_value", "(", "node", ",", "'node'", ",", "'greenlet_pool_size'", ",", "int", ")", "_validate_and_get_value", "(", "node", ",", "'node'", ",", "'id'", ",", "str", ")" ]
40.046154
18.692308
def __update(self, row): """Update matching rows in the table. Returns the autoincrement id of the updated row when available, 0 if rows were updated without an autoincrement column, or None if no rows matched. """ expr = self.__table.update().values(row) for key in self.__update_keys: expr = expr.where(getattr(self.__table.c, key) == row[key]) if self.__autoincrement: expr = expr.returning(getattr(self.__table.c, self.__autoincrement)) res = expr.execute() if res.rowcount > 0: if self.__autoincrement: first = next(iter(res)) last_row_id = first[0] return last_row_id return 0 return None
[ "def", "__update", "(", "self", ",", "row", ")", ":", "expr", "=", "self", ".", "__table", ".", "update", "(", ")", ".", "values", "(", "row", ")", "for", "key", "in", "self", ".", "__update_keys", ":", "expr", "=", "expr", ".", "where", "(", "getattr", "(", "self", ".", "__table", ".", "c", ",", "key", ")", "==", "row", "[", "key", "]", ")", "if", "self", ".", "__autoincrement", ":", "expr", "=", "expr", ".", "returning", "(", "getattr", "(", "self", ".", "__table", ".", "c", ",", "self", ".", "__autoincrement", ")", ")", "res", "=", "expr", ".", "execute", "(", ")", "if", "res", ".", "rowcount", ">", "0", ":", "if", "self", ".", "__autoincrement", ":", "first", "=", "next", "(", "iter", "(", "res", ")", ")", "last_row_id", "=", "first", "[", "0", "]", "return", "last_row_id", "return", "0", "return", "None" ]
36.0625
11.4375
def get_coverage(config: CoverageConfig) -> 'Coverage': """ Returns a Coverage instance. :param config: Coverage configuration. :return: Instance of Coverage. """ if config.type == C.COVERAGE_COUNT or config.type == C.COVERAGE_FERTILITY: utils.check_condition(config.num_hidden == 1, "Count or fertility coverage requires coverage_num_hidden==1") if config.type == C.GRU_TYPE: return GRUCoverage(config.num_hidden, config.layer_normalization) elif config.type in {C.TANH, C.SIGMOID, C.RELU, C.SOFT_RELU}: return ActivationCoverage(config.num_hidden, config.type, config.layer_normalization) elif config.type == C.COVERAGE_COUNT: return CountCoverage() elif config.type == C.COVERAGE_FERTILITY: return FertilityCoverage(config.max_fertility) else: raise ValueError("Unknown coverage type %s" % config.type)
[ "def", "get_coverage", "(", "config", ":", "CoverageConfig", ")", "->", "'Coverage'", ":", "if", "config", ".", "type", "==", "C", ".", "COVERAGE_COUNT", "or", "config", ".", "type", "==", "C", ".", "COVERAGE_FERTILITY", ":", "utils", ".", "check_condition", "(", "config", ".", "num_hidden", "==", "1", ",", "\"Count or fertility coverage requires coverage_num_hidden==1\"", ")", "if", "config", ".", "type", "==", "C", ".", "GRU_TYPE", ":", "return", "GRUCoverage", "(", "config", ".", "num_hidden", ",", "config", ".", "layer_normalization", ")", "elif", "config", ".", "type", "in", "{", "C", ".", "TANH", ",", "C", ".", "SIGMOID", ",", "C", ".", "RELU", ",", "C", ".", "SOFT_RELU", "}", ":", "return", "ActivationCoverage", "(", "config", ".", "num_hidden", ",", "config", ".", "type", ",", "config", ".", "layer_normalization", ")", "elif", "config", ".", "type", "==", "C", ".", "COVERAGE_COUNT", ":", "return", "CountCoverage", "(", ")", "elif", "config", ".", "type", "==", "C", ".", "COVERAGE_FERTILITY", ":", "return", "FertilityCoverage", "(", "config", ".", "max_fertility", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown coverage type %s\"", "%", "config", ".", "type", ")" ]
46.315789
20.526316
def resolve_implicit_levels(storage, debug): """Resolving implicit levels (I1, I2) See: http://unicode.org/reports/tr9/#Resolving_Implicit_Levels """ for run in storage['runs']: start, length = run['start'], run['length'] chars = storage['chars'][start:start+length] for _ch in chars: # only those types are allowed at this stage assert _ch['type'] in ('L', 'R', 'EN', 'AN'),\ '%s not allowed here' % _ch['type'] if _embedding_direction(_ch['level']) == 'L': # I1. For all characters with an even (left-to-right) embedding # direction, those of type R go up one level and those of type # AN or EN go up two levels. if _ch['type'] == 'R': _ch['level'] += 1 elif _ch['type'] != 'L': _ch['level'] += 2 else: # I2. For all characters with an odd (right-to-left) embedding # direction, those of type L, EN or AN go up one level. if _ch['type'] != 'R': _ch['level'] += 1 if debug: debug_storage(storage, runs=True)
[ "def", "resolve_implicit_levels", "(", "storage", ",", "debug", ")", ":", "for", "run", "in", "storage", "[", "'runs'", "]", ":", "start", ",", "length", "=", "run", "[", "'start'", "]", ",", "run", "[", "'length'", "]", "chars", "=", "storage", "[", "'chars'", "]", "[", "start", ":", "start", "+", "length", "]", "for", "_ch", "in", "chars", ":", "# only those types are allowed at this stage", "assert", "_ch", "[", "'type'", "]", "in", "(", "'L'", ",", "'R'", ",", "'EN'", ",", "'AN'", ")", ",", "'%s not allowed here'", "%", "_ch", "[", "'type'", "]", "if", "_embedding_direction", "(", "_ch", "[", "'level'", "]", ")", "==", "'L'", ":", "# I1. For all characters with an even (left-to-right) embedding", "# direction, those of type R go up one level and those of type", "# AN or EN go up two levels.", "if", "_ch", "[", "'type'", "]", "==", "'R'", ":", "_ch", "[", "'level'", "]", "+=", "1", "elif", "_ch", "[", "'type'", "]", "!=", "'L'", ":", "_ch", "[", "'level'", "]", "+=", "2", "else", ":", "# I2. For all characters with an odd (right-to-left) embedding", "# direction, those of type L, EN or AN go up one level.", "if", "_ch", "[", "'type'", "]", "!=", "'R'", ":", "_ch", "[", "'level'", "]", "+=", "1", "if", "debug", ":", "debug_storage", "(", "storage", ",", "runs", "=", "True", ")" ]
38.451613
18
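Rules I1/I2 in `resolve_implicit_levels` reduce to a small per-character table. A self-contained sketch of just the level bump, separated from the storage structure the function operates on:

def bump(ch_type, level):
    # I1: even (left-to-right) embedding direction.
    if level % 2 == 0:
        if ch_type == 'R':
            return level + 1
        if ch_type in ('EN', 'AN'):
            return level + 2
    # I2: odd (right-to-left) embedding direction.
    elif ch_type in ('L', 'EN', 'AN'):
        return level + 1
    return level

print(bump('R', 0))   # 1
print(bump('EN', 0))  # 2
print(bump('L', 1))   # 2
print(bump('R', 1))   # 1 (unchanged)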
def anchor(self): """int or str indicating element under which to insert this subtotal. An int anchor is the id of the dimension element (category or subvariable) under which to place this subtotal. The return value can also be one of 'top' or 'bottom'. The return value defaults to 'bottom' for an anchor referring to an element that is no longer present in the dimension or an element that represents missing data. """ anchor = self._subtotal_dict["anchor"] try: anchor = int(anchor) if anchor not in self.valid_elements.element_ids: return "bottom" return anchor except (TypeError, ValueError): return anchor.lower()
[ "def", "anchor", "(", "self", ")", ":", "anchor", "=", "self", ".", "_subtotal_dict", "[", "\"anchor\"", "]", "try", ":", "anchor", "=", "int", "(", "anchor", ")", "if", "anchor", "not", "in", "self", ".", "valid_elements", ".", "element_ids", ":", "return", "\"bottom\"", "return", "anchor", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "anchor", ".", "lower", "(", ")" ]
39.736842
18.157895
def adjust_opts(in_opts, config): """Establish JVM opts, adjusting memory for the context if needed. This allows using less or more memory for highly parallel or multicore supporting processes, respectively. """ memory_adjust = config["algorithm"].get("memory_adjust", {}) out_opts = [] for opt in in_opts: if opt.startswith("-Xmx") or (opt.startswith("-Xms") and memory_adjust.get("direction") == "decrease"): arg = opt[:4] opt = "{arg}{val}".format(arg=arg, val=adjust_memory(opt[4:], memory_adjust.get("magnitude", 1), memory_adjust.get("direction"), maximum=memory_adjust.get("maximum"))) out_opts.append(opt) return out_opts
[ "def", "adjust_opts", "(", "in_opts", ",", "config", ")", ":", "memory_adjust", "=", "config", "[", "\"algorithm\"", "]", ".", "get", "(", "\"memory_adjust\"", ",", "{", "}", ")", "out_opts", "=", "[", "]", "for", "opt", "in", "in_opts", ":", "if", "opt", ".", "startswith", "(", "\"-Xmx\"", ")", "or", "(", "opt", ".", "startswith", "(", "\"-Xms\"", ")", "and", "memory_adjust", ".", "get", "(", "\"direction\"", ")", "==", "\"decrease\"", ")", ":", "arg", "=", "opt", "[", ":", "4", "]", "opt", "=", "\"{arg}{val}\"", ".", "format", "(", "arg", "=", "arg", ",", "val", "=", "adjust_memory", "(", "opt", "[", "4", ":", "]", ",", "memory_adjust", ".", "get", "(", "\"magnitude\"", ",", "1", ")", ",", "memory_adjust", ".", "get", "(", "\"direction\"", ")", ",", "maximum", "=", "memory_adjust", ".", "get", "(", "\"maximum\"", ")", ")", ")", "out_opts", ".", "append", "(", "opt", ")", "return", "out_opts" ]
49.5
24.777778
def _build(self, input_modules, middle_modules, head_modules): """ Assemble the network from the given modules: build the input layer, the middle layers, and the task heads, then construct the soft cross-entropy loss criterion from the train config. """ self.input_layer = self._build_input_layer(input_modules) self.middle_layers = self._build_middle_layers(middle_modules) self.heads = self._build_task_heads(head_modules) # Construct loss module reduction = self.config["train_config"]["loss_fn_reduction"] self.criteria = SoftCrossEntropyLoss(reduction=reduction)
[ "def", "_build", "(", "self", ",", "input_modules", ",", "middle_modules", ",", "head_modules", ")", ":", "self", ".", "input_layer", "=", "self", ".", "_build_input_layer", "(", "input_modules", ")", "self", ".", "middle_layers", "=", "self", ".", "_build_middle_layers", "(", "middle_modules", ")", "self", ".", "heads", "=", "self", ".", "_build_task_heads", "(", "head_modules", ")", "# Construct loss module", "reduction", "=", "self", ".", "config", "[", "\"train_config\"", "]", "[", "\"loss_fn_reduction\"", "]", "self", ".", "criteria", "=", "SoftCrossEntropyLoss", "(", "reduction", "=", "reduction", ")" ]
41
20.454545
def _compute_hanging_wall_effect(self, C, rjb, rrup, dip, mag): """ Compute hanging-wall effect (see eq. 7, 8, 9 and 10 page 319). Considers correct version of equation 8 as given in the erratum and not in the original paper. """ # eq. 8 (to be noticed that the USGS-NSHMP implementation defines # the hanging-wall term for all rjb distances, while in the original # manuscript, hw is computed only for rjb < 5). Again the 'firm rock' # is considered hw = np.zeros_like(rjb) if dip <= 70.: hw = (5. - rjb) / 5. # eq. 9 f_m = 1 if mag > 6.5 else mag - 5.5 # # eq. 10 f_rrup = C['c15'] + np.zeros_like(rrup) idx = rrup < 8 f_rrup[idx] *= rrup[idx] / 8 # eq. 7 (to be noticed that the f3 factor is not included # while this is defined in the original manuscript) f_hw = hw * f_m * f_rrup return f_hw
[ "def", "_compute_hanging_wall_effect", "(", "self", ",", "C", ",", "rjb", ",", "rrup", ",", "dip", ",", "mag", ")", ":", "# eq. 8 (to be noticed that the USGS-NSHMP implementation defines", "# the hanging-wall term for all rjb distances, while in the original", "# manuscript, hw is computed only for rjb < 5). Again the 'firm rock'", "# is considered", "hw", "=", "np", ".", "zeros_like", "(", "rjb", ")", "if", "dip", "<=", "70.", ":", "hw", "=", "(", "5.", "-", "rjb", ")", "/", "5.", "# eq. 9", "f_m", "=", "1", "if", "mag", ">", "6.5", "else", "mag", "-", "5.5", "# # eq. 10", "f_rrup", "=", "C", "[", "'c15'", "]", "+", "np", ".", "zeros_like", "(", "rrup", ")", "idx", "=", "rrup", "<", "8", "f_rrup", "[", "idx", "]", "*=", "rrup", "[", "idx", "]", "/", "8", "# eq. 7 (to be noticed that the f3 factor is not included", "# while this is defined in the original manuscript)", "f_hw", "=", "hw", "*", "f_m", "*", "f_rrup", "return", "f_hw" ]
35.296296
21.222222
def get_annotations(self, annotation_type: type) -> Iterator[StateAnnotation]: """Filters annotations for the queried annotation type. Designed particularly for modules with annotations: worldstate.get_annotations(MySpecificModuleAnnotation) :param annotation_type: The type to filter annotations for :return: filter of matching annotations """ return filter(lambda x: isinstance(x, annotation_type), self.annotations)
[ "def", "get_annotations", "(", "self", ",", "annotation_type", ":", "type", ")", "->", "Iterator", "[", "StateAnnotation", "]", ":", "return", "filter", "(", "lambda", "x", ":", "isinstance", "(", "x", ",", "annotation_type", ")", ",", "self", ".", "annotations", ")" ]
51.888889
20.444444
def dump(finished=True, profile_process='worker'): """Dump profile and stop profiler. Use this to save the profile in advance in case your program cannot exit normally. Parameters ---------- finished : boolean Indicates whether to stop statistic output (dumping) after this dump. Default is True. profile_process : string Whether to profile kvstore `server` or `worker`. The server can only be profiled when the kvstore is of type dist. If this is not passed, defaults to `worker`. """ fin = 1 if finished is True else 0 profile_process2int = {'worker': 0, 'server': 1} check_call(_LIB.MXDumpProcessProfile(fin, profile_process2int[profile_process], profiler_kvstore_handle))
[ "def", "dump", "(", "finished", "=", "True", ",", "profile_process", "=", "'worker'", ")", ":", "fin", "=", "1", "if", "finished", "is", "True", "else", "0", "profile_process2int", "=", "{", "'worker'", ":", "0", ",", "'server'", ":", "1", "}", "check_call", "(", "_LIB", ".", "MXDumpProcessProfile", "(", "fin", ",", "profile_process2int", "[", "profile_process", "]", ",", "profiler_kvstore_handle", ")", ")" ]
42.421053
17.789474
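A typical call pattern for `dump` above, assuming the surrounding module is exposed as `mx.profiler` as in MXNet; the filename and the toy workload are placeholders.

import mxnet as mx

mx.profiler.set_config(profile_all=True, filename='profile.json')
mx.profiler.set_state('run')

a = mx.nd.ones((1000, 1000))
b = (a * 2).sum()
b.wait_to_read()            # force execution so there is something to record

mx.profiler.set_state('stop')
mx.profiler.dump()          # write profile.json and stop statistic output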
def discovery_mdns(self): """ Installs the mDNS discovery bundles and instantiates components """ # Remove Zeroconf debug output logging.getLogger("zeroconf").setLevel(logging.WARNING) # Install the bundle self.context.install_bundle("pelix.remote.discovery.mdns").start() with use_waiting_list(self.context) as ipopo: # Instantiate the discovery ipopo.add(rs.FACTORY_DISCOVERY_ZEROCONF, "pelix-discovery-zeroconf")
[ "def", "discovery_mdns", "(", "self", ")", ":", "# Remove Zeroconf debug output", "logging", ".", "getLogger", "(", "\"zeroconf\"", ")", ".", "setLevel", "(", "logging", ".", "WARNING", ")", "# Install the bundle", "self", ".", "context", ".", "install_bundle", "(", "\"pelix.remote.discovery.mdns\"", ")", ".", "start", "(", ")", "with", "use_waiting_list", "(", "self", ".", "context", ")", "as", "ipopo", ":", "# Instantiate the discovery", "ipopo", ".", "add", "(", "rs", ".", "FACTORY_DISCOVERY_ZEROCONF", ",", "\"pelix-discovery-zeroconf\"", ")" ]
37.923077
19.307692
def rsdl_sn(self, U): """Compute dual residual normalisation term. Overriding this method is required if methods :meth:`cnst_A`, :meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not overridden. """ return self.rho * np.linalg.norm(self.cnst_AT(U))
[ "def", "rsdl_sn", "(", "self", ",", "U", ")", ":", "return", "self", ".", "rho", "*", "np", ".", "linalg", ".", "norm", "(", "self", ".", "cnst_AT", "(", "U", ")", ")" ]
32.888889
21.444444
def PullInstancesWithPath(self, context, MaxObjectCount, **extra): # pylint: disable=invalid-name """ Retrieve the next set of instances (with instance paths) from an open enumeration session. *New in pywbem 0.9.* This operation can only be used on enumeration sessions that have been opened by one of the following methods: * :meth:`~pywbem.WBEMConnection.OpenEnumerateInstances` * :meth:`~pywbem.WBEMConnection.OpenAssociatorInstances` * :meth:`~pywbem.WBEMConnection.OpenReferenceInstances` This method performs the PullInstancesWithPath operation (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all methods performing such operations. If the operation succeeds, this method returns enumeration session status and optionally instances. Otherwise, this method raises an exception. Parameters: context (:func:`py:tuple` of server_context, namespace) A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must have been returned by the previous open or pull operation for this enumeration session. The tuple items are: * server_context (:term:`string`): Enumeration context string returned by the server. This string is opaque for the client. * namespace (:term:`string`): Name of the CIM namespace being used for this enumeration session. MaxObjectCount (:class:`~pywbem.Uint32`) Maximum number of instances the WBEM server shall return for this request. This parameter is required for each Pull request. * If positive, the WBEM server is to return no more than the specified number of instances. * If zero, the WBEM server is to return no instances. This may be used by a client to reset the interoperation timer * `None` is not allowed. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: A :func:`~py:collections.namedtuple` object containing the following named items: * **instances** (:class:`py:list` of :class:`~pywbem.CIMInstance`): Representations of the retrieved instances. The `path` attribute of each :class:`~pywbem.CIMInstance` object is a :class:`~pywbem.CIMInstanceName` object with its attributes set as follows: * `classname`: Name of the creation class of the instance. * `keybindings`: Keybindings of the instance. * `namespace`: Name of the CIM namespace containing the instance. * `host`: Host and optionally port of the WBEM server containing the CIM namespace. * **eos** (:class:`py:bool`): Indicates whether the enumeration session is exhausted after this operation: - If `True`, the enumeration session is exhausted, and the server has closed the enumeration session. - If `False`, the enumeration session is not exhausted and the `context` item is the context object for the next operation on the enumeration session. * **context** (:func:`py:tuple` of server_context, namespace): A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must be supplied with the next pull or close operation for this enumeration session. The tuple items are: * server_context (:term:`string`): Enumeration context string returned by the server if the session is not exhausted, or `None` otherwise. This string is opaque for the client. * namespace (:term:`string`): Name of the CIM namespace that was used for this operation. 
NOTE: This inner tuple hides the need for a CIM namespace on subsequent operations in the enumeration session. CIM operations always require a target namespace, but it never makes sense to specify a different one in subsequent operations on the same enumeration session.

        Raises:

            Exceptions described in :class:`~pywbem.WBEMConnection`.
        """

        exc = None
        result_tuple = None
        method_name = 'PullInstancesWithPath'

        if self._operation_recorders:
            self.operation_recorder_reset(pull_op=True)
            self.operation_recorder_stage_pywbem_args(
                method=method_name,
                context=context,
                MaxObjectCount=MaxObjectCount,
                **extra)

        try:
            stats = self.statistics.start_timer(method_name)
            _validatePullParams(MaxObjectCount, context)

            namespace = context[1]
            result = self._imethodcall(
                method_name,
                namespace=namespace,
                EnumerationContext=context[0],
                MaxObjectCount=MaxObjectCount,
                has_out_params=True,
                **extra)
            result_tuple = pull_inst_result_tuple(
                *self._get_rslt_params(result, namespace))
            return result_tuple

        except (CIMXMLParseError, XMLParseError) as exce:
            exce.request_data = self.last_raw_request
            exce.response_data = self.last_raw_reply
            exc = exce
            raise
        except Exception as exce:
            exc = exce
            raise
        finally:
            self._last_operation_time = stats.stop_timer(
                self.last_request_len,
                self.last_reply_len,
                self.last_server_response_time,
                exc)
            if self._operation_recorders:
                self.operation_recorder_stage_result(result_tuple, exc)
[ "def", "PullInstancesWithPath", "(", "self", ",", "context", ",", "MaxObjectCount", ",", "*", "*", "extra", ")", ":", "# pylint: disable=invalid-name", "exc", "=", "None", "result_tuple", "=", "None", "method_name", "=", "'PullInstancesWithPath'", "if", "self", ".", "_operation_recorders", ":", "self", ".", "operation_recorder_reset", "(", "pull_op", "=", "True", ")", "self", ".", "operation_recorder_stage_pywbem_args", "(", "method", "=", "method_name", ",", "context", "=", "context", ",", "MaxObjectCount", "=", "MaxObjectCount", ",", "*", "*", "extra", ")", "try", ":", "stats", "=", "self", ".", "statistics", ".", "start_timer", "(", "method_name", ")", "_validatePullParams", "(", "MaxObjectCount", ",", "context", ")", "namespace", "=", "context", "[", "1", "]", "result", "=", "self", ".", "_imethodcall", "(", "method_name", ",", "namespace", "=", "namespace", ",", "EnumerationContext", "=", "context", "[", "0", "]", ",", "MaxObjectCount", "=", "MaxObjectCount", ",", "has_out_params", "=", "True", ",", "*", "*", "extra", ")", "result_tuple", "=", "pull_inst_result_tuple", "(", "*", "self", ".", "_get_rslt_params", "(", "result", ",", "namespace", ")", ")", "return", "result_tuple", "except", "(", "CIMXMLParseError", ",", "XMLParseError", ")", "as", "exce", ":", "exce", ".", "request_data", "=", "self", ".", "last_raw_request", "exce", ".", "response_data", "=", "self", ".", "last_raw_reply", "exc", "=", "exce", "raise", "except", "Exception", "as", "exce", ":", "exc", "=", "exce", "raise", "finally", ":", "self", ".", "_last_operation_time", "=", "stats", ".", "stop_timer", "(", "self", ".", "last_request_len", ",", "self", ".", "last_reply_len", ",", "self", ".", "last_server_response_time", ",", "exc", ")", "if", "self", ".", "_operation_recorders", ":", "self", ".", "operation_recorder_stage_result", "(", "result_tuple", ",", "exc", ")" ]
39.801282
23.519231
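Worth pausing on the open/pull pattern this method belongs to: a client opens an enumeration session, then pulls batches of instances until `eos` is set. A minimal sketch using the public pywbem API; the server URL, credentials and class name are placeholders:

# Sketch of the open/pull loop; URL, credentials and class name are made up.
from pywbem import WBEMConnection

conn = WBEMConnection('http://localhost:5988', ('user', 'password'),
                      default_namespace='root/cimv2')

result = conn.OpenEnumerateInstances('CIM_ComputerSystem',
                                     MaxObjectCount=100)
instances = list(result.instances)
while not result.eos:
    # Each pull reuses the context returned by the previous call.
    result = conn.PullInstancesWithPath(result.context, MaxObjectCount=100)
    instances.extend(result.instances)

for inst in instances:
    print(inst.path)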
def get_first_element_index(root, tag_name):
    """
    In order to use Element.insert() in a convenient way, this function
    will find the first child tag with tag_name and return its index
    position. The index can then be used to insert an element before or
    after the found tag using Element.insert().

    Note that counting starts at 1, so a match at 0-based child position
    k is returned as k + 1.
    """
    tag_index = 1
    for tag in root:
        if tag.tag == tag_name:
            # Return the first one found if there is a match
            return tag_index
        tag_index = tag_index + 1
    # Default
    return None
[ "def", "get_first_element_index", "(", "root", ",", "tag_name", ")", ":", "tag_index", "=", "1", "for", "tag", "in", "root", ":", "if", "tag", ".", "tag", "==", "tag_name", ":", "# Return the first one found if there is a match", "return", "tag_index", "tag_index", "=", "tag_index", "+", "1", "# Default", "return", "None" ]
33.3125
14.1875
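Hypothetical usage, assuming get_first_element_index from the record above is in scope. Because the helper counts from 1, subtracting 1 gives the 0-based position for an insert-before:

import xml.etree.ElementTree as ET

root = ET.fromstring(
    "<project><name>demo</name><dependency>a</dependency></project>")
idx = get_first_element_index(root, "dependency")  # -> 2
root.insert(idx - 1, ET.Element("version"))        # insert before <dependency>
print(ET.tostring(root).decode())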
def seek(self, ofs, whence=0):
    """Seek in data.

    On uncompressed files the seeking works by actual seeks so it's fast.
    On compressed files it's slow - forward seeking happens by reading
    ahead, backwards by re-opening and decompressing from the start.
    """
    # disable crc check when seeking
    self._md_context = NoHashContext()

    fsize = self._inf.file_size
    cur_ofs = self.tell()

    if whence == 0:     # seek from beginning of file
        new_ofs = ofs
    elif whence == 1:   # seek from current position
        new_ofs = cur_ofs + ofs
    elif whence == 2:   # seek from end of file
        new_ofs = fsize + ofs
    else:
        raise ValueError('Invalid value for whence')

    # sanity check
    if new_ofs < 0:
        new_ofs = 0
    elif new_ofs > fsize:
        new_ofs = fsize

    # do the actual seek
    if new_ofs >= cur_ofs:
        self._skip(new_ofs - cur_ofs)
    else:
        # reopen and seek
        self._open()
        self._skip(new_ofs)
    return self.tell()
[ "def", "seek", "(", "self", ",", "ofs", ",", "whence", "=", "0", ")", ":", "# disable crc check when seeking", "self", ".", "_md_context", "=", "NoHashContext", "(", ")", "fsize", "=", "self", ".", "_inf", ".", "file_size", "cur_ofs", "=", "self", ".", "tell", "(", ")", "if", "whence", "==", "0", ":", "# seek from beginning of file", "new_ofs", "=", "ofs", "elif", "whence", "==", "1", ":", "# seek from current position", "new_ofs", "=", "cur_ofs", "+", "ofs", "elif", "whence", "==", "2", ":", "# seek from end of file", "new_ofs", "=", "fsize", "+", "ofs", "else", ":", "raise", "ValueError", "(", "'Invalid value for whence'", ")", "# sanity check", "if", "new_ofs", "<", "0", ":", "new_ofs", "=", "0", "elif", "new_ofs", ">", "fsize", ":", "new_ofs", "=", "fsize", "# do the actual seek", "if", "new_ofs", ">=", "cur_ofs", ":", "self", ".", "_skip", "(", "new_ofs", "-", "cur_ofs", ")", "else", ":", "# reopen and seek", "self", ".", "_open", "(", ")", "self", ".", "_skip", "(", "new_ofs", ")", "return", "self", ".", "tell", "(", ")" ]
29.342105
16.578947
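The three `whence` modes follow the standard Python file convention, illustrated here on a plain io.BytesIO; the method above additionally clamps out-of-range offsets instead of raising:

import io

f = io.BytesIO(b"0123456789")
f.seek(3)          # whence=0: absolute -> offset 3
f.seek(2, 1)       # whence=1: relative to current -> offset 5
f.seek(-1, 2)      # whence=2: relative to end -> offset 9
print(f.tell())    # 9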
def query_one_table(self, table_name): """ Run all queries for the given table name (date) and update the cache. :param table_name: table name to query against :type table_name: str """ table_date = self._datetime_for_table_name(table_name) logger.info('Running all queries for date table: %s (%s)', table_name, table_date.strftime('%Y-%m-%d')) final = self._dict_for_projects() try: data_timestamp = self._get_newest_ts_in_table(table_name) except HttpError as exc: try: content = json.loads(exc.content.decode('utf-8')) if content['error']['message'].startswith('Not found: Table'): logger.error("Table %s not found; no data for that day", table_name) return except: pass raise exc # data queries # note - ProjectStats._is_empty_cache_record() needs to know keys for name, func in { 'by_version': self._query_by_version, 'by_file_type': self._query_by_file_type, 'by_installer': self._query_by_installer, 'by_implementation': self._query_by_implementation, 'by_system': self._query_by_system, 'by_distro': self._query_by_distro, 'by_country': self._query_by_country_code }.items(): tmp = func(table_name) for proj_name in tmp: final[proj_name][name] = tmp[proj_name] # add to cache for proj_name in final: self.cache.set(proj_name, table_date, final[proj_name], data_timestamp)
[ "def", "query_one_table", "(", "self", ",", "table_name", ")", ":", "table_date", "=", "self", ".", "_datetime_for_table_name", "(", "table_name", ")", "logger", ".", "info", "(", "'Running all queries for date table: %s (%s)'", ",", "table_name", ",", "table_date", ".", "strftime", "(", "'%Y-%m-%d'", ")", ")", "final", "=", "self", ".", "_dict_for_projects", "(", ")", "try", ":", "data_timestamp", "=", "self", ".", "_get_newest_ts_in_table", "(", "table_name", ")", "except", "HttpError", "as", "exc", ":", "try", ":", "content", "=", "json", ".", "loads", "(", "exc", ".", "content", ".", "decode", "(", "'utf-8'", ")", ")", "if", "content", "[", "'error'", "]", "[", "'message'", "]", ".", "startswith", "(", "'Not found: Table'", ")", ":", "logger", ".", "error", "(", "\"Table %s not found; no data for that day\"", ",", "table_name", ")", "return", "except", ":", "pass", "raise", "exc", "# data queries", "# note - ProjectStats._is_empty_cache_record() needs to know keys", "for", "name", ",", "func", "in", "{", "'by_version'", ":", "self", ".", "_query_by_version", ",", "'by_file_type'", ":", "self", ".", "_query_by_file_type", ",", "'by_installer'", ":", "self", ".", "_query_by_installer", ",", "'by_implementation'", ":", "self", ".", "_query_by_implementation", ",", "'by_system'", ":", "self", ".", "_query_by_system", ",", "'by_distro'", ":", "self", ".", "_query_by_distro", ",", "'by_country'", ":", "self", ".", "_query_by_country_code", "}", ".", "items", "(", ")", ":", "tmp", "=", "func", "(", "table_name", ")", "for", "proj_name", "in", "tmp", ":", "final", "[", "proj_name", "]", "[", "name", "]", "=", "tmp", "[", "proj_name", "]", "# add to cache", "for", "proj_name", "in", "final", ":", "self", ".", "cache", ".", "set", "(", "proj_name", ",", "table_date", ",", "final", "[", "proj_name", "]", ",", "data_timestamp", ")" ]
41.95122
17.02439
def update(self):
    """
    Update the screen's contents in every loop.
    """
    # this is not really necessary because the surface is black after initializing
    self.corners.fill(BLACK)
    self.corners.draw_dot((0, 0), self.colors[0])
    self.corners.draw_dot((self.screen.width - 1, 0), self.colors[0])
    self.corners.draw_dot((self.screen.width - 1, self.screen.height - 1), self.colors[0])
    self.corners.draw_dot((0, self.screen.height - 1), self.colors[0])
    self.lines.fill(BLACK)
    self.lines.draw_line((1, 0), (self.lines.width - 1, 0), self.colors[1])
    self.lines.draw_line((0, 1), (0, self.lines.height - 1), self.colors[3])
    self.lines.draw_line((0, 0), (self.lines.width - 1, self.lines.height - 1), self.colors[2])
    self.rects.fill(BLACK)
    self.rects.draw_rect((0, 0), (int(self.rects.width / 2) - 1, self.rects.height), self.colors[2], self.colors[3])
    self.rects.draw_rect((int(self.rects.width / 2) + 1, 0), (int(self.rects.width / 2) - 1, self.rects.height), self.colors[3], self.colors[2])
    self.circle.fill(BLACK)
    radius = int(min(self.circle.width, self.circle.height) / 2) - 1
    self.circle.draw_circle((int(self.circle.width / 2) - 1, int(self.circle.height / 2) - 1), radius, self.colors[4], self.colors[5])
    self.filled.fill(self.colors[6])
[ "def", "update", "(", "self", ")", ":", "# this is not really neccesary because the surface is black after initializing", "self", ".", "corners", ".", "fill", "(", "BLACK", ")", "self", ".", "corners", ".", "draw_dot", "(", "(", "0", ",", "0", ")", ",", "self", ".", "colors", "[", "0", "]", ")", "self", ".", "corners", ".", "draw_dot", "(", "(", "self", ".", "screen", ".", "width", "-", "1", ",", "0", ")", ",", "self", ".", "colors", "[", "0", "]", ")", "self", ".", "corners", ".", "draw_dot", "(", "(", "self", ".", "screen", ".", "width", "-", "1", ",", "self", ".", "screen", ".", "height", "-", "1", ")", ",", "self", ".", "colors", "[", "0", "]", ")", "self", ".", "corners", ".", "draw_dot", "(", "(", "0", ",", "self", ".", "screen", ".", "height", "-", "1", ")", ",", "self", ".", "colors", "[", "0", "]", ")", "self", ".", "lines", ".", "fill", "(", "BLACK", ")", "self", ".", "lines", ".", "draw_line", "(", "(", "1", ",", "0", ")", ",", "(", "self", ".", "lines", ".", "width", "-", "1", ",", "0", ")", ",", "self", ".", "colors", "[", "1", "]", ")", "self", ".", "lines", ".", "draw_line", "(", "(", "0", ",", "1", ")", ",", "(", "0", ",", "self", ".", "lines", ".", "height", "-", "1", ")", ",", "self", ".", "colors", "[", "3", "]", ")", "self", ".", "lines", ".", "draw_line", "(", "(", "0", ",", "0", ")", ",", "(", "self", ".", "lines", ".", "width", "-", "1", ",", "self", ".", "lines", ".", "height", "-", "1", ")", ",", "self", ".", "colors", "[", "2", "]", ")", "self", ".", "rects", ".", "fill", "(", "BLACK", ")", "self", ".", "rects", ".", "draw_rect", "(", "(", "0", ",", "0", ")", ",", "(", "int", "(", "self", ".", "rects", ".", "width", "/", "2", ")", "-", "1", ",", "self", ".", "rects", ".", "height", ")", ",", "self", ".", "colors", "[", "2", "]", ",", "self", ".", "colors", "[", "3", "]", ")", "self", ".", "rects", ".", "draw_rect", "(", "(", "int", "(", "self", ".", "rects", ".", "width", "/", "2", ")", "+", "1", ",", "0", ")", ",", "(", "int", "(", "self", ".", "rects", ".", "width", "/", "2", ")", "-", "1", ",", "self", ".", "rects", ".", "height", ")", ",", "self", ".", "colors", "[", "3", "]", ",", "self", ".", "colors", "[", "2", "]", ")", "self", ".", "circle", ".", "fill", "(", "BLACK", ")", "radius", "=", "int", "(", "min", "(", "self", ".", "circle", ".", "width", ",", "self", ".", "circle", ".", "height", ")", "/", "2", ")", "-", "1", "self", ".", "circle", ".", "draw_circle", "(", "(", "int", "(", "self", ".", "circle", ".", "width", "/", "2", ")", "-", "1", ",", "int", "(", "self", ".", "circle", ".", "height", "/", "2", ")", "-", "1", ")", ",", "radius", ",", "self", ".", "colors", "[", "4", "]", ",", "self", ".", "colors", "[", "5", "]", ")", "self", ".", "filled", ".", "fill", "(", "self", ".", "colors", "[", "6", "]", ")" ]
48.441176
23.088235
def string_to_run(self, qad, executable, stdin=None, stdout=None, stderr=None, exec_args=None):
    """
    Build and return a string with the command required to launch `executable`
    with the qadapter `qad`.

    Args:
        qad: Qadapter instance.
        executable (str): Executable name or path
        stdin (str): Name of the file to be used as standard input.
            None means no redirection.
        stdout (str): Name of the file to be used as standard output.
            None means no redirection.
        stderr (str): Name of the file to be used as standard error.
            None means no redirection.
        exec_args: Optional list of strings with options passed to `executable`.

    Return:
        String with command to execute.
    """
    stdin = "< " + stdin if stdin is not None else ""
    stdout = "> " + stdout if stdout is not None else ""
    stderr = "2> " + stderr if stderr is not None else ""

    if exec_args:
        executable = executable + " " + " ".join(list_strings(exec_args))

    basename = os.path.basename(self.name)
    if basename in ["mpirun", "mpiexec", "srun"]:
        if self.type is None:
            # $MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR
            num_opt = "-n " + str(qad.mpi_procs)
            cmd = " ".join([self.name, self.options, num_opt, executable, stdin, stdout, stderr])
        else:
            raise NotImplementedError("type %s is not supported!" % self.type)

    elif basename == "runjob":
        #runjob --ranks-per-node 2 --exp-env OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
        #runjob -n 2 --exp-env=OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR
        # exe must be absolute path or relative to cwd.
        bg_size, rpn = qad.bgsize_rankspernode()
        #num_opt = "-n " + str(qad.mpi_procs)
        num_opt = "--ranks-per-node " + str(rpn)
        cmd = " ".join([self.name, self.options, num_opt, "--exp-env OMP_NUM_THREADS",
                        "--exe `which " + executable + "` ", stdin, stdout, stderr])
    else:
        if qad.mpi_procs != 1:
            raise ValueError("Cannot use mpi_procs > 1 when mpi_runner basename=%s" % basename)
        cmd = " ".join([executable, stdin, stdout, stderr])

    return cmd
[ "def", "string_to_run", "(", "self", ",", "qad", ",", "executable", ",", "stdin", "=", "None", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "exec_args", "=", "None", ")", ":", "stdin", "=", "\"< \"", "+", "stdin", "if", "stdin", "is", "not", "None", "else", "\"\"", "stdout", "=", "\"> \"", "+", "stdout", "if", "stdout", "is", "not", "None", "else", "\"\"", "stderr", "=", "\"2> \"", "+", "stderr", "if", "stderr", "is", "not", "None", "else", "\"\"", "if", "exec_args", ":", "executable", "=", "executable", "+", "\" \"", "+", "\" \"", ".", "join", "(", "list_strings", "(", "exec_args", ")", ")", "basename", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "name", ")", "if", "basename", "in", "[", "\"mpirun\"", ",", "\"mpiexec\"", ",", "\"srun\"", "]", ":", "if", "self", ".", "type", "is", "None", ":", "# $MPIRUN -n $MPI_PROCS $EXECUTABLE < $STDIN > $STDOUT 2> $STDERR", "num_opt", "=", "\"-n \"", "+", "str", "(", "qad", ".", "mpi_procs", ")", "cmd", "=", "\" \"", ".", "join", "(", "[", "self", ".", "name", ",", "self", ".", "options", ",", "num_opt", ",", "executable", ",", "stdin", ",", "stdout", ",", "stderr", "]", ")", "else", ":", "raise", "NotImplementedError", "(", "\"type %s is not supported!\"", "%", "self", ".", "type", ")", "elif", "basename", "==", "\"runjob\"", ":", "#runjob --ranks-per-node 2 --exp-env OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR", "#runjob -n 2 --exp-env=OMP_NUM_THREADS --exe $ABINIT < $STDIN > $STDOUT 2> $STDERR", "# exe must be absolute path or relative to cwd.", "bg_size", ",", "rpn", "=", "qad", ".", "bgsize_rankspernode", "(", ")", "#num_opt = \"-n \" + str(qad.mpi_procs)", "num_opt", "=", "\"--ranks-per-node \"", "+", "str", "(", "rpn", ")", "cmd", "=", "\" \"", ".", "join", "(", "[", "self", ".", "name", ",", "self", ".", "options", ",", "num_opt", ",", "\"--exp-env OMP_NUM_THREADS\"", ",", "\"--exe `which \"", "+", "executable", "+", "\"` \"", ",", "stdin", ",", "stdout", ",", "stderr", "]", ")", "else", ":", "if", "qad", ".", "mpi_procs", "!=", "1", ":", "raise", "ValueError", "(", "\"Cannot use mpi_procs > when mpi_runner basename=%s\"", "%", "basename", ")", "cmd", "=", "\" \"", ".", "join", "(", "[", "executable", ",", "stdin", ",", "stdout", ",", "stderr", "]", ")", "return", "cmd" ]
51.173913
30.173913
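Re-enacting the mpirun branch by hand with made-up values makes the string assembly visible (the real call goes through an abipy qadapter object; `options` is empty here):

name, options, mpi_procs = "mpirun", "", 8
executable = "abinit"
stdin, stdout, stderr = "< run.files", "> run.log", ""
num_opt = "-n " + str(mpi_procs)
cmd = " ".join([name, options, num_opt, executable, stdin, stdout, stderr])
print(cmd)  # 'mpirun  -n 8 abinit < run.files > run.log ' -- the stray
            # spaces come from the empty options/stderr slots, as in the method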
def _match_minimum_date_time(self, match_key, date_time_value, match=True): """Matches a minimum date time value""" if match: gtelt = '$gte' else: gtelt = '$lt' if match_key in self._query_terms: self._query_terms[match_key][gtelt] = date_time_value else: self._query_terms[match_key] = {gtelt: date_time_value}
[ "def", "_match_minimum_date_time", "(", "self", ",", "match_key", ",", "date_time_value", ",", "match", "=", "True", ")", ":", "if", "match", ":", "gtelt", "=", "'$gte'", "else", ":", "gtelt", "=", "'$lt'", "if", "match_key", "in", "self", ".", "_query_terms", ":", "self", ".", "_query_terms", "[", "match_key", "]", "[", "gtelt", "]", "=", "date_time_value", "else", ":", "self", ".", "_query_terms", "[", "match_key", "]", "=", "{", "gtelt", ":", "date_time_value", "}" ]
39
19.5
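The if/else on an existing key is equivalent to a dict setdefault; the helper just accumulates a MongoDB-style range term. A standalone re-enactment:

from datetime import datetime

query_terms = {}
match_key, value, match = 'startDate', datetime(2020, 1, 1), True
gtelt = '$gte' if match else '$lt'
query_terms.setdefault(match_key, {})[gtelt] = value
print(query_terms)
# {'startDate': {'$gte': datetime.datetime(2020, 1, 1, 0, 0)}}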
def verify_pattern(pattern):
    """Verifies that a pattern used for matching and finding fulfills the expected structure.

    :param pattern: string pattern to verify
    :return: True if pattern has proper syntax, False otherwise
    """
    regex = re.compile("^!?[a-zA-Z]+$|[*]{1,2}$")

    def __verify_pattern__(__pattern__):
        if not __pattern__:
            return False
        elif __pattern__[0] == "!":
            return __verify_pattern__(__pattern__[1:])
        elif __pattern__[0] == "[" and __pattern__[-1] == "]":
            return all(__verify_pattern__(p) for p in __pattern__[1:-1].split(","))
        else:
            return regex.match(__pattern__)

    return all(__verify_pattern__(p) for p in pattern.split("/"))
[ "def", "verify_pattern", "(", "pattern", ")", ":", "regex", "=", "re", ".", "compile", "(", "\"^!?[a-zA-Z]+$|[*]{1,2}$\"", ")", "def", "__verify_pattern__", "(", "__pattern__", ")", ":", "if", "not", "__pattern__", ":", "return", "False", "elif", "__pattern__", "[", "0", "]", "==", "\"!\"", ":", "return", "__verify_pattern__", "(", "__pattern__", "[", "1", ":", "]", ")", "elif", "__pattern__", "[", "0", "]", "==", "\"[\"", "and", "__pattern__", "[", "-", "1", "]", "==", "\"]\"", ":", "return", "all", "(", "__verify_pattern__", "(", "p", ")", "for", "p", "in", "__pattern__", "[", "1", ":", "-", "1", "]", ".", "split", "(", "\",\"", ")", ")", "else", ":", "return", "regex", ".", "match", "(", "__pattern__", ")", "return", "all", "(", "__verify_pattern__", "(", "p", ")", "for", "p", "in", "pattern", ".", "split", "(", "\"/\"", ")", ")" ]
34.47619
20.190476
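A few probes, assuming verify_pattern from the record above (and `import re`) is in scope; segments are split on "/", negation strips a leading "!", and bracketed segments are checked element-wise:

print(bool(verify_pattern("foo/bar")))        # True: plain segments
print(bool(verify_pattern("!foo/**")))        # True: negation and globstar
print(bool(verify_pattern("[foo,bar]/baz")))  # True: alternative segment
print(bool(verify_pattern("foo!bar")))        # False: '!' only allowed as prefix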
def _subset_by_support(orig_vcf, cmp_calls, data): """Subset orig_vcf to calls also present in any of the comparison callers. """ cmp_vcfs = [x["vrn_file"] for x in cmp_calls] out_file = "%s-inensemble.vcf.gz" % utils.splitext_plus(orig_vcf)[0] if not utils.file_uptodate(out_file, orig_vcf): with file_transaction(data, out_file) as tx_out_file: cmd = "bedtools intersect -header -wa -f 0.5 -r -a {orig_vcf} -b " for cmp_vcf in cmp_vcfs: cmd += "<(bcftools view -f 'PASS,.' %s) " % cmp_vcf cmd += "| bgzip -c > {tx_out_file}" do.run(cmd.format(**locals()), "Subset calls by those present in Ensemble output") return vcfutils.bgzip_and_index(out_file, data["config"])
[ "def", "_subset_by_support", "(", "orig_vcf", ",", "cmp_calls", ",", "data", ")", ":", "cmp_vcfs", "=", "[", "x", "[", "\"vrn_file\"", "]", "for", "x", "in", "cmp_calls", "]", "out_file", "=", "\"%s-inensemble.vcf.gz\"", "%", "utils", ".", "splitext_plus", "(", "orig_vcf", ")", "[", "0", "]", "if", "not", "utils", ".", "file_uptodate", "(", "out_file", ",", "orig_vcf", ")", ":", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "cmd", "=", "\"bedtools intersect -header -wa -f 0.5 -r -a {orig_vcf} -b \"", "for", "cmp_vcf", "in", "cmp_vcfs", ":", "cmd", "+=", "\"<(bcftools view -f 'PASS,.' %s) \"", "%", "cmp_vcf", "cmd", "+=", "\"| bgzip -c > {tx_out_file}\"", "do", ".", "run", "(", "cmd", ".", "format", "(", "*", "*", "locals", "(", ")", ")", ",", "\"Subset calls by those present in Ensemble output\"", ")", "return", "vcfutils", ".", "bgzip_and_index", "(", "out_file", ",", "data", "[", "\"config\"", "]", ")" ]
57.769231
18
def _get_nblock_regions(in_file, min_n_size, ref_regions): """Retrieve coordinates of regions in reference genome with no mapping. These are potential breakpoints for parallelizing analysis. """ out_lines = [] called_contigs = set([]) with utils.open_gzipsafe(in_file) as in_handle: for line in in_handle: contig, start, end, ctype = line.rstrip().split() called_contigs.add(contig) if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and int(end) - int(start) > min_n_size): out_lines.append("%s\t%s\t%s\n" % (contig, start, end)) for refr in ref_regions: if refr.chrom not in called_contigs: out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop)) return pybedtools.BedTool("\n".join(out_lines), from_string=True)
[ "def", "_get_nblock_regions", "(", "in_file", ",", "min_n_size", ",", "ref_regions", ")", ":", "out_lines", "=", "[", "]", "called_contigs", "=", "set", "(", "[", "]", ")", "with", "utils", ".", "open_gzipsafe", "(", "in_file", ")", "as", "in_handle", ":", "for", "line", "in", "in_handle", ":", "contig", ",", "start", ",", "end", ",", "ctype", "=", "line", ".", "rstrip", "(", ")", ".", "split", "(", ")", "called_contigs", ".", "add", "(", "contig", ")", "if", "(", "ctype", "in", "[", "\"REF_N\"", ",", "\"NO_COVERAGE\"", ",", "\"EXCESSIVE_COVERAGE\"", ",", "\"LOW_COVERAGE\"", "]", "and", "int", "(", "end", ")", "-", "int", "(", "start", ")", ">", "min_n_size", ")", ":", "out_lines", ".", "append", "(", "\"%s\\t%s\\t%s\\n\"", "%", "(", "contig", ",", "start", ",", "end", ")", ")", "for", "refr", "in", "ref_regions", ":", "if", "refr", ".", "chrom", "not", "in", "called_contigs", ":", "out_lines", ".", "append", "(", "\"%s\\t%s\\t%s\\n\"", "%", "(", "refr", ".", "chrom", ",", "0", ",", "refr", ".", "stop", ")", ")", "return", "pybedtools", ".", "BedTool", "(", "\"\\n\"", ".", "join", "(", "out_lines", ")", ",", "from_string", "=", "True", ")" ]
50.529412
17.235294
def extrair_logs(self):
    """Overrides :meth:`~satcfe.base.FuncoesSAT.extrair_logs`.

    :return: A SAT response specialized for ``ExtrairLogs``.
    :rtype: satcfe.resposta.extrairlogs.RespostaExtrairLogs
    """
    resp = self._http_post('extrairlogs')
    conteudo = resp.json()
    return RespostaExtrairLogs.analisar(conteudo.get('retorno'))
[ "def", "extrair_logs", "(", "self", ")", ":", "resp", "=", "self", ".", "_http_post", "(", "'extrairlogs'", ")", "conteudo", "=", "resp", ".", "json", "(", ")", "return", "RespostaExtrairLogs", ".", "analisar", "(", "conteudo", ".", "get", "(", "'retorno'", ")", ")" ]
41.333333
16.666667
def proxy_headers(self, proxy):
    """Returns a dictionary of the headers to add to any request sent
    through a proxy. This works with urllib3 magic to ensure that they are
    correctly sent to the proxy, rather than in a tunnelled request if
    CONNECT is being used.

    This should not be called from user code, and is only exposed for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param proxy: The url of the proxy being used for this request.
    """
    headers = {}
    username, password = get_auth_from_url(proxy)

    if username and password:
        headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                         password)

    return headers
[ "def", "proxy_headers", "(", "self", ",", "proxy", ")", ":", "headers", "=", "{", "}", "username", ",", "password", "=", "get_auth_from_url", "(", "proxy", ")", "if", "username", "and", "password", ":", "headers", "[", "'Proxy-Authorization'", "]", "=", "_basic_auth_str", "(", "username", ",", "password", ")", "return", "headers" ]
41.190476
23.47619
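A standalone illustration of what the method produces, using requests' own helpers (note that `_basic_auth_str` is a private helper of requests.auth, so this is for illustration only):

from requests.utils import get_auth_from_url
from requests.auth import _basic_auth_str

proxy = 'http://user:pass@10.10.1.10:3128'
username, password = get_auth_from_url(proxy)
headers = {}
if username and password:
    headers['Proxy-Authorization'] = _basic_auth_str(username, password)
print(headers)  # {'Proxy-Authorization': 'Basic dXNlcjpwYXNz'}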
def _srm(self, data): """Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- sigma_s : array, shape=[features, features] The covariance :math:`\\Sigma_s` of the shared response Normal distribution. w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. mu : list of array, element i has shape=[voxels_i] The voxel means :math:`\\mu_i` over the samples for each subject. rho2 : array, shape=[subjects] The estimated noise variance :math:`\\rho_i^2` for each subject s : array, shape=[features, samples] The shared response. """ local_min = min([d.shape[1] for d in data if d is not None], default=sys.maxsize) samples = self.comm.allreduce(local_min, op=MPI.MIN) subjects = len(data) self.random_state_ = np.random.RandomState(self.rand_seed) random_states = [ np.random.RandomState(self.random_state_.randint(2 ** 32)) for i in range(len(data))] # Initialization step: initialize the outputs with initial values, # voxels with the number of voxels in each subject, and trace_xtx with # the ||X_i||_F^2 of each subject. w, voxels = _init_w_transforms(data, self.features, random_states, self.comm) x, mu, rho2, trace_xtx = self._init_structures(data, subjects) shared_response = np.zeros((self.features, samples)) sigma_s = np.identity(self.features) rank = self.comm.Get_rank() # Main loop of the algorithm (run for iteration in range(self.n_iter): logger.info('Iteration %d' % (iteration + 1)) # E-step: # Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W if rank == 0: rho0 = (1 / rho2).sum() # Invert Sigma_s using Cholesky factorization (chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor( sigma_s, check_finite=False) inv_sigma_s = scipy.linalg.cho_solve( (chol_sigma_s, lower_sigma_s), np.identity(self.features), check_finite=False) # Invert (Sigma_s + rho_0 * I) using Cholesky factorization sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0 chol_sigma_s_rhos, lower_sigma_s_rhos = \ scipy.linalg.cho_factor(sigma_s_rhos, check_finite=False) inv_sigma_s_rhos = scipy.linalg.cho_solve( (chol_sigma_s_rhos, lower_sigma_s_rhos), np.identity(self.features), check_finite=False) # Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces # of X_i^T * rho_i^-2 * X_i wt_invpsi_x = np.zeros((self.features, samples)) trace_xt_invsigma2_x = 0.0 for subject in range(subjects): if data[subject] is not None: wt_invpsi_x += (w[subject].T.dot(x[subject])) \ / rho2[subject] trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject] wt_invpsi_x = self.comm.reduce(wt_invpsi_x, op=MPI.SUM) trace_xt_invsigma2_x = self.comm.reduce(trace_xt_invsigma2_x, op=MPI.SUM) trace_sigma_s = None if rank == 0: log_det_psi = np.sum(np.log(rho2) * voxels) # Update the shared response shared_response = sigma_s.dot( np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot( wt_invpsi_x) # M-step # Update Sigma_s and compute its trace sigma_s = (inv_sigma_s_rhos + shared_response.dot(shared_response.T) / samples) trace_sigma_s = samples * np.trace(sigma_s) shared_response = self.comm.bcast(shared_response) trace_sigma_s = self.comm.bcast(trace_sigma_s) # Update each subject's mapping transform W_i and error variance # rho_i^2 for subject in range(subjects): if x[subject] is not None: a_subject = x[subject].dot(shared_response.T) perturbation = 
np.zeros(a_subject.shape) np.fill_diagonal(perturbation, 0.001) u_subject, s_subject, v_subject = np.linalg.svd( a_subject + perturbation, full_matrices=False) w[subject] = u_subject.dot(v_subject) rho2[subject] = trace_xtx[subject] rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum() rho2[subject] += trace_sigma_s rho2[subject] /= samples * voxels[subject] else: rho2[subject] = 0 rho2 = self.comm.allreduce(rho2, op=MPI.SUM) if rank == 0: if logger.isEnabledFor(logging.INFO): # Calculate and log the current log-likelihood for checking # convergence loglike = self._likelihood( chol_sigma_s_rhos, log_det_psi, chol_sigma_s, trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x, samples) logger.info('Objective function %f' % loglike) sigma_s = self.comm.bcast(sigma_s) return sigma_s, w, mu, rho2, shared_response
[ "def", "_srm", "(", "self", ",", "data", ")", ":", "local_min", "=", "min", "(", "[", "d", ".", "shape", "[", "1", "]", "for", "d", "in", "data", "if", "d", "is", "not", "None", "]", ",", "default", "=", "sys", ".", "maxsize", ")", "samples", "=", "self", ".", "comm", ".", "allreduce", "(", "local_min", ",", "op", "=", "MPI", ".", "MIN", ")", "subjects", "=", "len", "(", "data", ")", "self", ".", "random_state_", "=", "np", ".", "random", ".", "RandomState", "(", "self", ".", "rand_seed", ")", "random_states", "=", "[", "np", ".", "random", ".", "RandomState", "(", "self", ".", "random_state_", ".", "randint", "(", "2", "**", "32", ")", ")", "for", "i", "in", "range", "(", "len", "(", "data", ")", ")", "]", "# Initialization step: initialize the outputs with initial values,", "# voxels with the number of voxels in each subject, and trace_xtx with", "# the ||X_i||_F^2 of each subject.", "w", ",", "voxels", "=", "_init_w_transforms", "(", "data", ",", "self", ".", "features", ",", "random_states", ",", "self", ".", "comm", ")", "x", ",", "mu", ",", "rho2", ",", "trace_xtx", "=", "self", ".", "_init_structures", "(", "data", ",", "subjects", ")", "shared_response", "=", "np", ".", "zeros", "(", "(", "self", ".", "features", ",", "samples", ")", ")", "sigma_s", "=", "np", ".", "identity", "(", "self", ".", "features", ")", "rank", "=", "self", ".", "comm", ".", "Get_rank", "(", ")", "# Main loop of the algorithm (run", "for", "iteration", "in", "range", "(", "self", ".", "n_iter", ")", ":", "logger", ".", "info", "(", "'Iteration %d'", "%", "(", "iteration", "+", "1", ")", ")", "# E-step:", "# Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W", "if", "rank", "==", "0", ":", "rho0", "=", "(", "1", "/", "rho2", ")", ".", "sum", "(", ")", "# Invert Sigma_s using Cholesky factorization", "(", "chol_sigma_s", ",", "lower_sigma_s", ")", "=", "scipy", ".", "linalg", ".", "cho_factor", "(", "sigma_s", ",", "check_finite", "=", "False", ")", "inv_sigma_s", "=", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "chol_sigma_s", ",", "lower_sigma_s", ")", ",", "np", ".", "identity", "(", "self", ".", "features", ")", ",", "check_finite", "=", "False", ")", "# Invert (Sigma_s + rho_0 * I) using Cholesky factorization", "sigma_s_rhos", "=", "inv_sigma_s", "+", "np", ".", "identity", "(", "self", ".", "features", ")", "*", "rho0", "chol_sigma_s_rhos", ",", "lower_sigma_s_rhos", "=", "scipy", ".", "linalg", ".", "cho_factor", "(", "sigma_s_rhos", ",", "check_finite", "=", "False", ")", "inv_sigma_s_rhos", "=", "scipy", ".", "linalg", ".", "cho_solve", "(", "(", "chol_sigma_s_rhos", ",", "lower_sigma_s_rhos", ")", ",", "np", ".", "identity", "(", "self", ".", "features", ")", ",", "check_finite", "=", "False", ")", "# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces", "# of X_i^T * rho_i^-2 * X_i", "wt_invpsi_x", "=", "np", ".", "zeros", "(", "(", "self", ".", "features", ",", "samples", ")", ")", "trace_xt_invsigma2_x", "=", "0.0", "for", "subject", "in", "range", "(", "subjects", ")", ":", "if", "data", "[", "subject", "]", "is", "not", "None", ":", "wt_invpsi_x", "+=", "(", "w", "[", "subject", "]", ".", "T", ".", "dot", "(", "x", "[", "subject", "]", ")", ")", "/", "rho2", "[", "subject", "]", "trace_xt_invsigma2_x", "+=", "trace_xtx", "[", "subject", "]", "/", "rho2", "[", "subject", "]", "wt_invpsi_x", "=", "self", ".", "comm", ".", "reduce", "(", "wt_invpsi_x", ",", "op", "=", "MPI", ".", "SUM", ")", "trace_xt_invsigma2_x", "=", "self", ".", "comm", ".", "reduce", 
"(", "trace_xt_invsigma2_x", ",", "op", "=", "MPI", ".", "SUM", ")", "trace_sigma_s", "=", "None", "if", "rank", "==", "0", ":", "log_det_psi", "=", "np", ".", "sum", "(", "np", ".", "log", "(", "rho2", ")", "*", "voxels", ")", "# Update the shared response", "shared_response", "=", "sigma_s", ".", "dot", "(", "np", ".", "identity", "(", "self", ".", "features", ")", "-", "rho0", "*", "inv_sigma_s_rhos", ")", ".", "dot", "(", "wt_invpsi_x", ")", "# M-step", "# Update Sigma_s and compute its trace", "sigma_s", "=", "(", "inv_sigma_s_rhos", "+", "shared_response", ".", "dot", "(", "shared_response", ".", "T", ")", "/", "samples", ")", "trace_sigma_s", "=", "samples", "*", "np", ".", "trace", "(", "sigma_s", ")", "shared_response", "=", "self", ".", "comm", ".", "bcast", "(", "shared_response", ")", "trace_sigma_s", "=", "self", ".", "comm", ".", "bcast", "(", "trace_sigma_s", ")", "# Update each subject's mapping transform W_i and error variance", "# rho_i^2", "for", "subject", "in", "range", "(", "subjects", ")", ":", "if", "x", "[", "subject", "]", "is", "not", "None", ":", "a_subject", "=", "x", "[", "subject", "]", ".", "dot", "(", "shared_response", ".", "T", ")", "perturbation", "=", "np", ".", "zeros", "(", "a_subject", ".", "shape", ")", "np", ".", "fill_diagonal", "(", "perturbation", ",", "0.001", ")", "u_subject", ",", "s_subject", ",", "v_subject", "=", "np", ".", "linalg", ".", "svd", "(", "a_subject", "+", "perturbation", ",", "full_matrices", "=", "False", ")", "w", "[", "subject", "]", "=", "u_subject", ".", "dot", "(", "v_subject", ")", "rho2", "[", "subject", "]", "=", "trace_xtx", "[", "subject", "]", "rho2", "[", "subject", "]", "+=", "-", "2", "*", "np", ".", "sum", "(", "w", "[", "subject", "]", "*", "a_subject", ")", ".", "sum", "(", ")", "rho2", "[", "subject", "]", "+=", "trace_sigma_s", "rho2", "[", "subject", "]", "/=", "samples", "*", "voxels", "[", "subject", "]", "else", ":", "rho2", "[", "subject", "]", "=", "0", "rho2", "=", "self", ".", "comm", ".", "allreduce", "(", "rho2", ",", "op", "=", "MPI", ".", "SUM", ")", "if", "rank", "==", "0", ":", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "# Calculate and log the current log-likelihood for checking", "# convergence", "loglike", "=", "self", ".", "_likelihood", "(", "chol_sigma_s_rhos", ",", "log_det_psi", ",", "chol_sigma_s", ",", "trace_xt_invsigma2_x", ",", "inv_sigma_s_rhos", ",", "wt_invpsi_x", ",", "samples", ")", "logger", ".", "info", "(", "'Objective function %f'", "%", "loglike", ")", "sigma_s", "=", "self", ".", "comm", ".", "bcast", "(", "sigma_s", ")", "return", "sigma_s", ",", "w", ",", "mu", ",", "rho2", ",", "shared_response" ]
42.647482
22.841727
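The W_i update inside the M-step above is an orthogonal Procrustes step: given A = X_i S^T (plus a tiny diagonal perturbation for numerical stability), the nearest orthogonal map is U V^T from the SVD of A. A standalone re-enactment with random data:

import numpy as np

rng = np.random.RandomState(0)
x_i = rng.randn(50, 30)       # voxels x samples
shared = rng.randn(5, 30)     # features x samples
a = x_i.dot(shared.T)         # voxels x features
u, s, vt = np.linalg.svd(a, full_matrices=False)
w_i = u.dot(vt)
print(np.allclose(w_i.T.dot(w_i), np.eye(5)))  # True: orthonormal columns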
def prove(self, file, chal, tag):
    """This function returns a proof calculated from the file, the
    challenge, and the file tag

    :param file: a file-like object that supports the `read()`, `tell()`
        and `seek()` methods
    :param chal: the challenge to use for proving
    :param tag: the file tag
    """
    chunk_size = self.sectors * self.sectorsize

    index = KeyedPRF(chal.key, len(tag.sigma))
    v = KeyedPRF(chal.key, chal.v_max)

    proof = Proof()
    proof.mu = [0] * self.sectors
    proof.sigma = 0

    for i in range(0, chal.chunks):
        for j in range(0, self.sectors):
            pos = index.eval(i) * chunk_size + j * self.sectorsize
            file.seek(pos)
            buffer = file.read(self.sectorsize)

            if (len(buffer) > 0):
                proof.mu[j] += v.eval(i) * number.bytes_to_long(buffer)

            if (len(buffer) != self.sectorsize):
                break

    for j in range(0, self.sectors):
        proof.mu[j] %= self.prime

    for i in range(0, chal.chunks):
        proof.sigma += v.eval(i) * tag.sigma[index.eval(i)]

    proof.sigma %= self.prime
    return proof
[ "def", "prove", "(", "self", ",", "file", ",", "chal", ",", "tag", ")", ":", "chunk_size", "=", "self", ".", "sectors", "*", "self", ".", "sectorsize", "index", "=", "KeyedPRF", "(", "chal", ".", "key", ",", "len", "(", "tag", ".", "sigma", ")", ")", "v", "=", "KeyedPRF", "(", "chal", ".", "key", ",", "chal", ".", "v_max", ")", "proof", "=", "Proof", "(", ")", "proof", ".", "mu", "=", "[", "0", "]", "*", "self", ".", "sectors", "proof", ".", "sigma", "=", "0", "for", "i", "in", "range", "(", "0", ",", "chal", ".", "chunks", ")", ":", "for", "j", "in", "range", "(", "0", ",", "self", ".", "sectors", ")", ":", "pos", "=", "index", ".", "eval", "(", "i", ")", "*", "chunk_size", "+", "j", "*", "self", ".", "sectorsize", "file", ".", "seek", "(", "pos", ")", "buffer", "=", "file", ".", "read", "(", "self", ".", "sectorsize", ")", "if", "(", "len", "(", "buffer", ")", ">", "0", ")", ":", "proof", ".", "mu", "[", "j", "]", "+=", "v", ".", "eval", "(", "i", ")", "*", "number", ".", "bytes_to_long", "(", "buffer", ")", "if", "(", "len", "(", "buffer", ")", "!=", "self", ".", "sectorsize", ")", ":", "break", "for", "j", "in", "range", "(", "0", ",", "self", ".", "sectors", ")", ":", "proof", ".", "mu", "[", "j", "]", "%=", "self", ".", "prime", "for", "i", "in", "range", "(", "0", ",", "chal", ".", "chunks", ")", ":", "proof", ".", "sigma", "+=", "v", ".", "eval", "(", "i", ")", "*", "tag", ".", "sigma", "[", "index", ".", "eval", "(", "i", ")", "]", "proof", ".", "sigma", "%=", "self", ".", "prime", "return", "proof" ]
32.210526
17.394737
def all(self, query=None, **kwargs):
    """Gets all organizations."""
    return super(OrganizationsProxy, self).all(query=query)
[ "def", "all", "(", "self", ",", "query", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "super", "(", "OrganizationsProxy", ",", "self", ")", ".", "all", "(", "query", "=", "query", ")" ]
25.333333
12.666667
def process_metadata_callbacks(aws_config): """ Iterates through each type of resource and, when callbacks have been configured in the config metadata, recurse through each resource and calls each callback. :param aws_config: The entire AWS configuration object :return: None """ for service_group in aws_config['metadata']: for service in aws_config['metadata'][service_group]: if service == 'summaries': continue # Reset external attack surface if 'summaries' in aws_config['metadata'][service_group][service]: for summary in aws_config['metadata'][service_group][service]['summaries']: if summary == 'external attack surface' and service in aws_config['services'] and 'external_attack_surface' in aws_config['services'][service]: aws_config['services'][service].pop('external_attack_surface') # Reset all global summaries if 'service_groups' in aws_config: aws_config.pop('service_groups') # Resources for resource_type in aws_config['metadata'][service_group][service]['resources']: if 'callbacks' in aws_config['metadata'][service_group][service]['resources'][resource_type]: current_path = [ 'services', service ] target_path = aws_config['metadata'][service_group][service]['resources'][resource_type]['path'].replace('.id', '').split('.')[2:] callbacks = aws_config['metadata'][service_group][service]['resources'][resource_type]['callbacks'] new_go_to_and_do(aws_config, get_object_at(aws_config, current_path), target_path, current_path, callbacks) # Summaries if 'summaries' in aws_config['metadata'][service_group][service]: for summary in aws_config['metadata'][service_group][service]['summaries']: if 'callbacks' in aws_config['metadata'][service_group][service]['summaries'][summary]: current_path = [ 'services', service ] for callback in aws_config['metadata'][service_group][service]['summaries'][summary]['callbacks']: callback_name = callback[0] callback_args = copy.deepcopy(callback[1]) target_path = callback_args.pop('path').replace('.id', '').split('.')[2:] callbacks = [ [callback_name, callback_args] ] new_go_to_and_do(aws_config, get_object_at(aws_config, current_path), target_path, current_path, callbacks) # Group-level summaries for service_group in aws_config['metadata']: if 'summaries' in aws_config['metadata'][service_group]: for summary in aws_config['metadata'][service_group]['summaries']: current_path = [ 'services', service ] for callback in aws_config['metadata'][service_group]['summaries'][summary]['callbacks']: callback_name = callback[0] callback_args = copy.deepcopy(callback[1]) target_path = aws_config['metadata'][service_group]['summaries'][summary]['path'].split('.') target_object = aws_config for p in target_path: manage_dictionary(target_object, p, {}) target_object = target_object[p] if callback_name == 'merge': for service in aws_config['metadata'][service_group]: if service == 'summaries': continue if 'summaries' in aws_config['metadata'][service_group][service] and summary in aws_config['metadata'][service_group][service]['summaries']: try: source = get_object_at(aws_config, aws_config['metadata'][service_group][service]['summaries'][summary]['path'].split('.')) except: source = {} target_object.update(source) return None
[ "def", "process_metadata_callbacks", "(", "aws_config", ")", ":", "for", "service_group", "in", "aws_config", "[", "'metadata'", "]", ":", "for", "service", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", ":", "if", "service", "==", "'summaries'", ":", "continue", "# Reset external attack surface", "if", "'summaries'", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", ":", "for", "summary", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'summaries'", "]", ":", "if", "summary", "==", "'external attack surface'", "and", "service", "in", "aws_config", "[", "'services'", "]", "and", "'external_attack_surface'", "in", "aws_config", "[", "'services'", "]", "[", "service", "]", ":", "aws_config", "[", "'services'", "]", "[", "service", "]", ".", "pop", "(", "'external_attack_surface'", ")", "# Reset all global summaries", "if", "'service_groups'", "in", "aws_config", ":", "aws_config", ".", "pop", "(", "'service_groups'", ")", "# Resources", "for", "resource_type", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'resources'", "]", ":", "if", "'callbacks'", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'resources'", "]", "[", "resource_type", "]", ":", "current_path", "=", "[", "'services'", ",", "service", "]", "target_path", "=", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'resources'", "]", "[", "resource_type", "]", "[", "'path'", "]", ".", "replace", "(", "'.id'", ",", "''", ")", ".", "split", "(", "'.'", ")", "[", "2", ":", "]", "callbacks", "=", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'resources'", "]", "[", "resource_type", "]", "[", "'callbacks'", "]", "new_go_to_and_do", "(", "aws_config", ",", "get_object_at", "(", "aws_config", ",", "current_path", ")", ",", "target_path", ",", "current_path", ",", "callbacks", ")", "# Summaries", "if", "'summaries'", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", ":", "for", "summary", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'summaries'", "]", ":", "if", "'callbacks'", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'summaries'", "]", "[", "summary", "]", ":", "current_path", "=", "[", "'services'", ",", "service", "]", "for", "callback", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'summaries'", "]", "[", "summary", "]", "[", "'callbacks'", "]", ":", "callback_name", "=", "callback", "[", "0", "]", "callback_args", "=", "copy", ".", "deepcopy", "(", "callback", "[", "1", "]", ")", "target_path", "=", "callback_args", ".", "pop", "(", "'path'", ")", ".", "replace", "(", "'.id'", ",", "''", ")", ".", "split", "(", "'.'", ")", "[", "2", ":", "]", "callbacks", "=", "[", "[", "callback_name", ",", "callback_args", "]", "]", "new_go_to_and_do", "(", "aws_config", ",", "get_object_at", "(", "aws_config", ",", "current_path", ")", ",", "target_path", ",", "current_path", ",", "callbacks", ")", "# Group-level summaries", "for", "service_group", "in", "aws_config", "[", "'metadata'", "]", ":", "if", "'summaries'", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", ":", "for", "summary", "in", "aws_config", "[", "'metadata'", 
"]", "[", "service_group", "]", "[", "'summaries'", "]", ":", "current_path", "=", "[", "'services'", ",", "service", "]", "for", "callback", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "'summaries'", "]", "[", "summary", "]", "[", "'callbacks'", "]", ":", "callback_name", "=", "callback", "[", "0", "]", "callback_args", "=", "copy", ".", "deepcopy", "(", "callback", "[", "1", "]", ")", "target_path", "=", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "'summaries'", "]", "[", "summary", "]", "[", "'path'", "]", ".", "split", "(", "'.'", ")", "target_object", "=", "aws_config", "for", "p", "in", "target_path", ":", "manage_dictionary", "(", "target_object", ",", "p", ",", "{", "}", ")", "target_object", "=", "target_object", "[", "p", "]", "if", "callback_name", "==", "'merge'", ":", "for", "service", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", ":", "if", "service", "==", "'summaries'", ":", "continue", "if", "'summaries'", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "and", "summary", "in", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'summaries'", "]", ":", "try", ":", "source", "=", "get_object_at", "(", "aws_config", ",", "aws_config", "[", "'metadata'", "]", "[", "service_group", "]", "[", "service", "]", "[", "'summaries'", "]", "[", "summary", "]", "[", "'path'", "]", ".", "split", "(", "'.'", ")", ")", "except", ":", "source", "=", "{", "}", "target_object", ".", "update", "(", "source", ")", "return", "None" ]
66.046154
34.2
def init_original_response(self):
    """Fetch the original response used for comparison, and for confirming
    ``is_cookie_necessary``."""
    if 'json' in self.request:
        self.request['data'] = json.dumps(self.request.pop('json')).encode(
            self.encoding)
    r1 = self.req.request(
        retry=self.retry, timeout=self.timeout, **self.request)
    resp = r1.x
    assert resp, ValueError(
        'original_response should not fail. %s' % self.request)
    self.encoding = self.encoding or resp.encoding
    self.original_response = self.ensure_response(r1)
    return self.original_response
[ "def", "init_original_response", "(", "self", ")", ":", "if", "'json'", "in", "self", ".", "request", ":", "self", ".", "request", "[", "'data'", "]", "=", "json", ".", "dumps", "(", "self", ".", "request", ".", "pop", "(", "'json'", ")", ")", ".", "encode", "(", "self", ".", "encoding", ")", "r1", "=", "self", ".", "req", ".", "request", "(", "retry", "=", "self", ".", "retry", ",", "timeout", "=", "self", ".", "timeout", ",", "*", "*", "self", ".", "request", ")", "resp", "=", "r1", ".", "x", "assert", "resp", ",", "ValueError", "(", "'original_response should not be failed. %s'", "%", "self", ".", "request", ")", "self", ".", "encoding", "=", "self", ".", "encoding", "or", "resp", ".", "encoding", "self", ".", "original_response", "=", "self", ".", "ensure_response", "(", "r1", ")", "return", "self", ".", "original_response" ]
48.461538
14.923077
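The first step of the method, converting a `json` payload into pre-encoded `data`, re-enacted standalone:

import json

request = {'url': 'http://example.com', 'json': {'a': 1}}
encoding = 'utf-8'
if 'json' in request:
    request['data'] = json.dumps(request.pop('json')).encode(encoding)
print(request)  # {'url': 'http://example.com', 'data': b'{"a": 1}'}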
def outgoing(self, node): """Returns nodes connecting out of the given node (or list of nodes).""" nodes = node if isinstance(node, list) else [node] node_ids = [self.id(n) for n in nodes] # Find edges outgoing from this group but not incoming to it outgoing = [self[e[1]] for e in self.edges if e[0] in node_ids and e[1] not in node_ids] return outgoing
[ "def", "outgoing", "(", "self", ",", "node", ")", ":", "nodes", "=", "node", "if", "isinstance", "(", "node", ",", "list", ")", "else", "[", "node", "]", "node_ids", "=", "[", "self", ".", "id", "(", "n", ")", "for", "n", "in", "nodes", "]", "# Find edges outgoing from this group but not incoming to it", "outgoing", "=", "[", "self", "[", "e", "[", "1", "]", "]", "for", "e", "in", "self", ".", "edges", "if", "e", "[", "0", "]", "in", "node_ids", "and", "e", "[", "1", "]", "not", "in", "node_ids", "]", "return", "outgoing" ]
51.875
14.875
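The core edge filter, restated without the class plumbing (in the method above, `self.edges` stores id pairs and `self[...]` resolves ids back to nodes):

edges = [("a", "b"), ("b", "c"), ("c", "a"), ("c", "d")]
group = {"a", "b", "c"}
outgoing = [dst for src, dst in edges if src in group and dst not in group]
print(outgoing)  # ['d']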
def clear_api_path_map_cache(self): """Clear out cache for api_path_map.""" self._api_path_cache = None for api_provider in self.api_providers: if six.get_method_self( api_provider.clear_api_path_map_cache, ) is not None: api_provider.clear_api_path_map_cache()
[ "def", "clear_api_path_map_cache", "(", "self", ")", ":", "self", ".", "_api_path_cache", "=", "None", "for", "api_provider", "in", "self", ".", "api_providers", ":", "if", "six", ".", "get_method_self", "(", "api_provider", ".", "clear_api_path_map_cache", ",", ")", "is", "not", "None", ":", "api_provider", ".", "clear_api_path_map_cache", "(", ")" ]
41.75
8.125
def version(*names, **kwargs): ''' Common interface for obtaining the version of installed packages. Accepts full or partial FMRI. If called using pkg_resource, full FMRI is required. Partial FMRI is returned if the package is not installed. CLI Example: .. code-block:: bash salt '*' pkg.version vim salt '*' pkg.version foo bar baz salt '*' pkg_resource.version pkg://solaris/entire ''' if not names: return '' cmd = ['/bin/pkg', 'list', '-Hv'] cmd.extend(names) lines = __salt__['cmd.run_stdout'](cmd, ignore_retcode=True).splitlines() ret = {} for line in lines: ret[_ips_get_pkgname(line)] = _ips_get_pkgversion(line) # Append package names which are not installed/found unmatched = list([name for name in names if not reduce(lambda x, y: x or name in y, ret, False)]) # pylint: disable=W0640 ret.update(zip(unmatched, itertools.cycle(('',)))) # Return a string if only one package name passed if len(names) == 1: try: return next(six.itervalues(ret)) except StopIteration: return '' return ret
[ "def", "version", "(", "*", "names", ",", "*", "*", "kwargs", ")", ":", "if", "not", "names", ":", "return", "''", "cmd", "=", "[", "'/bin/pkg'", ",", "'list'", ",", "'-Hv'", "]", "cmd", ".", "extend", "(", "names", ")", "lines", "=", "__salt__", "[", "'cmd.run_stdout'", "]", "(", "cmd", ",", "ignore_retcode", "=", "True", ")", ".", "splitlines", "(", ")", "ret", "=", "{", "}", "for", "line", "in", "lines", ":", "ret", "[", "_ips_get_pkgname", "(", "line", ")", "]", "=", "_ips_get_pkgversion", "(", "line", ")", "# Append package names which are not installed/found", "unmatched", "=", "list", "(", "[", "name", "for", "name", "in", "names", "if", "not", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "or", "name", "in", "y", ",", "ret", ",", "False", ")", "]", ")", "# pylint: disable=W0640", "ret", ".", "update", "(", "zip", "(", "unmatched", ",", "itertools", ".", "cycle", "(", "(", "''", ",", ")", ")", ")", ")", "# Return a string if only one package name passed", "if", "len", "(", "names", ")", "==", "1", ":", "try", ":", "return", "next", "(", "six", ".", "itervalues", "(", "ret", ")", ")", "except", "StopIteration", ":", "return", "''", "return", "ret" ]
30.486486
26.108108
def stream_gzip_decompress_lines(stream):
    """
    Uncompress a gzip stream into lines of text.

    Parameters
    ----------
    stream : iterable
        Generator of chunks of gzip compressed text.

    Returns
    -------
    Generator of uncompressed lines.
    """
    dec = zlib.decompressobj(zlib.MAX_WBITS | 16)
    previous = ""
    for compressed_chunk in stream:
        chunk = dec.decompress(compressed_chunk).decode()
        if chunk:
            lines = (previous + chunk).split("\n")
            previous = lines.pop()
            for line in lines:
                yield line
    yield previous
[ "def", "stream_gzip_decompress_lines", "(", "stream", ")", ":", "dec", "=", "zlib", ".", "decompressobj", "(", "zlib", ".", "MAX_WBITS", "|", "16", ")", "previous", "=", "\"\"", "for", "compressed_chunk", "in", "stream", ":", "chunk", "=", "dec", ".", "decompress", "(", "compressed_chunk", ")", ".", "decode", "(", ")", "if", "chunk", ":", "lines", "=", "(", "previous", "+", "chunk", ")", ".", "split", "(", "\"\\n\"", ")", "previous", "=", "lines", ".", "pop", "(", ")", "for", "line", "in", "lines", ":", "yield", "line", "yield", "previous" ]
25.909091
15.909091
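Assuming stream_gzip_decompress_lines from the record above is in scope, feeding it a gzip payload in small chunks shows the line-buffering across chunk boundaries:

import gzip

payload = gzip.compress(b"alpha\nbeta\ngamma")
chunks = (payload[i:i + 8] for i in range(0, len(payload), 8))
print(list(stream_gzip_decompress_lines(chunks)))
# ['alpha', 'beta', 'gamma']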
def prettify(root, encoding='utf-8'): """ Return a pretty-printed XML string for the Element. @see: http://www.doughellmann.com/PyMOTW/xml/etree/ElementTree/create.html """ if isinstance(root, ElementTree.Element): node = ElementTree.tostring(root, 'utf-8') else: node = root # Hacky solution as it seems PyXML doesn't exist anymore... return etree.tostring(etree.fromstring(node), pretty_print=True, xml_declaration=True, encoding='utf-8')
[ "def", "prettify", "(", "root", ",", "encoding", "=", "'utf-8'", ")", ":", "if", "isinstance", "(", "root", ",", "ElementTree", ".", "Element", ")", ":", "node", "=", "ElementTree", ".", "tostring", "(", "root", ",", "'utf-8'", ")", "else", ":", "node", "=", "root", "# Hacky solution as it seems PyXML doesn't exist anymore... ", "return", "etree", ".", "tostring", "(", "etree", ".", "fromstring", "(", "node", ")", ",", "pretty_print", "=", "True", ",", "xml_declaration", "=", "True", ",", "encoding", "=", "'utf-8'", ")" ]
34.625
15.625
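Hypothetical usage, assuming prettify from the record above is in scope together with the module's own imports (xml.etree.ElementTree as ElementTree, lxml.etree as etree):

import xml.etree.ElementTree as ElementTree

root = ElementTree.Element("root")
ElementTree.SubElement(root, "child").text = "hi"
print(prettify(root).decode("utf-8"))
# <?xml version='1.0' encoding='utf-8'?>
# <root>
#   <child>hi</child>
# </root>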
def register_model(self, *index_fields, **kw): """Registers a single model for fulltext search. This basically creates a simple Whoosheer for the model and calls :func:`register_whoosheer` on it. """ # construct subclass of AbstractWhoosheer for a model class ModelWhoosheer(AbstractWhoosheerMeta): @classmethod def _assign_primary(cls, primary, primary_is_numeric, attrs, model): attrs[primary] = getattr(model, primary) if not primary_is_numeric: if sys.version < '3': attrs[primary] = unicode(attrs[primary]) else: attrs[primary] = str(attrs[primary]) mwh = ModelWhoosheer def inner(model): mwh.index_subdir = model.__tablename__ mwh.models = [model] schema_attrs = {} for field in model.__table__.columns: if field.primary_key: primary = field.name primary_is_numeric = True if isinstance(field.type, SQLInteger): schema_attrs[field.name] = whoosh.fields.NUMERIC(stored=True, unique=True) else: primary_is_numeric = False schema_attrs[field.name] = whoosh.fields.ID(stored=True, unique=True) elif field.name in index_fields: schema_attrs[field.name] = whoosh.fields.TEXT(**kw) mwh.schema = whoosh.fields.Schema(**schema_attrs) # we can't check with isinstance, because ModelWhoosheer is private # so use this attribute to find out mwh._is_model_whoosheer = True @classmethod def update_model(cls, writer, model): attrs = {} cls._assign_primary(primary, primary_is_numeric, attrs, model) for f in index_fields: attrs[f] = getattr(model, f) if not isinstance(attrs[f], int): if sys.version < '3': attrs[f] = unicode(attrs[f]) else: attrs[f] = str(attrs[f]) writer.update_document(**attrs) @classmethod def insert_model(cls, writer, model): attrs = {} cls._assign_primary(primary, primary_is_numeric, attrs, model) for f in index_fields: attrs[f] = getattr(model, f) if not isinstance(attrs[f], int): if sys.version < '3': attrs[f] = unicode(attrs[f]) else: attrs[f] = str(attrs[f]) writer.add_document(**attrs) @classmethod def delete_model(cls, writer, model): writer.delete_by_term(primary, getattr(model, primary)) setattr(mwh, '{0}_{1}'.format(UPDATE_KWD, model.__name__.lower()), update_model) setattr(mwh, '{0}_{1}'.format(INSERT_KWD, model.__name__.lower()), insert_model) setattr(mwh, '{0}_{1}'.format(DELETE_KWD, model.__name__.lower()), delete_model) model._whoosheer_ = mwh model.whoosh_search = mwh.search self.register_whoosheer(mwh) return model return inner
[ "def", "register_model", "(", "self", ",", "*", "index_fields", ",", "*", "*", "kw", ")", ":", "# construct subclass of AbstractWhoosheer for a model", "class", "ModelWhoosheer", "(", "AbstractWhoosheerMeta", ")", ":", "@", "classmethod", "def", "_assign_primary", "(", "cls", ",", "primary", ",", "primary_is_numeric", ",", "attrs", ",", "model", ")", ":", "attrs", "[", "primary", "]", "=", "getattr", "(", "model", ",", "primary", ")", "if", "not", "primary_is_numeric", ":", "if", "sys", ".", "version", "<", "'3'", ":", "attrs", "[", "primary", "]", "=", "unicode", "(", "attrs", "[", "primary", "]", ")", "else", ":", "attrs", "[", "primary", "]", "=", "str", "(", "attrs", "[", "primary", "]", ")", "mwh", "=", "ModelWhoosheer", "def", "inner", "(", "model", ")", ":", "mwh", ".", "index_subdir", "=", "model", ".", "__tablename__", "mwh", ".", "models", "=", "[", "model", "]", "schema_attrs", "=", "{", "}", "for", "field", "in", "model", ".", "__table__", ".", "columns", ":", "if", "field", ".", "primary_key", ":", "primary", "=", "field", ".", "name", "primary_is_numeric", "=", "True", "if", "isinstance", "(", "field", ".", "type", ",", "SQLInteger", ")", ":", "schema_attrs", "[", "field", ".", "name", "]", "=", "whoosh", ".", "fields", ".", "NUMERIC", "(", "stored", "=", "True", ",", "unique", "=", "True", ")", "else", ":", "primary_is_numeric", "=", "False", "schema_attrs", "[", "field", ".", "name", "]", "=", "whoosh", ".", "fields", ".", "ID", "(", "stored", "=", "True", ",", "unique", "=", "True", ")", "elif", "field", ".", "name", "in", "index_fields", ":", "schema_attrs", "[", "field", ".", "name", "]", "=", "whoosh", ".", "fields", ".", "TEXT", "(", "*", "*", "kw", ")", "mwh", ".", "schema", "=", "whoosh", ".", "fields", ".", "Schema", "(", "*", "*", "schema_attrs", ")", "# we can't check with isinstance, because ModelWhoosheer is private", "# so use this attribute to find out", "mwh", ".", "_is_model_whoosheer", "=", "True", "@", "classmethod", "def", "update_model", "(", "cls", ",", "writer", ",", "model", ")", ":", "attrs", "=", "{", "}", "cls", ".", "_assign_primary", "(", "primary", ",", "primary_is_numeric", ",", "attrs", ",", "model", ")", "for", "f", "in", "index_fields", ":", "attrs", "[", "f", "]", "=", "getattr", "(", "model", ",", "f", ")", "if", "not", "isinstance", "(", "attrs", "[", "f", "]", ",", "int", ")", ":", "if", "sys", ".", "version", "<", "'3'", ":", "attrs", "[", "f", "]", "=", "unicode", "(", "attrs", "[", "f", "]", ")", "else", ":", "attrs", "[", "f", "]", "=", "str", "(", "attrs", "[", "f", "]", ")", "writer", ".", "update_document", "(", "*", "*", "attrs", ")", "@", "classmethod", "def", "insert_model", "(", "cls", ",", "writer", ",", "model", ")", ":", "attrs", "=", "{", "}", "cls", ".", "_assign_primary", "(", "primary", ",", "primary_is_numeric", ",", "attrs", ",", "model", ")", "for", "f", "in", "index_fields", ":", "attrs", "[", "f", "]", "=", "getattr", "(", "model", ",", "f", ")", "if", "not", "isinstance", "(", "attrs", "[", "f", "]", ",", "int", ")", ":", "if", "sys", ".", "version", "<", "'3'", ":", "attrs", "[", "f", "]", "=", "unicode", "(", "attrs", "[", "f", "]", ")", "else", ":", "attrs", "[", "f", "]", "=", "str", "(", "attrs", "[", "f", "]", ")", "writer", ".", "add_document", "(", "*", "*", "attrs", ")", "@", "classmethod", "def", "delete_model", "(", "cls", ",", "writer", ",", "model", ")", ":", "writer", ".", "delete_by_term", "(", "primary", ",", "getattr", "(", "model", ",", "primary", ")", ")", "setattr", "(", "mwh", ",", 
"'{0}_{1}'", ".", "format", "(", "UPDATE_KWD", ",", "model", ".", "__name__", ".", "lower", "(", ")", ")", ",", "update_model", ")", "setattr", "(", "mwh", ",", "'{0}_{1}'", ".", "format", "(", "INSERT_KWD", ",", "model", ".", "__name__", ".", "lower", "(", ")", ")", ",", "insert_model", ")", "setattr", "(", "mwh", ",", "'{0}_{1}'", ".", "format", "(", "DELETE_KWD", ",", "model", ".", "__name__", ".", "lower", "(", ")", ")", ",", "delete_model", ")", "model", ".", "_whoosheer_", "=", "mwh", "model", ".", "whoosh_search", "=", "mwh", ".", "search", "self", ".", "register_whoosheer", "(", "mwh", ")", "return", "model", "return", "inner" ]
43.151899
18.822785
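A hedged usage sketch for the decorator this method returns, following the typical flask-whooshee pattern; the app config, model, and field names below are illustrative and not taken from the source:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_whooshee import Whooshee

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
db = SQLAlchemy(app)
whooshee = Whooshee(app)

@whooshee.register_model("title", "content")  # index these two TEXT fields
class Entry(db.Model):
    id = db.Column(db.Integer, primary_key=True)  # stored as a NUMERIC id field
    title = db.Column(db.String(64))
    content = db.Column(db.Text)

# after committing rows, the decorated class exposes Entry.whoosh_search("query")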
def _build_toc_node(docname, anchor="anchor", text="test text", bullet=False): """ Create the node structure that Sphinx expects for TOC Tree entries. The ``bullet`` argument wraps it in a ``nodes.bullet_list``, which is how you nest TOC Tree entries. """ reference = nodes.reference( "", "", internal=True, refuri=docname, anchorname="#" + anchor, *[nodes.Text(text, text)] ) para = addnodes.compact_paragraph("", "", reference) ret_list = nodes.list_item("", para) return nodes.bullet_list("", ret_list) if bullet else ret_list
[ "def", "_build_toc_node", "(", "docname", ",", "anchor", "=", "\"anchor\"", ",", "text", "=", "\"test text\"", ",", "bullet", "=", "False", ")", ":", "reference", "=", "nodes", ".", "reference", "(", "\"\"", ",", "\"\"", ",", "internal", "=", "True", ",", "refuri", "=", "docname", ",", "anchorname", "=", "\"#\"", "+", "anchor", ",", "*", "[", "nodes", ".", "Text", "(", "text", ",", "text", ")", "]", ")", "para", "=", "addnodes", ".", "compact_paragraph", "(", "\"\"", ",", "\"\"", ",", "reference", ")", "ret_list", "=", "nodes", ".", "list_item", "(", "\"\"", ",", "para", ")", "return", "nodes", ".", "bullet_list", "(", "\"\"", ",", "ret_list", ")", "if", "bullet", "else", "ret_list" ]
33.388889
18.277778
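A hedged calling sketch, assuming the module already imports docutils.nodes as nodes and sphinx.addnodes as addnodes; the docnames and anchors are illustrative:

# a top-level TOC entry pointing at docs/api#widget
item = _build_toc_node("docs/api", anchor="widget", text="Widget")

# bullet=True wraps the entry in a bullet_list, which is how entries nest
nested = _build_toc_node("docs/api", anchor="widget-init", text="__init__", bullet=True)
item.append(nested)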
def timeFunction(requestContext, name, step=60):
    """
    Short Alias: time()

    Just returns the timestamp for each X value.

    Example::

        &target=time("The.time.series")

    This would create a series named "The.time.series" that contains in Y
    the same value (in seconds) as X.

    A second argument can be provided as a step parameter (default is 60 secs)

    """
    start = int(epoch(requestContext["startTime"]))
    end = int(epoch(requestContext["endTime"]))
    delta = timedelta(seconds=step)
    when = requestContext["startTime"]
    values = []

    while when < requestContext["endTime"]:
        values.append(epoch(when))
        when += delta

    series = TimeSeries(name, start, end, step, values)
    series.pathExpression = name

    return [series]
[ "def", "timeFunction", "(", "requestContext", ",", "name", ",", "step", "=", "60", ")", ":", "start", "=", "int", "(", "epoch", "(", "requestContext", "[", "\"startTime\"", "]", ")", ")", "end", "=", "int", "(", "epoch", "(", "requestContext", "[", "\"endTime\"", "]", ")", ")", "delta", "=", "timedelta", "(", "seconds", "=", "step", ")", "when", "=", "requestContext", "[", "\"startTime\"", "]", "values", "=", "[", "]", "while", "when", "<", "requestContext", "[", "\"endTime\"", "]", ":", "values", ".", "append", "(", "epoch", "(", "when", ")", ")", "when", "+=", "delta", "series", "=", "TimeSeries", "(", "name", ",", "start", ",", "end", ",", "step", ",", "values", ")", "series", ".", "pathExpression", "=", "name", "return", "[", "series", "]" ]
25.5
20.633333
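A hedged usage sketch, assuming timeFunction and its helpers (epoch, TimeSeries) are importable from the graphite rendering module; the five-minute window is illustrative:

from datetime import datetime

requestContext = {
    "startTime": datetime(2024, 1, 1, 0, 0, 0),
    "endTime": datetime(2024, 1, 1, 0, 5, 0),
}
[series] = timeFunction(requestContext, "The.time.series", step=60)
# series now holds five epoch timestamps, one per 60-second step in the window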
def _masquerade(origin: str, orig: ServiceDefn, new: ServiceDefn, **map: str) -> str:
    """Build an origin URL whose query string maps each RPC name in ``new``
    to its counterpart in ``orig``, as given by ``map``."""
    origin: ParseResult = urlparse(origin)

    prev_maps = {}
    if origin.query:
        prev_maps = {k: v for k, v in parse_qsl(origin.query)}

    r_args = {}

    for new_k, orig_k in map.items():
        assert new_k in new.rpcs, [new_k, new.rpcs]
        assert orig_k in orig.rpcs, [orig_k, orig.rpcs]

        # todo: check if the definitions are the same

        new_v = new.rpcs[new_k]
        orig_v = orig.rpcs[orig_k]

        if orig_k in prev_maps:
            orig_k = prev_maps[orig_k]

        assert new_v.res == orig_v.res, [new_v.res, orig_v.res]
        assert new_v.req == orig_v.req, [new_v.req, orig_v.req]

        r_args[new_k] = orig_k

    return urlunparse(origin._replace(query=urlencode(r_args)))
[ "def", "_masquerade", "(", "origin", ":", "str", ",", "orig", ":", "ServiceDefn", ",", "new", ":", "ServiceDefn", ",", "*", "*", "map", ":", "str", ")", "->", "str", ":", "origin", ":", "ParseResult", "=", "urlparse", "(", "origin", ")", "prev_maps", "=", "{", "}", "if", "origin", ".", "query", ":", "prev_maps", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "parse_qsl", "(", "origin", ".", "query", ")", "}", "r_args", "=", "{", "}", "for", "new_k", ",", "orig_k", "in", "map", ".", "items", "(", ")", ":", "assert", "new_k", "in", "new", ".", "rpcs", ",", "[", "new_k", ",", "new", ".", "rpcs", "]", "assert", "orig_k", "in", "orig", ".", "rpcs", ",", "[", "orig_k", ",", "orig", ".", "rpcs", "]", "# todo: check if the definitions are the same", "new_v", "=", "new", ".", "rpcs", "[", "new_k", "]", "orig_v", "=", "orig", ".", "rpcs", "[", "orig_k", "]", "if", "orig_k", "in", "prev_maps", ":", "orig_k", "=", "prev_maps", "[", "orig_k", "]", "assert", "new_v", ".", "res", "==", "orig_v", ".", "res", ",", "[", "new_v", ".", "res", ",", "orig_v", ".", "res", "]", "assert", "new_v", ".", "req", "==", "orig_v", ".", "req", ",", "[", "new_v", ".", "req", ",", "orig_v", ".", "req", "]", "r_args", "[", "new_k", "]", "=", "orig_k", "return", "urlunparse", "(", "origin", ".", "_replace", "(", "query", "=", "urlencode", "(", "r_args", ")", ")", ")" ]
28.548387
24.612903
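A standalone sketch of the query-string rewriting this helper performs, using only the stdlib pieces it relies on; the RPC names are illustrative and the ServiceDefn compatibility assertions are omitted:

from urllib.parse import urlparse, urlunparse, urlencode, parse_qsl

origin = urlparse("http://svc.example/api?OldRpc=LegacyRpc")
prev_maps = dict(parse_qsl(origin.query))
# chain NewRpc -> OldRpc through the existing OldRpc -> LegacyRpc mapping
r_args = {"NewRpc": prev_maps.get("OldRpc", "OldRpc")}
print(urlunparse(origin._replace(query=urlencode(r_args))))
# http://svc.example/api?NewRpc=LegacyRpc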
def register_blueprints(app): """Register Flask blueprints.""" app.register_blueprint(public.public_bp) app.register_blueprint(genes.genes_bp) app.register_blueprint(cases.cases_bp) app.register_blueprint(login.login_bp) app.register_blueprint(variants.variants_bp) app.register_blueprint(panels.panels_bp) app.register_blueprint(dashboard.dashboard_bp) app.register_blueprint(api.api_bp) app.register_blueprint(alignviewers.alignviewers_bp) app.register_blueprint(phenotypes.hpo_bp) app.register_blueprint(institutes.overview)
[ "def", "register_blueprints", "(", "app", ")", ":", "app", ".", "register_blueprint", "(", "public", ".", "public_bp", ")", "app", ".", "register_blueprint", "(", "genes", ".", "genes_bp", ")", "app", ".", "register_blueprint", "(", "cases", ".", "cases_bp", ")", "app", ".", "register_blueprint", "(", "login", ".", "login_bp", ")", "app", ".", "register_blueprint", "(", "variants", ".", "variants_bp", ")", "app", ".", "register_blueprint", "(", "panels", ".", "panels_bp", ")", "app", ".", "register_blueprint", "(", "dashboard", ".", "dashboard_bp", ")", "app", ".", "register_blueprint", "(", "api", ".", "api_bp", ")", "app", ".", "register_blueprint", "(", "alignviewers", ".", "alignviewers_bp", ")", "app", ".", "register_blueprint", "(", "phenotypes", ".", "hpo_bp", ")", "app", ".", "register_blueprint", "(", "institutes", ".", "overview", ")" ]
43.307692
5.615385
def list_all_coupons(cls, **kwargs): """List Coupons Return a list of Coupons This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.list_all_coupons(async=True) >>> result = thread.get() :param async bool :param int page: page number :param int size: page size :param str sort: page order :return: page[Coupon] If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return cls._list_all_coupons_with_http_info(**kwargs) else: (data) = cls._list_all_coupons_with_http_info(**kwargs) return data
[ "def", "list_all_coupons", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_list_all_coupons_with_http_info", "(", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_list_all_coupons_with_http_info", "(", "*", "*", "kwargs", ")", "return", "data" ]
35.869565
14.652174
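A hedged calling sketch, assuming an SDK class Coupon that exposes this classmethod. One wrinkle worth noting: async became a reserved word in Python 3.7, so on modern interpreters the flag has to be passed through an unpacked dict:

page = Coupon.list_all_coupons(page=1, size=20, sort="created_at")

thread = Coupon.list_all_coupons(**{"async": True})  # returns the request thread
page = thread.get()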
def isloaded(self, name):
    """Checks if the given hook module has been loaded

    Args:
        name (str): The name of the module to check

    Returns:
        bool. The return code::

            True -- Loaded
            False -- Not Loaded
    """
    if name is None:
        return True
    if isinstance(name, str):
        return (name in [x.__module__ for x in self])
    if isinstance(name, Iterable):
        return set(name).issubset([x.__module__ for x in self])
    return False
[ "def", "isloaded", "(", "self", ",", "name", ")", ":", "if", "name", "is", "None", ":", "return", "True", "if", "isinstance", "(", "name", ",", "str", ")", ":", "return", "(", "name", "in", "[", "x", ".", "__module__", "for", "x", "in", "self", "]", ")", "if", "isinstance", "(", "name", ",", "Iterable", ")", ":", "return", "set", "(", "name", ")", ".", "issubset", "(", "[", "x", ".", "__module__", "for", "x", "in", "self", "]", ")", "return", "False" ]
24.409091
20.272727
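A hedged usage sketch, assuming hooks is a container of loaded hook objects exposing this method; the module names are illustrative:

hooks.isloaded(None)                          # vacuously True
hooks.isloaded("myapp.hooks.audit")           # True if that module is loaded
hooks.isloaded(["hooks.audit", "hooks.log"])  # True only if every name is loaded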
def opt_vals_(self): """Iterator over sections, option names, and option values. This iterator is also implemented at the section level. The two loops produce the same output:: for sct, opt, val in conf.opt_vals_(): print(sct, opt, val) for sct in conf.sections_(): for opt, val in conf[sct].opt_vals_(): print(sct, opt, val) Yields: tuples with sections, option names, and option values. """ for sct, opt in self.options_(): yield sct, opt, self[sct][opt]
[ "def", "opt_vals_", "(", "self", ")", ":", "for", "sct", ",", "opt", "in", "self", ".", "options_", "(", ")", ":", "yield", "sct", ",", "opt", ",", "self", "[", "sct", "]", "[", "opt", "]" ]
32.833333
16.944444
def _split_input_from_namespace(cls, app, namespace, entity_kind, shard_count):
    """Helper for _split_input_from_params.

    If there are not enough Entities to make all of the given shards, the
    returned list of KeyRanges will include Nones. The returned list will
    contain KeyRanges ordered lexicographically with any Nones appearing
    at the end.

    Args:
      app: the app.
      namespace: the namespace.
      entity_kind: entity kind as string.
      shard_count: the number of shards.

    Returns:
      KeyRange objects.
    """
    raw_entity_kind = cls._get_raw_entity_kind(entity_kind)

    if shard_count == 1:
        # With one shard we don't need to calculate any splitpoints at all.
        return [key_range.KeyRange(namespace=namespace, _app=app)]

    ds_query = datastore.Query(kind=raw_entity_kind,
                               namespace=namespace,
                               _app=app,
                               keys_only=True)
    ds_query.Order("__scatter__")
    random_keys = ds_query.Get(shard_count * cls._OVERSAMPLING_FACTOR)

    if not random_keys:
        # There are no entities with scatter property. We have no idea
        # how to split.
        return ([key_range.KeyRange(namespace=namespace, _app=app)] +
                [None] * (shard_count - 1))

    random_keys.sort()

    if len(random_keys) >= shard_count:
        # We've got a lot of scatter values. Sample them down.
        random_keys = cls._choose_split_points(random_keys, shard_count)

    # pylint: disable=redefined-outer-name
    key_ranges = []

    key_ranges.append(key_range.KeyRange(
        key_start=None,
        key_end=random_keys[0],
        direction=key_range.KeyRange.ASC,
        include_start=False,
        include_end=False,
        namespace=namespace,
        _app=app))

    for i in range(0, len(random_keys) - 1):
        key_ranges.append(key_range.KeyRange(
            key_start=random_keys[i],
            key_end=random_keys[i+1],
            direction=key_range.KeyRange.ASC,
            include_start=True,
            include_end=False,
            namespace=namespace,
            _app=app))

    key_ranges.append(key_range.KeyRange(
        key_start=random_keys[-1],
        key_end=None,
        direction=key_range.KeyRange.ASC,
        include_start=True,
        include_end=False,
        namespace=namespace,
        _app=app))

    if len(key_ranges) < shard_count:
        # We need to have as many shards as it was requested. Add some Nones.
        key_ranges += [None] * (shard_count - len(key_ranges))

    return key_ranges
[ "def", "_split_input_from_namespace", "(", "cls", ",", "app", ",", "namespace", ",", "entity_kind", ",", "shard_count", ")", ":", "raw_entity_kind", "=", "cls", ".", "_get_raw_entity_kind", "(", "entity_kind", ")", "if", "shard_count", "==", "1", ":", "# With one shard we don't need to calculate any splitpoints at all.", "return", "[", "key_range", ".", "KeyRange", "(", "namespace", "=", "namespace", ",", "_app", "=", "app", ")", "]", "ds_query", "=", "datastore", ".", "Query", "(", "kind", "=", "raw_entity_kind", ",", "namespace", "=", "namespace", ",", "_app", "=", "app", ",", "keys_only", "=", "True", ")", "ds_query", ".", "Order", "(", "\"__scatter__\"", ")", "random_keys", "=", "ds_query", ".", "Get", "(", "shard_count", "*", "cls", ".", "_OVERSAMPLING_FACTOR", ")", "if", "not", "random_keys", ":", "# There are no entities with scatter property. We have no idea", "# how to split.", "return", "(", "[", "key_range", ".", "KeyRange", "(", "namespace", "=", "namespace", ",", "_app", "=", "app", ")", "]", "+", "[", "None", "]", "*", "(", "shard_count", "-", "1", ")", ")", "random_keys", ".", "sort", "(", ")", "if", "len", "(", "random_keys", ")", ">=", "shard_count", ":", "# We've got a lot of scatter values. Sample them down.", "random_keys", "=", "cls", ".", "_choose_split_points", "(", "random_keys", ",", "shard_count", ")", "# pylint: disable=redefined-outer-name", "key_ranges", "=", "[", "]", "key_ranges", ".", "append", "(", "key_range", ".", "KeyRange", "(", "key_start", "=", "None", ",", "key_end", "=", "random_keys", "[", "0", "]", ",", "direction", "=", "key_range", ".", "KeyRange", ".", "ASC", ",", "include_start", "=", "False", ",", "include_end", "=", "False", ",", "namespace", "=", "namespace", ",", "_app", "=", "app", ")", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "random_keys", ")", "-", "1", ")", ":", "key_ranges", ".", "append", "(", "key_range", ".", "KeyRange", "(", "key_start", "=", "random_keys", "[", "i", "]", ",", "key_end", "=", "random_keys", "[", "i", "+", "1", "]", ",", "direction", "=", "key_range", ".", "KeyRange", ".", "ASC", ",", "include_start", "=", "True", ",", "include_end", "=", "False", ",", "namespace", "=", "namespace", ",", "_app", "=", "app", ")", ")", "key_ranges", ".", "append", "(", "key_range", ".", "KeyRange", "(", "key_start", "=", "random_keys", "[", "-", "1", "]", ",", "key_end", "=", "None", ",", "direction", "=", "key_range", ".", "KeyRange", ".", "ASC", ",", "include_start", "=", "True", ",", "include_end", "=", "False", ",", "namespace", "=", "namespace", ",", "_app", "=", "app", ")", ")", "if", "len", "(", "key_ranges", ")", "<", "shard_count", ":", "# We need to have as many shards as it was requested. Add some Nones.", "key_ranges", "+=", "[", "None", "]", "*", "(", "shard_count", "-", "len", "(", "key_ranges", ")", ")", "return", "key_ranges" ]
31.949367
19.012658
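A standalone sketch of the range construction at the heart of this method: sorted split points become contiguous key ranges, open-ended at both extremes (plain strings stand in for datastore keys):

def ranges_from_split_points(points):
    # mirror the KeyRange construction above with (key_start, key_end) pairs
    bounds = [None] + sorted(points) + [None]
    return list(zip(bounds[:-1], bounds[1:]))

print(ranges_from_split_points(["m", "d", "t"]))
# [(None, 'd'), ('d', 'm'), ('m', 't'), ('t', None)]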
def installUpdate(self):
    """ Install the newest version of Plex Media Server. """
    # We can add this, but it is of limited use since applying the update
    # sometimes requires user action through a GUI.
    part = '/updater/apply'
    release = self.check_for_update(force=True, download=True)
    if release and release.version != self.version:
        # TODO: figure out which HTTP method this endpoint expects.
        return self.query(part, method=self._session.put)
[ "def", "installUpdate", "(", "self", ")", ":", "# We can add this but dunno how useful this is since it sometimes", "# requires user action using a gui.", "part", "=", "'/updater/apply'", "release", "=", "self", ".", "check_for_update", "(", "force", "=", "True", ",", "download", "=", "True", ")", "if", "release", "and", "release", ".", "version", "!=", "self", ".", "version", ":", "# figure out what method this is..", "return", "self", ".", "query", "(", "part", ",", "method", "=", "self", ".", "_session", ".", "put", ")" ]
51.444444
14.333333
def cleanup(self):
    """Forcefully delete objects from memory

    In an ideal world, this shouldn't be necessary. Garbage
    collection guarantees that anything without reference
    is automatically removed.

    However, because this application is designed to be run
    multiple times from the same interpreter process, extra
    care must be taken to ensure there are no memory leaks.

    Explicitly deleting objects shines a light on where objects
    may still be referenced in the form of an error. No errors
    means this was unnecessary, but that's ok.

    """

    for instance in self.context:
        del(instance)

    for plugin in self.plugins:
        del(plugin)
[ "def", "cleanup", "(", "self", ")", ":", "for", "instance", "in", "self", ".", "context", ":", "del", "(", "instance", ")", "for", "plugin", "in", "self", ".", "plugins", ":", "del", "(", "plugin", ")" ]
32.954545
22
def deprecated(fun_name=None, msg=""): '''Issue a deprecation warning for a function''' def _deprecated(fun): '''Issue a deprecation warning for a function''' @wraps(fun) def _wrapper(*args, **kwargs): '''Issue deprecation warning and forward arguments to fun''' name = fun_name if fun_name is not None else fun.__name__ _warn_deprecated('Call to deprecated function %s. %s' % (name, msg)) return fun(*args, **kwargs) return _wrapper return _deprecated
[ "def", "deprecated", "(", "fun_name", "=", "None", ",", "msg", "=", "\"\"", ")", ":", "def", "_deprecated", "(", "fun", ")", ":", "'''Issue a deprecation warning for a function'''", "@", "wraps", "(", "fun", ")", "def", "_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "'''Issue deprecation warning and forward arguments to fun'''", "name", "=", "fun_name", "if", "fun_name", "is", "not", "None", "else", "fun", ".", "__name__", "_warn_deprecated", "(", "'Call to deprecated function %s. %s'", "%", "(", "name", ",", "msg", ")", ")", "return", "fun", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapper", "return", "_deprecated" ]
38.071429
20.357143
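A hedged usage sketch; deprecated is the decorator factory above, with wraps and _warn_deprecated assumed to be in scope in its module:

@deprecated(msg="Use new_api() instead.")
def old_api():
    return 42

old_api()  # warns: Call to deprecated function old_api. Use new_api() instead.

@deprecated(fun_name="legacy_api")
def _impl():
    pass

_impl()  # warns under the explicit name "legacy_api"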
def _validate_program(self):
    """
    Verify that the selected mode is available for the program and for all
    courses in the program.
    """
    program = self.cleaned_data.get(self.Fields.PROGRAM)
    if not program:
        return

    course_runs = get_course_runs_from_program(program)
    try:
        client = CourseCatalogApiClient(self._user, self._enterprise_customer.site)
        available_modes = client.get_common_course_modes(course_runs)
        course_mode = self.cleaned_data.get(self.Fields.COURSE_MODE)
    except (HttpClientError, HttpServerError):
        raise ValidationError(
            ValidationMessages.FAILED_TO_OBTAIN_COURSE_MODES.format(program_title=program.get("title"))
        )

    if not course_mode:
        raise ValidationError(ValidationMessages.COURSE_WITHOUT_COURSE_MODE)
    if course_mode not in available_modes:
        raise ValidationError(ValidationMessages.COURSE_MODE_NOT_AVAILABLE.format(
            mode=course_mode, program_title=program.get("title"), modes=", ".join(available_modes)
        ))
[ "def", "_validate_program", "(", "self", ")", ":", "program", "=", "self", ".", "cleaned_data", ".", "get", "(", "self", ".", "Fields", ".", "PROGRAM", ")", "if", "not", "program", ":", "return", "course_runs", "=", "get_course_runs_from_program", "(", "program", ")", "try", ":", "client", "=", "CourseCatalogApiClient", "(", "self", ".", "_user", ",", "self", ".", "_enterprise_customer", ".", "site", ")", "available_modes", "=", "client", ".", "get_common_course_modes", "(", "course_runs", ")", "course_mode", "=", "self", ".", "cleaned_data", ".", "get", "(", "self", ".", "Fields", ".", "COURSE_MODE", ")", "except", "(", "HttpClientError", ",", "HttpServerError", ")", ":", "raise", "ValidationError", "(", "ValidationMessages", ".", "FAILED_TO_OBTAIN_COURSE_MODES", ".", "format", "(", "program_title", "=", "program", ".", "get", "(", "\"title\"", ")", ")", ")", "if", "not", "course_mode", ":", "raise", "ValidationError", "(", "ValidationMessages", ".", "COURSE_WITHOUT_COURSE_MODE", ")", "if", "course_mode", "not", "in", "available_modes", ":", "raise", "ValidationError", "(", "ValidationMessages", ".", "COURSE_MODE_NOT_AVAILABLE", ".", "format", "(", "mode", "=", "course_mode", ",", "program_title", "=", "program", ".", "get", "(", "\"title\"", ")", ",", "modes", "=", "\", \"", ".", "join", "(", "available_modes", ")", ")", ")" ]
45.916667
27.583333
def format_dateaxis(subplot, freq, index): """ Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks. """ # handle index specific formatting # Note: DatetimeIndex does not use this # interface. DatetimeIndex uses matplotlib.date directly if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_locator(majlocator) subplot.xaxis.set_minor_locator(minlocator) majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_formatter(majformatter) subplot.xaxis.set_minor_formatter(minformatter) # x and y coord info subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter( TimeSeries_TimedeltaFormatter()) else: raise TypeError('index type not supported') pylab.draw_if_interactive()
[ "def", "format_dateaxis", "(", "subplot", ",", "freq", ",", "index", ")", ":", "# handle index specific formatting", "# Note: DatetimeIndex does not use this", "# interface. DatetimeIndex uses matplotlib.date directly", "if", "isinstance", "(", "index", ",", "ABCPeriodIndex", ")", ":", "majlocator", "=", "TimeSeries_DateLocator", "(", "freq", ",", "dynamic_mode", "=", "True", ",", "minor_locator", "=", "False", ",", "plot_obj", "=", "subplot", ")", "minlocator", "=", "TimeSeries_DateLocator", "(", "freq", ",", "dynamic_mode", "=", "True", ",", "minor_locator", "=", "True", ",", "plot_obj", "=", "subplot", ")", "subplot", ".", "xaxis", ".", "set_major_locator", "(", "majlocator", ")", "subplot", ".", "xaxis", ".", "set_minor_locator", "(", "minlocator", ")", "majformatter", "=", "TimeSeries_DateFormatter", "(", "freq", ",", "dynamic_mode", "=", "True", ",", "minor_locator", "=", "False", ",", "plot_obj", "=", "subplot", ")", "minformatter", "=", "TimeSeries_DateFormatter", "(", "freq", ",", "dynamic_mode", "=", "True", ",", "minor_locator", "=", "True", ",", "plot_obj", "=", "subplot", ")", "subplot", ".", "xaxis", ".", "set_major_formatter", "(", "majformatter", ")", "subplot", ".", "xaxis", ".", "set_minor_formatter", "(", "minformatter", ")", "# x and y coord info", "subplot", ".", "format_coord", "=", "functools", ".", "partial", "(", "_format_coord", ",", "freq", ")", "elif", "isinstance", "(", "index", ",", "ABCTimedeltaIndex", ")", ":", "subplot", ".", "xaxis", ".", "set_major_formatter", "(", "TimeSeries_TimedeltaFormatter", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "'index type not supported'", ")", "pylab", ".", "draw_if_interactive", "(", ")" ]
42.976744
20.465116
def _define(self): """ gate sdg a { u1(-pi/2) a; } """ definition = [] q = QuantumRegister(1, "q") rule = [ (U1Gate(-pi/2), [q[0]], []) ] for inst in rule: definition.append(inst) self.definition = definition
[ "def", "_define", "(", "self", ")", ":", "definition", "=", "[", "]", "q", "=", "QuantumRegister", "(", "1", ",", "\"q\"", ")", "rule", "=", "[", "(", "U1Gate", "(", "-", "pi", "/", "2", ")", ",", "[", "q", "[", "0", "]", "]", ",", "[", "]", ")", "]", "for", "inst", "in", "rule", ":", "definition", ".", "append", "(", "inst", ")", "self", ".", "definition", "=", "definition" ]
24.416667
10.75
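In circuit terms, the rule above expands an S-dagger gate into a single u1(-pi/2) rotation on its qubit. A hedged Qiskit-level sketch of that equivalence, assuming a qiskit release of the same era where u1 is still available:

from math import pi
from qiskit import QuantumCircuit

qc = QuantumCircuit(1)
qc.sdg(0)          # S-dagger on qubit 0
qc.u1(-pi / 2, 0)  # the same phase rotation, written out explicitly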
def get_line_rules(declarations): """ Given a list of declarations, return a list of output.Rule objects. This function is wise to line-<foo>, inline-<foo>, and outline-<foo> properties, and will generate multiple LineSymbolizers if necessary. """ property_map = {'line-color': 'stroke', 'line-width': 'stroke-width', 'line-opacity': 'stroke-opacity', 'line-join': 'stroke-linejoin', 'line-cap': 'stroke-linecap', 'line-dasharray': 'stroke-dasharray', 'line-meta-output': 'meta-output', 'line-meta-writer': 'meta-writer'} property_names = property_map.keys() # prepend parameter names with 'in' and 'out' for i in range(len(property_names)): property_names.append('in' + property_names[i]) property_names.append('out' + property_names[i]) # a place to put rules rules = [] for (filter, values) in filtered_property_declarations(declarations, property_names): width = values.has_key('line-width') and values['line-width'].value color = values.has_key('line-color') and values['line-color'].value opacity = values.has_key('line-opacity') and values['line-opacity'].value or None join = values.has_key('line-join') and values['line-join'].value or None cap = values.has_key('line-cap') and values['line-cap'].value or None dashes = values.has_key('line-dasharray') and values['line-dasharray'].value or None line_symbolizer = color and width and output.LineSymbolizer(color, width, opacity, join, cap, dashes) or False width = values.has_key('inline-width') and values['inline-width'].value color = values.has_key('inline-color') and values['inline-color'].value opacity = values.has_key('inline-opacity') and values['inline-opacity'].value or None join = values.has_key('inline-join') and values['inline-join'].value or None cap = values.has_key('inline-cap') and values['inline-cap'].value or None dashes = values.has_key('inline-dasharray') and values['inline-dasharray'].value or None inline_symbolizer = color and width and output.LineSymbolizer(color, width, opacity, join, cap, dashes) or False # outline requires regular line to have a meaningful width width = values.has_key('outline-width') and values.has_key('line-width') \ and values['line-width'].value + values['outline-width'].value * 2 color = values.has_key('outline-color') and values['outline-color'].value opacity = values.has_key('outline-opacity') and values['outline-opacity'].value or None join = values.has_key('outline-join') and values['outline-join'].value or None cap = values.has_key('outline-cap') and values['outline-cap'].value or None dashes = values.has_key('outline-dasharray') and values['outline-dasharray'].value or None outline_symbolizer = color and width and output.LineSymbolizer(color, width, opacity, join, cap, dashes) or False if outline_symbolizer or line_symbolizer or inline_symbolizer: rules.append(make_rule(filter, outline_symbolizer, line_symbolizer, inline_symbolizer)) return rules
[ "def", "get_line_rules", "(", "declarations", ")", ":", "property_map", "=", "{", "'line-color'", ":", "'stroke'", ",", "'line-width'", ":", "'stroke-width'", ",", "'line-opacity'", ":", "'stroke-opacity'", ",", "'line-join'", ":", "'stroke-linejoin'", ",", "'line-cap'", ":", "'stroke-linecap'", ",", "'line-dasharray'", ":", "'stroke-dasharray'", ",", "'line-meta-output'", ":", "'meta-output'", ",", "'line-meta-writer'", ":", "'meta-writer'", "}", "property_names", "=", "property_map", ".", "keys", "(", ")", "# prepend parameter names with 'in' and 'out'", "for", "i", "in", "range", "(", "len", "(", "property_names", ")", ")", ":", "property_names", ".", "append", "(", "'in'", "+", "property_names", "[", "i", "]", ")", "property_names", ".", "append", "(", "'out'", "+", "property_names", "[", "i", "]", ")", "# a place to put rules", "rules", "=", "[", "]", "for", "(", "filter", ",", "values", ")", "in", "filtered_property_declarations", "(", "declarations", ",", "property_names", ")", ":", "width", "=", "values", ".", "has_key", "(", "'line-width'", ")", "and", "values", "[", "'line-width'", "]", ".", "value", "color", "=", "values", ".", "has_key", "(", "'line-color'", ")", "and", "values", "[", "'line-color'", "]", ".", "value", "opacity", "=", "values", ".", "has_key", "(", "'line-opacity'", ")", "and", "values", "[", "'line-opacity'", "]", ".", "value", "or", "None", "join", "=", "values", ".", "has_key", "(", "'line-join'", ")", "and", "values", "[", "'line-join'", "]", ".", "value", "or", "None", "cap", "=", "values", ".", "has_key", "(", "'line-cap'", ")", "and", "values", "[", "'line-cap'", "]", ".", "value", "or", "None", "dashes", "=", "values", ".", "has_key", "(", "'line-dasharray'", ")", "and", "values", "[", "'line-dasharray'", "]", ".", "value", "or", "None", "line_symbolizer", "=", "color", "and", "width", "and", "output", ".", "LineSymbolizer", "(", "color", ",", "width", ",", "opacity", ",", "join", ",", "cap", ",", "dashes", ")", "or", "False", "width", "=", "values", ".", "has_key", "(", "'inline-width'", ")", "and", "values", "[", "'inline-width'", "]", ".", "value", "color", "=", "values", ".", "has_key", "(", "'inline-color'", ")", "and", "values", "[", "'inline-color'", "]", ".", "value", "opacity", "=", "values", ".", "has_key", "(", "'inline-opacity'", ")", "and", "values", "[", "'inline-opacity'", "]", ".", "value", "or", "None", "join", "=", "values", ".", "has_key", "(", "'inline-join'", ")", "and", "values", "[", "'inline-join'", "]", ".", "value", "or", "None", "cap", "=", "values", ".", "has_key", "(", "'inline-cap'", ")", "and", "values", "[", "'inline-cap'", "]", ".", "value", "or", "None", "dashes", "=", "values", ".", "has_key", "(", "'inline-dasharray'", ")", "and", "values", "[", "'inline-dasharray'", "]", ".", "value", "or", "None", "inline_symbolizer", "=", "color", "and", "width", "and", "output", ".", "LineSymbolizer", "(", "color", ",", "width", ",", "opacity", ",", "join", ",", "cap", ",", "dashes", ")", "or", "False", "# outline requires regular line to have a meaningful width", "width", "=", "values", ".", "has_key", "(", "'outline-width'", ")", "and", "values", ".", "has_key", "(", "'line-width'", ")", "and", "values", "[", "'line-width'", "]", ".", "value", "+", "values", "[", "'outline-width'", "]", ".", "value", "*", "2", "color", "=", "values", ".", "has_key", "(", "'outline-color'", ")", "and", "values", "[", "'outline-color'", "]", ".", "value", "opacity", "=", "values", ".", "has_key", "(", "'outline-opacity'", ")", "and", "values", 
"[", "'outline-opacity'", "]", ".", "value", "or", "None", "join", "=", "values", ".", "has_key", "(", "'outline-join'", ")", "and", "values", "[", "'outline-join'", "]", ".", "value", "or", "None", "cap", "=", "values", ".", "has_key", "(", "'outline-cap'", ")", "and", "values", "[", "'outline-cap'", "]", ".", "value", "or", "None", "dashes", "=", "values", ".", "has_key", "(", "'outline-dasharray'", ")", "and", "values", "[", "'outline-dasharray'", "]", ".", "value", "or", "None", "outline_symbolizer", "=", "color", "and", "width", "and", "output", ".", "LineSymbolizer", "(", "color", ",", "width", ",", "opacity", ",", "join", ",", "cap", ",", "dashes", ")", "or", "False", "if", "outline_symbolizer", "or", "line_symbolizer", "or", "inline_symbolizer", ":", "rules", ".", "append", "(", "make_rule", "(", "filter", ",", "outline_symbolizer", ",", "line_symbolizer", ",", "inline_symbolizer", ")", ")", "return", "rules" ]
53.516667
37.583333
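One pitfall worth flagging in the cond and value or default idiom used throughout this function: it silently drops falsy values. A standalone sketch:

# the Python 2-era idiom `cond and value or default` misfires when value is falsy
opacity = True and 0.0 or None
print(opacity)  # None -- the 0.0 is lost

# a conditional expression keeps falsy values intact
opacity = 0.0 if True else None
print(opacity)  # 0.0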
def insert_many(self, documents, ordered=True, bypass_document_validation=False):
    """Insert an iterable of documents.

      >>> db.test.count()
      0
      >>> result = db.test.insert_many([{'x': i} for i in range(2)])
      >>> result.inserted_ids
      [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')]
      >>> db.test.count()
      2

    :Parameters:
      - `documents`: An iterable of documents to insert.
      - `ordered` (optional): If ``True`` (the default) documents will be
        inserted on the server serially, in the order provided. If an error
        occurs all remaining inserts are aborted. If ``False``, documents
        will be inserted on the server in arbitrary order, possibly in
        parallel, and all document inserts will be attempted.
      - `bypass_document_validation`: (optional) If ``True``, allows the
        write to opt-out of document level validation. Default is
        ``False``.

    :Returns:
      An instance of :class:`~pymongo.results.InsertManyResult`.

    .. seealso:: :ref:`writes-and-ids`

    .. note:: `bypass_document_validation` requires server version
      **>= 3.2**

    .. versionchanged:: 3.2
      Added bypass_document_validation support

    .. versionadded:: 3.0
    """
    if not isinstance(documents, collections.Iterable) or not documents:
        raise TypeError("documents must be a non-empty list")
    inserted_ids = []

    def gen():
        """A generator that validates documents and handles _ids."""
        for document in documents:
            common.validate_is_document_type("document", document)
            if not isinstance(document, RawBSONDocument):
                if "_id" not in document:
                    document["_id"] = ObjectId()
                inserted_ids.append(document["_id"])
            yield (message._INSERT, document)

    blk = _Bulk(self, ordered, bypass_document_validation)
    blk.ops = [doc for doc in gen()]
    blk.execute(self.write_concern.document)
    return InsertManyResult(inserted_ids, self.write_concern.acknowledged)
[ "def", "insert_many", "(", "self", ",", "documents", ",", "ordered", "=", "True", ",", "bypass_document_validation", "=", "False", ")", ":", "if", "not", "isinstance", "(", "documents", ",", "collections", ".", "Iterable", ")", "or", "not", "documents", ":", "raise", "TypeError", "(", "\"documents must be a non-empty list\"", ")", "inserted_ids", "=", "[", "]", "def", "gen", "(", ")", ":", "\"\"\"A generator that validates documents and handles _ids.\"\"\"", "for", "document", "in", "documents", ":", "common", ".", "validate_is_document_type", "(", "\"document\"", ",", "document", ")", "if", "not", "isinstance", "(", "document", ",", "RawBSONDocument", ")", ":", "if", "\"_id\"", "not", "in", "document", ":", "document", "[", "\"_id\"", "]", "=", "ObjectId", "(", ")", "inserted_ids", ".", "append", "(", "document", "[", "\"_id\"", "]", ")", "yield", "(", "message", ".", "_INSERT", ",", "document", ")", "blk", "=", "_Bulk", "(", "self", ",", "ordered", ",", "bypass_document_validation", ")", "blk", ".", "ops", "=", "[", "doc", "for", "doc", "in", "gen", "(", ")", "]", "blk", ".", "execute", "(", "self", ".", "write_concern", ".", "document", ")", "return", "InsertManyResult", "(", "inserted_ids", ",", "self", ".", "write_concern", ".", "acknowledged", ")" ]
41.981132
22.509434
def _from_dict(cls, _dict): """Initialize a Element object from a json dictionary.""" args = {} if 'location' in _dict: args['location'] = Location._from_dict(_dict.get('location')) if 'text' in _dict: args['text'] = _dict.get('text') if 'types' in _dict: args['types'] = [ TypeLabel._from_dict(x) for x in (_dict.get('types')) ] if 'categories' in _dict: args['categories'] = [ Category._from_dict(x) for x in (_dict.get('categories')) ] if 'attributes' in _dict: args['attributes'] = [ Attribute._from_dict(x) for x in (_dict.get('attributes')) ] return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'location'", "in", "_dict", ":", "args", "[", "'location'", "]", "=", "Location", ".", "_from_dict", "(", "_dict", ".", "get", "(", "'location'", ")", ")", "if", "'text'", "in", "_dict", ":", "args", "[", "'text'", "]", "=", "_dict", ".", "get", "(", "'text'", ")", "if", "'types'", "in", "_dict", ":", "args", "[", "'types'", "]", "=", "[", "TypeLabel", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'types'", ")", ")", "]", "if", "'categories'", "in", "_dict", ":", "args", "[", "'categories'", "]", "=", "[", "Category", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'categories'", ")", ")", "]", "if", "'attributes'", "in", "_dict", ":", "args", "[", "'attributes'", "]", "=", "[", "Attribute", ".", "_from_dict", "(", "x", ")", "for", "x", "in", "(", "_dict", ".", "get", "(", "'attributes'", ")", ")", "]", "return", "cls", "(", "*", "*", "args", ")" ]
37.8
16.75
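A hedged construction sketch, assuming the surrounding SDK's Element and Location classes; the payload shape is illustrative:

element = Element._from_dict({
    "text": "This is a confidentiality clause.",
    "location": {"begin": 0, "end": 33},
    "types": [],
    "categories": [],
})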
def rename_file(self, fname): """Rename file""" path, valid = QInputDialog.getText(self, _('Rename'), _('New name:'), QLineEdit.Normal, osp.basename(fname)) if valid: path = osp.join(osp.dirname(fname), to_text_string(path)) if path == fname: return if osp.exists(path): if QMessageBox.warning(self, _("Rename"), _("Do you really want to rename <b>%s</b> and " "overwrite the existing file <b>%s</b>?" ) % (osp.basename(fname), osp.basename(path)), QMessageBox.Yes|QMessageBox.No) == QMessageBox.No: return try: misc.rename_file(fname, path) if osp.isfile(fname): self.sig_renamed.emit(fname, path) else: self.sig_renamed_tree.emit(fname, path) return path except EnvironmentError as error: QMessageBox.critical(self, _("Rename"), _("<b>Unable to rename file <i>%s</i></b>" "<br><br>Error message:<br>%s" ) % (osp.basename(fname), to_text_string(error)))
[ "def", "rename_file", "(", "self", ",", "fname", ")", ":", "path", ",", "valid", "=", "QInputDialog", ".", "getText", "(", "self", ",", "_", "(", "'Rename'", ")", ",", "_", "(", "'New name:'", ")", ",", "QLineEdit", ".", "Normal", ",", "osp", ".", "basename", "(", "fname", ")", ")", "if", "valid", ":", "path", "=", "osp", ".", "join", "(", "osp", ".", "dirname", "(", "fname", ")", ",", "to_text_string", "(", "path", ")", ")", "if", "path", "==", "fname", ":", "return", "if", "osp", ".", "exists", "(", "path", ")", ":", "if", "QMessageBox", ".", "warning", "(", "self", ",", "_", "(", "\"Rename\"", ")", ",", "_", "(", "\"Do you really want to rename <b>%s</b> and \"", "\"overwrite the existing file <b>%s</b>?\"", ")", "%", "(", "osp", ".", "basename", "(", "fname", ")", ",", "osp", ".", "basename", "(", "path", ")", ")", ",", "QMessageBox", ".", "Yes", "|", "QMessageBox", ".", "No", ")", "==", "QMessageBox", ".", "No", ":", "return", "try", ":", "misc", ".", "rename_file", "(", "fname", ",", "path", ")", "if", "osp", ".", "isfile", "(", "fname", ")", ":", "self", ".", "sig_renamed", ".", "emit", "(", "fname", ",", "path", ")", "else", ":", "self", ".", "sig_renamed_tree", ".", "emit", "(", "fname", ",", "path", ")", "return", "path", "except", "EnvironmentError", "as", "error", ":", "QMessageBox", ".", "critical", "(", "self", ",", "_", "(", "\"Rename\"", ")", ",", "_", "(", "\"<b>Unable to rename file <i>%s</i></b>\"", "\"<br><br>Error message:<br>%s\"", ")", "%", "(", "osp", ".", "basename", "(", "fname", ")", ",", "to_text_string", "(", "error", ")", ")", ")" ]
48.642857
18.714286
def generate_table_from(data): "Output a nicely formatted ascii table" table = Texttable(max_width=120) table.add_row(["view", "method", "status", "count", "minimum", "maximum", "mean", "stdev", "queries", "querytime"]) table.set_cols_align(["l", "l", "l", "r", "r", "r", "r", "r", "r", "r"]) for item in sorted(data): mean = round(sum(data[item]['times'])/data[item]['count'], 3) mean_sql = round(sum(data[item]['sql'])/data[item]['count'], 3) mean_sqltime = round(sum(data[item]['sqltime'])/data[item]['count'], 3) sdsq = sum([(i - mean) ** 2 for i in data[item]['times']]) try: stdev = '%.2f' % ((sdsq / (len(data[item]['times']) - 1)) ** .5) except ZeroDivisionError: stdev = '0.00' minimum = "%.2f" % min(data[item]['times']) maximum = "%.2f" % max(data[item]['times']) table.add_row([data[item]['view'], data[item]['method'], data[item]['status'], data[item]['count'], minimum, maximum, '%.3f' % mean, stdev, mean_sql, mean_sqltime]) return table.draw()
[ "def", "generate_table_from", "(", "data", ")", ":", "table", "=", "Texttable", "(", "max_width", "=", "120", ")", "table", ".", "add_row", "(", "[", "\"view\"", ",", "\"method\"", ",", "\"status\"", ",", "\"count\"", ",", "\"minimum\"", ",", "\"maximum\"", ",", "\"mean\"", ",", "\"stdev\"", ",", "\"queries\"", ",", "\"querytime\"", "]", ")", "table", ".", "set_cols_align", "(", "[", "\"l\"", ",", "\"l\"", ",", "\"l\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", ",", "\"r\"", "]", ")", "for", "item", "in", "sorted", "(", "data", ")", ":", "mean", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "mean_sql", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'sql'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "mean_sqltime", "=", "round", "(", "sum", "(", "data", "[", "item", "]", "[", "'sqltime'", "]", ")", "/", "data", "[", "item", "]", "[", "'count'", "]", ",", "3", ")", "sdsq", "=", "sum", "(", "[", "(", "i", "-", "mean", ")", "**", "2", "for", "i", "in", "data", "[", "item", "]", "[", "'times'", "]", "]", ")", "try", ":", "stdev", "=", "'%.2f'", "%", "(", "(", "sdsq", "/", "(", "len", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "-", "1", ")", ")", "**", ".5", ")", "except", "ZeroDivisionError", ":", "stdev", "=", "'0.00'", "minimum", "=", "\"%.2f\"", "%", "min", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "maximum", "=", "\"%.2f\"", "%", "max", "(", "data", "[", "item", "]", "[", "'times'", "]", ")", "table", ".", "add_row", "(", "[", "data", "[", "item", "]", "[", "'view'", "]", ",", "data", "[", "item", "]", "[", "'method'", "]", ",", "data", "[", "item", "]", "[", "'status'", "]", ",", "data", "[", "item", "]", "[", "'count'", "]", ",", "minimum", ",", "maximum", ",", "'%.3f'", "%", "mean", ",", "stdev", ",", "mean_sql", ",", "mean_sqltime", "]", ")", "return", "table", ".", "draw", "(", ")" ]
46.565217
31.173913
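A hedged usage sketch, assuming texttable is installed; the profiling dict below is illustrative but matches the keys the function reads:

data = {
    "home": {
        "view": "home", "method": "GET", "status": 200, "count": 2,
        "times": [0.12, 0.18], "sql": [3, 5], "sqltime": [0.010, 0.014],
    },
}
print(generate_table_from(data))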