Dataset columns:
    text           string    (lengths 89 to 104k)
    code_tokens    list
    avg_line_len   float64   (7.91 to 980)
    score          float64   (0 to 630)
def _key_index_iter(self) -> Iterator[Tuple[str, Any]]:
    """ Allows for iteration over the ``KeyIndex`` values.

    This function is intended to be assigned to a newly created KeyIndex class.
    It enables iteration over the ``KeyIndex`` names and values.
    We don't use a mixin to avoid issues with YAML.

    Note:
        This isn't recursive like ``dataclasses.asdict(...)``. Generally, we don't
        want those recursive conversion properties. Plus, this approach is much faster.
    """
    for k, v in vars(self).items():
        yield k, v
[ "def", "_key_index_iter", "(", "self", ")", "->", "Iterator", "[", "Tuple", "[", "str", ",", "Any", "]", "]", ":", "for", "k", ",", "v", "in", "vars", "(", "self", ")", ".", "items", "(", ")", ":", "yield", "k", ",", "v" ]
45.166667
29.416667
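A minimal usage sketch for the iterator above, assuming ``_key_index_iter`` from this record is in scope; the ``KeyIndex`` class and its fields here are hypothetical stand-ins for the generated class the docstring refers to.

from typing import Any, Iterator, Tuple  # needed by _key_index_iter's annotations

class KeyIndex:
    """Hypothetical stand-in for a generated KeyIndex class."""
    def __init__(self, pt_bin: int, cent_bin: int) -> None:
        self.pt_bin = pt_bin
        self.cent_bin = cent_bin

# Attach the function so instances become iterable over (name, value) pairs.
KeyIndex.__iter__ = _key_index_iter

print(list(KeyIndex(pt_bin=1, cent_bin=3)))  # [('pt_bin', 1), ('cent_bin', 3)]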
def _validate(cls, engine, *version_cols):
    """
    Validates the archive table. Validates the following criteria:

    - all version columns exist in the archive table
    - the python types of the user table and archive table columns are the same
    - a user_id column exists
    - there is a unique constraint on version and the other versioned columns
      from the user table

    :param engine: instance of :class:`~sqlalchemy.engine.Engine`
    :param *version_cols: instances of :class:`~InstrumentedAttribute` from
        the user table corresponding to the columns that versioning pivots around
    :raises: :class:`~LogTableCreationError`
    """
    cls._version_col_names = set()
    for version_column_ut in version_cols:
        # Make sure all version columns exist on this table
        version_col_name = version_column_ut.key
        version_column_at = getattr(cls, version_col_name, None)
        if not isinstance(version_column_at, InstrumentedAttribute):
            raise LogTableCreationError("Log table needs {} column".format(version_col_name))

        # Make sure the type of the user table and log table columns are the same
        version_col_at_t = version_column_at.property.columns[0].type.__class__
        version_col_ut_t = version_column_ut.property.columns[0].type.__class__
        if version_col_at_t != version_col_ut_t:
            raise LogTableCreationError(
                "Type of column {} must match in log and user table".format(version_col_name)
            )
        cls._version_col_names.add(version_col_name)

    # Ensure user added a user_id column
    # TODO: should user_id column be optional?
    user_id = getattr(cls, 'user_id', None)
    if not isinstance(user_id, InstrumentedAttribute):
        raise LogTableCreationError("Log table needs user_id column")

    # Check the unique constraint on the versioned columns
    version_col_names = list(cls._version_col_names) + ['version_id']
    if not utils.has_constraint(cls, engine, *version_col_names):
        raise LogTableCreationError("There is no unique constraint on the version columns")
[ "def", "_validate", "(", "cls", ",", "engine", ",", "*", "version_cols", ")", ":", "cls", ".", "_version_col_names", "=", "set", "(", ")", "for", "version_column_ut", "in", "version_cols", ":", "# Make sure all version columns exist on this table", "version_col_name", "=", "version_column_ut", ".", "key", "version_column_at", "=", "getattr", "(", "cls", ",", "version_col_name", ",", "None", ")", "if", "not", "isinstance", "(", "version_column_at", ",", "InstrumentedAttribute", ")", ":", "raise", "LogTableCreationError", "(", "\"Log table needs {} column\"", ".", "format", "(", "version_col_name", ")", ")", "# Make sure the type of the user table and log table columns are the same", "version_col_at_t", "=", "version_column_at", ".", "property", ".", "columns", "[", "0", "]", ".", "type", ".", "__class__", "version_col_ut_t", "=", "version_column_ut", ".", "property", ".", "columns", "[", "0", "]", ".", "type", ".", "__class__", "if", "version_col_at_t", "!=", "version_col_ut_t", ":", "raise", "LogTableCreationError", "(", "\"Type of column {} must match in log and user table\"", ".", "format", "(", "version_col_name", ")", ")", "cls", ".", "_version_col_names", ".", "add", "(", "version_col_name", ")", "# Ensure user added a user_id column", "# TODO: should user_id column be optional?", "user_id", "=", "getattr", "(", "cls", ",", "'user_id'", ",", "None", ")", "if", "not", "isinstance", "(", "user_id", ",", "InstrumentedAttribute", ")", ":", "raise", "LogTableCreationError", "(", "\"Log table needs user_id column\"", ")", "# Check the unique constraint on the versioned columns", "version_col_names", "=", "list", "(", "cls", ".", "_version_col_names", ")", "+", "[", "'version_id'", "]", "if", "not", "utils", ".", "has_constraint", "(", "cls", ",", "engine", ",", "*", "version_col_names", ")", ":", "raise", "LogTableCreationError", "(", "\"There is no unique constraint on the version columns\"", ")" ]
52.139535
25.116279
def to_pandas_dataframe(self):
    """
    Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame.

    Each record in the RDD becomes a column, and the DataFrame is indexed with a
    DatetimeIndex generated from this RDD's index.
    """
    pd_index = self.index().to_pandas_index()
    return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
[ "def", "to_pandas_dataframe", "(", "self", ")", ":", "pd_index", "=", "self", ".", "index", "(", ")", ".", "to_pandas_index", "(", ")", "return", "pd", ".", "DataFrame", ".", "from_items", "(", "self", ".", "collect", "(", ")", ")", ".", "set_index", "(", "pd_index", ")" ]
45.888889
21.666667
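Note that ``pd.DataFrame.from_items`` was deprecated in pandas 0.23 and removed in later releases. A rough equivalent of the call above on modern pandas, sketched here on plain Python data rather than an RDD (the sample items and index are stand-ins, not from the original project):

import pandas as pd

items = [('a', [1, 2, 3]), ('b', [4, 5, 6])]     # stand-in for self.collect()
index = pd.date_range('2000-01-01', periods=3)   # stand-in for the RDD's DatetimeIndex

# Old: pd.DataFrame.from_items(items).set_index(index)
df = pd.DataFrame(dict(items)).set_index(index)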
def _get_populate_values(self, instance) -> Tuple[str, str]:
    """Gets all values (for each language) from the specified instance's
    `populate_from` field.

    Arguments:
        instance:
            The instance to get the values from.

    Returns:
        A list of (lang_code, value) tuples.
    """
    return [
        (
            lang_code,
            self._get_populate_from_value(
                instance,
                self.populate_from,
                lang_code
            ),
        )
        for lang_code, _ in settings.LANGUAGES
    ]
[ "def", "_get_populate_values", "(", "self", ",", "instance", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "return", "[", "(", "lang_code", ",", "self", ".", "_get_populate_from_value", "(", "instance", ",", "self", ".", "populate_from", ",", "lang_code", ")", ",", ")", "for", "lang_code", ",", "_", "in", "settings", ".", "LANGUAGES", "]" ]
27.043478
18.347826
def module_names(self):
    """get all the modules in the controller_prefix

    :returns: set, a set of string module names
    """
    controller_prefix = self.controller_prefix
    _module_name_cache = self._module_name_cache
    if controller_prefix in _module_name_cache:
        return _module_name_cache[controller_prefix]

    module = self.get_module(controller_prefix)

    if hasattr(module, "__path__"):
        # path attr exists so this is a package
        modules = self.find_modules(module.__path__[0], controller_prefix)
    else:
        # we have a lonely .py file
        modules = set([controller_prefix])

    _module_name_cache.setdefault(controller_prefix, {})
    _module_name_cache[controller_prefix] = modules

    return modules
[ "def", "module_names", "(", "self", ")", ":", "controller_prefix", "=", "self", ".", "controller_prefix", "_module_name_cache", "=", "self", ".", "_module_name_cache", "if", "controller_prefix", "in", "_module_name_cache", ":", "return", "_module_name_cache", "[", "controller_prefix", "]", "module", "=", "self", ".", "get_module", "(", "controller_prefix", ")", "if", "hasattr", "(", "module", ",", "\"__path__\"", ")", ":", "# path attr exists so this is a package", "modules", "=", "self", ".", "find_modules", "(", "module", ".", "__path__", "[", "0", "]", ",", "controller_prefix", ")", "else", ":", "# we have a lonely .py file", "modules", "=", "set", "(", "[", "controller_prefix", "]", ")", "_module_name_cache", ".", "setdefault", "(", "controller_prefix", ",", "{", "}", ")", "_module_name_cache", "[", "controller_prefix", "]", "=", "modules", "return", "modules" ]
33.458333
19.375
def ad_hoc_magic_from_file(filename, **kwargs):
    """Ad-hoc emulation of magic.from_file from python-magic."""
    with open(filename, 'rb') as stream:
        head = stream.read(16)
        if head[:4] == b'\x7fELF':
            return b'application/x-executable'
        elif head[:2] == b'MZ':
            return b'application/x-dosexec'
        else:
            raise NotImplementedError()
[ "def", "ad_hoc_magic_from_file", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "stream", ":", "head", "=", "stream", ".", "read", "(", "16", ")", "if", "head", "[", ":", "4", "]", "==", "b'\\x7fELF'", ":", "return", "b'application/x-executable'", "elif", "head", "[", ":", "2", "]", "==", "b'MZ'", ":", "return", "b'application/x-dosexec'", "else", ":", "raise", "NotImplementedError", "(", ")" ]
38.7
6.9
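A quick check of the detection logic above, using a throwaway file with a fabricated ELF header; it assumes ``ad_hoc_magic_from_file`` from this record is in scope.

import tempfile

# Write a fake 16-byte header starting with the ELF magic number.
with tempfile.NamedTemporaryFile(suffix='.bin', delete=False) as tmp:
    tmp.write(b'\x7fELF' + b'\x00' * 12)
    path = tmp.name

print(ad_hoc_magic_from_file(path))  # b'application/x-executable'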
def getLabelByName(self, name):
    """Gets a label widget by its component name

    :param name: name of the AbstractStimulusComponent which this label is named after
    :type name: str
    :returns: :class:`DragLabel<sparkle.gui.drag_label.DragLabel>`
    """
    name = name.lower()
    if name in self.stimLabels:
        return self.stimLabels[name]
    else:
        return None
[ "def", "getLabelByName", "(", "self", ",", "name", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "in", "self", ".", "stimLabels", ":", "return", "self", ".", "stimLabels", "[", "name", "]", "else", ":", "return", "None" ]
34.5
17.333333
def add_item(self, *args, **kwargs):
    """Pass through to provider methods."""
    try:
        self._get_provider_session('assessment_basic_authoring_session').add_item(*args, **kwargs)
    except InvalidArgument:
        self._get_sub_package_provider_session(
            'assessment_authoring',
            'assessment_part_item_design_session').add_item(*args, **kwargs)
[ "def", "add_item", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "self", ".", "_get_provider_session", "(", "'assessment_basic_authoring_session'", ")", ".", "add_item", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "InvalidArgument", ":", "self", ".", "_get_sub_package_provider_session", "(", "'assessment_authoring'", ",", "'assessment_part_item_design_session'", ")", ".", "add_item", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
54.714286
25.428571
def delete_index(self,*fields):
    """Delete the index on the specified fields"""
    for f in fields:
        if not f in self.indices:
            raise ValueError,"No index on field %s" %f
    for f in fields:
        del self.indices[f]
    self.commit()
[ "def", "delete_index", "(", "self", ",", "*", "fields", ")", ":", "for", "f", "in", "fields", ":", "if", "not", "f", "in", "self", ".", "indices", ":", "raise", "ValueError", ",", "\"No index on field %s\"", "%", "f", "for", "f", "in", "fields", ":", "del", "self", ".", "indices", "[", "f", "]", "self", ".", "commit", "(", ")" ]
35.875
10.75
def vnormg(v, ndim):
    """
    Compute the magnitude of a double precision vector of arbitrary dimension.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/vnormg_c.html

    :param v: Vector whose magnitude is to be found.
    :type v: Array of floats
    :param ndim: Dimension of v
    :type ndim: int
    :return: magnitude of v calculated in a numerically stable way
    :rtype: float
    """
    v = stypes.toDoubleVector(v)
    ndim = ctypes.c_int(ndim)
    return libspice.vnormg_c(v, ndim)
[ "def", "vnormg", "(", "v", ",", "ndim", ")", ":", "v", "=", "stypes", ".", "toDoubleVector", "(", "v", ")", "ndim", "=", "ctypes", ".", "c_int", "(", "ndim", ")", "return", "libspice", ".", "vnormg_c", "(", "v", ",", "ndim", ")" ]
31.125
18.5
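For illustration, the same wrapper is exposed by SpiceyPy as ``spiceypy.vnormg``; a 3-4-5 triangle makes the expected result obvious. This assumes SpiceyPy is installed and that its public signature matches the wrapper above.

import spiceypy

print(spiceypy.vnormg([3.0, 4.0, 0.0], 3))  # 5.0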
def getThings(self):
    """
    Get the things registered in your account

    :return: dict with things registered in the logged in account and API call status
    """
    login_return = self._is_logged_in()
    # raise NameError("Please login first using the login function, with username and password")
    data = {
        "path": "/thing",
        "host": "https://sky.brunt.co"
    }
    resp = self._http.request(data, RequestTypes.GET)
    self._things = resp['things']
    resp.update(login_return)
    return resp
[ "def", "getThings", "(", "self", ")", ":", "login_return", "=", "self", ".", "_is_logged_in", "(", ")", "# raise NameError(\"Please login first using the login function, with username and password\")", "data", "=", "{", "\"path\"", ":", "\"/thing\"", ",", "\"host\"", ":", "\"https://sky.brunt.co\"", "}", "resp", "=", "self", ".", "_http", ".", "request", "(", "data", ",", "RequestTypes", ".", "GET", ")", "self", ".", "_things", "=", "resp", "[", "'things'", "]", "resp", ".", "update", "(", "login_return", ")", "return", "resp" ]
38.066667
18.933333
def reset_position_scales(self):
    """
    Reset x and y scales
    """
    if not self.facet.shrink:
        return

    with suppress(AttributeError):
        self.panel_scales_x.reset()

    with suppress(AttributeError):
        self.panel_scales_y.reset()
[ "def", "reset_position_scales", "(", "self", ")", ":", "if", "not", "self", ".", "facet", ".", "shrink", ":", "return", "with", "suppress", "(", "AttributeError", ")", ":", "self", ".", "panel_scales_x", ".", "reset", "(", ")", "with", "suppress", "(", "AttributeError", ")", ":", "self", ".", "panel_scales_y", ".", "reset", "(", ")" ]
23.916667
11.25
def find_additional_rels(self, all_models):
    """Attempts to scan for additional relationship fields for this model based on all of
    the other models' structures and relationships.
    """
    for model_name, model in iteritems(all_models):
        if model_name != self.name:
            for field_name in model.field_names:
                field = model.fields[field_name]
                # if this field type references the current model
                if field.field_type == self.name and field.back_populates is not None and \
                        (isinstance(field, StatikForeignKeyField) or
                         isinstance(field, StatikManyToManyField)):
                    self.additional_rels[field.back_populates] = {
                        'to_model': model_name,
                        'back_populates': field_name,
                        'secondary': (model_name, field.field_type)
                        if isinstance(field, StatikManyToManyField) else None
                    }
                    logger.debug(
                        'Additional relationship %s.%s -> %s (%s)',
                        self.name, field.back_populates, model_name,
                        self.additional_rels[field.back_populates]
                    )
[ "def", "find_additional_rels", "(", "self", ",", "all_models", ")", ":", "for", "model_name", ",", "model", "in", "iteritems", "(", "all_models", ")", ":", "if", "model_name", "!=", "self", ".", "name", ":", "for", "field_name", "in", "model", ".", "field_names", ":", "field", "=", "model", ".", "fields", "[", "field_name", "]", "# if this field type references the current model", "if", "field", ".", "field_type", "==", "self", ".", "name", "and", "field", ".", "back_populates", "is", "not", "None", "and", "(", "isinstance", "(", "field", ",", "StatikForeignKeyField", ")", "or", "isinstance", "(", "field", ",", "StatikManyToManyField", ")", ")", ":", "self", ".", "additional_rels", "[", "field", ".", "back_populates", "]", "=", "{", "'to_model'", ":", "model_name", ",", "'back_populates'", ":", "field_name", ",", "'secondary'", ":", "(", "model_name", ",", "field", ".", "field_type", ")", "if", "isinstance", "(", "field", ",", "StatikManyToManyField", ")", "else", "None", "}", "logger", ".", "debug", "(", "'Additional relationship %s.%s -> %s (%s)'", ",", "self", ".", "name", ",", "field", ".", "back_populates", ",", "model_name", ",", "self", ".", "additional_rels", "[", "field", ".", "back_populates", "]", ")" ]
56.958333
18.541667
def _sample_aAt(self,n):
    """Sampling frequencies, angles, and times part of sampling, for stream with gap"""
    # Use streamdf's _sample_aAt to generate unperturbed frequencies,
    # angles
    Om,angle,dt= super(streamgapdf,self)._sample_aAt(n)
    # Now rewind angles by timpact, apply the kicks, and run forward again
    dangle_at_impact= angle-numpy.tile(self._progenitor_angle.T,(n,1)).T\
        -(Om-numpy.tile(self._progenitor_Omega.T,(n,1)).T)*self._timpact
    dangle_par_at_impact= numpy.dot(dangle_at_impact.T,
                                    self._dsigomeanProgDirection)\
        *self._gap_sigMeanSign
    # Calculate and apply kicks (points not yet released have zero kick)
    dOr= self._kick_interpdOr(dangle_par_at_impact)
    dOp= self._kick_interpdOp(dangle_par_at_impact)
    dOz= self._kick_interpdOz(dangle_par_at_impact)
    Om[0,:]+= dOr
    Om[1,:]+= dOp
    Om[2,:]+= dOz
    angle[0,:]+=\
        self._kick_interpdar(dangle_par_at_impact)+dOr*self._timpact
    angle[1,:]+=\
        self._kick_interpdap(dangle_par_at_impact)+dOp*self._timpact
    angle[2,:]+=\
        self._kick_interpdaz(dangle_par_at_impact)+dOz*self._timpact
    return (Om,angle,dt)
[ "def", "_sample_aAt", "(", "self", ",", "n", ")", ":", "# Use streamdf's _sample_aAt to generate unperturbed frequencies,", "# angles", "Om", ",", "angle", ",", "dt", "=", "super", "(", "streamgapdf", ",", "self", ")", ".", "_sample_aAt", "(", "n", ")", "# Now rewind angles by timpact, apply the kicks, and run forward again", "dangle_at_impact", "=", "angle", "-", "numpy", ".", "tile", "(", "self", ".", "_progenitor_angle", ".", "T", ",", "(", "n", ",", "1", ")", ")", ".", "T", "-", "(", "Om", "-", "numpy", ".", "tile", "(", "self", ".", "_progenitor_Omega", ".", "T", ",", "(", "n", ",", "1", ")", ")", ".", "T", ")", "*", "self", ".", "_timpact", "dangle_par_at_impact", "=", "numpy", ".", "dot", "(", "dangle_at_impact", ".", "T", ",", "self", ".", "_dsigomeanProgDirection", ")", "*", "self", ".", "_gap_sigMeanSign", "# Calculate and apply kicks (points not yet released have zero kick)", "dOr", "=", "self", ".", "_kick_interpdOr", "(", "dangle_par_at_impact", ")", "dOp", "=", "self", ".", "_kick_interpdOp", "(", "dangle_par_at_impact", ")", "dOz", "=", "self", ".", "_kick_interpdOz", "(", "dangle_par_at_impact", ")", "Om", "[", "0", ",", ":", "]", "+=", "dOr", "Om", "[", "1", ",", ":", "]", "+=", "dOp", "Om", "[", "2", ",", ":", "]", "+=", "dOz", "angle", "[", "0", ",", ":", "]", "+=", "self", ".", "_kick_interpdar", "(", "dangle_par_at_impact", ")", "+", "dOr", "*", "self", ".", "_timpact", "angle", "[", "1", ",", ":", "]", "+=", "self", ".", "_kick_interpdap", "(", "dangle_par_at_impact", ")", "+", "dOp", "*", "self", ".", "_timpact", "angle", "[", "2", ",", ":", "]", "+=", "self", ".", "_kick_interpdaz", "(", "dangle_par_at_impact", ")", "+", "dOz", "*", "self", ".", "_timpact", "return", "(", "Om", ",", "angle", ",", "dt", ")" ]
51.84
23.08
def graph_from_cov_df(df, threshold=.5, gain=2., n=None, class_dict=CLASSES):
    """Compose pair of lists of dicts (nodes, edges) for the graph described by a DataFrame"""
    n = n or len(df)
    nodes = [{'group': class_dict.get(name, 0), "name": name} for name in df.index.values][:n]
    edges = []
    for i, (row_name, row) in enumerate(df.iterrows()):
        for j, value in enumerate(row.values):
            if i > j and value * gain > threshold and i < n and j < n:
                edges += [{'source': i, 'target': j, 'value': gain * value}]
    return nodes, edges
[ "def", "graph_from_cov_df", "(", "df", ",", "threshold", "=", ".5", ",", "gain", "=", "2.", ",", "n", "=", "None", ",", "class_dict", "=", "CLASSES", ")", ":", "n", "=", "n", "or", "len", "(", "df", ")", "nodes", "=", "[", "{", "'group'", ":", "class_dict", ".", "get", "(", "name", ",", "0", ")", ",", "\"name\"", ":", "name", "}", "for", "name", "in", "df", ".", "index", ".", "values", "]", "[", ":", "n", "]", "edges", "=", "[", "]", "for", "i", ",", "(", "row_name", ",", "row", ")", "in", "enumerate", "(", "df", ".", "iterrows", "(", ")", ")", ":", "for", "j", ",", "value", "in", "enumerate", "(", "row", ".", "values", ")", ":", "if", "i", ">", "j", "and", "value", "*", "gain", ">", "threshold", "and", "i", "<", "n", "and", "j", "<", "n", ":", "edges", "+=", "[", "{", "'source'", ":", "i", ",", "'target'", ":", "j", ",", "'value'", ":", "gain", "*", "value", "}", "]", "return", "nodes", ",", "edges" ]
56.9
24.1
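A small sketch of how the function above could be driven, using a hand-made symmetric DataFrame and an explicit empty ``class_dict`` so every node falls back to group 0. It assumes ``graph_from_cov_df`` is importable from its module (where the ``CLASSES`` default is defined).

import pandas as pd

names = ['a', 'b', 'c']
cov = pd.DataFrame([[1.0, 0.8, 0.1],
                    [0.8, 1.0, 0.3],
                    [0.1, 0.3, 1.0]], index=names, columns=names)

nodes, edges = graph_from_cov_df(cov, threshold=0.5, gain=2.0, class_dict={})
# nodes -> [{'group': 0, 'name': 'a'}, ...]
# edges -> only lower-triangle pairs with value * gain > threshold,
#          e.g. {'source': 1, 'target': 0, 'value': 1.6}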
def prepend_http(url):
    """
    Ensure there's a scheme specified at the beginning of a url, defaulting to http://

    >>> prepend_http('duckduckgo.com')
    'http://duckduckgo.com'
    """
    url = url.lstrip()
    if not urlparse(url).scheme:
        return 'http://' + url
    return url
[ "def", "prepend_http", "(", "url", ")", ":", "url", "=", "url", ".", "lstrip", "(", ")", "if", "not", "urlparse", "(", "url", ")", ".", "scheme", ":", "return", "'http://'", "+", "url", "return", "url" ]
28.2
13.5
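The doctest above covers the default case; a couple of additional calls, assuming ``prepend_http`` and its ``urlparse`` dependency are in scope:

from urllib.parse import urlparse  # dependency of prepend_http

print(prepend_http('  duckduckgo.com'))     # leading whitespace stripped: 'http://duckduckgo.com'
print(prepend_http('https://example.org'))  # already has a scheme, returned unchanged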
def group(self, p_todos):
    """
    Groups the todos according to the given group string.
    """
    # preorder todos for the group sort
    p_todos = _apply_sort_functions(p_todos, self.pregroupfunctions)

    # initialize result with a single group
    result = OrderedDict([((), p_todos)])

    for (function, label), _ in self.groupfunctions:
        oldresult = result
        result = OrderedDict()
        for oldkey, oldgroup in oldresult.items():
            for key, _group in groupby(oldgroup, function):
                newgroup = list(_group)

                if not isinstance(key, list):
                    key = [key]

                for subkey in key:
                    subkey = "{}: {}".format(label, subkey)
                    newkey = oldkey + (subkey,)

                    if newkey in result:
                        result[newkey] = result[newkey] + newgroup
                    else:
                        result[newkey] = newgroup

    # sort all groups
    for key, _group in result.items():
        result[key] = self.sort(_group)

    return result
[ "def", "group", "(", "self", ",", "p_todos", ")", ":", "# preorder todos for the group sort", "p_todos", "=", "_apply_sort_functions", "(", "p_todos", ",", "self", ".", "pregroupfunctions", ")", "# initialize result with a single group", "result", "=", "OrderedDict", "(", "[", "(", "(", ")", ",", "p_todos", ")", "]", ")", "for", "(", "function", ",", "label", ")", ",", "_", "in", "self", ".", "groupfunctions", ":", "oldresult", "=", "result", "result", "=", "OrderedDict", "(", ")", "for", "oldkey", ",", "oldgroup", "in", "oldresult", ".", "items", "(", ")", ":", "for", "key", ",", "_group", "in", "groupby", "(", "oldgroup", ",", "function", ")", ":", "newgroup", "=", "list", "(", "_group", ")", "if", "not", "isinstance", "(", "key", ",", "list", ")", ":", "key", "=", "[", "key", "]", "for", "subkey", "in", "key", ":", "subkey", "=", "\"{}: {}\"", ".", "format", "(", "label", ",", "subkey", ")", "newkey", "=", "oldkey", "+", "(", "subkey", ",", ")", "if", "newkey", "in", "result", ":", "result", "[", "newkey", "]", "=", "result", "[", "newkey", "]", "+", "newgroup", "else", ":", "result", "[", "newkey", "]", "=", "newgroup", "# sort all groups", "for", "key", ",", "_group", "in", "result", ".", "items", "(", ")", ":", "result", "[", "key", "]", "=", "self", ".", "sort", "(", "_group", ")", "return", "result" ]
34.058824
17.117647
def properties(lines):
    """Parse properties block

    Returns:
        dict: {property_type: (atom_index, value)}
    """
    results = {}
    for i, line in enumerate(lines):
        type_ = line[3:6]
        if type_ not in ["CHG", "RAD", "ISO"]:
            continue  # Other properties are not supported yet
        count = int(line[6:9])
        results[type_] = []
        for j in range(count):
            idx = int(line[10 + j * 8: 13 + j * 8])
            val = int(line[14 + j * 8: 17 + j * 8])
            results[type_].append((idx, val))
    return results
[ "def", "properties", "(", "lines", ")", ":", "results", "=", "{", "}", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "type_", "=", "line", "[", "3", ":", "6", "]", "if", "type_", "not", "in", "[", "\"CHG\"", ",", "\"RAD\"", ",", "\"ISO\"", "]", ":", "continue", "# Other properties are not supported yet", "count", "=", "int", "(", "line", "[", "6", ":", "9", "]", ")", "results", "[", "type_", "]", "=", "[", "]", "for", "j", "in", "range", "(", "count", ")", ":", "idx", "=", "int", "(", "line", "[", "10", "+", "j", "*", "8", ":", "13", "+", "j", "*", "8", "]", ")", "val", "=", "int", "(", "line", "[", "14", "+", "j", "*", "8", ":", "17", "+", "j", "*", "8", "]", ")", "results", "[", "type_", "]", ".", "append", "(", "(", "idx", ",", "val", ")", ")", "return", "results" ]
30.944444
13.833333
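A minimal sketch of the fixed-width layout the parser above expects, using a hand-built charge line (the concatenation keeps the column positions explicit); it assumes ``properties`` from this record is in scope.

# "M  CHG" line: count at [6:9], then (index, value) pairs on an 8-character stride.
line = "M  CHG" + "  1" + " " + "  2" + " " + "  1"   # one entry: atom 2, charge +1

print(properties([line]))  # {'CHG': [(2, 1)]}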
def sendintr(self):
    '''This sends a SIGINT to the child. It does not require
    the SIGINT to be the first character on a line. '''

    n, byte = self.ptyproc.sendintr()
    self._log_control(byte)
[ "def", "sendintr", "(", "self", ")", ":", "n", ",", "byte", "=", "self", ".", "ptyproc", ".", "sendintr", "(", ")", "self", ".", "_log_control", "(", "byte", ")" ]
35.666667
19
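A hedged usage sketch: this method belongs to a pexpect-style spawn object, so interrupting a long-running child might look like the following (assuming pexpect is installed and ``cat`` is available on the system).

import pexpect

child = pexpect.spawn('cat')   # long-running child attached to a pty
child.sendintr()               # deliver SIGINT through the pty
child.expect(pexpect.EOF)      # cat exits on SIGINT, so the stream closes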
def build_metamodel(self, id_generator=None):
    '''
    Build and return a *xtuml.MetaModel* containing previously loaded input.
    '''
    m = xtuml.MetaModel(id_generator)

    self.populate(m)

    return m
[ "def", "build_metamodel", "(", "self", ",", "id_generator", "=", "None", ")", ":", "m", "=", "xtuml", ".", "MetaModel", "(", "id_generator", ")", "self", ".", "populate", "(", "m", ")", "return", "m" ]
27.111111
23.111111
def delete_row(self, index):
    """Deletes the row from the worksheet at the specified index.

    :param index: Index of a row for deletion.
    :type index: int
    """
    body = {
        "requests": [{
            "deleteDimension": {
                "range": {
                    "sheetId": self.id,
                    "dimension": "ROWS",
                    "startIndex": index - 1,
                    "endIndex": index
                }
            }
        }]
    }

    return self.spreadsheet.batch_update(body)
[ "def", "delete_row", "(", "self", ",", "index", ")", ":", "body", "=", "{", "\"requests\"", ":", "[", "{", "\"deleteDimension\"", ":", "{", "\"range\"", ":", "{", "\"sheetId\"", ":", "self", ".", "id", ",", "\"dimension\"", ":", "\"ROWS\"", ",", "\"startIndex\"", ":", "index", "-", "1", ",", "\"endIndex\"", ":", "index", "}", "}", "}", "]", "}", "return", "self", ".", "spreadsheet", ".", "batch_update", "(", "body", ")" ]
28.5
14.45
def get_node(self, key):
    """ return the node with the key or None if it does not exist """
    self._check_if_open()
    try:
        if not key.startswith('/'):
            key = '/' + key
        return self._handle.get_node(self.root, key)
    except _table_mod.exceptions.NoSuchNodeError:
        return None
[ "def", "get_node", "(", "self", ",", "key", ")", ":", "self", ".", "_check_if_open", "(", ")", "try", ":", "if", "not", "key", ".", "startswith", "(", "'/'", ")", ":", "key", "=", "'/'", "+", "key", "return", "self", ".", "_handle", ".", "get_node", "(", "self", ".", "root", ",", "key", ")", "except", "_table_mod", ".", "exceptions", ".", "NoSuchNodeError", ":", "return", "None" ]
37.777778
12.333333
def exec_request(self, URL):
    """Sends the actual request; returns response."""

    ## Throttle request, if need be
    interval = time.time() - self.__ts_last_req
    if (interval < self.__min_req_interval):
        time.sleep(self.__min_req_interval - interval)

    ## Construct and execute request
    headers = {
        "X-ELS-APIKey": self.api_key,
        "User-Agent": self.__user_agent,
        "Accept": 'application/json'
    }
    if self.inst_token:
        headers["X-ELS-Insttoken"] = self.inst_token
    logger.info('Sending GET request to ' + URL)
    r = requests.get(URL, headers=headers)
    self.__ts_last_req = time.time()
    self._status_code = r.status_code
    if r.status_code == 200:
        self._status_msg = 'data retrieved'
        return json.loads(r.text)
    else:
        self._status_msg = "HTTP " + str(r.status_code) + " Error from " + URL + \
            " and using headers " + str(headers) + ": " + r.text
        raise requests.HTTPError("HTTP " + str(r.status_code) + " Error from " + URL +
                                 "\nand using headers " + str(headers) + ":\n" + r.text)
[ "def", "exec_request", "(", "self", ",", "URL", ")", ":", "## Throttle request, if need be", "interval", "=", "time", ".", "time", "(", ")", "-", "self", ".", "__ts_last_req", "if", "(", "interval", "<", "self", ".", "__min_req_interval", ")", ":", "time", ".", "sleep", "(", "self", ".", "__min_req_interval", "-", "interval", ")", "## Construct and execute request", "headers", "=", "{", "\"X-ELS-APIKey\"", ":", "self", ".", "api_key", ",", "\"User-Agent\"", ":", "self", ".", "__user_agent", ",", "\"Accept\"", ":", "'application/json'", "}", "if", "self", ".", "inst_token", ":", "headers", "[", "\"X-ELS-Insttoken\"", "]", "=", "self", ".", "inst_token", "logger", ".", "info", "(", "'Sending GET request to '", "+", "URL", ")", "r", "=", "requests", ".", "get", "(", "URL", ",", "headers", "=", "headers", ")", "self", ".", "__ts_last_req", "=", "time", ".", "time", "(", ")", "self", ".", "_status_code", "=", "r", ".", "status_code", "if", "r", ".", "status_code", "==", "200", ":", "self", ".", "_status_msg", "=", "'data retrieved'", "return", "json", ".", "loads", "(", "r", ".", "text", ")", "else", ":", "self", ".", "_status_msg", "=", "\"HTTP \"", "+", "str", "(", "r", ".", "status_code", ")", "+", "\" Error from \"", "+", "URL", "+", "\" and using headers \"", "+", "str", "(", "headers", ")", "+", "\": \"", "+", "r", ".", "text", "raise", "requests", ".", "HTTPError", "(", "\"HTTP \"", "+", "str", "(", "r", ".", "status_code", ")", "+", "\" Error from \"", "+", "URL", "+", "\"\\nand using headers \"", "+", "str", "(", "headers", ")", "+", "\":\\n\"", "+", "r", ".", "text", ")" ]
41.62069
19.103448
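The throttling at the top of the method is a generic rate-limiting pattern; a stripped-down, standalone sketch of just that piece (class and attribute names here are mine, not the library's):

import time

class Throttled:
    """Standalone sketch of the request-throttling pattern used above."""

    def __init__(self, min_interval=0.25):
        self._min_interval = min_interval
        self._ts_last = 0.0

    def wait_turn(self):
        # Sleep just long enough that consecutive calls are at least
        # min_interval seconds apart, then record the new timestamp.
        interval = time.time() - self._ts_last
        if interval < self._min_interval:
            time.sleep(self._min_interval - interval)
        self._ts_last = time.time()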
def _writeSedimentTable(self, session, fileObject, mapTable, replaceParamFile):
    """
    Write Sediment Mapping Table Method

    This method writes the sediments special mapping table case.
    """
    # Write the sediment mapping table header
    fileObject.write('%s\n' % (mapTable.name))
    fileObject.write('NUM_SED %s\n' % (mapTable.numSed))

    # Write the value header line
    fileObject.write(
        'Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\n' % (
            ' ' * 22, ' ' * 3, ' ' * 5))

    # Retrive the sediment mapping table values
    sediments = session.query(MTSediment). \
        filter(MTSediment.mapTable == mapTable). \
        order_by(MTSediment.id). \
        all()

    # Write sediments out to file
    for sediment in sediments:
        # Determine spacing for aesthetics
        space1 = 42 - len(sediment.description)

        # Pad values with zeros / Get replacement variable
        specGravString = vwp(sediment.specificGravity, replaceParamFile)
        partDiamString = vwp(sediment.particleDiameter, replaceParamFile)

        try:
            specGrav = '%.6f' % specGravString
        except:
            specGrav = '%s' % specGravString

        try:
            partDiam = '%.6f' % partDiamString
        except:
            partDiam = '%s' % partDiamString

        fileObject.write('%s%s%s%s%s%s%s\n' % (
            sediment.description, ' ' * space1, specGrav, ' ' * 5, partDiam, ' ' * 6,
            sediment.outputFilename))
[ "def", "_writeSedimentTable", "(", "self", ",", "session", ",", "fileObject", ",", "mapTable", ",", "replaceParamFile", ")", ":", "# Write the sediment mapping table header", "fileObject", ".", "write", "(", "'%s\\n'", "%", "(", "mapTable", ".", "name", ")", ")", "fileObject", ".", "write", "(", "'NUM_SED %s\\n'", "%", "(", "mapTable", ".", "numSed", ")", ")", "# Write the value header line", "fileObject", ".", "write", "(", "'Sediment Description%sSpec. Grav%sPart. Dia%sOutput Filename\\n'", "%", "(", "' '", "*", "22", ",", "' '", "*", "3", ",", "' '", "*", "5", ")", ")", "# Retrive the sediment mapping table values", "sediments", "=", "session", ".", "query", "(", "MTSediment", ")", ".", "filter", "(", "MTSediment", ".", "mapTable", "==", "mapTable", ")", ".", "order_by", "(", "MTSediment", ".", "id", ")", ".", "all", "(", ")", "# Write sediments out to file", "for", "sediment", "in", "sediments", ":", "# Determine spacing for aesthetics", "space1", "=", "42", "-", "len", "(", "sediment", ".", "description", ")", "# Pad values with zeros / Get replacement variable", "specGravString", "=", "vwp", "(", "sediment", ".", "specificGravity", ",", "replaceParamFile", ")", "partDiamString", "=", "vwp", "(", "sediment", ".", "particleDiameter", ",", "replaceParamFile", ")", "try", ":", "specGrav", "=", "'%.6f'", "%", "specGravString", "except", ":", "specGrav", "=", "'%s'", "%", "specGravString", "try", ":", "partDiam", "=", "'%.6f'", "%", "partDiamString", "except", ":", "partDiam", "=", "'%s'", "%", "partDiamString", "fileObject", ".", "write", "(", "'%s%s%s%s%s%s%s\\n'", "%", "(", "sediment", ".", "description", ",", "' '", "*", "space1", ",", "specGrav", ",", "' '", "*", "5", ",", "partDiam", ",", "' '", "*", "6", ",", "sediment", ".", "outputFilename", ")", ")" ]
36.372093
22.930233
def run_type(self):
    """
    Returns the run type. Currently supports LDA, GGA, vdW-DF and HF calcs.

    TODO: Fix for other functional types like PW91, other vdW types, etc.
    """
    METAGGA_TYPES = {"TPSS", "RTPSS", "M06L", "MBJL", "SCAN", "MS0", "MS1", "MS2"}

    if self.parameters.get("LHFCALC", False):
        rt = "HF"
    elif self.parameters.get("METAGGA", "").strip().upper() in METAGGA_TYPES:
        rt = self.parameters["METAGGA"].strip().upper()
    elif self.parameters.get("LUSE_VDW", False):
        vdw_gga = {"RE": "DF", "OR": "optPBE", "BO": "optB88",
                   "MK": "optB86b", "ML": "DF2"}
        gga = self.parameters.get("GGA").upper()
        rt = "vdW-" + vdw_gga[gga]
    elif self.potcar_symbols[0].split()[0] == 'PAW':
        rt = "LDA"
    else:
        rt = "GGA"
    if self.is_hubbard:
        rt += "+U"
    return rt
[ "def", "run_type", "(", "self", ")", ":", "METAGGA_TYPES", "=", "{", "\"TPSS\"", ",", "\"RTPSS\"", ",", "\"M06L\"", ",", "\"MBJL\"", ",", "\"SCAN\"", ",", "\"MS0\"", ",", "\"MS1\"", ",", "\"MS2\"", "}", "if", "self", ".", "parameters", ".", "get", "(", "\"LHFCALC\"", ",", "False", ")", ":", "rt", "=", "\"HF\"", "elif", "self", ".", "parameters", ".", "get", "(", "\"METAGGA\"", ",", "\"\"", ")", ".", "strip", "(", ")", ".", "upper", "(", ")", "in", "METAGGA_TYPES", ":", "rt", "=", "self", ".", "parameters", "[", "\"METAGGA\"", "]", ".", "strip", "(", ")", ".", "upper", "(", ")", "elif", "self", ".", "parameters", ".", "get", "(", "\"LUSE_VDW\"", ",", "False", ")", ":", "vdw_gga", "=", "{", "\"RE\"", ":", "\"DF\"", ",", "\"OR\"", ":", "\"optPBE\"", ",", "\"BO\"", ":", "\"optB88\"", ",", "\"MK\"", ":", "\"optB86b\"", ",", "\"ML\"", ":", "\"DF2\"", "}", "gga", "=", "self", ".", "parameters", ".", "get", "(", "\"GGA\"", ")", ".", "upper", "(", ")", "rt", "=", "\"vdW-\"", "+", "vdw_gga", "[", "gga", "]", "elif", "self", ".", "potcar_symbols", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", "==", "'PAW'", ":", "rt", "=", "\"LDA\"", "else", ":", "rt", "=", "\"GGA\"", "if", "self", ".", "is_hubbard", ":", "rt", "+=", "\"+U\"", "return", "rt" ]
37.28
21.92
def updateAndFlush(self, login, tableName, cells):
    """
    Parameters:
     - login
     - tableName
     - cells
    """
    self.send_updateAndFlush(login, tableName, cells)
    self.recv_updateAndFlush()
[ "def", "updateAndFlush", "(", "self", ",", "login", ",", "tableName", ",", "cells", ")", ":", "self", ".", "send_updateAndFlush", "(", "login", ",", "tableName", ",", "cells", ")", "self", ".", "recv_updateAndFlush", "(", ")" ]
22.444444
15.333333
def modify(self, fields=None, **fields_kwargs):
    """update the fields of this instance with the values in dict fields

    This should rarely be messed with; if you would like to manipulate the
    fields you should override _modify()

    :param fields: dict, the fields in a dict
    :param **fields_kwargs: dict, if you would like to pass the fields as key=val
        this picks those up and combines them with fields
    :returns: set, all the names of the fields that were modified
    """
    modified_fields = set()
    fields = self.make_dict(fields, fields_kwargs)
    fields = self._modify(fields)

    for field_name, field_val in fields.items():
        in_schema = field_name in self.schema.fields
        if in_schema:
            setattr(self, field_name, field_val)
            modified_fields.add(field_name)

    return modified_fields
[ "def", "modify", "(", "self", ",", "fields", "=", "None", ",", "*", "*", "fields_kwargs", ")", ":", "modified_fields", "=", "set", "(", ")", "fields", "=", "self", ".", "make_dict", "(", "fields", ",", "fields_kwargs", ")", "fields", "=", "self", ".", "_modify", "(", "fields", ")", "for", "field_name", ",", "field_val", "in", "fields", ".", "items", "(", ")", ":", "in_schema", "=", "field_name", "in", "self", ".", "schema", ".", "fields", "if", "in_schema", ":", "setattr", "(", "self", ",", "field_name", ",", "field_val", ")", "modified_fields", ".", "add", "(", "field_name", ")", "return", "modified_fields" ]
43.047619
17.666667
def levels(self):
    """
    Return a sequence of |CategoryLevel| objects representing the
    hierarchy of this category collection. The sequence is empty when the
    category collection is not hierarchical, that is, contains only
    leaf-level categories. The levels are ordered from the leaf level to
    the root level; so the first level will contain the same categories
    as this category collection.
    """
    cat = self._xChart.cat
    if cat is None:
        return []
    return [CategoryLevel(lvl) for lvl in cat.lvls]
[ "def", "levels", "(", "self", ")", ":", "cat", "=", "self", ".", "_xChart", ".", "cat", "if", "cat", "is", "None", ":", "return", "[", "]", "return", "[", "CategoryLevel", "(", "lvl", ")", "for", "lvl", "in", "cat", ".", "lvls", "]" ]
44
19.692308
def edit(self, description='', files={}):
    """Edit this gist.

    :param str description: (optional), description of the gist
    :param dict files: (optional), files that make up this gist; the
        key(s) should be the file name(s) and the values should be another
        (optional) dictionary with (optional) keys: 'content' and
        'filename' where the former is the content of the file and the
        latter is the new name of the file.
    :returns: bool -- whether the edit was successful
    """
    data = {}
    json = None
    if description:
        data['description'] = description
    if files:
        data['files'] = files
    if data:
        json = self._json(self._patch(self._api, data=dumps(data)), 200)
    if json:
        self._update_(json)
        return True
    return False
[ "def", "edit", "(", "self", ",", "description", "=", "''", ",", "files", "=", "{", "}", ")", ":", "data", "=", "{", "}", "json", "=", "None", "if", "description", ":", "data", "[", "'description'", "]", "=", "description", "if", "files", ":", "data", "[", "'files'", "]", "=", "files", "if", "data", ":", "json", "=", "self", ".", "_json", "(", "self", ".", "_patch", "(", "self", ".", "_api", ",", "data", "=", "dumps", "(", "data", ")", ")", ",", "200", ")", "if", "json", ":", "self", ".", "_update_", "(", "json", ")", "return", "True", "return", "False" ]
36.583333
20.458333
def _get(self, url, params={}):
    """Wrapper around request.get() to use the API prefix. Returns a JSON response."""
    req = self._session.get(self._api_prefix + url, params=params)
    return self._action(req)
[ "def", "_get", "(", "self", ",", "url", ",", "params", "=", "{", "}", ")", ":", "req", "=", "self", ".", "_session", ".", "get", "(", "self", ".", "_api_prefix", "+", "url", ",", "params", "=", "params", ")", "return", "self", ".", "_action", "(", "req", ")" ]
55.75
11.75
def filedown(environ, filename, cache=True, cache_timeout=None,
             action=None, real_filename=None, x_sendfile=False,
             x_header_name=None, x_filename=None, fileobj=None,
             default_mimetype='application/octet-stream'):
    """
    @param filename: is used for display in download
    @param real_filename: is used for the real file location
    @param x_urlfile: is only used in x-sendfile, and be set to x-sendfile header
    @param fileobj: if provided, then returned as file content
    @type fileobj: (fobj, mtime, size)

    filedown now supports web server controlled download, you should set
    xsendfile=True, and add x_header, for example:

    nginx  ('X-Accel-Redirect', '/path/to/local_url')
    apache ('X-Sendfile', '/path/to/local_url')
    """
    from .common import safe_str
    from werkzeug.http import parse_range_header

    guessed_type = mimetypes.guess_type(filename)
    mime_type = guessed_type[0] or default_mimetype
    real_filename = real_filename or filename

    #make common headers
    headers = []
    headers.append(('Content-Type', mime_type))
    d_filename = _get_download_filename(environ, os.path.basename(filename))
    if action == 'download':
        headers.append(('Content-Disposition', 'attachment; %s' % d_filename))
    elif action == 'inline':
        headers.append(('Content-Disposition', 'inline; %s' % d_filename))

    if x_sendfile:
        if not x_header_name or not x_filename:
            raise Exception("x_header_name or x_filename can't be empty")
        headers.append((x_header_name, safe_str(x_filename)))
        return Response('', status=200, headers=headers,
                        direct_passthrough=True)
    else:
        request = environ.get('werkzeug.request')
        if request:
            range = request.range
        else:
            range = parse_range_header(environ.get('HTTP_RANGE'))
        #when request range,only recognize "bytes" as range units
        if range and range.units=="bytes":
            try:
                fsize = os.path.getsize(real_filename)
            except OSError as e:
                return Response("Not found",status=404)
            mtime = datetime.utcfromtimestamp(os.path.getmtime(real_filename))
            mtime_str = http_date(mtime)
            if cache:
                etag = _generate_etag(mtime, fsize, real_filename)
            else:
                etag = mtime_str
            if_range = environ.get('HTTP_IF_RANGE')
            if if_range:
                check_if_range_ok = (if_range.strip('"')==etag)
                #print "check_if_range_ok (%s) = (%s ==%s)"%(check_if_range_ok,if_range.strip('"'),etag)
            else:
                check_if_range_ok = True

            rbegin,rend = range.ranges[0]
            if check_if_range_ok and (rbegin+1)<fsize:
                if rend == None:
                    rend = fsize
                headers.append(('Content-Length',str(rend-rbegin)))
                #werkzeug do not count rend with the same way of rfc7233,so -1
                headers.append(('Content-Range','%s %d-%d/%d' %(range.units,rbegin, rend-1, fsize)))
                headers.append(('Last-Modified', mtime_str))
                if cache:
                    headers.append(('ETag', '"%s"' % etag))
                #for small file, read it to memory and return directly
                #and this can avoid some issue with google chrome
                if (rend-rbegin) < FileIterator.chunk_size:
                    s = "".join([chunk for chunk in FileIterator(real_filename,rbegin,rend)])
                    return Response(s,status=206, headers=headers, direct_passthrough=True)
                else:
                    return Response(FileIterator(real_filename,rbegin,rend),
                                    status=206, headers=headers, direct_passthrough=True)

        #process fileobj
        if fileobj:
            f, mtime, file_size = fileobj
        else:
            f, mtime, file_size = _opener(real_filename)
        headers.append(('Date', http_date()))
        if cache:
            etag = _generate_etag(mtime, file_size, real_filename)
            headers += [
                ('ETag', '"%s"' % etag),
            ]
            if cache_timeout:
                headers += [
                    ('Cache-Control', 'max-age=%d, public' % cache_timeout),
                    ('Expires', http_date(time() + cache_timeout))
                ]
            if not is_resource_modified(environ, etag, last_modified=mtime):
                f.close()
                return Response(status=304, headers=headers)
        else:
            headers.append(('Cache-Control', 'public'))

        headers.extend((
            ('Content-Length', str(file_size)),
            ('Last-Modified', http_date(mtime))
        ))
        return Response(wrap_file(environ, f), status=200, headers=headers,
                        direct_passthrough=True)
[ "def", "filedown", "(", "environ", ",", "filename", ",", "cache", "=", "True", ",", "cache_timeout", "=", "None", ",", "action", "=", "None", ",", "real_filename", "=", "None", ",", "x_sendfile", "=", "False", ",", "x_header_name", "=", "None", ",", "x_filename", "=", "None", ",", "fileobj", "=", "None", ",", "default_mimetype", "=", "'application/octet-stream'", ")", ":", "from", ".", "common", "import", "safe_str", "from", "werkzeug", ".", "http", "import", "parse_range_header", "guessed_type", "=", "mimetypes", ".", "guess_type", "(", "filename", ")", "mime_type", "=", "guessed_type", "[", "0", "]", "or", "default_mimetype", "real_filename", "=", "real_filename", "or", "filename", "#make common headers", "headers", "=", "[", "]", "headers", ".", "append", "(", "(", "'Content-Type'", ",", "mime_type", ")", ")", "d_filename", "=", "_get_download_filename", "(", "environ", ",", "os", ".", "path", ".", "basename", "(", "filename", ")", ")", "if", "action", "==", "'download'", ":", "headers", ".", "append", "(", "(", "'Content-Disposition'", ",", "'attachment; %s'", "%", "d_filename", ")", ")", "elif", "action", "==", "'inline'", ":", "headers", ".", "append", "(", "(", "'Content-Disposition'", ",", "'inline; %s'", "%", "d_filename", ")", ")", "if", "x_sendfile", ":", "if", "not", "x_header_name", "or", "not", "x_filename", ":", "raise", "Exception", "(", "\"x_header_name or x_filename can't be empty\"", ")", "headers", ".", "append", "(", "(", "x_header_name", ",", "safe_str", "(", "x_filename", ")", ")", ")", "return", "Response", "(", "''", ",", "status", "=", "200", ",", "headers", "=", "headers", ",", "direct_passthrough", "=", "True", ")", "else", ":", "request", "=", "environ", ".", "get", "(", "'werkzeug.request'", ")", "if", "request", ":", "range", "=", "request", ".", "range", "else", ":", "range", "=", "parse_range_header", "(", "environ", ".", "get", "(", "'HTTP_RANGE'", ")", ")", "#when request range,only recognize \"bytes\" as range units", "if", "range", "and", "range", ".", "units", "==", "\"bytes\"", ":", "try", ":", "fsize", "=", "os", ".", "path", ".", "getsize", "(", "real_filename", ")", "except", "OSError", "as", "e", ":", "return", "Response", "(", "\"Not found\"", ",", "status", "=", "404", ")", "mtime", "=", "datetime", ".", "utcfromtimestamp", "(", "os", ".", "path", ".", "getmtime", "(", "real_filename", ")", ")", "mtime_str", "=", "http_date", "(", "mtime", ")", "if", "cache", ":", "etag", "=", "_generate_etag", "(", "mtime", ",", "fsize", ",", "real_filename", ")", "else", ":", "etag", "=", "mtime_str", "if_range", "=", "environ", ".", "get", "(", "'HTTP_IF_RANGE'", ")", "if", "if_range", ":", "check_if_range_ok", "=", "(", "if_range", ".", "strip", "(", "'\"'", ")", "==", "etag", ")", "#print \"check_if_range_ok (%s) = (%s ==%s)\"%(check_if_range_ok,if_range.strip('\"'),etag)", "else", ":", "check_if_range_ok", "=", "True", "rbegin", ",", "rend", "=", "range", ".", "ranges", "[", "0", "]", "if", "check_if_range_ok", "and", "(", "rbegin", "+", "1", ")", "<", "fsize", ":", "if", "rend", "==", "None", ":", "rend", "=", "fsize", "headers", ".", "append", "(", "(", "'Content-Length'", ",", "str", "(", "rend", "-", "rbegin", ")", ")", ")", "#werkzeug do not count rend with the same way of rfc7233,so -1", "headers", ".", "append", "(", "(", "'Content-Range'", ",", "'%s %d-%d/%d'", "%", "(", "range", ".", "units", ",", "rbegin", ",", "rend", "-", "1", ",", "fsize", ")", ")", ")", "headers", ".", "append", "(", "(", "'Last-Modified'", ",", "mtime_str", ")", ")", "if", 
"cache", ":", "headers", ".", "append", "(", "(", "'ETag'", ",", "'\"%s\"'", "%", "etag", ")", ")", "#for small file, read it to memory and return directly", "#and this can avoid some issue with google chrome", "if", "(", "rend", "-", "rbegin", ")", "<", "FileIterator", ".", "chunk_size", ":", "s", "=", "\"\"", ".", "join", "(", "[", "chunk", "for", "chunk", "in", "FileIterator", "(", "real_filename", ",", "rbegin", ",", "rend", ")", "]", ")", "return", "Response", "(", "s", ",", "status", "=", "206", ",", "headers", "=", "headers", ",", "direct_passthrough", "=", "True", ")", "else", ":", "return", "Response", "(", "FileIterator", "(", "real_filename", ",", "rbegin", ",", "rend", ")", ",", "status", "=", "206", ",", "headers", "=", "headers", ",", "direct_passthrough", "=", "True", ")", "#process fileobj", "if", "fileobj", ":", "f", ",", "mtime", ",", "file_size", "=", "fileobj", "else", ":", "f", ",", "mtime", ",", "file_size", "=", "_opener", "(", "real_filename", ")", "headers", ".", "append", "(", "(", "'Date'", ",", "http_date", "(", ")", ")", ")", "if", "cache", ":", "etag", "=", "_generate_etag", "(", "mtime", ",", "file_size", ",", "real_filename", ")", "headers", "+=", "[", "(", "'ETag'", ",", "'\"%s\"'", "%", "etag", ")", ",", "]", "if", "cache_timeout", ":", "headers", "+=", "[", "(", "'Cache-Control'", ",", "'max-age=%d, public'", "%", "cache_timeout", ")", ",", "(", "'Expires'", ",", "http_date", "(", "time", "(", ")", "+", "cache_timeout", ")", ")", "]", "if", "not", "is_resource_modified", "(", "environ", ",", "etag", ",", "last_modified", "=", "mtime", ")", ":", "f", ".", "close", "(", ")", "return", "Response", "(", "status", "=", "304", ",", "headers", "=", "headers", ")", "else", ":", "headers", ".", "append", "(", "(", "'Cache-Control'", ",", "'public'", ")", ")", "headers", ".", "extend", "(", "(", "(", "'Content-Length'", ",", "str", "(", "file_size", ")", ")", ",", "(", "'Last-Modified'", ",", "http_date", "(", "mtime", ")", ")", ")", ")", "return", "Response", "(", "wrap_file", "(", "environ", ",", "f", ")", ",", "status", "=", "200", ",", "headers", "=", "headers", ",", "direct_passthrough", "=", "True", ")" ]
40.982906
21.478632
def list_availability_zones(call=None):
    '''
    List all availability zones in the current region
    '''
    ret = {}

    params = {'Action': 'DescribeZones',
              'RegionId': get_location()}
    items = query(params)

    for zone in items['Zones']['Zone']:
        ret[zone['ZoneId']] = {}
        for item in zone:
            ret[zone['ZoneId']][item] = six.text_type(zone[item])

    return ret
[ "def", "list_availability_zones", "(", "call", "=", "None", ")", ":", "ret", "=", "{", "}", "params", "=", "{", "'Action'", ":", "'DescribeZones'", ",", "'RegionId'", ":", "get_location", "(", ")", "}", "items", "=", "query", "(", "params", ")", "for", "zone", "in", "items", "[", "'Zones'", "]", "[", "'Zone'", "]", ":", "ret", "[", "zone", "[", "'ZoneId'", "]", "]", "=", "{", "}", "for", "item", "in", "zone", ":", "ret", "[", "zone", "[", "'ZoneId'", "]", "]", "[", "item", "]", "=", "six", ".", "text_type", "(", "zone", "[", "item", "]", ")", "return", "ret" ]
24.9375
19.9375
def _trigger(self):
    """
    Add stats to json and dump to disk.
    Note that this method is idempotent.
    """
    if len(self._stat_now):
        self._stat_now['epoch_num'] = self.epoch_num
        self._stat_now['global_step'] = self.global_step

        self._stats.append(self._stat_now)
        self._stat_now = {}
        self._write_stat()
[ "def", "_trigger", "(", "self", ")", ":", "if", "len", "(", "self", ".", "_stat_now", ")", ":", "self", ".", "_stat_now", "[", "'epoch_num'", "]", "=", "self", ".", "epoch_num", "self", ".", "_stat_now", "[", "'global_step'", "]", "=", "self", ".", "global_step", "self", ".", "_stats", ".", "append", "(", "self", ".", "_stat_now", ")", "self", ".", "_stat_now", "=", "{", "}", "self", ".", "_write_stat", "(", ")" ]
31.833333
11.5
def release(self):
    """Create a release

    1. Perform sanity checks on work file.
    2. Copy work file to releasefile location.
    3. Perform cleanup actions on releasefile.

    :returns: True if successful, False if not.
    :rtype: bool
    :raises: None
    """
    log.info("Releasing: %s", self._workfile.get_fullpath())
    ac = self.build_actions()
    ac.execute(self)
    s = ac.status().value
    if not s == ActionStatus.SUCCESS:
        ard = ActionReportDialog(ac)
        ard.exec_()
        pass
    return s == ActionStatus.SUCCESS
[ "def", "release", "(", "self", ")", ":", "log", ".", "info", "(", "\"Releasing: %s\"", ",", "self", ".", "_workfile", ".", "get_fullpath", "(", ")", ")", "ac", "=", "self", ".", "build_actions", "(", ")", "ac", ".", "execute", "(", "self", ")", "s", "=", "ac", ".", "status", "(", ")", ".", "value", "if", "not", "s", "==", "ActionStatus", ".", "SUCCESS", ":", "ard", "=", "ActionReportDialog", "(", "ac", ")", "ard", ".", "exec_", "(", ")", "pass", "return", "s", "==", "ActionStatus", ".", "SUCCESS" ]
30.25
13.95
def info(gandi, resource, id):
    """ Display information about a vhost.

    Resource must be the vhost fqdn.
    """
    output_keys = ['name', 'state', 'date_creation', 'paas_name', 'ssl']
    if id:
        # When we will have more than paas vhost, we will append rproxy_id
        output_keys.append('paas_id')

    paas_names = gandi.paas.list_names()

    ret = []
    paas = None
    for num, item in enumerate(resource):
        vhost = gandi.vhost.info(item)
        try:
            hostedcert = gandi.hostedcert.infos(vhost['name'])
            vhost['ssl'] = 'activated' if hostedcert else 'disabled'
        except ValueError:
            vhost['ssl'] = 'disabled'
        paas = paas_names.get(vhost['paas_id'])
        if num:
            gandi.separator_line()
        ret.append(output_vhost(gandi, vhost, paas, output_keys))

    return ret
[ "def", "info", "(", "gandi", ",", "resource", ",", "id", ")", ":", "output_keys", "=", "[", "'name'", ",", "'state'", ",", "'date_creation'", ",", "'paas_name'", ",", "'ssl'", "]", "if", "id", ":", "# When we will have more than paas vhost, we will append rproxy_id", "output_keys", ".", "append", "(", "'paas_id'", ")", "paas_names", "=", "gandi", ".", "paas", ".", "list_names", "(", ")", "ret", "=", "[", "]", "paas", "=", "None", "for", "num", ",", "item", "in", "enumerate", "(", "resource", ")", ":", "vhost", "=", "gandi", ".", "vhost", ".", "info", "(", "item", ")", "try", ":", "hostedcert", "=", "gandi", ".", "hostedcert", ".", "infos", "(", "vhost", "[", "'name'", "]", ")", "vhost", "[", "'ssl'", "]", "=", "'activated'", "if", "hostedcert", "else", "'disabled'", "except", "ValueError", ":", "vhost", "[", "'ssl'", "]", "=", "'disabled'", "paas", "=", "paas_names", ".", "get", "(", "vhost", "[", "'paas_id'", "]", ")", "if", "num", ":", "gandi", ".", "separator_line", "(", ")", "ret", ".", "append", "(", "output_vhost", "(", "gandi", ",", "vhost", ",", "paas", ",", "output_keys", ")", ")", "return", "ret" ]
29.785714
19.75
def toggle_axes(self, parameters = None):
    '''Toggle axes [x,y,z] on and off for the current representation

    Parameters: dictionary of parameters to control axes:
        position/p: origin of axes
        length/l: length of axis
        offset/o: offset to place axis labels
        axis_colors/ac: axis colors
        text_colors/tc: label colors
        radii/r: axis radii
        text/t: label text
        sizes/s: label sizes
        fonts/f: label fonts'''
    if len(self._axes_reps)>0:
        for rep_id in self._axes_reps:
            self.remove_representation(rep_id)
        self._axes_reps = []
    else:
        if not isinstance(parameters,dict):
            parameters={}

        def defaults(pdict,keys,default,length=3,instance=(int,float)):
            '''Helper function to generate default values and handle errors'''
            for k in keys:
                val=pdict.get(k)
                if val!=None:
                    break
            if val==None:
                val=default
            elif isinstance(val,instance) and length>1:
                val = [val]*length
            elif isinstance(val,(list,np.generic,np.ndarray)) and length>1:
                if not all([isinstance(v,instance) for v in val]):
                    raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
            elif not isinstance(val,instance):
                raise RuntimeError("Invalid type {t} for parameter {p}. Use {i}.".format(t=type(val),p=val,i=instance))
            return val

        p = defaults(parameters,['positions','position','p'],np.average(self.coordinates,0))
        l = defaults(parameters,['lengths','length','l'],max([np.linalg.norm(x-p) for x in self.coordinates]),1)
        o = defaults(parameters,['offsets','offset','o'],l*1.05,1)
        ac = defaults(parameters,[a+c for a in ['axis_','a',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
        tc = defaults(parameters,[a+c for a in ['text_','t',''] for c in ['colors','colours','color','colour','c']],[0xff0000,0x00ff00,0x0000ff],3,(int,hex))
        r = defaults(parameters,['radii','radius','r'],[0.005]*3,3)
        t = defaults(parameters,['text','labels','t'],['X','Y','Z'],3,str)
        s = defaults(parameters,['sizes','size','s'],[32]*3,3)
        f = defaults(parameters,['fonts','font','f'],['Arial']*3,3,str)

        starts=np.array([p,p,p],float)
        ends=np.array([p+[l,0,0],p+[0,l,0],p+[0,0,l]],float)
        axis_labels_coords=np.array([p+[o,0,0],p+[0,o,0],p+[0,0,o]],float)

        a_rep=self.add_representation('cylinders',{"startCoords":starts,
                                                   "endCoords":ends,
                                                   "colors":ac,
                                                   "radii":r})
        t_rep=self.add_representation('text',{"coordinates":axis_labels_coords,
                                              "text":t,
                                              "colors":tc,
                                              "sizes":s,
                                              "fonts":f})
        self._axes_reps = [a_rep, t_rep]
[ "def", "toggle_axes", "(", "self", ",", "parameters", "=", "None", ")", ":", "if", "len", "(", "self", ".", "_axes_reps", ")", ">", "0", ":", "for", "rep_id", "in", "self", ".", "_axes_reps", ":", "self", ".", "remove_representation", "(", "rep_id", ")", "self", ".", "_axes_reps", "=", "[", "]", "else", ":", "if", "not", "isinstance", "(", "parameters", ",", "dict", ")", ":", "parameters", "=", "{", "}", "def", "defaults", "(", "pdict", ",", "keys", ",", "default", ",", "length", "=", "3", ",", "instance", "=", "(", "int", ",", "float", ")", ")", ":", "'''Helper function to generate default values and handle errors'''", "for", "k", "in", "keys", ":", "val", "=", "pdict", ".", "get", "(", "k", ")", "if", "val", "!=", "None", ":", "break", "if", "val", "==", "None", ":", "val", "=", "default", "elif", "isinstance", "(", "val", ",", "instance", ")", "and", "length", ">", "1", ":", "val", "=", "[", "val", "]", "*", "length", "elif", "isinstance", "(", "val", ",", "(", "list", ",", "np", ".", "generic", ",", "np", ".", "ndarray", ")", ")", "and", "length", ">", "1", ":", "if", "not", "all", "(", "[", "isinstance", "(", "v", ",", "instance", ")", "for", "v", "in", "val", "]", ")", ":", "raise", "RuntimeError", "(", "\"Invalid type {t} for parameter {p}. Use {i}.\"", ".", "format", "(", "t", "=", "type", "(", "val", ")", ",", "p", "=", "val", ",", "i", "=", "instance", ")", ")", "elif", "not", "isinstance", "(", "val", ",", "instance", ")", ":", "raise", "RuntimeError", "(", "\"Invalid type {t} for parameter {p}. Use {i}.\"", ".", "format", "(", "t", "=", "type", "(", "val", ")", ",", "p", "=", "val", ",", "i", "=", "instance", ")", ")", "return", "val", "p", "=", "defaults", "(", "parameters", ",", "[", "'positions'", ",", "'position'", ",", "'p'", "]", ",", "np", ".", "average", "(", "self", ".", "coordinates", ",", "0", ")", ")", "l", "=", "defaults", "(", "parameters", ",", "[", "'lengths'", ",", "'length'", ",", "'l'", "]", ",", "max", "(", "[", "np", ".", "linalg", ".", "norm", "(", "x", "-", "p", ")", "for", "x", "in", "self", ".", "coordinates", "]", ")", ",", "1", ")", "o", "=", "defaults", "(", "parameters", ",", "[", "'offsets'", ",", "'offset'", ",", "'o'", "]", ",", "l", "*", "1.05", ",", "1", ")", "ac", "=", "defaults", "(", "parameters", ",", "[", "a", "+", "c", "for", "a", "in", "[", "'axis_'", ",", "'a'", ",", "''", "]", "for", "c", "in", "[", "'colors'", ",", "'colours'", ",", "'color'", ",", "'colour'", ",", "'c'", "]", "]", ",", "[", "0xff0000", ",", "0x00ff00", ",", "0x0000ff", "]", ",", "3", ",", "(", "int", ",", "hex", ")", ")", "tc", "=", "defaults", "(", "parameters", ",", "[", "a", "+", "c", "for", "a", "in", "[", "'text_'", ",", "'t'", ",", "''", "]", "for", "c", "in", "[", "'colors'", ",", "'colours'", ",", "'color'", ",", "'colour'", ",", "'c'", "]", "]", ",", "[", "0xff0000", ",", "0x00ff00", ",", "0x0000ff", "]", ",", "3", ",", "(", "int", ",", "hex", ")", ")", "r", "=", "defaults", "(", "parameters", ",", "[", "'radii'", ",", "'radius'", ",", "'r'", "]", ",", "[", "0.005", "]", "*", "3", ",", "3", ")", "t", "=", "defaults", "(", "parameters", ",", "[", "'text'", ",", "'labels'", ",", "'t'", "]", ",", "[", "'X'", ",", "'Y'", ",", "'Z'", "]", ",", "3", ",", "str", ")", "s", "=", "defaults", "(", "parameters", ",", "[", "'sizes'", ",", "'size'", ",", "'s'", "]", ",", "[", "32", "]", "*", "3", ",", "3", ")", "f", "=", "defaults", "(", "parameters", ",", "[", "'fonts'", ",", "'font'", ",", "'f'", "]", ",", "[", "'Arial'", "]", "*", "3", ",", "3", ",", "str", ")", 
"starts", "=", "np", ".", "array", "(", "[", "p", ",", "p", ",", "p", "]", ",", "float", ")", "ends", "=", "np", ".", "array", "(", "[", "p", "+", "[", "l", ",", "0", ",", "0", "]", ",", "p", "+", "[", "0", ",", "l", ",", "0", "]", ",", "p", "+", "[", "0", ",", "0", ",", "l", "]", "]", ",", "float", ")", "axis_labels_coords", "=", "np", ".", "array", "(", "[", "p", "+", "[", "o", ",", "0", ",", "0", "]", ",", "p", "+", "[", "0", ",", "o", ",", "0", "]", ",", "p", "+", "[", "0", ",", "0", ",", "o", "]", "]", ",", "float", ")", "a_rep", "=", "self", ".", "add_representation", "(", "'cylinders'", ",", "{", "\"startCoords\"", ":", "starts", ",", "\"endCoords\"", ":", "ends", ",", "\"colors\"", ":", "ac", ",", "\"radii\"", ":", "r", "}", ")", "t_rep", "=", "self", ".", "add_representation", "(", "'text'", ",", "{", "\"coordinates\"", ":", "axis_labels_coords", ",", "\"text\"", ":", "t", ",", "\"colors\"", ":", "tc", ",", "\"sizes\"", ":", "s", ",", "\"fonts\"", ":", "f", "}", ")", "self", ".", "_axes_reps", "=", "[", "a_rep", ",", "t_rep", "]" ]
54.634921
26.253968
def _backup(path, filename): """ Backup a file. """ target = os.path.join(path, filename) if os.path.isfile(target): dt = datetime.now() new_filename = ".{0}.{1}.{2}".format( filename, dt.isoformat(), "backup" ) destination = os.path.join(path, new_filename) puts("- Backing up {0} to {1}".format( colored.cyan(target), colored.cyan(destination) )) shutil.copy(target, destination)
[ "def", "_backup", "(", "path", ",", "filename", ")", ":", "target", "=", "os", ".", "path", ".", "join", "(", "path", ",", "filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "target", ")", ":", "dt", "=", "datetime", ".", "now", "(", ")", "new_filename", "=", "\".{0}.{1}.{2}\"", ".", "format", "(", "filename", ",", "dt", ".", "isoformat", "(", ")", ",", "\"backup\"", ")", "destination", "=", "os", ".", "path", ".", "join", "(", "path", ",", "new_filename", ")", "puts", "(", "\"- Backing up {0} to {1}\"", ".", "format", "(", "colored", ".", "cyan", "(", "target", ")", ",", "colored", ".", "cyan", "(", "destination", ")", ")", ")", "shutil", ".", "copy", "(", "target", ",", "destination", ")" ]
28.117647
11.764706
def CreateMenuBar(self): """Create our menu-bar for triggering operations""" menubar = wx.MenuBar() menu = wx.Menu() menu.Append(ID_OPEN, _('&Open Profile'), _('Open a cProfile file')) menu.Append(ID_OPEN_MEMORY, _('Open &Memory'), _('Open a Meliae memory-dump file')) menu.AppendSeparator() menu.Append(ID_EXIT, _('&Close'), _('Close this RunSnakeRun window')) menubar.Append(menu, _('&File')) menu = wx.Menu() # self.packageMenuItem = menu.AppendCheckItem( # ID_PACKAGE_VIEW, _('&File View'), # _('View time spent by package/module') # ) self.percentageMenuItem = menu.AppendCheckItem( ID_PERCENTAGE_VIEW, _('&Percentage View'), _('View time spent as percent of overall time') ) self.rootViewItem = menu.Append( ID_ROOT_VIEW, _('&Root View (Home)'), _('View the root of the tree') ) self.backViewItem = menu.Append( ID_BACK_VIEW, _('&Back'), _('Go back in your viewing history') ) self.upViewItem = menu.Append( ID_UP_VIEW, _('&Up'), _('Go "up" to the parent of this node with the largest cumulative total') ) self.moreSquareViewItem = menu.AppendCheckItem( ID_MORE_SQUARE, _('&Hierarchic Squares'), _('Toggle hierarchic squares in the square-map view') ) # This stuff isn't really all that useful for profiling, # it's more about how to generate graphics to describe profiling... self.deeperViewItem = menu.Append( ID_DEEPER_VIEW, _('&Deeper'), _('View deeper squaremap views') ) self.shallowerViewItem = menu.Append( ID_SHALLOWER_VIEW, _('&Shallower'), _('View shallower squaremap views') ) # wx.ToolTip.Enable(True) menubar.Append(menu, _('&View')) self.viewTypeMenu =wx.Menu( ) menubar.Append(self.viewTypeMenu, _('View &Type')) self.SetMenuBar(menubar) wx.EVT_MENU(self, ID_EXIT, lambda evt: self.Close(True)) wx.EVT_MENU(self, ID_OPEN, self.OnOpenFile) wx.EVT_MENU(self, ID_OPEN_MEMORY, self.OnOpenMemory) wx.EVT_MENU(self, ID_PERCENTAGE_VIEW, self.OnPercentageView) wx.EVT_MENU(self, ID_UP_VIEW, self.OnUpView) wx.EVT_MENU(self, ID_DEEPER_VIEW, self.OnDeeperView) wx.EVT_MENU(self, ID_SHALLOWER_VIEW, self.OnShallowerView) wx.EVT_MENU(self, ID_ROOT_VIEW, self.OnRootView) wx.EVT_MENU(self, ID_BACK_VIEW, self.OnBackView) wx.EVT_MENU(self, ID_MORE_SQUARE, self.OnMoreSquareToggle)
[ "def", "CreateMenuBar", "(", "self", ")", ":", "menubar", "=", "wx", ".", "MenuBar", "(", ")", "menu", "=", "wx", ".", "Menu", "(", ")", "menu", ".", "Append", "(", "ID_OPEN", ",", "_", "(", "'&Open Profile'", ")", ",", "_", "(", "'Open a cProfile file'", ")", ")", "menu", ".", "Append", "(", "ID_OPEN_MEMORY", ",", "_", "(", "'Open &Memory'", ")", ",", "_", "(", "'Open a Meliae memory-dump file'", ")", ")", "menu", ".", "AppendSeparator", "(", ")", "menu", ".", "Append", "(", "ID_EXIT", ",", "_", "(", "'&Close'", ")", ",", "_", "(", "'Close this RunSnakeRun window'", ")", ")", "menubar", ".", "Append", "(", "menu", ",", "_", "(", "'&File'", ")", ")", "menu", "=", "wx", ".", "Menu", "(", ")", "# self.packageMenuItem = menu.AppendCheckItem(", "# ID_PACKAGE_VIEW, _('&File View'),", "# _('View time spent by package/module')", "# )", "self", ".", "percentageMenuItem", "=", "menu", ".", "AppendCheckItem", "(", "ID_PERCENTAGE_VIEW", ",", "_", "(", "'&Percentage View'", ")", ",", "_", "(", "'View time spent as percent of overall time'", ")", ")", "self", ".", "rootViewItem", "=", "menu", ".", "Append", "(", "ID_ROOT_VIEW", ",", "_", "(", "'&Root View (Home)'", ")", ",", "_", "(", "'View the root of the tree'", ")", ")", "self", ".", "backViewItem", "=", "menu", ".", "Append", "(", "ID_BACK_VIEW", ",", "_", "(", "'&Back'", ")", ",", "_", "(", "'Go back in your viewing history'", ")", ")", "self", ".", "upViewItem", "=", "menu", ".", "Append", "(", "ID_UP_VIEW", ",", "_", "(", "'&Up'", ")", ",", "_", "(", "'Go \"up\" to the parent of this node with the largest cumulative total'", ")", ")", "self", ".", "moreSquareViewItem", "=", "menu", ".", "AppendCheckItem", "(", "ID_MORE_SQUARE", ",", "_", "(", "'&Hierarchic Squares'", ")", ",", "_", "(", "'Toggle hierarchic squares in the square-map view'", ")", ")", "# This stuff isn't really all that useful for profiling,", "# it's more about how to generate graphics to describe profiling...", "self", ".", "deeperViewItem", "=", "menu", ".", "Append", "(", "ID_DEEPER_VIEW", ",", "_", "(", "'&Deeper'", ")", ",", "_", "(", "'View deeper squaremap views'", ")", ")", "self", ".", "shallowerViewItem", "=", "menu", ".", "Append", "(", "ID_SHALLOWER_VIEW", ",", "_", "(", "'&Shallower'", ")", ",", "_", "(", "'View shallower squaremap views'", ")", ")", "# wx.ToolTip.Enable(True)", "menubar", ".", "Append", "(", "menu", ",", "_", "(", "'&View'", ")", ")", "self", ".", "viewTypeMenu", "=", "wx", ".", "Menu", "(", ")", "menubar", ".", "Append", "(", "self", ".", "viewTypeMenu", ",", "_", "(", "'View &Type'", ")", ")", "self", ".", "SetMenuBar", "(", "menubar", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_EXIT", ",", "lambda", "evt", ":", "self", ".", "Close", "(", "True", ")", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_OPEN", ",", "self", ".", "OnOpenFile", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_OPEN_MEMORY", ",", "self", ".", "OnOpenMemory", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_PERCENTAGE_VIEW", ",", "self", ".", "OnPercentageView", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_UP_VIEW", ",", "self", ".", "OnUpView", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_DEEPER_VIEW", ",", "self", ".", "OnDeeperView", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_SHALLOWER_VIEW", ",", "self", ".", "OnShallowerView", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_ROOT_VIEW", ",", "self", ".", "OnRootView", ")", "wx", ".", "EVT_MENU", "(", "self", ",", "ID_BACK_VIEW", ",", "self", ".", "OnBackView", ")", "wx", ".", "EVT_MENU", 
"(", "self", ",", "ID_MORE_SQUARE", ",", "self", ".", "OnMoreSquareToggle", ")" ]
43.344262
20.04918
def loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.'): """loadUiType(uifile, from_imports=False, resource_suffix='_rc', import_from='.') -> (form class, base class) Load a Qt Designer .ui file and return the generated form class and the Qt base class. uifile is a file name or file-like object containing the .ui file. from_imports is optionally set to generate relative import statements. At the moment this only applies to the import of resource modules. resource_suffix is the suffix appended to the basename of any resource file specified in the .ui file to create the name of the Python module generated from the resource file by pyrcc4. The default is '_rc', i.e. if the .ui file specified a resource file called foo.qrc then the corresponding Python module is foo_rc. import_from is optionally set to the package used for relative import statements. The default is ``'.'``. """ import sys from PyQt5 import QtWidgets if sys.hexversion >= 0x03000000: from .port_v3.string_io import StringIO else: from .port_v2.string_io import StringIO code_string = StringIO() winfo = compiler.UICompiler().compileUi(uifile, code_string, from_imports, resource_suffix, import_from) ui_globals = {} exec(code_string.getvalue(), ui_globals) return (ui_globals[winfo["uiclass"]], getattr(QtWidgets, winfo["baseclass"]))
[ "def", "loadUiType", "(", "uifile", ",", "from_imports", "=", "False", ",", "resource_suffix", "=", "'_rc'", ",", "import_from", "=", "'.'", ")", ":", "import", "sys", "from", "PyQt5", "import", "QtWidgets", "if", "sys", ".", "hexversion", ">=", "0x03000000", ":", "from", ".", "port_v3", ".", "string_io", "import", "StringIO", "else", ":", "from", ".", "port_v2", ".", "string_io", "import", "StringIO", "code_string", "=", "StringIO", "(", ")", "winfo", "=", "compiler", ".", "UICompiler", "(", ")", ".", "compileUi", "(", "uifile", ",", "code_string", ",", "from_imports", ",", "resource_suffix", ",", "import_from", ")", "ui_globals", "=", "{", "}", "exec", "(", "code_string", ".", "getvalue", "(", ")", ",", "ui_globals", ")", "return", "(", "ui_globals", "[", "winfo", "[", "\"uiclass\"", "]", "]", ",", "getattr", "(", "QtWidgets", ",", "winfo", "[", "\"baseclass\"", "]", ")", ")" ]
41.823529
28.117647
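A minimal usage sketch for the loadUiType helper above; the Designer file name mainwindow.ui and the MainWindow class are assumptions for illustration, not part of the original source:

import sys
from PyQt5 import QtWidgets

# FormClass carries setupUi(); BaseClass is the matching Qt widget type.
FormClass, BaseClass = loadUiType("mainwindow.ui")  # hypothetical .ui file

class MainWindow(BaseClass, FormClass):
    def __init__(self):
        super().__init__()
        self.setupUi(self)  # populate this widget from the .ui layout

app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())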
def doframe(self, v): """This method will set the measure specified as part of a frame. If conversion from one type to another is necessary (with the measure function), the following frames should be set if one of the reference types involved in the conversion is as in the following lists: **Epoch** * UTC * TAI * LAST - position * LMST - position * GMST1 * GAST * UT1 * UT2 * TDT * TCG * TDB * TCD **Direction** * J2000 * JMEAN - epoch * JTRUE - epoch * APP - epoch * B1950 * BMEAN - epoch * BTRUE - epoch * GALACTIC * HADEC - epoch, position * AZEL - epoch, position * SUPERGALACTIC * ECLIPTIC * MECLIPTIC - epoch * TECLIPTIC - epoch * PLANET - epoch, [position] **Position** * WGS84 * ITRF **Radial Velocity** * LSRK - direction * LSRD - direction * BARY - direction * GEO - direction, epoch * TOPO - direction, epoch, position * GALACTO - direction * **Doppler** * RADIO * OPTICAL * Z * RATIO * RELATIVISTIC * BETA * GAMMA * **Frequency** * REST - direction, radialvelocity * LSRK - direction * LSRD - direction * BARY - direction * GEO - direction, epoch * TOPO - direction, epoch, position * GALACTO """ if not is_measure(v): raise TypeError('Argument is not a measure') if (v["type"] == "frequency" and v["refer"].lower() == "rest") \ or _measures.doframe(self, v): self._framestack[v["type"]] = v return True return False
[ "def", "doframe", "(", "self", ",", "v", ")", ":", "if", "not", "is_measure", "(", "v", ")", ":", "raise", "TypeError", "(", "'Argument is not a measure'", ")", "if", "(", "v", "[", "\"type\"", "]", "==", "\"frequency\"", "and", "v", "[", "\"refer\"", "]", ".", "lower", "(", ")", "==", "\"rest\"", ")", "or", "_measures", ".", "doframe", "(", "self", ",", "v", ")", ":", "self", ".", "_framestack", "[", "v", "[", "\"type\"", "]", "]", "=", "v", "return", "True", "return", "False" ]
22.428571
21.714286
def extract_package_dir(self): # type: () -> Optional[str] """ Get the package_dir dictionary from source :return: """ # package_dir={'': 'lib'}, source = self.setup_py_source() if not source: # this happens when the setup.py file is missing return None # sometime # 'package_dir' : {'': 'src'}, # sometimes # package_dir={...} if "package_dir=" in source: line = source.replace("\n", "") line = line.split("package_dir")[1] fixed = "" for char in line: fixed += char if char == "}": break line = fixed simplified_line = line.strip(" ,").replace("'", '"') parts = simplified_line.split("=") dict_src = parts[1].strip(" \t") if not dict_src.endswith("}"): raise JiggleVersionException( "Either this is hard to parse or we have 2+ src foldrs" ) try: paths_dict = ast.literal_eval(dict_src) except ValueError as ve: logger.error(source + ": " + dict_src) return "" if "" in paths_dict: candidate = paths_dict[""] if os.path.isdir(candidate): return unicode(candidate) if len(paths_dict) == 1: candidate = first_value_in_dict(paths_dict) if os.path.isdir(candidate): return unicode(candidate) else: raise JiggleVersionException( "Have path_dict, but has more than one path." ) return None
[ "def", "extract_package_dir", "(", "self", ")", ":", "# type: () -> Optional[str]", "# package_dir={'': 'lib'},", "source", "=", "self", ".", "setup_py_source", "(", ")", "if", "not", "source", ":", "# this happens when the setup.py file is missing", "return", "None", "# sometime", "# 'package_dir' : {'': 'src'},", "# sometimes", "# package_dir={...}", "if", "\"package_dir=\"", "in", "source", ":", "line", "=", "source", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "line", "=", "line", ".", "split", "(", "\"package_dir\"", ")", "[", "1", "]", "fixed", "=", "\"\"", "for", "char", "in", "line", ":", "fixed", "+=", "char", "if", "char", "==", "\"}\"", ":", "break", "line", "=", "fixed", "simplified_line", "=", "line", ".", "strip", "(", "\" ,\"", ")", ".", "replace", "(", "\"'\"", ",", "'\"'", ")", "parts", "=", "simplified_line", ".", "split", "(", "\"=\"", ")", "dict_src", "=", "parts", "[", "1", "]", ".", "strip", "(", "\" \\t\"", ")", "if", "not", "dict_src", ".", "endswith", "(", "\"}\"", ")", ":", "raise", "JiggleVersionException", "(", "\"Either this is hard to parse or we have 2+ src foldrs\"", ")", "try", ":", "paths_dict", "=", "ast", ".", "literal_eval", "(", "dict_src", ")", "except", "ValueError", "as", "ve", ":", "logger", ".", "error", "(", "source", "+", "\": \"", "+", "dict_src", ")", "return", "\"\"", "if", "\"\"", "in", "paths_dict", ":", "candidate", "=", "paths_dict", "[", "\"\"", "]", "if", "os", ".", "path", ".", "isdir", "(", "candidate", ")", ":", "return", "unicode", "(", "candidate", ")", "if", "len", "(", "paths_dict", ")", "==", "1", ":", "candidate", "=", "first_value_in_dict", "(", "paths_dict", ")", "if", "os", ".", "path", ".", "isdir", "(", "candidate", ")", ":", "return", "unicode", "(", "candidate", ")", "else", ":", "raise", "JiggleVersionException", "(", "\"Have path_dict, but has more than one path.\"", ")", "return", "None" ]
32.962264
14.849057
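A small standalone sketch of the parsing step extract_package_dir relies on: pulling the dict literal out of a package_dir= line and evaluating it safely. The sample setup.py line is invented for illustration:

import ast

line = "    package_dir={'': 'src'},"  # hypothetical line from a setup.py
dict_src = line.split("package_dir")[1].lstrip("=").strip(" ,")
paths_dict = ast.literal_eval(dict_src)   # {'': 'src'}
package_dir = paths_dict.get("")          # 'src'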
def default_logger(name): """Return a toplevel logger. This should be used only in the toplevel file. Files deeper in the hierarchy should use ``logger = logging.getLogger(__name__)``, in order to considered as children of the toplevel logger. Beware that without a setLevel() somewhere, the default value (warning) will be used, so no debug message will be shown. Args: name (str): usually `__name__` in the package toplevel __init__.py, or `__file__` in a script file (because __name__ would be "__main__" in this case). """ # https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial logger = logging.getLogger(name) # this is a basic handler, with output to stderr logger_handler = logging.StreamHandler() formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') logger_handler.setFormatter(formatter) logger.addHandler(logger_handler) return logger
[ "def", "default_logger", "(", "name", ")", ":", "# https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial", "logger", "=", "logging", ".", "getLogger", "(", "name", ")", "# this is a basic handler, with output to stderr", "logger_handler", "=", "logging", ".", "StreamHandler", "(", ")", "formatter", "=", "logging", ".", "Formatter", "(", "'%(name)s - %(levelname)s - %(message)s'", ")", "logger_handler", ".", "setFormatter", "(", "formatter", ")", "logger", ".", "addHandler", "(", "logger_handler", ")", "return", "logger" ]
36.185185
21.111111
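A short usage sketch for default_logger, assuming it has been imported from the package above; the level must still be set explicitly, as the docstring warns:

import logging

logger = default_logger(__name__)
logger.setLevel(logging.DEBUG)   # without this, only WARNING and above appear
logger.debug("debug message now visible on stderr")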
def _set_vrrp_rbridge_global(self, v, load=False): """ Setter method for vrrp_rbridge_global, mapped from YANG variable /vrrp_rbridge_global (container) If this variable is read-only (config: false) in the source YANG file, then _set_vrrp_rbridge_global is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_vrrp_rbridge_global() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=vrrp_rbridge_global.vrrp_rbridge_global, is_container='container', presence=False, yang_name="vrrp-rbridge-global", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'vrrpv3GlobalConf', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """vrrp_rbridge_global must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=vrrp_rbridge_global.vrrp_rbridge_global, is_container='container', presence=False, yang_name="vrrp-rbridge-global", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'vrrpv3GlobalConf', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)""", }) self.__vrrp_rbridge_global = t if hasattr(self, '_set'): self._set()
[ "def", "_set_vrrp_rbridge_global", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "vrrp_rbridge_global", ".", "vrrp_rbridge_global", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"vrrp-rbridge-global\"", ",", "rest_name", "=", "\"\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'cli-drop-node-name'", ":", "None", ",", "u'callpoint'", ":", "u'vrrpv3GlobalConf'", ",", "u'cli-incomplete-no'", ":", "None", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-vrrpv3'", ",", "defining_module", "=", "'brocade-vrrpv3'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"vrrp_rbridge_global must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=vrrp_rbridge_global.vrrp_rbridge_global, is_container='container', presence=False, yang_name=\"vrrp-rbridge-global\", rest_name=\"\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'vrrpv3GlobalConf', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-vrrpv3', defining_module='brocade-vrrpv3', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__vrrp_rbridge_global", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
81.227273
37.590909
def _apply_outputter(self, func, mod): ''' Apply the __outputter__ variable to the functions ''' if hasattr(mod, '__outputter__'): outp = mod.__outputter__ if func.__name__ in outp: func.__outputter__ = outp[func.__name__]
[ "def", "_apply_outputter", "(", "self", ",", "func", ",", "mod", ")", ":", "if", "hasattr", "(", "mod", ",", "'__outputter__'", ")", ":", "outp", "=", "mod", ".", "__outputter__", "if", "func", ".", "__name__", "in", "outp", ":", "func", ".", "__outputter__", "=", "outp", "[", "func", ".", "__name__", "]" ]
35.875
12.625
def build_stack_changes(stack_name, new_stack, old_stack, new_params, old_params):
    """Builds a list of strings to represent the parameters (if changed)
    and stack diff"""
    from_file = "old_%s" % (stack_name,)
    to_file = "new_%s" % (stack_name,)
    lines = difflib.context_diff(
        old_stack, new_stack,
        fromfile=from_file, tofile=to_file,
        n=7)  # ensure at least a few lines of context are displayed afterward

    template_changes = list(lines)
    log_lines = []
    if not template_changes:
        log_lines.append("*** No changes to template ***")

    param_diffs = diff_parameters(old_params, new_params)
    if param_diffs:
        log_lines.append(format_params_diff(param_diffs))
    if template_changes:
        log_lines.append("".join(template_changes))

    return log_lines
[ "def", "build_stack_changes", "(", "stack_name", ",", "new_stack", ",", "old_stack", ",", "new_params", ",", "old_params", ")", ":", "from_file", "=", "\"old_%s\"", "%", "(", "stack_name", ",", ")", "to_file", "=", "\"new_%s\"", "%", "(", "stack_name", ",", ")", "lines", "=", "difflib", ".", "context_diff", "(", "old_stack", ",", "new_stack", ",", "fromfile", "=", "from_file", ",", "tofile", "=", "to_file", ",", "n", "=", "7", ")", "# ensure at least a few lines of context are displayed afterward", "template_changes", "=", "list", "(", "lines", ")", "log_lines", "=", "[", "]", "if", "not", "template_changes", ":", "log_lines", ".", "append", "(", "\"*** No changes to template ***\"", ")", "param_diffs", "=", "diff_parameters", "(", "old_params", ",", "new_params", ")", "if", "param_diffs", ":", "log_lines", ".", "append", "(", "format_params_diff", "(", "param_diffs", ")", ")", "if", "template_changes", ":", "log_lines", ".", "append", "(", "\"\"", ".", "join", "(", "template_changes", ")", ")", "return", "log_lines" ]
39.52381
14
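The template diffing in build_stack_changes is plain difflib; a self-contained sketch with two invented template bodies shows what the context-diff portion of the log lines looks like:

import difflib

old_stack = ["Resources:\n", "  Bucket:\n", "    Type: AWS::S3::Bucket\n"]
new_stack = ["Resources:\n", "  Bucket:\n", "    Type: AWS::S3::Bucket\n",
             "    DeletionPolicy: Retain\n"]

lines = difflib.context_diff(old_stack, new_stack,
                             fromfile="old_demo", tofile="new_demo", n=7)
print("".join(lines))  # the same context diff that gets appended to log_lines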
def output(self, kind, line): "*line* should be bytes" self.destination.write(b''.join([ self._cyan, b't=%07d' % (time.time() - self._t0), self._reset, self._kind_prefixes[kind], self.markers[kind], line, self._reset, ])) self.destination.flush()
[ "def", "output", "(", "self", ",", "kind", ",", "line", ")", ":", "self", ".", "destination", ".", "write", "(", "b''", ".", "join", "(", "[", "self", ".", "_cyan", ",", "b't=%07d'", "%", "(", "time", ".", "time", "(", ")", "-", "self", ".", "_t0", ")", ",", "self", ".", "_reset", ",", "self", ".", "_kind_prefixes", "[", "kind", "]", ",", "self", ".", "markers", "[", "kind", "]", ",", "line", ",", "self", ".", "_reset", ",", "]", ")", ")", "self", ".", "destination", ".", "flush", "(", ")" ]
29.25
12.416667
def diff_charsToLines(self, diffs, lineArray): """Rehydrate the text in a diff from a string of line hashes to real lines of text. Args: diffs: Array of diff tuples. lineArray: Array of unique strings. """ for i in range(len(diffs)): text = [] for char in diffs[i][1]: text.append(lineArray[ord(char)]) diffs[i] = (diffs[i][0], "".join(text))
[ "def", "diff_charsToLines", "(", "self", ",", "diffs", ",", "lineArray", ")", ":", "for", "i", "in", "range", "(", "len", "(", "diffs", ")", ")", ":", "text", "=", "[", "]", "for", "char", "in", "diffs", "[", "i", "]", "[", "1", "]", ":", "text", ".", "append", "(", "lineArray", "[", "ord", "(", "char", ")", "]", ")", "diffs", "[", "i", "]", "=", "(", "diffs", "[", "i", "]", "[", "0", "]", ",", "\"\"", ".", "join", "(", "text", ")", ")" ]
29.923077
12.461538
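A tiny standalone sketch of the rehydration idea in diff_charsToLines: each character in a diff text is an index into lineArray, so ord() maps it back to the original line. The sample diff tuples are invented:

lineArray = ["", "alpha\n", "beta\n"]   # index 0 is conventionally unused
diffs = [(0, "\x01"), (1, "\x02")]      # chr(1) -> 'alpha\n', chr(2) -> 'beta\n'

rehydrated = [(op, "".join(lineArray[ord(ch)] for ch in text))
              for op, text in diffs]
# rehydrated == [(0, 'alpha\n'), (1, 'beta\n')]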
def temporal_latent_to_dist(name, x, hparams, output_channels=None): """Network that maps a time-indexed list of 3-D latents to a gaussian. Args: name: variable scope. x: List of 4-D Tensors indexed by time, (NHWC) hparams: tf.contrib.training.Hparams. output_channels: int, Number of channels of the output gaussian mean. Returns: dist: tfp.distributions.Normal """ _, _, width, _, res_channels = common_layers.shape_list(x) if output_channels is None: output_channels = res_channels dilation_rates = get_dilation_rates(hparams, width) with tf.variable_scope(name, reuse=tf.AUTO_REUSE): h = x for i in range(hparams.latent_encoder_depth): if hparams.latent_apply_dilations: h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, dilation_rates=dilation_rates, activation=hparams.latent_activation, dropout=hparams.latent_dropout) else: h2 = conv_stack("latent_3d_res_%d" % i, h, mid_channels=hparams.latent_encoder_width, output_channels=res_channels, activation=hparams.latent_activation, dropout=hparams.latent_dropout) h += h2 # take last activation that should capture all context since padding is # on left. h = h[:, -1, :, :, :] h = conv("res_final", h, apply_actnorm=False, conv_init="zeros", output_channels=2*output_channels, filter_size=[1, 1]) mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2] return tfp.distributions.Normal(mean, tf.exp(log_scale))
[ "def", "temporal_latent_to_dist", "(", "name", ",", "x", ",", "hparams", ",", "output_channels", "=", "None", ")", ":", "_", ",", "_", ",", "width", ",", "_", ",", "res_channels", "=", "common_layers", ".", "shape_list", "(", "x", ")", "if", "output_channels", "is", "None", ":", "output_channels", "=", "res_channels", "dilation_rates", "=", "get_dilation_rates", "(", "hparams", ",", "width", ")", "with", "tf", ".", "variable_scope", "(", "name", ",", "reuse", "=", "tf", ".", "AUTO_REUSE", ")", ":", "h", "=", "x", "for", "i", "in", "range", "(", "hparams", ".", "latent_encoder_depth", ")", ":", "if", "hparams", ".", "latent_apply_dilations", ":", "h2", "=", "dilated_conv_stack", "(", "\"dil_latent_3d_res_%d\"", "%", "i", ",", "h", ",", "mid_channels", "=", "hparams", ".", "latent_encoder_width", ",", "output_channels", "=", "res_channels", ",", "dilation_rates", "=", "dilation_rates", ",", "activation", "=", "hparams", ".", "latent_activation", ",", "dropout", "=", "hparams", ".", "latent_dropout", ")", "else", ":", "h2", "=", "conv_stack", "(", "\"latent_3d_res_%d\"", "%", "i", ",", "h", ",", "mid_channels", "=", "hparams", ".", "latent_encoder_width", ",", "output_channels", "=", "res_channels", ",", "activation", "=", "hparams", ".", "latent_activation", ",", "dropout", "=", "hparams", ".", "latent_dropout", ")", "h", "+=", "h2", "# take last activation that should capture all context since padding is", "# on left.", "h", "=", "h", "[", ":", ",", "-", "1", ",", ":", ",", ":", ",", ":", "]", "h", "=", "conv", "(", "\"res_final\"", ",", "h", ",", "apply_actnorm", "=", "False", ",", "conv_init", "=", "\"zeros\"", ",", "output_channels", "=", "2", "*", "output_channels", ",", "filter_size", "=", "[", "1", ",", "1", "]", ")", "mean", ",", "log_scale", "=", "h", "[", ":", ",", ":", ",", ":", ",", "0", ":", ":", "2", "]", ",", "h", "[", ":", ",", ":", ",", ":", ",", "1", ":", ":", "2", "]", "return", "tfp", ".", "distributions", ".", "Normal", "(", "mean", ",", "tf", ".", "exp", "(", "log_scale", ")", ")" ]
44.35
19.875
def get_lines(file_path=BOOK_PATH): r""" Retrieve text lines from the manuscript Chapter*.asc and Appendix*.asc files Args: file_path (str): Path to directory containing manuscript asciidoc files i.e.: /Users/cole-home/repos/nlpinaction/manuscript/ or nlpia.constants.BOOK_PATH Returns: list of lists of str, one list for each Chapter or Appendix >>> lines = get_lines(BOOK_PATH) >>> next(lines) ('.../src/nlpia/data/book/Appendix F -- Glossary.asc', ['= Glossary\n', '\n', "We've collected some ...]) """ if os.path.isdir(file_path): file_path = os.path.join(file_path, '*.asc') files = glob.glob(file_path) elif os.path.isfile(file_path): files = [file_path] elif '*' in file_path: if os.path.sep not in file_path: file_path = os.path.join(os.path.abspath(os.path.curdir), file_path) files = glob.glob(file_path) else: raise FileNotFoundError("Unable to find the directory or files requested.") lines = [] for filepath in files: with open(filepath, 'r') as f: lines.append(f.readlines()) return zip(files, lines)
[ "def", "get_lines", "(", "file_path", "=", "BOOK_PATH", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "file_path", ")", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "file_path", ",", "'*.asc'", ")", "files", "=", "glob", ".", "glob", "(", "file_path", ")", "elif", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "files", "=", "[", "file_path", "]", "elif", "'*'", "in", "file_path", ":", "if", "os", ".", "path", ".", "sep", "not", "in", "file_path", ":", "file_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "curdir", ")", ",", "file_path", ")", "files", "=", "glob", ".", "glob", "(", "file_path", ")", "else", ":", "raise", "FileNotFoundError", "(", "\"Unable to find the directory or files requested.\"", ")", "lines", "=", "[", "]", "for", "filepath", "in", "files", ":", "with", "open", "(", "filepath", ",", "'r'", ")", "as", "f", ":", "lines", ".", "append", "(", "f", ".", "readlines", "(", ")", ")", "return", "zip", "(", "files", ",", "lines", ")" ]
35.212121
18.969697
def AddWarning(self, warning):
    """Adds a warning.

    Args:
      warning (ExtractionWarning): warning.

    Raises:
      IOError: when the storage writer is closed.
      OSError: when the storage writer is closed.
    """
    self._RaiseIfNotWritable()

    warning = self._PrepareAttributeContainer(warning)

    self._warnings.append(warning)
    self.number_of_warnings += 1
[ "def", "AddWarning", "(", "self", ",", "warning", ")", ":", "self", ".", "_RaiseIfNotWritable", "(", ")", "warning", "=", "self", ".", "_PrepareAttributeContainer", "(", "warning", ")", "self", ".", "_warnings", ".", "append", "(", "warning", ")", "self", ".", "number_of_warnings", "+=", "1" ]
23.1875
18.0625
def onSelect_specimen(self, event): """ update figures and text when a new specimen is selected """ self.selected_meas = [] self.select_specimen(str(self.specimens_box.GetValue())) if self.ie_open: self.ie.change_selected(self.current_fit) self.update_selection()
[ "def", "onSelect_specimen", "(", "self", ",", "event", ")", ":", "self", ".", "selected_meas", "=", "[", "]", "self", ".", "select_specimen", "(", "str", "(", "self", ".", "specimens_box", ".", "GetValue", "(", ")", ")", ")", "if", "self", ".", "ie_open", ":", "self", ".", "ie", ".", "change_selected", "(", "self", ".", "current_fit", ")", "self", ".", "update_selection", "(", ")" ]
35.888889
11
def _parse_row(rowvalues, rowtypes): """ Scan a single row from an Excel file, and return the list of ranges corresponding to each consecutive span of non-empty cells in this row. If all cells are empty, return an empty list. Each "range" in the list is a tuple of the form `(startcol, endcol)`. For example, if the row is the following: [ ][ 1.0 ][ 23 ][ "foo" ][ ][ "hello" ][ ] then the returned list of ranges will be: [(1, 4), (5, 6)] This algorithm considers a cell to be empty if its type is 0 (XL_EMPTY), or 6 (XL_BLANK), or if it's a text cell containing empty string, or a whitespace-only string. Numeric `0` is not considered empty. """ n = len(rowvalues) assert n == len(rowtypes) if not n: return [] range_start = None ranges = [] for i in range(n): ctype = rowtypes[i] cval = rowvalues[i] # Check whether the cell is empty or not. If it is empty, and there is # an active range being tracked - terminate it. On the other hand, if # the cell is not empty and there isn't an active range, then start it. if ctype == 0 or ctype == 6 or (ctype == 1 and (cval == "" or cval.isspace())): if range_start is not None: ranges.append((range_start, i)) range_start = None else: if range_start is None: range_start = i if range_start is not None: ranges.append((range_start, n)) return ranges
[ "def", "_parse_row", "(", "rowvalues", ",", "rowtypes", ")", ":", "n", "=", "len", "(", "rowvalues", ")", "assert", "n", "==", "len", "(", "rowtypes", ")", "if", "not", "n", ":", "return", "[", "]", "range_start", "=", "None", "ranges", "=", "[", "]", "for", "i", "in", "range", "(", "n", ")", ":", "ctype", "=", "rowtypes", "[", "i", "]", "cval", "=", "rowvalues", "[", "i", "]", "# Check whether the cell is empty or not. If it is empty, and there is", "# an active range being tracked - terminate it. On the other hand, if", "# the cell is not empty and there isn't an active range, then start it.", "if", "ctype", "==", "0", "or", "ctype", "==", "6", "or", "(", "ctype", "==", "1", "and", "(", "cval", "==", "\"\"", "or", "cval", ".", "isspace", "(", ")", ")", ")", ":", "if", "range_start", "is", "not", "None", ":", "ranges", ".", "append", "(", "(", "range_start", ",", "i", ")", ")", "range_start", "=", "None", "else", ":", "if", "range_start", "is", "None", ":", "range_start", "=", "i", "if", "range_start", "is", "not", "None", ":", "ranges", ".", "append", "(", "(", "range_start", ",", "n", ")", ")", "return", "ranges" ]
36.52381
20.333333
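A worked call of _parse_row on the row from its own docstring; the type codes follow the xlrd convention used in the function (0 empty, 1 text, 2 number, 6 blank):

rowvalues = ["", 1.0, 23, "foo", "", "hello", ""]
rowtypes = [0, 2, 2, 1, 6, 1, 0]
_parse_row(rowvalues, rowtypes)   # -> [(1, 4), (5, 6)]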
def create_app(**config):
    """Application Factory

    You can create a new He-Man application with::

        from heman.config import create_app

        app = create_app()  # app can be used as a WSGI application
        app.run()  # Or you can run as a simple web server
    """
    app = Flask(
        __name__, static_folder=None
    )
    if 'MONGO_URI' in os.environ:
        app.config['MONGO_URI'] = os.environ['MONGO_URI']

    app.config['LOG_LEVEL'] = 'DEBUG'
    app.config['SECRET_KEY'] = '2205552d13b5431bb537732bbb051f1214414f5ab34d47'

    configure_logging(app)
    configure_sentry(app)
    configure_api(app)
    configure_mongodb(app)
    configure_login(app)

    return app
[ "def", "create_app", "(", "*", "*", "config", ")", ":", "app", "=", "Flask", "(", "__name__", ",", "static_folder", "=", "None", ")", "if", "'MONGO_URI'", "in", "os", ".", "environ", ":", "app", ".", "config", "[", "'MONGO_URI'", "]", "=", "os", ".", "environ", "[", "'MONGO_URI'", "]", "app", ".", "config", "[", "'LOG_LEVEL'", "]", "=", "'DEBUG'", "app", ".", "config", "[", "'SECRET_KEY'", "]", "=", "'2205552d13b5431bb537732bbb051f1214414f5ab34d47'", "configure_logging", "(", "app", ")", "configure_sentry", "(", "app", ")", "configure_api", "(", "app", ")", "configure_mongodb", "(", "app", ")", "configure_login", "(", "app", ")", "return", "app" ]
24.888889
21.518519
def leave_group(self, group_id, timeout=None): """Call leave group API. https://devdocs.line.me/en/#leave Leave a group. :param str group_id: Group ID :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) """ self._post( '/v2/bot/group/{group_id}/leave'.format(group_id=group_id), timeout=timeout )
[ "def", "leave_group", "(", "self", ",", "group_id", ",", "timeout", "=", "None", ")", ":", "self", ".", "_post", "(", "'/v2/bot/group/{group_id}/leave'", ".", "format", "(", "group_id", "=", "group_id", ")", ",", "timeout", "=", "timeout", ")" ]
32.944444
17.888889
def flags_from_dict(kw):
    """
    This turns a dict whose keys are flags (e.g. for CLOSECIRCUIT,
    CLOSESTREAM) into a flag string, including each flag only if its
    value is true.
    """
    if len(kw) == 0:
        return ''

    flags = ''
    for (k, v) in kw.items():
        if v:
            flags += ' ' + str(k)
    # note that we want the leading space if there's at least one
    # flag.
    return flags
[ "def", "flags_from_dict", "(", "kw", ")", ":", "if", "len", "(", "kw", ")", "==", "0", ":", "return", "''", "flags", "=", "''", "for", "(", "k", ",", "v", ")", "in", "kw", ".", "items", "(", ")", ":", "if", "v", ":", "flags", "+=", "' '", "+", "str", "(", "k", ")", "# note that we want the leading space if there's at least one", "# flag.", "return", "flags" ]
23.1875
20.1875
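A quick illustration of flags_from_dict with invented flag names; note the leading space, which the caller relies on when appending the result to a command string:

flags_from_dict({"IfUnused": True, "Flooded": False})   # -> ' IfUnused'
flags_from_dict({})                                      # -> ''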
def re_filter(text, regexps): """Filter text using regular expressions.""" if not regexps: return text matched_text = [] compiled_regexps = [re.compile(x) for x in regexps] for line in text: if line in matched_text: continue for regexp in compiled_regexps: found = regexp.search(line) if found and found.group(): matched_text.append(line) return matched_text or text
[ "def", "re_filter", "(", "text", ",", "regexps", ")", ":", "if", "not", "regexps", ":", "return", "text", "matched_text", "=", "[", "]", "compiled_regexps", "=", "[", "re", ".", "compile", "(", "x", ")", "for", "x", "in", "regexps", "]", "for", "line", "in", "text", ":", "if", "line", "in", "matched_text", ":", "continue", "for", "regexp", "in", "compiled_regexps", ":", "found", "=", "regexp", ".", "search", "(", "line", ")", "if", "found", "and", "found", ".", "group", "(", ")", ":", "matched_text", ".", "append", "(", "line", ")", "return", "matched_text", "or", "text" ]
26.647059
15.705882
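A small usage sketch of re_filter with invented log lines; lines matching any pattern are kept, and the original text is returned unchanged when nothing matches:

text = [
    "kernel: usb 1-1 new device",
    "cron[42]: job started",
    "kernel: usb 1-1 disconnect",
]
re_filter(text, [r"^kernel:"])
# -> ['kernel: usb 1-1 new device', 'kernel: usb 1-1 disconnect']
re_filter(text, [r"^systemd:"])
# -> the full original list, because matched_text stays empty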
def get_hash(path, form='sha256', chunk_size=65536): ''' Get the hash sum of a file This is better than ``get_sum`` for the following reasons: - It does not read the entire file into memory. - It does not return a string on error. The returned value of ``get_sum`` cannot really be trusted since it is vulnerable to collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'`` ''' hash_type = hasattr(hashlib, form) and getattr(hashlib, form) or None if hash_type is None: raise ValueError('Invalid hash type: {0}'.format(form)) with salt.utils.files.fopen(path, 'rb') as ifile: hash_obj = hash_type() # read the file in in chunks, not the entire file for chunk in iter(lambda: ifile.read(chunk_size), b''): hash_obj.update(chunk) return hash_obj.hexdigest()
[ "def", "get_hash", "(", "path", ",", "form", "=", "'sha256'", ",", "chunk_size", "=", "65536", ")", ":", "hash_type", "=", "hasattr", "(", "hashlib", ",", "form", ")", "and", "getattr", "(", "hashlib", ",", "form", ")", "or", "None", "if", "hash_type", "is", "None", ":", "raise", "ValueError", "(", "'Invalid hash type: {0}'", ".", "format", "(", "form", ")", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "path", ",", "'rb'", ")", "as", "ifile", ":", "hash_obj", "=", "hash_type", "(", ")", "# read the file in in chunks, not the entire file", "for", "chunk", "in", "iter", "(", "lambda", ":", "ifile", ".", "read", "(", "chunk_size", ")", ",", "b''", ")", ":", "hash_obj", ".", "update", "(", "chunk", ")", "return", "hash_obj", ".", "hexdigest", "(", ")" ]
43.2
22.4
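The same chunked-hashing pattern as get_hash, sketched without the salt.utils.files helper so it runs standalone; the file path is a placeholder:

import hashlib

def chunked_hash(path, form='sha256', chunk_size=65536):
    hash_type = getattr(hashlib, form, None)
    if hash_type is None:
        raise ValueError('Invalid hash type: {0}'.format(form))
    hash_obj = hash_type()
    with open(path, 'rb') as ifile:
        # read the file in chunks, never the whole file at once
        for chunk in iter(lambda: ifile.read(chunk_size), b''):
            hash_obj.update(chunk)
    return hash_obj.hexdigest()

# chunked_hash('/tmp/example.bin')  # placeholder path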
def build_table(table, meta_data): """ This returns a table object with all rows and cells correctly populated. """ # Create a blank table element. table_el = etree.Element('table') w_namespace = get_namespace(table, 'w') # Get the rowspan values for cells that have a rowspan. row_spans = get_rowspan_data(table) for el in table: if el.tag == '%str' % w_namespace: # Create the tr element. tr_el = build_tr( el, meta_data, row_spans, ) # And append it to the table. table_el.append(tr_el) visited_nodes = list(table.iter()) return table_el, visited_nodes
[ "def", "build_table", "(", "table", ",", "meta_data", ")", ":", "# Create a blank table element.", "table_el", "=", "etree", ".", "Element", "(", "'table'", ")", "w_namespace", "=", "get_namespace", "(", "table", ",", "'w'", ")", "# Get the rowspan values for cells that have a rowspan.", "row_spans", "=", "get_rowspan_data", "(", "table", ")", "for", "el", "in", "table", ":", "if", "el", ".", "tag", "==", "'%str'", "%", "w_namespace", ":", "# Create the tr element.", "tr_el", "=", "build_tr", "(", "el", ",", "meta_data", ",", "row_spans", ",", ")", "# And append it to the table.", "table_el", ".", "append", "(", "tr_el", ")", "visited_nodes", "=", "list", "(", "table", ".", "iter", "(", ")", ")", "return", "table_el", ",", "visited_nodes" ]
28.958333
13.375
def collect_vocab(qp_pairs): ''' Build the vocab from corpus. ''' vocab = set() for qp_pair in qp_pairs: for word in qp_pair['question_tokens']: vocab.add(word['word']) for word in qp_pair['passage_tokens']: vocab.add(word['word']) return vocab
[ "def", "collect_vocab", "(", "qp_pairs", ")", ":", "vocab", "=", "set", "(", ")", "for", "qp_pair", "in", "qp_pairs", ":", "for", "word", "in", "qp_pair", "[", "'question_tokens'", "]", ":", "vocab", ".", "add", "(", "word", "[", "'word'", "]", ")", "for", "word", "in", "qp_pair", "[", "'passage_tokens'", "]", ":", "vocab", ".", "add", "(", "word", "[", "'word'", "]", ")", "return", "vocab" ]
27.090909
15.272727
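An invented qp_pair showing the structure collect_vocab expects: token dicts with a 'word' key under question_tokens and passage_tokens:

qp_pairs = [{
    'question_tokens': [{'word': 'who'}, {'word': 'wrote'}, {'word': 'it'}],
    'passage_tokens': [{'word': 'it'}, {'word': 'was'}, {'word': 'anonymous'}],
}]
collect_vocab(qp_pairs)
# -> {'who', 'wrote', 'it', 'was', 'anonymous'}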
def ec2_elasticip_elasticip_ipaddress(self, lookup, default=None): """ Args: lookup: the CloudFormation resource name of the Elastic IP address to look up default: the optional value to return if lookup failed; returns None if not set Returns: The IP address of the first Elastic IP found with a description matching 'lookup' or default/None if no match """ # Extract environment from resource ID to build stack name m = re.search('ElasticIp([A-Z]?[a-z]+[0-9]?)\w+', lookup) # The lookup string was not a valid ElasticIp resource label if m is None: return default env = m.group(1) stackname = "{}-elasticip".format(env.lower()) # Convert env substring to title in case {{ENV}} substitution is being used lookup = lookup.replace(env, env.title()) # Look up the EIP resource in the stack to get the IP address assigned to the EIP try: eip_stack = EFAwsResolver.__CLIENTS["cloudformation"].describe_stack_resources( StackName=stackname, LogicalResourceId=lookup ) except ClientError: return default stack_resources = eip_stack["StackResources"] # Resource does not exist in stack if len(stack_resources) < 1: return default eip_publicip = stack_resources[0]["PhysicalResourceId"] return eip_publicip
[ "def", "ec2_elasticip_elasticip_ipaddress", "(", "self", ",", "lookup", ",", "default", "=", "None", ")", ":", "# Extract environment from resource ID to build stack name", "m", "=", "re", ".", "search", "(", "'ElasticIp([A-Z]?[a-z]+[0-9]?)\\w+'", ",", "lookup", ")", "# The lookup string was not a valid ElasticIp resource label", "if", "m", "is", "None", ":", "return", "default", "env", "=", "m", ".", "group", "(", "1", ")", "stackname", "=", "\"{}-elasticip\"", ".", "format", "(", "env", ".", "lower", "(", ")", ")", "# Convert env substring to title in case {{ENV}} substitution is being used", "lookup", "=", "lookup", ".", "replace", "(", "env", ",", "env", ".", "title", "(", ")", ")", "# Look up the EIP resource in the stack to get the IP address assigned to the EIP", "try", ":", "eip_stack", "=", "EFAwsResolver", ".", "__CLIENTS", "[", "\"cloudformation\"", "]", ".", "describe_stack_resources", "(", "StackName", "=", "stackname", ",", "LogicalResourceId", "=", "lookup", ")", "except", "ClientError", ":", "return", "default", "stack_resources", "=", "eip_stack", "[", "\"StackResources\"", "]", "# Resource does not exist in stack", "if", "len", "(", "stack_resources", ")", "<", "1", ":", "return", "default", "eip_publicip", "=", "stack_resources", "[", "0", "]", "[", "\"PhysicalResourceId\"", "]", "return", "eip_publicip" ]
42.290323
23.193548
def return_obj(cols, df, return_cols=False): """Construct a DataFrameHolder and then return either that or the DataFrame.""" df_holder = DataFrameHolder(cols=cols, df=df) return df_holder.return_self(return_cols=return_cols)
[ "def", "return_obj", "(", "cols", ",", "df", ",", "return_cols", "=", "False", ")", ":", "df_holder", "=", "DataFrameHolder", "(", "cols", "=", "cols", ",", "df", "=", "df", ")", "return", "df_holder", ".", "return_self", "(", "return_cols", "=", "return_cols", ")" ]
61.25
9.5
def node_mkdir(self, path=''): 'Does not raise any errors if dir already exists.' return self(path, data=dict(kind='directory'), encode='json', method='put')
[ "def", "node_mkdir", "(", "self", ",", "path", "=", "''", ")", ":", "return", "self", "(", "path", ",", "data", "=", "dict", "(", "kind", "=", "'directory'", ")", ",", "encode", "=", "'json'", ",", "method", "=", "'put'", ")" ]
53
19.666667
def Watson(T, Hvap_ref, T_Ref, Tc, exponent=0.38):
    '''Adjusts an enthalpy of vaporization from a reference temperature to
    another temperature, for one temperature.
    '''
    Tr = T/Tc
    Trefr = T_Ref/Tc
    H2 = Hvap_ref*((1-Tr)/(1-Trefr))**exponent
    return H2
[ "def", "Watson", "(", "T", ",", "Hvap_ref", ",", "T_Ref", ",", "Tc", ",", "exponent", "=", "0.38", ")", ":", "Tr", "=", "T", "/", "Tc", "Trefr", "=", "T_Ref", "/", "Tc", "H2", "=", "Hvap_ref", "*", "(", "(", "1", "-", "Tr", ")", "/", "(", "1", "-", "Trefr", ")", ")", "**", "exponent", "return", "H2" ]
31.25
26.25
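A worked call of the Watson correlation with illustrative (not authoritative) water values: an Hvap of roughly 43990 J/mol at 298.15 K and Tc of 647.14 K, extrapolated to the normal boiling point:

Watson(T=373.15, Hvap_ref=43990.0, T_Ref=298.15, Tc=647.14)
# -> roughly 4.0e4 J/mol, within a few percent of the accepted
#    enthalpy of vaporization of water at 373 K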
def send_activation_email(self, user): """ Send the activation email. The activation key is the username, signed using TimestampSigner. """ activation_key = self.get_activation_key(user) context = self.get_email_context(activation_key) context.update({ 'user': user }) subject = render_to_string(self.email_subject_template, context) # Force subject to a single line to avoid header-injection # issues. subject = ''.join(subject.splitlines()) message = render_to_string(self.email_body_template, context) user.email_user(subject, message, conf.get('DEFAULT_FROM_EMAIL'))
[ "def", "send_activation_email", "(", "self", ",", "user", ")", ":", "activation_key", "=", "self", ".", "get_activation_key", "(", "user", ")", "context", "=", "self", ".", "get_email_context", "(", "activation_key", ")", "context", ".", "update", "(", "{", "'user'", ":", "user", "}", ")", "subject", "=", "render_to_string", "(", "self", ".", "email_subject_template", ",", "context", ")", "# Force subject to a single line to avoid header-injection", "# issues.", "subject", "=", "''", ".", "join", "(", "subject", ".", "splitlines", "(", ")", ")", "message", "=", "render_to_string", "(", "self", ".", "email_body_template", ",", "context", ")", "user", ".", "email_user", "(", "subject", ",", "message", ",", "conf", ".", "get", "(", "'DEFAULT_FROM_EMAIL'", ")", ")" ]
39.315789
16.052632
def connect_with_key(self, ssh, username, key, address, port, sock, timeout=20): """ Create an ssh session to a remote host with a username and rsa key :type username: str :param username: username used for ssh authentication :type key: :py:class:`paramiko.key.RSAKey` :param key: paramiko rsa key used for ssh authentication :type address: str :param address: remote server address :type port: int :param port: remote server port """ ssh.connect(hostname=address, port=port, username=username, pkey=key, sock=sock, timeout=timeout)
[ "def", "connect_with_key", "(", "self", ",", "ssh", ",", "username", ",", "key", ",", "address", ",", "port", ",", "sock", ",", "timeout", "=", "20", ")", ":", "ssh", ".", "connect", "(", "hostname", "=", "address", ",", "port", "=", "port", ",", "username", "=", "username", ",", "pkey", "=", "key", ",", "sock", "=", "sock", ",", "timeout", "=", "timeout", ")" ]
36.75
12.45
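A hedged sketch of how connect_with_key is typically driven with paramiko; the host, user and key path are placeholders, and the sock argument is simply omitted here:

import paramiko

key = paramiko.RSAKey.from_private_key_file('/home/deploy/.ssh/id_rsa')  # placeholder path
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# roughly equivalent to connect_with_key(ssh, 'deploy', key, '203.0.113.10', 22, None)
ssh.connect(hostname='203.0.113.10', port=22, username='deploy',
            pkey=key, timeout=20)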
def post(self, request, *args, **kwargs): """ Validates subscription data before creating Outbound message """ schedule_disable.delay(kwargs["subscription_id"]) return Response({"accepted": True}, status=201)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "schedule_disable", ".", "delay", "(", "kwargs", "[", "\"subscription_id\"", "]", ")", "return", "Response", "(", "{", "\"accepted\"", ":", "True", "}", ",", "status", "=", "201", ")" ]
47.2
6.6
def idle_print_status(self): '''print out statistics every 10 seconds from idle loop''' now = time.time() if (now - self.last_idle_status_printed_time) >= 10: print (self.status()) self.last_idle_status_printed_time = now self.prev_download = self.download
[ "def", "idle_print_status", "(", "self", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "(", "now", "-", "self", ".", "last_idle_status_printed_time", ")", ">=", "10", ":", "print", "(", "self", ".", "status", "(", ")", ")", "self", ".", "last_idle_status_printed_time", "=", "now", "self", ".", "prev_download", "=", "self", ".", "download" ]
44.285714
14
def get_bytes(self, addr, size, **kwargs):
    '''Reading bytes of any arbitrary size

    Parameters
    ----------
    addr : int
        The register address.
    size : int
        Byte length of the value.

    Returns
    -------
    data : iterable
        Byte array.
    '''
    return self._intf.read(self._conf['base_addr'] + addr, size)
[ "def", "get_bytes", "(", "self", ",", "addr", ",", "size", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_intf", ".", "read", "(", "self", ".", "_conf", "[", "'base_addr'", "]", "+", "addr", ",", "size", ")" ]
25.0625
19.6875
def set_off(self): """Turn the bulb off.""" try: request = requests.post( '{}/{}/{}/'.format(self.resource, URI, self._mac), data={'action': 'off'}, timeout=self.timeout) if request.status_code == 200: pass except requests.exceptions.ConnectionError: raise exceptions.MyStromConnectionError()
[ "def", "set_off", "(", "self", ")", ":", "try", ":", "request", "=", "requests", ".", "post", "(", "'{}/{}/{}/'", ".", "format", "(", "self", ".", "resource", ",", "URI", ",", "self", ".", "_mac", ")", ",", "data", "=", "{", "'action'", ":", "'off'", "}", ",", "timeout", "=", "self", ".", "timeout", ")", "if", "request", ".", "status_code", "==", "200", ":", "pass", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "raise", "exceptions", ".", "MyStromConnectionError", "(", ")" ]
39.1
14.7
def _set_status_self(self, key=JobDetails.topkey, status=JobStatus.unknown): """Set the status of this job, both in self.jobs and in the `JobArchive` if it is present. """ fullkey = JobDetails.make_fullkey(self.full_linkname, key) if fullkey in self.jobs: self.jobs[fullkey].status = status if self._job_archive: self._job_archive.register_job(self.jobs[fullkey]) else: self._register_self('dummy.log', key, status)
[ "def", "_set_status_self", "(", "self", ",", "key", "=", "JobDetails", ".", "topkey", ",", "status", "=", "JobStatus", ".", "unknown", ")", ":", "fullkey", "=", "JobDetails", ".", "make_fullkey", "(", "self", ".", "full_linkname", ",", "key", ")", "if", "fullkey", "in", "self", ".", "jobs", ":", "self", ".", "jobs", "[", "fullkey", "]", ".", "status", "=", "status", "if", "self", ".", "_job_archive", ":", "self", ".", "_job_archive", ".", "register_job", "(", "self", ".", "jobs", "[", "fullkey", "]", ")", "else", ":", "self", ".", "_register_self", "(", "'dummy.log'", ",", "key", ",", "status", ")" ]
49.8
15.3
def downsample_grid(a, b, samples, ret_idx=False): """Content-based downsampling for faster visualization The arrays `a` and `b` make up a 2D scatter plot with high and low density values. This method takes out points at indices with high density. Parameters ---------- a, b: 1d ndarrays The input arrays to downsample samples: int The desired number of samples remove_invalid: bool Remove nan and inf values before downsampling ret_idx: bool Also return a boolean array that corresponds to the downsampled indices in `a` and `b`. Returns ------- dsa, dsb: 1d ndarrays of shape (samples,) The arrays `a` and `b` downsampled by evenly selecting points and pseudo-randomly adding or removing points to match `samples`. idx: 1d boolean array with same shape as `a` Only returned if `ret_idx` is True. A boolean array such that `a[idx] == dsa` """ # fixed random state for this method rs = np.random.RandomState(seed=47).get_state() samples = int(samples) if samples and samples < a.size: # The events to keep keep = np.zeros_like(a, dtype=bool) # 1. Produce evenly distributed samples # Choosing grid-size: # - large numbers tend to show actual structures of the sample, # which is not desired for plotting # - small numbers tend will not result in too few samples and, # in order to reach the desired samples, the data must be # upsampled again. # 300 is about the size of the plot in marker sizes and yields # good results. grid_size = 300 xpx = norm(a, a, b) * grid_size ypx = norm(b, b, a) * grid_size # The events on the grid to process toproc = np.ones((grid_size, grid_size), dtype=bool) for ii in range(xpx.size): xi = xpx[ii] yi = ypx[ii] # filter for overlapping events if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]: toproc[int(xi-1), int(yi-1)] = False # include event keep[ii] = True # 2. Make sure that we reach `samples` by adding or # removing events. diff = np.sum(keep) - samples if diff > 0: # Too many samples rem_indices = np.where(keep)[0] np.random.set_state(rs) rem = np.random.choice(rem_indices, size=diff, replace=False) keep[rem] = False elif diff < 0: # Not enough samples add_indices = np.where(~keep)[0] np.random.set_state(rs) add = np.random.choice(add_indices, size=abs(diff), replace=False) keep[add] = True assert np.sum(keep) == samples, "sanity check" asd = a[keep] bsd = b[keep] assert np.allclose(a[keep], asd, equal_nan=True), "sanity check" assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check" else: keep = np.ones_like(a, dtype=bool) asd = a bsd = b if ret_idx: return asd, bsd, keep else: return asd, bsd
[ "def", "downsample_grid", "(", "a", ",", "b", ",", "samples", ",", "ret_idx", "=", "False", ")", ":", "# fixed random state for this method", "rs", "=", "np", ".", "random", ".", "RandomState", "(", "seed", "=", "47", ")", ".", "get_state", "(", ")", "samples", "=", "int", "(", "samples", ")", "if", "samples", "and", "samples", "<", "a", ".", "size", ":", "# The events to keep", "keep", "=", "np", ".", "zeros_like", "(", "a", ",", "dtype", "=", "bool", ")", "# 1. Produce evenly distributed samples", "# Choosing grid-size:", "# - large numbers tend to show actual structures of the sample,", "# which is not desired for plotting", "# - small numbers tend will not result in too few samples and,", "# in order to reach the desired samples, the data must be", "# upsampled again.", "# 300 is about the size of the plot in marker sizes and yields", "# good results.", "grid_size", "=", "300", "xpx", "=", "norm", "(", "a", ",", "a", ",", "b", ")", "*", "grid_size", "ypx", "=", "norm", "(", "b", ",", "b", ",", "a", ")", "*", "grid_size", "# The events on the grid to process", "toproc", "=", "np", ".", "ones", "(", "(", "grid_size", ",", "grid_size", ")", ",", "dtype", "=", "bool", ")", "for", "ii", "in", "range", "(", "xpx", ".", "size", ")", ":", "xi", "=", "xpx", "[", "ii", "]", "yi", "=", "ypx", "[", "ii", "]", "# filter for overlapping events", "if", "valid", "(", "xi", ",", "yi", ")", "and", "toproc", "[", "int", "(", "xi", "-", "1", ")", ",", "int", "(", "yi", "-", "1", ")", "]", ":", "toproc", "[", "int", "(", "xi", "-", "1", ")", ",", "int", "(", "yi", "-", "1", ")", "]", "=", "False", "# include event", "keep", "[", "ii", "]", "=", "True", "# 2. Make sure that we reach `samples` by adding or", "# removing events.", "diff", "=", "np", ".", "sum", "(", "keep", ")", "-", "samples", "if", "diff", ">", "0", ":", "# Too many samples", "rem_indices", "=", "np", ".", "where", "(", "keep", ")", "[", "0", "]", "np", ".", "random", ".", "set_state", "(", "rs", ")", "rem", "=", "np", ".", "random", ".", "choice", "(", "rem_indices", ",", "size", "=", "diff", ",", "replace", "=", "False", ")", "keep", "[", "rem", "]", "=", "False", "elif", "diff", "<", "0", ":", "# Not enough samples", "add_indices", "=", "np", ".", "where", "(", "~", "keep", ")", "[", "0", "]", "np", ".", "random", ".", "set_state", "(", "rs", ")", "add", "=", "np", ".", "random", ".", "choice", "(", "add_indices", ",", "size", "=", "abs", "(", "diff", ")", ",", "replace", "=", "False", ")", "keep", "[", "add", "]", "=", "True", "assert", "np", ".", "sum", "(", "keep", ")", "==", "samples", ",", "\"sanity check\"", "asd", "=", "a", "[", "keep", "]", "bsd", "=", "b", "[", "keep", "]", "assert", "np", ".", "allclose", "(", "a", "[", "keep", "]", ",", "asd", ",", "equal_nan", "=", "True", ")", ",", "\"sanity check\"", "assert", "np", ".", "allclose", "(", "b", "[", "keep", "]", ",", "bsd", ",", "equal_nan", "=", "True", ")", ",", "\"sanity check\"", "else", ":", "keep", "=", "np", ".", "ones_like", "(", "a", ",", "dtype", "=", "bool", ")", "asd", "=", "a", "bsd", "=", "b", "if", "ret_idx", ":", "return", "asd", ",", "bsd", ",", "keep", "else", ":", "return", "asd", ",", "bsd" ]
33.96875
16.5
def deptree(self, field, oids, date=None, level=None, table=None):
    '''
    Dependency tree builder. Recursively fetches objects that
    are children of the initial set of parent object ids provided.

    :param field: Field that contains the 'parent of' data
    :param oids: Object oids to build dependency tree for
    :param date: date (metrique date range) that should be queried.
                 If date==None then the most recent versions of the
                 objects will be queried.
    :param level: limit depth of recursion
    '''
    table = self.get_table(table)
    fringe = str2list(oids)
    checked = set(fringe)
    loop_k = 0
    while len(fringe) > 0:
        if level and loop_k == abs(level):
            break
        query = '_oid in %s' % list(fringe)
        docs = self.find(table=table, query=query, fields=[field],
                         date=date, raw=True)
        fringe = {oid for doc in docs for oid in (doc[field] or [])
                  if oid not in checked}
        checked |= fringe
        loop_k += 1
    return sorted(checked)
[ "def", "deptree", "(", "self", ",", "field", ",", "oids", ",", "date", "=", "None", ",", "level", "=", "None", ",", "table", "=", "None", ")", ":", "table", "=", "self", ".", "get_table", "(", "table", ")", "fringe", "=", "str2list", "(", "oids", ")", "checked", "=", "set", "(", "fringe", ")", "loop_k", "=", "0", "while", "len", "(", "fringe", ")", ">", "0", ":", "if", "level", "and", "loop_k", "==", "abs", "(", "level", ")", ":", "break", "query", "=", "'_oid in %s'", "%", "list", "(", "fringe", ")", "docs", "=", "self", ".", "find", "(", "table", "=", "table", ",", "query", "=", "query", ",", "fields", "=", "[", "field", "]", ",", "date", "=", "date", ",", "raw", "=", "True", ")", "fringe", "=", "{", "oid", "for", "doc", "in", "docs", "for", "oid", "in", "(", "doc", "[", "field", "]", "or", "[", "]", ")", "if", "oid", "not", "in", "checked", "}", "checked", "|=", "fringe", "loop_k", "+=", "1", "return", "sorted", "(", "checked", ")" ]
42.592593
18.148148
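The `deptree` record above is a breadth-first walk over a 'parent of' field, with `checked` preventing revisits and `level` capping the depth. A self-contained sketch of the same loop over a plain in-memory mapping (hypothetical data, no metrique backend or `str2list` helper):

def deptree_sketch(children_of, roots, level=None):
    """Collect every oid reachable from `roots` through the children_of mapping."""
    fringe = set(roots)
    checked = set(fringe)
    loop_k = 0
    while fringe:
        if level and loop_k == abs(level):
            break
        # expand one level: children of the current fringe that were not seen yet
        fringe = {oid for parent in fringe
                  for oid in children_of.get(parent, [])
                  if oid not in checked}
        checked |= fringe
        loop_k += 1
    return sorted(checked)

# deptree_sketch({1: [2, 3], 2: [4]}, [1]) -> [1, 2, 3, 4]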
def get_subscribers(self, order="created_at desc", offset=None, count=None):
    """Returns a list of subscribers.

    The list is sorted by most-recent-to-subscribe, starting at an
    optional integer ``offset``, and optionally limited to the first
    ``count`` items (in sorted order).

    Returned data includes various statistics about each subscriber,
    e.g., ``total_sent``, ``total_opens``, ``total_clicks``.
    """
    req_data = [ None, order, fmt_paging(offset, count)]
    return self.request("query:Contact.stats", req_data)
[ "def", "get_subscribers", "(", "self", ",", "order", "=", "\"created_at desc\"", ",", "offset", "=", "None", ",", "count", "=", "None", ")", ":", "req_data", "=", "[", "None", ",", "order", ",", "fmt_paging", "(", "offset", ",", "count", ")", "]", "return", "self", ".", "request", "(", "\"query:Contact.stats\"", ",", "req_data", ")" ]
48.666667
31.333333
def post_collect(self, obj):
    """
    Manage the side effect of not collecting other items of the same
    type as the root model.

    If, for example, you run the collect on a specific user that is
    linked to a model "A" which itself holds a ForeignKey to ANOTHER
    user, the collect won't include that other user, yet the collected
    model "A" would still keep the ForeignKey value of a user we are
    not collecting.

    For now, we point that ForeignKey at the root object instead, so
    that model "A" always references an existing user (the meaning
    changes, of course, but only when the field is not unique).

    Before: user1 -> modelA -> user2843
    After collection: user1 -> modelA -> user1
    """
    if not self.ALLOWS_SAME_TYPE_AS_ROOT_COLLECT:
        for field in self.get_local_fields(obj):
            if isinstance(field, ForeignKey) and not field.unique:
                # Relative field's API has been changed Django 2.0
                # See https://docs.djangoproject.com/en/2.0/releases/1.9/#field-rel-changes for details
                if django.VERSION[0] == 2:
                    remote_model = field.remote_field.model
                else:
                    remote_model = field.rel.to

                if isinstance(self.root_obj, remote_model):
                    setattr(obj, field.name, self.root_obj)
[ "def", "post_collect", "(", "self", ",", "obj", ")", ":", "if", "not", "self", ".", "ALLOWS_SAME_TYPE_AS_ROOT_COLLECT", ":", "for", "field", "in", "self", ".", "get_local_fields", "(", "obj", ")", ":", "if", "isinstance", "(", "field", ",", "ForeignKey", ")", "and", "not", "field", ".", "unique", ":", "# Relative field's API has been changed Django 2.0", "# See https://docs.djangoproject.com/en/2.0/releases/1.9/#field-rel-changes for details", "if", "django", ".", "VERSION", "[", "0", "]", "==", "2", ":", "remote_model", "=", "field", ".", "remote_field", ".", "model", "else", ":", "remote_model", "=", "field", ".", "rel", ".", "to", "if", "isinstance", "(", "self", ".", "root_obj", ",", "remote_model", ")", ":", "setattr", "(", "obj", ",", "field", ".", "name", ",", "self", ".", "root_obj", ")" ]
53.111111
28.518519
def cfmakeraw(tflags): """Given a list returned by :py:func:`termios.tcgetattr`, return a list modified in a manner similar to the `cfmakeraw()` C library function, but additionally disabling local echo.""" # BSD: https://github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 # Linux: https://github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ISTRIP INLCR ICRNL IXON IGNPAR') iflag &= ~flags('IGNBRK BRKINT PARMRK') oflag &= ~flags('OPOST') lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG IEXTEN NOFLSH TOSTOP PENDIN') cflag &= ~flags('CSIZE PARENB') cflag |= flags('CS8 CREAD') return [iflag, oflag, cflag, lflag, ispeed, ospeed, cc]
[ "def", "cfmakeraw", "(", "tflags", ")", ":", "# BSD: https://github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162", "# Linux: https://github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20", "iflag", ",", "oflag", ",", "cflag", ",", "lflag", ",", "ispeed", ",", "ospeed", ",", "cc", "=", "tflags", "iflag", "&=", "~", "flags", "(", "'IMAXBEL IXOFF INPCK BRKINT PARMRK ISTRIP INLCR ICRNL IXON IGNPAR'", ")", "iflag", "&=", "~", "flags", "(", "'IGNBRK BRKINT PARMRK'", ")", "oflag", "&=", "~", "flags", "(", "'OPOST'", ")", "lflag", "&=", "~", "flags", "(", "'ECHO ECHOE ECHOK ECHONL ICANON ISIG IEXTEN NOFLSH TOSTOP PENDIN'", ")", "cflag", "&=", "~", "flags", "(", "'CSIZE PARENB'", ")", "cflag", "|=", "flags", "(", "'CS8 CREAD'", ")", "return", "[", "iflag", ",", "oflag", ",", "cflag", ",", "lflag", ",", "ispeed", ",", "ospeed", ",", "cc", "]" ]
57.785714
21.5
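Because `cfmakeraw` takes and returns a termios attribute list, it slots directly between `tcgetattr` and `tcsetattr`. A hedged usage sketch (it assumes `cfmakeraw` and its `flags` helper are importable from the same module, and it always restores the saved settings):

import os
import sys
import termios

fd = sys.stdin.fileno()
saved = termios.tcgetattr(fd)          # remember the cooked-mode settings
try:
    termios.tcsetattr(fd, termios.TCSADRAIN, cfmakeraw(saved))
    ch = os.read(fd, 1)                # raw, unechoed, byte-at-a-time read
finally:
    termios.tcsetattr(fd, termios.TCSADRAIN, saved)   # always restore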
def get_recipes_in_cookbook(name): """Gets the name of all recipes present in a cookbook Returns a list of dictionaries """ recipes = {} path = None cookbook_exists = False metadata_exists = False for cookbook_path in cookbook_paths: path = os.path.join(cookbook_path, name) path_exists = os.path.exists(path) # cookbook exists if present in any of the cookbook paths cookbook_exists = cookbook_exists or path_exists if not path_exists: continue _generate_metadata(path, cookbook_path, name) # Now try to open metadata.json try: with open(os.path.join(path, 'metadata.json'), 'r') as f: try: cookbook = json.loads(f.read()) except ValueError as e: msg = "Little Chef found the following error in your" msg += " {0} file:\n {1}".format( os.path.join(path, 'metadata.json'), e) abort(msg) # Add each recipe defined in the cookbook metadata_exists = True recipe_defaults = { 'description': '', 'version': cookbook.get('version'), 'dependencies': cookbook.get('dependencies', {}).keys(), 'attributes': cookbook.get('attributes', {}) } for recipe in cookbook.get('recipes', []): recipes[recipe] = dict( recipe_defaults, name=recipe, description=cookbook['recipes'][recipe] ) # Cookbook metadata.json was found, don't try next cookbook path # because metadata.json in site-cookbooks has preference break except IOError: # metadata.json was not found, try next cookbook_path pass if not cookbook_exists: abort('Unable to find cookbook "{0}"'.format(name)) elif not metadata_exists: abort('Cookbook "{0}" has no metadata.json'.format(name)) # Add recipes found in the 'recipes' directory but not listed # in the metadata for cookbook_path in cookbook_paths: recipes_dir = os.path.join(cookbook_path, name, 'recipes') if not os.path.isdir(recipes_dir): continue for basename in os.listdir(recipes_dir): fname, ext = os.path.splitext(basename) if ext != '.rb': continue if fname != 'default': recipe = '%s::%s' % (name, fname) else: recipe = name if recipe not in recipes: recipes[recipe] = dict(recipe_defaults, name=recipe) # When a recipe has no default recipe (libraries?), # add one so that it is listed if not recipes: recipes[name] = dict( recipe_defaults, name=name, description='This cookbook has no default recipe' ) return recipes.values()
[ "def", "get_recipes_in_cookbook", "(", "name", ")", ":", "recipes", "=", "{", "}", "path", "=", "None", "cookbook_exists", "=", "False", "metadata_exists", "=", "False", "for", "cookbook_path", "in", "cookbook_paths", ":", "path", "=", "os", ".", "path", ".", "join", "(", "cookbook_path", ",", "name", ")", "path_exists", "=", "os", ".", "path", ".", "exists", "(", "path", ")", "# cookbook exists if present in any of the cookbook paths", "cookbook_exists", "=", "cookbook_exists", "or", "path_exists", "if", "not", "path_exists", ":", "continue", "_generate_metadata", "(", "path", ",", "cookbook_path", ",", "name", ")", "# Now try to open metadata.json", "try", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'metadata.json'", ")", ",", "'r'", ")", "as", "f", ":", "try", ":", "cookbook", "=", "json", ".", "loads", "(", "f", ".", "read", "(", ")", ")", "except", "ValueError", "as", "e", ":", "msg", "=", "\"Little Chef found the following error in your\"", "msg", "+=", "\" {0} file:\\n {1}\"", ".", "format", "(", "os", ".", "path", ".", "join", "(", "path", ",", "'metadata.json'", ")", ",", "e", ")", "abort", "(", "msg", ")", "# Add each recipe defined in the cookbook", "metadata_exists", "=", "True", "recipe_defaults", "=", "{", "'description'", ":", "''", ",", "'version'", ":", "cookbook", ".", "get", "(", "'version'", ")", ",", "'dependencies'", ":", "cookbook", ".", "get", "(", "'dependencies'", ",", "{", "}", ")", ".", "keys", "(", ")", ",", "'attributes'", ":", "cookbook", ".", "get", "(", "'attributes'", ",", "{", "}", ")", "}", "for", "recipe", "in", "cookbook", ".", "get", "(", "'recipes'", ",", "[", "]", ")", ":", "recipes", "[", "recipe", "]", "=", "dict", "(", "recipe_defaults", ",", "name", "=", "recipe", ",", "description", "=", "cookbook", "[", "'recipes'", "]", "[", "recipe", "]", ")", "# Cookbook metadata.json was found, don't try next cookbook path", "# because metadata.json in site-cookbooks has preference", "break", "except", "IOError", ":", "# metadata.json was not found, try next cookbook_path", "pass", "if", "not", "cookbook_exists", ":", "abort", "(", "'Unable to find cookbook \"{0}\"'", ".", "format", "(", "name", ")", ")", "elif", "not", "metadata_exists", ":", "abort", "(", "'Cookbook \"{0}\" has no metadata.json'", ".", "format", "(", "name", ")", ")", "# Add recipes found in the 'recipes' directory but not listed", "# in the metadata", "for", "cookbook_path", "in", "cookbook_paths", ":", "recipes_dir", "=", "os", ".", "path", ".", "join", "(", "cookbook_path", ",", "name", ",", "'recipes'", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "recipes_dir", ")", ":", "continue", "for", "basename", "in", "os", ".", "listdir", "(", "recipes_dir", ")", ":", "fname", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "if", "ext", "!=", "'.rb'", ":", "continue", "if", "fname", "!=", "'default'", ":", "recipe", "=", "'%s::%s'", "%", "(", "name", ",", "fname", ")", "else", ":", "recipe", "=", "name", "if", "recipe", "not", "in", "recipes", ":", "recipes", "[", "recipe", "]", "=", "dict", "(", "recipe_defaults", ",", "name", "=", "recipe", ")", "# When a recipe has no default recipe (libraries?),", "# add one so that it is listed", "if", "not", "recipes", ":", "recipes", "[", "name", "]", "=", "dict", "(", "recipe_defaults", ",", "name", "=", "name", ",", "description", "=", "'This cookbook has no default recipe'", ")", "return", "recipes", ".", "values", "(", ")" ]
38.807692
16.064103
def kind(self): """The type of value to watch, based on :attr:`block`. One of ``variable``, ``list``, or ``block``. ``block`` watchers watch the value of a reporter block. """ if self.block.type.has_command('readVariable'): return 'variable' elif self.block.type.has_command('contentsOfList:'): return 'list' else: return 'block'
[ "def", "kind", "(", "self", ")", ":", "if", "self", ".", "block", ".", "type", ".", "has_command", "(", "'readVariable'", ")", ":", "return", "'variable'", "elif", "self", ".", "block", ".", "type", ".", "has_command", "(", "'contentsOfList:'", ")", ":", "return", "'list'", "else", ":", "return", "'block'" ]
29.357143
20.142857
def resolve_resource_id_refs(self, input_dict, supported_resource_id_refs): """ Updates references to the old logical id of a resource to the new (generated) logical id. Example: {"Ref": "MyLayer"} => {"Ref": "MyLayerABC123"} :param dict input_dict: Dictionary representing the Ref function to be resolved. :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return dict: Dictionary with resource references resolved. """ if not self.can_handle(input_dict): return input_dict ref_value = input_dict[self.intrinsic_name] if not isinstance(ref_value, string_types) or self._resource_ref_separator in ref_value: return input_dict logical_id = ref_value resolved_value = supported_resource_id_refs.get(logical_id) if not resolved_value: return input_dict return { self.intrinsic_name: resolved_value }
[ "def", "resolve_resource_id_refs", "(", "self", ",", "input_dict", ",", "supported_resource_id_refs", ")", ":", "if", "not", "self", ".", "can_handle", "(", "input_dict", ")", ":", "return", "input_dict", "ref_value", "=", "input_dict", "[", "self", ".", "intrinsic_name", "]", "if", "not", "isinstance", "(", "ref_value", ",", "string_types", ")", "or", "self", ".", "_resource_ref_separator", "in", "ref_value", ":", "return", "input_dict", "logical_id", "=", "ref_value", "resolved_value", "=", "supported_resource_id_refs", ".", "get", "(", "logical_id", ")", "if", "not", "resolved_value", ":", "return", "input_dict", "return", "{", "self", ".", "intrinsic_name", ":", "resolved_value", "}" ]
35.571429
27.071429
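The docstring's `{"Ref": "MyLayer"} => {"Ref": "MyLayerABC123"}` example is easy to reproduce: the handler rewrites a bare Ref only when the referenced logical id appears in the mapping and contains no attribute separator. A standalone sketch of that decision with hypothetical ids:

def resolve_ref_sketch(input_dict, id_map, separator="."):
    """Rewrite {"Ref": old_id} to {"Ref": new_id} when old_id is in id_map."""
    ref_value = input_dict.get("Ref")
    if not isinstance(ref_value, str) or separator in ref_value:
        return input_dict                  # not a plain resource reference
    new_id = id_map.get(ref_value)
    return {"Ref": new_id} if new_id else input_dict

# resolve_ref_sketch({"Ref": "MyLayer"}, {"MyLayer": "MyLayerABC123"})
# -> {"Ref": "MyLayerABC123"}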
def search(self):
    """Search srt in the project for cells matching a list of terms."""
    matches = []
    for pattern in Config.patterns:
        matches += self.termfinder(pattern)
    return sorted(set(matches), key=int)
[ "def", "search", "(", "self", ")", ":", "matches", "=", "[", "]", "for", "pattern", "in", "Config", ".", "patterns", ":", "matches", "+=", "self", ".", "termfinder", "(", "pattern", ")", "return", "sorted", "(", "set", "(", "matches", ")", ",", "key", "=", "int", ")" ]
29.5
16.875
def a_not_committed(ctx):
    """Provide the message that the current software is not committed and a reload is not possible."""
    ctx.ctrl.sendline('n')
    ctx.msg = "Some active software packages are not yet committed. Reload may cause software rollback."
    ctx.device.chain.connection.emit_message(ctx.msg, log_level=logging.ERROR)
    ctx.failed = True
    return False
[ "def", "a_not_committed", "(", "ctx", ")", ":", "ctx", ".", "ctrl", ".", "sendline", "(", "'n'", ")", "ctx", ".", "msg", "=", "\"Some active software packages are not yet committed. Reload may cause software rollback.\"", "ctx", ".", "device", ".", "chain", ".", "connection", ".", "emit_message", "(", "ctx", ".", "msg", ",", "log_level", "=", "logging", ".", "ERROR", ")", "ctx", ".", "failed", "=", "True", "return", "False" ]
52.285714
24.857143
def make_syllables(self, sentences_words): """Divide the word tokens into a list of syllables. Note that a syllable in this instance is defined as a vocalic group (i.e., a vowel or a diphthong). This means that all syllables which are not the last syllable in the word will end with a vowel or diphthong. TODO: Determine whether Luke Hollis's module at `cltk.stem.latin.syllabifier could replace this method.` :param sentences_words: A list of sentences with tokenized words. :return: Syllabified words :rtype : list """ all_syllables = [] for sentence in sentences_words: syll_per_sent = [] for word in sentence: syll_start = 0 # Begins syllable iterator syll_per_word = [] cur_letter_in = 0 # Begins general iterator while cur_letter_in < len(word): letter = word[cur_letter_in] if not cur_letter_in == len(word) - 1: if word[cur_letter_in] + word[cur_letter_in + 1] in self.diphthongs: cur_letter_in += 1 # Syllable ends with a diphthong syll_per_word.append( word[syll_start:cur_letter_in + 1]) syll_start = cur_letter_in + 1 elif (letter in self.vowels) or \ (letter in self.long_vowels): # Syllable ends with a vowel syll_per_word.append( word[syll_start:cur_letter_in + 1]) syll_start = cur_letter_in + 1 elif (letter in self.vowels) or \ (letter in self.long_vowels): # Syllable ends with a vowel syll_per_word.append( word[syll_start:cur_letter_in + 1]) syll_start = cur_letter_in + 1 cur_letter_in += 1 try: last_vowel = syll_per_word[-1][-1] # Last vowel of a word # Modifies general iterator for consonants after the last # syllable in a word. cur_letter_in = len( word) - 1 # Contains all of the consonants after the last vowel in a # word leftovers = '' while word[cur_letter_in] != last_vowel: if word[cur_letter_in] != '.': # Adds consonants to leftovers leftovers = word[cur_letter_in] + leftovers cur_letter_in -= 1 # Adds leftovers to last syllable in a word syll_per_word[-1] += leftovers syll_per_sent.append(syll_per_word) except IndexError: logger.info("IndexError while making syllables of '%s'. Continuing.", word) all_syllables.append(syll_per_sent) return all_syllables
[ "def", "make_syllables", "(", "self", ",", "sentences_words", ")", ":", "all_syllables", "=", "[", "]", "for", "sentence", "in", "sentences_words", ":", "syll_per_sent", "=", "[", "]", "for", "word", "in", "sentence", ":", "syll_start", "=", "0", "# Begins syllable iterator", "syll_per_word", "=", "[", "]", "cur_letter_in", "=", "0", "# Begins general iterator", "while", "cur_letter_in", "<", "len", "(", "word", ")", ":", "letter", "=", "word", "[", "cur_letter_in", "]", "if", "not", "cur_letter_in", "==", "len", "(", "word", ")", "-", "1", ":", "if", "word", "[", "cur_letter_in", "]", "+", "word", "[", "cur_letter_in", "+", "1", "]", "in", "self", ".", "diphthongs", ":", "cur_letter_in", "+=", "1", "# Syllable ends with a diphthong", "syll_per_word", ".", "append", "(", "word", "[", "syll_start", ":", "cur_letter_in", "+", "1", "]", ")", "syll_start", "=", "cur_letter_in", "+", "1", "elif", "(", "letter", "in", "self", ".", "vowels", ")", "or", "(", "letter", "in", "self", ".", "long_vowels", ")", ":", "# Syllable ends with a vowel", "syll_per_word", ".", "append", "(", "word", "[", "syll_start", ":", "cur_letter_in", "+", "1", "]", ")", "syll_start", "=", "cur_letter_in", "+", "1", "elif", "(", "letter", "in", "self", ".", "vowels", ")", "or", "(", "letter", "in", "self", ".", "long_vowels", ")", ":", "# Syllable ends with a vowel", "syll_per_word", ".", "append", "(", "word", "[", "syll_start", ":", "cur_letter_in", "+", "1", "]", ")", "syll_start", "=", "cur_letter_in", "+", "1", "cur_letter_in", "+=", "1", "try", ":", "last_vowel", "=", "syll_per_word", "[", "-", "1", "]", "[", "-", "1", "]", "# Last vowel of a word", "# Modifies general iterator for consonants after the last", "# syllable in a word.", "cur_letter_in", "=", "len", "(", "word", ")", "-", "1", "# Contains all of the consonants after the last vowel in a", "# word", "leftovers", "=", "''", "while", "word", "[", "cur_letter_in", "]", "!=", "last_vowel", ":", "if", "word", "[", "cur_letter_in", "]", "!=", "'.'", ":", "# Adds consonants to leftovers", "leftovers", "=", "word", "[", "cur_letter_in", "]", "+", "leftovers", "cur_letter_in", "-=", "1", "# Adds leftovers to last syllable in a word", "syll_per_word", "[", "-", "1", "]", "+=", "leftovers", "syll_per_sent", ".", "append", "(", "syll_per_word", ")", "except", "IndexError", ":", "logger", ".", "info", "(", "\"IndexError while making syllables of '%s'. Continuing.\"", ",", "word", ")", "all_syllables", ".", "append", "(", "syll_per_sent", ")", "return", "all_syllables" ]
50.555556
17.015873
def gradient_rgb( self, text=None, fore=None, back=None, style=None, start=None, stop=None, step=1, linemode=True, movefactor=0): """ Return a black and white gradient. Arguments: text : String to colorize. fore : Foreground color, background will be gradient. back : Background color, foreground will be gradient. style : Name of style to use for the gradient. start : Starting rgb value. stop : Stopping rgb value. step : Number of characters to colorize per color. This allows a "wider" gradient. This will always be greater than 0. linemode : Colorize each line in the input. Default: True movefactor : Amount to shift gradient for each line when `linemode` is set. """ gradargs = { 'step': step, 'fore': fore, 'back': back, 'style': style, } start = start or (0, 0, 0) stop = stop or (255, 255, 255) if linemode: method = self._gradient_rgb_lines gradargs['movefactor'] = movefactor else: method = self._gradient_rgb_line if text: return self.__class__( ''.join(( self.data or '', method( text, start, stop, **gradargs ), )) ) # Operating on self.data. return self.__class__( method( self.stripped(), start, stop, **gradargs ) )
[ "def", "gradient_rgb", "(", "self", ",", "text", "=", "None", ",", "fore", "=", "None", ",", "back", "=", "None", ",", "style", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "1", ",", "linemode", "=", "True", ",", "movefactor", "=", "0", ")", ":", "gradargs", "=", "{", "'step'", ":", "step", ",", "'fore'", ":", "fore", ",", "'back'", ":", "back", ",", "'style'", ":", "style", ",", "}", "start", "=", "start", "or", "(", "0", ",", "0", ",", "0", ")", "stop", "=", "stop", "or", "(", "255", ",", "255", ",", "255", ")", "if", "linemode", ":", "method", "=", "self", ".", "_gradient_rgb_lines", "gradargs", "[", "'movefactor'", "]", "=", "movefactor", "else", ":", "method", "=", "self", ".", "_gradient_rgb_line", "if", "text", ":", "return", "self", ".", "__class__", "(", "''", ".", "join", "(", "(", "self", ".", "data", "or", "''", ",", "method", "(", "text", ",", "start", ",", "stop", ",", "*", "*", "gradargs", ")", ",", ")", ")", ")", "# Operating on self.data.", "return", "self", ".", "__class__", "(", "method", "(", "self", ".", "stripped", "(", ")", ",", "start", ",", "stop", ",", "*", "*", "gradargs", ")", ")" ]
33.892857
17.428571
def declare_queue(self, queue_name): """Declare a queue. Has no effect if a queue with the given name already exists. Parameters: queue_name(str): The name of the new queue. Raises: ConnectionClosed: If the underlying channel or connection has been closed. """ attempts = 1 while True: try: if queue_name not in self.queues: self.emit_before("declare_queue", queue_name) self._declare_queue(queue_name) self.queues.add(queue_name) self.emit_after("declare_queue", queue_name) delayed_name = dq_name(queue_name) self._declare_dq_queue(queue_name) self.delay_queues.add(delayed_name) self.emit_after("declare_delay_queue", delayed_name) self._declare_xq_queue(queue_name) break except (pika.exceptions.AMQPConnectionError, pika.exceptions.AMQPChannelError) as e: # pragma: no cover # Delete the channel and the connection so that the next # caller may initiate new ones of each. del self.channel del self.connection attempts += 1 if attempts > MAX_DECLARE_ATTEMPTS: raise ConnectionClosed(e) from None self.logger.debug( "Retrying declare due to closed connection. [%d/%d]", attempts, MAX_DECLARE_ATTEMPTS, )
[ "def", "declare_queue", "(", "self", ",", "queue_name", ")", ":", "attempts", "=", "1", "while", "True", ":", "try", ":", "if", "queue_name", "not", "in", "self", ".", "queues", ":", "self", ".", "emit_before", "(", "\"declare_queue\"", ",", "queue_name", ")", "self", ".", "_declare_queue", "(", "queue_name", ")", "self", ".", "queues", ".", "add", "(", "queue_name", ")", "self", ".", "emit_after", "(", "\"declare_queue\"", ",", "queue_name", ")", "delayed_name", "=", "dq_name", "(", "queue_name", ")", "self", ".", "_declare_dq_queue", "(", "queue_name", ")", "self", ".", "delay_queues", ".", "add", "(", "delayed_name", ")", "self", ".", "emit_after", "(", "\"declare_delay_queue\"", ",", "delayed_name", ")", "self", ".", "_declare_xq_queue", "(", "queue_name", ")", "break", "except", "(", "pika", ".", "exceptions", ".", "AMQPConnectionError", ",", "pika", ".", "exceptions", ".", "AMQPChannelError", ")", "as", "e", ":", "# pragma: no cover", "# Delete the channel and the connection so that the next", "# caller may initiate new ones of each.", "del", "self", ".", "channel", "del", "self", ".", "connection", "attempts", "+=", "1", "if", "attempts", ">", "MAX_DECLARE_ATTEMPTS", ":", "raise", "ConnectionClosed", "(", "e", ")", "from", "None", "self", ".", "logger", ".", "debug", "(", "\"Retrying declare due to closed connection. [%d/%d]\"", ",", "attempts", ",", "MAX_DECLARE_ATTEMPTS", ",", ")" ]
38.214286
19.714286
def ternary_operation(x):
    """Ternary operation that uses a threshold computed from the weights."""
    g = tf.get_default_graph()
    with g.gradient_override_map({"Sign": "Identity"}):
        threshold = _compute_threshold(x)
        x = tf.sign(tf.add(tf.sign(tf.add(x, threshold)), tf.sign(tf.add(x, -threshold))))
        return x
[ "def", "ternary_operation", "(", "x", ")", ":", "g", "=", "tf", ".", "get_default_graph", "(", ")", "with", "g", ".", "gradient_override_map", "(", "{", "\"Sign\"", ":", "\"Identity\"", "}", ")", ":", "threshold", "=", "_compute_threshold", "(", "x", ")", "x", "=", "tf", ".", "sign", "(", "tf", ".", "add", "(", "tf", ".", "sign", "(", "tf", ".", "add", "(", "x", ",", "threshold", ")", ")", ",", "tf", ".", "sign", "(", "tf", ".", "add", "(", "x", ",", "-", "threshold", ")", ")", ")", ")", "return", "x" ]
45.857143
16.428571
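Stripped of the TensorFlow graph machinery, the ternarization in `ternary_operation` is a pair of nested signs: values below -threshold map to -1, values above +threshold map to +1, and everything in between maps to 0. A NumPy illustration of the same arithmetic, with the threshold fixed by hand instead of derived from the weights via `_compute_threshold`:

import numpy as np

x = np.array([-0.8, -0.1, 0.0, 0.05, 0.6])
threshold = 0.3   # assumed value; the original computes it from the weights
ternary = np.sign(np.sign(x + threshold) + np.sign(x - threshold))
print(ternary)    # [-1.  0.  0.  0.  1.]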
def analyze(self, s, method='chebyshev', order=30): r"""Convenience alias to :meth:`filter`.""" if s.ndim == 3 and s.shape[-1] != 1: raise ValueError('Last dimension (#features) should be ' '1, got {}.'.format(s.shape)) return self.filter(s, method, order)
[ "def", "analyze", "(", "self", ",", "s", ",", "method", "=", "'chebyshev'", ",", "order", "=", "30", ")", ":", "if", "s", ".", "ndim", "==", "3", "and", "s", ".", "shape", "[", "-", "1", "]", "!=", "1", ":", "raise", "ValueError", "(", "'Last dimension (#features) should be '", "'1, got {}.'", ".", "format", "(", "s", ".", "shape", ")", ")", "return", "self", ".", "filter", "(", "s", ",", "method", ",", "order", ")" ]
52.666667
10.833333
def rc2lar(k):
    """Convert reflection coefficients to log area ratios.

    :param k: reflection coefficients
    :return: log area ratios

    The log area ratio is defined by G = log((1+k)/(1-k)), where the K
    parameter is the reflection coefficient.

    .. seealso:: :func:`lar2rc`, :func:`rc2poly`, :func:`rc2ac`, :func:`rc2ic`.

    :References:
        [1] J. Makhoul, "Linear Prediction: A Tutorial Review," Proc. IEEE, Vol.63, No.4, pp.561-580, Apr 1975.
    """
    assert numpy.isrealobj(k), 'Log area ratios not defined for complex reflection coefficients.'

    if max(numpy.abs(k)) >= 1:
        raise ValueError('All reflection coefficients should have magnitude less than unity.')

    # Use the relation, atanh(x) = (1/2)*log((1+k)/(1-k))
    return -2 * numpy.arctanh(-numpy.array(k))
[ "def", "rc2lar", "(", "k", ")", ":", "assert", "numpy", ".", "isrealobj", "(", "k", ")", ",", "'Log area ratios not defined for complex reflection coefficients.'", "if", "max", "(", "numpy", ".", "abs", "(", "k", ")", ")", ">=", "1", ":", "raise", "ValueError", "(", "'All reflection coefficients should have magnitude less than unity.'", ")", "# Use the relation, atanh(x) = (1/2)*log((1+k)/(1-k))", "return", "-", "2", "*", "numpy", ".", "arctanh", "(", "-", "numpy", ".", "array", "(", "k", ")", ")" ]
37.904762
27.857143
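Since atanh(k) = (1/2)*log((1+k)/(1-k)), the value returned by `rc2lar`, -2*atanh(-k), equals log((1+k)/(1-k)) exactly as the docstring states. A quick numerical check of that identity:

import numpy as np

k = np.array([0.5, -0.9, 0.0])
lar_direct = np.log((1 + k) / (1 - k))       # definition of the log area ratio
lar_formula = -2 * np.arctanh(-k)            # the expression used by rc2lar
print(np.allclose(lar_direct, lar_formula))  # True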
def get_next_record(in_uid, kind='1'): ''' Get next record by time_create. ''' current_rec = MPost.get_by_uid(in_uid) recs = TabPost.select().where( (TabPost.kind == kind) & (TabPost.time_create < current_rec.time_create) ).order_by(TabPost.time_create.desc()) if recs.count(): return recs.get() return None
[ "def", "get_next_record", "(", "in_uid", ",", "kind", "=", "'1'", ")", ":", "current_rec", "=", "MPost", ".", "get_by_uid", "(", "in_uid", ")", "recs", "=", "TabPost", ".", "select", "(", ")", ".", "where", "(", "(", "TabPost", ".", "kind", "==", "kind", ")", "&", "(", "TabPost", ".", "time_create", "<", "current_rec", ".", "time_create", ")", ")", ".", "order_by", "(", "TabPost", ".", "time_create", ".", "desc", "(", ")", ")", "if", "recs", ".", "count", "(", ")", ":", "return", "recs", ".", "get", "(", ")", "return", "None" ]
33
12.166667
def list_repos(config_path=_DEFAULT_CONFIG_PATH, with_packages=False): ''' List all of the local package repositories. :param str config_path: The path to the configuration file for the aptly instance. :param bool with_packages: Return a list of packages in the repo. :return: A dictionary of the repositories. :rtype: dict CLI Example: .. code-block:: bash salt '*' aptly.list_repos ''' _validate_config(config_path) ret = dict() cmd = ['repo', 'list', '-config={}'.format(config_path), '-raw=true'] cmd_ret = _cmd_run(cmd) repos = [line.strip() for line in cmd_ret.splitlines()] log.debug('Found repositories: %s', len(repos)) for name in repos: ret[name] = get_repo(name=name, config_path=config_path, with_packages=with_packages) return ret
[ "def", "list_repos", "(", "config_path", "=", "_DEFAULT_CONFIG_PATH", ",", "with_packages", "=", "False", ")", ":", "_validate_config", "(", "config_path", ")", "ret", "=", "dict", "(", ")", "cmd", "=", "[", "'repo'", ",", "'list'", ",", "'-config={}'", ".", "format", "(", "config_path", ")", ",", "'-raw=true'", "]", "cmd_ret", "=", "_cmd_run", "(", "cmd", ")", "repos", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "cmd_ret", ".", "splitlines", "(", ")", "]", "log", ".", "debug", "(", "'Found repositories: %s'", ",", "len", "(", "repos", ")", ")", "for", "name", "in", "repos", ":", "ret", "[", "name", "]", "=", "get_repo", "(", "name", "=", "name", ",", "config_path", "=", "config_path", ",", "with_packages", "=", "with_packages", ")", "return", "ret" ]
27.9
26.9
def open(self, file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None): """Redirect the call to FakeFileOpen. See FakeFileOpen.call() for description. """ if opener is not None and sys.version_info < (3, 3): raise TypeError( "open() got an unexpected keyword argument 'opener'") fake_open = FakeFileOpen(self.filesystem, use_io=True) return fake_open(file, mode, buffering, encoding, errors, newline, closefd, opener)
[ "def", "open", "(", "self", ",", "file", ",", "mode", "=", "'r'", ",", "buffering", "=", "-", "1", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ",", "closefd", "=", "True", ",", "opener", "=", "None", ")", ":", "if", "opener", "is", "not", "None", "and", "sys", ".", "version_info", "<", "(", "3", ",", "3", ")", ":", "raise", "TypeError", "(", "\"open() got an unexpected keyword argument 'opener'\"", ")", "fake_open", "=", "FakeFileOpen", "(", "self", ".", "filesystem", ",", "use_io", "=", "True", ")", "return", "fake_open", "(", "file", ",", "mode", ",", "buffering", ",", "encoding", ",", "errors", ",", "newline", ",", "closefd", ",", "opener", ")" ]
51.272727
15.636364
def feed_backend(url, clean, fetch_archive, backend_name, backend_params, es_index=None, es_index_enrich=None, project=None, arthur=False, es_aliases=None, projects_json_repo=None): """ Feed Ocean with backend data """ backend = None repo = {'backend_name': backend_name, 'backend_params': backend_params} # repository data to be stored in conf if es_index: clean = False # don't remove index, it could be shared if not get_connector_from_name(backend_name): raise RuntimeError("Unknown backend %s" % backend_name) connector = get_connector_from_name(backend_name) klass = connector[3] # BackendCmd for the connector try: logger.info("Feeding Ocean from %s (%s)", backend_name, es_index) if not es_index: logger.error("Raw index not defined for %s", backend_name) repo['repo_update_start'] = datetime.now().isoformat() # perceval backends fetch params offset = None from_date = None category = None latest_items = None filter_classified = None backend_cmd = klass(*backend_params) parsed_args = vars(backend_cmd.parsed_args) init_args = find_signature_parameters(backend_cmd.BACKEND, parsed_args) if backend_cmd.archive_manager and fetch_archive: archive = Archive(parsed_args['archive_path']) else: archive = backend_cmd.archive_manager.create_archive() if backend_cmd.archive_manager else None init_args['archive'] = archive backend_cmd.backend = backend_cmd.BACKEND(**init_args) backend = backend_cmd.backend ocean_backend = connector[1](backend, fetch_archive=fetch_archive, project=project) elastic_ocean = get_elastic(url, es_index, clean, ocean_backend, es_aliases) ocean_backend.set_elastic(elastic_ocean) ocean_backend.set_projects_json_repo(projects_json_repo) if fetch_archive: signature = inspect.signature(backend.fetch_from_archive) else: signature = inspect.signature(backend.fetch) if 'from_date' in signature.parameters: try: # Support perceval pre and post BackendCommand refactoring from_date = backend_cmd.from_date except AttributeError: from_date = backend_cmd.parsed_args.from_date if 'offset' in signature.parameters: try: offset = backend_cmd.offset except AttributeError: offset = backend_cmd.parsed_args.offset if 'category' in signature.parameters: try: category = backend_cmd.category except AttributeError: try: category = backend_cmd.parsed_args.category except AttributeError: pass if 'filter_classified' in signature.parameters: try: filter_classified = backend_cmd.parsed_args.filter_classified except AttributeError: pass if 'latest_items' in signature.parameters: try: latest_items = backend_cmd.latest_items except AttributeError: latest_items = backend_cmd.parsed_args.latest_items # fetch params support if arthur: # If using arthur just provide the items generator to be used # to collect the items and upload to Elasticsearch aitems = feed_backend_arthur(backend_name, backend_params) ocean_backend.feed(arthur_items=aitems) else: params = {} if latest_items: params['latest_items'] = latest_items if category: params['category'] = category if filter_classified: params['filter_classified'] = filter_classified if from_date and (from_date.replace(tzinfo=None) != parser.parse("1970-01-01")): params['from_date'] = from_date if offset: params['from_offset'] = offset ocean_backend.feed(**params) except Exception as ex: if backend: logger.error("Error feeding ocean from %s (%s): %s", backend_name, backend.origin, ex, exc_info=True) else: logger.error("Error feeding ocean %s", ex, exc_info=True) logger.info("Done %s ", backend_name)
[ "def", "feed_backend", "(", "url", ",", "clean", ",", "fetch_archive", ",", "backend_name", ",", "backend_params", ",", "es_index", "=", "None", ",", "es_index_enrich", "=", "None", ",", "project", "=", "None", ",", "arthur", "=", "False", ",", "es_aliases", "=", "None", ",", "projects_json_repo", "=", "None", ")", ":", "backend", "=", "None", "repo", "=", "{", "'backend_name'", ":", "backend_name", ",", "'backend_params'", ":", "backend_params", "}", "# repository data to be stored in conf", "if", "es_index", ":", "clean", "=", "False", "# don't remove index, it could be shared", "if", "not", "get_connector_from_name", "(", "backend_name", ")", ":", "raise", "RuntimeError", "(", "\"Unknown backend %s\"", "%", "backend_name", ")", "connector", "=", "get_connector_from_name", "(", "backend_name", ")", "klass", "=", "connector", "[", "3", "]", "# BackendCmd for the connector", "try", ":", "logger", ".", "info", "(", "\"Feeding Ocean from %s (%s)\"", ",", "backend_name", ",", "es_index", ")", "if", "not", "es_index", ":", "logger", ".", "error", "(", "\"Raw index not defined for %s\"", ",", "backend_name", ")", "repo", "[", "'repo_update_start'", "]", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "# perceval backends fetch params", "offset", "=", "None", "from_date", "=", "None", "category", "=", "None", "latest_items", "=", "None", "filter_classified", "=", "None", "backend_cmd", "=", "klass", "(", "*", "backend_params", ")", "parsed_args", "=", "vars", "(", "backend_cmd", ".", "parsed_args", ")", "init_args", "=", "find_signature_parameters", "(", "backend_cmd", ".", "BACKEND", ",", "parsed_args", ")", "if", "backend_cmd", ".", "archive_manager", "and", "fetch_archive", ":", "archive", "=", "Archive", "(", "parsed_args", "[", "'archive_path'", "]", ")", "else", ":", "archive", "=", "backend_cmd", ".", "archive_manager", ".", "create_archive", "(", ")", "if", "backend_cmd", ".", "archive_manager", "else", "None", "init_args", "[", "'archive'", "]", "=", "archive", "backend_cmd", ".", "backend", "=", "backend_cmd", ".", "BACKEND", "(", "*", "*", "init_args", ")", "backend", "=", "backend_cmd", ".", "backend", "ocean_backend", "=", "connector", "[", "1", "]", "(", "backend", ",", "fetch_archive", "=", "fetch_archive", ",", "project", "=", "project", ")", "elastic_ocean", "=", "get_elastic", "(", "url", ",", "es_index", ",", "clean", ",", "ocean_backend", ",", "es_aliases", ")", "ocean_backend", ".", "set_elastic", "(", "elastic_ocean", ")", "ocean_backend", ".", "set_projects_json_repo", "(", "projects_json_repo", ")", "if", "fetch_archive", ":", "signature", "=", "inspect", ".", "signature", "(", "backend", ".", "fetch_from_archive", ")", "else", ":", "signature", "=", "inspect", ".", "signature", "(", "backend", ".", "fetch", ")", "if", "'from_date'", "in", "signature", ".", "parameters", ":", "try", ":", "# Support perceval pre and post BackendCommand refactoring", "from_date", "=", "backend_cmd", ".", "from_date", "except", "AttributeError", ":", "from_date", "=", "backend_cmd", ".", "parsed_args", ".", "from_date", "if", "'offset'", "in", "signature", ".", "parameters", ":", "try", ":", "offset", "=", "backend_cmd", ".", "offset", "except", "AttributeError", ":", "offset", "=", "backend_cmd", ".", "parsed_args", ".", "offset", "if", "'category'", "in", "signature", ".", "parameters", ":", "try", ":", "category", "=", "backend_cmd", ".", "category", "except", "AttributeError", ":", "try", ":", "category", "=", "backend_cmd", ".", "parsed_args", ".", "category", 
"except", "AttributeError", ":", "pass", "if", "'filter_classified'", "in", "signature", ".", "parameters", ":", "try", ":", "filter_classified", "=", "backend_cmd", ".", "parsed_args", ".", "filter_classified", "except", "AttributeError", ":", "pass", "if", "'latest_items'", "in", "signature", ".", "parameters", ":", "try", ":", "latest_items", "=", "backend_cmd", ".", "latest_items", "except", "AttributeError", ":", "latest_items", "=", "backend_cmd", ".", "parsed_args", ".", "latest_items", "# fetch params support", "if", "arthur", ":", "# If using arthur just provide the items generator to be used", "# to collect the items and upload to Elasticsearch", "aitems", "=", "feed_backend_arthur", "(", "backend_name", ",", "backend_params", ")", "ocean_backend", ".", "feed", "(", "arthur_items", "=", "aitems", ")", "else", ":", "params", "=", "{", "}", "if", "latest_items", ":", "params", "[", "'latest_items'", "]", "=", "latest_items", "if", "category", ":", "params", "[", "'category'", "]", "=", "category", "if", "filter_classified", ":", "params", "[", "'filter_classified'", "]", "=", "filter_classified", "if", "from_date", "and", "(", "from_date", ".", "replace", "(", "tzinfo", "=", "None", ")", "!=", "parser", ".", "parse", "(", "\"1970-01-01\"", ")", ")", ":", "params", "[", "'from_date'", "]", "=", "from_date", "if", "offset", ":", "params", "[", "'from_offset'", "]", "=", "offset", "ocean_backend", ".", "feed", "(", "*", "*", "params", ")", "except", "Exception", "as", "ex", ":", "if", "backend", ":", "logger", ".", "error", "(", "\"Error feeding ocean from %s (%s): %s\"", ",", "backend_name", ",", "backend", ".", "origin", ",", "ex", ",", "exc_info", "=", "True", ")", "else", ":", "logger", ".", "error", "(", "\"Error feeding ocean %s\"", ",", "ex", ",", "exc_info", "=", "True", ")", "logger", ".", "info", "(", "\"Done %s \"", ",", "backend_name", ")" ]
37.550847
23.29661
def functions(self): """ Returns all documented module level functions in the module sorted alphabetically as a list of `pydoc.Function`. """ p = lambda o: isinstance(o, Function) and self._docfilter(o) return sorted(filter(p, self.doc.values()))
[ "def", "functions", "(", "self", ")", ":", "p", "=", "lambda", "o", ":", "isinstance", "(", "o", ",", "Function", ")", "and", "self", ".", "_docfilter", "(", "o", ")", "return", "sorted", "(", "filter", "(", "p", ",", "self", ".", "doc", ".", "values", "(", ")", ")", ")" ]
41.142857
15.142857
def generate_signing_key(date, region, secret_key): """ Generate signing key. :param date: Date is input from :meth:`datetime.datetime` :param region: Region should be set to bucket region. :param secret_key: Secret access key. """ formatted_date = date.strftime("%Y%m%d") key1_string = 'AWS4' + secret_key key1 = key1_string.encode('utf-8') key2 = hmac.new(key1, formatted_date.encode('utf-8'), hashlib.sha256).digest() key3 = hmac.new(key2, region.encode('utf-8'), hashlib.sha256).digest() key4 = hmac.new(key3, 's3'.encode('utf-8'), hashlib.sha256).digest() return hmac.new(key4, 'aws4_request'.encode('utf-8'), hashlib.sha256).digest()
[ "def", "generate_signing_key", "(", "date", ",", "region", ",", "secret_key", ")", ":", "formatted_date", "=", "date", ".", "strftime", "(", "\"%Y%m%d\"", ")", "key1_string", "=", "'AWS4'", "+", "secret_key", "key1", "=", "key1_string", ".", "encode", "(", "'utf-8'", ")", "key2", "=", "hmac", ".", "new", "(", "key1", ",", "formatted_date", ".", "encode", "(", "'utf-8'", ")", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "key3", "=", "hmac", ".", "new", "(", "key2", ",", "region", ".", "encode", "(", "'utf-8'", ")", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "key4", "=", "hmac", ".", "new", "(", "key3", ",", "'s3'", ".", "encode", "(", "'utf-8'", ")", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "return", "hmac", ".", "new", "(", "key4", ",", "'aws4_request'", ".", "encode", "(", "'utf-8'", ")", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")" ]
37.684211
15.894737
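The chain in `generate_signing_key` is the standard AWS Signature Version 4 key derivation (date, then region, then service, then "aws4_request"), specialized to the "s3" service. A hedged usage sketch with dummy credentials and a placeholder string-to-sign (both hypothetical; the real string-to-sign is assembled elsewhere in the signer):

import datetime
import hashlib
import hmac

signing_key = generate_signing_key(
    datetime.datetime(2024, 1, 15),   # request date
    'us-east-1',                      # bucket region
    'dummy-secret-key',               # hypothetical secret access key
)
string_to_sign = 'AWS4-HMAC-SHA256\n<scope and hashed canonical request>'
signature = hmac.new(signing_key,
                     string_to_sign.encode('utf-8'),
                     hashlib.sha256).hexdigest()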
def find_ui_tree_entity(entity_id=None, entity_value=None, entity_ca=None): """ find the Ariane UI tree menu entity depending on its id (priority), value or context address :param entity_id: the Ariane UI tree menu ID to search :param entity_value: the Ariane UI tree menu Value to search :param entity_ca: the Ariane UI tree menu context address to search :return: """ LOGGER.debug("InjectorUITreeService.find_ui_tree_entity") operation = None search_criteria = None criteria_value = None if entity_id is not None: operation = 'GET_TREE_MENU_ENTITY_I' search_criteria = 'id' criteria_value = entity_id if operation is None and entity_value is not None: operation = 'GET_TREE_MENU_ENTITY_V' search_criteria = 'value' criteria_value = entity_value if operation is None and entity_ca is not None: operation = 'GET_TREE_MENU_ENTITY_C' search_criteria = 'context address' criteria_value = entity_ca ret = None if operation is not None: args = {'properties': {'OPERATION': operation, 'TREE_MENU_ENTITY_ID': criteria_value}} result = InjectorUITreeService.requester.call(args).get() if result.rc == 0: ret = InjectorUITreeEntity.json_2_injector_ui_tree_menu_entity(result.response_content) elif result.rc != 404: err_msg = 'InjectorUITreeService.find_ui_tree_entity - Problem while finding ' \ 'injector UI Tree Menu Entity ('+search_criteria+':' + \ str(criteria_value) + '). ' + \ 'Reason: ' + str(result.response_content) + '-' + str(result.error_message) + \ " (" + str(result.rc) + ")" LOGGER.warning(err_msg) return ret
[ "def", "find_ui_tree_entity", "(", "entity_id", "=", "None", ",", "entity_value", "=", "None", ",", "entity_ca", "=", "None", ")", ":", "LOGGER", ".", "debug", "(", "\"InjectorUITreeService.find_ui_tree_entity\"", ")", "operation", "=", "None", "search_criteria", "=", "None", "criteria_value", "=", "None", "if", "entity_id", "is", "not", "None", ":", "operation", "=", "'GET_TREE_MENU_ENTITY_I'", "search_criteria", "=", "'id'", "criteria_value", "=", "entity_id", "if", "operation", "is", "None", "and", "entity_value", "is", "not", "None", ":", "operation", "=", "'GET_TREE_MENU_ENTITY_V'", "search_criteria", "=", "'value'", "criteria_value", "=", "entity_value", "if", "operation", "is", "None", "and", "entity_ca", "is", "not", "None", ":", "operation", "=", "'GET_TREE_MENU_ENTITY_C'", "search_criteria", "=", "'context address'", "criteria_value", "=", "entity_ca", "ret", "=", "None", "if", "operation", "is", "not", "None", ":", "args", "=", "{", "'properties'", ":", "{", "'OPERATION'", ":", "operation", ",", "'TREE_MENU_ENTITY_ID'", ":", "criteria_value", "}", "}", "result", "=", "InjectorUITreeService", ".", "requester", ".", "call", "(", "args", ")", ".", "get", "(", ")", "if", "result", ".", "rc", "==", "0", ":", "ret", "=", "InjectorUITreeEntity", ".", "json_2_injector_ui_tree_menu_entity", "(", "result", ".", "response_content", ")", "elif", "result", ".", "rc", "!=", "404", ":", "err_msg", "=", "'InjectorUITreeService.find_ui_tree_entity - Problem while finding '", "'injector UI Tree Menu Entity ('", "+", "search_criteria", "+", "':'", "+", "str", "(", "criteria_value", ")", "+", "'). '", "+", "'Reason: '", "+", "str", "(", "result", ".", "response_content", ")", "+", "'-'", "+", "str", "(", "result", ".", "error_message", ")", "+", "\" (\"", "+", "str", "(", "result", ".", "rc", ")", "+", "\")\"", "LOGGER", ".", "warning", "(", "err_msg", ")", "return", "ret" ]
46.97561
21.512195
def label_contiguous_1d(X):
    """
    WARNING: API for this function is not liable to change!!!

    By example:

        X =      [F T T F F T F F F T T T]
        result = [0 1 1 0 0 2 0 0 0 3 3 3]

    Or:
        X =      [0 3 3 0 0 5 5 5 1 1 0 2]
        result = [0 1 1 0 0 2 2 2 3 3 0 4]

    The ``0`` or ``False`` elements of ``X`` are labeled as ``0`` in the output.
    If ``X`` is a boolean array, each contiguous block of ``True`` is given an
    integer label; if ``X`` is not boolean, then each contiguous block of
    identical values is given an integer label. Integer labels are 1, 2, 3, ...
    (i.e. they start at 1 and increase by 1 for each block, with no skipped
    numbers).
    """
    if X.ndim != 1:
        raise ValueError("this is for 1d masks only.")

    is_start = np.empty(len(X), dtype=bool)
    is_start[0] = X[0] # True if X[0] is True or non-zero

    if X.dtype.kind == 'b':
        is_start[1:] = ~X[:-1] & X[1:]
        M = X
    else:
        M = X.astype(bool)
        is_start[1:] = X[:-1] != X[1:]
        is_start[~M] = False

    L = np.cumsum(is_start)
    L[~M] = 0
    return L
[ "def", "label_contiguous_1d", "(", "X", ")", ":", "if", "X", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"this is for 1d masks only.\"", ")", "is_start", "=", "np", ".", "empty", "(", "len", "(", "X", ")", ",", "dtype", "=", "bool", ")", "is_start", "[", "0", "]", "=", "X", "[", "0", "]", "# True if X[0] is True or non-zero", "if", "X", ".", "dtype", ".", "kind", "==", "'b'", ":", "is_start", "[", "1", ":", "]", "=", "~", "X", "[", ":", "-", "1", "]", "&", "X", "[", "1", ":", "]", "M", "=", "X", "else", ":", "M", "=", "X", ".", "astype", "(", "bool", ")", "is_start", "[", "1", ":", "]", "=", "X", "[", ":", "-", "1", "]", "!=", "X", "[", "1", ":", "]", "is_start", "[", "~", "M", "]", "=", "False", "L", "=", "np", ".", "cumsum", "(", "is_start", ")", "L", "[", "~", "M", "]", "=", "0", "return", "L" ]
29.131579
22.684211
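The two examples in the `label_contiguous_1d` docstring can be checked directly; the cumulative sum over block-start flags is what produces consecutive labels with no gaps:

import numpy as np

mask = np.array([False, True, True, False, False, True,
                 False, False, False, True, True, True])
print(label_contiguous_1d(mask))
# [0 1 1 0 0 2 0 0 0 3 3 3]

values = np.array([0, 3, 3, 0, 0, 5, 5, 5, 1, 1, 0, 2])
print(label_contiguous_1d(values))
# [0 1 1 0 0 2 2 2 3 3 0 4]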
def convert_response(allocate_quota_response, project_id):
    """Computes an http status code and message from an `AllocateQuotaResponse`

    The return value is a tuple (code, message) where

    code: is the http status code
    message: is the message to return

    Args:
       allocate_quota_response (:class:`endpoints_management.gen.servicecontrol_v1_messages.AllocateQuotaResponse`):
         the response from calling an api

    Returns:
       tuple(code, message)
    """
    if not allocate_quota_response or not allocate_quota_response.allocateErrors:
        return _IS_OK

    # only allocate_quota the first error for now, as per ESP
    theError = allocate_quota_response.allocateErrors[0]
    error_tuple = _QUOTA_ERROR_CONVERSION.get(theError.code, _IS_UNKNOWN)
    if error_tuple[1].find(u'{') == -1:  # no replacements needed:
        return error_tuple

    updated_msg = error_tuple[1].format(project_id=project_id,
                                        detail=theError.description or u'')
    return error_tuple[0], updated_msg
[ "def", "convert_response", "(", "allocate_quota_response", ",", "project_id", ")", ":", "if", "not", "allocate_quota_response", "or", "not", "allocate_quota_response", ".", "allocateErrors", ":", "return", "_IS_OK", "# only allocate_quota the first error for now, as per ESP", "theError", "=", "allocate_quota_response", ".", "allocateErrors", "[", "0", "]", "error_tuple", "=", "_QUOTA_ERROR_CONVERSION", ".", "get", "(", "theError", ".", "code", ",", "_IS_UNKNOWN", ")", "if", "error_tuple", "[", "1", "]", ".", "find", "(", "u'{'", ")", "==", "-", "1", ":", "# no replacements needed:", "return", "error_tuple", "updated_msg", "=", "error_tuple", "[", "1", "]", ".", "format", "(", "project_id", "=", "project_id", ",", "detail", "=", "theError", ".", "description", "or", "u''", ")", "return", "error_tuple", "[", "0", "]", ",", "updated_msg" ]
37.692308
25.269231
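`convert_response` reduces to a small table lookup plus optional message templating, considering only the first allocate error (matching ESP, as its comment notes). A standalone sketch with a hypothetical error table standing in for `_QUOTA_ERROR_CONVERSION`:

# Hypothetical stand-ins for _IS_OK, _IS_UNKNOWN and _QUOTA_ERROR_CONVERSION.
_OK = (200, '')
_UNKNOWN = (500, 'unknown quota error')
_ERROR_TABLE = {
    'RESOURCE_EXHAUSTED': (429, 'Quota exceeded for project {project_id}: {detail}'),
}

def convert_response_sketch(allocate_errors, project_id):
    if not allocate_errors:
        return _OK
    first = allocate_errors[0]                   # only the first error is converted
    code, template = _ERROR_TABLE.get(first['code'], _UNKNOWN)
    if '{' not in template:
        return code, template                    # nothing to substitute
    return code, template.format(project_id=project_id,
                                 detail=first.get('description') or '')

# convert_response_sketch([{'code': 'RESOURCE_EXHAUSTED', 'description': 'read quota'}], 'my-proj')
# -> (429, 'Quota exceeded for project my-proj: read quota')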
def filter_gradient_threshold(self, analyte, win, threshold, recalc=True): """ Apply gradient threshold filter. Generates threshold filters for the given analytes above and below the specified threshold. Two filters are created with prefixes '_above' and '_below'. '_above' keeps all the data above the threshold. '_below' keeps all the data below the threshold. i.e. to select data below the threshold value, you should turn the '_above' filter off. Parameters ---------- analyte : str Description of `analyte`. threshold : float Description of `threshold`. win : int Window used to calculate gradients (n points) recalc : bool Whether or not to re-calculate the gradients. Returns ------- None """ params = locals() del(params['self']) # calculate absolute gradient if recalc or not self.grads_calced: self.grads = calc_grads(self.Time, self.focus, [analyte], win) self.grads_calced = True below, above = filters.threshold(abs(self.grads[analyte]), threshold) setn = self.filt.maxset + 1 self.filt.add(analyte + '_gthresh_below', below, 'Keep gradient below {:.3e} '.format(threshold) + analyte, params, setn=setn) self.filt.add(analyte + '_gthresh_above', above, 'Keep gradient above {:.3e} '.format(threshold) + analyte, params, setn=setn)
[ "def", "filter_gradient_threshold", "(", "self", ",", "analyte", ",", "win", ",", "threshold", ",", "recalc", "=", "True", ")", ":", "params", "=", "locals", "(", ")", "del", "(", "params", "[", "'self'", "]", ")", "# calculate absolute gradient", "if", "recalc", "or", "not", "self", ".", "grads_calced", ":", "self", ".", "grads", "=", "calc_grads", "(", "self", ".", "Time", ",", "self", ".", "focus", ",", "[", "analyte", "]", ",", "win", ")", "self", ".", "grads_calced", "=", "True", "below", ",", "above", "=", "filters", ".", "threshold", "(", "abs", "(", "self", ".", "grads", "[", "analyte", "]", ")", ",", "threshold", ")", "setn", "=", "self", ".", "filt", ".", "maxset", "+", "1", "self", ".", "filt", ".", "add", "(", "analyte", "+", "'_gthresh_below'", ",", "below", ",", "'Keep gradient below {:.3e} '", ".", "format", "(", "threshold", ")", "+", "analyte", ",", "params", ",", "setn", "=", "setn", ")", "self", ".", "filt", ".", "add", "(", "analyte", "+", "'_gthresh_above'", ",", "above", ",", "'Keep gradient above {:.3e} '", ".", "format", "(", "threshold", ")", "+", "analyte", ",", "params", ",", "setn", "=", "setn", ")" ]
33.58
20.42
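`filter_gradient_threshold` ultimately splits the absolute rolling gradient of one analyte at a threshold, producing complementary keep-masks. A NumPy sketch of that split, using `np.gradient` as a hypothetical stand-in for the module's windowed `calc_grads` and `filters.threshold`:

import numpy as np

def gradient_threshold_sketch(time, signal, threshold):
    """Return (below, above) boolean masks over the absolute gradient of `signal`."""
    grad = np.abs(np.gradient(signal, time))   # simple stand-in for calc_grads(..., win)
    below = grad < threshold                   # keep slowly changing data
    above = grad >= threshold                  # keep rapidly changing data
    return below, above

t = np.linspace(0, 10, 11)
y = np.array([0, 0, 0, 1, 4, 9, 9, 9, 9, 9, 9], dtype=float)
below, above = gradient_threshold_sketch(t, y, threshold=1.0)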
def Import(context, request): """ Read Dimensional-CSV analysis results """ form = request.form # TODO form['file'] sometimes returns a list infile = form['instrument_results_file'][0] if \ isinstance(form['instrument_results_file'], list) else \ form['instrument_results_file'] artoapply = form['artoapply'] override = form['results_override'] instrument = form.get('instrument', None) errors = [] logs = [] # Load the most suitable parser according to file extension/options/etc... parser = None if not hasattr(infile, 'filename'): errors.append(_("No file selected")) parser = TwoDimensionCSVParser(infile) status = get_instrument_import_ar_allowed_states(artoapply) over = get_instrument_import_override(override) importer = TwoDimensionImporter(parser=parser, context=context, allowed_ar_states=status, allowed_analysis_states=None, override=over, instrument_uid=instrument, form=form) tbex = '' try: importer.process() except: tbex = traceback.format_exc() errors = importer.errors logs = importer.logs warns = importer.warns if tbex: errors.append(tbex) results = {'errors': errors, 'log': logs, 'warns': warns} return json.dumps(results)
[ "def", "Import", "(", "context", ",", "request", ")", ":", "form", "=", "request", ".", "form", "# TODO form['file'] sometimes returns a list", "infile", "=", "form", "[", "'instrument_results_file'", "]", "[", "0", "]", "if", "isinstance", "(", "form", "[", "'instrument_results_file'", "]", ",", "list", ")", "else", "form", "[", "'instrument_results_file'", "]", "artoapply", "=", "form", "[", "'artoapply'", "]", "override", "=", "form", "[", "'results_override'", "]", "instrument", "=", "form", ".", "get", "(", "'instrument'", ",", "None", ")", "errors", "=", "[", "]", "logs", "=", "[", "]", "# Load the most suitable parser according to file extension/options/etc...", "parser", "=", "None", "if", "not", "hasattr", "(", "infile", ",", "'filename'", ")", ":", "errors", ".", "append", "(", "_", "(", "\"No file selected\"", ")", ")", "parser", "=", "TwoDimensionCSVParser", "(", "infile", ")", "status", "=", "get_instrument_import_ar_allowed_states", "(", "artoapply", ")", "over", "=", "get_instrument_import_override", "(", "override", ")", "importer", "=", "TwoDimensionImporter", "(", "parser", "=", "parser", ",", "context", "=", "context", ",", "allowed_ar_states", "=", "status", ",", "allowed_analysis_states", "=", "None", ",", "override", "=", "over", ",", "instrument_uid", "=", "instrument", ",", "form", "=", "form", ")", "tbex", "=", "''", "try", ":", "importer", ".", "process", "(", ")", "except", ":", "tbex", "=", "traceback", ".", "format_exc", "(", ")", "errors", "=", "importer", ".", "errors", "logs", "=", "importer", ".", "logs", "warns", "=", "importer", ".", "warns", "if", "tbex", ":", "errors", ".", "append", "(", "tbex", ")", "results", "=", "{", "'errors'", ":", "errors", ",", "'log'", ":", "logs", ",", "'warns'", ":", "warns", "}", "return", "json", ".", "dumps", "(", "results", ")" ]
34.302326
16.860465
def account_history(self, account, count): """ Reports send/receive information for a **account** :param account: Account to get send/receive information for :type account: str :param count: number of blocks to return :type count: int :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.account_history( ... account="xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000", ... count=1 ... ) [ { "hash": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F", "type": "receive", "account": "xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000", "amount": 100000000000000000000000000000000 } ] """ account = self._process_value(account, 'account') count = self._process_value(count, 'int') payload = {"account": account, "count": count} resp = self.call('account_history', payload) history = resp.get('history') or [] for entry in history: entry['amount'] = int(entry['amount']) return history
[ "def", "account_history", "(", "self", ",", "account", ",", "count", ")", ":", "account", "=", "self", ".", "_process_value", "(", "account", ",", "'account'", ")", "count", "=", "self", ".", "_process_value", "(", "count", ",", "'int'", ")", "payload", "=", "{", "\"account\"", ":", "account", ",", "\"count\"", ":", "count", "}", "resp", "=", "self", ".", "call", "(", "'account_history'", ",", "payload", ")", "history", "=", "resp", ".", "get", "(", "'history'", ")", "or", "[", "]", "for", "entry", "in", "history", ":", "entry", "[", "'amount'", "]", "=", "int", "(", "entry", "[", "'amount'", "]", ")", "return", "history" ]
29.846154
23.897436
def novo(args): """ %prog novo reads.fastq Reference-free tGBS pipeline v1. """ from jcvi.assembly.kmer import jellyfish, histogram from jcvi.assembly.preprocess import diginorm from jcvi.formats.fasta import filter as fasta_filter, format from jcvi.apps.cdhit import filter as cdhit_filter p = OptionParser(novo.__doc__) p.add_option("--technology", choices=("illumina", "454", "iontorrent"), default="iontorrent", help="Sequencing platform") p.set_depth(depth=50) p.set_align(pctid=96) p.set_home("cdhit", default="/usr/local/bin/") p.set_home("fiona", default="/usr/local/bin/") p.set_home("jellyfish", default="/usr/local/bin/") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) fastqfile, = args cpus = opts.cpus depth = opts.depth pf, sf = fastqfile.rsplit(".", 1) diginormfile = pf + ".diginorm." + sf if need_update(fastqfile, diginormfile): diginorm([fastqfile, "--single", "--depth={0}".format(depth)]) keepabund = fastqfile + ".keep.abundfilt" sh("cp -s {0} {1}".format(keepabund, diginormfile)) jf = pf + "-K23.histogram" if need_update(diginormfile, jf): jellyfish([diginormfile, "--prefix={0}".format(pf), "--cpus={0}".format(cpus), "--jellyfish_home={0}".format(opts.jellyfish_home)]) genomesize = histogram([jf, pf, "23"]) fiona = pf + ".fiona.fa" if need_update(diginormfile, fiona): cmd = op.join(opts.fiona_home, "fiona") cmd += " -g {0} -nt {1} --sequencing-technology {2}".\ format(genomesize, cpus, opts.technology) cmd += " -vv {0} {1}".format(diginormfile, fiona) logfile = pf + ".fiona.log" sh(cmd, outfile=logfile, errfile=logfile) dedup = "cdhit" pctid = opts.pctid cons = fiona + ".P{0}.{1}.consensus.fasta".format(pctid, dedup) if need_update(fiona, cons): deduplicate([fiona, "--consensus", "--reads", "--pctid={0}".format(pctid), "--cdhit_home={0}".format(opts.cdhit_home)]) filteredfile = pf + ".filtered.fasta" if need_update(cons, filteredfile): covfile = pf + ".cov.fasta" cdhit_filter([cons, "--outfile={0}".format(covfile), "--minsize={0}".format(depth / 5)]) fasta_filter([covfile, "50", "--outfile={0}".format(filteredfile)]) finalfile = pf + ".final.fasta" if need_update(filteredfile, finalfile): format([filteredfile, finalfile, "--sequential=replace", "--prefix={0}_".format(pf)])
[ "def", "novo", "(", "args", ")", ":", "from", "jcvi", ".", "assembly", ".", "kmer", "import", "jellyfish", ",", "histogram", "from", "jcvi", ".", "assembly", ".", "preprocess", "import", "diginorm", "from", "jcvi", ".", "formats", ".", "fasta", "import", "filter", "as", "fasta_filter", ",", "format", "from", "jcvi", ".", "apps", ".", "cdhit", "import", "filter", "as", "cdhit_filter", "p", "=", "OptionParser", "(", "novo", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--technology\"", ",", "choices", "=", "(", "\"illumina\"", ",", "\"454\"", ",", "\"iontorrent\"", ")", ",", "default", "=", "\"iontorrent\"", ",", "help", "=", "\"Sequencing platform\"", ")", "p", ".", "set_depth", "(", "depth", "=", "50", ")", "p", ".", "set_align", "(", "pctid", "=", "96", ")", "p", ".", "set_home", "(", "\"cdhit\"", ",", "default", "=", "\"/usr/local/bin/\"", ")", "p", ".", "set_home", "(", "\"fiona\"", ",", "default", "=", "\"/usr/local/bin/\"", ")", "p", ".", "set_home", "(", "\"jellyfish\"", ",", "default", "=", "\"/usr/local/bin/\"", ")", "p", ".", "set_cpus", "(", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "fastqfile", ",", "=", "args", "cpus", "=", "opts", ".", "cpus", "depth", "=", "opts", ".", "depth", "pf", ",", "sf", "=", "fastqfile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "diginormfile", "=", "pf", "+", "\".diginorm.\"", "+", "sf", "if", "need_update", "(", "fastqfile", ",", "diginormfile", ")", ":", "diginorm", "(", "[", "fastqfile", ",", "\"--single\"", ",", "\"--depth={0}\"", ".", "format", "(", "depth", ")", "]", ")", "keepabund", "=", "fastqfile", "+", "\".keep.abundfilt\"", "sh", "(", "\"cp -s {0} {1}\"", ".", "format", "(", "keepabund", ",", "diginormfile", ")", ")", "jf", "=", "pf", "+", "\"-K23.histogram\"", "if", "need_update", "(", "diginormfile", ",", "jf", ")", ":", "jellyfish", "(", "[", "diginormfile", ",", "\"--prefix={0}\"", ".", "format", "(", "pf", ")", ",", "\"--cpus={0}\"", ".", "format", "(", "cpus", ")", ",", "\"--jellyfish_home={0}\"", ".", "format", "(", "opts", ".", "jellyfish_home", ")", "]", ")", "genomesize", "=", "histogram", "(", "[", "jf", ",", "pf", ",", "\"23\"", "]", ")", "fiona", "=", "pf", "+", "\".fiona.fa\"", "if", "need_update", "(", "diginormfile", ",", "fiona", ")", ":", "cmd", "=", "op", ".", "join", "(", "opts", ".", "fiona_home", ",", "\"fiona\"", ")", "cmd", "+=", "\" -g {0} -nt {1} --sequencing-technology {2}\"", ".", "format", "(", "genomesize", ",", "cpus", ",", "opts", ".", "technology", ")", "cmd", "+=", "\" -vv {0} {1}\"", ".", "format", "(", "diginormfile", ",", "fiona", ")", "logfile", "=", "pf", "+", "\".fiona.log\"", "sh", "(", "cmd", ",", "outfile", "=", "logfile", ",", "errfile", "=", "logfile", ")", "dedup", "=", "\"cdhit\"", "pctid", "=", "opts", ".", "pctid", "cons", "=", "fiona", "+", "\".P{0}.{1}.consensus.fasta\"", ".", "format", "(", "pctid", ",", "dedup", ")", "if", "need_update", "(", "fiona", ",", "cons", ")", ":", "deduplicate", "(", "[", "fiona", ",", "\"--consensus\"", ",", "\"--reads\"", ",", "\"--pctid={0}\"", ".", "format", "(", "pctid", ")", ",", "\"--cdhit_home={0}\"", ".", "format", "(", "opts", ".", "cdhit_home", ")", "]", ")", "filteredfile", "=", "pf", "+", "\".filtered.fasta\"", "if", "need_update", "(", "cons", ",", "filteredfile", ")", ":", "covfile", "=", "pf", "+", "\".cov.fasta\"", "cdhit_filter", "(", "[", "cons", ",", "\"--outfile={0}\"", ".", 
"format", "(", "covfile", ")", ",", "\"--minsize={0}\"", ".", "format", "(", "depth", "/", "5", ")", "]", ")", "fasta_filter", "(", "[", "covfile", ",", "\"50\"", ",", "\"--outfile={0}\"", ".", "format", "(", "filteredfile", ")", "]", ")", "finalfile", "=", "pf", "+", "\".final.fasta\"", "if", "need_update", "(", "filteredfile", ",", "finalfile", ")", ":", "format", "(", "[", "filteredfile", ",", "finalfile", ",", "\"--sequential=replace\"", ",", "\"--prefix={0}_\"", ".", "format", "(", "pf", ")", "]", ")" ]
36.915493
16.943662
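The novo entry above strings its stages together with need_update(input, output), so each step (digital normalization, k-mer counting, fiona error correction, cd-hit deduplication, coverage filtering, renaming) is re-run only when its output is missing or stale. The sketch below is a self-contained approximation of that gating idiom based on file modification times; it illustrates the pattern only and is not jcvi's actual need_update implementation, and the stage callables it chains are hypothetical placeholders.

# Standalone sketch of mtime-based stage gating in the style of need_update.
import os

def needs_update(infile, outfile):
    """Return True if outfile is missing or older than infile."""
    if not os.path.exists(outfile):
        return True
    return os.path.getmtime(outfile) < os.path.getmtime(infile)

def run_stage(infile, outfile, action):
    """Run action(infile, outfile) only when the output is stale, then return outfile."""
    if needs_update(infile, outfile):
        action(infile, outfile)
    return outfile

# Chained like the pipeline above (commented out because the steps are placeholders):
# corrected = run_stage("reads.fastq", "reads.diginorm.fastq", diginorm_step)
# assembled = run_stage(corrected, "reads.fiona.fa", fiona_step)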
def UTCFromGps(gpsWeek, SOW, leapSecs=14):
    """Converts GPS week and seconds-of-week (SOW) to UTC.

    See the comments of the inverse function.
    gpsWeek is the full week number (not modulo 1024).
    """
    secFract = SOW % 1
    epochTuple = gpsEpoch + (-1, -1, 0)
    t0 = time.mktime(epochTuple) - time.timezone  #mktime is localtime, correct for UTC
    tdiff = (gpsWeek * secsInWeek) + SOW - leapSecs
    t = t0 + tdiff
    (year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t)  #use gmtime since localtime does not allow to switch off daylighsavings correction!!!
    return (year, month, day, hh, mm, ss + secFract)
[ "def", "UTCFromGps", "(", "gpsWeek", ",", "SOW", ",", "leapSecs", "=", "14", ")", ":", "secFract", "=", "SOW", "%", "1", "epochTuple", "=", "gpsEpoch", "+", "(", "-", "1", ",", "-", "1", ",", "0", ")", "t0", "=", "time", ".", "mktime", "(", "epochTuple", ")", "-", "time", ".", "timezone", "#mktime is localtime, correct for UTC", "tdiff", "=", "(", "gpsWeek", "*", "secsInWeek", ")", "+", "SOW", "-", "leapSecs", "t", "=", "t0", "+", "tdiff", "(", "year", ",", "month", ",", "day", ",", "hh", ",", "mm", ",", "ss", ",", "dayOfWeek", ",", "julianDay", ",", "daylightsaving", ")", "=", "time", ".", "gmtime", "(", "t", ")", "#use gmtime since localtime does not allow to switch off daylighsavings correction!!!", "return", "(", "year", ",", "month", ",", "day", ",", "hh", ",", "mm", ",", "ss", "+", "secFract", ")" ]
40.625
19.75
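The UTCFromGps entry depends on module-level constants (gpsEpoch, secsInWeek) and a time.mktime/time.timezone round-trip that lie outside the snippet. The sketch below performs the same arithmetic with datetime, assuming the standard GPS epoch of 1980-01-06 00:00:00 UTC and 604800 seconds per week; leap_secs defaults to 14 to mirror the entry, although the GPS-UTC offset has been 18 s since 2017, so pass the value appropriate for your dates.

# Self-contained sketch of GPS week + seconds-of-week -> UTC.
# Assumptions: GPS epoch 1980-01-06 00:00:00 UTC, 604800 seconds per week.
from datetime import datetime, timedelta

GPS_EPOCH = datetime(1980, 1, 6)   # start of GPS time (UTC)
SECS_IN_WEEK = 7 * 24 * 3600       # 604800

def utc_from_gps(gps_week, sow, leap_secs=14):
    """Return a naive UTC datetime for a full GPS week number and seconds-of-week."""
    return GPS_EPOCH + timedelta(seconds=gps_week * SECS_IN_WEEK + sow - leap_secs)

# Example: 345600 s (four days) into GPS week 2200, using the current 18 s offset.
print(utc_from_gps(2200, 345600, leap_secs=18))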