def setDriftLength(self, x):
    if x != self.getDriftLength():
        self._setDriftList(x)
        self.refresh = True
set lengths for drift sections :param x: single double or list :return: None :Example: >>> import beamline >>> chi = beamline.mathutils.Chicane(bend_length=1,bend_field=0.5,drift_length=1,gamma=1000) >>> chi.getMatrix() >>> r56 = chi.getR(5,6) # r56 = -0.432 >>> chi.setDriftLength([2,4,2]) >>> # same effect (to R56) as ``chi.setDriftLength([2,4])`` or ``chi.setDriftLength([2])`` >>> # or ``chi.setDriftLength(2)`` >>> r56 = chi.getR(5,6) # r56 = -0.620
def bind_port(self, context): port = context.current log_context("bind_port: port", port) for segment in context.segments_to_bind: physnet = segment.get(driver_api.PHYSICAL_NETWORK) segment_type = segment[driver_api.NETWORK_TYPE] if not physnet: if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric): if self._bind_fabric(context, segment): continue elif (port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL): if (not self.managed_physnets or physnet in self.managed_physnets): if self._bind_baremetal_port(context, segment): continue LOG.debug("Arista mech driver unable to bind port %(port)s to " "%(seg_type)s segment on physical_network %(physnet)s", {'port': port.get('id'), 'seg_type': segment_type, 'physnet': physnet})
Bind port to a network segment. The provisioning request to Arista hardware to plug a host into the appropriate network is made when the port is created; this method simply tells the ML2 plugin that we are binding the port.
def get_machines_by_groups(self, groups): if not isinstance(groups, list): raise TypeError("groups can only be an instance of type list") for a in groups[:10]: if not isinstance(a, basestring): raise TypeError( "array can only contain objects of type basestring") machines = self._call("getMachinesByGroups", in_p=[groups]) machines = [IMachine(a) for a in machines] return machines
Gets all machine references which are in one of the specified groups.

in groups of type str
    What groups to match. The usual group list rules apply, i.e. passing an empty list will match VMs in the toplevel group, likewise the empty string.

return machines of type :class:`IMachine`
    All machines which matched.
def _digitize_lons(lons, lon_bins): if cross_idl(lon_bins[0], lon_bins[-1]): idx = numpy.zeros_like(lons, dtype=numpy.int) for i_lon in range(len(lon_bins) - 1): extents = get_longitudinal_extent(lons, lon_bins[i_lon + 1]) lon_idx = extents > 0 if i_lon != 0: extents = get_longitudinal_extent(lon_bins[i_lon], lons) lon_idx &= extents >= 0 idx[lon_idx] = i_lon return numpy.array(idx) else: return numpy.digitize(lons, lon_bins) - 1
Return indices of the bins to which each value in lons belongs. Takes into account the case in which longitude values cross the international date line. :parameter lons: An instance of `numpy.ndarray`. :parameter lon_bins: An instance of `numpy.ndarray`.
def parse(self, string, evaluate_result=True):
    m = self._match_re.match(string)
    if m is None:
        return None

    if evaluate_result:
        return self.evaluate_result(m)
    else:
        return Match(self, m)
Match my format to the string exactly. Return a Result or Match instance or None if there's no match.
def validate(self, url):
    if not url.startswith('http') or 'github' not in url:
        bot.error('Test of preview must be given a GitHub repository.')
        return False
    if not self._validate_preview(url):
        return False
    return True
Takes in a GitHub repository for validation of preview and runtime (and possibly tests passing?).
def read_message(self): msg = "" num_blank_lines = 0 while True: with self._reader_lock: line = self.input_stream.readline() if line == "end\n": break elif line == "": raise StormWentAwayError() elif line == "\n": num_blank_lines += 1 if num_blank_lines % 1000 == 0: log.warn( "While trying to read a command or pending task " "ID, Storm has instead sent %s '\\n' messages.", num_blank_lines, ) continue msg = "{}{}\n".format(msg, line[0:-1]) try: return json.loads(msg) except Exception: log.error("JSON decode error for message: %r", msg, exc_info=True) raise
The Storm multilang protocol consists of JSON messages followed by a newline and "end\n". All of Storm's messages (for either bolts or spouts) should be of the form:: '<command or task_id form prior emit>\\nend\\n' Command example, an incoming Tuple to a bolt:: '{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n' Command example for a spout to emit its next Tuple:: '{"command": "next"}\\nend\\n' Example, the task IDs a prior emit was sent to:: '[12, 22, 24]\\nend\\n' The edge case of where we read ``''`` from ``input_stream`` indicating EOF, usually means that communication with the supervisor has been severed.
def get_grouped_indices(self, voigt=False, **kwargs): if voigt: array = self.voigt else: array = self indices = list(itertools.product(*[range(n) for n in array.shape])) remaining = indices.copy() grouped = [list(zip(*np.where(np.isclose(array, 0, **kwargs))))] remaining = [i for i in remaining if i not in grouped[0]] while remaining: new = list(zip(*np.where(np.isclose( array, array[remaining[0]], **kwargs)))) grouped.append(new) remaining = [i for i in remaining if i not in new] return [g for g in grouped if g]
Gets index sets for equivalent tensor values Args: voigt (bool): whether to get grouped indices of voigt or full notation tensor, defaults to false **kwargs: keyword args for np.isclose. Can take atol and rtol for absolute and relative tolerance, e. g. >>> tensor.group_array_indices(atol=1e-8) or >>> tensor.group_array_indices(rtol=1e-5) Returns: list of index groups where tensor values are equivalent to within tolerances
def _client_connection(self, conn, addr):
    log.debug('Established connection with %s:%d', addr[0], addr[1])
    conn.settimeout(self.socket_timeout)
    try:
        while self.__up:
            msg = conn.recv(self.buffer_size)
            if not msg:
                continue
            log.debug('[%s] Received %s from %s. Adding in the queue',
                      time.time(), msg, addr)
            self.buffer.put((msg, '{}:{}'.format(addr[0], addr[1])))
    except socket.timeout:
        if not self.__up:
            return
        # host first, port second, to match the %s:%d format
        log.debug('Connection %s:%d timed out', addr[0], addr[1])
        raise ListenerException('Connection %s:%d timed out' % addr)
    finally:
        log.debug('Closing connection with %s', addr)
        conn.close()
Handle the connection with one client.
def elekta_icon_fbp(ray_transform, padding=False, filter_type='Hann', frequency_scaling=0.6, parker_weighting=True): fbp_op = odl.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling) if parker_weighting: parker_weighting = odl.tomo.parker_weighting(ray_transform) fbp_op = fbp_op * parker_weighting return fbp_op
Approximation of the FDK reconstruction used in the Elekta Icon. Parameters ---------- ray_transform : `RayTransform` The ray transform to be used, should have an Elekta Icon geometry. padding : bool, optional Whether the FBP filter should use padding, increases memory use significantly. filter_type : str, optional Type of filter to apply in the FBP filter. frequency_scaling : float, optional Frequency scaling for FBP filter. parker_weighting : bool, optional Whether Parker weighting should be applied to compensate for partial scan. Returns ------- elekta_icon_fbp : `DiscreteLp` Examples -------- Create default FBP for default geometry: >>> from odl.contrib import tomo >>> geometry = tomo.elekta_icon_geometry() >>> space = tomo.elekta_icon_space() >>> ray_transform = odl.tomo.RayTransform(space, geometry) >>> fbp_op = tomo.elekta_icon_fbp(ray_transform)
def calculate_first_digit(number):
    sum = 0
    if len(number) == 9:
        weights = CPF_WEIGHTS[0]
    else:
        weights = CNPJ_WEIGHTS[0]
    for i in range(len(number)):
        sum = sum + int(number[i]) * weights[i]
    rest_division = sum % DIVISOR
    if rest_division < 2:
        return '0'
    return str(11 - rest_division)
This function calculates the first check digit of a cpf or cnpj. :param number: cpf (length 9) or cnpj (length 12) string for which to compute the first digit. Digits only. :type number: string :returns: string -- the first digit
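A minimal doctest-style sketch, assuming CPF_WEIGHTS[0] is (10, 9, ..., 2) and DIVISOR is 11; the input below is the base of the well-known example CPF 111.444.777-35:
>>> calculate_first_digit('111444777')
'3'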
def reset(self, base=0, item=0, leng=None, refs=None, both=True, kind=None, type=None): if base < 0: raise ValueError('invalid option: %s=%r' % ('base', base)) else: self.base = base if item < 0: raise ValueError('invalid option: %s=%r' % ('item', item)) else: self.item = item if leng in _all_lengs: self.leng = leng else: raise ValueError('invalid option: %s=%r' % ('leng', leng)) if refs in _all_refs: self.refs = refs else: raise ValueError('invalid option: %s=%r' % ('refs', refs)) if both in (False, True): self.both = both else: raise ValueError('invalid option: %s=%r' % ('both', both)) if kind in _all_kinds: self.kind = kind else: raise ValueError('invalid option: %s=%r' % ('kind', kind)) self.type = type
Reset all specified attributes.
def is_almost_simplicial(G, n):
    for w in G[n]:
        if all(u in G[v] for u, v in itertools.combinations(G[n], 2)
               if u != w and v != w):
            return True
    return False
Determines whether a node n in G is almost simplicial. Parameters ---------- G : NetworkX graph The graph on which to check whether node n is almost simplicial. n : node A node in graph G. Returns ------- is_almost_simplicial : bool True if all but one of its neighbors induce a clique Examples -------- This example checks whether node 0 is simplicial or almost simplicial for a :math:`K_5` complete graph with one edge removed. >>> import dwave_networkx as dnx >>> import networkx as nx >>> K_5 = nx.complete_graph(5) >>> K_5.remove_edge(1,3) >>> dnx.is_simplicial(K_5, 0) False >>> dnx.is_almost_simplicial(K_5, 0) True
def get_filter(self):
    q = self.q().select('name').expand('filter')
    response = self.session.get(self.build_url(''), params=q.as_params())
    if not response:
        return None
    data = response.json()
    return data.get('criteria', None)
Returns the filter applied to this column.
def analyse_text(text):
    if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
        return 1.0
    elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
        return 0.5
Check if the code contains a REBOL header, in which case it is probably not R code.
def _parse_flowcontrol_receive(self, config):
    value = 'off'
    match = re.search(r'flowcontrol receive (\w+)$', config, re.M)
    if match:
        value = match.group(1)
    return dict(flowcontrol_receive=value)
Scans the config block and returns the flowcontrol receive value Args: config (str): The interface config block to scan Returns: dict: Returns a dict object with the flowcontrol receive value retrieved from the config block. The returned dict object is intended to be merged into the interface resource dict
def hist(darray, figsize=None, size=None, aspect=None, ax=None, **kwargs): ax = get_axis(figsize, size, aspect, ax) xincrease = kwargs.pop('xincrease', None) yincrease = kwargs.pop('yincrease', None) xscale = kwargs.pop('xscale', None) yscale = kwargs.pop('yscale', None) xticks = kwargs.pop('xticks', None) yticks = kwargs.pop('yticks', None) xlim = kwargs.pop('xlim', None) ylim = kwargs.pop('ylim', None) no_nan = np.ravel(darray.values) no_nan = no_nan[pd.notnull(no_nan)] primitive = ax.hist(no_nan, **kwargs) ax.set_title('Histogram') ax.set_xlabel(label_from_attrs(darray)) _update_axes(ax, xincrease, yincrease, xscale, yscale, xticks, yticks, xlim, ylim) return primitive
Histogram of DataArray Wraps :func:`matplotlib:matplotlib.pyplot.hist` Plots N dimensional arrays by first flattening the array. Parameters ---------- darray : DataArray Can be any dimension figsize : tuple, optional A tuple (width, height) of the figure in inches. Mutually exclusive with ``size`` and ``ax``. aspect : scalar, optional Aspect ratio of plot, so that ``aspect * size`` gives the width in inches. Only used if a ``size`` is provided. size : scalar, optional If provided, create a new figure for the plot with the given size. Height (in inches) of each plot. See also: ``aspect``. ax : matplotlib axes object, optional Axis on which to plot this figure. By default, use the current axis. Mutually exclusive with ``size`` and ``figsize``. **kwargs : optional Additional keyword arguments to matplotlib.pyplot.hist
def _module_callers(parser, modname, result): if modname in result: return module = parser.get(modname) mresult = {} if module is not None: for xname, xinst in module.executables(): _exec_callers(xinst, mresult) result[modname] = mresult for depkey in module.dependencies: depmod = depkey.split('.')[0].lower() _module_callers(parser, depmod, result)
Adds any calls to executables contained in the specified module.
def function_application(func): if func not in NUMEXPR_MATH_FUNCS: raise ValueError("Unsupported mathematical function '%s'" % func) @with_doc(func) @with_name(func) def mathfunc(self): if isinstance(self, NumericalExpression): return NumExprFactor( "{func}({expr})".format(func=func, expr=self._expr), self.inputs, dtype=float64_dtype, ) else: return NumExprFactor( "{func}(x_0)".format(func=func), (self,), dtype=float64_dtype, ) return mathfunc
Factory function for producing function application methods for Factor subclasses.
def store(self, stream, linesep=os.linesep):
    for k, v in self.items():
        write_key_val(stream, k, v, linesep)
    stream.write(linesep.encode('utf-8'))
Serialize this section and write it to a binary stream
def isAboveUpperDetectionLimit(self): if self.isUpperDetectionLimit(): return True result = self.getResult() if result and str(result).strip().startswith(UDL): return True if api.is_floatable(result): return api.to_float(result) > self.getUpperDetectionLimit() return False
Returns True if the result is above the Upper Detection Limit or if Upper Detection Limit has been manually set
def _non_blocking_wrapper(self, method, *args, **kwargs): exceptions = [] def task_run(task): try: getattr(task, method)(*args, **kwargs) except Exception as e: exceptions.append(e) threads = [threading.Thread(name=f'task_{method}_{i}', target=task_run, args=[t]) for i, t in enumerate(self.tasks)] for thread in threads: thread.start() for thread in threads: thread.join() if exceptions: raise exceptions[0]
Runs given method on every task in the job. Blocks until all tasks finish. Propagates exception from first failed task.
def request_help(self, req, msg): if not msg.arguments: for name, method in sorted(self._request_handlers.items()): doc = method.__doc__ req.inform(name, doc) num_methods = len(self._request_handlers) return req.make_reply("ok", str(num_methods)) else: name = msg.arguments[0] if name in self._request_handlers: method = self._request_handlers[name] doc = method.__doc__.strip() req.inform(name, doc) return req.make_reply("ok", "1") return req.make_reply("fail", "Unknown request method.")
Return help on the available requests. Return a description of the available requests using a sequence of #help informs. Parameters ---------- request : str, optional The name of the request to return help for (the default is to return help for all requests). Informs ------- request : str The name of a request. description : str Documentation for the named request. Returns ------- success : {'ok', 'fail'} Whether sending the help succeeded. informs : int Number of #help inform messages sent. Examples -------- :: ?help #help halt ...description... #help help ...description... ... !help ok 5 ?help halt #help halt ...description... !help ok 1
def _repr_html_():
    from bonobo.commands.version import get_versions

    return (
        '<div style="padding: 8px;">'
        '  <div style="float: left; width: 20px; height: 20px;">{}</div>'
        '  <pre style="white-space: nowrap; padding-left: 8px">{}</pre>'
        "</div>"
    ).format(__logo__, "<br/>".join(get_versions(all=True)))
This allows a version snippet to be displayed easily in Jupyter.
def get_master_status(**connection_args): mod = sys._getframe().f_code.co_name log.debug('%s<--', mod) conn = _connect(**connection_args) if conn is None: return [] rtnv = __do_query_into_hash(conn, "SHOW MASTER STATUS") conn.close() if not rtnv: rtnv.append([]) log.debug('%s-->%s', mod, len(rtnv[0])) return rtnv[0]
Retrieves the master status from the minion. Returns:: {'host.domain.com': {'Binlog_Do_DB': '', 'Binlog_Ignore_DB': '', 'File': 'mysql-bin.000021', 'Position': 107}} CLI Example: .. code-block:: bash salt '*' mysql.get_master_status
def _merge_report(self, target, new): time = None if 'ts' in new['parsed']: time = new['parsed']['ts'] if (target.get('lastSeenDate', None) and time and target['lastSeenDate'] < time): target['lastSeenDate'] = time query_millis = int(new['parsed']['stats']['millis']) target['stats']['totalTimeMillis'] += query_millis target['stats']['count'] += 1 target['stats']['avgTimeMillis'] = target['stats']['totalTimeMillis'] / target['stats']['count']
Merges a new report into the target report
def _handle_github(self): value = click.prompt( _BUG + click.style( '1. Open an issue by typing "open";\n', fg='green', ) + click.style( '2. Print human-readable information by typing ' '"print";\n', fg='yellow', ) + click.style( '3. See the full traceback without submitting details ' '(default: "ignore").\n\n', fg='red', ) + 'Please select an action by typing its name', type=click.Choice([ 'open', 'print', 'ignore', ], ), default='ignore', ) getattr(self, '_process_' + value)()
Handle exception and submit it as GitHub issue.
def inertial_advective_wind(u, v, u_geostrophic, v_geostrophic, dx, dy, lats):
    f = coriolis_parameter(lats)

    dugdy, dugdx = gradient(u_geostrophic, deltas=(dy, dx), axes=(-2, -1))
    dvgdy, dvgdx = gradient(v_geostrophic, deltas=(dy, dx), axes=(-2, -1))

    u_component = -(u * dvgdx + v * dvgdy) / f
    v_component = (u * dugdx + v * dugdy) / f

    return u_component, v_component
r"""Calculate the inertial advective wind. .. math:: \frac{\hat k}{f} \times (\vec V \cdot \nabla)\hat V_g .. math:: \frac{\hat k}{f} \times \left[ \left( u \frac{\partial u_g}{\partial x} + v \frac{\partial u_g}{\partial y} \right) \hat i + \left( u \frac{\partial v_g} {\partial x} + v \frac{\partial v_g}{\partial y} \right) \hat j \right] .. math:: \left[ -\frac{1}{f}\left(u \frac{\partial v_g}{\partial x} + v \frac{\partial v_g}{\partial y} \right) \right] \hat i + \left[ \frac{1}{f} \left( u \frac{\partial u_g}{\partial x} + v \frac{\partial u_g}{\partial y} \right) \right] \hat j This formula is based on equation 27 of [Rochette2006]_. Parameters ---------- u : (M, N) ndarray x component of the advecting wind v : (M, N) ndarray y component of the advecting wind u_geostrophic : (M, N) ndarray x component of the geostrophic (advected) wind v_geostrophic : (M, N) ndarray y component of the geostrophic (advected) wind dx : float or ndarray The grid spacing(s) in the x-direction. If an array, there should be one item less than the size of `u` along the applicable axis. dy : float or ndarray The grid spacing(s) in the y-direction. If an array, there should be one item less than the size of `u` along the applicable axis. lats : (M, N) ndarray latitudes of the wind data in radians or with appropriate unit information attached Returns ------- (M, N) ndarray x component of inertial advective wind (M, N) ndarray y component of inertial advective wind Notes ----- Many forms of the inertial advective wind assume the advecting and advected wind to both be the geostrophic wind. To do so, pass the x and y components of the geostrophic with for u and u_geostrophic/v and v_geostrophic. If inputs have more than two dimensions, they are assumed to have either leading dimensions of (x, y) or trailing dimensions of (y, x), depending on the value of ``dim_order``.
def _parse_and_sort_accept_header(accept_header):
    return sorted([_split_into_mimetype_and_priority(x)
                   for x in accept_header.split(',')],
                  key=lambda x: x[1], reverse=True)
Parse and sort the accept header items. >>> _parse_and_sort_accept_header('application/json;q=0.5, text/*') [('text/*', 1.0), ('application/json', 0.5)]
def visit_setcomp(self, node, parent): newnode = nodes.SetComp(node.lineno, node.col_offset, parent) newnode.postinit( self.visit(node.elt, newnode), [self.visit(child, newnode) for child in node.generators], ) return newnode
visit a SetComp node by returning a fresh instance of it
def MetaGraph(self): if self._meta_graph is None: raise ValueError('There is no metagraph in this EventAccumulator') meta_graph = meta_graph_pb2.MetaGraphDef() meta_graph.ParseFromString(self._meta_graph) return meta_graph
Return the metagraph definition, if there is one. Raises: ValueError: If there is no metagraph for this run. Returns: The `meta_graph_def` proto.
def global_exception_handler(handler):
    if not hasattr(handler, "__call__"):
        raise TypeError("exception handlers must be callable")

    log.info("setting a new global exception handler")
    state.global_exception_handlers.append(weakref.ref(handler))

    return handler
add a callback for when an exception goes uncaught in any greenlet :param handler: the callback function. must be a function taking 3 arguments: - ``klass`` the exception class - ``exc`` the exception instance - ``tb`` the traceback object :type handler: function Note also that the callback is only held by a weakref, so if all other refs to the function are lost it will stop handling greenlets' exceptions
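A hedged usage sketch; ``log_uncaught`` is a made-up callback, not part of the library:
>>> @global_exception_handler
... def log_uncaught(klass, exc, tb):
...     print('uncaught in greenlet:', klass.__name__, exc)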
def get_or_none(cls, video_id, language_code):
    try:
        transcript = cls.objects.get(video__edx_video_id=video_id,
                                     language_code=language_code)
    except cls.DoesNotExist:
        transcript = None

    return transcript
Returns a data model object if found or none otherwise. Arguments: video_id(unicode): video id to which transcript may be associated language_code(unicode): language of the requested transcript
def make_response(self, data: Any = None, **kwargs: Any) -> Any:
    if not self._valid_request:
        logger.error('Request not validated, cannot make response')
        raise self.make_error('Request not validated before, cannot make '
                              'response')

    if data is None and self.response_factory is None:
        logger.error('Response data omit, but no response factory is used')
        raise self.make_error('Response data could be omitted only when '
                              'response factory is used')

    response_schema = getattr(self.module, 'response', None)
    if response_schema is not None:
        self._validate(data, response_schema)

    if self.response_factory is not None:
        return self.response_factory(
            *([data] if data is not None else []), **kwargs)

    return data
r"""Validate response data and wrap it inside response factory. :param data: Response data. Could be ommited. :param \*\*kwargs: Keyword arguments to be passed to response factory.
def encodeSentence(self, *words):
    encoded = map(self.encodeWord, words)
    encoded = b''.join(encoded)
    encoded += b'\x00'
    return encoded
Encode given sentence in API format. :param words: Words to encode. :returns: Encoded sentence.
def json_2_text(inp, out, verbose = False): for root, dirs, filenames in os.walk(inp): for f in filenames: log = codecs.open(os.path.join(root, f), 'r') j_obj = json.load(log) j_obj = json_format(j_obj) textWriter(j_obj, out, verbose)
Convert a Wikipedia article to Text object. Concatenates the sections in wikipedia file and rearranges other information so it can be interpreted as a Text object. Links and other elements with start and end positions are annotated as layers. Parameters ---------- inp: directory of parsed et.wikipedia articles in json format out: output directory of .txt files verbose: if True, prints every article title and total count of converted files if False prints every 50th count Returns ------- estnltk.text.Text The Text object.
def register(self, notification_cls=None): self.loaded = True display_names = [n.display_name for n in self.registry.values()] if ( notification_cls.name not in self.registry and notification_cls.display_name not in display_names ): self.registry.update({notification_cls.name: notification_cls}) models = getattr(notification_cls, "models", []) if not models and getattr(notification_cls, "model", None): models = [getattr(notification_cls, "model")] for model in models: try: if notification_cls.name not in [ n.name for n in self.models[model] ]: self.models[model].append(notification_cls) except KeyError: self.models.update({model: [notification_cls]}) else: raise AlreadyRegistered( f"Notification {notification_cls.name}: " f"{notification_cls.display_name} is already registered." )
Registers a Notification class unique by name.
def fetch(self, url, body=None, headers=None): if body: method = 'POST' else: method = 'GET' if headers is None: headers = {} if not (url.startswith('http://') or url.startswith('https://')): raise ValueError('URL is not a HTTP URL: %r' % (url,)) httplib2_response, content = self.httplib2.request( url, method, body=body, headers=headers) try: final_url = httplib2_response['content-location'] except KeyError: assert not httplib2_response.previous assert httplib2_response.status != 200 final_url = url return HTTPResponse( body=content, final_url=final_url, headers=dict(httplib2_response.items()), status=httplib2_response.status, )
Perform an HTTP request @raises Exception: Any exception that can be raised by httplib2 @see: C{L{HTTPFetcher.fetch}}
def add(self, scene):
    if not isinstance(scene, Scene):
        raise TypeError()
    self.__scenes.append(scene)
Add scene.
def _find_ancillary_vars(self, ds, refresh=False): if self._ancillary_vars.get(ds, None) and refresh is False: return self._ancillary_vars[ds] self._ancillary_vars[ds] = [] for name, var in ds.variables.items(): if hasattr(var, 'ancillary_variables'): for anc_name in var.ancillary_variables.split(" "): if anc_name in ds.variables: self._ancillary_vars[ds].append(anc_name) if hasattr(var, 'grid_mapping'): gm_name = var.grid_mapping if gm_name in ds.variables: self._ancillary_vars[ds].append(gm_name) return self._ancillary_vars[ds]
Returns a list of variable names that are defined as ancillary variables in the dataset ds. An ancillary variable generally is a metadata container and referenced from other variables via a string reference in an attribute. - via ancillary_variables (3.4) - "grid mapping var" (5.6) - TODO: more? The result is cached by the passed in dataset object inside of this checker. Pass refresh=True to redo the cached value. :param netCDF4.Dataset ds: An open netCDF dataset :param bool refresh: if refresh is set to True, the cache is invalidated. :rtype: list :return: List of variable names (str) that are defined as ancillary variables in the dataset ds.
def markInputline( self, markerString = ">!<" ): line_str = self.line line_column = self.column - 1 if markerString: line_str = "".join((line_str[:line_column], markerString, line_str[line_column:])) return line_str.strip()
Extracts the exception line from the input string, and marks the location of the exception with a special symbol.
def wr_tsv(self, fout_tsv): with open(fout_tsv, 'w') as prt: kws_tsv = { 'fld2fmt': {f:'{:8.2e}' for f in self.flds_cur if f[:2] == 'p_'}, 'prt_flds':self.flds_cur} prt_tsv_sections(prt, self.desc2nts['sections'], **kws_tsv) print(" WROTE: {TSV}".format(TSV=fout_tsv))
Print grouped GOEA results into a tab-separated file.
def geometry_identifiers(self):
    identifiers = {mesh.identifier_md5: name
                   for name, mesh in self.geometry.items()}
    return identifiers
Look up geometries by identifier MD5 Returns --------- identifiers: dict, identifier md5: key in self.geometry
def place_oceans_at_map_borders(world): ocean_border = int(min(30, max(world.width / 5, world.height / 5))) def place_ocean(x, y, i): world.layers['elevation'].data[y, x] = \ (world.layers['elevation'].data[y, x] * i) / ocean_border for x in range(world.width): for i in range(ocean_border): place_ocean(x, i, i) place_ocean(x, world.height - i - 1, i) for y in range(world.height): for i in range(ocean_border): place_ocean(i, y, i) place_ocean(world.width - i - 1, y, i)
Lower the elevation near the border of the map
def spread_stats(stats, spreader=False): spread = spread_t() if spreader else True descendants = deque(stats) while descendants: _stats = descendants.popleft() if spreader: spread.clear() yield _stats, spread else: yield _stats if spread: descendants.extend(_stats)
Iterates all descendant statistics under the given root statistics. When ``spreader=True``, each iteration yields a descendant statistics and `spread()` function together. You should call `spread()` if you want to spread the yielded statistics also.
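A rough sketch of the ``spreader=True`` form; ``root_stats`` and the ``own_hits`` attribute are assumptions for illustration only:
>>> for stats, spread in spread_stats([root_stats], spreader=True):
...     print(stats)
...     if getattr(stats, 'own_hits', 0):
...         spread()  # also descend into this node's children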
def filter_channels_by_status( channel_states: List[NettingChannelState], exclude_states: Optional[List[str]] = None, ) -> List[NettingChannelState]: if exclude_states is None: exclude_states = [] states = [] for channel_state in channel_states: if channel.get_status(channel_state) not in exclude_states: states.append(channel_state) return states
Filter the list of channels by excluding ones for which the state exists in `exclude_states`.
def add_environment_vars(config: MutableMapping[str, Any]):
    for e in os.environ:
        if re.match("BELBIO_", e):
            val = os.environ.get(e)
            if val:
                # strip the prefix before splitting into nested config keys
                e = e.replace("BELBIO_", "")
                env_keys = e.lower().split("__")
                if len(env_keys) > 1:
                    joined = '"]["'.join(env_keys)
                    set_config = f'config["{joined}"] = val'
                    try:
                        # item assignment needs exec, not eval
                        exec(set_config)
                    except Exception:
                        log.warn(f"Cannot process {e} into config")
                else:
                    config[env_keys[0]] = val
Override config with environment variables Environment variables have to be prefixed with BELBIO_ which will be stripped before splitting on '__' and lower-casing the environment variable name that is left into keys for the config dictionary. Example: BELBIO_BEL_API__SERVERS__API_URL=http://api.bel.bio 1. BELBIO_BEL_API__SERVERS__API_URL ==> BEL_API__SERVERS__API_URL 2. BEL_API__SERVERS__API_URL ==> bel_api__servers__api_url 3. bel_api__servers__api_url ==> [bel_api, servers, api_url] 4. [bel_api, servers, api_url] ==> config['bel_api']['servers']['api_url'] = http://api.bel.bio
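The key-splitting step on its own can be sketched in plain Python (the environment variable name is just the example from above):
>>> name = 'BELBIO_BEL_API__SERVERS__API_URL'.replace('BELBIO_', '')
>>> name.lower().split('__')
['bel_api', 'servers', 'api_url']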
def get_db_versions(self, conn): curs = conn.cursor() query = 'select version from {}'.format(self.version_table) try: curs.execute(query) return set(version for version, in curs.fetchall()) except: raise VersioningNotInstalled('Run oq engine --upgrade-db')
Get all the versions stored in the database as a set. :param conn: a DB API 2 connection
def _fingerprint(public_key, fingerprint_hash_type): if fingerprint_hash_type: hash_type = fingerprint_hash_type.lower() else: hash_type = 'sha256' try: hash_func = getattr(hashlib, hash_type) except AttributeError: raise CommandExecutionError( 'The fingerprint_hash_type {0} is not supported.'.format( hash_type ) ) try: if six.PY2: raw_key = public_key.decode('base64') else: raw_key = base64.b64decode(public_key, validate=True) except binascii.Error: return None ret = hash_func(raw_key).hexdigest() chunks = [ret[i:i + 2] for i in range(0, len(ret), 2)] return ':'.join(chunks)
Return a public key fingerprint based on its base64-encoded representation The fingerprint string is formatted according to RFC 4716 (ch.4), that is, in the form "xx:xx:...:xx" If the key is invalid (incorrect base64 string), return None public_key The public key to return the fingerprint for fingerprint_hash_type The public key fingerprint hash type that the public key fingerprint was originally hashed with. This defaults to ``sha256`` if not specified. .. versionadded:: 2016.11.4 .. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
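A small sketch of the failure path, which follows directly from the code (a valid key would instead come back as an "xx:xx:...:xx" hex string):
>>> _fingerprint('not*valid*base64', None) is None
True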
def add_node_from_appliance(self, appliance_id, x=0, y=0, compute_id=None): try: template = self.controller.appliances[appliance_id].data except KeyError: msg = "Appliance {} doesn't exist".format(appliance_id) log.error(msg) raise aiohttp.web.HTTPNotFound(text=msg) template["x"] = x template["y"] = y node_type = template.pop("node_type") compute = self.controller.get_compute(template.pop("server", compute_id)) name = template.pop("name") default_name_format = template.pop("default_name_format", "{name}-{0}") name = default_name_format.replace("{name}", name) node_id = str(uuid.uuid4()) node = yield from self.add_node(compute, name, node_id, node_type=node_type, **template) return node
Create a node from an appliance
def LoadPlugins(cls): if cls.PLUGINS_LOADED: return reg = ComponentRegistry() for _, record in reg.load_extensions('iotile.update_record'): cls.RegisterRecordType(record) cls.PLUGINS_LOADED = True
Load all registered iotile.update_record plugins.
def parse_barcode_file(fp, primer=None, header=False): tr = trie.trie() reader = csv.reader(fp) if header: next(reader) records = (record for record in reader if record) for record in records: specimen, barcode = record[:2] if primer is not None: pr = primer else: pr = record[2] for sequence in all_unambiguous(barcode + pr): if sequence in tr: raise ValueError("Duplicate sample: {0}, {1} both have {2}", specimen, tr[sequence], sequence) logging.info('%s->%s', sequence, specimen) tr[sequence] = specimen return tr
Load label, barcode, primer records from a CSV file. Returns a map from barcode -> label Any additional columns are ignored
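A rough sketch of the expected two-column CSV, assuming ``all_unambiguous`` yields the sequence unchanged when the barcode contains no ambiguity codes:
>>> import io
>>> fp = io.StringIO('sample1,ACGT\nsample2,TTGA\n')
>>> tr = parse_barcode_file(fp, primer='GG')
>>> tr['ACGTGG']
'sample1'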
def pois_from_address(address, distance, amenities=None):
    point = geocode(query=address)
    return pois_from_point(point=point, amenities=amenities,
                           distance=distance)
Get OSM points of Interests within some distance north, south, east, and west of an address. Parameters ---------- address : string the address to geocode to a lat-long point distance : numeric distance in meters amenities : list List of amenities that will be used for finding the POIs from the selected area. See available amenities from: http://wiki.openstreetmap.org/wiki/Key:amenity Returns ------- GeoDataFrame
def memoize(func):
    @wraps(func)
    def memoizer(self):
        if not hasattr(self, '_cache'):
            self._cache = {}
        if func.__name__ not in self._cache:
            self._cache[func.__name__] = func(self)
        return self._cache[func.__name__]
    return memoizer
Memoize a method that should return the same result every time on a given instance.
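A usage sketch with a made-up class:
>>> class Report:
...     @memoize
...     def total(self):
...         print('computing...')
...         return 42
>>> r = Report()
>>> r.total()
computing...
42
>>> r.total()  # second call is served from the per-instance cache
42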
def setupTable_glyf(self): if not {"glyf", "loca"}.issubset(self.tables): return self.otf["loca"] = newTable("loca") self.otf["glyf"] = glyf = newTable("glyf") glyf.glyphs = {} glyf.glyphOrder = self.glyphOrder hmtx = self.otf.get("hmtx") allGlyphs = self.allGlyphs for name in self.glyphOrder: glyph = allGlyphs[name] pen = TTGlyphPen(allGlyphs) try: glyph.draw(pen) except NotImplementedError: logger.error("%r has invalid curve format; skipped", name) ttGlyph = Glyph() else: ttGlyph = pen.glyph() if ( ttGlyph.isComposite() and hmtx is not None and self.autoUseMyMetrics ): self.autoUseMyMetrics(ttGlyph, name, hmtx) glyf[name] = ttGlyph
Make the glyf table.
def get_item(self): try: item_lookup_session = get_item_lookup_session(runtime=self._runtime, proxy=self._proxy) item_lookup_session.use_federated_bank_view() item = item_lookup_session.get_item(self._item_id) except errors.NotFound: if self._section is not None: question = self._section.get_question(self._item_id) ils = self._section._get_item_lookup_session() real_item_id = Id(question._my_map['itemId']) item = ils.get_item(real_item_id) else: raise errors.NotFound() return item.get_question()
Gets the ``Item``. return: (osid.assessment.Item) - the assessment item *compliance: mandatory -- This method must be implemented.*
def get_positions(self, attr=None): pos = self.parent.get_positions(self) try: if attr is not None: attr = attr.replace("quantity", "position") return pos[attr] except Exception as e: return pos
Get the positions data for the instrument :Optional: attr : string Position attribute to get (optional attributes: symbol, position, avgCost, account) :Returns: positions : dict (positions) / float/str (attribute) positions data for the instrument
def _print_divide(self): for space in self.AttributesLength: self.StrTable += "+ " + "- " * space self.StrTable += "+" + "\n"
Prints all those table line dividers.
def removeIndividual(self): self._openRepo() dataset = self._repo.getDatasetByName(self._args.datasetName) individual = dataset.getIndividualByName(self._args.individualName) def func(): self._updateRepo(self._repo.removeIndividual, individual) self._confirmDelete("Individual", individual.getLocalId(), func)
Removes an individual from this repo
def collect_summands(cls, ops, kwargs): from qnet.algebra.core.abstract_quantum_algebra import ( ScalarTimesQuantumExpression) coeff_map = OrderedDict() for op in ops: if isinstance(op, ScalarTimesQuantumExpression): coeff, term = op.coeff, op.term else: coeff, term = 1, op if term in coeff_map: coeff_map[term] += coeff else: coeff_map[term] = coeff fops = [] for (term, coeff) in coeff_map.items(): op = coeff * term if not op.is_zero: fops.append(op) if len(fops) == 0: return cls._zero elif len(fops) == 1: return fops[0] else: return tuple(fops), kwargs
Collect summands that occur multiple times into a single summand Also filters out zero-summands. Example: >>> A, B, C = (OperatorSymbol(s, hs=0) for s in ('A', 'B', 'C')) >>> collect_summands( ... OperatorPlus, (A, B, C, ZeroOperator, 2 * A, B, -C) , {}) ((3 * A^(0), 2 * B^(0)), {}) >>> collect_summands(OperatorPlus, (A, -A), {}) ZeroOperator >>> collect_summands(OperatorPlus, (B, A, -B), {}) A^(0)
def reset_image_attribute(self, image_id, attribute='launchPermission'): params = {'ImageId' : image_id, 'Attribute' : attribute} return self.get_status('ResetImageAttribute', params, verb='POST')
Resets an attribute of an AMI to its default value. :type image_id: string :param image_id: ID of the AMI for which an attribute will be described :type attribute: string :param attribute: The attribute to reset :rtype: bool :return: Whether the operation succeeded or not
def get_file_client(opts, pillar=False):
    client = opts.get('file_client', 'remote')
    if pillar and client == 'local':
        client = 'pillar'
    return {
        'remote': RemoteClient,
        'local': FSClient,
        'pillar': PillarClient,
    }.get(client, RemoteClient)(opts)
Read in the ``file_client`` option and return the correct type of file server
def filter(self, predicate: Callable[[FileLine], 'FileLineSet'] ) -> 'FileLineSet': filtered = [fileline for fileline in self if predicate(fileline)] return FileLineSet.from_list(filtered)
Returns a subset of the file lines within this set that satisfy a given filtering criterion.
def normalize_job_id(job_id):
    if not isinstance(job_id, uuid.UUID):
        job_id = uuid.UUID(job_id)
    return job_id
Convert a value to a job id. :param job_id: Value to convert. :type job_id: int, str :return: The job id. :rtype: :py:class:`uuid.UUID`
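For example:
>>> import uuid
>>> normalize_job_id('de305d54-75b4-431b-adb2-eb6b9e546014')
UUID('de305d54-75b4-431b-adb2-eb6b9e546014')
>>> jid = uuid.uuid4()
>>> normalize_job_id(jid) is jid
True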
def clean_previous_run(self): super(Alignak, self).clean_previous_run() self.pollers.clear() self.reactionners.clear() self.brokers.clear()
Clean variables from previous configuration :return: None
def _pb_timestamp_to_datetime(timestamp_pb):
    return _EPOCH + datetime.timedelta(
        seconds=timestamp_pb.seconds,
        microseconds=(timestamp_pb.nanos / 1000.0),
    )
Convert a Timestamp protobuf to a datetime object. :type timestamp_pb: :class:`google.protobuf.timestamp_pb2.Timestamp` :param timestamp_pb: A Google returned timestamp protobuf. :rtype: :class:`datetime.datetime` :returns: A UTC datetime object converted from a protobuf timestamp.
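A small sketch using the standard protobuf ``Timestamp``; the exact result depends on how ``_EPOCH`` is defined, so no output is asserted:
>>> from google.protobuf import timestamp_pb2
>>> ts = timestamp_pb2.Timestamp(seconds=1, nanos=500000000)
>>> _pb_timestamp_to_datetime(ts)  # 1970-01-01 00:00:01.500000 UTC, assuming _EPOCH is the Unix epoch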
def _drop_gracefully(self): shard_id = self.request.headers[util._MR_SHARD_ID_TASK_HEADER] mr_id = self.request.headers[util._MR_ID_TASK_HEADER] shard_state, mr_state = db.get([ model.ShardState.get_key_by_shard_id(shard_id), model.MapreduceState.get_key_by_job_id(mr_id)]) if shard_state and shard_state.active: shard_state.set_for_failure() config = util.create_datastore_write_config(mr_state.mapreduce_spec) shard_state.put(config=config)
Drop worker task gracefully. Set current shard_state to failed. Controller logic will take care of other shards and the entire MR.
def track_parallel(items, sub_type): out = [] for i, args in enumerate(items): item_i, item = _get_provitem_from_args(args) if item: sub_entity = "%s.%s.%s" % (item["provenance"]["entity"], sub_type, i) item["provenance"]["entity"] = sub_entity args = list(args) args[item_i] = item out.append(args) return out
Create entity identifiers to trace the given items in sub-commands. Helps handle nesting in parallel program execution: run id => sub-section id => parallel ids
def find_external_metabolites(model):
    ex_comp = find_external_compartment(model)
    return [met for met in model.metabolites if met.compartment == ex_comp]
Return all metabolites in the external compartment.
def access_keys(opts): keys = {} publisher_acl = opts['publisher_acl'] acl_users = set(publisher_acl.keys()) if opts.get('user'): acl_users.add(opts['user']) acl_users.add(salt.utils.user.get_user()) for user in acl_users: log.info('Preparing the %s key for local communication', user) key = mk_key(opts, user) if key is not None: keys[user] = key if opts['client_acl_verify'] and HAS_PWD: log.profile('Beginning pwd.getpwall() call in masterapi access_keys function') for user in pwd.getpwall(): user = user.pw_name if user not in keys and salt.utils.stringutils.check_whitelist_blacklist(user, whitelist=acl_users): keys[user] = mk_key(opts, user) log.profile('End pwd.getpwall() call in masterapi access_keys function') return keys
A key needs to be placed in the filesystem with permissions 0400 so clients are required to run as root.
def FindFileContainingSymbol(self, symbol): symbol = _NormalizeFullyQualifiedName(symbol) try: return self._descriptors[symbol].file except KeyError: pass try: return self._enum_descriptors[symbol].file except KeyError: pass try: return self._FindFileContainingSymbolInDb(symbol) except KeyError: pass try: return self._file_desc_by_toplevel_extension[symbol] except KeyError: pass message_name, _, extension_name = symbol.rpartition('.') try: message = self.FindMessageTypeByName(message_name) assert message.extensions_by_name[extension_name] return message.file except KeyError: raise KeyError('Cannot find a file containing %s' % symbol)
Gets the FileDescriptor for the file containing the specified symbol. Args: symbol: The name of the symbol to search for. Returns: A FileDescriptor that contains the specified symbol. Raises: KeyError: if the file cannot be found in the pool.
def _add_junction(item): type_, channels = _expand_one_key_dictionary(item) junction = UnnamedStatement(type='junction') for item in channels: type_, value = _expand_one_key_dictionary(item) channel = UnnamedStatement(type='channel') for val in value: if _is_reference(val): _add_reference(val, channel) elif _is_inline_definition(val): _add_inline_definition(val, channel) junction.add_child(channel) _current_statement.add_child(junction)
Adds a junction to the _current_statement.
def _processEscapeSequences(replaceText): def _replaceFunc(escapeMatchObject): char = escapeMatchObject.group(0)[1] if char in _escapeSequences: return _escapeSequences[char] return escapeMatchObject.group(0) return _seqReplacer.sub(_replaceFunc, replaceText)
Replace symbols like \n \\, etc
def get_channel(self, name): return self._api_get('/api/channels/{0}'.format( urllib.parse.quote_plus(name) ))
Details about an individual channel. :param name: The channel name :type name: str
def geocode(self): submit_set = [] data_map = {} for address, o in self.gen: submit_set.append(address) data_map[address] = o if len(submit_set) >= self.submit_size: results = self._send(submit_set) submit_set = [] for k, result in results.items(): o = data_map[k] yield (k, result, o) if len(submit_set) > 0: results = self._send(submit_set) for k, result in results.items(): o = data_map[k] yield (k, result, o)
A Generator that reads from the address generators and returns geocode results. The generator yields ( address, geocode_results, object)
def IsEquivalent(self, other): if self.name and other.name: return self.name == other.name if self.name: self_family, self_version_tuple = self._FAMILY_AND_VERSION_PER_NAME.get( self.name, self._DEFAULT_FAMILY_AND_VERSION) return ( self_family == other.family and self_version_tuple == other.version_tuple) if self.family and self.version: if other.name: other_family, other_version_tuple = ( self._FAMILY_AND_VERSION_PER_NAME.get( other.name, self._DEFAULT_FAMILY_AND_VERSION)) else: other_family = other.family other_version_tuple = other.version_tuple return ( self.family == other_family and self.version_tuple == other_version_tuple) if self.family: if other.name: other_family, _ = self._FAMILY_AND_VERSION_PER_NAME.get( other.name, self._DEFAULT_FAMILY_AND_VERSION) else: other_family = other.family return self.family == other_family return False
Determines if 2 operating system artifacts are equivalent. This function compares the operating systems based in order of: * name derived from product * family and version * family Args: other (OperatingSystemArtifact): operating system artifact attribute container to compare with. Returns: bool: True if the operating systems are considered equivalent, False if the most specific criteria do not match, or no criteria are available.
def graph_to_gluon(self, graph, ctx): sym, arg_params, aux_params = self.from_onnx(graph) metadata = self.get_graph_metadata(graph) data_names = [input_tensor[0] for input_tensor in metadata['input_tensor_data']] data_inputs = [symbol.var(data_name) for data_name in data_names] from ....gluon import SymbolBlock net = SymbolBlock(outputs=sym, inputs=data_inputs) net_params = net.collect_params() for param in arg_params: if param in net_params: net_params[param].shape = arg_params[param].shape net_params[param]._load_init(arg_params[param], ctx=ctx) for param in aux_params: if param in net_params: net_params[param].shape = aux_params[param].shape net_params[param]._load_init(aux_params[param], ctx=ctx) return net
Construct SymbolBlock from onnx graph. Parameters ---------- graph : onnx protobuf object The loaded onnx graph ctx : Context or list of Context Loads the model into one or many context(s). Returns ------- sym_block :gluon.nn.SymbolBlock The returned gluon SymbolBlock
def filepaths(self) -> List[str]: path = self.currentpath return [os.path.join(path, name) for name in self.filenames]
Absolute path names of the files contained in the current working directory. Files names starting with underscores are ignored: >>> from hydpy.core.filetools import FileManager >>> filemanager = FileManager() >>> filemanager.BASEDIR = 'basename' >>> filemanager.projectdir = 'projectname' >>> from hydpy import repr_, TestIO >>> with TestIO(): ... filemanager.currentdir = 'testdir' ... open('projectname/basename/testdir/file1.txt', 'w').close() ... open('projectname/basename/testdir/file2.npy', 'w').close() ... open('projectname/basename/testdir/_file1.nc', 'w').close() ... for filepath in filemanager.filepaths: ... repr_(filepath) # doctest: +ELLIPSIS '...hydpy/tests/iotesting/projectname/basename/testdir/file1.txt' '...hydpy/tests/iotesting/projectname/basename/testdir/file2.npy'
def echo_utc(string):
    from datetime import datetime
    click.echo('{} | {}'.format(datetime.utcnow().isoformat(), string))
Echo the string to standard out, prefixed with the current date and time in UTC format. :param string: string to echo
def interested_in(self): genders = [] for gender in self.cache['interested_in']: genders.append(gender) return genders
A list of strings describing the genders the user is interested in.
def get_depts(self, dept_name=None): depts = self.json_response.get("department", None) params = self.kwargs.get("params", None) fetch_child = params.get("fetch_child", True) if params else True if dept_name is not None: depts = [dept for dept in depts if dept["name"] == dept_name] depts = [{"id": dept["id"], "name": dept["name"]} for dept in depts] self.logger.info("%s\t%s" % (self.request_method, self.request_url)) return depts if fetch_child else depts[0]
Method to get department by name.
def stop_recording_skipped(cls): if cls._errors_recorded is None: raise Exception('Cannot stop recording before it is started') recorded = cls._errors_recorded[:] cls._errors_recorded = None return recorded
Stop collecting OptionErrors recorded with the record_skipped_option method and return them
def make_epub_base(location): log.info('Making EPUB base files in {0}'.format(location)) with open(os.path.join(location, 'mimetype'), 'w') as out: out.write('application/epub+zip') os.mkdir(os.path.join(location, 'META-INF')) os.mkdir(os.path.join(location, 'EPUB')) os.mkdir(os.path.join(location, 'EPUB', 'css')) with open(os.path.join(location, 'META-INF', 'container.xml'), 'w') as out: out.write( ) with open(os.path.join(location, 'EPUB', 'css', 'default.css') ,'wb') as out: out.write(bytes(DEFAULT_CSS, 'UTF-8'))
Creates the base structure for an EPUB file in a specified location. This function creates constant components for the structure of the EPUB in a specified directory location. Parameters ---------- location : str A path string to a local directory in which the EPUB is to be built
def get_iter(self, times, seconds, chunk_size=2000): def entry_generator(): with ConstantRateLimit(times, seconds, sleep_func=self._steam.sleep) as r: for entries in chunks(self, chunk_size): if not entries: return for entry in entries: yield entry r.wait() return entry_generator()
Make an iterator over the entries See :class:`steam.util.throttle.ConstantRateLimit` for ``times`` and ``seconds`` parameters. :param chunk_size: number of entries per request :type chunk_size: :class:`int` :returns: generator object :rtype: :class:`generator` The iterator essentially buffers ``chunk_size`` number of entries, and ensures we are not sending messages too fast. For example, the ``__iter__`` method on this class uses ``get_iter(1, 1, 2000)``
def render_document(template_name, data_name, output_name): env = Environment(loader=PackageLoader('aide_document')) with open(output_name, 'w') as output_file: output = env.get_template(template_name).render(yaml.load(open(data_name))) output_file.write(output)
Combines a MarkDown template file from the aide_document package with a local associated YAML data file, then outputs the rendered combination to a local MarkDown output file. Parameters ========== template_name : String Exact name of the MarkDown template file from the aide_document/templates folder. Do not use the file path. data_name : String Relative file path from where this method is called to the location of the YAML data file to be used. output_name : String Relative file path from where this method is called to the location to which the output file is written. Examples ======== Suppose we have template.md in aide_document and a directory as follows: data/ params.yaml To render the document: >>> from aide_document import combine >>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md') This will then combine the data and template files and write to a new output file within data/.
def at_depth(self, level): return Zconfig(lib.zconfig_at_depth(self._as_parameter_, level), False)
Locate the last config item at a specified depth
def add_entry(self, net_type, cn, addresses): self.entries.append({ 'cn': cn, 'addresses': addresses})
Add a request to the batch :param net_type: str network space name request is for :param cn: str Canonical Name for certificate :param addresses: [] List of addresses to be used as SANs
def set_file_filters(self, file_filters):
    file_filters = util.return_list(file_filters)
    self.file_filters = file_filters
Sets internal file filters to `file_filters` by tossing old state. `file_filters` can be single object or iterable.
def search(ctx, tags, prefix=None): _generate_api(ctx) for i, match in enumerate(ctx.obj.api.search(*tags, prefix=prefix)): click.echo(match, nl=False) print('')
List all archives matching tag search criteria
def update(self, argv): if len(argv) == 0: error("Command requires an index name", 2) name = argv[0] if name not in self.service.indexes: error("Index '%s' does not exist" % name, 2) index = self.service.indexes[name] fields = self.service.indexes.itemmeta().fields.optional rules = dict([(field, {'flags': ["--%s" % field]}) for field in fields]) opts = cmdline(argv, rules) index.update(**opts.kwargs)
Update an index according to the given argument vector.
def set_version(version): global UNIVERSION global UNIVERSION_INFO if version is None: version = unicodedata.unidata_version UNIVERSION = version UNIVERSION_INFO = tuple([int(x) for x in UNIVERSION.split('.')])
Set version.
def on_key_down(self, event): keycode = event.GetKeyCode() meta_down = event.MetaDown() or event.GetCmdDown() if keycode == 86 and meta_down: self.do_fit(event)
If user does command v, re-size window in case pasting has changed the content size.
def _gcs_list_keys(bucket, pattern): data = [{'Name': obj.metadata.name, 'Type': obj.metadata.content_type, 'Size': obj.metadata.size, 'Updated': obj.metadata.updated_on} for obj in _gcs_get_keys(bucket, pattern)] return google.datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated'])
List all Google Cloud Storage keys in a specified bucket that match a pattern.
def _metric_value(value_str, metric_type):
    if metric_type in (int, float):
        try:
            return metric_type(value_str)
        except ValueError:
            # use the type's own name (e.g. 'int'), not type(metric_type).__name__
            raise ValueError("Invalid {} metric value: {!r}".
                             format(metric_type.__name__, value_str))
    elif metric_type is six.text_type:
        return value_str.strip('"').encode('utf-8').decode('unicode_escape')
    else:
        assert metric_type is bool
        lower_str = value_str.lower()
        if lower_str == 'true':
            return True
        elif lower_str == 'false':
            return False
        else:
            raise ValueError("Invalid boolean metric value: {!r}".
                             format(value_str))
Return a Python-typed metric value from a metric value string.
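The int, float and bool paths follow directly from the code:
>>> _metric_value('42', int)
42
>>> _metric_value('2.5', float)
2.5
>>> _metric_value('true', bool)
True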
def get_current(): global current if exists( SETTINGSFILE ): f = open( SETTINGSFILE ).read() current = re.findall('config[^\s]+.+', f)[1].split('/')[-1] return current else: return "** Not Set **"
return current Xresources color theme
def status(queue, munin, munin_config): if munin_config: return status_print_config(queue) queues = get_queues(queue) for queue in queues: status_print_queue(queue, munin=munin) if not munin: print('-' * 40)
List queued tasks aggregated by name
def DeactivateCard(self, card): if hasattr(card, 'connection'): card.connection.disconnect() if None != self.parent.apdutracerpanel: card.connection.deleteObserver(self.parent.apdutracerpanel) delattr(card, 'connection') self.dialogpanel.OnDeactivateCard(card)
Deactivate a card.
def set_motion_detect(self, enable): if enable: return api.request_motion_detection_enable(self.sync.blink, self.network_id, self.camera_id) return api.request_motion_detection_disable(self.sync.blink, self.network_id, self.camera_id)
Set motion detection.
def lint(filename, lines, config): _, ext = os.path.splitext(filename) if ext in config: output = collections.defaultdict(list) for linter in config[ext]: linter_output = linter(filename, lines) for category, values in linter_output[filename].items(): output[category].extend(values) if 'comments' in output: output['comments'] = sorted( output['comments'], key=lambda x: (x.get('line', -1), x.get('column', -1))) return {filename: dict(output)} else: return { filename: { 'skipped': [ 'no linter is defined or enabled for files' ' with extension "%s"' % ext ] } }
Lints a file. Args: filename: string: filename to lint. lines: list[int]|None: list of lines that we want to capture. If None, then all lines will be captured. config: dict[string: linter]: mapping from extension to a linter function. Returns: dict: if there were errors running the command then the field 'error' will have the reasons in a list. if the lint process was skipped, then a field 'skipped' will be set with the reasons. Otherwise, the field 'comments' will have the messages.
def socket(self): if not hasattr(self, '_socket'): self._socket = self.context.socket(zmq.REQ) if hasattr(zmq, 'RECONNECT_IVL_MAX'): self._socket.setsockopt( zmq.RECONNECT_IVL_MAX, 5000 ) self._set_tcp_keepalive() if self.master.startswith('tcp://['): if hasattr(zmq, 'IPV6'): self._socket.setsockopt(zmq.IPV6, 1) elif hasattr(zmq, 'IPV4ONLY'): self._socket.setsockopt(zmq.IPV4ONLY, 0) self._socket.linger = self.linger if self.id_: self._socket.setsockopt(zmq.IDENTITY, self.id_) self._socket.connect(self.master) return self._socket
Lazily create the socket.