Dataset columns:
    text   (string, lengths 78 to 104k)
    score  (float64, range 0 to 0.18)
def get_latitude(self, ip):
    ''' Get latitude '''
    rec = self.get_all(ip)
    return rec and rec.latitude
0.01626
def _link_user(self, userid):
    """Link an existing user to the current Contact
    """
    # check if we have a selected user from the search-list
    if userid:
        try:
            self.context.setUser(userid)
            self.add_status_message(
                _("User linked to this Contact"), "info")
        except ValueError, e:
            self.add_status_message(e, "error")
    else:
        self.add_status_message(
            _("Please select a User from the list"), "info")
0.003656
def convert_widgets(self):
    """
    During form initialization, some widgets have to be replaced by a
    counterpart suitable to be rendered the AngularJS way.
    """
    for field in self.base_fields.values():
        try:
            new_widget = field.get_converted_widget()
        except AttributeError:
            pass
        else:
            if new_widget:
                field.widget = new_widget
0.006508
def config_generator(search_space, max_search, rng, shuffle=True): """Generates config dicts from the given search space Args: search_space: (dict) A dictionary of parameters to search over. See note below for more details. max_search: (int) The maximum number of configurations to search. If max_search is None, do a full grid search of all discrete parameters, filling in range parameters as needed. Otherwise, do a full grid search of all discrete parameters and then cycle through again filling in new range parameters values; if there are no range parameters, stop after yielding the full cross product of parameters once. shuffle: (bool) If True, shuffle the order of generated configs Yields: configs: each config is a dict of parameter values based on the provided search space The search_space dictionary may consist of two types of parameters: --discrete: a discrete parameter is either a single value or a list of values. Use single values, for example, to override a default model parameter or set a flag such as 'verbose'=True. --range: a range parameter is a dict of the form: {'range': [<min>, <max>], 'scale': <scale>} where <min> and <max> are the min/max values to search between and scale is one of ['linear', 'log'] (defaulting to 'linear') representing the scale to use when searching the given range Example: search_space = { 'verbose': True, # discrete 'n_epochs': 100, # discrete 'momentum': [0.0, 0.9, 0.99], # discrete 'l2': {'range': [0.0001, 10]} # linear range 'lr': {'range': [0.001, 1], 'scale': 'log'}, # log range } If max_search is None, this will return 3 configurations (enough to just cover the full cross-product of discrete values, filled in with sampled range values) Otherewise, this will return max_search configurations (cycling through the discrete value combinations multiple time if necessary) """ def dict_product(d): keys = d.keys() for element in product(*d.values()): yield dict(zip(keys, element)) def range_param_func(v): scale = v.get("scale", "linear") mini = min(v["range"]) maxi = max(v["range"]) if scale == "linear": func = lambda rand: mini + (maxi - mini) * rand elif scale == "log": mini = np.log(mini) maxi = np.log(maxi) func = lambda rand: np.exp(mini + (maxi - mini) * rand) else: raise ValueError(f"Unrecognized scale '{scale}' for " "parameter {k}") return func discretes = {} ranges = {} for k, v in search_space.items(): if isinstance(v, dict): ranges[k] = range_param_func(v) elif isinstance(v, list): discretes[k] = v else: discretes[k] = [v] discrete_configs = list(dict_product(discretes)) if shuffle: rng.shuffle(discrete_configs) # If there are range parameters and a non-None max_search, cycle # through the discrete_configs (with new range values) until # max_search is met if ranges and max_search: discrete_configs = cycle(discrete_configs) for i, config in enumerate(discrete_configs): # We may see the same config twice due to cycle config = config.copy() if max_search and i == max_search: break for k, v in ranges.items(): config[k] = float(v(rng.random())) yield config
0.001181
def status_bar(python_input): """ Create the `Layout` for the status bar. """ TB = 'class:status-toolbar' @if_mousedown def toggle_paste_mode(mouse_event): python_input.paste_mode = not python_input.paste_mode @if_mousedown def enter_history(mouse_event): python_input.enter_history() def get_text_fragments(): python_buffer = python_input.default_buffer result = [] append = result.append append((TB, ' ')) result.extend(get_inputmode_fragments(python_input)) append((TB, ' ')) # Position in history. append((TB, '%i/%i ' % (python_buffer.working_index + 1, len(python_buffer._working_lines)))) # Shortcuts. app = get_app() if not python_input.vi_mode and app.current_buffer == python_input.search_buffer: append((TB, '[Ctrl-G] Cancel search [Enter] Go to this position.')) elif bool(app.current_buffer.selection_state) and not python_input.vi_mode: # Emacs cut/copy keys. append((TB, '[Ctrl-W] Cut [Meta-W] Copy [Ctrl-Y] Paste [Ctrl-G] Cancel')) else: result.extend([ (TB + ' class:key', '[F3]', enter_history), (TB, ' History ', enter_history), (TB + ' class:key', '[F6]', toggle_paste_mode), (TB, ' ', toggle_paste_mode), ]) if python_input.paste_mode: append((TB + ' class:paste-mode-on', 'Paste mode (on)', toggle_paste_mode)) else: append((TB, 'Paste mode', toggle_paste_mode)) return result return ConditionalContainer( content=Window(content=FormattedTextControl(get_text_fragments), style=TB), filter=~is_done & renderer_height_is_known & Condition(lambda: python_input.show_status_bar and not python_input.show_exit_confirmation))
0.00398
def _shape(self):
    """Return the tensor shape of the matrix operator"""
    return tuple(reversed(self.output_dims())) + tuple(
        reversed(self.input_dims()))
0.011173
def dispatch(self, request, *args, **kwargs):
    '''
    Check that a valid Invoice ID has been passed in session data,
    and that said invoice is marked as paid.
    '''
    paymentSession = request.session.get(INVOICE_VALIDATION_STR, {})
    self.invoiceID = paymentSession.get('invoiceID')
    self.amount = paymentSession.get('amount', 0)
    self.success_url = paymentSession.get('success_url', reverse('registration'))

    # Check that Invoice matching passed ID exists
    try:
        i = Invoice.objects.get(id=self.invoiceID)
    except ObjectDoesNotExist:
        return HttpResponseBadRequest(_('Invalid invoice information passed.'))

    if i.unpaid or i.amountPaid != self.amount:
        return HttpResponseBadRequest(_('Passed invoice is not paid.'))

    return super(GiftCertificateCustomizeView, self).dispatch(request, *args, **kwargs)
0.014192
def rename_feature(self, mapobject_type_name, name, new_name): '''Renames a feature. Parameters ---------- mapobject_type_name: str name of the segmented objects type name: str name of the feature that should be renamed new_name: str name that should be given to the feature See also -------- :func:`tmserver.api.feature.update_feature` :class:`tmlib.models.feature.Feature` ''' logger.info( 'rename feature "%s" of experiment "%s", mapobject type "%s"', name, self.experiment_name, mapobject_type_name ) content = { 'name': new_name, } feature_id = self._get_feature_id(mapobject_type_name, name) url = self._build_api_url( '/experiments/{experiment_id}/features/{feature_id}'.format( experiment_id=self._experiment_id, feature_id=feature_id ) ) res = self._session.put(url, json=content) res.raise_for_status()
0.001842
def passwd_check(hashed_passphrase, passphrase): """Verify that a given passphrase matches its hashed version. Parameters ---------- hashed_passphrase : str Hashed password, in the format returned by `passwd`. passphrase : str Passphrase to validate. Returns ------- valid : bool True if the passphrase matches the hash. Examples -------- In [1]: from IPython.lib.security import passwd_check In [2]: passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', ...: 'mypassword') Out[2]: True In [3]: passwd_check('sha1:0e112c3ddfce:a68df677475c2b47b6e86d0467eec97ac5f4b85a', ...: 'anotherpassword') Out[3]: False """ try: algorithm, salt, pw_digest = hashed_passphrase.split(':', 2) except (ValueError, TypeError): return False try: h = hashlib.new(algorithm) except ValueError: return False if len(pw_digest) == 0: return False h.update(cast_bytes(passphrase, 'utf-8') + str_to_bytes(salt, 'ascii')) return h.hexdigest() == pw_digest
0.002595
def start(self):
    """Starts the worker threads"""
    if self.working:
        return
    self.working = True
    for i in range(self.num_workers):
        w = threading.Thread(
            name="Worker Thread #{i}".format(i=i),
            target=self._worker,
        )
        w.daemon = True
        w.start()
        self.workers.append(w)
0.005013
def _register(self, assignment):  # type: (Assignment) -> None
    """
    Registers an Assignment in _positive or _negative.
    """
    name = assignment.dependency.name
    old_positive = self._positive.get(name)
    if old_positive is not None:
        self._positive[name] = old_positive.intersect(assignment)
        return

    ref = assignment.dependency.name
    negative_by_ref = self._negative.get(name)
    old_negative = None if negative_by_ref is None else negative_by_ref.get(ref)
    if old_negative is None:
        term = assignment
    else:
        term = assignment.intersect(old_negative)

    if term.is_positive():
        if name in self._negative:
            del self._negative[name]
        self._positive[name] = term
    else:
        if name not in self._negative:
            self._negative[name] = {}
        self._negative[name][ref] = term
0.003096
def __telnet_event_listener(self, ip, callback):
    """creates a telnet connection to the lightpad"""
    tn = telnetlib.Telnet(ip, 2708)
    self._last_event = ""
    self._telnet_running = True
    while self._telnet_running:
        try:
            raw_string = tn.read_until(b'.\n', 5)
            if len(raw_string) >= 2 and raw_string[-2:] == b'.\n':
                # lightpad sends ".\n" at the end that we need to chop off
                json_string = raw_string.decode('ascii')[0:-2]
                if json_string != self._last_event:
                    callback(json.loads(json_string))
                    self._last_event = json_string
        except:
            pass
    tn.close()
0.003922
def classify_intersection9(s, curve1, curve2): """Image for :func:`._surface_helpers.classify_intersection` docstring.""" if NO_IMAGES: return surface1 = bezier.Surface.from_nodes( np.asfortranarray( [ [0.0, 20.0, 40.0, 10.0, 30.0, 20.0], [0.0, 40.0, 0.0, 25.0, 25.0, 50.0], ] ) ) surface2 = bezier.Surface.from_nodes( np.asfortranarray( [ [40.0, 20.0, 0.0, 30.0, 10.0, 20.0], [40.0, 0.0, 40.0, 15.0, 15.0, -10.0], ] ) ) figure, (ax1, ax2) = plt.subplots(1, 2) classify_help(s, curve1, surface1, curve2, surface2, 0, ax=ax1) classify_help(s, curve1, surface1, curve2, surface2, 1, ax=ax2) # Remove the alpha from the color color1 = ax1.patches[0].get_facecolor()[:3] color2 = ax1.patches[1].get_facecolor()[:3] # Now add the "degenerate" intersection polygons. cp_edges1, cp_edges2 = _edges_classify_intersection9() curved_polygon1 = bezier.CurvedPolygon(*cp_edges1) curved_polygon1.plot(256, ax=ax1) curved_polygon2 = bezier.CurvedPolygon(*cp_edges2) curved_polygon2.plot(256, ax=ax2) (int_x,), (int_y,) = curve1.evaluate(s) ax1.plot([int_x], [int_y], color=color1, linestyle="None", marker="o") ax2.plot([int_x], [int_y], color=color2, linestyle="None", marker="o") for ax in (ax1, ax2): ax.axis("scaled") ax.set_xlim(-2.0, 42.0) ax.set_ylim(-12.0, 52.0) plt.setp(ax2.get_yticklabels(), visible=False) figure.tight_layout(w_pad=1.0) save_image(figure, "classify_intersection9.png")
0.000602
def parse(self, fileobj, name_hint='', parser=None):
    """Fill from a file-like object."""
    self.current_block = None  # Reset current block
    parser = parser or Parser()
    for line in parser.parse(fileobj, name_hint=name_hint):
        self.handle_line(line)
0.006944
def _find_coeffs(orig_pts:Points, targ_pts:Points)->Tensor:
    "Find 8 coeff mentioned [here](https://web.archive.org/web/20150222120106/xenia.media.mit.edu/~cwren/interpolator/)."
    matrix = []
    #The equations we'll need to solve.
    for p1, p2 in zip(targ_pts, orig_pts):
        matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0]*p1[0], -p2[0]*p1[1]])
        matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1]*p1[0], -p2[1]*p1[1]])
    A = FloatTensor(matrix)
    B = FloatTensor(orig_pts).view(8, 1)
    #The 8 scalars we seek are solution of AX = B
    return _solve_func(B,A)[0][:,0]
0.015203
def _save_results(self, output_dir, label, results, ngrams, type_label):
    """Saves `results` filtered by `label` and `ngram` to `output_dir`.

    :param output_dir: directory to save results to
    :type output_dir: `str`
    :param label: catalogue label of results, used in saved filename
    :type label: `str`
    :param results: results to filter and save
    :type results: `pandas.DataFrame`
    :param ngrams: n-grams to save from results
    :type ngrams: `list` of `str`
    :param type_label: name of type of results, used in saved filename
    :type type_label: `str`
    """
    path = os.path.join(output_dir, '{}-{}.csv'.format(label, type_label))
    results[results[constants.NGRAM_FIELDNAME].isin(
        ngrams)].to_csv(path, encoding='utf-8', float_format='%d',
                        index=False)
0.002252
def register_rate_producer(self, rate_name: str, source: Callable[..., pd.DataFrame]=None) -> Pipeline:
    """Marks a ``Callable`` as the producer of a named rate.

    This is a convenience wrapper around ``register_value_producer`` that makes sure
    rate data is appropriately scaled to the size of the simulation time step.
    It is equivalent to ``register_value_producer(value_name, source,
    preferred_combiner=replace_combiner, preferred_post_processor=rescale_post_processor)``

    Parameters
    ----------
    rate_name :
        The name of the new dynamic rate pipeline.
    source :
        A callable source for the dynamic rate pipeline.

    Returns
    -------
    Callable
        A callable reference to the named dynamic rate pipeline.
    """
    return self._value_manager.register_rate_producer(rate_name, source)
0.008782
def syscall(self, state, allow_unsupported=True): """ Given a state, return the procedure corresponding to the current syscall. This procedure will have .syscall_number, .display_name, and .addr set. :param state: The state to get the syscall number from :param allow_unsupported: Whether to return a "dummy" sycall instead of raising an unsupported exception """ abi = self.syscall_abi(state) if state.os_name in SYSCALL_CC[state.arch.name]: cc = SYSCALL_CC[state.arch.name][state.os_name](state.arch) else: # Use the default syscall calling convention - it may bring problems _l.warning("No syscall calling convention available for %s/%s", state.arch.name, state.os_name) cc = SYSCALL_CC[state.arch.name]['default'](state.arch) sym_num = cc.syscall_num(state) try: num = state.solver.eval_one(sym_num) except SimSolverError: if allow_unsupported: num = self.unknown_syscall_number else: if not state.solver.satisfiable(): raise AngrUnsupportedSyscallError("The program state is not satisfiable") else: raise AngrUnsupportedSyscallError("Got a symbolic syscall number") proc = self.syscall_from_number(num, allow_unsupported=allow_unsupported, abi=abi) proc.cc = cc return proc
0.00604
def load_default(self):
    """Loads the default J-Link SDK DLL.

    The default J-Link SDK is determined by first checking if ``ctypes``
    can find the DLL, then by searching the platform-specific paths.

    Args:
      self (Library): the ``Library`` instance

    Returns:
      ``True`` if the DLL was loaded, otherwise ``False``.
    """
    path = ctypes_util.find_library(self._sdk)
    if path is None:
        # Couldn't find it the standard way.  Fallback to the non-standard
        # way of finding the J-Link library.  These methods are operating
        # system specific.
        if self._windows or self._cygwin:
            path = next(self.find_library_windows(), None)
        elif sys.platform.startswith('linux'):
            path = next(self.find_library_linux(), None)
        elif sys.platform.startswith('darwin'):
            path = next(self.find_library_darwin(), None)

    if path is not None:
        return self.load(path)

    return False
0.00188
def recvProtocolVersion(self, data):
    """
    Read handshake packet
    If protocol receive from client is unknown
    try best version of protocol version (ProtocolVersion.RFB003008)
    @param data: Stream
    """
    self.readProtocolVersion(data)
    if self._version.value == ProtocolVersion.UNKNOWN:
        log.info("Unknown protocol version %s send 003.008"%data.getvalue())
        #protocol version is unknown try best version we can handle
        self._version.value = ProtocolVersion.RFB003008
    #send same version of
    self.send(self._version)
    #next state read security
    if self._version.value == ProtocolVersion.RFB003003:
        self.expect(4, self.recvSecurityServer)
    else:
        self.expectWithHeader(1, self.recvSecurityList)
0.011792
def data_log_encode(self, fl_1, fl_2, fl_3, fl_4, fl_5, fl_6):
    '''
    Configurable data log probes to be used inside Simulink

    fl_1 : Log value 1 (float)
    fl_2 : Log value 2 (float)
    fl_3 : Log value 3 (float)
    fl_4 : Log value 4 (float)
    fl_5 : Log value 5 (float)
    fl_6 : Log value 6 (float)
    '''
    return MAVLink_data_log_message(fl_1, fl_2, fl_3, fl_4, fl_5, fl_6)
0.004658
def parse_params(self, y=None, y_target=None, batch_size=1, confidence=0, learning_rate=5e-3, binary_search_steps=5, max_iterations=1000, abort_early=True, initial_const=1e-2, clip_min=0, clip_max=1): """ :param y: (optional) A tensor with the true labels for an untargeted attack. If None (and y_target is None) then use the original labels the classifier assigns. :param y_target: (optional) A tensor with the target labels for a targeted attack. :param confidence: Confidence of adversarial examples: higher produces examples with larger l2 distortion, but more strongly classified as adversarial. :param batch_size: Number of attacks to run simultaneously. :param learning_rate: The learning rate for the attack algorithm. Smaller values produce better results but are slower to converge. :param binary_search_steps: The number of times we perform binary search to find the optimal tradeoff- constant between norm of the purturbation and confidence of the classification. :param max_iterations: The maximum number of iterations. Setting this to a larger value will produce lower distortion results. Using only a few iterations requires a larger learning rate, and will produce larger distortion results. :param abort_early: If true, allows early aborts if gradient descent is unable to make progress (i.e., gets stuck in a local minimum). :param initial_const: The initial tradeoff-constant to use to tune the relative importance of size of the perturbation and confidence of classification. If binary_search_steps is large, the initial constant is not important. A smaller value of this constant gives lower distortion results. :param clip_min: (optional float) Minimum input component value :param clip_max: (optional float) Maximum input component value """ # ignore the y and y_target argument self.batch_size = batch_size self.confidence = confidence self.learning_rate = learning_rate self.binary_search_steps = binary_search_steps self.max_iterations = max_iterations self.abort_early = abort_early self.initial_const = initial_const self.clip_min = clip_min self.clip_max = clip_max
0.004112
def add_classdiff_optgroup(parser): """ option group specific to class checking """ g = parser.add_argument_group("Class Checking Options") g.add_argument("--ignore-version-up", action="store_true", default=False) g.add_argument("--ignore-version-down", action="store_true", default=False) g.add_argument("--ignore-platform-up", action="store_true", default=False) g.add_argument("--ignore-platform-down", action="store_true", default=False) g.add_argument("--ignore-absolute-lines", action="store_true", default=False) g.add_argument("--ignore-relative-lines", action="store_true", default=False) g.add_argument("--ignore-deprecated", action="store_true", default=False) g.add_argument("--ignore-added", action="store_true", default=False) g.add_argument("--ignore-pool", action="store_true", default=False) g.add_argument("--ignore-lines", nargs=0, help="ignore relative and absolute line-number changes", action=_opt_cb_ign_lines) g.add_argument("--ignore-platform", nargs=0, help="ignore platform changes", action=_opt_cb_ign_platform) g.add_argument("--ignore-version", nargs=0, help="ignore version changes", action=_opt_cb_ign_version)
0.000714
def get_by_wiki(citiao):
    '''
    Get the wiki record by title.
    '''
    q_res = TabWiki.select().where(TabWiki.title == citiao)
    the_count = q_res.count()
    if the_count == 0 or the_count > 1:
        return None
    else:
        MWiki.update_view_count(citiao)
        return q_res.get()
0.005865
def get_results_file_name(boundaries_id, labels_id, config, annotator_id):
    """Based on the config and the dataset, get the file name to store the
    results."""
    utils.ensure_dir(msaf.config.results_dir)
    file_name = os.path.join(msaf.config.results_dir, "results")
    file_name += "_boundsE%s_labelsE%s" % (boundaries_id, labels_id)
    file_name += "_annotatorE%d" % (annotator_id)
    sorted_keys = sorted(config.keys(), key=str.lower)
    for key in sorted_keys:
        file_name += "_%sE%s" % (key, str(config[key]).replace("/", "_"))

    # Check for max file length
    if len(file_name) > 255 - len(msaf.config.results_ext):
        file_name = file_name[:255 - len(msaf.config.results_ext)]

    return file_name + msaf.config.results_ext
0.001272
def yn2Kn2Der(nu, y, n=0, tol=5e-4, nterms=1, nu_step=0.001): r"""Computes the function :math:`y^{\nu/2} K_{\nu}(y^{1/2})` and its derivatives. Care has been taken to handle the conditions at :math:`y=0`. For `n=0`, uses a direct evaluation of the expression, replacing points where `y=0` with the appropriate value. For `n>0`, uses a general sum expression to evaluate the expression, and handles the value at `y=0` using a power series expansion. Where it becomes infinite, the infinities will have the appropriate sign for a limit approaching zero from the right. Uses a power series expansion around :math:`y=0` to avoid numerical issues. Handles integer `nu` by performing a linear interpolation between values of `nu` slightly above and below the requested value. Parameters ---------- nu : float The order of the modified Bessel function and the exponent of `y`. y : array of float The points to evaluate the function at. These are assumed to be nonegative. n : nonnegative int, optional The order of derivative to take. Set to zero (the default) to get the value. tol : float, optional The distance from zero for which the power series is used. Default is 5e-4. nterms : int, optional The number of terms to include in the power series. Default is 1. nu_step : float, optional The amount to vary `nu` by when handling integer values of `nu`. Default is 0.001. """ n = int(n) y = scipy.asarray(y, dtype=float) if n == 0: K = y**(nu / 2.0) * scipy.special.kv(nu, scipy.sqrt(y)) K[y == 0.0] = scipy.special.gamma(nu) / 2.0**(1.0 - nu) else: K = scipy.zeros_like(y) for k in scipy.arange(0.0, n + 1.0, dtype=float): K += ( scipy.special.binom(n, k) * fixed_poch(1.0 + nu / 2.0 - k, k) * y**(nu / 2.0 - k) * Kn2Der(nu, y, n=n-k) ) # Do the extra work to handle y == 0 only if we need to: mask = (y == 0.0) if (mask).any(): if int(nu) == nu: K[mask] = 0.5 * ( yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) + yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) ) else: if n > nu: K[mask] = scipy.special.gamma(-nu) * fixed_poch(1 + nu - n, n) * scipy.inf else: K[mask] = scipy.special.gamma(nu) * scipy.special.gamma(n + 1.0) / ( 2.0**(1.0 - nu + 2.0 * n) * fixed_poch(1.0 - nu, n) * scipy.special.factorial(n) ) if tol > 0.0: # Replace points within tol (absolute distance) of zero with the power # series approximation: mask = (y <= tol) & (y > 0.0) K[mask] = 0.0 if int(nu) == nu: K[mask] = 0.5 * ( yn2Kn2Der(nu - nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) + yn2Kn2Der(nu + nu_step, y[mask], n=n, tol=tol, nterms=nterms, nu_step=nu_step) ) else: for k in scipy.arange(n, n + nterms, dtype=float): K[mask] += ( scipy.special.gamma(nu) * fixed_poch(1.0 + k - n, n) * y[mask]**(k - n) / ( 2.0**(1.0 - nu + 2 * k) * fixed_poch(1.0 - nu, k) * scipy.special.factorial(k)) ) for k in scipy.arange(0, nterms, dtype=float): K[mask] += ( scipy.special.gamma(-nu) * fixed_poch(1.0 + nu + k - n, n) * y[mask]**(nu + k - n) / ( 2.0**(1.0 + nu + 2.0 * k) * fixed_poch(1.0 + nu, k) * scipy.special.factorial(k) ) ) return K
0.004745
def percent_point(self, U):
    """Given a cumulated distribution value, returns a value in original space.

    Arguments:
        U: `np.ndarray` of shape (n, 1) and values in [0,1]

    Returns:
        `np.ndarray`: Estimated values in original space.
    """
    self.check_fit()
    return norm.ppf(U, loc=self.mean, scale=self.std)
0.008108
def is_ready(self):
    """Is thread & ioloop ready.

    :returns bool:
    """
    if not self._thread:
        return False
    if not self._ready.is_set():
        return False
    return True
0.008696
def _col_widths2xls(self, worksheets):
    """Writes col_widths to xls file

    Format: <col>\t<tab>\t<value>\n
    """
    xls_max_cols, xls_max_tabs = self.xls_max_cols, self.xls_max_tabs
    dict_grid = self.code_array.dict_grid
    for col, tab in dict_grid.col_widths:
        if col < xls_max_cols and tab < xls_max_tabs:
            pys_width = dict_grid.col_widths[(col, tab)]
            xls_width = self.pys_width2xls_width(pys_width)
            worksheets[tab].col(col).width = xls_width
0.003676
def incoming_copying_manipulators(self):
    """**DEPRECATED**: All incoming SON copying manipulators.

    .. versionchanged:: 3.5
       Deprecated.

    .. versionadded:: 2.0
    """
    warnings.warn("Database.incoming_copying_manipulators() is deprecated",
                  DeprecationWarning, stacklevel=2)
    return [manipulator.__class__.__name__
            for manipulator in self.__incoming_copying_manipulators]
0.004338
def run(cls, command, cwd=".", **kwargs): """ Make a subprocess call, collect its output and returncode. Returns CommandResult instance as ValueObject. """ assert isinstance(command, six.string_types) command_result = CommandResult() command_result.command = command use_shell = cls.USE_SHELL if "shell" in kwargs: use_shell = kwargs.pop("shell") # -- BUILD COMMAND ARGS: if six.PY2 and isinstance(command, six.text_type): # -- PREPARE-FOR: shlex.split() # In PY2, shlex.split() requires bytes string (non-unicode). # In PY3, shlex.split() accepts unicode string. command = codecs.encode(command, "utf-8") cmdargs = shlex.split(command) # -- TRANSFORM COMMAND (optional) command0 = cmdargs[0] real_command = cls.COMMAND_MAP.get(command0, None) if real_command: cmdargs0 = real_command.split() cmdargs = cmdargs0 + cmdargs[1:] preprocessors = cls.PREPROCESSOR_MAP.get(command0) if preprocessors: cmdargs = cls.preprocess_command(preprocessors, cmdargs, command, cwd) # -- RUN COMMAND: try: process = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=use_shell, cwd=cwd, **kwargs) out, err = process.communicate() if six.PY2: # py3: we get unicode strings, py2 not default_encoding = 'UTF-8' out = six.text_type(out, process.stdout.encoding or default_encoding) err = six.text_type(err, process.stderr.encoding or default_encoding) process.poll() assert process.returncode is not None command_result.stdout = out command_result.stderr = err command_result.returncode = process.returncode if cls.DEBUG: print("shell.cwd={0}".format(kwargs.get("cwd", None))) print("shell.command: {0}".format(" ".join(cmdargs))) print("shell.command.output:\n{0};".format(command_result.output)) except OSError as e: command_result.stderr = u"OSError: %s" % e command_result.returncode = e.errno assert e.errno != 0 postprocessors = cls.POSTPROCESSOR_MAP.get(command0) if postprocessors: command_result = cls.postprocess_command(postprocessors, command_result) return command_result
0.005181
def load_keys(key_file, origin_passphrase, stash, passphrase, backend):
    """Load all keys from an exported key file to the stash

    `KEY_FILE` is the exported stash file to load keys from
    """
    stash = _get_stash(backend, stash, passphrase)

    click.echo('Importing all keys from {0}...'.format(key_file))
    stash.load(origin_passphrase, key_file=key_file)
    click.echo('Import complete!')
0.002463
def unregister_editorstack(self, editorstack):
    """Removing editorstack only if it's not the last remaining"""
    self.remove_last_focus_editorstack(editorstack)
    if len(self.editorstacks) > 1:
        index = self.editorstacks.index(editorstack)
        self.editorstacks.pop(index)
        return True
    else:
        # editorstack was not removed!
        return False
0.004706
def p_bound(p):
    """ bound : expr
    """
    p[0] = make_bound(make_number(OPTIONS.array_base.value, lineno=p.lineno(1)),
                      p[1], p.lexer.lineno)
0.005525
def annual_heating_design_day_990(self):
    """A design day object representing the annual 99.0% heating design day."""
    if bool(self._winter_des_day_dict) is True:
        return DesignDay.from_ashrae_dict_heating(
            self._winter_des_day_dict, self.location, True,
            self._stand_press_at_elev)
    else:
        return None
0.007979
def write_single_coil(self, bit_addr, bit_value): """Modbus function WRITE_SINGLE_COIL (0x05) :param bit_addr: bit address (0 to 65535) :type bit_addr: int :param bit_value: bit value to write :type bit_value: bool :returns: True if write ok or None if fail :rtype: bool or None """ # check params if not (0 <= int(bit_addr) <= 65535): self.__debug_msg('write_single_coil(): bit_addr out of range') return None # build frame bit_value = 0xFF if bit_value else 0x00 tx_buffer = self._mbus_frame(const.WRITE_SINGLE_COIL, struct.pack('>HBB', bit_addr, bit_value, 0)) # send request s_send = self._send_mbus(tx_buffer) # check error if not s_send: return None # receive f_body = self._recv_mbus() # check error if not f_body: return None # check fix frame size if len(f_body) != 4: self.__last_error = const.MB_RECV_ERR self.__debug_msg('write_single_coil(): rx frame size error') self.close() return None # register extract (rx_bit_addr, rx_bit_value, rx_padding) = struct.unpack('>HBB', f_body[:4]) # check bit write is_ok = (rx_bit_addr == bit_addr) and (rx_bit_value == bit_value) return True if is_ok else None
0.002793
def _internal_add(self, pattern: Pattern, label, renaming) -> int: """Add a new pattern to the matcher. Equivalent patterns are not added again. However, patterns that are structurally equivalent, but have different constraints or different variable names are distinguished by the matcher. Args: pattern: The pattern to add. Returns: The internal id for the pattern. This is mainly used by the :class:`CommutativeMatcher`. """ pattern_index = len(self.patterns) renamed_constraints = [c.with_renamed_vars(renaming) for c in pattern.local_constraints] constraint_indices = [self._add_constraint(c, pattern_index) for c in renamed_constraints] self.patterns.append((pattern, label, constraint_indices)) self.pattern_vars.append(renaming) pattern = rename_variables(pattern.expression, renaming) state = self.root patterns_stack = [deque([pattern])] self._process_pattern_stack(state, patterns_stack, renamed_constraints, pattern_index) return pattern_index
0.007201
def fast_distance(r1: 'Region', r2: 'Region'):
    """ A quicker way of calculating approximate distance.
    Lower accuracy but faster results."""
    return abs(r1.x - r2.x) + abs(r1.y - r2.y)
0.015152
def partition_by_vid(self, ref):
    """A much faster way to get partitions, by vid only"""
    from ambry.orm import Partition
    p = self.session.query(Partition).filter(Partition.vid == str(ref)).first()

    if p:
        return self.wrap_partition(p)
    else:
        return None
0.009554
def add_node(self, node_name, dataframe=False):
    """
    Add a single node to the network.
    """
    if node_name is None:
        return None

    return self.add_nodes([node_name], dataframe=dataframe)
0.009302
def _polarBreaks(self): """Determine where breaks in a polar orbiting satellite orbit occur. Looks for sign changes in latitude (magnetic or geographic) as well as breaks in UT. """ if self.orbit_index is None: raise ValueError('Orbit properties must be defined at ' + 'pysat.Instrument object instantiation.' + 'See Instrument docs.') else: try: self.sat[self.orbit_index] except ValueError: raise ValueError('Provided orbit index does not appear to ' + 'exist in loaded data') # determine where orbit index goes from positive to negative pos = (self.sat[self.orbit_index] >= 0) npos = -pos change = (pos.values[:-1] & npos.values[1:]) | (npos.values[:-1] & pos.values[1:]) ind, = np.where(change) ind += 1 ut_diff = Series(self.sat.data.index).diff() ut_ind, = np.where(ut_diff / self.orbit_period > 0.95) if len(ut_ind) > 0: ind = np.hstack((ind, ut_ind)) ind = np.sort(ind) ind = np.unique(ind) # print 'Time Gap' # create orbitbreak index, ensure first element is always 0 if ind[0] != 0: ind = np.hstack((np.array([0]), ind)) # number of orbits num_orbits = len(ind) # set index of orbit breaks self._orbit_breaks = ind # set number of orbits for the day self.num = num_orbits
0.002424
def _get_chartjs_chart(self, xcol, ycol, chart_type, label=None, opts={}, style={}, options={}, **kwargs): """ Get Chartjs html """ try: xdata = list(self.df[xcol]) except Exception as e: self.err(e, self._get_chartjs_chart, "Can not get data for x field ", ycol) return if label is None: label = "Data" try: if type(ycol) != list: ydata = [dict(name=label, data=list(self.df[ycol]))] else: ydata = [] for col in ycol: y = {} y["name"] = col y["data"] = list(self.df[col]) ydata.append(y) except Exception as e: self.err(e, self._get_chartjs_chart, "Can not get data for y field ", xcol) return try: slug = str(uuid.uuid4()) html = chart.get(slug, xdata, ydata, label, opts, style, chart_type, **kwargs) return html except Exception as e: self.err(e, self._get_chartjs_chart, "Can not get chart")
0.002408
def update_from_devices(self):
    """Retrieve a list of &devices and values."""
    # _LOGGER.warning("update from devices")
    try:
        rest = requests.get(URL_DEVICES.format(self._url))
        if rest.status_code != 200:
            _LOGGER.error("Devices returned %s", rest.status_code)
            return False
        self.devices.update_devices(rest.json())
        return True
    except requests.exceptions.ConnectionError as conn_err:
        _LOGGER.error("Could not connect: %s", conn_err)
    except Exception as err:  # pylint: disable=broad-except
        _LOGGER.error(err)
0.003091
def DbGetDeviceAliasList(self, argin):
    """ Get device alias name with a specific filter

    :param argin: The filter
    :type: tango.DevString
    :return: Device alias list
    :rtype: tango.DevVarStringArray """
    self._log.debug("In DbGetDeviceAliasList()")
    if not argin:
        argin = "%"
    else:
        argin = replace_wildcard(argin)
    return self.db.get_device_alias_list(argin)
0.004454
def put_logging(Bucket, TargetBucket=None, TargetPrefix=None, TargetGrants=None, region=None, key=None, keyid=None, profile=None): ''' Given a valid config, update the logging parameters for a bucket. Returns {updated: true} if parameters were updated and returns {updated: False} if parameters were not updated. CLI Example: .. code-block:: bash salt myminion boto_s3_bucket.put_logging my_bucket log_bucket '[{...}]' prefix ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) logstate = {} targets = {'TargetBucket': TargetBucket, 'TargetGrants': TargetGrants, 'TargetPrefix': TargetPrefix} for key, val in six.iteritems(targets): if val is not None: logstate[key] = val if logstate: logstatus = {'LoggingEnabled': logstate} else: logstatus = {} if TargetGrants is not None and isinstance(TargetGrants, six.string_types): TargetGrants = salt.utils.json.loads(TargetGrants) conn.put_bucket_logging(Bucket=Bucket, BucketLoggingStatus=logstatus) return {'updated': True, 'name': Bucket} except ClientError as e: return {'updated': False, 'error': __utils__['boto3.get_error'](e)}
0.003676
def step_impl13(context, runs):
    """Check called apps / files.

    :param runs: expected number of records.
    :param context: test context.
    """
    executor_ = context.fuzz_executor
    stats = executor_.stats
    count = stats.cumulated_counts()
    assert count == runs, "VERIFY: Number of recorded runs."
    successful_runs = stats.cumulated_counts_for_status(Status.SUCCESS)
    assert successful_runs == runs
0.002358
def envCheckFilter(self, name, attr):
    """Check if a specific graph attribute is enabled or disabled through
    the use of a filter based on include_<name> and exclude_<name>
    environment variables.

    @param name: Name of the Filter.
    @param attr: Name of the Attribute.
    @return:     Return True if the attribute is enabled.
    """
    flt = self._filters.get(name)
    if flt:
        return flt.check(attr)
    else:
        raise AttributeError("Undefined filter: %s" % name)
0.012411
def mass_3d(self, r, kwargs, bool_list=None):
    """
    computes the mass within a 3d sphere of radius r

    :param r: radius (in angular units)
    :param kwargs: list of keyword arguments of lens model parameters matching the lens model classes
    :param bool_list: list of bools that are part of the output
    :return: mass (in angular units, modulo epsilon_crit)
    """
    bool_list = self._bool_list(bool_list)
    mass_3d = 0
    for i, func in enumerate(self.func_list):
        if bool_list[i] is True:
            kwargs_i = {k:v for k, v in kwargs[i].items() if not k in ['center_x', 'center_y']}
            mass_3d_i = func.mass_3d_lens(r, **kwargs_i)
            mass_3d += mass_3d_i
            #except:
            #    raise ValueError('Lens profile %s does not support a 3d mass function!' % self.model_list[i])
    return mass_3d
0.008677
def delete_namespaced_network_policy(self, name, namespace, **kwargs): """ delete a NetworkPolicy This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_namespaced_network_policy(name, namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the NetworkPolicy (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_namespaced_network_policy_with_http_info(name, namespace, **kwargs) else: (data) = self.delete_namespaced_network_policy_with_http_info(name, namespace, **kwargs) return data
0.003837
def draw_pegasus(G, crosses=False, **kwargs): """Draws graph G in a Pegasus topology. If `linear_biases` and/or `quadratic_biases` are provided, these are visualized on the plot. Parameters ---------- G : NetworkX graph Should be a Pegasus graph or a subgraph of a Pegasus graph, a product of dwave_networkx.pegasus_graph. linear_biases : dict (optional, default {}) A dict of biases associated with each node in G. Should be of form {node: bias, ...}. Each bias should be numeric. quadratic_biases : dict (optional, default {}) A dict of biases associated with each edge in G. Should be of form {edge: bias, ...}. Each bias should be numeric. Self-loop edges (i.e., :math:`i=j`) are treated as linear biases. crosses: boolean (optional, default False) If crosses is True, K_4,4 subgraphs are shown in a cross rather than L configuration. Ignored if G was defined with nice_coordinates=True. kwargs : optional keywords See networkx.draw_networkx() for a description of optional keywords, with the exception of the `pos` parameter which is not used by this function. If `linear_biases` or `quadratic_biases` are provided, any provided `node_color` or `edge_color` arguments are ignored. Examples -------- >>> # Plot a Pegasus graph with size parameter 2 >>> import networkx as nx >>> import dwave_networkx as dnx >>> import matplotlib.pyplot as plt >>> G = dnx.pegasus_graph(2) >>> dnx.draw_pegasus(G) >>> plt.show() """ draw_qubit_graph(G, pegasus_layout(G, crosses=crosses), **kwargs)
0.000593
def _get_range_timestamp_key(self, start: Key, end: Key, count: int = 0) -> List[Tuple[Key, Any]]:
    """ Returns the list of items from the store based on the given time range or count.

    This is used when the key being used is a TIMESTAMP key.
    """
    raise NotImplementedError()
0.011561
def multi_token_match(stream_item, aligner_data): ''' iterate through tokens looking for near-exact matches to strings in si.ratings...mentions ''' tagger_id = _get_tagger_id(stream_item, aligner_data) sentences = stream_item.body.sentences.get(tagger_id) if not sentences: return ## construct a list of tuples, where the first part of each tuple ## is a tuple of cleansed strings, and the second part is the ## Token object from which it came. tokens = map(lambda tok: (cleanse(tok.token.decode('utf8')).split(' '), tok), itertools.chain(*[sent.tokens for sent in sentences])) required_annotator_id = aligner_data['annotator_id'] for annotator_id, ratings in stream_item.ratings.items(): if (required_annotator_id is None) or (annotator_id == required_annotator_id): for rating in ratings: label = Label(annotator=rating.annotator, target=rating.target) num_tokens_matched = 0 for tok in look_ahead_match(rating, tokens): if aligner_data.get('update_labels'): tok.labels.pop(annotator_id, None) add_annotation(tok, label) num_tokens_matched += 1 if num_tokens_matched == 0: logger.warning('multi_token_match didn\'t actually match ' 'entity %r in stream_id %r', rating.target.target_id, stream_item.stream_id) else: logger.debug('matched %d tokens for %r in %r', num_tokens_matched, rating.target.target_id, stream_item.stream_id)
0.005376
def write_as_json(blob, dest, indent=0, sort_keys=True):
    """Writes `blob` as JSON to the filepath `dest` or the filestream `dest` (if it isn't a string)
    uses utf-8 encoding if the filepath is given (does not change the encoding if dest is already open).
    """
    opened_out = False
    if is_str_type(dest):
        out = codecs.open(dest, mode='w', encoding='utf-8')
        opened_out = True
    else:
        out = dest
    try:
        json.dump(blob, out, indent=indent, sort_keys=sort_keys)
        out.write('\n')
    finally:
        out.flush()
        if opened_out:
            out.close()
0.00491
def process_stencil(cookbook, cookbook_name, template_pack, force_argument, stencil_set, stencil, written_files): """Process the stencil requested, writing any missing files as needed. The stencil named 'stencilset_name' should be one of templatepack's stencils. """ # force can be passed on the command line or forced in a stencil's options force = force_argument or stencil['options'].get('force', False) stencil['files'] = stencil.get('files') or {} files = { # files.keys() are template paths, files.values() are target paths # {path to template: rendered target path, ... } os.path.join(stencil_set.path, tpl): os.path.join(cookbook.path, tgt) for tgt, tpl in stencil['files'].items() } stencil['partials'] = stencil.get('partials') or {} partials = { # files.keys() are template paths, files.values() are target paths # {path to template: rendered target path, ... } os.path.join(stencil_set.path, tpl): os.path.join(cookbook.path, tgt) for tgt, tpl in stencil['partials'].items() } stencil['binaries'] = stencil.get('binaries') or {} binaries = { # files.keys() are binary paths, files.values() are target paths # {path to binary: rendered target path, ... } os.path.join(stencil_set.path, tpl): os.path.join(cookbook.path, tgt) for tgt, tpl in stencil['binaries'].items() } template_map = _build_template_map(cookbook, cookbook_name, stencil) filetable = templating.render_templates(*files.keys(), **template_map) _render_templates(files, filetable, written_files, force) parttable = templating.render_templates(*partials.keys(), **template_map) _render_templates(partials, parttable, written_files, force, open_mode='a') # no templating needed for binaries, just pass off to the copy method _render_binaries(binaries, written_files) # merge metadata.rb dependencies stencil_metadata_deps = {'depends': stencil.get('dependencies', {})} stencil_metadata = book.MetadataRb.from_dict(stencil_metadata_deps) cookbook.metadata.merge(stencil_metadata) # merge Berksfile dependencies stencil_berks_deps = {'cookbook': stencil.get('berks_dependencies', {})} stencil_berks = book.Berksfile.from_dict(stencil_berks_deps) cookbook.berksfile.merge(stencil_berks) return cookbook
0.000412
def center_eigenvalue_diff(mat):
    """Compute the eigvals of mat and then find the center eigval difference."""
    N = len(mat)
    evals = np.sort(la.eigvals(mat))
    diff = np.abs(evals[N/2] - evals[N/2-1])
    return diff
0.008772
def desired_destination(self, network, edge): """Returns the agents next destination given their current location on the network. An ``Agent`` chooses one of the out edges at random. The probability that the ``Agent`` will travel along a specific edge is specified in the :class:`QueueNetwork's<.QueueNetwork>` transition matrix. Parameters ---------- network : :class:`.QueueNetwork` The :class:`.QueueNetwork` where the Agent resides. edge : tuple A 4-tuple indicating which edge this agent is located at. The first two slots indicate the current edge's source and target vertices, while the third slot indicates this edges ``edge_index``. The last slot indicates the edge type of that edge Returns ------- out : int Returns an the edge index corresponding to the agents next edge to visit in the network. See Also -------- :meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>` method that returns the transition probabilities for each edge in the graph. """ n = len(network.out_edges[edge[1]]) if n <= 1: return network.out_edges[edge[1]][0] u = uniform() pr = network._route_probs[edge[1]] k = _choice(pr, u, n) # _choice returns an integer between 0 and n-1 where the # probability of k being selected is equal to pr[k]. return network.out_edges[edge[1]][k]
0.001244
def _find_unprocessed(config):
    """Find any finished directories that have not been processed.
    """
    reported = _read_reported(config["msg_db"])
    for dname in _get_directories(config):
        if os.path.isdir(dname) and dname not in reported:
            if _is_finished_dumping(dname):
                yield dname
0.003058
def load_plugins(config, plugin_kwargs): """ Discover and instantiate plugins. Args: config (dict): loaded configuration for the Gordon service. plugin_kwargs (dict): keyword arguments to give to plugins during instantiation. Returns: Tuple of 3 lists: list of names of plugins, list of instantiated plugin objects, and any errors encountered while loading/instantiating plugins. A tuple of three empty lists is returned if there are no plugins found or activated in gordon config. """ installed_plugins = _gather_installed_plugins() metrics_plugin = _get_metrics_plugin(config, installed_plugins) if metrics_plugin: plugin_kwargs['metrics'] = metrics_plugin active_plugins = _get_activated_plugins(config, installed_plugins) if not active_plugins: return [], [], [], None plugin_namespaces = _get_plugin_config_keys(active_plugins) plugin_configs = _load_plugin_configs(plugin_namespaces, config) plugin_names, plugins, errors = _init_plugins( active_plugins, installed_plugins, plugin_configs, plugin_kwargs) return plugin_names, plugins, errors, plugin_kwargs
0.000825
def cufflinks(args): """ %prog cufflinks folder reference Run cufflinks on a folder containing tophat results. """ p = OptionParser(cufflinks.__doc__) p.add_option("--gtf", help="Reference annotation [default: %default]") p.set_cpus() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, reference = args cpus = opts.cpus gtf = opts.gtf transcripts = "transcripts.gtf" mm = MakeManager() gtfs = [] for bam in iglob(folder, "*.bam"): pf = op.basename(bam).split(".")[0] outdir = pf + "_cufflinks" cmd = "cufflinks" cmd += " -o {0}".format(outdir) cmd += " -p {0}".format(cpus) if gtf: cmd += " -g {0}".format(gtf) cmd += " --frag-bias-correct {0}".format(reference) cmd += " --multi-read-correct" cmd += " {0}".format(bam) cgtf = op.join(outdir, transcripts) mm.add(bam, cgtf, cmd) gtfs.append(cgtf) assemblylist = "assembly_list.txt" cmd = 'find . -name "{0}" > {1}'.format(transcripts, assemblylist) mm.add(gtfs, assemblylist, cmd) mergedgtf = "merged/merged.gtf" cmd = "cuffmerge" cmd += " -o merged" cmd += " -p {0}".format(cpus) if gtf: cmd += " -g {0}".format(gtf) cmd += " -s {0}".format(reference) cmd += " {0}".format(assemblylist) mm.add(assemblylist, mergedgtf, cmd) mm.write()
0.000683
def _process_content_body(self, channel, payload):
    """Process Content Body frames"""
    partial = self.partial_messages[channel]
    partial.add_payload(payload)

    if partial.complete:
        #
        # Stick the message in the queue and go back to
        # waiting for method frames
        #
        self._quick_put((channel, partial.method_sig,
                         partial.args, partial.msg))
        self.partial_messages.pop(channel, None)
        self.expected_types[channel] = 1
0.00365
def get_report_interpreted(year, report_type): """Download, exract, and interpret a CO-TRACER report. Generate a URL for the given report, download the corresponding archive, extract the CSV report, and interpret it using TRACER-specific logic. @param year: The year for which data should be downloaded. @type year: int @param report_type: The type of report that should be downloaded. Should be one of the strings in constants.REPORT_TYPES. @type report_type: str @return: A collection of dict with the loaded data. Note that this data has been interpreted so data fields like floating point values, dates, and boolean values are no longer strings. @rtype: Iterable over dict """ if not is_valid_report_type(report_type): msg = '%s is not a valid report type.' % report_type raise ValueError(msg) raw_report = get_report_raw(year, report_type) interpreter = REPORT_TYPE_INTERPRETERS[report_type] return interpreter(raw_report)
0.000975
def main(output):
    """
    Generate a c7n-org gcp projects config file
    """
    client = Session().client('cloudresourcemanager', 'v1', 'projects')

    results = []
    for page in client.execute_paged_query('list', {}):
        for project in page.get('projects', []):
            if project['lifecycleState'] != 'ACTIVE':
                continue

            project_info = {
                'project_id': project['projectId'],
                'name': project['name'],
            }

            if 'labels' in project:
                project_info['tags'] = [
                    'label:%s:%s' % (k, v) for k, v in project.get('labels', {}).items()]

            results.append(project_info)

    output.write(
        yaml.safe_dump({'projects': results}, default_flow_style=False))
0.002516
def frustum(left, right, bottom, top, znear, zfar):
    """Create view frustum matrix."""
    assert right != left
    assert bottom != top
    assert znear != zfar

    M = np.zeros((4, 4), dtype=np.float32)
    M[0, 0] = +2.0 * znear / (right - left)
    M[2, 0] = (right + left) / (right - left)
    M[1, 1] = +2.0 * znear / (top - bottom)
    M[3, 1] = (top + bottom) / (top - bottom)
    M[2, 2] = -(zfar + znear) / (zfar - znear)
    M[3, 2] = -2.0 * znear * zfar / (zfar - znear)
    M[2, 3] = -1.0
    return M
0.028455
def parse(ifp, pb_cls, **kwargs):
    """Parse a stream.

    Args:
        ifp (string or file-like object): input stream.
        pb_cls (protobuf.message.Message.__class__): The class object of
            the protobuf message type encoded in the stream.
    """
    mode = 'rb'
    if isinstance(ifp, str):
        istream = open(ifp, mode=mode, **kwargs)
    else:
        istream = open(fileobj=ifp, mode=mode, **kwargs)
    with istream:
        for data in istream:
            pb_obj = pb_cls()
            pb_obj.ParseFromString(data)
            yield pb_obj
0.001757
def _get_xfstyle(self, worksheets, key): """Gets XFStyle for cell key""" row, col, tab = key dict_grid = self.code_array.dict_grid dict_grid.cell_attributes._update_table_cache() pys_style = dict_grid.cell_attributes[key] pys_style_above = dict_grid.cell_attributes[row - 1, col, tab] pys_style_left = dict_grid.cell_attributes[row, col - 1, tab] xfstyle = xlwt.XFStyle() # Font # ---- font = self._get_font(pys_style) if font is not None: xfstyle.font = font # Alignment # --------- alignment = self._get_alignment(pys_style) if alignment is not None: xfstyle.alignment = alignment # Background / pattern # -------------------- pattern = self._get_pattern(pys_style) if pattern is not None: xfstyle.pattern = pattern # Border # ------ borders = self._get_borders(pys_style, pys_style_above, pys_style_left) if borders is not None: xfstyle.borders = borders return xfstyle
0.001767
def run(self, endpoint, data=None, headers=None, extra_options=None): """ Performs the request :param endpoint: the endpoint to be called i.e. resource/v1/query? :type endpoint: str :param data: payload to be uploaded or request parameters :type data: dict :param headers: additional headers to be passed through as a dictionary :type headers: dict :param extra_options: additional options to be used when executing the request i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX or 3XX status codes :type extra_options: dict """ extra_options = extra_options or {} session = self.get_conn(headers) if self.base_url and not self.base_url.endswith('/') and \ endpoint and not endpoint.startswith('/'): url = self.base_url + '/' + endpoint else: url = (self.base_url or '') + (endpoint or '') req = None if self.method == 'GET': # GET uses params req = requests.Request(self.method, url, params=data, headers=headers) elif self.method == 'HEAD': # HEAD doesn't use params req = requests.Request(self.method, url, headers=headers) else: # Others use data req = requests.Request(self.method, url, data=data, headers=headers) prepped_request = session.prepare_request(req) self.log.info("Sending '%s' to url: %s", self.method, url) return self.run_and_check(session, prepped_request, extra_options)
0.002092
def delete_post(apikey, post_id, username, password, publish):
    """
    blogger.deletePost(api_key, post_id, username, password, 'publish')
    => boolean
    """
    user = authenticate(username, password, 'zinnia.delete_entry')
    entry = Entry.objects.get(id=post_id, authors=user)
    entry.delete()
    return True
0.003096
def _add_to_ngcorpus(self, corpus, words, count):
    """Build up a corpus entry recursively.

    Parameters
    ----------
    corpus : Corpus
        The corpus
    words : [str]
        Words to add to the corpus
    count : int
        Count of words
    """
    if words[0] not in corpus:
        corpus[words[0]] = Counter()

    if len(words) == 1:
        corpus[words[0]][None] += count
    else:
        self._add_to_ngcorpus(corpus[words[0]], words[1:], count)
0.003724
def _find_lib_path(): """Find mxnet library.""" curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__))) amalgamation_lib_path = os.path.join(curr_path, '../../lib/libmxnet_predict.so') if os.path.exists(amalgamation_lib_path) and os.path.isfile(amalgamation_lib_path): lib_path = [amalgamation_lib_path] return lib_path else: logging.info('Cannot find libmxnet_predict.so. Will search for MXNet library using libinfo.py then.') try: from mxnet.libinfo import find_lib_path lib_path = find_lib_path() return lib_path except ImportError: libinfo_path = os.path.join(curr_path, '../../python/mxnet/libinfo.py') if os.path.exists(libinfo_path) and os.path.isfile(libinfo_path): libinfo = {'__file__': libinfo_path} exec(compile(open(libinfo_path, "rb").read(), libinfo_path, 'exec'), libinfo, libinfo) lib_path = libinfo['find_lib_path']() return lib_path else: raise RuntimeError('Cannot find libinfo.py at %s.' % libinfo_path)
0.006061
def synthesize(self, duration):
    """
    Synthesize white noise

    Args:
        duration (numpy.timedelta64): The duration of the synthesized sound
    """
    sr = self.samplerate.samples_per_second
    seconds = duration / Seconds(1)
    samples = np.random.uniform(low=-1., high=1., size=int(sr * seconds))
    return AudioSamples(samples, self.samplerate)
0.004988
def json(self, align_threshold: float = 0.0) -> Dict: """ Returns a dictionary suitable for json.dumps() representing all the information in the class. It is initialized with any keys present in the corresponding `TranslatorInput` object's pass_through_dict. Keys from here that are not overwritten by Sockeye will thus be passed through to the output. :param align_threshold: If alignments are defined, only print ones over this threshold. :return: A dictionary. """ _d = self.pass_through_dict # type: Dict[str, Any] _d['sentence_id'] = self.sentence_id _d['translation'] = self.translation _d['score'] = self.score if self.nbest_translations is not None and len(self.nbest_translations) > 1: _d['translations'] = self.nbest_translations _d['scores'] = self.nbest_scores if self.nbest_attention_matrices: extracted_alignments = [] for alignment_matrix in self.nbest_attention_matrices: extracted_alignments.append(list(utils.get_alignments(alignment_matrix, threshold=align_threshold))) _d['alignments'] = extracted_alignments return _d
0.004739
def _set_number_of_plots(self, n): """ Adjusts number of plots & curves to the desired value the gui. """ # multi plot, right number of plots and curves = great! if self.button_multi.is_checked() \ and len(self._curves) == len(self.plot_widgets) \ and len(self._curves) == n: return # single plot, right number of curves = great! if not self.button_multi.is_checked() \ and len(self.plot_widgets) == 1 \ and len(self._curves) == n: return # time to rebuild! # don't show the plots as they are built self.grid_plot.block_events() # make sure the number of curves is on target while len(self._curves) > n: self._curves.pop(-1) while len(self._curves) < n: self._curves.append(_g.PlotCurveItem(pen = (len(self._curves), n))) # figure out the target number of plots if self.button_multi.is_checked(): n_plots = n else: n_plots = min(n,1) # clear the plots while len(self.plot_widgets): # pop the last plot widget and remove all items p = self.plot_widgets.pop(-1) p.clear() # remove it from the grid self.grid_plot.remove_object(p) # add new plots for i in range(n_plots): self.plot_widgets.append(self.grid_plot.place_object(_g.PlotWidget(), 0, i, alignment=0)) # loop over the curves and add them to the plots for i in range(n): self.plot_widgets[min(i,len(self.plot_widgets)-1)].addItem(self._curves[i]) # loop over the ROI's and add them if self.ROIs is not None: for i in range(len(self.ROIs)): # get the ROIs for this plot ROIs = self.ROIs[i] if not _spinmob.fun.is_iterable(ROIs): ROIs = [ROIs] # loop over the ROIs for this plot for ROI in ROIs: # determine which plot to add the ROI to m = min(i, len(self.plot_widgets)-1) # add the ROI to the appropriate plot if m>=0: self.plot_widgets[m].addItem(ROI) # show the plots self.grid_plot.unblock_events()
0.009479
def cache_mappings(file_path): """ Make a full mapping for 2 --> 3 columns. Output the mapping to json in the specified file_path. Note: This file is currently called maps.py, full path is PmagPy/pmagpy/mapping/maps.py. Parameters ---------- file_path : string with full file path to dump mapping json. Returns --------- maps : nested dictionary with format {table_name: {magic2_col_name: magic3_col_name, ...}, ...} """ def get_2_to_3(dm_type, dm): table_names3_2_table_names2 = {'measurements': ['magic_measurements'], 'locations': ['er_locations'], 'sites': ['er_sites', 'pmag_sites'], 'samples': ['er_samples', 'pmag_samples'], 'specimens': ['er_specimens', 'pmag_specimens'], 'ages': ['er_ages'], 'criteria': ['pmag_criteria'], 'images': ['er_images'], 'contribution': []} table_names3 = table_names3_2_table_names2[dm_type] dictionary = {} for label, row in dm.iterrows(): # if there are one or more corresponding 2.5 columns: if isinstance(row['previous_columns'], list): for previous_values in row['previous_columns']: previous_table = previous_values['table'] previous_value = previous_values['column'] if previous_table in table_names3: add_to_dict(previous_value, label, dictionary) elif previous_table in ["pmag_results", "rmag_results"]: if label not in dictionary.values(): if previous_value not in dictionary.keys(): add_to_dict(previous_value, label, dictionary) return dictionary def add_to_dict(key, value, dictionary): if key in dictionary: if value != dictionary[key]: print('W- OVERWRITING') print('was:', key, dictionary[key]) print('now:', key, value) dictionary[key] = value # begin data_model = DataModel() maps = {} for table_name in data_model.dm: dm = data_model.dm[table_name] new_mapping = get_2_to_3(table_name, dm) maps[table_name] = new_mapping # write maps out to file f = open(file_path, 'w') f.write("all_maps = ") json.dump(maps, f) f.close() return maps
0.001498
def with_stmt__26(self, with_loc, context, with_var, colon_loc, body): """(2.6, 3.0) with_stmt: 'with' test [ with_var ] ':' suite""" if with_var: as_loc, optional_vars = with_var item = ast.withitem(context_expr=context, optional_vars=optional_vars, as_loc=as_loc, loc=context.loc.join(optional_vars.loc)) else: item = ast.withitem(context_expr=context, optional_vars=None, as_loc=None, loc=context.loc) return ast.With(items=[item], body=body, keyword_loc=with_loc, colon_loc=colon_loc, loc=with_loc.join(body[-1].loc))
0.005706
def console_save_asc(con: tcod.console.Console, filename: str) -> bool: """Save a console to a non-delimited ASCII `.asc` file.""" return bool( lib.TCOD_console_save_asc(_console(con), filename.encode("utf-8")) )
0.00431
def _parse_remote_response(self, response): """ Parse simple JWKS or signed JWKS from the HTTP response. :param response: HTTP response from the 'jwks_uri' or 'signed_jwks_uri' endpoint :return: response parsed as JSON or None """ # Check if the content type is the right one. try: if response.headers["Content-Type"] == 'application/json': logger.debug( "Loaded JWKS: %s from %s" % (response.text, self.source)) try: return json.loads(response.text) except ValueError: return None elif response.headers["Content-Type"] == 'application/jwt': logger.debug( "Signed JWKS: %s from %s" % (response.text, self.source)) _jws = factory(response.text) _resp = _jws.verify_compact( response.text, keys=self.verify_keys.get_signing_key()) return _resp else: logger.error('Wrong content type: {}'.format( response.headers['Content-Type'])) raise ValueError('Content-type mismatch') except KeyError: pass
0.001561
def find_by_fields(self, table, queryset={}):
    '''
    Query the database for records that match all of the given conditions.

    Args:
        table: table name, str
        queryset: dict mapping field names (keys) to the values to match
    return:
        on success: [dict] the matching records
        on failure: -1, and the error message is printed
    '''
    querys = ""
    for k, v in queryset.items():
        querys += "{} = '{}' and ".format(k, v)
    sql = "select * from {} where {} ".format(
        table, querys[:-4])
    res = self.query(sql)
    return res
0.007722
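The query above interpolates values straight into the SQL string; below is a hedged sketch of a parameterized variant (not the original author's code, and assuming a DB-API driver such as pymysql that uses %s placeholders), which lets the driver handle quoting:

def find_by_fields_params(cursor, table, queryset):
    # Values travel as bound parameters; only trusted identifiers are formatted in.
    where = " and ".join("{} = %s".format(k) for k in queryset)
    sql = "select * from {} where {}".format(table, where)
    cursor.execute(sql, tuple(queryset.values()))
    return cursor.fetchall()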
def show_discrete_data(values, grid, title=None, method='', force_show=False, fig=None, **kwargs): """Display a discrete 1d or 2d function. Parameters ---------- values : `numpy.ndarray` The values to visualize. grid : `RectGrid` or `RectPartition` Grid of the values. title : string, optional Set the title of the figure. method : string, optional 1d methods: 'plot' : graph plot 'scatter' : scattered 2d points (2nd axis <-> value) 2d methods: 'imshow' : image plot with coloring according to value, including a colorbar. 'scatter' : cloud of scattered 3d points (3rd axis <-> value) 'wireframe', 'plot_wireframe' : surface plot force_show : bool, optional Whether the plot should be forced to be shown now or deferred until later. Note that some backends always displays the plot, regardless of this value. fig : `matplotlib.figure.Figure`, optional The figure to show in. Expected to be of same "style", as the figure given by this function. The most common usecase is that fig is the return value from an earlier call to this function. Default: New figure interp : {'nearest', 'linear'}, optional Interpolation method to use. Default: 'nearest' axis_labels : string, optional Axis labels, default: ['x', 'y'] update_in_place : bool, optional Update the content of the figure in-place. Intended for faster real time plotting, typically ~5 times faster. This is only performed for ``method == 'imshow'`` with real data and ``fig != None``. Otherwise this parameter is treated as False. Default: False axis_fontsize : int, optional Fontsize for the axes. Default: 16 colorbar : bool, optional Argument relevant for 2d plots using ``method='imshow'``. If ``True``, include a colorbar in the plot. Default: True kwargs : {'figsize', 'saveto', ...}, optional Extra keyword arguments passed on to display method See the Matplotlib functions for documentation of extra options. Returns ------- fig : `matplotlib.figure.Figure` The resulting figure. It is also shown to the user. See Also -------- matplotlib.pyplot.plot : Show graph plot matplotlib.pyplot.imshow : Show data as image matplotlib.pyplot.scatter : Show scattered 3d points """ # Importing pyplot takes ~2 sec, only import when needed. 
import matplotlib.pyplot as plt args_re = [] args_im = [] dsp_kwargs = {} sub_kwargs = {} arrange_subplots = (121, 122) # horzontal arrangement # Create axis labels which remember their original meaning axis_labels = kwargs.pop('axis_labels', ['x', 'y']) values_are_complex = not is_real_dtype(values.dtype) figsize = kwargs.pop('figsize', None) saveto = kwargs.pop('saveto', None) interp = kwargs.pop('interp', 'nearest') axis_fontsize = kwargs.pop('axis_fontsize', 16) colorbar = kwargs.pop('colorbar', True) # Normalize input interp, interp_in = str(interp).lower(), interp method, method_in = str(method).lower(), method # Check if we should and can update the plot in-place update_in_place = kwargs.pop('update_in_place', False) if (update_in_place and (fig is None or values_are_complex or values.ndim != 2 or (values.ndim == 2 and method not in ('', 'imshow')))): update_in_place = False if values.ndim == 1: # TODO: maybe a plotter class would be better if not method: if interp == 'nearest': method = 'step' dsp_kwargs['where'] = 'mid' elif interp == 'linear': method = 'plot' else: raise ValueError('`interp` {!r} not supported' ''.format(interp_in)) if method == 'plot' or method == 'step' or method == 'scatter': args_re += [grid.coord_vectors[0], values.real] args_im += [grid.coord_vectors[0], values.imag] else: raise ValueError('`method` {!r} not supported' ''.format(method_in)) elif values.ndim == 2: if not method: method = 'imshow' if method == 'imshow': args_re = [np.rot90(values.real)] args_im = [np.rot90(values.imag)] if values_are_complex else [] extent = [grid.min()[0], grid.max()[0], grid.min()[1], grid.max()[1]] if interp == 'nearest': interpolation = 'nearest' elif interp == 'linear': interpolation = 'bilinear' else: raise ValueError('`interp` {!r} not supported' ''.format(interp_in)) dsp_kwargs.update({'interpolation': interpolation, 'cmap': 'bone', 'extent': extent, 'aspect': 'auto'}) elif method == 'scatter': pts = grid.points() args_re = [pts[:, 0], pts[:, 1], values.ravel().real] args_im = ([pts[:, 0], pts[:, 1], values.ravel().imag] if values_are_complex else []) sub_kwargs.update({'projection': '3d'}) elif method in ('wireframe', 'plot_wireframe'): method = 'plot_wireframe' x, y = grid.meshgrid args_re = [x, y, np.rot90(values.real)] args_im = ([x, y, np.rot90(values.imag)] if values_are_complex else []) sub_kwargs.update({'projection': '3d'}) else: raise ValueError('`method` {!r} not supported' ''.format(method_in)) else: raise NotImplementedError('no method for {}d display implemented' ''.format(values.ndim)) # Additional keyword args are passed on to the display method dsp_kwargs.update(**kwargs) if fig is not None: # Reuse figure if given as input if not isinstance(fig, plt.Figure): raise TypeError('`fig` {} not a matplotlib figure'.format(fig)) if not plt.fignum_exists(fig.number): # If figure does not exist, user either closed the figure or # is using IPython, in this case we need a new figure. fig = plt.figure(figsize=figsize) updatefig = False else: # Set current figure to given input fig = plt.figure(fig.number) updatefig = True if values.ndim > 1 and not update_in_place: # If the figure is larger than 1d, we can clear it since we # dont reuse anything. Keeping it causes performance problems. 
fig.clf() else: fig = plt.figure(figsize=figsize) updatefig = False if values_are_complex: # Real if len(fig.axes) == 0: # Create new axis if needed sub_re = plt.subplot(arrange_subplots[0], **sub_kwargs) sub_re.set_title('Real part') sub_re.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub_re.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub_re.set_ylabel('value') else: sub_re = fig.axes[0] display_re = getattr(sub_re, method) csub_re = display_re(*args_re, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and len(fig.axes) < 2: # Create colorbar if none seems to exist # Use clim from kwargs if given if 'clim' not in kwargs: minval_re, maxval_re = _safe_minmax(values.real) else: minval_re, maxval_re = kwargs['clim'] ticks_re = _colorbar_ticks(minval_re, maxval_re) fmt_re = _colorbar_format(minval_re, maxval_re) plt.colorbar(csub_re, orientation='horizontal', ticks=ticks_re, format=fmt_re) # Imaginary if len(fig.axes) < 3: sub_im = plt.subplot(arrange_subplots[1], **sub_kwargs) sub_im.set_title('Imaginary part') sub_im.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub_im.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub_im.set_ylabel('value') else: sub_im = fig.axes[2] display_im = getattr(sub_im, method) csub_im = display_im(*args_im, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and len(fig.axes) < 4: # Create colorbar if none seems to exist # Use clim from kwargs if given if 'clim' not in kwargs: minval_im, maxval_im = _safe_minmax(values.imag) else: minval_im, maxval_im = kwargs['clim'] ticks_im = _colorbar_ticks(minval_im, maxval_im) fmt_im = _colorbar_format(minval_im, maxval_im) plt.colorbar(csub_im, orientation='horizontal', ticks=ticks_im, format=fmt_im) else: if len(fig.axes) == 0: # Create new axis object if needed sub = plt.subplot(111, **sub_kwargs) sub.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub.set_ylabel('value') try: # For 3d plots sub.set_zlabel('z') except AttributeError: pass else: sub = fig.axes[0] if update_in_place: import matplotlib as mpl imgs = [obj for obj in sub.get_children() if isinstance(obj, mpl.image.AxesImage)] if len(imgs) > 0 and updatefig: imgs[0].set_data(args_re[0]) csub = imgs[0] # Update min-max if 'clim' not in kwargs: minval, maxval = _safe_minmax(values) else: minval, maxval = kwargs['clim'] csub.set_clim(minval, maxval) else: display = getattr(sub, method) csub = display(*args_re, **dsp_kwargs) else: display = getattr(sub, method) csub = display(*args_re, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and colorbar: # Add colorbar # Use clim from kwargs if given if 'clim' not in kwargs: minval, maxval = _safe_minmax(values) else: minval, maxval = kwargs['clim'] ticks = _colorbar_ticks(minval, maxval) fmt = _colorbar_format(minval, maxval) if len(fig.axes) < 2: # Create colorbar if none seems to exist plt.colorbar(mappable=csub, ticks=ticks, format=fmt) elif update_in_place: # If it exists and we should update it 
csub.colorbar.set_clim(minval, maxval) csub.colorbar.set_ticks(ticks) if '%' not in fmt: labels = [fmt] * len(ticks) else: labels = [fmt % t for t in ticks] csub.colorbar.set_ticklabels(labels) csub.colorbar.draw_all() # Set title of window if title is not None: if not values_are_complex: # Do not overwrite title for complex values plt.title(title) fig.canvas.manager.set_window_title(title) # Fixes overlapping stuff at the expense of potentially squashed subplots if not update_in_place: fig.tight_layout() if updatefig or plt.isinteractive(): # If we are running in interactive mode, we can always show the fig # This causes an artifact, where users of `CallbackShow` without # interactive mode only shows the figure after the second iteration. plt.show(block=False) if not update_in_place: plt.draw() warning_free_pause() else: try: sub.draw_artist(csub) fig.canvas.blit(fig.bbox) fig.canvas.update() fig.canvas.flush_events() except AttributeError: plt.draw() warning_free_pause() if force_show: plt.show() if saveto is not None: fig.savefig(saveto) return fig
0.000074
def finalize(self, **kwargs):
    """
    Finalize executes any subclass-specific axes finalization steps.
    The user calls poof and poof calls finalize.

    Parameters
    ----------
    kwargs: generic keyword arguments.
    """
    # Set the title and add the legend
    self.set_title('ROC Curves for {}'.format(self.name))
    self.ax.legend(loc='lower right', frameon=True)

    # Set the limits for the ROC/AUC (always between 0 and 1)
    self.ax.set_xlim([0.0, 1.0])
    self.ax.set_ylim([0.0, 1.0])

    # Set x and y axis labels
    self.ax.set_ylabel('True Positive Rate')
    self.ax.set_xlabel('False Positive Rate')
0.002869
def killServices(self, services, error=False): """ :param dict services: Maps service jobStoreIDs to the communication flags for the service """ for serviceJobStoreID in services: serviceJob = services[serviceJobStoreID] if error: self.jobStore.deleteFile(serviceJob.errorJobStoreID) self.jobStore.deleteFile(serviceJob.terminateJobStoreID)
0.007075
def iter_tex_documents(base_dir="."): """Iterate through all .tex documents in the current directory.""" for path, dirlist, filelist in os.walk(base_dir): for name in fnmatch.filter(filelist, "*.tex"): yield os.path.join(path, name)
0.003846
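A usage sketch of the walker above, generalized to an arbitrary glob pattern; the pattern argument is an addition for illustration:

import fnmatch
import os

def iter_matching_files(base_dir=".", pattern="*.tex"):
    """Yield paths under base_dir whose file names match the glob pattern."""
    for path, dirlist, filelist in os.walk(base_dir):
        for name in fnmatch.filter(filelist, pattern):
            yield os.path.join(path, name)

for tex_path in iter_matching_files(".", "*.tex"):
    print(tex_path)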
def cli_command_restart(self, msg):
    """\
    Restart the subprocess.

    i.   we set our state to RESTARTING - while restarting we still send heartbeats
    ii.  we kill the subprocess
    iii. we start again
    iv.  if it's started we set our state to RUNNING, else we set it to WAITING

    :param msg:
    :return:
    """
    info = ''
    if self.state == State.RUNNING and self.sprocess and self.sprocess.proc:
        self.state = State.RESTARTING
        self.sprocess.set_exit_callback(self.proc_exit_cb_restart)
        self.sprocess.proc.kill()
        info = 'killed'

    # TODO: check if process is really dead etc.
    return info
0.007032
def dictify_device_meta(device_object): """ Input: Portals device object. Output: The same device object with the device meta converted to a python dictionary. """ try: if isinstance(device_object['info']['description']['meta'], str) or \ isinstance(device_object['info']['description']['meta'], unicode): device_object['info']['description']['meta'] =\ json.loads(device_object['info']['description']['meta']) except ValueError as err: print("dictify: {0}".format(err)) return device_object
0.005102
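A minimal before/after sketch of the meta conversion above; the nested keys follow the snippet, while the meta contents ("DeviceType", "Timezone") are made-up illustration:

import json

device = {"info": {"description": {"meta": '{"DeviceType": "gateway", "Timezone": "UTC"}'}}}

meta = device["info"]["description"]["meta"]
if isinstance(meta, str):
    device["info"]["description"]["meta"] = json.loads(meta)

# device["info"]["description"]["meta"] is now the dict
# {"DeviceType": "gateway", "Timezone": "UTC"}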
def errinfo(msmt): """Return (limtype, repval, errval1, errval2). Like m_liminfo, but also provides error bar information for values that have it.""" if isinstance(msmt, Textual): msmt = msmt.unwrap() if np.isscalar(msmt): return 0, msmt, msmt, msmt if isinstance(msmt, Uval): rep, plus1, minus1 = msmt.repvals(uval_default_repval_method) return 0, rep, plus1, minus1 if isinstance(msmt, Lval): return limtype(msmt), msmt.value, msmt.value, msmt.value raise ValueError('don\'t know how to treat %r as a measurement' % msmt)
0.001678
def make_call_positionals(stack_builders, count): """ Make the args entry for an ast.Call node. """ out = [make_expr(stack_builders) for _ in range(count)] out.reverse() return out
0.004902
def museval(inargs=None):
    """
    Command-line interface for museval evaluation tools
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        'estimates_dir',
        type=str
    )

    parser.add_argument('-o', help='output_dir')

    parser.add_argument('--cpu', type=int, help='number of cpus', default=4)

    parser.add_argument(
        '-p',
        help='enable multiprocessing',
        action='store_true',
    )

    parser.add_argument(
        '--musdb',
        help='path to musdb',
        type=str
    )

    parser.add_argument(
        '--iswav',
        help='Read musdb wav instead of stems',
        action='store_true',
    )

    parser.add_argument(
        '--version', '-v', action='version',
        version='%%(prog)s %s' % util.__version__
    )

    args = parser.parse_args(inargs)

    mus = musdb.DB(root_dir=args.musdb, is_wav=args.iswav)

    if not args.o:
        output_dir = args.estimates_dir
    else:
        output_dir = args.o

    # evaluate an existing estimates folder with wav files
    eval_mus_dir(
        dataset=mus,                        # instance of musdb
        estimates_dir=args.estimates_dir,   # path to estimates folder
        output_dir=output_dir,              # set a folder to write eval json files
        parallel=args.p,
        cpus=args.cpu
    )
0.000767
def append_segment(self, apdu): """This function appends the apdu content to the end of the current APDU being built. The segmentAPDU is the context.""" if _debug: SSM._debug("append_segment %r", apdu) # check for no context if not self.segmentAPDU: raise RuntimeError("no segmentation context established") # append the data self.segmentAPDU.put_data(apdu.pduData)
0.006881
def _get_cursor_vertical_diff_once(self):
    """Returns how far down the cursor moved."""
    old_top_usable_row = self.top_usable_row
    row, col = self.get_cursor_position()
    if self._last_cursor_row is None:
        cursor_dy = 0
    else:
        cursor_dy = row - self._last_cursor_row
        logger.info('cursor moved %d lines down' % cursor_dy)
        while self.top_usable_row > -1 and cursor_dy > 0:
            self.top_usable_row += 1
            cursor_dy -= 1
        while self.top_usable_row > 1 and cursor_dy < 0:
            self.top_usable_row -= 1
            cursor_dy += 1
    logger.info('top usable row changed from %d to %d',
                old_top_usable_row, self.top_usable_row)
    logger.info('returning cursor dy of %d from curtsies' % cursor_dy)
    self._last_cursor_row = row
    return cursor_dy
0.002181
def intersection(*argv):
    """Returns the intersection of multiple ordered sets. Items follow the
    order of set1, then set2, and so on.
    """
    res = OrderedSet(argv[0])
    for ods in argv:
        res = ods & res
    return res
0.009554
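A usage sketch for the intersection helper above, assuming OrderedSet comes from the ordered_set package (the snippet does not show its import):

from ordered_set import OrderedSet   # assumed source of OrderedSet

a = OrderedSet([3, 1, 4, 2])
b = OrderedSet([2, 4, 9])
c = OrderedSet([4, 2, 7])

common = intersection(a, b, c)
print(list(common))   # [4, 2] -- the items present in all three sets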
def do_commit(repo, message_template, branch_name, hexsha):
    "Do a commit if there are modified/untracked files"
    repo.git.add(repo.working_tree_dir)

    if not repo.git.diff(staged=True):
        _LOGGER.warning('No modified files in this Autorest run')
        return False

    checkout_and_create_branch(repo, branch_name)
    msg = message_template.format(hexsha=hexsha)
    commit = repo.index.commit(msg)
    _LOGGER.info("Commit done: %s", msg)
    return commit.hexsha
0.00211
def summary(self, raw): """Use the Backscatter.io summary data to create a view.""" taxonomies = list() level = 'info' namespace = 'Backscatter.io' if self.service == 'observations': summary = raw.get('results', dict()).get('summary', dict()) taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Observations', summary.get('observations_count', 0)), self.build_taxonomy(level, namespace, 'IP Addresses', summary.get('ip_address_count', 0)), self.build_taxonomy(level, namespace, 'Networks', summary.get('network_count', 0)), self.build_taxonomy(level, namespace, 'AS', summary.get('autonomous_system_count', 0)), self.build_taxonomy(level, namespace, 'Ports', summary.get('port_count', 0)), self.build_taxonomy(level, namespace, 'Protocols', summary.get('protocol_count', 0)) ] elif self.service == 'enrichment': summary = raw.get('results', dict()) if self.data_type == 'ip': taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Network', summary.get('network')), self.build_taxonomy(level, namespace, 'Network Broadcast', summary.get('network_broadcast')), self.build_taxonomy(level, namespace, 'Network Size', summary.get('network_size')), self.build_taxonomy(level, namespace, 'Country', summary.get('country_name')), self.build_taxonomy(level, namespace, 'AS Number', summary.get('as_num')), self.build_taxonomy(level, namespace, 'AS Name', summary.get('as_name')), ] elif self.data_type == 'network': taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Network Size', summary.get('network_size')) ] elif self.data_type == 'autonomous-system': taxonomies = taxonomies + [ self.build_taxonomy(level, namespace, 'Prefix Count', summary.get('prefix_count')), self.build_taxonomy(level, namespace, 'AS Number', summary.get('as_num')), self.build_taxonomy(level, namespace, 'AS Name', summary.get('as_name')) ] elif self.data_type == 'port': for result in raw.get('results', list()): display = "%s (%s)" % (result.get('service'), result.get('protocol')) taxonomies.append(self.build_taxonomy(level, namespace, 'Service', display)) else: pass else: pass return {"taxonomies": taxonomies}
0.007199
def hash_from_algo(algo):
    """
    Return a :mod:`hashlib` hash given the :xep:`300` `algo`.

    :param algo: The algorithm identifier as defined in :xep:`300`.
    :type algo: :class:`str`
    :raises NotImplementedError: if the hash algorithm is not supported by
        :mod:`hashlib`.
    :raises ValueError: if the hash algorithm MUST NOT be supported.
    :return: A hash object from :mod:`hashlib` or compatible.

    If the `algo` is not supported by the :mod:`hashlib` module,
    :class:`NotImplementedError` is raised.
    """
    try:
        enabled, (fun_name, fun_args, fun_kwargs) = _HASH_ALGO_MAP[algo]
    except KeyError:
        raise NotImplementedError(
            "hash algorithm {!r} unknown".format(algo)
        ) from None

    if not enabled:
        raise ValueError(
            "support of {} in XMPP is forbidden".format(algo)
        )

    try:
        fun = getattr(hashlib, fun_name)
    except AttributeError as exc:
        raise NotImplementedError(
            "{} not supported by hashlib".format(algo)
        ) from exc

    return fun(*fun_args, **fun_kwargs)
0.000902
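A hedged usage sketch: _HASH_ALGO_MAP is not shown in the snippet above, so the table below (and the "sha-256" spelling of the XEP-0300 identifier) is an assumed example of its shape rather than the real definition:

import hashlib

# Assumed shape: algo identifier -> (enabled, (hashlib attribute, args, kwargs))
_HASH_ALGO_MAP = {
    "sha-256": (True, ("sha256", (), {})),
    "md5": (False, ("md5", (), {})),   # present but forbidden for XMPP use
}

h = hash_from_algo("sha-256")   # behaves like hashlib.sha256()
h.update(b"hello")
print(h.hexdigest())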
def login(self, username=None, password=None, android_id=None): """Authenticate the gmusicapi Mobileclient instance. Parameters: username (Optional[str]): Your Google Music username. Will be prompted if not given. password (Optional[str]): Your Google Music password. Will be prompted if not given. android_id (Optional[str]): The 16 hex digits from an Android device ID. Default: Use gmusicapi.Mobileclient.FROM_MAC_ADDRESS to create ID from computer's MAC address. Returns: ``True`` on successful login or ``False`` on unsuccessful login. """ cls_name = type(self).__name__ if username is None: username = input("Enter your Google username or email address: ") if password is None: password = getpass.getpass("Enter your Google Music password: ") if android_id is None: android_id = Mobileclient.FROM_MAC_ADDRESS try: self.api.login(username, password, android_id) except OSError: logger.exception("{} authentication failed.".format(cls_name)) if not self.is_authenticated: logger.warning("{} authentication failed.".format(cls_name)) return False logger.info("{} authentication succeeded.\n".format(cls_name)) return True
0.025
def switch_psm_to_peptable_fields(oldheader): """Returns a dict map with old to new header fields""" return {old: new for old, new in zip([mzidtsvdata.HEADER_PEPTIDE, mzidtsvdata.HEADER_PROTEIN, mzidtsvdata.HEADER_PEPTIDE_Q, mzidtsvdata.HEADER_PEPTIDE_PEP], [peptabledata.HEADER_PEPTIDE, peptabledata.HEADER_PROTEINS, peptabledata.HEADER_QVAL, peptabledata.HEADER_PEP])}
0.00149
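The mapping above is just two parallel lists zipped into a dict; a generic sketch with hypothetical header names, since the mzidtsvdata/peptabledata constants are not reproduced here:

# Hypothetical header names standing in for the mzidtsvdata/peptabledata constants
psm_headers = ['Peptide', 'Protein', 'peptide q-value', 'peptide PEP']
peptable_headers = ['peptide', 'proteins', 'q-value', 'PEP']

header_map = {old: new for old, new in zip(psm_headers, peptable_headers)}
# {'Peptide': 'peptide', 'Protein': 'proteins',
#  'peptide q-value': 'q-value', 'peptide PEP': 'PEP'}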
def get_qcos_client(self, app_uri):
    """Get the resource management (QCOS) client for the given app URI.

    The client is cached, but the cache is not thread-safe.
    """
    client = self.qcos_clients.get(app_uri)
    if client is None:
        client = self.create_qcos_client(app_uri)
        self.qcos_clients[app_uri] = client
    return client
0.006873
def write_data(self, buf): """Send data to the device. :param buf: the data to send. :type buf: list(int) :return: success status. :rtype: bool """ data = ''.join(map(chr, buf)) size = len(data) if hidapi.hid_write(self.device, ctypes.c_char_p(data), size) != size: raise IOError( 'pywws.device_ctypes_hidapi.USBDevice.write_data failed') return True
0.00432
def _get_crud_params(compiler, stmt, **kw): """ extract values from crud parameters taken from SQLAlchemy's crud module (since 1.0.x) and adapted for Crate dialect""" compiler.postfetch = [] compiler.insert_prefetch = [] compiler.update_prefetch = [] compiler.returning = [] # no parameters in the statement, no parameters in the # compiled params - return binds for all columns if compiler.column_keys is None and stmt.parameters is None: return [(c, crud._create_bind_param(compiler, c, None, required=True)) for c in stmt.table.columns] if stmt._has_multi_parameters: stmt_parameters = stmt.parameters[0] else: stmt_parameters = stmt.parameters # getters - these are normally just column.key, # but in the case of mysql multi-table update, the rules for # .key must conditionally take tablename into account if SA_VERSION >= SA_1_1: _column_as_key, _getattr_col_key, _col_bind_name = \ crud._key_getters_for_crud_column(compiler, stmt) else: _column_as_key, _getattr_col_key, _col_bind_name = \ crud._key_getters_for_crud_column(compiler) # if we have statement parameters - set defaults in the # compiled params if compiler.column_keys is None: parameters = {} else: parameters = dict((_column_as_key(key), crud.REQUIRED) for key in compiler.column_keys if not stmt_parameters or key not in stmt_parameters) # create a list of column assignment clauses as tuples values = [] if stmt_parameters is not None: crud._get_stmt_parameters_params( compiler, parameters, stmt_parameters, _column_as_key, values, kw) check_columns = {} crud._scan_cols(compiler, stmt, parameters, _getattr_col_key, _column_as_key, _col_bind_name, check_columns, values, kw) if stmt._has_multi_parameters: values = crud._extend_values_for_multiparams(compiler, stmt, values, kw) return values
0.00082
def table_from_file(source, ifo=None, columns=None, selection=None, loudest=False, extended_metadata=True):
    """Read a `Table` from a PyCBC live HDF5 file

    Parameters
    ----------
    source : `str`, `h5py.File`, `h5py.Group`
        the file path or open `h5py` object from which to read the data

    ifo : `str`, optional
        the interferometer prefix (e.g. ``'G1'``) to read; this is required
        if reading from a file path or `h5py.File` and the containing file
        stores data for multiple interferometers

    columns : `list` or `str`, optional
        the list of column names to read, defaults to all in group

    selection : `str` or `list` of `str`, optional
        one or more column filters with which to down-select the returned rows

    loudest : `bool`, optional
        read only those events marked as 'loudest', default: `False` (read all)

    extended_metadata : `bool`, optional
        record non-column datasets found in the H5 group (e.g. ``'psd'``)
        in the ``meta`` dict, default: `True`

    Returns
    -------
    table : `~gwpy.table.EventTable`
    """
    # find group
    if isinstance(source, h5py.File):
        source, ifo = _find_table_group(source, ifo=ifo)

    # -- by this point 'source' is guaranteed to be an h5py.Group

    # parse default columns
    if columns is None:
        columns = list(_get_columns(source))
    readcols = set(columns)

    # parse selections
    selection = parse_column_filters(selection or [])
    if selection:
        readcols.update(list(zip(*selection))[0])

    # set up meta dict
    meta = {'ifo': ifo}
    meta.update(source.attrs)
    if extended_metadata:
        meta.update(_get_extended_metadata(source))

    if loudest:
        loudidx = source['loudest'][:]

    # map data to columns
    data = []
    for name in readcols:
        # convert hdf5 dataset into Column
        try:
            arr = source[name][:]
        except KeyError:
            if name in GET_COLUMN:
                arr = GET_COLUMN[name](source)
            else:
                raise
        if loudest:
            arr = arr[loudidx]
        data.append(Table.Column(arr, name=name))

    # read, applying selection filters, and column filters
    return filter_table(Table(data, meta=meta), selection)[columns]
0.000453
def continuous_eval(self): """Evaluate until checkpoints stop being produced.""" for ckpt_path in next_checkpoint(self._hparams.model_dir, self._hparams.eval_timeout_mins): # Skip zero'th step. train_step = decoding.get_step_from_ckpt_path(ckpt_path) if train_step == 0: tf.logging.info("Skipping evaluation at step 0") continue self.evaluate()
0.011655
def _transform(xsl_filename, xml, **kwargs):
    """Transforms the XML using the specified XSL file."""
    xslt = _make_xsl(xsl_filename)
    xml = xslt(xml, **kwargs)
    return xml
0.005464
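_make_xsl is not shown above; below is a hedged sketch of how such a helper is commonly written with lxml, including how string parameters are passed to the transform (values must be wrapped with etree.XSLT.strparam or given as raw XPath expressions). The stylesheet path and the lang parameter are hypothetical:

from lxml import etree

def make_xsl(xsl_filename):
    # Parse the stylesheet and build a reusable transformer
    return etree.XSLT(etree.parse(xsl_filename))

doc = etree.fromstring("<root><item>42</item></root>")
transform = make_xsl("stylesheet.xsl")                    # hypothetical path
result = transform(doc, lang=etree.XSLT.strparam("en"))   # kwargs become XSLT params
print(str(result))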