Columns: Unnamed: 0 (int64, 0–389k) · code (string, lengths 26–79.6k) · docstring (string, lengths 1–46.9k)
378,500
def stop_step(self, step_name): if self.finished is not None: raise AlreadyFinished() steps = copy.deepcopy(self.steps) step_data = self._get_step(step_name, steps=steps) if step_data is None: raise StepNotStarted() elif in step_data: raise StepAlreadyFinished() step_data[] = datetime.utcnow() step_data[] = util.timedelta_total_seconds(step_data[] - step_data[]) self._save(steps=steps)
Stop a step.
378,501
def get_html(self, url, params=None, cache_cb=None, decoder_encoding=None, decoder_errors=url_specified_decoder.ErrorsHandle.strict, **kwargs): response = self.get( url=url, params=params, cache_cb=cache_cb, **kwargs ) return url_specified_decoder.decode( binary=response.content, url=response.url, encoding=decoder_encoding, errors=decoder_errors, )
Get html of an url.
378,502
def create_binary_descriptor(streamer): trigger = 0 if streamer.automatic: trigger = 1 elif streamer.with_other is not None: trigger = (1 << 7) | streamer.with_other return struct.pack("<8sHBBBx", streamer.dest.encode(), streamer.selector.encode(), trigger, streamer.KnownFormats[streamer.format], streamer.KnownTypes[streamer.report_type])
Create a packed binary descriptor of a DataStreamer object. Args: streamer (DataStreamer): The streamer to create a packed descriptor for Returns: bytes: A packed 14-byte streamer descriptor.
378,503
def common_wire_version(self): servers = self.known_servers if servers: return min(s.max_wire_version for s in self.known_servers) return None
Minimum of all servers' max wire versions, or None.
378,504
def _read_composites(self, compositor_nodes): keepables = set() for item in compositor_nodes: self._generate_composite(item, keepables) return keepables
Read (generate) composites.
378,505
def _calculateBasalLearning(self, activeColumns, burstingColumns, correctPredictedCells, activeBasalSegments, matchingBasalSegments, basalPotentialOverlaps): learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell( activeBasalSegments, correctPredictedCells) cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells( matchingBasalSegments) matchingCells = np.unique(cellsForMatchingBasal) (matchingCellsInBurstingColumns, burstingColumnsWithNoMatch) = np2.setCompare( matchingCells, burstingColumns, matchingCells / self.cellsPerColumn, rightMinusLeft=True) learningMatchingBasalSegments = self._chooseBestSegmentPerColumn( self.basalConnections, matchingCellsInBurstingColumns, matchingBasalSegments, basalPotentialOverlaps, self.cellsPerColumn) newBasalSegmentCells = self._getCellsWithFewestSegments( self.basalConnections, self.rng, burstingColumnsWithNoMatch, self.cellsPerColumn) learningCells = np.concatenate( (correctPredictedCells, self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments), newBasalSegmentCells)) correctMatchingBasalMask = np.in1d( cellsForMatchingBasal / self.cellsPerColumn, activeColumns) basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask] return (learningActiveBasalSegments, learningMatchingBasalSegments, basalSegmentsToPunish, newBasalSegmentCells, learningCells)
Basic Temporal Memory learning. Correctly predicted cells always have active basal segments, and we learn on these segments. In bursting columns, we either learn on an existing basal segment, or we grow a new one. The only influence apical dendrites have on basal learning is: the apical dendrites influence which cells are considered "predicted". So an active apical dendrite can prevent some basal segments in active columns from learning. @param correctPredictedCells (numpy array) @param burstingColumns (numpy array) @param activeBasalSegments (numpy array) @param matchingBasalSegments (numpy array) @param basalPotentialOverlaps (numpy array) @return (tuple) - learningActiveBasalSegments (numpy array) Active basal segments on correct predicted cells - learningMatchingBasalSegments (numpy array) Matching basal segments selected for learning in bursting columns - basalSegmentsToPunish (numpy array) Basal segments that should be punished for predicting an inactive column - newBasalSegmentCells (numpy array) Cells in bursting columns that were selected to grow new basal segments - learningCells (numpy array) Cells that have learning basal segments or are selected to grow a basal segment
378,506
def fmt_routes(bottle_app): routes = [(r.method, r.rule) for r in bottle_app.routes] if not routes: return string = string += fmt_pairs(routes, sort_key=operator.itemgetter(1)) return string
Return a pretty formatted string of the list of routes.
378,507
async def start_component_in_thread(executor, workload: CoroutineFunction[T], *args: Any, loop=None, **kwargs: Any) -> Component[T]: loop = loop or asyncio.get_event_loop() commands_a, commands_b = pipe(loop=loop) events_a, events_b = pipe(loop=loop) commands_b = ConcurrentPipeEnd(commands_b, loop=loop) events_b = ConcurrentPipeEnd(events_b, loop=loop) _workload = workload(*args, commands=commands_b, events=events_b, **kwargs) future = cast(_Future[T], loop.run_in_executor(executor, asyncio.run, _workload)) component = Component[T](commands_a, events_a, future) await component.wait_for_start() return component
Starts the passed `workload` with additional `commands` and `events` pipes. The workload will be executed on an event loop in a new thread; the thread is provided by `executor`. This function is not compatible with `ProcessPoolExecutor`, as references between the workload and component are necessary. Be careful when using an executor with a maximum number of threads, as long-running workloads may starve other tasks. Consider using a dedicated executor that can spawn at least as many threads as concurrent long-running tasks are expected.
378,508
def match_uriinfo(cls, info): items = sorted( cls._entries.items(), key=lambda matcher_entries: matcher_entries[0].priority, reverse=True, ) for matcher, value in items: if matcher.matches(info): return (matcher, info) return (None, [])
:param info: an :py:class:`~httpretty.core.URIInfo` :returns: a 2-item tuple: (:py:class:`~httpretty.core.URLMatcher`, :py:class:`~httpretty.core.URIInfo`) or ``(None, [])``
378,509
def blend(self, clr, factor=0.5): r = self.r * (1 - factor) + clr.r * factor g = self.g * (1 - factor) + clr.g * factor b = self.b * (1 - factor) + clr.b * factor a = self.a * (1 - factor) + clr.a * factor return Color(r, g, b, a, mode="rgb")
Returns a mix of two colors.
378,510
def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000): dist = np.sqrt(np.linspace(0., 1., resolution)) if ccw: direction = 1. else: direction = -1. angle = dist * spirals * np.pi * 2. * direction spiral_texture = ( (np.cos(angle) * dist / 2.) + 0.5, (np.sin(angle) * dist / 2.) + 0.5 ) return spiral_texture
Makes a texture consisting of a spiral from the origin. Args: spirals (float): the number of rotations to make ccw (bool): make spirals counter-clockwise (default is clockwise) offset (float): if non-zero, spirals start offset by this amount resolution (int): number of midpoints along the spiral Returns: A texture.
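An illustrative usage sketch for the spiral texture above, assuming the function is importable and matplotlib is available (the offset argument is unused in the snippet shown, so it is omitted here):

import matplotlib.pyplot as plt

x, y = make_spiral_texture(spirals=4.0, ccw=True, resolution=500)  # two arrays in [0, 1]
plt.plot(x, y, linewidth=0.5)
plt.gca().set_aspect("equal")
plt.show()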
378,511
def get(self, oid): snmpsecurity = self._get_snmp_security() try: engine_error, pdu_error, pdu_error_index, objects = self._cmdgen.getCmd( snmpsecurity, cmdgen.UdpTransportTarget((self.host, self.port), timeout=self.timeout, retries=self.retries), oid, ) except Exception as e: raise SNMPError(e) if engine_error: raise SNMPError(engine_error) if pdu_error: raise SNMPError(pdu_error.prettyPrint()) _, value = objects[0] value = _convert_value_to_native(value) return value
Get a single OID value.
378,512
def validate(self, _portfolio, account, algo_datetime, _algo_current_data): if (algo_datetime > self.deadline and account.leverage < self.min_leverage): self.fail()
Make validation checks if we are after the deadline. Fail if the leverage is less than the min leverage.
378,513
def get_race_card(self, market_ids, data_entries=None, session=None, lightweight=None): if not self.app_key: raise RaceCardError("You need to login before requesting a race_card\n" "APIClient.race_card.login()") params = self.create_race_card_req(market_ids, data_entries) (response, elapsed_time) = self.request(params=params, session=session) return self.process_response(response, resources.RaceCard, elapsed_time, lightweight)
Returns a list of race cards based on market ids provided. :param list market_ids: The filter to select desired markets :param str data_entries: Data to be returned :param requests.session session: Requests session object :param bool lightweight: If True will return dict not a resource :rtype: list[resources.RaceCard]
378,514
def native(s): if not isinstance(s, (binary_type, text_type)): raise TypeError("%r is neither bytes nor unicode" % s) if PY3: if isinstance(s, binary_type): return s.decode("utf-8") else: if isinstance(s, text_type): return s.encode("utf-8") return s
Convert :py:class:`bytes` or :py:class:`unicode` to the native :py:class:`str` type, using UTF-8 encoding if conversion is necessary. :raise UnicodeError: The input string is not UTF-8 decodeable. :raise TypeError: The input is neither :py:class:`bytes` nor :py:class:`unicode`.
378,515
def register(self, key, value, type_info): old_action = self._actions.get(key) if old_action == value and key in self._type_info: self._type_info[key].update(type_info) else: self._type_info[key] = dict(type_info) super(ExtRegistry, self).register(key, value)
Registers a callable with the specified key and type info. `key` String key to identify a callable. `value` Callable object. `type_info` Dictionary with type information about the value provided.
378,516
def _get_chart(self, chart_type, x=None, y=None, style=None, opts=None, label=None, options={}, **kwargs): sbcharts = ["density", "distribution", "dlinear"] acharts = ["tick", "circle", "text", "line_num", "bar_num"] if chart_type in sbcharts: self._set_seaborn_engine() if chart_type in acharts: self._set_altair_engine() if chart_type != "sline": x, y = self._check_fields(x, y) if opts is None: opts = self.chart_opts if style is None: style = self.chart_style if self.engine == "bokeh": func = self._get_bokeh_chart elif self.engine == "altair": func = self._get_altair_chart elif self.engine == "chartjs": func = self._get_chartjs_chart elif self.engine == "seaborn": func = self._get_seaborn_chart else: self.err("Engine " + self.engine + " unknown") return try: chart = func( x, y, chart_type, label, opts, style, options=options, **kwargs) return chart except Exception as e: self.err(e)
Get a full chart object
378,517
def entry_at(cls, filepath, index): fp = open(filepath, ) if index < 0: return RefLogEntry.from_line(fp.readlines()[index].strip()) else: for i in xrange(index + 1): line = fp.readline() if not line: break if i != index or not line: raise IndexError return RefLogEntry.from_line(line.strip())
:return: RefLogEntry at the given index :param filepath: full path to the index file from which to read the entry :param index: python list compatible index, i.e. it may be negative to specify an entry counted from the end of the list :raise IndexError: If the entry didn't exist .. note:: This method is faster as it only parses the entry at index, skipping all other lines. Nonetheless, the whole file has to be read if the index is negative
378,518
def set_max_attempts(self, value): if value is None: raise InvalidArgument() if value is not None and not isinstance(value, int): raise InvalidArgument() if not self.my_osid_object_form._is_valid_integer(value, self.get_max_attempts_metadata()): raise InvalidArgument() self.my_osid_object_form._my_map[] = value
stub
378,519
def set_vibration(self, left_motor, right_motor, duration): if WIN: self._set_vibration_win(left_motor, right_motor, duration) elif NIX: self._set_vibration_nix(left_motor, right_motor, duration) else: raise NotImplementedError
Control the speed of both motors separately or together. left_motor and right_motor arguments require a number between 0 (off) and 1 (full). duration is milliseconds, e.g. 1000 for a second.
378,520
def is_in_schedule_mode(self): resource = "schedule" mode_event = self.publish_and_get_event(resource) if mode_event and mode_event.get("resource", None) == "schedule": properties = mode_event.get("properties") return properties.get("active", False) return False
Returns True if base_station is currently on a scheduled mode.
378,521
def _changes(name, uid=None, gid=None, groups=None, optional_groups=None, remove_groups=True, home=None, createhome=True, password=None, enforce_password=True, empty_password=False, shell=None, fullname=, roomnumber=, workphone=, homephone=, other=, loginclass=None, date=None, mindays=0, maxdays=999999, inactdays=0, warndays=7, expire=None, win_homedrive=None, win_profile=None, win_logonscript=None, win_description=None, allow_uid_change=False, allow_gid_change=False): if in __salt__: lshad = __salt__[](name) lusr = __salt__[](name) if not lusr: return False change = {} if groups is None: groups = lusr[] wanted_groups = sorted(set((groups or []) + (optional_groups or []))) if uid and lusr[] != uid: change[] = uid if gid is not None and lusr[] not in (gid, __salt__[](gid)): change[] = gid default_grp = __salt__[]( gid if gid is not None else lusr[] ) if default_grp in lusr[]: lusr[].remove(default_grp) if name in lusr[] and name not in wanted_groups: lusr[].remove(name) if default_grp in wanted_groups: wanted_groups.remove(default_grp) if _group_changes(lusr[], wanted_groups, remove_groups): change[] = wanted_groups if home and lusr[] != home: change[] = home if createhome: newhome = home if home else lusr[] if newhome is not None and not os.path.isdir(newhome): change[] = newhome if shell and lusr[] != shell: change[] = shell if in __salt__ and in __salt__: if password and not empty_password: default_hash = __salt__[]() if lshad[] == default_hash \ or lshad[] != default_hash and enforce_password: if lshad[] != password: change[] = password if empty_password and lshad[] != : change[] = True if date is not None and lshad[] != date: change[] = date if mindays is not None and lshad[] != mindays: change[] = mindays if maxdays is not None and lshad[] != maxdays: change[] = maxdays if inactdays is not None and lshad[] != inactdays: change[] = inactdays if warndays is not None and lshad[] != warndays: change[] = warndays if expire and lshad[] != expire: change[] = expire elif in __salt__ and salt.utils.platform.is_windows(): if expire and expire is not -1 and salt.utils.dateutils.strftime(lshad[]) != salt.utils.dateutils.strftime(expire): change[] = expire fullname = salt.utils.data.decode(fullname) lusr[] = salt.utils.data.decode(lusr[]) if fullname is not None and lusr[] != fullname: change[] = fullname if win_homedrive and lusr[] != win_homedrive: change[] = win_homedrive if win_profile and lusr[] != win_profile: change[] = win_profile if win_logonscript and lusr[] != win_logonscript: change[] = win_logonscript if win_description and lusr[] != win_description: change[] = win_description info=errors ) return change
Return a dict of the changes required for a user if the user is present, otherwise return False. Updated in 2015.8.0 to include support for windows homedrive, profile, logonscript, and description fields. Updated in 2014.7.0 to include support for shadow attributes, all attributes supported as integers only.
378,522
def backward_committor(T, A, B, mu=None): r X = set(range(T.shape[0])) A = set(A) B = set(B) AB = A.intersection(B) notAB = X.difference(A).difference(B) if len(AB) > 0: raise ValueError("Sets A and B have to be disjoint") if mu is None: mu = stationary_distribution(T) K = np.transpose(mu[:, np.newaxis] * (T - np.eye(T.shape[0]))) W = 1.0 * K W[list(A), :] = 0.0 W[list(A), list(A)] = 1.0 W[list(B), :] = 0.0 W[list(B), list(B)] = 1.0 r = np.zeros(T.shape[0]) r[list(A)] = 1.0 u = solve(W, r) return u
r"""Backward committor between given sets. The backward committor u(x) between sets A and B is the probability for the chain starting in x to have come from A last rather than from B. Parameters ---------- T : (M, M) ndarray Transition matrix A : array_like List of integer state labels for set A B : array_like List of integer state labels for set B mu : (M, ) ndarray (optional) Stationary vector Returns ------- u : (M, ) ndarray Vector of backward committor probabilities Notes ----- The backward committor is a solution to the following boundary-value problem .. math:: \sum_j K_{ij} \pi_{j} u_{j}=0 for i in X\(A u B) (I) u_{i}=1 for i \in A (II) u_{i}=0 for i \in B (III) with adjoint of the generator matrix K=(D_pi(P-I))'.
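A self-contained numeric sketch of the boundary-value problem described in the notes, for a toy 3-state chain with A = {0} and B = {2}; it uses plain numpy instead of the library's stationary_distribution/solve helpers:

import numpy as np

T = np.array([[0.8, 0.2, 0.0],
              [0.3, 0.4, 0.3],
              [0.0, 0.2, 0.8]])

# stationary distribution: left eigenvector of T for eigenvalue 1
evals, evecs = np.linalg.eig(T.T)
mu = np.real(evecs[:, np.argmax(np.real(evals))])
mu /= mu.sum()

K = (mu[:, None] * (T - np.eye(3))).T      # adjoint generator, as in the code above
W, r = K.copy(), np.zeros(3)
W[0, :] = 0.0; W[0, 0] = 1.0; r[0] = 1.0   # u = 1 on A
W[2, :] = 0.0; W[2, 2] = 1.0               # u = 0 on B
u = np.linalg.solve(W, r)
print(u)                                    # backward committor for states 0, 1, 2 -> [1.0, 0.5, 0.0]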
378,523
def cache(self, checkvalidity=True, staleonly=False, allowraise=True): note = "staleonly = {}".format(staleonly) self._log_an_event(,,note) docache = True if staleonly: lc = self.last_cache() if lc: freshthresh = self.freshthresh nw = dt.datetime.now() freshness = (nw - lc).total_seconds() / 60.0 if freshness <= freshthresh: docache = False smrp = SymbolReport(self.name) if docache: data = [] cols = [, , ] if len(self.feeds) == 0: err_msg = "Symbol has no Feeds. Candatadefclasst need to do anything here, as the concatenation def build_hi_df(which, colname): objs = object_session(self) qry = objs.query(which.ind, func.max(which.dt_log).label()) qry = qry.filter_by(symname = self.name) grb = qry.group_by(which.ind).subquery() qry = objs.query(which) ords = qry.join((grb, and_(which.ind == grb.c.ind, which.dt_log == grb.c.max_dt_log))).all() if len(ords): orind = [row.ind for row in ords] orval = [row.val for row in ords] ordf = indt.build_ordf(orind, orval, colname) else: ordf = pd.DataFrame(columns=[colname]) return ordf ordf = build_hi_df(Override, ) fsdf = build_hi_df(FailSafe, ) orfsdf = pd.merge(ordf, fsdf, how=, left_index=True, right_index=True) data = pd.merge(orfsdf, data, how=, left_index=True, right_index=True) data = indt.process_post_orfs(data) try: data = data.fillna(value=pd.np.nan) data = data[sorted_feed_cols(data)] data[] = FeedAggregator(self.agg_method).aggregate(data) except: point = "aggregation" smrp = self._generic_exception(point, smrp, allowraise) return smrp
Re-caches the Symbol's datatable by querying each Feed. Parameters ---------- checkvalidity : bool, optional Check validity post-cache. Turn off to improve speed. staleonly : bool, default False Set to True to speed things up by re-caching only stale data. allowraise : bool, default True ANDed with the Symbol.handle and Feed.handle 'raise' settings; set to False when caching a list of symbols. Note, this won't silence bugs in Trump, e.g. unhandled edge cases, so those still need to be handled by the application. Returns ------- SymbolReport
378,524
def create(self, key, value): data = values.of({'Key': key, 'Value': value, }) payload = self._version.create( 'POST', self._uri, data=data, ) return VariableInstance( self._version, payload, service_sid=self._solution['service_sid'], environment_sid=self._solution['environment_sid'], )
Create a new VariableInstance :param unicode key: The key :param unicode value: The value :returns: Newly created VariableInstance :rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
378,525
def _get_team_results(self, team_result_html): link = [i for i in team_result_html('a').items()] if len(link) < 1: return None name, abbreviation = self._get_name(link[0]) return name, abbreviation
Extract the winning or losing team's name and abbreviation. Depending on which team's data field is passed (either the winner or loser), return the name and abbreviation of that team to denote which team won and which lost the game. Parameters ---------- team_result_html : PyQuery object A PyQuery object representing either the winning or losing team's data field within the boxscore. Returns ------- tuple Returns a tuple of the team's name followed by the abbreviation.
378,526
def _check_label_or_level_ambiguity(self, key, axis=0): if self.ndim > 2: raise NotImplementedError( "_check_label_or_level_ambiguity is not implemented for {type}" .format(type=type(self))) axis = self._get_axis_number(axis) other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis) if (key is not None and is_hashable(key) and key in self.axes[axis].names and any(key in self.axes[ax] for ax in other_axes)): level_article, level_type = (("an", "index") if axis == 0 else ("a", "column")) label_article, label_type = (("a", "column") if axis == 0 else ("an", "index")) msg = ("'{key}' is both {level_article} {level_type} level and " "{label_article} {label_type} label, which is ambiguous.").format(key=key, level_article=level_article, level_type=level_type, label_article=label_article, label_type=label_type) raise ValueError(msg)
Check whether `key` is ambiguous. By ambiguous, we mean that it matches both a level of the input `axis` and a label of the other axis. Parameters ---------- key: str or object label or level name axis: int, default 0 Axis that levels are associated with (0 for index, 1 for columns) Raises ------ ValueError: `key` is ambiguous
378,527
def rename_columns(self, col): try: self.cleaned_data.columns = col except Exception as e: raise e
Rename columns of dataframe. Parameters ---------- col : list(str) List of columns to rename.
378,528
def get_account_from_name(self, name): for account in self.accounts: if account.get_name() == name: return account return None
Returns the account with the given name. :type name: string :param name: The name of the account.
378,529
def add_default_initial_conditions(self, value=None): if value is not None: try: value_num = float(value) except ValueError: logger.error() return else: value_num = self.default_initial_amount if self.model is None: return for m in self.model.monomers: set_base_initial_condition(self.model, m, value_num)
Set default initial conditions in the PySB model. Parameters ---------- value : Optional[float] Optionally a value can be supplied which will be the initial amount applied. Otherwise a built-in default is used.
378,530
def do_fuzzyindex(self, word): word = list(preprocess_query(word))[0] token = Token(word) neighbors = make_fuzzy(token) neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in neighbors] neighbors.sort(key=lambda n: n[1], reverse=True) for token, freq in neighbors: if freq == 0: break print(white(token), blue(freq))
Compute fuzzy extensions of word that exist in index. FUZZYINDEX lilas
378,531
def random_word(tokens, tokenizer): output_label = [] for i, token in enumerate(tokens): prob = random.random() if prob < 0.15: prob /= 0.15 if prob < 0.8: tokens[i] = "[MASK]" elif prob < 0.9: tokens[i] = random.choice(list(tokenizer.vocab.items()))[0] try: output_label.append(tokenizer.vocab[token]) except KeyError: output_label.append(tokenizer.vocab["[UNK]"]) logger.warning("Cannot find token '{}' in vocab. Using [UNK] instead".format(token)) else: output_label.append(-1) return tokens, output_label
Masking some random tokens for Language Model task with probabilities as in the original BERT paper. :param tokens: list of str, tokenized sentence. :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here) :return: (list of str, list of int), masked tokens and related labels for LM prediction
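A standalone sketch of the 80/10/10 masking rule this docstring describes, using a toy vocabulary instead of a real BERT tokenizer (all names here are illustrative):

import random

toy_vocab = {"[UNK]": 0, "[MASK]": 1, "the": 2, "cat": 3, "sat": 4}

def mask_tokens(tokens, vocab, mask_prob=0.15):
    labels = []
    for i, token in enumerate(tokens):
        p = random.random()
        if p < mask_prob:
            p /= mask_prob
            if p < 0.8:                        # 80%: replace with [MASK]
                tokens[i] = "[MASK]"
            elif p < 0.9:                      # 10%: replace with a random vocab token
                tokens[i] = random.choice(list(vocab))
            # remaining 10%: keep the original token unchanged
            labels.append(vocab.get(token, vocab["[UNK]"]))
        else:
            labels.append(-1)                  # not selected: ignored by the LM loss
    return tokens, labels

print(mask_tokens(["the", "cat", "sat"], toy_vocab))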
378,532
def makephy(data, samples, longname): names = [i.name for i in samples] names.sort() locifile = os.path.join(data.dirs.outfiles, data.name+".loci") locus = iter(open(locifile, )) fdict = {name:[] for name in names} partitions = [] initial_pos = 1 done = 0 nloci = 0 nbases = 0 for partition in partitions: print >>raxml_part_out, "DNA, %s" % (partition) raxml_part_out.close() return partitions
Builds phy output. If files are large, writes 50,000 loci at a time to tmp files and rebuilds at the end.
378,533
def get(self): self.log.info() self._copy_folder_and_get_directory_listings() self._join_all_filenames_and_text() self._collect_placeholders_required() self._populate_dynamic_placeholders() self._fill_placeholders_from_settings() self._request_remaining_placeholders() self._populate_placeholders_in_files() self._move_template_to_destination(ignoreExisting=self.ignoreExisting) self.log.info() return None
*do the frankenstein magic!*
378,534
def hexstr(x, onlyasc=0, onlyhex=0, color=False): x = bytes_encode(x) _sane_func = sane_color if color else sane s = [] if not onlyasc: s.append(" ".join("%02X" % orb(b) for b in x)) if not onlyhex: s.append(_sane_func(x)) return " ".join(s)
Build a fancy tcpdump like hex from bytes.
378,535
def _get_stream(filename, openfunction=open, mode=): try: stream = openfunction(filename, mode=mode) except (IOError, OSError) as err: try: stream.readline() except IOError: stream.close() stream = None except: stream.close() raise else: stream.close() stream = openfunction(filename, mode=mode) return stream
Return open stream if *filename* can be opened with *openfunction* or else ``None``.
378,536
def get_canonical_key_id(self, key_id): shard_num = self.get_shard_num_by_key_id(key_id) return self._canonical_keys[shard_num]
get_canonical_key_id is used by get_canonical_key, see the comment for that method for more explanation. Keyword arguments: key_id -- the key id (e.g. '12345') returns the canonical key id (e.g. '12')
378,537
def x(self): with self._condition: result = None if not self.done(): self._condition.wait(self._timeout) if not self.done(): self.set_exception(TimeoutError()) if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: result = CancelledError() elif self._state == FINISHED: if self._exception: result = self._exception else: result = self._result if isinstance(result, Exception): if self.catch_exception: result = FailureException(result) return result else: raise result return result
Block the main thread until the future finishes, then return future.result().
378,538
def next_frame_basic_stochastic_discrete(): hparams = basic_deterministic_params.next_frame_sampling() hparams.batch_size = 4 hparams.video_num_target_frames = 6 hparams.scheduled_sampling_mode = "prob_inverse_lin" hparams.scheduled_sampling_decay_steps = 40000 hparams.scheduled_sampling_max_prob = 1.0 hparams.dropout = 0.15 hparams.filter_double_steps = 3 hparams.hidden_size = 96 hparams.learning_rate_constant = 0.002 hparams.learning_rate_warmup_steps = 2000 hparams.learning_rate_schedule = "linear_warmup * constant" hparams.concat_internal_states = True hparams.video_modality_loss_cutoff = 0.03 hparams.add_hparam("bottleneck_bits", 128) hparams.add_hparam("bottleneck_noise", 0.1) hparams.add_hparam("discretize_warmup_steps", 40000) hparams.add_hparam("latent_rnn_warmup_steps", 40000) hparams.add_hparam("latent_rnn_max_sampling", 0.5) hparams.add_hparam("latent_use_max_probability", 0.8) hparams.add_hparam("full_latent_tower", False) hparams.add_hparam("latent_predictor_state_size", 128) hparams.add_hparam("latent_predictor_temperature", 1.0) hparams.add_hparam("complex_addn", True) hparams.add_hparam("recurrent_state_size", 64) return hparams
Basic 2-frame conv model with stochastic discrete latent.
378,539
def unregister(self, svc_ref): with self.__svc_lock: try: return self.__pending_services.pop(svc_ref) except KeyError: pass if svc_ref not in self.__svc_registry: raise BundleException("Unknown service: {0}".format(svc_ref)) bundle = svc_ref.get_bundle() service = self.__svc_registry.pop(svc_ref) for spec in svc_ref.get_property(OBJECTCLASS): spec_services = self.__svc_specs[spec] idx = bisect.bisect_left(spec_services, svc_ref) del spec_services[idx] if not spec_services: del self.__svc_specs[spec] if svc_ref.is_factory(): factory, svc_reg = self.__svc_factories.pop(svc_ref) for counter in self.__factory_usage.values(): counter.cleanup_service(factory, svc_reg) else: bundle_services = self.__bundle_svc[bundle] bundle_services.remove(svc_ref) if not bundle_services: del self.__bundle_svc[bundle] return service
Unregisters a service :param svc_ref: A service reference :return: The unregistered service instance :raise BundleException: Unknown service reference
378,540
def date_range_builder(self, start=, end=None): if not end: end = time.strftime() return % (start, end)
Builds date range query. :param start: Date string. format: YYYY-MM-DD :type start: String :param end: date string. format: YYYY-MM-DD :type end: String :returns: String
378,541
def remove_node(self, p_id, remove_unconnected_nodes=True): if self.has_node(p_id): for neighbor in self.incoming_neighbors(p_id): self._edges[neighbor].remove(p_id) neighbors = set() if remove_unconnected_nodes: neighbors = self.outgoing_neighbors(p_id) del self._edges[p_id] for neighbor in neighbors: if self.is_isolated(neighbor): self.remove_node(neighbor)
Removes a node from the graph.
378,542
def _get_lineage(self, tax_id, merge_obsolete=True): return lineage
Return a list of [(rank, tax_id)] describing the lineage of tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been replaced, use the corresponding value in table merged.
378,543
def do_ls(self, nothing = ): for d in self.dirs: self.stdout.write("\033[0;34m" + ( % d) + "\033[0m") for f in self.files: self.stdout.write( % f)
list files in current remote directory
378,544
def run_benchmarks(dir, models, wav, alphabet, lm_binary=None, trie=None, iters=-1): r assert_valid_dir(dir) inference_times = [ ] for model in models: model_filename = model current_model = { : model, : [ ], : numpy.infty, : numpy.infty } if lm_binary and trie: cmdline = % (model_filename, alphabet, lm_binary, trie, wav) else: cmdline = % (model_filename, alphabet, wav) for it in range(iters): sys.stdout.write( % (os.path.basename(model), (it+1), iters)) sys.stdout.flush() rc, stdout, stderr = exec_command(cmdline, cwd=dir) if rc == 0: inference_time = float(stdout.split()[1].split()[-1]) current_model[].append(inference_time) else: print( % (cmdline, rc)) print( % stdout) print( % stderr) raise AssertionError( % (rc)) sys.stdout.write() sys.stdout.flush() current_model[] = numpy.mean(current_model[]) current_model[] = numpy.std(current_model[]) inference_times.append(current_model) return inference_times
r''' Core of the running of the benchmarks. We will run on all of the models, against the WAV file provided as wav, and the provided alphabet.
378,545
def http2time(text): m = STRICT_DATE_RE.search(text) if m: g = m.groups() mon = MONTHS_LOWER.index(g[1].lower()) + 1 tt = (int(g[2]), mon, int(g[0]), int(g[3]), int(g[4]), float(g[5])) return _timegm(tt) text = text.lstrip() text = WEEKDAY_RE.sub("", text, 1) day, mon, yr, hr, min, sec, tz = [None]*7 m = LOOSE_HTTP_DATE_RE.search(text) if m is not None: day, mon, yr, hr, min, sec, tz = m.groups() else: return None return _str2time(day, mon, yr, hr, min, sec, tz)
Returns time in seconds since epoch of time represented by a string. Return value is an integer. None is returned if the format of str is unrecognized, the time is outside the representable range, or the timezone string is not recognized. If the string contains no timezone, UTC is assumed. The timezone in the string may be numerical (like "-0800" or "+0100") or a string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the timezone strings equivalent to UTC (zero offset) are known to the function. The function loosely parses the following formats: Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday) 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday) 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday) The parser ignores leading and trailing whitespace. The time may be absent. If the year is given with only 2 digits, the function will select the century that makes the year closest to the current date.
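A rough sketch of the two-digit-year rule mentioned above (an illustration of the stated behavior, not the library's actual helper): pick whichever century puts the year closest to the current date.

import time

def expand_two_digit_year(yy, now=None):
    # Choose between 19xx and 20xx by distance to the current year.
    cur = now if now is not None else time.localtime().tm_year
    return min((yy + 1900, yy + 2000), key=lambda y: abs(y - cur))

print(expand_two_digit_year(94))   # 1994
print(expand_two_digit_year(8))    # 2008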
378,546
def compare_config(self): if self.ssh_connection is False: self._open_ssh() self.ssh_device.exit_config_mode() diff = self.ssh_device.send_command("show config diff") return diff.strip()
Netmiko is being used to obtain config diffs because pan-python doesn't support the needed command.
378,547
def snippetWithLink(self, url): link = self.soup.find("a", attrs={"href": url}) if link: for p in link.parents: if p.name in ("p", "div"): return " ".join(p.text.split()[0:30]) return None
This method will try to return the first <p> or <div> that contains an <a> tag linking to the given URL.
378,548
def log_info(self, data): info = info_filled = info.format(label = data[], id = data[], ilx = data[], super_id = data[][0][]) logging.info(info_filled) return info_filled
Logs successful responses
378,549
def _is_intrinsic_dict(self, input): return isinstance(input, dict) \ and len(input) == 1 \ and list(input.keys())[0] in self.supported_intrinsics
Does the input contain an intrinsic function? :param input: Object to be checked :return: True, if the input contains a supported intrinsic function. False otherwise
378,550
def compress_dir(path, compression="gz"): for parent, subdirs, files in os.walk(path): for f in files: compress_file(os.path.join(parent, f), compression=compression)
Recursively compresses all files in a directory. Note that this compresses all files singly, i.e., it does not create a tar archive. For that, just use Python tarfile class. Args: path (str): Path to parent directory. compression (str): A compression mode. Valid options are "gz" or "bz2". Defaults to gz.
378,551
def make_filename(s, space=None, language=, strict=False, max_len=None, repeats=1024): r filename = None if strict or language.lower().strip() in (, , , ): if space is None: space = elif not space: space = filename = make_name(s, space=space, lower=False) else: if space is None: space = elif not space: space = if not filename: if language.lower().strip() in (, , , , , , , , , ): filename = re.sub(r + .format(repeats), space, s) else: filename = re.sub(r + (.format(repeats)) + r, space, s) if max_len and int(max_len) > 0 and filename: return filename[:int(max_len)] else: return filename
r"""Process string to remove any characters not allowed by the language specified (default: MSDOS) In addition, optionally replace spaces with the indicated "space" character (to make the path useful in a copy-paste without quoting). Uses the following regular expression to substitute spaces for invalid characters: re.sub(r'[ :\\/?*&"<>|~`!]{1}', space, s) >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=False) 'Whatever-crazy-s-$h-7-n-m3-ou-can-come-up.-with.-txt-' >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=False, repeats=1) 'Whatever-crazy--s-$h-7-n-m3----ou--can-come-up.-with.-txt--' >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', repeats=1) 'Whatever-crazy--s-$h-7-n-m3----ou--can-come-up.-with.-txt--' >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!') 'Whatever-crazy-s-$h-7-n-m3-ou-can-come-up.-with.-txt-' >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=True, repeats=1) 'Whatever_crazy_s_h_7_n_m3_ou_can_come_up_with_txt_' >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=True, repeats=1, max_len=14) 'Whatever_crazy' >>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', max_len=14) 'Whatever-crazy'
378,552
def melt(self, plot=False): from pyny3d.utils import bool2index from scipy.spatial import ConvexHull para = [poly.get_parametric() for poly in self] para = np.array([p/np.linalg.norm(p) for p in para]) n = para.shape[0] cop = [] for i, plane in enumerate(para[:-1]): indexes = np.zeros((n-i-1, 4)) for c in range(4): indexes[:, c] = np.isclose(para[i+1:, c], plane[c]) pos = bool2index(indexes.sum(axis=1)==4)+i+1 if pos.shape[0] > 0: cop.append(np.hstack((i, pos))) para[pos, :] = np.nan substituted = [] cop_cont = [] for i, group in enumerate(cop): polygons = [self[i] for i in group] if Surface.contiguous(polygons): cop_cont.append(polygons) substituted.append(group) if len(substituted) != 0: self.save() if plot: self.plot() substituted = sum(substituted) merged = [] for polygons in cop_cont: points = np.concatenate([polygon.points for polygon in polygons]) hull = ConvexHull(points[:, :2]) merged.append(Polygon(points[hull.vertices])) new_surface = [self[i] for i in range(len(self.polygons)) if i not in substituted] new_surface += merged self.polygons = new_surface self.sorted_areas = None if plot: self.plot()
Find and merge groups of polygons in the surface that meet the following criteria: * Are coplanar. * Are contiguous. * The result is convex. This method is very useful for reducing the number of items and, therefore, the shadowing computation time. Before this instance is overridden, it is saved and can be restored with ``.restore()`` :param plot: If True, generates the before and after visualizations for the surface. Use it to check the results. :type plot: bool :returns: None .. warning:: This method does not check whether the merged polygons are actually convex. The convex hull of the union is directly calculated. For this reason, it is very important to visually check the solution.
378,553
def list(self, pattern='*'): if self._descriptors is None: self._descriptors = self._client.list_resource_descriptors( filter_string=self._filter_string) return [resource for resource in self._descriptors if fnmatch.fnmatch(resource.type, pattern)]
Returns a list of resource descriptors that match the filters. Args: pattern: An optional pattern to further filter the descriptors. This can include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``. Returns: A list of ResourceDescriptor objects that match the filters.
378,554
def run_action(self, unit_sentry, action, _check_output=subprocess.check_output, params=None): self.log.warn( ) return unit_sentry.run_action(action, action_args=params)
Translate to amulet's built-in run_action(). Deprecated. Run the named action on a given unit sentry. params is a dict of parameters to use. The _check_output parameter is no longer used. @return action_id.
378,555
def _GetGsScopes(self): service_accounts = self.watcher.GetMetadata(metadata_key=self.metadata_key) try: scopes = service_accounts[self.service_account]['scopes'] return list(GS_SCOPES.intersection(set(scopes))) if scopes else None except KeyError: return None
Return all Google Storage scopes available on this VM.
378,556
def precompute_optimzation_Y(laplacian_matrix, n_samples, relaxation_kwds): relaxation_kwds.setdefault(,False) relaxation_kwds.setdefault(,) relaxation_kwds.setdefault(,False) if relaxation_kwds[]: print () Lk_tensor, nbk, si_map = \ compute_Lk(laplacian_matrix, n_samples, relaxation_kwds[]) if relaxation_kwds[]: raise NotImplementedError() return { : Lk_tensor, : nbk, : si_map }
compute Lk, neighbors and subset to index map for projected == False
378,557
def op(scalars_layout, collections=None): import tensorflow.compat.v1 as tf assert isinstance(scalars_layout, layout_pb2.Layout) summary_metadata = metadata.create_summary_metadata() return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG, tensor=tf.constant( scalars_layout.SerializeToString(), dtype=tf.string), collections=collections, summary_metadata=summary_metadata)
Creates a summary that contains a layout. When users navigate to the custom scalars dashboard, they will see a layout based on the proto provided to this function. Args: scalars_layout: The scalars_layout_pb2.Layout proto that specifies the layout. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A tensor summary op that writes the layout to disk.
378,558
def set_idle_params(self, timeout=None, exit=None): self._set('idle', timeout) self._set('die-on-idle', exit, cast=bool) return self._section
Activate idle mode - put uWSGI in cheap mode after inactivity timeout. :param int timeout: Inactivity timeout in seconds. :param bool exit: Shutdown uWSGI when idle.
378,559
def upload_sequence_fileobj(file_obj, file_name, fields, retry_fields, session, samples_resource): try: _direct_upload(file_obj, file_name, fields, session, samples_resource) sample_id = fields["sample_id"] except RetryableUploadException: logging.error("{}: Connectivity issue, trying direct upload...".format(file_name)) file_obj.seek(0) try: retry_fields = samples_resource.init_multipart_upload(retry_fields) except requests.exceptions.HTTPError as e: raise_api_error(e.response, state="init") except requests.exceptions.ConnectionError: raise_connectivity_error(file_name) s3_upload = _s3_intermediate_upload( file_obj, file_name, retry_fields, session, samples_resource._client._root_url + retry_fields["callback_url"], ) sample_id = s3_upload.get("sample_id", "<UUID not yet assigned>") logging.info("{}: finished as sample {}".format(file_name, sample_id)) return sample_id
Uploads a single file-like object to the One Codex server via either fastx-proxy or directly to S3. Parameters ---------- file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the case of paired files, they will be interleaved and uploaded uncompressed. In the case of a single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'. file_name : `string` The file_name you wish to associate this fastx file with at One Codex. fields : `dict` Additional data fields to include as JSON in the POST. Must include 'sample_id' and 'upload_url' at a minimum. retry_fields : `dict` Metadata sent to `init_multipart_upload` in the case that the upload via fastx-proxy fails. session : `requests.Session` Connection to One Codex API. samples_resource : `onecodex.models.Samples` Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline. Raises ------ UploadException In the case of a fatal exception during an upload. Returns ------- `string` containing sample ID of newly uploaded file.
378,560
def __create_author_investigator_str(self): _author = "" try: for pub in self.noaa_data_sorted["Publication"]: if "author" in pub: if pub["author"]: _author_src = pub["author"] if isinstance(_author_src, str): try: if " and " in _author_src: _author = _author_src.replace(" and ", "; ") elif ";" in _author_src: _author = _author_src.replace(";", "; ") break except Exception as e: _author = "" elif isinstance(_author_src, list): try: for _entry in _author_src: _author += _entry["name"].split(",")[0] + ", " except Exception as e: _author = "" except Exception: _author = "" return _author
When investigators is empty, try to get authors from the first publication instead. :return str author: Author names
378,561
def get_results_msg(self, results, study): msg = [] if results: fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}" stu_items, num_gos_stu = self.get_item_cnt(results, "study_items") pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items") stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study))) pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n) msg.append("{POP} population items".format(POP=pop_txt)) msg.append("{STU} study items".format(STU=stu_txt)) return msg
Return summary for GOEA results.
378,562
def decrement_display_ref_count(self, amount: int=1): assert not self._closed self.__display_ref_count -= amount if self.__display_ref_count == 0: self.__is_master = False if self.__data_item: for _ in range(amount): self.__data_item.decrement_data_ref_count()
Decrement display reference count to indicate this library item is no longer displayed.
378,563
def render_targets_weighted_spans( targets, preserve_density, ): prepared_weighted_spans = prepare_weighted_spans( targets, preserve_density) def _fmt_pws(pws): name = (.format(pws.doc_weighted_spans.vec_name) if pws.doc_weighted_spans.vec_name else ) return .format(name, render_weighted_spans(pws)) def _fmt_pws_list(pws_lst): return .join(_fmt_pws(pws) for pws in pws_lst) return [_fmt_pws_list(pws_lst) if pws_lst else None for pws_lst in prepared_weighted_spans]
Return a list of rendered weighted spans for targets. Function must accept a list in order to select consistent weight ranges across all targets.
378,564
def copyMakeBorder(src, top, bot, left, right, *args, **kwargs): return _internal._cvcopyMakeBorder(src, top, bot, left, right, *args, **kwargs)
Pad image border with OpenCV. Parameters ---------- src : NDArray source image top : int, required Top margin. bot : int, required Bottom margin. left : int, required Left margin. right : int, required Right margin. type : int, optional, default='0' Filling type (default=cv2.BORDER_CONSTANT). 0 - cv2.BORDER_CONSTANT - Adds a constant colored border. 1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the border elements, like this : fedcba|abcdefgh|hgfedcb 2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above, but with a slight change, like this : gfedcb|abcdefgh|gfedcba 3 - cv2.BORDER_REPLICATE - Last element is replicated throughout, like this: aaaaaa|abcdefgh|hhhhhhh 4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg value : double, optional, default=0 (Deprecated! Use ``values`` instead.) Fill with single value. values : tuple of <double>, optional, default=[] Fill with value(RGB[A] or gray), up to 4 channels. out : NDArray, optional The output NDArray to hold the result. Returns ------- out : NDArray or list of NDArrays The output of this function. Example -------- >>> with open("flower.jpeg", 'rb') as fp: ... str_image = fp.read() ... >>> image = mx.img.imdecode(str_image) >>> image <NDArray 2321x3482x3 @cpu(0)> >>> new_image = mx_border = mx.image.copyMakeBorder(mx_img, 1, 2, 3, 4, type=0) >>> new_image <NDArray 2324x3489x3 @cpu(0)>
378,565
def get_raw_input(description, default=False): additional = % default if default else prompt = % (description, additional) user_input = input_(prompt) return user_input
Get user input from the command line via raw_input / input. description (unicode): Text to display before prompt. default (unicode or False/None): Default value to display with prompt. RETURNS (unicode): User input.
378,566
def set_default_names(data): if all(name is not None for name in data.index.names): return data data = data.copy() if data.index.nlevels > 1: names = [name if name is not None else 'level_{}'.format(i) for i, name in enumerate(data.index.names)] data.index.names = names else: data.index.name = data.index.name or 'index' return data
Sets index names to 'index' for regular, or 'level_x' for Multi
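A quick illustration of the naming behavior described above, sketched directly with pandas on hypothetical example data:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]})                      # unnamed RangeIndex
df.index.name = df.index.name or "index"
print(df.index.name)                                   # 'index'

midx = pd.MultiIndex.from_tuples([(1, "x"), (2, "y")]) # two unnamed levels
names = [n if n is not None else "level_{}".format(i)
         for i, n in enumerate(midx.names)]
print(names)                                           # ['level_0', 'level_1']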
378,567
def _tag_net_direction(data): src = data[][] dst = data[][] if src == : if dst == or in dst or in dst: return else: return elif dst == : return else: return None
Create a tag based on the direction of the traffic
378,568
def extend(self, clauses, weights=None): if weights: for i, cl in enumerate(clauses): self.append(cl, weight=weights[i]) else: for cl in clauses: self.append(cl)
Add several clauses to WCNF formula. The clauses should be given in the form of list. For every clause in the list, method :meth:`append` is invoked. The clauses can be hard or soft depending on the ``weights`` argument. If no weights are set, the clauses are considered to be hard. :param clauses: a list of new clauses to add. :param weights: a list of integer weights. :type clauses: list(list(int)) :type weights: list(int) Example: .. code-block:: python >>> from pysat.formula import WCNF >>> cnf = WCNF() >>> cnf.extend([[-3, 4], [5, 6]]) >>> cnf.extend([[3], [-4], [-5], [-6]], weights=[1, 5, 3, 4]) >>> print cnf.hard [[-3, 4], [5, 6]] >>> print cnf.soft [[3], [-4], [-5], [-6]] >>> print cnf.wght [1, 5, 3, 4]
378,569
def ssn(self): def _checksum(digits): res = 97 - (digits % 97) return res mydate = self.generator.date() elms = mydate.split("-") if elms[0][0] == '2': above = True else: above = False elms[0] = elms[0][2:4] seq = self.generator.random_int(1, 998) seq_str = "{:0>3}".format(seq) elms.append(seq_str) date_as_int = int("".join(elms)) if above: date_as_int += 2000000000 s = _checksum(date_as_int) s_rjust = "{:0>2}".format(s) elms.append(s_rjust) return "".join(elms)
Returns an 11-digit Belgian SSN called "rijksregisternummer" as a string. The first 6 digits represent the birthdate with (in order) year, month and day. The second group of 3 digits represents a sequence number (order of birth). It is even for women and odd for men. For men the range starts at 1 and ends at 997, for women 2 until 998. The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97). Divide those 9 digits by 97, subtract the remainder from 97 and that's the result. For persons born in or after 2000, the 9-digit number needs to be preceded by a 2 (add 2000000000) before the division by 97.
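A small worked sketch of the checksum arithmetic described above, with made-up values (not a real national number): born 1990-02-15, sequence number 007.

digits = "900215" + "007"            # YYMMDD + 3-digit sequence number
base = int(digits)                   # born before 2000, so no 2000000000 offset
check = 97 - (base % 97)             # 900215007 % 97 == 8, so check == 89
ssn = digits + "{:0>2}".format(check)
print(ssn)                           # '90021500789'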
378,570
def parse_config(file_name=): * ret = {} if not os.path.isfile(file_name): return .format(file_name) with salt.utils.files.fopen(file_name) as ifile: for line in ifile: line = salt.utils.stringutils.to_unicode(line) if line.startswith() or line.startswith(): pass else: key, value = line.split() ret[key] = value ret[] = file_name return ret
Return dict of uncommented global variables. CLI Example: .. code-block:: bash salt '*' pkg.parse_config ``NOTE:`` not working properly right now
378,571
def bisect(func, a, b, xtol=1e-12, maxiter=100): fa = func(a) if fa == 0.: return a fb = func(b) if fb == 0.: return b assert sign(fa) != sign(fb) for i in xrange(maxiter): c = (a + b) / 2. fc = func(c) if fc == 0. or abs(b - a) / 2. < xtol: return c if sign(fc) == sign(func(a)): a = c else: b = c else: raise RuntimeError( % maxiter)
Finds the root of `func` using the bisection method. Requirements ------------ - func must be continuous function that accepts a single number input and returns a single number - `func(a)` and `func(b)` must have opposite sign Parameters ---------- func : function the function that we want to find the root of a : number one of the bounds on the input b : number the other bound on the input xtol : number, optional the solution tolerance of the input value. The algorithm is considered converged if `abs(b-a)2. < xtol` maxiter : number, optional the maximum number of iterations allowed for convergence
378,572
def from_text(text): if text.isdigit(): v = int(text) if v >= 0 and v <= 4095: return v v = _by_text.get(text.upper()) if v is None: raise UnknownRcode return v
Convert text into an rcode. @param text: the texual rcode @type text: string @raises UnknownRcode: the rcode is unknown @rtype: int
378,573
def _draw(self, color): width = self.winfo_width() height = self.winfo_height() self.delete("bg") self.delete("cross_h") self.delete("cross_v") del self.bg self.bg = tk.PhotoImage(width=width, height=height, master=self) self._fill() self.create_image(0, 0, image=self.bg, anchor="nw", tags="bg") self.tag_lower("bg") h, s, v = color x = v / 100. y = (1 - s / 100.) self.create_line(0, y * height, width, y * height, tags="cross_h", fill=" self.create_line(x * width, 0, x * width, height, tags="cross_v", fill="
Draw the gradient and the selection cross on the canvas.
378,574
def revoke(self, paths: Union[str, Iterable[str]], users: Union[str, Iterable[str], User, Iterable[User]], recursive: bool=False):
See `AccessControlMapper.revoke`. :param paths: see `AccessControlMapper.revoke` :param users: see `AccessControlMapper.revoke` :param recursive: whether the access control list should be changed recursively for all nested collections
378,575
def main(): try: with open() as config_file: config_data = json.load(config_file) except FileNotFoundError: sys.exit() tenant_id = config_data[] app_id = config_data[] app_secret = config_data[] subscription_id = config_data[] access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) vmsslist = azurerm.list_vmss_sub(access_token, subscription_id) for vmss in vmsslist[]: name = vmss[] location = vmss[] capacity = vmss[][] print(.join([, name, , location, , str(capacity)])) print() rgname = get_rg_from_id(vmss[]) vmss_nics = azurerm.get_vmss_nics( access_token, subscription_id, rgname, name) print(json.dumps(vmss_nics, sort_keys=False, indent=2, separators=(, ))) print() vms = azurerm.list_vmss_vms( access_token, subscription_id, rgname, name) for vmssvm in vms[]: vm_id = vmssvm[] print(vm_id + + vmssvm[] + ) print() vmnics = azurerm.get_vmss_vm_nics(access_token, subscription_id, rgname, name, vm_id) print(json.dumps(vmnics, sort_keys=False, indent=2, separators=(, )))
main routine
378,576
def bundle_dir(): if frozen(): directory = sys._MEIPASS else: directory = os.path.dirname(os.path.abspath(stack()[1][1])) if os.path.exists(directory): return directory
Handle resource management within an executable file.
378,577
def tenant_token(self): rv = getattr(self, '_tenant_token', None) if rv is None: rv = self._tenant_token = self.tenant.get_token() return rv
The cached token of the current tenant.
378,578
def resolve(self, file_path, follow_symlinks=True, allow_fd=False): if isinstance(file_path, int): if allow_fd and sys.version_info >= (3, 3): return self.get_open_file(file_path).get_object() raise TypeError( ) if follow_symlinks: file_path = make_string_path(file_path) return self.get_object_from_normpath(self.resolve_path(file_path)) return self.lresolve(file_path)
Search for the specified filesystem object, resolving all links. Args: file_path: Specifies the target FakeFile object to retrieve. follow_symlinks: If `False`, the link itself is resolved, otherwise the object linked to. allow_fd: If `True`, `file_path` may be an open file descriptor Returns: The FakeFile object corresponding to `file_path`. Raises: IOError: if the object is not found.
378,579
def ping(self): msg = "PING\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error pinging server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply
Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string
378,580
def catalog(self, categories=None): ids = self._filter_ids(None, categories) doc_meta = self._get_meta() return [(doc_id, doc_meta[doc_id].title) for doc_id in ids]
Return information about documents in corpora: a list of tuples (doc_id, doc_title).
378,581
def _tls_pad(self, s): padding = b"" block_size = self.tls_session.wcs.cipher.block_size padlen = block_size - ((len(s) + 1) % block_size) if padlen == block_size: padlen = 0 pad_pattern = chb(padlen) padding = pad_pattern * (padlen + 1) return s + padding
Provided with the concatenation of the TLSCompressed.fragment and the HMAC, append the right padding and return it as a whole. This is the TLS-style padding: while SSL allowed for random padding, TLS (misguidedly) specifies the repetition of the same byte all over, and this byte must be equal to len(<entire padding>) - 1. Meant to be used with a block cipher only.
378,582
def raise_sigint(): if hasattr(signal, 'CTRL_C_EVENT'): os.kill(os.getpid(), signal.CTRL_C_EVENT) else: pgid = os.getpgid(os.getpid()) if pgid == 1: os.kill(os.getpid(), signal.SIGINT) else: os.killpg(os.getpgid(os.getpid()), signal.SIGINT)
Raising the SIGINT signal in the current process and all sub-processes. os.kill() only issues a signal in the current process (without subprocesses). CTRL+C on the console sends the signal to the process group (which we need).
378,583
def touch(self, connection=None): self.create_marker_table() if connection is None: connection = self.connect() connection.execute_non_query( .format(marker_table=self.marker_table), {"update_id": self.update_id, "table": self.table}) assert self.exists(connection)
Mark this update as complete. IMPORTANT, If the marker table doesn't exist, the connection transaction will be aborted and the connection reset. Then the marker table will be created.
378,584
def generatemouseevent(self, x, y, eventType="b1c", drag_button_override=): if drag_button_override not in mouse_click_override: raise ValueError( % \ drag_button_override) global drag_button_remembered point = (x, y) button = centre click_type = None if eventType == "abs" or eventType == "rel": if drag_button_override is not : events = [mouse_click_override[drag_button_override]] elif drag_button_remembered: events = [drag_button_remembered] else: events = [move] if eventType == "rel": point = CGEventGetLocation(CGEventCreate(None)) point.x += x point.y += y elif eventType == "b1p": events = [press_left] drag_button_remembered = drag_left elif eventType == "b1r": events = [release_left] drag_button_remembered = None elif eventType == "b1c": events = [press_left, release_left] elif eventType == "b1d": events = [press_left, release_left] click_type = double_click elif eventType == "b2p": events = [press_other] drag_button_remembered = drag_other elif eventType == "b2r": events = [release_other] drag_button_remembered = None elif eventType == "b2c": events = [press_other, release_other] elif eventType == "b2d": events = [press_other, release_other] click_type = double_click elif eventType == "b3p": events = [press_right] drag_button_remembered = drag_right elif eventType == "b3r": events = [release_right] drag_button_remembered = None elif eventType == "b3c": events = [press_right, release_right] elif eventType == "b3d": events = [press_right, release_right] click_type = double_click else: raise LdtpServerException(u"Mouse event not implemented" % eventType) for event in events: CG_event = CGEventCreateMouseEvent(None, event, point, button) if click_type: CGEventSetIntegerValueField( CG_event, kCGMouseEventClickState, click_type) CGEventPost(kCGHIDEventTap, CG_event) time.sleep(0.01) return 1
Generate a mouse event at the given x, y co-ordinates.

@param x: X co-ordinate
@type x: int
@param y: Y co-ordinate
@type y: int
@param eventType: Mouse click type
@type eventType: str
@param drag_button_override: Any drag_xxx value.
    Only relevant for movements, i.e. eventType = "abs" or "rel".
    Quartz is not fully compatible with Windows, so for drags the drag
    button must be explicitly defined. generatemouseevent will remember
    the last button pressed by default and drag that button; use this
    argument to override that.
@type drag_button_override: str

@return: 1 on success.
@rtype: integer
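A short usage sketch of the method above; the client object name (ldtp) is illustrative:

# Press the left button at (100, 100), drag to (300, 200), then release.
ldtp.generatemouseevent(100, 100, "b1p")
ldtp.generatemouseevent(300, 200, "abs")   # movement reuses the remembered drag button
ldtp.generatemouseevent(300, 200, "b1r")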
378,585
def sc_to_fc(spvec, nmax, mmax, nrows, ncols):
    fdata = np.zeros([int(nrows), ncols], dtype=np.complex128)
    for k in xrange(0, int(ncols / 2)):
        if k < mmax:
            kk = k
            ind = mindx(kk, nmax, mmax)
            vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
            fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
            kk = -(k + 1)
            ind = mindx(kk, nmax, mmax)
            vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
            fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
        if k == mmax:
            kk = k
            ind = mindx(kk, nmax, mmax)
            vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
            fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
    return fdata
Assumes ncols is even.
378,586
def from_nds2(cls, nds2channel):
    name = nds2channel.name
    sample_rate = nds2channel.sample_rate
    unit = nds2channel.signal_units
    if not unit:
        unit = None
    ctype = nds2channel.channel_type_to_string(nds2channel.channel_type)
    dtype = {
        nds2channel.DATA_TYPE_INT16: numpy.int16,
        nds2channel.DATA_TYPE_INT32: numpy.int32,
        nds2channel.DATA_TYPE_INT64: numpy.int64,
        nds2channel.DATA_TYPE_FLOAT32: numpy.float32,
        nds2channel.DATA_TYPE_FLOAT64: numpy.float64,
        nds2channel.DATA_TYPE_COMPLEX32: numpy.complex64,
    }.get(nds2channel.data_type)
    return cls(name, sample_rate=sample_rate, unit=unit, dtype=dtype,
               type=ctype)
Generate a new channel using an existing nds2.channel object
378,587
def delegate_method(other, method, name=None):
    frame = sys._getframe(1)
    classdict = frame.f_locals

    @functools.wraps(method)
    def delegate(self, *args, **kwargs):
        other_self = other.__get__(self)
        return method(other_self, *args, **kwargs)

    if getattr(method, '__switchpoint__', False):
        delegate.__switchpoint__ = True
    if name is None:
        name = method.__name__
    propname = None
    for key in classdict:
        if classdict[key] is other:
            propname = key
            break
    if propname:
        qname = getattr(method, '__qualname__', method.__name__)
        # The two docstring templates were stripped from the source; the
        # stand-ins below only keep the original .format(name=..., propname=...)
        # call shape and are assumed wording.
        if '.' in qname:
            delegate.__doc__ = 'A version of ``{name}`` that delegates to ``self.{propname}``.' \
                .format(name=name, propname=propname)
        else:
            delegate.__doc__ = 'Delegates ``{name}`` to ``self.{propname}``.' \
                .format(name=name, propname=propname)
    classdict[name] = delegate
Add a method to the current class that delegates to another method.

The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class.

The *method* argument specifies a method to delegate to. It can be any
callable as long as it takes the instance as its first argument.

It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic in the protocol, but prevents the user from
having to type ``client.protocol.<methodname>`` all the time. For example::

    class MyClient(Client):
        protocol = Client.protocol
        delegate_method(protocol, MyProtocol.method)
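A small, self-contained sketch of the delegation idea the docstring describes, written in plain Python rather than with Gruvi's helper (all names here are illustrative):

import functools

class Engine:
    def start(self, power):
        return "started at %s" % power

def delegate(prop_name, method):
    # Forward calls on the outer object to the attribute named prop_name.
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        return method(getattr(self, prop_name), *args, **kwargs)
    return wrapper

class Car:
    def __init__(self):
        self.engine = Engine()
    start = delegate("engine", Engine.start)

assert Car().start(5) == "started at 5"   # equivalent to car.engine.start(5)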
378,588
def delete_user(self, user_descriptor):
    route_values = {}
    if user_descriptor is not None:
        # Route key and serializer arguments follow the usual Azure DevOps
        # client pattern; the original string literals were stripped.
        route_values['userDescriptor'] = self._serialize.url(
            'user_descriptor', user_descriptor, 'str')
    # The location_id GUID and API version literals are not recoverable here.
    self._send(http_method='DELETE',
               location_id=...,
               version=...,
               route_values=route_values)
DeleteUser. [Preview API] Disables a user. :param str user_descriptor: The descriptor of the user to delete.
378,589
def read_state_file(self, state_file):
    try:
        fh = open(state_file, 'r')
        state = json.load(fh)
        # JSON key names are assumed to mirror the attribute names; the
        # original string literals were stripped from the source.
        self.vpc_id = state['vpc_id']
        self.sg_id = state['sg_id']
        self.sn_ids = state['sn_ids']
        self.instances = state['instances']
    except Exception as e:
        logger.debug("Caught exception while reading state file: {0}".format(e))
        raise e
    logger.debug("Done reading state from the local state file.")
Read the state file, if it exists. If this script has been run previously, resource IDs will have been written to a state file. On starting a run, a state file will be looked for before creating new infrastructure. Information on VPCs, security groups, and subnets are saved, as well as running instances and their states. AWS has a maximum number of VPCs per region per account, so we do not want to clutter users' AWS accounts with security groups and VPCs that will be used only once.
378,590
def endline_repl(self, inputstring, reformatting=False, **kwargs):
    out = []
    ln = 1
    for line in inputstring.splitlines():
        add_one_to_ln = False
        try:
            if line.endswith(lnwrapper):
                # Split off the line-number reference appended after the
                # final "#" comment marker.
                line, index = line[:-1].rsplit("#", 1)
                new_ln = self.get_ref("ln", index)
                if new_ln < ln:
                    raise CoconutInternalException("line number decreased", (ln, new_ln))
                ln = new_ln
                line = line.rstrip()
                add_one_to_ln = True
            if not reformatting or add_one_to_ln:
                line += self.comments.get(ln, "")
            if not reformatting and line.rstrip() and not line.lstrip().startswith("#"):
                line += self.ln_comment(ln)
        except CoconutInternalException as err:
            complain(err)
        out.append(line)
        if add_one_to_ln:
            ln += 1
    return "\n".join(out)
Add end of line comments.
378,591
def visit_Tuple(self, node):
    if node.elts:
        elts_aliases = set()
        for i, elt in enumerate(node.elts):
            elt_aliases = self.visit(elt)
            elts_aliases.update(ContainerOf(alias, i) for alias in elt_aliases)
    else:
        elts_aliases = None
    return self.add(node, elts_aliases)
A tuple is abstracted as an ordered container of its values.

>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return a, b')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Tuple)
(a, b) => ['|[0]=a|', '|[1]=b|']

where the |[i]=id| notation means something that may contain ``id`` at index ``i``.
378,592
def update_user_trackers(sender, topic, user, request, response, **kwargs):
    # Module path and class name literals assumed from context; the originals
    # were stripped from the source.
    TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler')
    track_handler = TrackingHandler()
    track_handler.mark_topic_read(topic, user)
Receiver to mark a topic being viewed as read. This can result in marking the related forum tracker as read.
378,593
def committer(self) -> Developer:
    return Developer(self._c_object.committer.name,
                     self._c_object.committer.email)
Return the committer of the commit as a Developer object. :return: committer
378,594
def modules(self):
    defmodule = lib.EnvGetNextDefmodule(self._env, ffi.NULL)
    while defmodule != ffi.NULL:
        yield Module(self._env, defmodule)
        defmodule = lib.EnvGetNextDefmodule(self._env, defmodule)
Iterates over the defined Modules.
378,595
def _add_config_regions(nblock_regions, ref_regions, data):
    input_regions_bed = dd.get_variant_regions(data)
    if input_regions_bed:
        input_regions = pybedtools.BedTool(input_regions_bed)
        if len(input_regions) == 1:
            str_regions = str(input_regions[0]).strip()
            input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions),
                                               from_string=True)
        input_nblock = ref_regions.subtract(input_regions, nonamecheck=True)
        if input_nblock == ref_regions:
            raise ValueError("Input variant_region file (%s) "
                             "excludes all genomic regions. Do the chromosome names "
                             "in the BED file match your genome (chr1 vs 1)?"
                             % input_regions_bed)
        all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions)
    else:
        all_intervals = nblock_regions
    if "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data):
        from bcbio.heterogeneity import chromhacks
        remove_intervals = ref_regions.filter(lambda r: not chromhacks.is_nonalt(r.chrom))
        all_intervals = _combine_regions([all_intervals, remove_intervals], ref_regions)
    return all_intervals.merge()
Add additional nblock regions based on configured regions to call. Identifies user defined regions which we should not be analyzing.
378,596
def set_transform_interface_params(spec, input_features, output_features,
                                   are_optional=False):
    input_features = _fm.process_or_validate_features(input_features)
    output_features = _fm.process_or_validate_features(output_features)
    for (fname, ftype) in input_features:
        input_ = spec.description.input.add()
        input_.name = fname
        datatypes._set_datatype(input_.type, ftype)
        if are_optional:
            input_.type.isOptional = are_optional
    for (fname, ftype) in output_features:
        output_ = spec.description.output.add()
        output_.name = fname
        datatypes._set_datatype(output_.type, ftype)
    return spec
Common utilities to set transform interface params.
378,597
def request_slot(client, service: JID, filename: str, size: int,
                 content_type: str):
    payload = Request(filename, size, content_type)
    return (yield from client.send(IQ(
        type_=IQType.GET,
        to=service,
        payload=payload,
    )))
Request an HTTP upload slot.

:param client: The client to request the slot with.
:type client: :class:`aioxmpp.Client`
:param service: Address of the HTTP upload service.
:type service: :class:`~aioxmpp.JID`
:param filename: Name of the file (without path), may be used by the
    server to generate the URL.
:type filename: :class:`str`
:param size: Size of the file in bytes.
:type size: :class:`int`
:param content_type: The MIME type of the file.
:type content_type: :class:`str`
:return: The assigned upload slot.
:rtype: :class:`.xso.Slot`

Sends a :xep:`363` slot request to the XMPP service to obtain HTTP PUT and
GET URLs for a file upload. The upload slot is returned as a
:class:`~.xso.Slot` object.
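A hedged usage sketch: driven from an old-style coroutine to match the ``yield from`` in the function above, with a made-up upload service address and file metadata. The comment about slot attributes is an assumption about the :xep:`363` slot structure, not verified against the library.

import asyncio
import aioxmpp

@asyncio.coroutine
def upload_example(client):
    # Hypothetical service address and file metadata.
    slot = yield from request_slot(
        client,
        aioxmpp.JID.fromstr("upload.example.org"),
        "photo.jpg",
        size=102400,
        content_type="image/jpeg",
    )
    # The returned slot is expected to carry the PUT and GET URLs for the
    # actual HTTP transfer (attribute layout assumed).
    return slot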
378,598
def seek(self, offset, whence=0):
    self._check_can_seek()
    if whence == 0:
        pass
    elif whence == 1:
        offset = self._pos + offset
    elif whence == 2:
        if self._size < 0:
            self._read_all(return_data=False)
        offset = self._size + offset
    else:
        raise ValueError("Invalid value for whence: {}".format(whence))
    if offset is None:
        raise TypeError("Seek offset should be an integer, not None")
    if offset < self._pos:
        self._rewind()
    else:
        offset -= self._pos
    if self._mode != _MODE_READ_EOF:
        self._read_block(offset, return_data=False)
    return self._pos
Change the file position.

The new position is specified by offset, relative to the position
indicated by whence. Possible values for whence are:

    0: start of stream (default): offset must not be negative
    1: current stream position
    2: end of stream; offset must not be positive

Returns the new file position.

Note that seeking is emulated, so depending on the parameters, this
operation may be extremely slow.
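For reference, a plain-Python illustration of how the three whence modes map onto absolute positions, independent of the decompressing reader above:

def absolute_offset(offset, whence, pos, size):
    # 0 = from start, 1 = from the current position, 2 = from the end of
    # the (fully sized) stream, mirroring the arithmetic in seek() above.
    if whence == 0:
        return offset
    if whence == 1:
        return pos + offset
    if whence == 2:
        return size + offset
    raise ValueError("Invalid value for whence: {}".format(whence))

assert absolute_offset(10, 0, pos=50, size=100) == 10
assert absolute_offset(10, 1, pos=50, size=100) == 60
assert absolute_offset(-10, 2, pos=50, size=100) == 90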
378,599
def prj_show_path(self):
    f = self.prj_path_le.text()
    osinter = ostool.get_interface()
    osinter.open_path(f)
Show the project directory in a file browser.

:returns: None
:rtype: None
:raises: None