22,900
def getDigestableArgs(Argv):
    first_kwarg_pos = 0
    for arg in Argv:
        if KWARG_VALIDATOR.search(arg):
            break
        else:
            first_kwarg_pos += 1
    for arg in Argv[first_kwarg_pos:]:
        if not KWARG_VALIDATOR.search(arg):
            # The original message string was lost in extraction; this text is a guess.
            raise HandledException('Invalid keyword argument: %s' % arg)
    return Argv[:first_kwarg_pos], list2dict(Argv[first_kwarg_pos:])
r"""Splits the given Argv into *Args and **KwArgs.
22,901
def calc_q1_lz_v1(self):
    con = self.parameters.control.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    if sta.lz > 0.:
        flu.q1 = con.k4*sta.lz**(1.+con.gamma)
    else:
        flu.q1 = 0.
    sta.lz -= flu.q1
Calculate the slow response of the lower zone layer.

Required control parameters: |K4|, |Gamma|
Calculated fluxes sequence: |Q1|
Updated state sequence: |LZ|

Basic equations:
    :math:`\\frac{dLZ}{dt} = -Q1`
    :math:`Q1 = \\Bigl \\lbrace {{K4 \\cdot LZ^{1+Gamma} \\ | \\ LZ > 0} \\atop {0 \\ | \\ LZ \\leq 0}}`

Examples:

    As long as the lower zone storage is negative...

    >>> from hydpy.models.hland import *
    >>> parameterstep('1d')
    >>> simulationstep('12h')
    >>> k4(0.2)
    >>> gamma(0.0)
    >>> states.lz = -2.0
    >>> model.calc_q1_lz_v1()
    >>> fluxes.q1
    q1(0.0)
    >>> states.lz
    lz(-2.0)

    ...or zero, no slow discharge response occurs:

    >>> states.lz = 0.0
    >>> model.calc_q1_lz_v1()
    >>> fluxes.q1
    q1(0.0)
    >>> states.lz
    lz(0.0)

    For storage values above zero the linear...

    >>> states.lz = 2.0
    >>> model.calc_q1_lz_v1()
    >>> fluxes.q1
    q1(0.2)
    >>> states.lz
    lz(1.8)

    ...or nonlinear storage routing equation applies:

    >>> gamma(1.)
    >>> states.lz = 2.0
    >>> model.calc_q1_lz_v1()
    >>> fluxes.q1
    q1(0.4)
    >>> states.lz
    lz(1.6)

    Note that the assumed length of the simulation step is only half a
    day.  Hence the effective value of the storage coefficient is not
    0.2 but 0.1:

    >>> k4
    k4(0.2)
    >>> k4.value
    0.1
22,902
def check_ordered(self):
    sys.stderr.write("error unimplemented check_ordered\n")
    sys.exit()
    # The code below is unreachable; the subscript key was lost in
    # extraction and 'rng' is a hypothetical reconstruction.
    seen_chrs = set()
    curr_chr = None
    prevstart = 0
    for l in self._lines:
        if not l['rng']:
            continue
        if l['rng'].chr != curr_chr:
            prevstart = 0
            if l['rng'].chr in seen_chrs:
                return False
            curr_chr = l['rng'].chr
            seen_chrs.add(curr_chr)
        if l['rng'].start < prevstart:
            return False
        prevstart = l['rng'].start
    return True
True if each chromosome is listed together as a single chunk and the
range starts go from smallest to largest; otherwise False.

:return: is it ordered?
:rtype: bool
22,903
def poll(self, timeout=-1, maxevents=-1):
    if self._epfd < 0:
        _err_closed()
    if timeout != -1:
        timeout = int(timeout * 1000)
    if maxevents == -1:
        maxevents = FD_SETSIZE - 1
    events = (epoll_event * maxevents)()
    num_events = epoll_wait(
        self._epfd,
        cast(byref(events), POINTER(epoll_event)),
        maxevents, timeout)
    return [(events[i].data.fd, events[i].events)
            for i in range(num_events)]
Poll for events

:param timeout:
    The amount of seconds to wait for events before giving up. The
    default value, -1, represents infinity. Note that unlike the
    underlying ``epoll_wait()`` timeout is a fractional number
    representing **seconds**.
:param maxevents:
    The maximum number of events to report. The default is a
    reasonably-sized maximum, identical to the one selected by
    Python 3.4.
:returns:
    A list of (fd, events) that were reported or an empty list if the
    timeout elapsed.
:raises ValueError:
    If :meth:`closed()` is True
:raises OSError:
    If the underlying ``epoll_wait(2)`` fails. The error message
    matches those found in the manual page.
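A minimal usage sketch; the `Epoll` wrapper name, its constructor, the
`register` call, and the connected socket `sock` are hypothetical here,
only `poll` is taken from the code above:

>>> ep = Epoll()                         # hypothetical wrapper object
>>> ep.register(sock.fileno(), EPOLLIN)  # hypothetical registration call
>>> ep.poll(timeout=0.5)                 # wait at most half a second
[(4, 1)]                                 # e.g. fd 4 is readable (EPOLLIN)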
22,904
def find_first(self, attr_name, resources, extra_prefix=''):
    # The separator strings were lost in extraction; '-' follows from the
    # 'foo-mydb-fis8932ifs' example in the docstring.
    prefix = self.name + '-' + (extra_prefix + '-' if extra_prefix else '')
    for res in resources:
        attr = getattr(res, attr_name)
        if attr.startswith(prefix):
            return res
Returns the boto object for the first resource in ``resources`` that
belongs to this stack. Uses the attribute specified by ``attr_name`` to
match the stack name. E.g. an RDS instance for a stack named ``foo``
might be named ``foo-mydb-fis8932ifs``. This call::

    find_first('id', conn.get_all_dbinstances())

would return the boto.rds.dbinstance.DBInstance object whose ``id`` is
``foo-mydb-fis8932ifs``.

Returns None if a matching resource is not found.

If specified, ``extra_prefix`` is appended to the stack name prefix
before matching.
22,905
def geocode_addresses(self, project_id, dataset_id, address_field, geometry_field, **extra_params): project_url = ( ).format(project_id=project_id) dataset_url = ( ).format(project_url=project_url, dataset_id=dataset_id) project_query_url = ( ).format(project_url=project_url) dataset_data = self.get(dataset_url) dataset_count = dataset_data[] print( % dataset_count) print( % (dataset_count * 2 / 60)) start = datetime.now() print( % start.strftime()) get_query = ( ).format(address_column=address_field, dataset_id=dataset_id) rows = [] print() for i in range(0, dataset_count, 1000): response = self.get( project_query_url, params={ : get_query, : i, : 1000 } ) dataset_rows = response[] rows.extend(dataset_rows) print() print() geocoder_url = geocoder_params = {: 0, : 0} components = for key, value in extra_params.items(): components += .format(key=key, value=value) geocoder_params[] = components[:-1] def geocode_address(row_data): address = row_data[address_field] amigo_id = row_data[] geocoder_params[] = address geocoder_result = self.get(geocoder_url, params=geocoder_params, stream=True) if geocoder_result.status_code == 200: coordinates = json.loads(geocoder_result.text)[ ][0][][] lng = str(coordinates[0]) lat = str(coordinates[1]) return ("(, " "ST_SetSRID(ST_MakePoint({lng}, {lat}), 4326))," ).format(amigo_id=amigo_id, lng=lng, lat=lat) return processed = 0 steps = 30 for i in range(0, len(rows), steps): rows_to_geocode = rows[i: i + steps] threads = [] for row in rows_to_geocode: threads.append(gevent.spawn(geocode_address, row)) gevent.joinall(threads) values = .join([thread.value for thread in threads]) if values != : data = { : ( ).format(dataset_id=dataset_id, geo_column=geometry_field, values=values[:-1]) } self.post(project_query_url, data=data) processed += len(rows_to_geocode) print( % (float(processed) / dataset_count * 100)) count_query = ( ).format(dataset_id=dataset_id, geo_column=geometry_field) points_count = self.get( project_query_url, params={: count_query})[][0][] print() print( % datetime.now().strftime()) print( % (points_count, dataset_count)) total_time = datetime.now() - start print( % total_time) average_time = total_time.total_seconds() / dataset_count print( % average_time)
Geocode addresses in a dataset. The dataset must have a string field with the addresses to geocode and a geometry field (points) for the geocoding results. :param project_id: Must be a string. :param dataset_id: Must be a string. :param address_field: Name of the address field in the dataset. :param geometry_field: Name of the geometry field in the dataset. :param extra_params: Dictionary to filter the Geocoding response. For example: {'country':'PE'} More information: https://developers.google.com/maps/documentation/geocoding/intro#ComponentFiltering
22,906
def result_sort(result_list, start_index=0):
    if len(result_list) < 2:
        return result_list
    to_sort = result_list[start_index:]
    minmax = [x[0] for x in to_sort]
    minimum = min(minmax)
    maximum = max(minmax)
    sorted_list = [None for _ in range(minimum, maximum + 1)]
    for elem in to_sort:
        key = elem[0] - minimum
        sorted_list[key] = elem
    idx_count = start_index
    for elem in sorted_list:
        if elem is not None:
            result_list[idx_count] = elem
            idx_count += 1
    return result_list
Sorts a list of results in O(n) in place (since every run is unique)

:param result_list: List of tuples [(run_idx, res), ...]
:param start_index: Index with which to start, every entry before
    `start_index` is ignored
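Since every run index occurs exactly once, each tuple can be bucketed
directly by its index; for example:

>>> result_sort([(2, 'b'), (0, 'a'), (1, 'c')])
[(0, 'a'), (1, 'c'), (2, 'b')]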
22,907
def is_premium(self, media_type):
    # The subscript key was lost in extraction; 'premium' is a
    # best-effort reconstruction.
    if self.logged_in:
        if media_type in self._user_data['premium']:
            return True
    return False
Return whether the session is premium for a given media type.

@param str media_type  Should be one of ANDROID.MEDIA_TYPE_*
@return bool
22,908
def int_gps_time_to_str(t):
    if isinstance(t, int):
        return str(t)
    elif isinstance(t, float):
        # The original message string was lost in extraction; this text
        # is a guess.
        err_msg = "Input float {} is not an integer GPS time.".format(t)
        raise ValueError(err_msg)
Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and converts it to a string. If a LIGOTimeGPS with nonzero decimal part is given, raises a ValueError.
22,909
def show(closeToo=False):
    IPython.display.display(pylab.gcf())
    if closeToo:
        pylab.close()
Alternative to pylab.show() that updates the IPython window.
22,910
def _init_map(self):
    MultiChoiceAnswerFormRecord._init_map(self)
    FilesAnswerFormRecord._init_map(self)
    FeedbackAnswerFormRecord._init_map(self)
    super(MultiChoiceFeedbackAndFilesAnswerFormRecord, self)._init_map()
stub
22,911
def random_subset_ids_by_count(self, count_per_class=1):
    # Warning message strings were lost in extraction and are paraphrased.
    class_sizes = self.class_sizes
    subsets = list()
    if count_per_class < 1:
        warnings.warn('Requested count is too low; returning an empty list.')
        return list()
    elif count_per_class >= self.num_samples:
        warnings.warn('Requested count exceeds the number of samples; '
                      'returning all keys.')
        return self.keys
    for class_id, class_size in class_sizes.items():
        this_class = self.keys_with_value(self.classes, class_id)
        random.shuffle(this_class)
        subset_size_this_class = max(0, min(class_size, count_per_class))
        if subset_size_this_class < 1 or this_class is None:
            warnings.warn('No subset chosen from class {}.'.format(class_id))
        else:
            subsets_this_class = this_class[0:count_per_class]
            subsets.extend(subsets_this_class)
    if len(subsets) > 0:
        return subsets
    else:
        warnings.warn('Empty subset resulted; returning an empty list.')
        return list()
Returns a random subset of sample ids of specified size by count,
within each class.

Parameters
----------
count_per_class : int
    Exact number of samples per each class.

Returns
-------
subset : list
    Combined list of sample ids from all classes.
22,912
def remove(name, **kwargs):
    # String literals were lost in extraction; 'sysrc -x' and the
    # 'file'/'jail' kwargs below follow the Salt sysrc module and are
    # best-effort reconstructions.
    cmd = 'sysrc -x'
    if 'file' in kwargs:
        cmd += ' -f ' + kwargs['file']
    if 'jail' in kwargs:
        cmd += ' -j ' + kwargs['jail']
    cmd += ' ' + name
    sysrcs = __salt__['cmd.run'](cmd)
    if "sysrc: unknown variable" in sysrcs:
        raise CommandExecutionError(sysrcs)
    else:
        return name + " removed"
Remove system rc configuration variables CLI Example: .. code-block:: bash salt '*' sysrc.remove name=sshd_enable
22,913
def RegisterDefinition(self, data_type_definition):
    # The KeyError message strings were lost in extraction and are
    # paraphrased.
    name_lower = data_type_definition.name.lower()
    if name_lower in self._definitions:
        raise KeyError('Data type definition already set for name: {0:s}.'.format(
            data_type_definition.name))
    if data_type_definition.name in self._aliases:
        raise KeyError('Data type definition already set for alias: {0:s}.'.format(
            data_type_definition.name))
    for alias in data_type_definition.aliases:
        if alias in self._aliases:
            raise KeyError(
                'Data type definition already set for alias: {0:s}.'.format(alias))
    self._definitions[name_lower] = data_type_definition
    for alias in data_type_definition.aliases:
        self._aliases[alias] = name_lower
    if data_type_definition.TYPE_INDICATOR == definitions.TYPE_INDICATOR_FORMAT:
        self._format_definitions.append(name_lower)
Registers a data type definition.

The data type definitions are identified based on their lower case name.

Args:
    data_type_definition (DataTypeDefinition): data type definition.

Raises:
    KeyError: if data type definition is already set for the
        corresponding name.
22,914
def helper(*commands):
    def decorated_func(f):
        f.__help_targets__ = list(commands)
        return f
    return decorated_func
Decorate a function to be the helper function of commands.

Arguments:
    commands: Names of commands that should trigger this function object.

---------------------------
Interface of helper methods:

    @helper('some-command')
    def help_foo(self, args):
        '''
        Arguments:
            args: A list of arguments.

        Returns:
            A string that is the help message.
        '''
        pass
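For example, assuming a hypothetical `greet` command, the decorator just
records the command names on the function object:

>>> @helper('greet')
... def help_greet(self, args):
...     return 'Usage: greet NAME'
>>> help_greet.__help_targets__
['greet']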
22,915
def load_and_parse(self):
    # Use a context manager so the file handle is closed deterministically.
    with open(self.file_path, "r") as f:
        metrics_json = f.read()
    self.metrics = json.loads(metrics_json)
Load the metrics file from the given path
22,916
def _set_dpod(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=dpod.dpod, is_container=, presence=False, yang_name="dpod", rest_name="dpod", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__dpod = t if hasattr(self, ): self._set()
Setter method for dpod, mapped from YANG variable /dpod (container) If this variable is read-only (config: false) in the source YANG file, then _set_dpod is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_dpod() directly.
22,917
def build_clustbits(data, ipyclient, force):
    if os.path.exists(data.tmpdir):
        shutil.rmtree(data.tmpdir)
    os.mkdir(data.tmpdir)
    lbview = ipyclient.load_balanced_view()
    start = time.time()
    printstr = " building clusters | {} | s6 |"
    elapsed = datetime.timedelta(seconds=int(time.time()-start))
    progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
    uhandle = os.path.join(data.dirs.across, data.name+".utemp")
    usort = os.path.join(data.dirs.across, data.name+".utemp.sort")
    async1 = ""
    if not os.path.exists(usort) or force:
        LOGGER.info("building reads file -- loading utemp file into mem")
        async1 = lbview.apply(sort_seeds, *(uhandle, usort))
        while 1:
            elapsed = datetime.timedelta(seconds=int(time.time()-start))
            progressbar(3, 0, printstr.format(elapsed), spacer=data._spacer)
            if async1.ready():
                break
            else:
                time.sleep(0.1)
    async2 = lbview.apply(count_seeds, usort)
    while 1:
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(3, 1, printstr.format(elapsed), spacer=data._spacer)
        if async2.ready():
            break
        else:
            time.sleep(0.1)
    nseeds = async2.result()
    async3 = lbview.apply(sub_build_clustbits, *(data, usort, nseeds))
    while 1:
        elapsed = datetime.timedelta(seconds=int(time.time()-start))
        progressbar(3, 2, printstr.format(elapsed), spacer=data._spacer)
        if async3.ready():
            break
        else:
            time.sleep(0.1)
    elapsed = datetime.timedelta(seconds=int(time.time()-start))
    progressbar(3, 3, printstr.format(elapsed), spacer=data._spacer)
    print("")
    for job in [async1, async2, async3]:
        try:
            if not job.successful():
                raise IPyradWarningExit(job.result())
        except AttributeError:
            pass
Reconstitutes clusters from .utemp and htemp files and writes them to chunked files for aligning in muscle.
22,918
def _compute_ogg_page_crc(page):
    page_zero_crc = page[:OGG_FIRST_PAGE_HEADER_CRC_OFFSET] + \
        b"\x00" * OGG_FIRST_PAGE_HEADER_CRC.size + \
        page[OGG_FIRST_PAGE_HEADER_CRC_OFFSET + OGG_FIRST_PAGE_HEADER_CRC.size:]
    return ogg_page_crc(page_zero_crc)
Compute CRC of an Ogg page.
22,919
def update(self, capacity=values.unset, available=values.unset):
    # Dict keys and the HTTP method string were lost in extraction; they
    # are reconstructed following the usual twilio-python generated
    # pattern.
    data = values.of({'Capacity': capacity, 'Available': available, })
    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )
    return WorkerChannelInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
        worker_sid=self._solution['worker_sid'],
        sid=self._solution['sid'],
    )
Update the WorkerChannelInstance

:param unicode capacity: The total number of Tasks worker should handle
    for this TaskChannel type.
:param bool available: Toggle the availability of the WorkerChannel.

:returns: Updated WorkerChannelInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelInstance
22,920
def debug_mode(self, toggle):
    if toggle:
        self.log.setLevel(logging.DEBUG)
    else:
        self.log.setLevel(logging.ERROR)
Toggle debug mode for more detailed output.

obj.debug_mode(True)  - Turn debug mode on
obj.debug_mode(False) - Turn debug mode off
22,921
def httperror_handler(error): status_code = error.status_code or 500 output = { : status_code, : error.body or UNEXPECTED_ERROR, : bottle.HTTP_CODES.get(status_code) or None, } if bottle.DEBUG: LOG.warning("Debug-mode server is returning traceback and error " "details in the response with a %s status.", error.status_code) if error.exception: output[] = repr(error.exception) else: if any(sys.exc_info()): output[] = repr(sys.exc_info()[1]) else: output[] = None if error.traceback: output[] = error.traceback else: if any(sys.exc_info()): output[] = traceback.format_exc() else: output[] = None if isinstance(output[], bytes): output[] = output[].decode( , errors=) accept = bottle.request.get_header() or writer = functools.partial( json.dumps, sort_keys=True, indent=4) error.set_header(, ) if not in accept: if in accept: if not yaml: LOG.warning("Yaml requested but pyyaml is not installed.") else: error.set_header(, ) writer = functools.partial( yaml.safe_dump, default_flow_style=False, indent=4) error.body = [writer(output).encode()] return error.body
Format error responses properly, return the response body. This function can be attached to the Bottle instance as the default_error_handler function. It is also used by the FormatExceptionMiddleware.
22,922
def shared_options(rq):
    "Default class options to pass to the CLI commands."
    # The dict key strings were lost in extraction; the names below
    # follow the rq CLI option names and are best-effort reconstructions.
    return {
        'url': rq.redis_url,
        'config': None,
        'worker_class': rq.worker_class,
        'job_class': rq.job_class,
        'queue_class': rq.queue_class,
        'connection_class': rq.connection_class,
    }
Default class options to pass to the CLI commands.
22,923
def node_radius(self, node):
    return self.get_idx(node) * self.scale + self.internal_radius
Computes the radial position of the node.
22,924
def gradient(self):
    self._update_approx()
    g = self._ep.lml_derivatives(self._X)
    ed = exp(-self.logitdelta)
    es = exp(self.logscale)
    grad = dict()
    grad["logitdelta"] = g["delta"] * (ed / (1 + ed)) / (1 + ed)
    grad["logscale"] = g["scale"] * es
    grad["beta"] = g["mean"]
    return grad
r"""Gradient of the log of the marginal likelihood. Returns ------- dict Map between variables to their gradient values.
22,925
def is_mouse_over(self, event):
    return event.x == self._x and self._y <= event.y < self._y + self._height
Check whether a MouseEvent is over this scroll bar.

:param event: The MouseEvent to check.
:returns: True if the mouse event is over the scroll bar.
22,926
def load_saved_records(self, status, records):
    # The rec.get() key strings and defaults were lost in extraction; the
    # names below mirror the ALDBRecord arguments and are best-effort
    # reconstructions.
    if isinstance(status, ALDBStatus):
        self._status = status
    else:
        self._status = ALDBStatus(status)
    for mem_addr in records:
        rec = records[mem_addr]
        control_flags = int(rec.get('control_flags', 0))
        group = int(rec.get('group', 0))
        rec_addr = rec.get('address', '')
        data1 = int(rec.get('data1', 0))
        data2 = int(rec.get('data2', 0))
        data3 = int(rec.get('data3', 0))
        self[int(mem_addr)] = ALDBRecord(int(mem_addr), control_flags,
                                         group, rec_addr,
                                         data1, data2, data3)
    if self._status == ALDBStatus.LOADED:
        keys = list(self._records.keys())
        keys.sort(reverse=True)
        first_key = keys[0]
        self._mem_addr = first_key
Load ALDB records from a set of saved records.
22,927
def FromMicroseconds(self, micros):
    self.seconds = micros // _MICROS_PER_SECOND
    self.nanos = (micros % _MICROS_PER_SECOND) * _NANOS_PER_MICROSECOND
Converts microseconds since epoch to Timestamp.
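For example, with the usual protobuf constants (_MICROS_PER_SECOND =
10**6, _NANOS_PER_MICROSECOND = 10**3), 1500000001 microseconds split
into 1500 whole seconds plus 1 microsecond = 1000 nanoseconds; the
import path assumes this is protobuf's Timestamp:

>>> from google.protobuf.timestamp_pb2 import Timestamp
>>> ts = Timestamp()
>>> ts.FromMicroseconds(1500000001)
>>> ts.seconds, ts.nanos
(1500, 1000)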
22,928
def get_details(self, language=None):
    if self._details is None:
        if language is None:
            try:
                # The subscript key was lost in extraction; 'language'
                # follows from the surrounding context.
                language = self._query_instance._request_params['language']
            except KeyError:
                language = lang.ENGLISH
        self._details = _get_place_details(
            self.place_id, self._query_instance.api_key,
            self._query_instance.sensor, language=language)
Retrieves full information on the place matching the place_id.

Further attributes will be made available on the instance once this
method has been invoked.

keyword arguments:
  language -- The language code, indicating in which language the
              results should be returned, if possible. This value
              defaults to the language that was used to generate the
              GooglePlacesSearchResult instance.
22,929
def get_client(self, name):
    mech = self.get(name)
    return mech if isinstance(mech, ClientMechanism) else None
Like :meth:`.get`, but only mechanisms inheriting
:class:`ClientMechanism` will be returned.

Args:
    name: The SASL mechanism name.

Returns:
    The mechanism object or ``None``
22,930
def ICALImporter(ctx, filename, all, owner, calendar, create_calendar, clear_calendar, dry, execfilter): log() objectmodels = ctx.obj[].objectmodels if objectmodels[].count({: owner}) > 0: owner_object = objectmodels[].find_one({: owner}) elif objectmodels[].count({: owner}) > 0: owner_object = objectmodels[].find_one({: owner}) else: log(, lvl=warn) return log() if objectmodels[].count({: calendar}) > 0: calendar = objectmodels[].find_one({: calendar}) elif objectmodels[].count({: owner}) > 0: calendar = objectmodels[].find_one({: calendar}) elif create_calendar: calendar = objectmodels[]({ : std_uuid(), : calendar }) else: log(, lvl=warn) return log() if clear_calendar is True: log() for item in objectmodels[].find({: calendar.uuid}): item.delete() with open(filename, ) as file_object: caldata = Calendar.from_ical(file_object.read()) keys = { : , : , : , : , : , : , : , : , : , : , : } mapping = { : , : } imports = [] def ical_import_filter(original, logfacilty): log() return original if execfilter is not None: import os textFilePath = os.path.abspath(os.path.join(os.path.curdir, execfilter)) textFileFolder = os.path.dirname(textFilePath) from importlib.machinery import SourceFileLoader filter_module = SourceFileLoader("importfilter", textFilePath).load_module() ical_import_filter = filter_module.ical_import_filter for event in caldata.walk(): if event.name == : log(event, lvl=verbose, pretty=True) initializer = { : std_uuid(), : calendar.uuid, } for item in keys: thing = event.get(item, None) if thing is None: thing = + item else: if keys[item] == : thing = str(thing) else: thing = parser.parse(str(thing.dt)) thing = thing.isoformat() if item in mapping: item_assignment = mapping[item] else: item_assignment = item initializer[item_assignment] = thing new_event = objectmodels[](initializer) new_event = ical_import_filter(new_event, log) imports.append(new_event) log(new_event, lvl=debug) for ev in imports: log(ev.summary) if not dry: log() objectmodels[].bulk_create(imports) calendar.save() else: log(, lvl=warn)
Calendar Importer for iCal (ics) files
22,931
def ystep(self):
    self.Y = sp.prox_l1(self.AX + self.U, (self.lmbda / self.rho) * self.wl1)
    super(ConvBPDN, self).ystep()
r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{y}`.
22,932
def time_to_first_byte(self):
    # Subscript keys were lost in extraction; the names below follow the
    # HAR 1.2 spec ('response', 'status', 'timings', 'receive', 'time')
    # and the 'unknown' page id is a guess.
    if self.page_id == 'unknown':
        return None
    ttfb = 0
    for entry in self.entries:
        if entry['response']['status'] == 200:
            for k, v in iteritems(entry['timings']):
                if k != 'receive':
                    if v > 0:
                        ttfb += v
            break
        else:
            ttfb += entry['time']
    return ttfb
Time to first byte of the page request in ms
22,933
def imatch(pattern, name):
    try:
        re_pat = _PATTERN_CACHE[(pattern, False)]
    except KeyError:
        # The trailing raw string was lost in extraction; r"\Z" is the
        # usual end-of-string anchor in fnmatch-style translators.
        res = "(?ms)" + _translate(pattern, case_sensitive=False) + r"\Z"
        _PATTERN_CACHE[(pattern, False)] = re_pat = re.compile(res, re.IGNORECASE)
    return re_pat.match(name) is not None
Test whether a name matches a wildcard pattern (case insensitive).

Arguments:
    pattern (str): A wildcard pattern, e.g. ``"*.py"``.
    name (str): A filename.

Returns:
    bool: `True` if the filename matches the pattern.
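For example:

>>> imatch('*.py', 'MODULE.PY')
True
>>> imatch('*.py', 'module.pyc')
False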
22,934
def rgb_to_yiq(rgb):
    r, g, b = rgb[0] / 255, rgb[1] / 255, rgb[2] / 255
    y = (0.299 * r) + (0.587 * g) + (0.114 * b)
    i = (0.596 * r) - (0.275 * g) - (0.321 * b)
    q = (0.212 * r) - (0.528 * g) + (0.311 * b)
    return round(y, 3), round(i, 3), round(q, 3)
Convert an RGB color representation to a YIQ color representation.

(r, g, b) :: r -> [0, 255]
             g -> [0, 255]
             b -> [0, 255]

:param rgb: A tuple of three numeric values corresponding to the red,
    green, and blue value.
:return: YIQ representation of the input RGB value.
:rtype: tuple
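For example, pure orange converts as follows (values rounded to three
places by the function):

>>> rgb_to_yiq((255, 128, 0))
(0.594, 0.458, -0.053)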
22,935
def sspro8_results(self):
    return ssbio.protein.sequence.utils.fasta.load_fasta_file_as_dict_of_seqs(
        self.out_sspro8)
Parse the SSpro8 output file and return a dict of secondary structure compositions.
22,936
def cookie_get(self, name):
    # The hasattr() attribute string was lost in extraction; 'cookies'
    # follows from the lines below.
    if not hasattr(self, 'cookies'):
        return None
    if self.cookies.get(name):
        return self.cookies.get(name).value
    return None
Check for a cookie value by name.

:param str name: Name of the cookie value to retrieve.
:return: Returns the cookie value if it's set or None if it's not found.
22,937
def load_commands(self, parser):
    entrypoints = self._get_entrypoints()
    already_loaded = set()
    for entrypoint in entrypoints:
        if entrypoint.name not in already_loaded:
            command_class = entrypoint.load()
            command_class(entrypoint.name, self, parser).prepare()
            already_loaded.add(entrypoint.name)
Load commands of this profile. :param parser: argparse parser on which to add commands
22,938
def transmit(self, channel, message):
    target = (
        self.slack.server.channels.find(channel)
        or self._find_user_channel(username=channel)
    )
    message = self._expand_references(message)
    # The getattr() attribute string was lost in extraction; 'thread'
    # follows from the docstring.
    target.send_message(message, thread=getattr(channel, 'thread', None))
Send the message to Slack. :param channel: channel or user to whom the message should be sent. If a ``thread`` attribute is present, that thread ID is used. :param str message: message to send.
22,939
def top_sections(self):
    # The split() argument strings were lost in extraction; splitting on
    # newlines and '+' junctions is inferred from the docstring.
    top_line = self.text.split('\n')[0]
    sections = len(top_line.split('+')) - 2
    return sections
The number of sections that touch the top side.

Returns
-------
sections : int
    The number of sections on the top
22,940
def is_current_manager_equals_to(cls, pm):
    # The attribute name string was lost in extraction; 'works_result'
    # follows from the setattr() call below.
    if hasattr(cls, 'works_result'):
        return cls.works_result
    is_ok = bool(cls._try_get_current_manager() == pm)
    setattr(cls, 'works_result', is_ok)
    return is_ok
Returns True if this package manager is usable, False otherwise.
22,941
def get_storage(self, script_hash, key, **kwargs):
    hexkey = binascii.hexlify(key.encode()).decode()
    hexresult = self._call(
        JSONRPCMethods.GET_STORAGE.value, params=[script_hash, hexkey, ],
        **kwargs)
    try:
        assert hexresult
        result = bytearray(binascii.unhexlify(hexresult.encode()))
    except AssertionError:
        result = hexresult
    return result
Returns the value stored in the storage of a contract script hash for a given key. :param script_hash: contract script hash :param key: key to look up in the storage :type script_hash: str :type key: str :return: value associated with the storage key :rtype: bytearray
22,942
def create_dashboard(self, panel_file, data_sources=None, strict=True): es_enrich = self.conf[][] kibana_url = self.conf[][] mboxes_sources = set([, , , ]) if data_sources and any(x in data_sources for x in mboxes_sources): data_sources = list(data_sources) data_sources.append() if data_sources and ( in data_sources): data_sources = list(data_sources) data_sources.append() if data_sources and in data_sources: data_sources = list(data_sources) data_sources.append() if data_sources and in data_sources: data_sources = list(data_sources) data_sources.append() if data_sources and in data_sources: data_sources = list(data_sources) data_sources.append() try: import_dashboard(es_enrich, kibana_url, panel_file, data_sources=data_sources, strict=strict) except ValueError: logger.error("%s does not include release field. Not loading the panel.", panel_file) except RuntimeError: logger.error("Can not load the panel %s", panel_file)
Upload a panel to Elasticsearch if it does not exist yet.

If a list of data sources is specified, upload only those elements
(visualizations, searches) that match that data source.

:param panel_file: file name of the panel (dashboard) to upload
:param data_sources: list of data sources
:param strict: only upload a dashboard if it is newer than the one
    already existing
22,943
def _check_configs(self):
    configs = set(self._find_configs())
    known_configs = set(self.configs.keys())
    new_configs = configs - known_configs
    for cfg in (known_configs - configs):
        self.log.debug("Compass configuration has been removed: " + cfg)
        del self.configs[cfg]
    for cfg in new_configs:
        self.log.debug("Found new compass configuration: " + cfg)
        self.configs[cfg] = CompassConfig(cfg)
Reloads the configuration files.
22,944
def yield_expr__26(self, yield_loc, exprs):
    if exprs is not None:
        return ast.Yield(value=exprs,
                         yield_loc=yield_loc, loc=yield_loc.join(exprs.loc))
    else:
        return ast.Yield(value=None,
                         yield_loc=yield_loc, loc=yield_loc)
(2.6, 2.7, 3.0, 3.1, 3.2) yield_expr: 'yield' [testlist]
22,945
def getAttributeData(self, name, channel=None):
    return self._getNodeData(name, self._ATTRIBUTENODE, channel)
Returns an attribute.
22,946
def delete(self, *args, **kwargs):
    # The kwargs key string was lost in extraction; 'skip_reverses'
    # follows from the variable name.
    skip_reverses = kwargs.pop('skip_reverses', False)
    if not skip_reverses:
        self._delete_reverses()
    return super(Cloneable, self).delete(*args, **kwargs)
Delete clonable relations first, since they may be objects that wouldn't otherwise be deleted. Calls super to actually delete the object.
22,947
def get_custom_annotations_for_alias(data_type):
    result = []
    data_type, _ = unwrap_nullable(data_type)
    while is_alias(data_type):
        result.extend(data_type.custom_annotations)
        data_type, _ = unwrap_nullable(data_type.data_type)
    return result
Given a Stone data type, returns all custom annotations applied to it.
22,948
def load_waypoints(self, filename):
    self.wploader.target_system = self.target_system
    self.wploader.target_component = self.target_component
    try:
        self.wploader.load(filename)
    except Exception as msg:
        print("Unable to load %s - %s" % (filename, msg))
        return
    print("Loaded %u waypoints from %s" % (self.wploader.count(), filename))
    self.send_all_waypoints()
load waypoints from a file
22,949
def to_text(value, encoding='utf-8'):
    # The default encoding string was lost in extraction; 'utf-8' follows
    # from the docstring.
    if not value:
        return
    if isinstance(value, six.text_type):
        return value
    if isinstance(value, six.binary_type):
        return value.decode(encoding)
    return six.text_type(value)
Convert value to unicode; the default encoding is utf-8.

:param value: Value to be converted
:param encoding: Desired encoding
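For example, under Python 3:

>>> to_text(b'caf\xc3\xa9')
'café'
>>> to_text(123)
'123'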
22,950
def legacy_decrypt(jwe, jwk, adata=, validate_claims=True, expiry_seconds=None): protected_header, encrypted_key, iv, ciphertext, authentication_tag = map( b64decode_url, jwe) header = json_decode(protected_header) alg = header[HEADER_ALG] enc = header[HEADER_ENC] encryption_key = _decrypt_key(encrypted_key, jwk, alg) ((_, decipher), _), ((hash_fn, _), mod) = JWA[enc] version = header.get(_TEMP_VER_KEY) if version: plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:], iv) hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version), encryption_key[:-mod.digest_size/2], mod=mod) else: plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv) hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version), encryption_key[-mod.digest_size:], mod=mod) if not const_compare(auth_tag(hash), authentication_tag): raise Error() if HEADER_ZIP in header: try: (_, decompress) = COMPRESSION[header[HEADER_ZIP]] except KeyError: raise Error(.format( header[HEADER_ZIP])) plaintext = decompress(plaintext) claims = json_decode(plaintext) try: del claims[_TEMP_VER_KEY] except KeyError: pass _validate(claims, validate_claims, expiry_seconds) return JWT(header, claims)
Decrypts a deserialized :class:`~jose.JWE` :param jwe: An instance of :class:`~jose.JWE` :param jwk: A `dict` representing the JWK required to decrypt the content of the :class:`~jose.JWE`. :param adata: Arbitrary string data used during encryption for additional authentication. :param validate_claims: A `bool` indicating whether or not the `exp`, `iat` and `nbf` claims should be validated. Defaults to `True`. :param expiry_seconds: An `int` containing the JWT expiry in seconds, used when evaluating the `iat` claim. Defaults to `None`, which disables `iat` claim validation. :rtype: :class:`~jose.JWT` :raises: :class:`~jose.Expired` if the JWT has expired :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid :raises: :class:`~jose.Error` if there is an error decrypting the JWE
22,951
def get_argument_values( arg_defs, arg_asts, variables=None, ): if not arg_defs: return {} if arg_asts: arg_ast_map = { arg.name.value: arg for arg in arg_asts } else: arg_ast_map = {} result = {} for name, arg_def in arg_defs.items(): arg_type = arg_def.type arg_ast = arg_ast_map.get(name) if name not in arg_ast_map: if arg_def.default_value is not None: result[arg_def.out_name or name] = arg_def.default_value continue elif isinstance(arg_type, GraphQLNonNull): raise GraphQLError( .format( name=name, arg_type=arg_type ), arg_asts, ) elif isinstance(arg_ast.value, ast.Variable): variable_name = arg_ast.value.name.value if variables and variable_name in variables: result[arg_def.out_name or name] = variables[variable_name] elif arg_def.default_value is not None: result[arg_def.out_name or name] = arg_def.default_value elif isinstance(arg_type, GraphQLNonNull): raise GraphQLError( .format( name=name, arg_type=arg_type, variable_name=variable_name ), arg_asts, ) continue else: value = value_from_ast(arg_ast.value, arg_type, variables) if value is None: if arg_def.default_value is not None: value = arg_def.default_value result[arg_def.out_name or name] = value else: result[arg_def.out_name or name] = value return result
Prepares an object map of argument values given a list of argument definitions and list of argument AST nodes.
22,952
def remove_usb_device_source(self, id_p):
    if not isinstance(id_p, basestring):
        raise TypeError("id_p can only be an instance of type basestring")
    self._call("removeUSBDeviceSource", in_p=[id_p])
Removes a previously added USB device source.

in id_p of type str
    The identifier used when the source was added.
22,953
def sigusr2_handler(self, unused_signum, unused_frame):
    # The original checked self._sigusr1_handler_func here, which looks
    # like a copy-paste bug in a SIGUSR2 handler.
    if self._sigusr2_handler_func is not None:
        self._sigusr2_handler_func(self.context)
Handle SIGUSR2 signal.

Call the function defined in **settings.SIGUSR2_HANDLER**.
22,954
def assign_default_log_values(self, fpath, line, formatter):
    # The string values below were lost in extraction and are restored
    # from the doctest in the docstring.
    return dict(
        id=None,
        file=fpath,
        host=self.HOST,
        formatter=formatter,
        event='event',
        data={},
        raw=line,
        timestamp=datetime.datetime.utcnow().isoformat(),
        type='log',
        level='debug',
        error=False,
        error_tb='',
    )
>>> lc = LogCollector(
...     'file=/path/to/log_file.log:formatter=logagg.formatters.basescript',
...     30)
>>> from pprint import pprint
>>> formatter = 'logagg.formatters.mongodb'
>>> fpath = '/var/log/mongodb/mongodb.log'
>>> line = 'some log line here'
>>> default_log = lc.assign_default_log_values(fpath, line, formatter)
>>> pprint(default_log)  # doctest: +ELLIPSIS
{'data': {},
 'error': False,
 'error_tb': '',
 'event': 'event',
 'file': '/var/log/mongodb/mongodb.log',
 'formatter': 'logagg.formatters.mongodb',
 'host': '...',
 'id': None,
 'level': 'debug',
 'raw': 'some log line here',
 'timestamp': '...',
 'type': 'log'}
22,955
def getCovariance(self, normalize=True, i0=None, i1=None, pos0=None,
                  pos1=None, chrom=None, center=True, unit=True,
                  pos_cum0=None, pos_cum1=None, blocksize=None, X=None,
                  **kw_args):
    if X is not None:
        K = X.dot(X.T)
        Nsnp = X.shape[1]
    else:
        if (i0 is None) and (i1 is None) and \
                ((pos0 is not None) & (pos1 is not None) & (chrom is not None)) or \
                ((pos_cum0 is not None) & (pos_cum1 is not None)):
            # Fixed a typo in the original: 'pose_cum1' -> 'pos_cum1'.
            i0, i1 = self.getGenoIndex(pos0=pos0, pos1=pos1, chrom=chrom,
                                       pos_cum0=pos_cum0, pos_cum1=pos_cum1)
        [N, M] = self.genoM.shape
        if blocksize is None:
            blocksize = M
        if i0 is None:
            i0 = 0
        if i1 is None:
            i1 = M
        nread = i0
        K = None
        Nsnp = i1 - i0
        while nread < i1:
            thisblock = min(blocksize, i1 - nread)
            X = self.getGenotypes(i0=nread, i1=(nread + thisblock),
                                  center=center, unit=unit, **kw_args)
            if K is None:
                K = X.dot(X.T)
            else:
                K += X.dot(X.T)
            nread += thisblock
    if normalize:
        K /= (K.diagonal().mean())
    else:
        K /= Nsnp
    return K
calculate the empirical genotype covariance in a region
22,956
def add_document(self, key, url, **kwargs):
    # String literals (the dict keys and the error message) were lost in
    # extraction; the names below follow the Keyword Args listed in the
    # docstring and are best-effort reconstructions.
    document = self._check_metadata_for_file(key=key, url=url, **kwargs)
    for dict_key in (
        'description', 'fulltext', 'hidden', 'material',
        'original_url', 'url', 'filename',
    ):
        if kwargs.get(dict_key):
            document[dict_key] = kwargs[dict_key]
    if key_already_there(document, self.record.get('documents', ())):
        raise ValueError(
            "There's already a document with the key %s." % key)
    self._append_to('documents', document)
Adds document to record

Args:
    key (string): document key
    url (string): document url

Keyword Args:
    description (string): simple description
    fulltext (bool): mark if this is a full text
    hidden (bool): whether the document should be hidden
    material (string):
    original_url (string): original url
    filename (string): current url

Returns:
    None
22,957
def data(self, namespace):
    assert namespace
    if namespace in self._data:
        return self._data[namespace]
    new_data = {}
    self._data[namespace] = new_data
    return new_data
Gets the thread.local data (dict) for a given namespace. Args: namespace (string): The namespace, or key, of the data dict. Returns: (dict)
22,958
def add_native(cls, name, func, ret, interp=None, send_interp=False):
    if interp is None:
        natives = cls._natives
    else:
        natives = interp._natives
    natives[name] = functions.NativeFunction(
        name, func, ret, send_interp
    )
Add the native python function ``func`` into the pfp interpreter with
the name ``name`` and return value ``ret`` so that it can be called
from within a template script.

.. note::
    The :any:`@native <pfp.native.native>` decorator exists to simplify
    this.

    All native functions must have the signature
    ``def func(params, ctxt, scope, stream, coord [,interp])``,
    optionally allowing an interpreter param if ``send_interp`` is
    ``True``.

    Example:

    The example below defines a function ``Sum`` using the
    ``add_native`` method. ::

        import pfp.fields
        from pfp.fields import PYVAL

        def native_sum(params, ctxt, scope, stream, coord):
            return PYVAL(params[0]) + PYVAL(params[1])

        pfp.interp.PfpInterp.add_native("Sum", native_sum, pfp.fields.Int64)

:param basestring name: The name the function will be exposed as in
    the interpreter.
:param function func: The native python function that will be
    referenced.
:param type(pfp.fields.Field) ret: The field class that the return
    value should be cast to.
:param pfp.interp.PfpInterp interp: The specific pfp interpreter the
    function should be defined in.
:param bool send_interp: If true, the current pfp interpreter will be
    added as an argument to the function.
22,959
def show():
    # The demo strings were lost in extraction; printing each style's own
    # name is a guess.
    sys.stdout.write(colorful.bold('bold') + '\n')
    sys.stdout.write(colorful.dimmed('dimmed') + '\n')
    sys.stdout.write(colorful.italic('italic') + '\n')
    sys.stdout.write(colorful.underlined('underlined') + '\n')
    sys.stdout.write(colorful.inversed('inversed') + '\n')
    sys.stdout.write(colorful.concealed('concealed') + '\n')
    sys.stdout.write(colorful.struckthrough('struckthrough') + '\n')
    sys.stdout.write(colorful.red('red') + '\n')
    sys.stdout.write(colorful.green('green') + '\n')
    sys.stdout.write(colorful.yellow('yellow') + '\n')
    sys.stdout.write(colorful.blue('blue') + '\n')
    sys.stdout.write(colorful.magenta('magenta') + '\n')
    sys.stdout.write(colorful.cyan('cyan') + '\n')
    sys.stdout.write(colorful.white('white') + '\n')
    sys.stdout.write(colorful.on_red('on_red') + '\n')
    sys.stdout.write(colorful.on_green('on_green') + '\n')
    sys.stdout.write(colorful.on_yellow('on_yellow') + '\n')
    sys.stdout.write(colorful.on_blue('on_blue') + '\n')
    sys.stdout.write(colorful.on_magenta('on_magenta') + '\n')
    sys.stdout.write(colorful.on_cyan('on_cyan') + '\n')
    sys.stdout.write(colorful.on_white('on_white') + '\n')
Show the modifiers and colors
22,960
def autocrop(im, bgcolor):
    "Crop away a border of the given background color."
    if im.mode != "RGB":
        im = im.convert("RGB")
    bg = Image.new("RGB", im.size, bgcolor)
    diff = ImageChops.difference(im, bg)
    bbox = diff.getbbox()
    if bbox:
        return im.crop(bbox)
    return im
Crop away a border of the given background color.
22,961
def definition_to_message( definition, message=None, table_of_contents=None, heading_level=None): if message is None: message = m.Message() if table_of_contents is None: table_of_contents = m.Message() if heading_level: _create_section_header( message, table_of_contents, definition[].replace(, ), definition[], heading_level=heading_level) else: header = m.Paragraph(m.ImportantText(definition[])) message.add(header) url = _definition_icon_url(definition) if url is None: message.add(m.Paragraph(definition[])) if in definition: _citations_to_message(message, definition) else: LOGGER.info( + url) table = m.Table(style_class=) row = m.Row() row.add(m.Cell(m.Image(url, **MEDIUM_ICON_STYLE))) row.add(m.Cell(definition[])) table.add(row) for citation in definition[]: if citation[] in [None, ]: continue row = m.Row() row.add(m.Cell()) if citation[] in [None, ]: row.add(m.Cell(citation[])) else: row.add(m.Cell(m.Link(citation[], citation[]))) table.add(row) message.add(table) url = _definition_screenshot_url(definition) if url: message.add(m.Paragraph(m.Image(url), style_class=)) if in definition: for sub_definition in definition[]: definition_to_message( sub_definition, message, table_of_contents, heading_level=3) if in definition: message.add(m.Heading( tr(), **DETAILS_STYLE)) message.add(m.Heading( tr(), **DETAILS_SUBGROUP_STYLE)) bullets = m.BulletedList() for note in definition[]: if isinstance(note, dict): bullets = _add_dict_to_bullets(bullets, note) elif note: bullets.add(m.Text(note)) message.add(bullets) if in definition: _citations_to_message(message, definition) if in definition: current_function = current_earthquake_model_name() paragraph = m.Paragraph(tr( ), m.ImportantText(current_function) ) message.add(paragraph) models_definition = definition[] for model in models_definition: message.add(m.Heading(model[], **DETAILS_SUBGROUP_STYLE)) if in model: paragraph = m.Paragraph(model[]) message.add(paragraph) for note in model[]: paragraph = m.Paragraph(note) message.add(paragraph) _citations_to_message(message, model) for exposure in exposure_all: extra_exposure_notes = specific_notes(definition, exposure) if extra_exposure_notes: title = tr().format( exposure_name=exposure[]) message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE)) bullets = m.BulletedList() for note in extra_exposure_notes: if isinstance(note, dict): bullets = _add_dict_to_bullets(bullets, note) elif note: bullets.add(m.Text(note)) message.add(bullets) if in definition: message.add(m.Heading( tr(), **DETAILS_SUBGROUP_STYLE)) bullets = m.BulletedList() for note in definition[]: bullets.add(m.Text(note)) message.add(bullets) if in definition: message.add(m.Heading( tr(), **DETAILS_SUBGROUP_STYLE)) bullets = m.BulletedList() for note in definition[]: bullets.add(m.Text(note)) message.add(bullets) if in definition: message.add( m.Heading(tr(), **DETAILS_STYLE)) if len(definition[]) < 1: message.add(m.Paragraph(tr())) else: bullets = m.BulletedList() for note in definition[]: bullets.add(m.Text(note)) message.add(bullets) if in definition: message.add( m.Heading( tr(), **DETAILS_STYLE)) if len(definition[]) < 1: message.add(m.Paragraph(tr())) else: bullets = m.BulletedList() for note in definition[]: bullets.add(m.Text(note)) message.add(bullets) if in definition: message.add(m.Paragraph(m.ImportantText(tr()))) bullets = m.BulletedList() for note in definition[]: if isinstance(note, dict): bullets = _add_dict_to_bullets(bullets, note) elif note: bullets.add(m.Text(note)) message.add(bullets) for exposure in exposure_all: 
extra_exposure_actions = specific_actions(definition, exposure) if extra_exposure_actions: title = tr().format( exposure_name=exposure[]) message.add(m.Heading(title, **DETAILS_SUBGROUP_STYLE)) bullets = m.BulletedList() for note in extra_exposure_actions: if isinstance(note, dict): bullets = _add_dict_to_bullets(bullets, note) elif note: bullets.add(m.Text(note)) message.add(bullets) if in definition: message.add(m.Paragraph(m.ImportantText(tr()))) table = m.Table(style_class=) row = m.Row() row.add(m.Cell(tr(), header=True)) row.add(m.Cell(tr(), header=True)) row.add(m.Cell(tr(), header=True)) row.add(m.Cell(tr(), header=True)) table.add(row) for unit in definition[]: row = m.Row() row.add(m.Cell(unit[])) row.add(m.Cell(unit[])) row.add(m.Cell(unit[])) row.add(m.Cell(unit[])) table.add(row) message.add(table) if in definition: message.add(m.Paragraph(m.ImportantText(tr()))) table = _create_fields_table() if in definition: all_fields = definition[] + definition[] else: all_fields = definition[] for field in all_fields: _add_field_to_table(field, table) message.add(table) if in definition: message.add(m.Heading( tr(), **DETAILS_STYLE)) message.add(m.Paragraph( definitions.hazard_classification[])) for inasafe_class in definition[]: definition_to_message( inasafe_class, message, table_of_contents, heading_level=3) if in definition: message.add(m.Paragraph(m.ImportantText(tr()))) is_hazard = definition[] == hazard_classification_type if is_hazard: table = _make_defaults_hazard_table() else: table = _make_defaults_exposure_table() for inasafe_class in definition[]: row = m.Row() if is_hazard: if in inasafe_class: colour = inasafe_class[].name() row.add(m.Cell( , attributes= % colour)) else: row.add(m.Cell()) row.add(m.Cell(inasafe_class[])) if is_hazard: if in inasafe_class: row.add(m.Cell(tr(inasafe_class[]))) else: row.add(m.Cell(tr())) if is_hazard: if inasafe_class.get() is None or \ inasafe_class.get() < 0: row.add(m.Cell(tr())) elif inasafe_class.get() > 0: rate = html_scientific_notation_rate( inasafe_class[]) rate = % rate row.add(m.Cell(rate)) else: row.add(m.Cell()) if is_hazard: if in inasafe_class: rate = inasafe_class[] * 100 rate = % rate row.add(m.Cell(rate)) else: row.add(m.Cell(tr())) if in inasafe_class: defaults = None for default in inasafe_class[]: if defaults: defaults += % default else: defaults = default row.add(m.Cell(defaults)) else: row.add(m.Cell(tr())) if is_hazard: if in inasafe_class: if isinstance(inasafe_class[], dict): bullets = m.BulletedList() minima = inasafe_class[] for key, value in sorted(minima.items()): bullets.add( % (key, value)) row.add(m.Cell(bullets)) else: row.add(m.Cell(inasafe_class[])) else: row.add(m.Cell(tr())) if is_hazard: if in inasafe_class: if isinstance(inasafe_class[], dict): bullets = m.BulletedList() maxima = inasafe_class[] for key, value in sorted(maxima.items()): bullets.add( % (key, value)) row.add(m.Cell(bullets)) else: row.add(m.Cell(inasafe_class[])) else: row.add(m.Cell(tr())) table.add(row) row = m.Row() row.add(m.Cell()) row.add(m.Cell(inasafe_class[], span=7)) table.add(row) if definition[] == definitions.hazard_classification_type: row = m.Row() colour = definitions.not_exposed_class[].name() row.add(m.Cell( , attributes= % colour)) description = definitions.not_exposed_class[] row.add(m.Cell(description, span=7)) table.add(row) message.add(table) if in definition: if definition[]: message.add(m.Paragraph(tr( ))) else: message.add(m.Paragraph(tr( ))) if in definition: if definition[]: message.add(m.Paragraph(tr( ))) 
else: message.add(m.Paragraph(tr( ))) return message
Helper function to render a definition to a message. :param definition: A definition dictionary (see definitions package). :type definition: dict :param message: The message that the definition should be appended to. :type message: parameters.message.Message :param table_of_contents: Table of contents that the headings should be included in. :type message: parameters.message.Message :param heading_level: Optional style to apply to the definition heading. See HEADING_LOOKUPS :type heading_level: int :returns: Message :rtype: str
22,962
def _from_any(cls, spec):
    if isinstance(spec, str):
        spec = cls.from_file(spec)
    elif isinstance(spec, dict):
        spec = cls.from_dict(spec)
    elif not isinstance(spec, cls):
        raise context.TypeError("spec must be either an ApplicationSpec, "
                                "path, or dict, got "
                                "%s" % type(spec).__name__)
    return spec
Generic creation method for all types accepted as ``spec``
22,963
def show(thing, domain=(0, 1), **kwargs):
    if isinstance(thing, np.ndarray):
        rank = len(thing.shape)
        if rank == 4:
            log.debug("Show is assuming rank 4 tensor to be a list of images.")
            images(thing, domain=domain, **kwargs)
        elif rank in (2, 3):
            log.debug("Show is assuming rank 2 or 3 tensor to be an image.")
            image(thing, domain=domain, **kwargs)
        else:
            log.warning("Show only supports numpy arrays of rank 2-4. Using repr().")
            print(repr(thing))
    elif isinstance(thing, (list, tuple)):
        log.debug("Show is assuming list or tuple to be a collection of images.")
        images(thing, domain=domain, **kwargs)
    else:
        log.warning("Show only supports numpy arrays so far. Using repr().")
        print(repr(thing))
Display a numpy array without having to specify what it represents.

This module will attempt to infer how to display your tensor based on
its rank, shape and dtype. Rank 4 tensors will be displayed as image
grids, rank 2 and 3 tensors as images.
22,964
def _generate_docstring_for_func(self, namespace, arg_data_type,
                                 result_data_type=None,
                                 error_data_type=None, overview=None,
                                 extra_request_args=None,
                                 extra_return_arg=None, footer=None):
    fields = [] if is_void_type(arg_data_type) else arg_data_type.fields
    if not fields and not overview:
        # (The remainder of the function body is missing from the source.)
Generates a docstring for a function or method.

This function is versatile. It will create a docstring using all the
data that is provided.

:param arg_data_type: The data type describing the argument to the
    route. The data type should be a struct, and each field will be
    treated as an input parameter of the method.
:param result_data_type: The data type of the route result.
:param error_data_type: The data type of the route result in the case
    of an error.
:param str overview: A description of the route that will be located
    at the top of the docstring.
:param extra_request_args: [(field name, field type, field doc), ...]
    Describes any additional parameters for the method that aren't a
    field in arg_data_type.
:param str extra_return_arg: Name of an additional return argument. If
    this is specified, it is assumed that the return of the function
    will be a tuple of result_data_type and extra_return_arg.
:param str footer: Additional notes at the end of the docstring.
22,965
def compute_from_text(self,text,beta=0.001): prevlett = {:, :, :, :} countmat = [] text = re.sub(,,text.upper()) for i in range(len(text)): D = {: 0, : 0, :0, :0} letter = text[i] if letter in [, , , ]: _omit = prevlett[letter] for L in ACGT: if L != _omit: D[L] = 0.3333 elif one2two.has_key(letter): for L in list(one2two[letter]): D[L] = 0.5 elif letter == : for L in D.keys(): D[L] = self.background[L] elif letter == : for L in D.keys(): D[L] = self.background[L]-(0.0001) D[] = D[] + 0.0004 else: D[letter] = 1.0 countmat.append(D) self.compute_from_counts(countmat,beta)
m.compute_from_text(text, beta=0.001) -- Compute matrix values from a
text string of ambiguity codes. Use the Motif_from_text utility instead
to build motifs on the fly.
22,966
def _split_chemical_equations(value):
    pieces = _split_arrows(value)
    return [(pieces[i] + pieces[i + 1] + pieces[i + 2]).strip()
            for i in range(0, len(pieces) - 2, 2)]
Split a string with sequential chemical equations into separate strings.

Each string in the returned iterable represents a single chemical
equation of the input. See the docstrings of `ChemicalEquation` and
`ChemicalSystem` for more.

Parameters
----------
value : `str`
    A string with sequential chemical equations in the mini-language
    (see notes on `ChemicalEquation`).

Returns
-------
iterable of `str`
    An iterable of strings in the format specified by the mini-language
    (see notes on `ChemicalEquation`).

Examples
--------
>>> from pyrrole.core import _split_chemical_equations
>>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I')
['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
22,967
def set_hook(fn, key, **kwargs):
    if fn is None:
        return functools.partial(set_hook, key=key, **kwargs)
    try:
        hook_config = fn.__marshmallow_hook__
    except AttributeError:
        fn.__marshmallow_hook__ = hook_config = {}
    hook_config[key] = kwargs
    return fn
Mark decorated function as a hook to be picked up later.

.. note::
    Currently only works with functions and instance methods. Class and
    static methods are not supported.

:return: Decorated function if supplied, else this decorator with its
    args bound.
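A sketch of how a later pass can find the hook; the 'pre_load' key and
'pass_many' kwarg are illustrative, not part of this function:

>>> def strip_envelope(self, data):
...     return data
>>> strip_envelope = set_hook(strip_envelope, 'pre_load', pass_many=False)
>>> strip_envelope.__marshmallow_hook__
{'pre_load': {'pass_many': False}}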
22,968
def _golden(self, triplet, fun):
    self.num_golden = 0
    (qa, fa), (qb, fb), (qc, fc) = triplet
    while True:
        self.num_golden += 1
        qd = qa + (qb-qa)*phi/(1+phi)
        fd = fun(qd)
        if fd < fb:
            (qa, fa), (qb, fb) = (qb, fb), (qd, fd)
        else:
            (qa, fa), (qc, fc) = (qd, fd), (qa, fa)
        if abs(qa-qb) < self.qtol:
            return qc, fc
Reduce the size of the bracket until the minimum is found
22,969
def dump_img(fname):
    # The initial string literal was lost in extraction; an empty string
    # is assumed.
    img = Image.open(fname)
    width, _ = img.size
    txt = ''
    pixels = list(img.getdata())
    for col in range(width):
        txt += str(pixels[col:col+width])
    return txt
output the image as text
22,970
def perform(action_name, container, **kwargs):
    cf = container_fabric()
    cf.call(action_name, container, **kwargs)
Performs an action on the given container map and configuration. :param action_name: Name of the action (e.g. ``update``). :param container: Container configuration name. :param kwargs: Keyword arguments for the action implementation.
22,971
def generate(env,**kw): import SCons.Util from SCons.Tool.GettextCommon import _detect_msginit try: env[] = _detect_msginit(env) except: env[] = msginitcom = \ +
Generate the `msginit` tool
22,972
def _check_team_login(team):
    # The auth.get() key string was lost in extraction; 'team' follows
    # from the surrounding context. Also fixed "Cant" -> "Can't".
    contents = _load_auth()
    for auth in itervalues(contents):
        existing_team = auth.get('team')
        if team and team != existing_team:
            raise CommandException(
                "Can't log in as a public user; "
                "log out from team %r first." % existing_team
            )
Disallow simultaneous public cloud and team logins.
22,973
def serialize_footer(signer):
    footer = b""
    if signer is not None:
        signature = signer.finalize()
        footer = struct.pack(">H{sig_len}s".format(sig_len=len(signature)),
                             len(signature), signature)
    return footer
Uses the signer object which has been used to sign the message to
generate the signature, then serializes that signature.

:param signer: Cryptographic signer object
:type signer: aws_encryption_sdk.internal.crypto.Signer
:returns: Serialized footer
:rtype: bytes
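So a 64-byte signature serializes to a 2-byte big-endian length prefix
followed by the raw signature, 66 bytes in total; a minimal check:

>>> import struct
>>> footer = struct.pack(">H64s", 64, b"\x01" * 64)
>>> len(footer)
66
>>> struct.unpack_from(">H", footer)[0]
64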
22,974
def _integrate_variable_trajectory(self, h, g, tol, step, relax):
    solution = np.hstack((self.t, self.y))
    while self.successful():
        self.integrate(self.t + h, step, relax)
        current_step = np.hstack((self.t, self.y))
        solution = np.vstack((solution, current_step))
        if g(self.t, self.y, *self.f_params) < tol:
            break
        else:
            continue
    return solution
Generates a solution trajectory of variable length.
22,975
def load_user_from_request(req):
    # Subscript key strings were lost in extraction; 'kid' and 'iat' are
    # the JWS header/payload fields bitjws uses and are best-effort
    # reconstructions.
    load_jws_from_request(req)
    if not hasattr(req, 'jws_header') or req.jws_header is None or \
            'iat' not in req.jws_payload:
        current_app.logger.info("invalid jws request.")
        return None
    ln = current_app.bitjws.get_last_nonce(current_app,
                                           req.jws_header['kid'],
                                           req.jws_payload['iat'])
    if (ln is None or 'iat' not in req.jws_payload or
            req.jws_payload['iat'] * 1000 <= ln):
        current_app.logger.info("invalid nonce. lastnonce: %s" % ln)
        return None
    rawu = current_app.bitjws.get_user_by_key(current_app,
                                              req.jws_header['kid'])
    if rawu is None:
        return None
    current_app.logger.info("logging in user: %s" % rawu)
    return FlaskUser(rawu)
Just like the Flask.login load_user_from_request If you need to customize the user loading from your database, the FlaskBitjws.get_user_by_key method is the one to modify. :param req: The flask request to load a user based on.
22,976
def add_backend(self, backend):
    "Add a RapidSMS backend to this tenant"
    if backend in self.get_backends():
        return
    backend_link, created = BackendLink.all_tenants.get_or_create(
        backend=backend)
    self.backendlink_set.add(backend_link)
Add a RapidSMS backend to this tenant
22,977
def extract_payload(self):
    # The content-type string was lost in extraction;
    # 'application/x-www-form-urlencoded' is the usual form content type.
    if not self.check_signature():
        raise InvalidSignature()
    if request.is_json:
        delete_cached_json_for(request)
        return request.get_json(silent=False, cache=False)
    elif request.content_type == 'application/x-www-form-urlencoded':
        return dict(request.form)
    raise InvalidPayload(request.content_type)
Extract payload from request.
22,978
def _fingerprint_dict_with_files(self, option_val):
    return stable_option_fingerprint({
        k: self._expand_possible_file_value(v)
        for k, v in option_val.items()
    })
Returns a fingerprint of the given dictionary containing file paths. Any value which is a file path which exists on disk will be fingerprinted by that file's contents rather than by its path. This assumes the files are small enough to be read into memory. NB: The keys of the dict are assumed to be strings -- if they are not, the dict should be converted to encode its keys with `stable_option_fingerprint()`, as is done in the `fingerprint()` method.
22,979
def _db_filename_from_dataframe(base_filename, df):
    db_filename = base_filename + ("_nrows%d" % len(df))
    for column_name in df.columns:
        column_db_type = db_type(df[column_name].dtype)
        column_name = column_name.replace(" ", "_")
        db_filename += ".%s_%s" % (column_name, column_db_type)
    return db_filename + ".db"
Generate database filename for a sqlite3 database we're going to fill with the contents of a DataFrame, using the DataFrame's column names and types.
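For example, assuming the `db_type` helper maps pandas object columns
to TEXT and float64 columns to FLOAT (the exact type names depend on
that helper):

>>> df = pd.DataFrame({"gene name": ["TP53"], "score": [0.9]})
>>> _db_filename_from_dataframe("cache", df)
'cache_nrows1.gene_name_TEXT.score_FLOAT.db'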
22,980
def blend(self, other, percent=0.5):
    # The color-mode string was lost in extraction; 'rgb' follows from
    # the tuple being blended.
    dest = 1.0 - percent
    rgb = tuple(((u * percent) + (v * dest)
                 for u, v in zip(self.__rgb, other.__rgb)))
    a = (self.__a * percent) + (other.__a * dest)
    return Color(rgb, 'rgb', a, self.__wref)
Blend this color with the other one.

Args:
    :other: the grapefruit.Color to blend with this one.

Returns:
    A grapefruit.Color instance which is the result of blending this
    color on the other one.

>>> c1 = Color.from_rgb(1, 0.5, 0, 0.2)
>>> c2 = Color.from_rgb(1, 1, 1, 0.6)
>>> c3 = c1.blend(c2)
>>> c3
Color(1.0, 0.75, 0.5, 0.4)
22,981
def register_doi(self, submission_id, request_xml):
    # String literals were lost in extraction; the endpoint, file and
    # parameter names below follow the Crossref deposit API (doMDUpload)
    # and are best-effort reconstructions.
    endpoint = self.get_endpoint('deposit')
    files = {
        'mdFile': ('%s.xml' % submission_id, request_xml)
    }
    params = {
        'operation': 'doMDUpload',
        'login_id': self.api_user,
        'login_passwd': self.api_key
    }
    result = self.do_http_request(
        'post',
        endpoint,
        data=params,
        files=files,
        timeout=10,
        custom_header=str(self.etiquette)
    )
    return result
Register a new DOI in Crossref or update metadata for an existing DOI.

submission_id: Will be used as the submission file name. The file name
    could be used in future requests to retrieve the submission status.

request_xml: The XML with the document metadata. It must be compliant
    with the Crossref Submission Schema.
22,982
def column_reflection_fallback(self):
    sql = sa.select([sa.text("*")]).select_from(self._table)
    col_names = self.engine.execute(sql).keys()
    # the 'name' key mirrors SQLAlchemy's reflected-column dicts
    col_dict = [{'name': col_name} for col_name in col_names]
    return col_dict
If we can't reflect the table, use a query to at least get column names.
22,983
def replace_body_vars(self, body): for key, val in self.job_vars.items(): body = body.replace(key, val) return body
Given a multiline string that is the body of the job script, replace the placeholders for environment variables with backend-specific realizations, and return the modified body. See the `job_vars` attribute for the mappings that are performed.
22,984
def flatten_and_write(dotenv_path, dotenv_as_dict, quote_mode='always'):
    with open(dotenv_path, 'w') as f:
        for k, v in dotenv_as_dict.items():
            str_format = _get_format(v, quote_mode)
            f.write(str_format.format(key=k, value=v))
    return True
Writes dotenv_as_dict to dotenv_path, flattening the values

:param dotenv_path: .env path
:param dotenv_as_dict: dict of key/value pairs to write
:param quote_mode: quoting policy passed to _get_format, e.g. "always"
:return: True on success
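Minimal usage sketch; the exact quoting in the output depends on the _get_format helper, so the file contents shown are an assumption.

flatten_and_write('.env', {'API_KEY': 'secret', 'DEBUG': 'true'})
# .env now contains something like:
#   API_KEY="secret"
#   DEBUG="true"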
22,985
def _get_id_format(self):
    id_format = gf.safe_get(
        self.parameters,
        gc.PPN_TASK_OS_FILE_ID_REGEX,
        self.DEFAULT_ID_FORMAT,
        can_return_none=False
    )
    try:
        identifier = id_format % 1
    except (TypeError, ValueError) as exc:
        self.log_exc(u"String '%s' is not a valid id format" % (id_format),
                     exc, True, ValueError)
    return id_format
Return the id regex from the parameters
22,986
def create(self, create_missing=None):
    return DockerComputeResource(
        self._server_config,
        id=self.create_json(create_missing)['id'],
    ).read()
Do extra work to fetch a complete set of attributes for this entity. For more information, see `Bugzilla #1223540 <https://bugzilla.redhat.com/show_bug.cgi?id=1223540>`_.
22,987
def pairwise_point_combinations(xs, ys, anchors): for i in xs: anchors.append((i, max(ys))) anchors.append((i, min(ys))) for i in ys: anchors.append((max(xs), i)) anchors.append((min(xs), i))
Does an in-place addition of the four points that can be composed by combining coordinates from the two lists to the given list of anchors
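A worked example; note that each corner of the bounding box is appended twice.

anchors = []
pairwise_point_combinations([0, 10], [0, 5], anchors)
# anchors == [(0, 5), (0, 0), (10, 5), (10, 0),
#             (10, 0), (0, 0), (10, 5), (0, 5)]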
22,988
def check_missing_atoms(self, template=None, ha_only=True):
    missing_atoms = {}
    if not template:
        import protein_residues
        template = protein_residues.normal
    # The residue loop and reference-set construction were missing from
    # this record; reconstructed from the docstring. The template layout
    # (template[resname]['atoms']) is an assumption.
    for residue in self.get_residues():
        if residue.resname not in template:
            continue
        atom_names = template[residue.resname]['atoms'].keys()
        if ha_only:
            # restrict the check to heavy atoms (skip hydrogens)
            atom_names = [a for a in atom_names if not a.startswith('H')]
        reference_set = set(atom_names)
        structure_set = set(residue.child_dict.keys())
        diff = reference_set.difference(structure_set)
        if diff:
            residue_uniq_id = (residue.parent.id, residue.resname,
                               residue.get_id()[1])
            missing_atoms[residue_uniq_id] = list(diff)
    return missing_atoms
Checks for missing atoms based on a template. Default: Searches for missing heavy atoms (not Hydrogen) based on Bio.Struct.protein_residues Arguments: - template, dictionary, keys are residue names, values list of atom names. - ha_only, boolean, default True, restrict check to heavy atoms. Returns a dictionary of tuples with the missing atoms per residue.
22,989
def parse_instance(self, tup_tree):
    # Node and attribute names restored from the DTD quoted in the
    # docstring below.
    self.check_node(tup_tree, 'INSTANCE', ('CLASSNAME',), ('xml:lang',),
                    ('QUALIFIER', 'PROPERTY', 'PROPERTY.ARRAY',
                     'PROPERTY.REFERENCE'))
    qualifiers = self.list_of_matching(tup_tree, ('QUALIFIER',))
    props = self.list_of_matching(tup_tree,
                                  ('PROPERTY', 'PROPERTY.ARRAY',
                                   'PROPERTY.REFERENCE'))
    obj = CIMInstance(attrs(tup_tree)['CLASSNAME'], qualifiers=qualifiers)
    for prop in props:
        obj[prop.name] = prop
    return obj
Return a CIMInstance. The instance contains the properties, qualifiers and classname for the instance. :: <!ELEMENT INSTANCE (QUALIFIER*, (PROPERTY | PROPERTY.ARRAY | PROPERTY.REFERENCE)*)> <!ATTLIST INSTANCE %ClassName; xml:lang NMTOKEN #IMPLIED>
22,990
def get_config_value(name, path_to_file='config.txt'):
    # the default file name and the parent-directory fallback below were
    # stripped from this record and are assumptions
    if not os.path.isfile(path_to_file):
        path_to_file = os.path.join('..', path_to_file)
    path_to_file = os.path.abspath(path_to_file)
    if not os.path.isfile(path_to_file):
        print(('config file not found:', path_to_file))
        return None
    f = open(path_to_file, 'r')
    string_of_file_contents = f.read()
    # use != rather than `is not`: identity comparison of strings is a bug
    if name[-1] != ':':
        name += ':'
    if name not in string_of_file_contents:
        return None
    else:
        config_value = [line.split(name)[1]
                        for line in string_of_file_contents.split('\n')
                        if len(line.split(name)) > 1][0].strip()
        return config_value
gets the value for "name" from the "path_to_file" config file

Args:
    name: name of variable in config file
    path_to_file: path to config file

Returns:
    the configured value (e.g. a dll path) if name exists in the file;
    otherwise, None
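Worked example, assuming a config.txt with colon-delimited entries; the key and path shown are illustrative.

# config.txt contains the line:
#   gpu_dll_path: C:/drivers/gpu.dll
value = get_config_value('gpu_dll_path', 'config.txt')
# value == 'C:/drivers/gpu.dll'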
22,991
def area_top_orifice(self): z = self.hl - 0.5 * self.b_rows return self.stout_w_per_flow(z) * self.q * self.b_rows
Estimate the orifice area corresponding to the top row of orifices. Another solution method is to use integration to solve this problem. Here we use the width of the stout weir in the center of the top row to estimate the area of the top orifice
22,992
def _construct_options(options_bootstrapper, build_configuration): top_level_optionables = ( {GlobalOptionsRegistrar} | GlobalSubsystems.get() | build_configuration.optionables() | set(Goal.get_optionables()) ) known_scope_infos = [si for optionable in top_level_optionables for si in optionable.known_scope_infos()] return options_bootstrapper.get_full_options(known_scope_infos)
Parse and register options. :returns: An Options object representing the full set of runtime options.
22,993
def collect(self):
    if boto is None:
        self.log.error("Unable to import boto python module")
        return {}
    # Config key names ('s3', 'byte_unit', 'aws_access_key_id',
    # 'aws_secret_access_key', 'buckets') were stripped from this record
    # and are restored by assumption from Diamond collector conventions.
    for s3instance in self.config['s3']:
        self.log.info("S3: byte_unit: %s" % self.config['byte_unit'])
        aws_access = self.config['s3'][s3instance]['aws_access_key_id']
        aws_secret = self.config['s3'][s3instance]['aws_secret_access_key']
        for bucket_name in self.config['s3'][s3instance]['buckets']:
            bucket = self.getBucket(aws_access, aws_secret, bucket_name)
            total_size = self.getBucketSize(bucket)
            for byte_unit in self.config['byte_unit']:
                new_size = diamond.convertor.binary.convert(
                    value=total_size,
                    oldUnit='byte',
                    newUnit=byte_unit
                )
                self.publish("%s.size.%s" % (bucket_name, byte_unit),
                             new_size)
Collect s3 bucket stats
22,994
def close(self): uwsgi.disconnect() if self._req_ctx is None: self._select_greenlet.kill() self._event.set()
Disconnects uWSGI from the client.
22,995
def disable_paging(self, command="terminal length 999", delay_factor=1): delay_factor = self.select_delay_factor(delay_factor) time.sleep(delay_factor * 0.1) self.clear_buffer() command = self.normalize_cmd(command) log.debug("In disable_paging") log.debug("Command: {0}".format(command)) self.write_channel(command) output = self.read_until_prompt() if self.ansi_escape_codes: output = self.strip_ansi_escape_codes(output) log.debug("{0}".format(output)) log.debug("Exiting disable_paging") return output
Disable paging; the default command follows the Cisco CLI convention.
22,996
def compute(self, x, yerr): K = self.kernel.get_value(x) K[np.diag_indices_from(K)] += yerr ** 2 self._factor = (cholesky(K, overwrite_a=True, lower=False), False) self.log_determinant = 2 * np.sum(np.log(np.diag(self._factor[0]))) self.computed = True
Compute and factorize the covariance matrix. Args: x (ndarray[nsamples, ndim]): The independent coordinates of the data points. yerr (ndarray[nsamples] or float): The Gaussian uncertainties on the data points at coordinates ``x``. These values will be added in quadrature to the diagonal of the covariance matrix.
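A hedged usage sketch; `solver` stands for an instance of the surrounding class together with the kernel it wraps, both assumed for illustration.

import numpy as np

x = np.linspace(0, 10, 50)
yerr = 0.1 * np.ones_like(x)

solver.compute(x, yerr)          # factorizes K + diag(yerr**2)
print(solver.log_determinant)    # log|K| from the Cholesky diagonal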
22,997
def write(self, page, data): if len(data) != 4: raise ValueError("data must be a four byte string or array") log.debug("write {0} to page {1}".format(hexlify(data), page)) rsp = self.transceive("\xA2" + chr(page % 256) + data) if len(rsp) != 1: log.debug("invalid response " + hexlify(data)) raise Type2TagCommandError(INVALID_RESPONSE_ERROR) if rsp[0] != 0x0A: log.debug("invalid page, received nak") raise Type2TagCommandError(INVALID_PAGE_ERROR) return True
Send a WRITE command to store data on the tag. The *page* argument specifies the offset in multiples of 4 bytes. The *data* argument must be a string or bytearray of length 4. Command execution errors raise :exc:`Type2TagCommandError`.
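Usage sketch, assuming `tag` is an activated Type 2 Tag instance (nfcpy-style, Python 2 byte strings). Page 4 is the first user-data page on typical Type 2 tags, since pages 0-3 hold the UID, lock bits, and capability container.

try:
    tag.write(4, "\x01\x02\x03\x04")  # store four bytes in page 4
except Type2TagCommandError as error:
    print("write failed: %s" % error)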
22,998
def patch(self, resource_endpoint, data=None):
    # use None instead of a mutable {} default to avoid shared state
    if data is None:
        data = {}
    url = self._create_request_url(resource_endpoint)
    return req.patch(url, headers=self.auth_header, json=data)
Don't use it.
22,999
def intersect(obj1, obj2):
    if not isinstance(obj1, Vector) or not isinstance(obj2, Vector):
        raise RuntimeError('both objects must be of type Vector')
    obj1 = obj1.clone()
    obj2 = obj2.clone()
    obj1.reproject(obj2.srs)
    union1 = ogr.Geometry(ogr.wkbMultiPolygon)
    for feat in obj1.layer:
        union1.AddGeometry(feat.GetGeometryRef())
    obj1.layer.ResetReading()
    union1.Simplify(0)
    union2 = ogr.Geometry(ogr.wkbMultiPolygon)
    for feat in obj2.layer:
        union2.AddGeometry(feat.GetGeometryRef())
    obj2.layer.ResetReading()
    union2.Simplify(0)
    intersect_base = union1.Intersection(union2)
    union1 = None
    union2 = None
    if intersect_base.GetArea() > 0:
        # driver and layer names restored; 'Memory' keeps the result in RAM
        intersection = Vector(driver='Memory')
        intersection.addlayer('intersect', obj1.srs, ogr.wkbPolygon)
        fieldmap = []
        for index, fielddef in enumerate([obj1.fieldDefs, obj2.fieldDefs]):
            for field in fielddef:
                name = field.GetName()
                i = 2
                # disambiguate duplicate field names across the two inputs
                while name in intersection.fieldnames:
                    name = '{}_{}'.format(field.GetName(), i)
                    i += 1
                fieldmap.append((index, field.GetName(), name))
                intersection.addfield(name, type=field.GetType(),
                                      width=field.GetWidth())
        for feature1 in obj1.layer:
            geom1 = feature1.GetGeometryRef()
            if geom1.Intersects(intersect_base):
                for feature2 in obj2.layer:
                    geom2 = feature2.GetGeometryRef()
                    if geom2.Intersects(intersect_base):
                        intersect = geom2.Intersection(geom1)
                        fields = {}
                        for item in fieldmap:
                            if item[0] == 0:
                                fields[item[2]] = feature1.GetField(item[1])
                            else:
                                fields[item[2]] = feature2.GetField(item[1])
                        intersection.addfeature(intersect, fields)
        intersect_base = None
        return intersection
intersect two Vector objects

Parameters
----------
obj1: Vector
    the first vector object; this object is reprojected to the CRS of
    obj2 if necessary
obj2: Vector
    the second vector object

Returns
-------
Vector
    the intersection of obj1 and obj2, or None if the two geometries do
    not overlap
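Usage sketch; the shapefile paths and the write() call are illustrative assumptions rather than part of this record.

site = Vector('site.shp')
footprint = Vector('footprint.shp')

overlap = intersect(site, footprint)
if overlap is not None:
    overlap.write('overlap.shp')  # write() is an assumed method name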