[Dataset viewer column summary: "Unnamed: 0" (int64, 0-389k), "code" (string, 26-79.6k chars), "docstring" (string, 1-46.9k chars).]
def get_global_register_objects(self, do_sort=None, reverse=False, **kwargs):
    try:
        names = iterable(kwargs.pop('name'))  # 'name' key inferred from the usage example in the docstring
    except KeyError:
        register_objects = []
    else:
        register_objects = [self.global_registers[reg] for reg in names]
    for keyword in kwargs.iterkeys():
        allowed_values = iterable(kwargs[keyword])
        register_objects.extend(filter(
            lambda global_register: set(iterable(global_register[keyword])).intersection(allowed_values),
            self.global_registers.itervalues()))
    if not register_objects and filter(None, kwargs.itervalues()):
        raise ValueError()
    if do_sort:
        return sorted(register_objects, key=itemgetter(*do_sort), reverse=reverse)
    else:
        return register_objects
Generate register objects (list) from register name list.

Usage: get_global_register_objects(name=["Amp2Vbn", "GateHitOr", "DisableColumnCnfg"], address=[2, 3])
Receives: keyword lists of register names, addresses, ... for making cuts
Returns: list of register objects
19,101
def _write_fuzzmanagerconf(self, path): output = configparser.RawConfigParser() output.add_section() output.set(, , self.moz_info[].replace(, )) output.set(, , + self._branch) output.set(, , % (self.build_id, self.changeset)) os_name = self.moz_info[].lower() if os_name.startswith(): output.set(, , ) elif os_name.startswith(): output.set(, , ) elif os_name.startswith(): output.set(, , ) elif os_name.startswith(): output.set(, , ) else: output.set(, , self.moz_info[]) output.add_section() output.set(, , self.moz_info[]) output.set(, , self._flags.build_string().lstrip()) if self._platform.system == "Windows": fm_name = self._target + conf_path = os.path.join(path, , , fm_name) elif self._platform.system == "Android": conf_path = os.path.join(path, ) else: fm_name = self._target + conf_path = os.path.join(path, , , fm_name) with open(conf_path, ) as conf_fp: output.write(conf_fp)
Write fuzzmanager config file for selected build.

@type path: basestring
@param path: A string representation of the fuzzmanager config path
19,102
def add_node(self, node):
    nodes = self.nodes()
    if len(nodes) > 1:
        first_node = min(nodes, key=attrgetter("creation_time"))
        first_node.connect(direction="both", whom=node)
Add a node and connect it to the center.
19,103
def region_interface_areas(regions, areas, voxel_size=1, strel=None): r print(*60) print() from skimage.morphology import disk, square, ball, cube im = regions.copy() if im.ndim != im.squeeze().ndim: warnings.warn( + str(im.shape) + + ) if im.ndim == 2: cube = square ball = disk slices = spim.find_objects(im) Ps = sp.arange(1, sp.amax(im)+1) sa = sp.zeros_like(Ps, dtype=float) sa_combined = [] cn = [] for i in tqdm(Ps): reg = i - 1 if slices[reg] is not None: s = extend_slice(slices[reg], im.shape) sub_im = im[s] mask_im = sub_im == i sa[reg] = areas[reg] im_w_throats = spim.binary_dilation(input=mask_im, structure=ball(1)) im_w_throats = im_w_throats*sub_im Pn = sp.unique(im_w_throats)[1:] - 1 for j in Pn: if j > reg: cn.append([reg, j]) merged_region = im[(min(slices[reg][0].start, slices[j][0].start)): max(slices[reg][0].stop, slices[j][0].stop), (min(slices[reg][1].start, slices[j][1].start)): max(slices[reg][1].stop, slices[j][1].stop)] merged_region = ((merged_region == reg + 1) + (merged_region == j + 1)) mesh = mesh_region(region=merged_region, strel=strel) sa_combined.append(mesh_surface_area(mesh)) cn = sp.array(cn) ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined) ia[ia <= 0] = 1 result = namedtuple(, (, )) result.conns = cn result.area = ia * voxel_size**2 return result
r""" Calculates the interfacial area between all pairs of adjecent regions Parameters ---------- regions : ND-array An image of the pore space partitioned into individual pore regions. Note that zeros in the image will not be considered for area calculation. areas : array_like A list containing the areas of each regions, as determined by ``region_surface_area``. Note that the region number and list index are offset by 1, such that the area for region 1 is stored in ``areas[0]``. voxel_size : scalar The resolution of the image, expressed as the length of one side of a voxel, so the volume of a voxel would be **voxel_size**-cubed. The default is 1. strel : array_like The structuring element used to blur the region. If not provided, then a spherical element (or disk) with radius 1 is used. See the docstring for ``mesh_region`` for more details, as this argument is passed to there. Returns ------- result : named_tuple A named-tuple containing 2 arrays. ``conns`` holds the connectivity information and ``area`` holds the result for each pair. ``conns`` is a N-regions by 2 array with each row containing the region number of an adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and ``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial area shared by regions 0 and 5.
19,104
def encode_max_apdu_length_accepted(arg):
    for i in range(5, -1, -1):
        if arg >= _max_apdu_length_encoding[i]:
            return i
    raise ValueError("invalid max APDU length accepted: %r" % (arg,))
Return the encoding of the highest encodable value less than or equal to the value of the arg.
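A self-contained illustration; the six values below are the standard BACnet max-APDU sizes and are an assumption about the real `_max_apdu_length_encoding` table:

    _max_apdu_length_encoding = [50, 128, 206, 480, 1024, 1476]  # assumed table

    def encode_max_apdu_length_accepted(arg):
        for i in range(5, -1, -1):
            if arg >= _max_apdu_length_encoding[i]:
                return i
        raise ValueError("invalid max APDU length accepted: %r" % (arg,))

    print(encode_max_apdu_length_accepted(1476))  # 5
    print(encode_max_apdu_length_accepted(1000))  # 3 (largest table entry <= 1000 is 480)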
19,105
def score_leaves(self) -> Set[BaseEntity]:
    leaves = set(self.iter_leaves())
    if not leaves:
        log.warning('no leaves found')  # original log message lost in extraction
        return set()
    for leaf in leaves:
        self.graph.nodes[leaf][self.tag] = self.calculate_score(leaf)
        log.log(5, 'scored leaf %s', leaf)  # original log message lost in extraction
    return leaves
Calculate the score for all leaves. :return: The set of leaf nodes that were scored
19,106
def join_mwp(tags: List[str]) -> List[str]:
    ret = []
    verb_flag = False
    for tag in tags:
        if "V" in tag:
            # Create a continuous 'V' span: later verb tokens get the "I" prefix.
            prefix, _ = tag.split("-")
            if verb_flag:
                prefix = "I"
            ret.append(f"{prefix}-V")
            verb_flag = True
        else:
            ret.append(tag)
            verb_flag = False
    return ret
Join multi-word predicates to a single predicate ('V') token.
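A worked example, assuming BIO-style SRL tags and the "I" continuation prefix filled in above:

    print(join_mwp(["B-ARG0", "B-V", "B-V", "O"]))
    # -> ['B-ARG0', 'B-V', 'I-V', 'O']   (the second verb token continues the span)
    print(join_mwp(["B-V", "O", "B-V"]))
    # -> ['B-V', 'O', 'B-V']             (non-contiguous verbs stay separate)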
19,107
def get_accounts(cls, soco=None): root = XML.fromstring(cls._get_account_xml(soco)) xml_accounts = root.findall() result = {} for xml_account in xml_accounts: serial_number = xml_account.get() is_deleted = True if xml_account.get() == else False if cls._all_accounts.get(serial_number): if is_deleted: del cls._all_accounts[serial_number] continue else: account = cls._all_accounts.get(serial_number) else: if is_deleted: return result
Get all accounts known to the Sonos system. Args: soco (`SoCo`, optional): a `SoCo` instance to query. If `None`, a random instance is used. Defaults to `None`. Returns: dict: A dict containing account instances. Each key is the account's serial number, and each value is the related Account instance. Accounts which have been marked as deleted are excluded. Note: Any existing Account instance will have its attributes updated to those currently stored on the Sonos system.
19,108
def do_reload(bot, target, cmdargs, server_send=None): def send(msg): if server_send is not None: server_send("%s\n" % msg) else: do_log(bot.connection, bot.get_target(target), msg) confdir = bot.handler.confdir if cmdargs == : if isinstance(target, irc.client.Event) and target.source.nick != bot.config[][]: bot.connection.privmsg(bot.get_target(target), "Nope, not gonna do it.") return if exists(join(confdir, )): send(misc.do_pull(srcdir=confdir)) else: send(misc.do_pull(repo=bot.config[][])) importlib.reload(config) bot.config = config.load_config(join(confdir, ), send) errored_helpers = modutils.scan_and_reimport() if errored_helpers: send("Failed to load some helpers.") for error in errored_helpers: send("%s: %s" % error) return False if not load_modules(bot.config, confdir, send): return False data = bot.handler.get_data() bot.shutdown_mp() bot.handler = handler.BotHandler(bot.config, bot.connection, bot.channels, confdir) bot.handler.set_data(data) bot.handler.connection = bot.connection bot.handler.channels = bot.channels return True
The reloading magic. - First, reload handler.py. - Then make copies of all the handler data we want to keep. - Create a new handler and restore all the data.
19,109
def _push_cm_exit(self, cm, cm_exit):
    _exit_wrapper = self._create_exit_wrapper(cm, cm_exit)
    self._push_exit_callback(_exit_wrapper, True)
Helper to correctly register callbacks to __exit__ methods.
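This matches the shape of the private helper used by `contextlib.ExitStack.enter_context`; a sketch of the public behaviour it supports:

    from contextlib import ExitStack

    with ExitStack() as stack:
        f = stack.enter_context(open("example.txt", "w"))  # cm.__exit__ registered via _push_cm_exit
        f.write("hello")
    # on leaving the with-block, registered __exit__ callbacks run in LIFO order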
19,110
def run_from_command_line(): for commands_conf in firenado.conf.management[]: logger.debug("Loading %s commands from %s." % ( commands_conf[], commands_conf[] )) exec( % commands_conf[]) command_index = 1 for arg in sys.argv[1:]: command_index += 1 if arg[0] != "-": break parser = FirenadoArgumentParser(prog=os.path.split(sys.argv[0])[1], add_help=False) parser.add_argument("-h", "--help", default=argparse.SUPPRESS) parser.add_argument("command", default="help", help="Command to executed") try: namespace = parser.parse_args(sys.argv[1:command_index]) if not command_exists(namespace.command): show_command_line_usage(parser) else: run_command(namespace.command, sys.argv[command_index-1:]) except FirenadoArgumentError: show_command_line_usage(parser, True)
Run Firenado's management commands from a command line
19,111
def get_options(server):
    try:
        response = requests.options(
            server, allow_redirects=False, verify=False, timeout=5)
    except (requests.exceptions.ConnectionError,
            requests.exceptions.MissingSchema):
        return "Server {} is not available!".format(server)
    try:
        # The allowed verbs are reported in the standard Allow header;
        # the original dict key was lost in extraction and is assumed here.
        return {'allowed': response.headers['Allow']}
    except KeyError:
        return "Unable to get HTTP methods"
Retrieve the available HTTP verbs
19,112
def get_key(self, key_id):
    url = ENCRYPTION_KEY_URL.format(key_id)
    return self._key_from_json(self._get_resource(url))
Returns a restclients.Key object for the given key ID. If the key ID isn't found, or if there is an error communicating with the KWS, a DataFailureException will be thrown.
19,113
def regular(u):
    uxx, uyy, uzz, uyz, uxz, uxy = u[0], u[1], u[2], u[3], u[4], u[5]
    return array(
        [[uxx, uyy + uzz, 0],
         [uyy, uxx + uzz, 0],
         [uzz, uxx + uyy, 0],
         [0, 0, 2 * uyz],
         [0, 0, 2 * uxz],
         [0, 0, 2 * uxy]])
Equation matrix generation for the regular (cubic) lattice. The order of constants is as follows: .. math:: C_{11}, C_{12}, C_{44} :param u: vector of deformations: [ :math:`u_{xx}, u_{yy}, u_{zz}, u_{yz}, u_{xz}, u_{xy}` ] :returns: Symmetry defined stress-strain equation matrix
19,114
def _update_mtime(self):
    try:
        self._mtime = os.path.getmtime(self.editor.file.path)
    except OSError:
        self._mtime = 0
        self._timer.stop()
    except (TypeError, AttributeError):
        try:
            self._timer.stop()
        except AttributeError:
            pass
Update the stored file modification time.
19,115
def crop(self, lat, lon, var):
    dims, idx = cropIndices(self.dims, lat, lon)
    subset = {}
    for v in var:
        # Index keys assumed; the original subscript literals were lost in
        # extraction. Also accumulates per-var instead of overwriting subset.
        subset[v] = self.ncs[0][v][idx['yn'], idx['xn']]
    return subset, dims
Crop a subset of the dataset for each var. Given doy, depth, lat and lon, it returns the smallest subset that still contains the requested coordinates inside it. It handles special cases like a region around Greenwich and the international date line. Accepts 0 to 360 and -180 to 180 longitude references. It extends time and longitude coordinates to simplify the use of series. For example, a ship track can be requested with a longitude sequence like [352, 358, 364, 369, 380].
19,116
def is_reseller(self):
    return self.role == self.roles.reseller.value and self.state == State.approved
is the user a reseller
19,117
def pydict2xmlstring(metadata_dict, **kwargs): ordering = kwargs.get(, UNTL_XML_ORDER) root_label = kwargs.get(, ) root_namespace = kwargs.get(, None) elements_namespace = kwargs.get(, None) namespace_map = kwargs.get(, None) root_attributes = kwargs.get(, None) if root_namespace and namespace_map: root = Element(root_namespace + root_label, nsmap=namespace_map) elif namespace_map: root = Element(root_label, nsmap=namespace_map) else: root = Element(root_label) if root_attributes: for key, value in root_attributes.items(): root.attrib[key] = value for metadata_key in ordering: if metadata_key in metadata_dict: for element in metadata_dict[metadata_key]: if in element and in element: create_dict_subelement( root, metadata_key, element[], attribs={: element[]}, namespace=elements_namespace, ) elif in element and in element: create_dict_subelement( root, metadata_key, element[], attribs={: element[]}, namespace=elements_namespace, ) elif in element and in element: create_dict_subelement( root, metadata_key, element[], attribs={: element[]}, namespace=elements_namespace, ) elif in element: create_dict_subelement( root, metadata_key, element[], namespace=elements_namespace, ) return + tostring( root, pretty_print=True )
Create an XML string from a metadata dictionary.
19,118
def scroll_deck_x(self, decknum, scroll_x):
    if decknum >= len(self.decks):
        raise IndexError("I have no deck at {}".format(decknum))
    if decknum >= len(self.deck_x_hint_offsets):
        self.deck_x_hint_offsets = list(self.deck_x_hint_offsets) + [0] * (
            decknum - len(self.deck_x_hint_offsets) + 1
        )
    self.deck_x_hint_offsets[decknum] += scroll_x
    self._trigger_layout()
Move a deck left or right.
19,119
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    # Defaults and single-quoted literals reconstructed from Django's
    # force_bytes; the originals were lost in extraction.
    if isinstance(s, memoryview):
        s = bytes(s)
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and (s is None or isinstance(s, int)):
        return s
    if not isinstance(s, six.string_types):
        try:
            if six.PY3:
                return six.text_type(s).encode(encoding)
            else:
                return bytes(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                return b' '.join([force_bytes(arg, encoding, strings_only, errors)
                                  for arg in s])
            return six.text_type(s).encode(encoding, errors)
    else:
        return s.encode(encoding, errors)
Similar to smart_bytes, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects.
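Assuming the UTF-8/strict defaults reconstructed above, typical behaviour:

    print(force_bytes('café'))                # b'caf\xc3\xa9'
    print(force_bytes(b'raw'))                # b'raw' (bytes pass through at the default encoding)
    print(force_bytes(5, strings_only=True))  # 5 (ints are returned untouched)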
19,120
def extract_symbols(self, raw_data_directory: str, destination_directory: str):
    print("Extracting Symbols from Audiveris OMR Dataset...")
    # Glob patterns assumed ('*.xml' and '*.png'); the original literals
    # were lost in extraction.
    all_xml_files = [y for x in os.walk(raw_data_directory)
                     for y in glob(os.path.join(x[0], '*.xml'))]
    all_image_files = [y for x in os.walk(raw_data_directory)
                       for y in glob(os.path.join(x[0], '*.png'))]
    data_pairs = []
    for i in range(len(all_xml_files)):
        data_pairs.append((all_xml_files[i], all_image_files[i]))
    for data_pair in data_pairs:
        self.__extract_symbols(data_pair[0], data_pair[1], destination_directory)
Extracts the symbols from the raw XML documents and matching images of the Audiveris OMR dataset into individual symbols.

:param raw_data_directory: The directory that contains the XML files and matching images
:param destination_directory: The directory into which the symbols should be generated. One sub-folder per symbol category will be generated automatically
19,121
def dependents_of_addresses(self, addresses):
    seen = OrderedSet(addresses)
    for address in addresses:
        seen.update(self._dependent_address_map[address])
        seen.update(self._implicit_dependent_address_map[address])
    return seen
Given an iterable of addresses, yield all of those addresses' dependents.
19,122
def dump(self): with self.registry.lock(identifier=self.worker_id) as session: for work_spec_name in self.registry.pull(NICE_LEVELS).iterkeys(): def scan(sfx): v = self.registry.pull(WORK_UNITS_ + work_spec_name + sfx) if v is None: return [] return v.keys() for key in scan(): logger.debug( .format(work_spec_name, key)) for key in scan(_BLOCKED): blocked_on = session.get( WORK_UNITS_ + work_spec_name + _DEPENDS, key) logger.debug( .format(work_spec_name, key, blocked_on)) for key in scan(_FINISHED): logger.debug( .format(work_spec_name, key)) for key in scan(_FAILED): logger.debug( .format(work_spec_name, key))
Print the entire contents of this to debug log messages. This is really only intended for debugging. It could produce a lot of data.
19,123
def remove_hairs_from_tags(dom):
    transform_content(
        dom.match("mods:mods", "mods:titleInfo", "mods:title"),
        lambda x: remove_hairs(x.getContent())
    )
    transform_content(
        dom.match(
            "mods:originInfo",
            "mods:place",
            ["mods:placeTerm", {"type": "text"}]
        ),
        lambda x: remove_hairs(x.getContent())
    )
Use :func:`.remove_hairs` on some of the tags:

- mods:title
- mods:placeTerm
19,124
def start_hq(output_dir, config, topic, is_master=True, **kwargs): HightQuarter = get_hq_class(config.get()) hq = HightQuarter(output_dir, config, topic, **kwargs) hq.setup() if is_master: hq.wait_turrets(config.get("min_turrets", 1)) hq.run() hq.tear_down()
Start a HQ
19,125
def __deserialize_model(self, data, klass): instance = klass() if not instance.swagger_types: return data for attr, attr_type in iteritems(instance.swagger_types): if data is not None \ and instance.attribute_map[attr] in data\ and isinstance(data, (list, dict)): value = data[instance.attribute_map[attr]] setattr(instance, + attr, self.__deserialize(value, attr_type)) return instance
Deserializes list or dict to model. :param data: dict, list. :param klass: class literal. :return: model object.
19,126
def phase_to_color_wheel(complex_number):
    angles = np.angle(complex_number)
    angle_round = int(((angles + 2 * np.pi) % (2 * np.pi)) / np.pi * 6)
    color_map = {
        0: (0, 0, 1),
        1: (0.5, 0, 1),
        2: (1, 0, 1),
        3: (1, 0, 0.5),
        4: (1, 0, 0),
        5: (1, 0.5, 0),
        6: (1, 1, 0),
        7: (0.5, 1, 0),
        8: (0, 1, 0),
        9: (0, 1, 0.5),
        10: (0, 1, 1),
        11: (0, 0.5, 1)
    }
    return color_map[angle_round]
Map the phase of a complex number to a color in (r, g, b). The phase is first mapped to an angle in the range [0, 2pi] and then to a color wheel with blue at zero phase.
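Worked examples, deterministic given the mapping above:

    print(phase_to_color_wheel(1 + 0j))   # angle 0    -> index 0 -> (0, 0, 1): blue at zero phase
    print(phase_to_color_wheel(1j))       # angle pi/2 -> index 3 -> (1, 0, 0.5)
    print(phase_to_color_wheel(-1 + 0j))  # angle pi   -> index 6 -> (1, 1, 0)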
19,127
def _parse_coroutine(self):
    while True:
        d = yield
        if d == int2byte(0):
            pass
        elif d == IAC:
            d2 = yield
            if d2 == IAC:
                self.received_data(d2)
            elif d2 in (NOP, DM, BRK, IP, AO, AYT, EC, EL, GA):
                self.command_received(d2, None)
            elif d2 in (DO, DONT, WILL, WONT):
                d3 = yield
                self.command_received(d2, d3)
            elif d2 == SB:
                data = []
                while True:
                    d3 = yield
                    if d3 == IAC:
                        d4 = yield
                        if d4 == SE:
                            break
                        else:
                            data.append(d4)
                    else:
                        data.append(d3)
                self.negotiate(b''.join(data))
        else:
            self.received_data(d)
Parser state machine. Every 'yield' expression returns the next byte.
19,128
def apply_tasks_to_issue(self, tasks, issue_number, issue_body):
    task_numbers = format_task_numbers_with_links(tasks)
    if task_numbers:
        # Reconstructed: the substitution string and body template were lost
        # in extraction; the intent is to rewrite the Asana section and save it.
        new_body = ASANA_SECTION_RE.sub('', issue_body)
        new_body = new_body + "\n" + task_numbers
        self.put("issue_edit", issue_number=issue_number, body=new_body)
        return new_body
    return issue_body
Applies task numbers to an issue.
19,129
def overloaded_build(type_, add_name=None): typename = type_.__name__ instrname = + typename.upper() dict_ = OrderedDict( __doc__=dedent( .format(name=instrname) ) ) try: build_instr = getattr(instructions, instrname) except AttributeError: raise TypeError("type %s is not buildable" % typename) if add_name is not None: try: add_instr = getattr( instructions, .join((typename, add_name)).upper(), ) except AttributeError: TypeError("type %s is not addable" % typename) dict_[] = pattern( build_instr, matchany[var], add_instr, )(_start_comprehension) dict_[] = pattern( instructions.RETURN_VALUE, startcodes=(IN_COMPREHENSION,), )(_return_value) else: add_instr = None dict_[] = pattern(build_instr)(_build) if not typename.endswith(): typename = typename + return type( + typename, (overloaded_constants(type_),), dict_, )
Factory for constant transformers that apply to a given build instruction.

Parameters
----------
type_ : type
    The object type to overload the construction of. This must be one of
    "buildable" types, or types with a "BUILD_*" instruction.
add_name : str, optional
    The suffix of the instruction that adds elements to the collection.
    For example: 'add' or 'append'

Returns
-------
transformer : subclass of CodeTransformer
    A new code transformer class that will overload the provided literal types.
19,130
def add_apt_key(filename=None, url=None, keyid=None, keyserver=, update=False): if keyid is None: if filename is not None: run_as_root( % locals()) elif url is not None: run_as_root( % locals()) else: raise ValueError() else: if filename is not None: _check_pgp_key(filename, keyid) run_as_root( % locals()) elif url is not None: tmp_key = % locals() run_as_root( % locals()) _check_pgp_key(tmp_key, keyid) run_as_root( % locals()) else: keyserver_opt = % locals() if keyserver is not None else run_as_root( % locals()) if update: update_index()
Trust packages signed with this public key.

Example::

    import burlap

    # Varnish signing key from URL (and verify fingerprint)
    burlap.deb.add_apt_key(keyid='C4DEFFEB', url='http://repo.varnish-cache.org/debian/GPG-key.txt')

    # Nginx signing key from default key server (subkeys.pgp.net)
    burlap.deb.add_apt_key(keyid='7BD9BF62')

    # From custom key server
    burlap.deb.add_apt_key(keyid='7BD9BF62', keyserver='keyserver.ubuntu.com')

    # From a file
    burlap.deb.add_apt_key(keyid='7BD9BF62', filename='nginx.asc')
19,131
def get_flashed_messages(
    with_categories: bool=False, category_filter: List[str]=[],
) -> Union[List[str], List[Tuple[str, str]]]:
    # '_flashes' is the standard Flask/Quart session key for flashed messages.
    flashes = session.pop('_flashes') if '_flashes' in session else []
    if category_filter:
        flashes = [flash for flash in flashes if flash[0] in category_filter]
    if not with_categories:
        flashes = [flash[1] for flash in flashes]
    return flashes
Retrieve the flashed messages stored in the session. This is mostly useful in templates where it is exposed as a global function, for example .. code-block:: html+jinja <ul> {% for message in get_flashed_messages() %} <li>{{ message }}</li> {% endfor %} </ul> Note that caution is required for usage of ``category_filter`` as all messages will be popped, but only those matching the filter returned. See :func:`~quart.helpers.flash` for message creation.
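A sketch of category filtering (names follow the docstring; note that all messages are popped even when only a subset is returned):

    # inside a request context, after flash(message, category) calls elsewhere:
    for category, message in get_flashed_messages(with_categories=True,
                                                  category_filter=["error"]):
        print(category, message)   # only "error" flashes are returned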
19,132
def authenticate(self, request):
    request = request._request
    user = getattr(request, 'user', None)
    if not user or user.is_anonymous:
        return None
    self.enforce_csrf(request)
    return (user, None)
Authenticate the user, requiring a logged-in account and CSRF. This is exactly the same as the `SessionAuthentication` implementation, with the `user.is_active` check removed. Args: request (HttpRequest) Returns: Tuple of `(user, token)` Raises: PermissionDenied: The CSRF token check failed.
19,133
def _compute_quantile(data, dims, cutoffs):
    data_arr = xr.DataArray(data, dims=dims)
    # Dimension names assumed ('y', 'x'); the original literals were lost in extraction.
    left, right = data_arr.quantile([cutoffs[0], 1. - cutoffs[1]], dim=['y', 'x'])
    logger.debug("Interval: left=%s, right=%s", str(left), str(right))
    return left.data, right.data
Helper method for stretch_linear. Dask delayed functions need to be non-internal functions (created inside a function) to be serializable on a multi-process scheduler. Quantile requires the data to be loaded since it is not supported on dask arrays yet.
19,134
def stop_and_persist(self, symbol=, text=None): if not self._enabled: return self symbol = decode_utf_8_text(symbol) if text is not None: text = decode_utf_8_text(text) else: text = self._text[] text = text.strip() if self._text_color: text = colored_frame(text, self._text_color) self.stop() output = .format(*[ (text, symbol) if self._placement == else (symbol, text) ][0]) with self.output: self.output.outputs = self._output(output)
Stops the spinner and persists the final frame to be shown. Parameters ---------- symbol : str, optional Symbol to be shown in final frame text: str, optional Text to be shown in final frame Returns ------- self
19,135
def evaluate(self, system_id=1, rouge_args=None):
    self.write_config(system_id=system_id)
    options = self.__get_options(rouge_args)
    command = [self._bin_path] + options
    env = None
    if hasattr(self, "_home_dir") and self._home_dir:
        env = {'HOME': self._home_dir}  # env key assumed; original literal lost in extraction
    self.log.info(
        "Running ROUGE with command {}".format(" ".join(command)))
    rouge_output = check_output(command, env=env).decode("UTF-8")
    return rouge_output
Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string.
19,136
def join(self, other, how='left', lsuffix='', rsuffix=''):
    # Single-quoted literals reconstructed from the pandas Panel.join source;
    # the originals were lost in extraction.
    from pandas.core.reshape.concat import concat

    if isinstance(other, Panel):
        join_major, join_minor = self._get_join_index(other, how)
        this = self.reindex(major=join_major, minor=join_minor)
        other = other.reindex(major=join_major, minor=join_minor)
        merged_data = this._data.merge(other._data, lsuffix, rsuffix)
        return self._constructor(merged_data)
    else:
        if lsuffix or rsuffix:
            raise ValueError('Suffixes not supported when passing '
                             'multiple panels')
        if how == 'left':
            how = 'outer'
            join_axes = [self.major_axis, self.minor_axis]
        elif how == 'right':
            raise ValueError('Right join not supported with multiple '
                             'panels')
        else:
            join_axes = None
        return concat([self] + list(other), axis=0, join=how,
                      join_axes=join_axes, verify_integrity=True)
Join items with other Panel either on major and minor axes column. Parameters ---------- other : Panel or list of Panels Index should be similar to one of the columns in this one how : {'left', 'right', 'outer', 'inner'} How to handle indexes of the two objects. Default: 'left' for joining on index, None otherwise * left: use calling frame's index * right: use input frame's index * outer: form union of indexes * inner: use intersection of indexes lsuffix : string Suffix to use from left frame's overlapping columns rsuffix : string Suffix to use from right frame's overlapping columns Returns ------- joined : Panel
19,137
def parse(cls, fptr, offset, length):
    num_bytes = offset + length - fptr.tell()
    read_buffer = fptr.read(num_bytes)
    # Struct formats assumed ('>BBB' and '>I'); the original literals were
    # lost in extraction.
    lst = struct.unpack_from('>BBB', read_buffer, offset=0)
    method, precedence, approximation = lst
    if method == 1:
        colorspace, = struct.unpack_from('>I', read_buffer, offset=3)
        if colorspace not in _COLORSPACE_MAP_DISPLAY.keys():
            msg = "Unrecognized colorspace ({colorspace})."
            msg = msg.format(colorspace=colorspace)
            warnings.warn(msg, UserWarning)
        icc_profile = None
    else:
        colorspace = None
        if (num_bytes - 3) < 128:
            msg = ("ICC profile header is corrupt, length is "
                   "only {length} when it should be at least 128.")
            warnings.warn(msg.format(length=num_bytes - 3), UserWarning)
            icc_profile = None
        else:
            profile = _ICCProfile(read_buffer[3:])
            icc_profile = profile.header
    return cls(method=method, precedence=precedence,
               approximation=approximation, colorspace=colorspace,
               icc_profile=icc_profile, length=length, offset=offset)
Parse JPEG 2000 color specification box. Parameters ---------- fptr : file Open file object. offset : int Start position of box in bytes. length : int Length of the box in bytes. Returns ------- ColourSpecificationBox Instance of the current colour specification box.
19,138
def read_folder(folder, ext='', uppercase=False, replace_dot='.', parent=''):
    # Default literals and the '/' path separator assumed; the originals
    # were lost in extraction.
    ret = {}
    if os.path.exists(folder):
        for file in os.listdir(folder):
            if os.path.isdir(os.path.join(folder, file)):
                child = read_folder(os.path.join(folder, file),
                                    ext, uppercase, replace_dot,
                                    parent=parent + file + '/')
                ret.update(child)
            else:
                if ext == '' or file.endswith(ext):
                    key = file.replace('.', replace_dot)
                    key = uppercase and key.upper() or key
                    ret[parent + key] = read_file(os.path.join(folder, file))
    return ret
This will read all of the files in the folder with the extension equal to ext :param folder: str of the folder name :param ext: str of the extension :param uppercase: bool if True will uppercase all the file names :param replace_dot: str will replace "." in the filename :param parent: str of the parent folder :return: dict of basename with the value of the text in the file
19,139
def stderr_output(cmd):
    handle, gpg_stderr = stderr_handle()
    try:
        output = subprocess.check_output(cmd, stderr=gpg_stderr)
        if handle:
            handle.close()
        return str(polite_string(output))
    except subprocess.CalledProcessError as exception:
        LOGGER.debug("GPG Command %s", ' '.join(exception.cmd))
        LOGGER.debug("GPG Output %s", exception.output)
        raise CryptoritoError()
Wraps the execution of check_output in a way that ignores stderr when not in debug mode
19,140
def _apply_worksheet_template_reference_analyses(self, wst, type=): if type == : self._apply_worksheet_template_reference_analyses(wst, ) self._apply_worksheet_template_reference_analyses(wst, ) return if type not in [, ]: return references = self._resolve_reference_samples(wst, type) for reference in references: slot = reference[] sample = reference[] services = reference[] self.addReferenceAnalyses(sample, services, slot)
Add reference analyses to worksheet according to the worksheet template layout passed in. Does not overwrite slots that are already filled. :param wst: worksheet template used as the layout
19,141
def _adb(self, commands): ctx = self.ctx ctx.prepare_build_environment(user_sdk_dir=self.sdk_dir, user_ndk_dir=self.ndk_dir, user_android_api=self.android_api, user_ndk_api=self.ndk_api) if platform in (, ): adb = sh.Command(join(ctx.sdk_dir, , )) else: adb = sh.Command(join(ctx.sdk_dir, , )) info_notify() output = adb(*commands, _iter=True, _out_bufsize=1, _err_to_out=True) for line in output: sys.stdout.write(line) sys.stdout.flush()
Call the adb executable from the SDK, passing the given commands as arguments.
19,142
def hrmint(xvals, yvals, x):
    work = stypes.emptyDoubleVector(int(2 * len(yvals) + 1))
    n = ctypes.c_int(len(xvals))
    xvals = stypes.toDoubleVector(xvals)
    yvals = stypes.toDoubleVector(yvals)
    x = ctypes.c_double(x)
    f = ctypes.c_double(0)
    df = ctypes.c_double(0)
    libspice.hrmint_c(n, xvals, yvals, x, work, f, df)
    return f.value, df.value
Evaluate a Hermite interpolating polynomial at a specified abscissa value. https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/hrmint_c.html :param xvals: Abscissa values. :type xvals: Array of floats :param yvals: Ordinate and derivative values. :type yvals: Array of floats :param x: Point at which to interpolate the polynomial. :type x: int :return: Interpolated function value at x and the Interpolated function's derivative at x :rtype: tuple
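The worked example from NAIF's hrmint_c documentation (values quoted from those docs; treat as illustrative):

    xvals = [-1.0, 0.0, 3.0, 5.0]
    yvals = [6.0, 3.0, 5.0, 0.0, 2210.0, 5115.0, 78180.0, 109395.0]
    f, df = hrmint(xvals, yvals, 2)
    print(f, df)  # 141.0 456.0 in the NAIF example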
19,143
def min_time(self):
    # format/scale literals assumed ('jd', 'tdb'); the originals were lost in extraction.
    min_time = Time(self._interval_set.min / TimeMOC.DAY_MICRO_SEC,
                    format='jd', scale='tdb')
    return min_time
Get the `~astropy.time.Time` time of the tmoc first observation Returns ------- min_time : `astropy.time.Time` time of the first observation
19,144
def master_primary_name(self) -> Optional[str]:
    master_primary_name = self.master_replica.primaryName
    if master_primary_name:
        return self.master_replica.getNodeName(master_primary_name)
    return None
Return the name of the primary node of the master instance
19,145
def get_callproc_signature(self, name, param_types):
    if isinstance(param_types[0], (list, tuple)):
        params = [self.sql_writer.to_placeholder(*pt) for pt in param_types]
    else:
        params = [self.sql_writer.to_placeholder(None, pt) for pt in param_types]
    return name + self.sql_writer.to_tuple(params)
Returns a procedure's signature from the name and list of types.

:param name: the name of the procedure
:param param_types: can be either strings, or 2-tuples. 2-tuples must be of the form (name, db_type).
:return: the procedure's signature
19,146
def main(argv=None): if argv is None: argv = sys.argv else: if isinstance(argv, list): argv = [] + argv else: argv = [] + [argv] if len(argv) >= 1: if len(argv) == 1 or argv[1].startswith() or argv[1].startswith(): print(main.__doc__) fun = None elif argv[1].startswith() or argv[1].startswith(): import doctest if len(argv) > 2 and (argv[2].startswith() or argv[2].startswith()): print() print() print() print() doctest.testmod(sys.modules[__name__], report=True) else: print() fn = stdout = sys.stdout try: with open(fn, ) as f: sys.stdout = f clock = ElapsedTime() doctest.testmod(sys.modules[__name__], report=True) t_elapsed = clock() finally: sys.stdout = stdout process_doctest_output(fn) try: import os for name in os.listdir(): if (name.startswith() and name.endswith()): os.remove(name) except: pass print() print(, t_elapsed) return elif argv[1] == : print(__doc__) print(CMAEvolutionStrategy.__doc__) print(fmin.__doc__) fun = None elif argv[1] == : print() print([d for d in dir(fcts) if not d.startswith()]) fun = None elif argv[1] in (, ): from distutils.core import setup setup(name="cma", long_description=__doc__, version=__version__.split()[0], description="CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python", author="Nikolaus Hansen", author_email="hansen at lri.fr", maintainer="Nikolaus Hansen", maintainer_email="hansen at lri.fr", url="https://www.lri.fr/~hansen/cmaes_inmatlab.html license="BSD", classifiers = [ "Intended Audience :: Science/Research", "Intended Audience :: Education", "Intended Audience :: Other Audience", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Mathematics", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Operating System :: OS Independent", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Development Status :: 4 - Beta", "Environment :: Console", "License :: OSI Approved :: BSD License", ], keywords=["optimization", "CMA-ES", "cmaes"], py_modules=["cma"], requires=["numpy"], ) fun = None elif argv[1] in (,): plot(name=argv[2] if len(argv) > 2 else None) raw_input() fun = None elif len(argv) > 3: fun = eval( + argv[1]) else: print() fun = None if fun is not None: if len(argv) > 2: x0 = np.ones(eval(argv[2])) if len(argv) > 3: sig0 = eval(argv[3]) opts = {} for i in xrange(5, len(argv), 2): opts[argv[i - 1]] = eval(argv[i]) if fun is not None: tic = time.time() fmin(fun, x0, sig0, opts) print(, round(time.time() - tic, 2)) elif not len(argv): fmin(fcts.elli, np.ones(6) * 0.1, 0.1, {:1e-9})
to install and/or test from the command line use:: python cma.py [options | func dim sig0 [optkey optval][optkey optval]...] with options being ``--test`` (or ``-t``) to run the doctest, ``--test -v`` to get (much) verbosity. ``install`` to install cma.py (uses setup from distutils.core). ``--doc`` for more infos. Or start Python or (even better) ``ipython`` and:: import cma cma.main('--test') help(cma) help(cma.fmin) res = fmin(cma.fcts.rosen, 10 * [0], 1) cma.plot() Examples ======== Testing with the local python distribution from a command line in a folder where ``cma.py`` can be found:: python cma.py --test And a single run on the Rosenbrock function:: python cma.py rosen 10 1 # dimension initial_sigma python cma.py plot In the python shell:: import cma cma.main('--test')
19,147
def mv(hdfs_src, hdfs_dst):
    cmd = "hadoop fs -mv %s %s" % (hdfs_src, hdfs_dst)
    rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
Move a file on hdfs :param hdfs_src: Source (str) :param hdfs_dst: Destination (str) :raises: IOError: If unsuccessful
19,148
def _generate_initial_score(self):
    self.current_energy = self.eval_fn(self.polypeptide, *self.eval_args)
    self.best_energy = copy.deepcopy(self.current_energy)
    self.best_model = copy.deepcopy(self.polypeptide)
    return
Runs the evaluation function for the initial pose.
19,149
def filename(self):
    # The 'attachment' key and '/' separator are assumed from the docstring;
    # the original literals were lost in extraction.
    if self.value and 'attachment' in self._json_data and self._json_data['attachment']:
        return self._json_data['attachment'].split('/')[-1]
    return None
Filename of the attachment, without the full 'attachment' path.
19,150
def _check_auth(self, must_admin, redir_login=True): if self.auth_mode == : return username = self._check_session() if cherrypy.request.query_string == : qs = else: qs = + cherrypy.request.query_string quoted_requrl = quote_plus(cherrypy.url() + qs) if not username: if redir_login: raise cherrypy.HTTPRedirect( "/signin?url=%(url)s" % {: quoted_requrl}, ) else: raise cherrypy.HTTPError( "403 Forbidden", "You must be logged in to access this ressource.", ) if not in cherrypy.session \ or not cherrypy.session[]: if redir_login: raise cherrypy.HTTPRedirect( "/signin?url=%(url)s" % {: quoted_requrl}, ) else: raise cherrypy.HTTPError( "403 Forbidden", "You must be logged in to access this ressource.", ) if cherrypy.session[] and \ not cherrypy.session[]: if must_admin: raise cherrypy.HTTPError( "403 Forbidden", "You are not allowed to access this resource.", ) else: return username if cherrypy.session[] and \ cherrypy.session[]: return username else: if redir_login: raise cherrypy.HTTPRedirect( "/signin?url=%(url)s" % {: quoted_requrl}, ) else: raise cherrypy.HTTPError( "403 Forbidden", "You must be logged in to access this ressource.", )
Check if a user is authenticated and, optionally, an administrator.

If the user is not authenticated, redirect to the login page (with the escaped URL of the originally requested page, for redirection after login). If the user is authenticated but not an admin while must_admin is enabled, raise a 403 error.

@boolean must_admin: flag "user must be an administrator to access this page"
@rtype str: login of the user
19,151
def _get_simple(self, name):
    for item in reversed(self._stack):
        result = _get_value(item, name)
        if result is not _NOT_FOUND:
            return result
    raise KeyNotFoundError(name, "part missing")
Query the stack for a non-dotted name.
19,152
def get_next_non_summer_term(term):
    next_term = get_term_after(term)
    if next_term.is_summer_quarter():
        return get_next_autumn_term(next_term)
    return next_term
Return the Term object for the quarter after the given term (skipping the summer quarter).
19,153
def list_(): * autoruns = {} keys = [, , ] for key in keys: autoruns[key] = [] cmd = [, , key] for line in __salt__[](cmd, python_shell=False).splitlines(): if line and line[0:4] != "HKEY" and line[0:5] != "ERROR": autoruns[key].append(line) full_dirs = _get_dirs(user_dir, startup_dir) for full_dir in full_dirs: files = os.listdir(full_dir) autoruns[full_dir] = [] for single_file in files: autoruns[full_dir].append(single_file) return autoruns
Get a list of automatically running programs CLI Example: .. code-block:: bash salt '*' autoruns.list
19,154
def accuracy(self):
    sub_observed = np.array([self.observed.metadata[i] for i in self.observed.arr])
    return float((self.model_predictions() == sub_observed).sum()) / self.data_size
Calculates the accuracy of the tree by comparing the model predictions to the dataset (TP + TN) / (TP + TN + FP + FN) == (T / (T + F))
19,155
def _invalid_frame(fobj):
    fin = fobj.f_code.co_filename
    invalid_module = any([fin.endswith(item) for item in _INVALID_MODULES_LIST])
    return invalid_module or (not os.path.isfile(fin))
Return True for stack frames that should not be processed (frames from invalid modules or not backed by a file).
19,156
def get_gravatar(email, size=80, default='identicon'):
    # Base URLs, dict keys and parameter names reconstructed from the
    # Gravatar API conventions; the original literals were lost in extraction.
    if userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE:
        base_url = 'https://secure.gravatar.com/avatar/'
    else:
        base_url = 'http://www.gravatar.com/avatar/'
    gravatar_url = '%(base_url)s%(gravatar_id)s?' % \
        {'base_url': base_url,
         'gravatar_id': md5(email.lower().encode('utf-8')).hexdigest()}
    gravatar_url += urlencode({
        's': str(size),
        'd': default
    })
    return gravatar_url
Gets a Gravatar for an email address.

:param size: The size in pixels of one side of the Gravatar's square image. Optional, if not supplied will default to ``80``.

:param default: Defines what should be displayed if no image is found for this user. Optional argument which defaults to ``identicon``. The argument can be a URI to an image or one of the following options:

    ``404`` Do not load any image if none is associated with the email hash, instead return an HTTP 404 (File Not Found) response.

    ``mm`` Mystery-man, a simple, cartoon-style silhouetted outline of a person (does not vary by email hash).

    ``identicon`` A geometric pattern based on an email hash.

    ``monsterid`` A generated 'monster' with different colors, faces, etc.

    ``wavatar`` Generated faces with differing features and backgrounds

:return: The URI pointing to the Gravatar.
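A hypothetical call; the exact URL depends on the base-URL literals reconstructed above:

    url = get_gravatar('alice@example.com', size=120, default='mm')
    # e.g. 'https://secure.gravatar.com/avatar/<md5 of lowercased email>?s=120&d=mm'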
19,157
def get_main_version(version):
    "Returns main version (X.Y[.Z]) from VERSION."
    parts = 2 if version[2] == 0 else 3
    return '.'.join(str(x) for x in version[:parts])
Returns main version (X.Y[.Z]) from VERSION.
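Two deterministic examples of the micro-version rule:

    print(get_main_version((1, 11, 0, 'final', 0)))  # '1.11'  (micro == 0 is dropped)
    print(get_main_version((2, 0, 3, 'final', 0)))   # '2.0.3'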
19,158
def sequence_id_factory(value, datatype_cls, validation_level=None):
    if not value:
        return datatype_cls(validation_level=validation_level)
    try:
        return datatype_cls(int(value), validation_level=validation_level)
    except ValueError:
        # Message wording reconstructed; the original literal was lost in extraction.
        raise ValueError('Invalid value for SI data: {}'.format(value))
Creates a :class:`SI <hl7apy.base_datatypes.SI>` object The value in input can be a string representing an integer number or an ``int``. (i.e. a string valid for ``int()`` ). If it's not, a :exc:`ValueError` is raised Also an empty string or ``None`` are allowed :type value: ``str`` or ``None`` :param value: the value to assign the date object :type datatype_cls: `class` :param value: the SI class to use. It has to be loaded from one implementation of the different version modules :type validation_level: ``int`` :param validation_level: It must be a value from class :class:`VALIDATION_LEVEL hl7apy.consts.VALIDATION_LEVEL` or ``None`` to use the default value :rtype: :class:`SI <hl7apy.base_datatypes.SI>`
19,159
def add_report_data(list_all=[], module_name="TestModule", **kwargs):
    # Dict keys below are assumed to mirror the kwargs names; the original
    # single-quoted literals were lost in extraction.
    start_at = kwargs.get("start_at")
    case_name = kwargs.get("case_name", "TestCase")
    raw_case_name = kwargs.get("raw_case_name", "TestCase")
    exec_date_time = time.localtime(start_at)
    execdate = time.strftime("%Y-%m-%d", exec_date_time)
    exectime = time.strftime("%H:%M:%S", exec_date_time)
    _case_report = {
        'resp_tester': kwargs.get("resp_tester", "administrator"),
        'tester': kwargs.get("tester", "administrator"),
        'case_name': case_name,
        'raw_case_name': raw_case_name,
        'status': kwargs.get("status", "Pass"),
        'exec_date': execdate,
        'exec_time': exectime,
        'start_at': start_at,
        'end_at': kwargs.get("end_at"),
    }
    for module in list_all:
        if module_name != module["Name"]:
            continue
        for case in module["TestCases"]:
            if raw_case_name == case["raw_case_name"]:
                case.update(_case_report)
                return list_all
        module["TestCases"].append(_case_report)
        return list_all
    list_all.append({"Name": module_name, "TestCases": [_case_report]})
    return list_all
add report data to a list @param list_all: a list which save the report data @param module_name: test set name or test module name @param kwargs: such as case_name: testcase name status: test result, Pass or Fail resp_tester: responsible tester who write this case tester: tester who execute the test start_at: tester run this case at time end_at: tester stop this case at time
19,160
def set_values(self, choice_ids):
    # The 'id' and 'choices' keys are assumed; the original literals were
    # lost in extraction.
    organized_choices = []
    for choice_id in choice_ids:
        choice_obj = [c for c in self._original_choice_order
                      if c['id'] == choice_id][0]
        organized_choices.append(choice_obj)
    self.my_osid_object._my_map['choices'] = organized_choices
assume choice_ids is a list of choiceIds, like ["57978959cdfc5c42eefb36d1", "57978959cdfc5c42eefb36d0", "57978959cdfc5c42eefb36cf", "57978959cdfc5c42eefb36ce"]
19,161
def kill_all(self):
    for pid in self.children:
        try:
            os.kill(pid, signal.SIGTRAP)
        except OSError:
            continue
    self.join()
kill all slaves and reap the monitor
19,162
def clear_data(self): if (self.get_data_metadata().is_read_only() or self.get_data_metadata().is_required()): raise errors.NoAccess() if self._my_map[] == self._data_default: return dbase = JSONClientValidated(, runtime=self._runtime).raw() filesys = gridfs.GridFS(dbase) filesys.delete(self._my_map[]) self._my_map[] = self._data_default del self._my_map[]
Removes the content data. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
19,163
def iter_components(self):
    names = self.list_components()
    for name in names:
        yield self.get_component(name)
Iterate over all defined components yielding IOTile objects.
19,164
def channels_rename(self, room_id, name, **kwargs):
    # Endpoint name assumed from the Rocket.Chat REST API ('channels.rename');
    # the original literal was lost in extraction.
    return self.__call_api_post('channels.rename', roomId=room_id, name=name, kwargs=kwargs)
Changes the name of the channel.
19,165
def place(vertices_resources, nets, machine, constraints, random=default_random):
    machine = machine.copy()
    placements = {}

    vertices_resources, nets, constraints, substitutions = \
        apply_same_chip_constraints(vertices_resources, nets, constraints)

    for constraint in constraints:
        if isinstance(constraint, LocationConstraint):
            # Reconstructed: the lines binding vertex and location from the
            # constraint were lost in extraction.
            vertex = constraint.vertex
            location = constraint.location
            placements[vertex] = location
            resources = vertices_resources[vertex]
            machine[location] = subtract_resources(machine[location], resources)
            if overallocated(machine[location]):
                raise InsufficientResourceError(
                    "Cannot meet {}".format(constraint))
        elif isinstance(constraint, ReserveResourceConstraint):
            apply_reserve_resource_constraint(machine, constraint)

    movable_vertices = [v for v in vertices_resources if v not in placements]
    locations = set(machine)
    for vertex in movable_vertices:
        while True:
            if len(locations) == 0:
                raise InsufficientResourceError(
                    "Ran out of chips while attempting to place vertex "
                    "{}".format(vertex))
            location = random.sample(locations, 1)[0]
            resources_if_placed = subtract_resources(
                machine[location], vertices_resources[vertex])
            if overallocated(resources_if_placed):
                locations.remove(location)
            else:
                placements[vertex] = location
                machine[location] = resources_if_placed
                break

    finalise_same_chip_constraints(substitutions, placements)
    return placements
A random placer.

This algorithm performs uniform-random placement of vertices (completely ignoring connectivity) and thus in the general case is likely to produce very poor quality placements. It exists primarily as a baseline comparison for placement quality and is probably of little value to most users.

Parameters
----------
random : :py:class:`random.Random`
    Defaults to ``import random`` but can be set to your own instance of
    :py:class:`random.Random` to allow you to control the seed and produce
    deterministic results. For results to be deterministic,
    vertices_resources must be supplied as an
    :py:class:`collections.OrderedDict`.
19,166
def _sra_download_worker(*args):
    gsm = args[0][0]
    email = args[0][1]
    dirpath = args[0][2]
    kwargs = args[0][3]
    return (gsm.get_accession(), gsm.download_SRA(email, dirpath, **kwargs))
A worker to download SRA files. To be used with multiprocessing.
19,167
def load_config():
    config = flatten(default_config.DEFAULT_CONFIG)
    cli_config = flatten(parse_args())

    if "configfile" in cli_config:
        logging.info("Reading config file {}".format(cli_config["configfile"]))
        configfile = parse_configfile(cli_config["configfile"])
        config = overwrite_config(config, configfile)

    config = overwrite_config(config, cli_config)

    # The 'verbose' key is assumed from the log-level handling; the original
    # literal was lost in extraction.
    if "verbose" in config:
        if config["verbose"] == 1:
            logging.getLogger().setLevel(logging.INFO)
        elif config["verbose"] > 1:
            logging.getLogger().setLevel(logging.DEBUG)

    return ObjectView(config)
Load settings from default config and optionally overwrite with config file and commandline parameters (in that order).
19,168
def webhooks(self):
    # Solution key assumed ('sid'); the original literal was lost in extraction.
    if self._webhooks is None:
        self._webhooks = WebhookList(self._version, session_sid=self._solution['sid'])
    return self._webhooks
Access the webhooks :returns: twilio.rest.messaging.v1.session.webhook.WebhookList :rtype: twilio.rest.messaging.v1.session.webhook.WebhookList
19,169
def getrdfdata():
    if not os.path.exists(RDFFILES):
        _, _ = urllib.urlretrieve(RDFURL, RDFFILES)
    with tarfile.open(RDFFILES) as archive:
        for tarinfo in archive:
            yield ElementTree.parse(archive.extractfile(tarinfo))
Downloads Project Gutenberg RDF catalog. Yields: xml.etree.ElementTree.Element: An etext meta-data definition.
19,170
def get_instance(self, contract_name: str) -> None:
    self._validate_name_and_references(contract_name)
    contract_type = self.deployment_data[contract_name]["contract_type"]
    factory = self.contract_factories[contract_type]
    address = to_canonical_address(self.deployment_data[contract_name]["address"])
    contract_kwargs = {
        "abi": factory.abi,
        "bytecode": factory.bytecode,
        "bytecode_runtime": factory.bytecode_runtime,
    }
    return self.w3.eth.contract(address=address, **contract_kwargs)
Fetches a contract instance belonging to deployment after validating contract name.
19,171
def load(self, prof_name):
    prof_dir = self.__profile_dir(prof_name)
    prof_ini_path = self.__profile_ini_path(prof_dir)

    if not os.path.exists(prof_ini_path):
        msg = "Profile does not exist"
        raise Exception(msg.format(prof_name))

    prof_ini_file = open(prof_ini_path, "r")
    prof_ini = configparser.ConfigParser()
    prof_ini.read_file(prof_ini_file)
    prof_ini_file.close()

    prof_type = prof_ini["profile"]["type"]
    prof_stub = self.__profile_stub(prof_name, prof_type, prof_dir)
    prof_stub.prepare(prof_ini["properties"])

    return prof_stub
Load the profile with the given name. :param str prof_name: Profile name. :rtype: ProfileStub :return: An stub to loaded profile.
19,172
def replace(self, src):
    "Given some source html substitute and annotated as applicable"
    for html in self.substitutions.keys():
        if src == html:
            annotation = self.annotation % self.substitutions[src][1]
            return annotation + self.substitutions[src][0]
    return src
Given some source HTML, substitute and annotate as applicable.
19,173
def get_user(self, user_id=None, username=None, email=None):
    if user_id:
        uri = "/users/%s" % user_id
    elif username:
        uri = "/users?name=%s" % username
    elif email:
        uri = "/users?email=%s" % email
    else:
        # Parameter names in the message reconstructed from the signature.
        raise ValueError("You must include one of 'user_id', "
                         "'username', or 'email' when calling get_user().")
    resp, resp_body = self.method_get(uri)
    if resp.status_code == 404:
        raise exc.NotFound("No such user exists.")
    users = resp_body.get("users", [])
    if users:
        return [User(self, user) for user in users]
    else:
        user = resp_body.get("user", {})
        if user:
            return User(self, user)
        else:
            raise exc.NotFound("No such user exists.")
Returns the user specified by either ID, username or email. Since more than one user can have the same email address, searching by that term will return a list of 1 or more User objects. Searching by username or ID will return a single User. If a user_id that doesn't belong to the current account is searched for, a Forbidden exception is raised. When searching by username or email, a NotFound exception is raised if there is no matching user.
19,174
def assertFileExists(self, filename, msg=None):
    standardMsg = '%s does not exist' % filename  # message wording assumed; original literal lost in extraction
    if not os.path.isfile(filename):
        self.fail(self._formatMessage(msg, standardMsg))
Fail if ``filename`` does not exist as determined by ``os.path.isfile(filename)``. Parameters ---------- filename : str, bytes msg : str If not provided, the :mod:`marbles.mixins` or :mod:`unittest` standard message will be used.
19,175
def _load_yaml_config(path=None):
    furious_yaml_path = path or find_furious_yaml()
    if furious_yaml_path is None:
        logging.debug("furious.yaml not found.")
        return None
    with open(furious_yaml_path) as yaml_file:
        return yaml_file.read()
Open and return the yaml contents.
19,176
def load_obo_file(self, obo_file, optional_attrs, load_obsolete, prt):
    reader = OBOReader(obo_file, optional_attrs)

    alt2rec = {}
    for rec in reader:
        if load_obsolete or not rec.is_obsolete:
            self[rec.item_id] = rec
            for alt in rec.alt_ids:
                alt2rec[alt] = rec

    self.typedefs = reader.typedefs
    self._populate_terms(reader.optobj)
    self._set_level_depth(reader.optobj)

    for goid_alt, rec in alt2rec.items():
        self[goid_alt] = rec
    desc = self._str_desc(reader)
    if prt is not None:
        prt.write("{DESC}\n".format(DESC=desc))
    return desc
Read obo file. Store results.
19,177
def _load_zp_mappings(self, file): zp_map = {} LOG.info("Loading ZP-to-EQ mappings") line_counter = 0 with open(file, , encoding="utf-8") as csvfile: filereader = csv.reader(csvfile, delimiter=, quotechar=) for row in filereader: line_counter += 1 (zp_id, zp_label, superterm1_id, subterm1_id, quality_id, modifier, superterm2_id, subterm2_id) = row key = self._make_zpkey( superterm1_id, subterm1_id, quality_id, superterm2_id, subterm2_id, modifier) zp_map[key] = { : zp_id, : zp_label, : superterm1_id, : subterm1_id, : quality_id, : modifier, : superterm2_id, : subterm2_id, } LOG.info("Loaded %s zp terms", zp_map.__len__()) return zp_map
Given a file that defines the mapping between ZFIN-specific EQ definitions and the automatically derived ZP ids, create a mapping here. This may be deprecated in the future :return:
19,178
def _step(self, theme, direction):
    if not self.themes:
        self.reload()

    key = (theme.source, theme.name)
    for i, val in enumerate(self.themes):
        if (val.source, val.name) == key:
            index = i
            break
    else:
        self.themes.insert(0, theme)
        index = 0

    index = (index + direction) % len(self.themes)
    new_theme = self.themes[index]
    return new_theme
Traverse the list in the given direction and return the next theme
19,179
def p_try_statement_1(self, p):
    p[0] = ast.Try(statements=p[2], catch=p[3])
try_statement : TRY block catch
19,180
def refresh_balance(self):
    left_depth = self.left_node.depth if self.left_node else 0
    right_depth = self.right_node.depth if self.right_node else 0
    self.depth = 1 + max(left_depth, right_depth)
    self.balance = right_depth - left_depth
Recalculate self.balance and self.depth based on child node values.
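A short note on the convention, since the sign matters for rebalancing:

    # balance = right_depth - left_depth:
    #   balance == 0   perfectly balanced node
    #   balance == +2  right-heavy; the usual AVL trigger for a left rotation
    #   balance == -2  left-heavy; trigger for a right rotation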
19,181
def activate_right(self, token):
    watchers.MATCHER.debug(
        "Node <%s> activated right with token %r", self, token)
    return self._activate_right(token.copy())
Make a copy of the received token and call `_activate_right`.
19,182
def _validate_plan(plan): if not _validate_format(plan): return False partition_names = [ (p_data[], p_data[]) for p_data in plan[] ] duplicate_partitions = [ partition for partition, count in six.iteritems(Counter(partition_names)) if count > 1 ] if duplicate_partitions: _log.error( .format(p_list=duplicate_partitions), ) return False dup_replica_brokers = [] for p_data in plan[]: dup_replica_brokers = [ broker for broker, count in Counter(p_data[]).items() if count > 1 ] if dup_replica_brokers: _log.error( .format( topic=p_data[], p_id=p_data[], replicas=p_data[], ) ) return False topic_replication_factor = {} for partition_info in plan[]: topic = partition_info[] replication_factor = len(partition_info[]) if topic in list(topic_replication_factor.keys()): if topic_replication_factor[topic] != replication_factor: _log.error( .format(topic=topic), ) return False else: topic_replication_factor[topic] = replication_factor return True
Validate if given plan is valid based on kafka-cluster-assignment protocols. Validate following parameters: - Correct format of plan - Partition-list should be unique - Every partition of a topic should have same replication-factor - Replicas of a partition should have unique broker-set
19,183
def _config_from_url(self):
    config = {
        "name": self._blk.name,
        "options": {}
    }
    for key, value in six.iteritems(request.args):
        if isinstance(value, list) and len(value) == 1:
            config["options"][key] = value[0]
        else:
            config["options"][key] = value
    return config
Manage block configuration from requests.args (url params)
19,184
def set_attributes(self, doc, fields, parent_type=None):
    self.raw_doc = doc
    self.doc = doc_unwrap(doc)
    self.fields = fields
    self.parent_type = parent_type
    self._raw_examples = OrderedDict()
    self._examples = OrderedDict()
    self._fields_by_name = {}

    for field in self.fields:
        if field.name in self._fields_by_name:
            orig_lineno = self._fields_by_name[field.name]._ast_node.lineno
            # Format string reconstructed to match its two arguments.
            raise InvalidSpec("Field '%s' already defined on line %s." %
                              (field.name, orig_lineno),
                              field._ast_node.lineno)
        self._fields_by_name[field.name] = field

    cur_type = self.parent_type
    while cur_type:
        for field in self.fields:
            if field.name in cur_type._fields_by_name:
                lineno = cur_type._fields_by_name[field.name]._ast_node.lineno
                # Format string reconstructed to match its three arguments.
                raise InvalidSpec(
                    "Field '%s' already defined in parent '%s' on line %d." %
                    (field.name, cur_type.name, lineno),
                    field._ast_node.lineno)
        cur_type = cur_type.parent_type

    for field in self.fields:
        for annotation in field.custom_annotations:
            if annotation.annotation_type.namespace.name != self.namespace.name:
                self.namespace.add_imported_namespace(
                    annotation.annotation_type.namespace,
                    imported_annotation_type=True)
            if annotation.namespace.name != self.namespace.name:
                self.namespace.add_imported_namespace(
                    annotation.namespace,
                    imported_annotation=True)

    self._is_forward_ref = False
Fields are specified as a list so that order is preserved for display purposes only. (Might be used for certain serialization formats...) :param str doc: Description of type. :param list(Field) fields: Ordered list of fields for type. :param Optional[Composite] parent_type: The type this type inherits from.
19,185
def get_package_data(filename, mode='rb'):
    # Default mode and the single-quoted literals below are assumed; the
    # originals were lost in extraction.
    if os.path.exists(filename):
        with open(filename, mode=mode) as in_file:
            return in_file.read()
    else:
        parts = os.path.normpath(filename).split(os.sep)
        for part, index in zip(parts, range(len(parts))):
            if part.endswith('.zip'):
                zip_path = os.sep.join(parts[:index + 1])
                member_path = os.sep.join(parts[index + 1:])
                break
        if platform.system() == 'Windows':
            member_path = member_path.replace('\\', '/')
        with zipfile.ZipFile(zip_path) as zip_file:
            return zip_file.read(member_path)
Return the contents of a real file or a zip file.
19,186
def render( self, tag, single, between, kwargs ): out = "<%s" % tag for key, value in list( kwargs.items( ) ): if value is not None: key = key.strip() if key == : key = elif key == : key = out = "%s %s=\"%s\"" % ( out, key, escape( value ) ) else: out = "%s %s" % ( out, key ) if between is not None: out = "%s>%s</%s>" % ( out, between, tag ) else: if single: out = "%s />" % out else: out = "%s>" % out if self.parent is not None: self.parent.content.append( out ) else: return out
Append the actual tags to content.
19,187
def _get_hd(self, hdr_info): hdr_map, variable_length_headers, text_headers = hdr_info with open(self.filename) as fp: total_header_length = 16 while fp.tell() < total_header_length: hdr_id = np.fromfile(fp, dtype=common_hdr, count=1)[0] the_type = hdr_map[hdr_id[]] if the_type in variable_length_headers: field_length = int((hdr_id[] - 3) / the_type.itemsize) current_hdr = np.fromfile(fp, dtype=the_type, count=field_length) key = variable_length_headers[the_type] if key in self.mda: if not isinstance(self.mda[key], list): self.mda[key] = [self.mda[key]] self.mda[key].append(current_hdr) else: self.mda[key] = current_hdr elif the_type in text_headers: field_length = int((hdr_id[] - 3) / the_type.itemsize) char = list(the_type.fields.values())[0][0].char new_type = np.dtype(char + str(field_length)) current_hdr = np.fromfile(fp, dtype=new_type, count=1)[0] self.mda[text_headers[the_type]] = current_hdr else: current_hdr = np.fromfile(fp, dtype=the_type, count=1)[0] self.mda.update( dict(zip(current_hdr.dtype.names, current_hdr))) total_header_length = self.mda[] self.mda.setdefault(, 10) self.mda[] = {: 6378169.00, : 6356583.80, : 35785831.00, : 0.0} self.mda[] = {}
Open the file, read the basic file header info, and populate the mda dictionary.
19,188
def warp_vrt(directory, delete_extra=False, use_band_map=False, overwrite=False,
             remove_bqa=True, return_profile=False):
    # Marker file name and messages are assumed; the original literals were elided.
    if 'resample_meta.txt' in os.listdir(directory) and not overwrite:
        print('{} has already been processed'.format(directory))
        return None

    # Scene ID prefixes for Landsat 8, 7 and 5 (assumed).
    mapping = {'LC8': Landsat8, 'LE7': Landsat7, 'LT5': Landsat5}

    vrt_options = {}
    list_dir = [x[0] for x in os.walk(directory)
                if os.path.basename(x[0])[:3] in mapping.keys()]
    extras = [os.path.join(directory, x) for x in os.listdir(directory)
              if x.endswith('.tif')]

    first = True

    for d in list_dir:
        sat = LandsatImage(d).satellite
        paths = extras
        root = os.path.join(directory, d)
        if os.path.isdir(root):
            for x in os.listdir(root):
                if remove_bqa and x.endswith('BQA.TIF'):
                    try:
                        os.remove(x)
                    except FileNotFoundError:
                        pass
                elif use_band_map:
                    bands = BandMap().selected
                    for y in bands[sat]:
                        if x.endswith('B{}.TIF'.format(y)):
                            paths.append(os.path.join(directory, d, x))
                else:
                    if x.endswith('.TIF') or x.endswith('.tif'):
                        paths.append(os.path.join(directory, d, x))
                if x.endswith('MTL.txt'):
                    mtl = os.path.join(directory, d, x)

        if first:
            # The first scene found defines the target grid; the option keys
            # follow rasterio's WarpedVRT signature.
            landsat = mapping[sat](os.path.join(directory, d))
            dst = landsat.rasterio_geometry
            vrt_options = {'resampling': Resampling.nearest,
                           'crs': dst['crs'],
                           'transform': dst['transform'],
                           'height': dst['height'],
                           'width': dst['width']}

            message = 'Resampled to common grid: {} {}'.format(d, datetime.now())
            with open(os.path.join(directory, 'resample_meta.txt'), 'w') as f:
                f.write(message)
            first = False

        for tif_path in paths:
            print('warping {}'.format(os.path.basename(tif_path)))
            with rasopen(tif_path, 'r') as src:
                with WarpedVRT(src, **vrt_options) as vrt:
                    data = vrt.read()
                    dst_dir, name = os.path.split(tif_path)
                    outfile = os.path.join(dst_dir, name)
                    meta = vrt.meta.copy()
                    meta['driver'] = 'GTiff'
                    with rasopen(outfile, 'w', **meta) as dst:
                        dst.write(data)

        if delete_extra:
            for x in os.listdir(os.path.join(directory, d)):
                x_file = os.path.join(directory, d, x)
                if x_file not in paths:
                    if x[-7:] not in ['MTL.txt', 'QA.TIF']:
                        print('removing {}'.format(x_file))
                        os.remove(x_file)

    if return_profile:
        return dst
Read in image geometry, resample subsequent images to the same grid.

The purpose of this function is to snap many Landsat images to one
geometry. Use Landsat578 to download and unzip them, then run them
through this to get identical geometries for analysis.
:param directory: A directory containing sub-directories of Landsat images.
:param delete_extra: Remove files that were not part of the warped set.
:param use_band_map: Warp only the bands listed in BandMap.
:return: None
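A minimal usage sketch (the directory layout is hypothetical):

# Snap every scene under this path/row directory to the grid of the
# first scene found, removing unwarped leftovers afterwards.
warp_vrt('/data/landsat/path37_row27', delete_extra=True, overwrite=False)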
19,189
def to_fp32(learn:Learner): "Put `learn` back to FP32 precision mode." learn.data.remove_tfm(batch_to_half) for cb in learn.callbacks: if isinstance(cb, MixedPrecision): learn.callbacks.remove(cb) learn.model = learn.model.float() return learn
Put `learn` back to FP32 precision mode.
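A hedged usage sketch, assuming fastai v1 names and a Learner previously switched to mixed precision via to_fp16:

learn = cnn_learner(data, models.resnet18).to_fp16()
# ... train in mixed precision ...
learn = to_fp32(learn)  # remove the MixedPrecision callback, restore FP32 weights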
19,190
def _setTaskParsObj(self, theTask): self._taskParsObj = cfgpars.getObjectFromTaskArg(theTask, self._strict, False) self._taskParsObj.setDebugLogger(self) self._lastSavedState = self._taskParsObj.dict()
Overridden version for ConfigObj. theTask can be either a .cfg file name or a ConfigObjPars object.
19,191
def main_help_text(self, commands_only=False):
    if commands_only:
        usage = sorted(get_commands().keys())
    else:
        usage = [
            "",
            "Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
            "",
            "Available subcommands:",
        ]
        commands_dict = collections.defaultdict(lambda: [])
        for name, app in six.iteritems(get_commands()):
            if app == 'django.core':
                app = 'django'
            else:
                app = app.rpartition('.')[-1]
            commands_dict[app].append(name)
        style = color_style()
        for app in sorted(commands_dict.keys()):
            usage.append("")
            usage.append(style.NOTICE("[%s]" % app))
            for name in sorted(commands_dict[app]):
                usage.append("    %s" % name)
        # Output an extra note if settings are not properly configured.
        if self.settings_exception is not None:
            usage.append(style.NOTICE(
                "Note that only Django core commands are listed "
                "as settings are not properly configured (error: %s)."
                % self.settings_exception))
    return '\n'.join(usage)
Returns the script's main help text, as a string.
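A hedged sketch of how this is reached (names follow Django's management utility; exact construction may vary by version):

from django.core.management import ManagementUtility

utility = ManagementUtility(['manage.py', 'help'])
print(utility.main_help_text())                     # full grouped listing
print(utility.main_help_text(commands_only=True))   # bare command names only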
19,192
def sendfrom(self, user_id, dest_address, amount, minconf=1): amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN) txhash = self.rpc.call("sendfrom", user_id, dest_address, float(str(amount)), minconf ) self.logger.debug("Send %s %s from %s to %s" % (str(amount), self.coin, str(user_id), dest_address)) self.logger.debug("Transaction hash: %s" % txhash) return txhash
Send coins from user's account. Args: user_id (str): this user's unique identifier dest_address (str): address which is to receive coins amount (str or Decimal): amount to send (eight decimal points) minconf (int): ensure the account has a valid balance using this many confirmations (default=1) Returns: str: transaction ID
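A minimal usage sketch; the wallet wrapper, user id, and destination address are hypothetical:

wallet = Wallet()  # hypothetical wrapper exposing sendfrom over the coin RPC
txid = wallet.sendfrom('user42', 'mvBWJFv8Uc84YEnZKBCKVjXDJ8VeWg91c7',
                       '0.50000000', minconf=3)
print('broadcast transaction:', txid)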
19,193
def _has_name(soup_obj):
    try:
        name = soup_obj.name
        if name is None:
            return False
        return True
    except AttributeError:
        return False
Checks if soup_obj is really a soup object or just a string. If it has a name, it is a soup object.
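A quick illustration with BeautifulSoup (the markup is hypothetical):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<p>hello</p>', 'html.parser')
_has_name(soup.p)   # True  -- a Tag has a .name
_has_name('hello')  # False -- a plain string raises AttributeError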
19,194
def do_init_fields(self, flist): for f in flist: self.default_fields[f.name] = copy.deepcopy(f.default) self.fieldtype[f.name] = f if f.holds_packets: self.packetfields.append(f)
Initialize each field of the fields_desc dict
19,195
def get_rank(): if torch.distributed.is_available() and torch.distributed.is_initialized(): rank = torch.distributed.get_rank() else: rank = 0 return rank
Gets distributed rank or returns zero if distributed is not initialized.
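A short usage sketch; it assumes torch.distributed was initialized elsewhere (e.g. by torchrun), and falls back to rank 0 otherwise:

import torch

rank = get_rank()
if rank == 0:
    print('only the rank-0 process logs and saves checkpoints')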
19,196
def make_child_of(self, chunk): if self.is_mapping(): for key, value in self.contents.items(): self.key(key, key).pointer.make_child_of(chunk.pointer) self.val(key).make_child_of(chunk) elif self.is_sequence(): for index, item in enumerate(self.contents): self.index(index).make_child_of(chunk) else: self.pointer.make_child_of(chunk.pointer)
Link one YAML chunk to another. Used when inserting a chunk of YAML into another chunk.
19,197
def _FormatSubjectOrProcessToken(self, token_data):
    ip_address = self._FormatPackedIPv4Address(token_data.ip_address)
    # Key names follow plaso's BSM token formatting conventions (assumed).
    return {
        'aid': token_data.audit_user_identifier,
        'euid': token_data.effective_user_identifier,
        'egid': token_data.effective_group_identifier,
        'uid': token_data.real_user_identifier,
        'gid': token_data.real_group_identifier,
        'pid': token_data.process_identifier,
        'session_id': token_data.session_identifier,
        'terminal_port': token_data.terminal_port,
        'terminal_ip': ip_address}
Formats a subject or process token as a dictionary of values. Args: token_data (bsm_token_data_subject32|bsm_token_data_subject64): AUT_SUBJECT32, AUT_PROCESS32, AUT_SUBJECT64 or AUT_PROCESS64 token data. Returns: dict[str, str]: token values.
19,198
def get_agent_queue(self, queue_id, project=None, action_filter=None):
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if queue_id is not None:
        route_values['queueId'] = self._serialize.url('queue_id', queue_id, 'int')
    query_parameters = {}
    if action_filter is not None:
        query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
    # The REST location GUID was elided in the source and is left as a
    # placeholder; the version string is inferred from the v5_1 preview API.
    response = self._send(http_method='GET',
                          location_id='...',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('TaskAgentQueue', response)
GetAgentQueue. [Preview API] Get information about an agent queue. :param int queue_id: The agent queue to get information about :param str project: Project ID or project name :param str action_filter: Filter by whether the calling user has use or manage permissions :rtype: :class:`<TaskAgentQueue> <azure.devops.v5_1.task-agent.models.TaskAgentQueue>`
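A hedged usage sketch with the azure-devops Python client; the client accessor name, organization URL, and ids are assumptions:

from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

personal_access_token = '<PAT>'  # hypothetical placeholder
connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', personal_access_token))
client = connection.clients.get_task_agent_client()
queue = client.get_agent_queue(queue_id=12, project='MyProject', action_filter='use')
print(queue.name)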
19,199
def sort_index( self, axis=0, level=None, ascending=True, inplace=False, kind="quicksort", na_position="last", sort_remaining=True, by=None, ): axis = self._get_axis_number(axis) if level is not None: new_query_compiler = self._default_to_pandas( "sort_index", axis=axis, level=level, ascending=ascending, inplace=False, kind=kind, na_position=na_position, sort_remaining=sort_remaining, ) return self._create_or_update_from_compiler(new_query_compiler, inplace) if by is not None: warnings.warn( "by argument to sort_index is deprecated, " "please use .sort_values(by=...)", FutureWarning, stacklevel=2, ) if level is not None: raise ValueError("unable to simultaneously sort by and level") return self.sort_values(by, axis=axis, ascending=ascending, inplace=inplace) new_query_compiler = self._query_compiler.sort_index( axis=axis, ascending=ascending, kind=kind, na_position=na_position ) if inplace: self._update_inplace(new_query_compiler=new_query_compiler) else: return self.__constructor__(query_compiler=new_query_compiler)
Sort a DataFrame by one of the indices (columns or index). Args: axis: The axis to sort over. level: The MultiIndex level to sort over. ascending: Ascending or descending inplace: Whether or not to update this DataFrame inplace. kind: How to perform the sort. na_position: Where to position NA on the sort. sort_remaining: On Multilevel Index sort based on all levels. by: (Deprecated) argument to pass to sort_values. Returns: A sorted DataFrame
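A short usage sketch, assuming modin.pandas imported in the usual way:

import modin.pandas as pd

df = pd.DataFrame({'b': [3, 1], 'a': [2, 4]}, index=[10, 2])
df = df.sort_index()        # sort rows by index: 2, 10
df = df.sort_index(axis=1)  # sort columns alphabetically: a, b
df.sort_index(ascending=False, inplace=True)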