Unnamed: 0: int64, range 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
377,800
def _get_init_containers(self):
When using git to retrieve the DAGs, use the GitSync Init Container
377,801
def operator_relocate(self, graph, solution, op_diff_round_digits, anim):
    dm = graph._matrix
    dn = graph._nodes
    while True:
        length_diff_best = 0
        for route in solution.routes():
            if len(route._nodes) == 1:
                if solution._problem._is_aggregated[str(route._nodes[0])]:
                    continue
            tour = [graph._depot] + route._nodes + [graph._depot]
            for target_route in solution.routes():
                if len(target_route._nodes) == 1:
                    if solution._problem._is_aggregated[str(target_route._nodes[0])]:
                        continue
                target_tour = [graph._depot] + target_route._nodes + [graph._depot]
                if route == target_route:
                    continue
                n = len(route._nodes)
                nt = len(target_route._nodes) + 1
                for i in range(0, n):
                    node = route._nodes[i]
                    for j in range(0, nt):
                        if target_route.can_allocate([node]):
                            length_diff = (
                                - dm[dn[tour[i].name()]][dn[tour[i+1].name()]]
                                - dm[dn[tour[i+1].name()]][dn[tour[i+2].name()]]
                                + dm[dn[tour[i].name()]][dn[tour[i+2].name()]]
                                + dm[dn[target_tour[j].name()]][dn[tour[i+1].name()]]
                                + dm[dn[tour[i+1].name()]][dn[target_tour[j+1].name()]]
                                - dm[dn[target_tour[j].name()]][dn[target_tour[j+1].name()]])
                            if length_diff < length_diff_best:
                                length_diff_best = length_diff
                                node_best, target_route_best, j_best = node, target_route, j
        if length_diff_best < 0:
            target_route_best.insert([node_best], j_best)
            solution._routes = [route for route in solution._routes if route._nodes]
            if anim is not None:
                solution.draw_network(anim)
        if round(length_diff_best, op_diff_round_digits) == 0:
            break
    return solution
Applies the Relocate inter-route operator to a solution. Takes every node from every route and calculates the savings when it is inserted into all possible positions in other routes. Insertion is done at the position with the maximum saving, and the procedure starts over again with the newly created graph as input. Stops when no improvement is found. Args ---- graph: :networkx:`NetworkX Graph Obj< >` A NetworkX graph is used. solution: BaseSolution BaseSolution instance op_diff_round_digits: float Precision (floating point digits) for rounding route length differences. *Details*: In some cases when an exchange is performed on two routes with one node each, the difference between the two solutions (before and after the exchange) is not zero. This is due to internal rounding errors of the float type. So that the loop won't break (alternating between these two solutions), we need an additional criterion to avoid this behaviour: a threshold to handle values very close to zero as if they were zero (for a more detailed description of the matter see http://floating-point-gui.de or https://docs.python.org/3.5/tutorial/floatingpoint.html) anim: AnimationDing0 AnimationDing0 object Returns ------- LocalSearchSolution A solution (LocalSearchSolution class) Notes ----- (Inner) loop variables: * i: node that is checked for possible moves (position in the route `tour`, not node name) * j: node that precedes the insert position in target route (position in the route `target_tour`, not node name) Todo ---- * Remove ugly nested loops, convert to more efficient matrix operations
377,802
def from_dict(cls, d):
    assert isinstance(d, dict)
    init_args = dict()
    for key, is_required in cls.dictionary_attributes.iteritems():
        try:
            init_args[key] = d[key]
        except KeyError:
            if is_required:
                raise DictConvertible.Error(cls, missing_key=key)
    return cls(**init_args)
Create an instance from a dictionary.
377,803
def binary(self): lib_name = .format(NATIVE_ENGINE_MODULE) lib_path = os.path.join(safe_mkdtemp(), lib_name) try: with closing(pkg_resources.resource_stream(__name__, lib_name)) as input_fp: engine_version = input_fp.readline().decode().strip() repo_version = input_fp.readline().decode().strip() logger.debug(.format(engine_version, repo_version)) with open(lib_path, ) as output_fp: output_fp.write(input_fp.read()) except (IOError, OSError) as e: raise self.BinaryLocationError( "Error unpacking the native engine binary to path {}: {}".format(lib_path, e), e) return lib_path
Load and return the path to the native engine binary.
377,804
def add_download(self, info, future):
    if self.gpmon.has_plugin('Downloads'):
        obj = self.gpmon.get_plugin('Downloads')
        self.gui_do(obj.add_download, info, future)
    else:
        self.show_error("Please activate the plugin to"
                        " enable download functionality")
Hand off a download to the Downloads plugin, if it is present. Parameters ---------- info : `~ginga.misc.Bunch.Bunch` A bunch of information about the URI as returned by `ginga.util.iohelper.get_fileinfo()` future : `~ginga.misc.Future.Future` A future that represents the future computation to be performed after downloading the file. Resolving the future will trigger the computation.
377,805
def upload_service_version(self, service_zip_file, mode='production',
                           service_version='default', service_id=None, **kwargs):
    files = {'service_file': open(service_zip_file, 'rb')}
    url_suffix = 'services/upload/%s' % mode
    if mode == 'production':
        url_suffix += '/' + service_version
    if service_id:
        url_suffix += '/' + service_id
    if kwargs:
        url_suffix = url_suffix + '?' + urlencode(kwargs)
    return self._call_rest_api('post', url_suffix, files=files,
                               error='Failed to upload service version')
upload_service_version(self, service_zip_file, mode='production', service_version='default', service_id=None, **kwargs) Upload a service version to Opereto :Parameters: * *service_zip_file* (`string`) -- zip file location containing service and service specification * *mode* (`string`) -- production/development (default is production) * *service_version* (`string`) -- Service version * *service_id* (`string`) -- Service Identifier :Keywords args: * *comment* (`string`) -- comment :Example: .. code-block:: python opereto_client.upload_service_version(service_zip_file=zip_action_file+'.zip', mode='production', service_version='111')
377,806
def _justify(texts, max_len, mode='right'):
    if mode == 'left':
        return [x.ljust(max_len) for x in texts]
    elif mode == 'center':
        return [x.center(max_len) for x in texts]
    else:
        return [x.rjust(max_len) for x in texts]
Perform ljust, center, rjust against string or list-like
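A minimal usage sketch (hypothetical inputs; assumes the 'left'/'center' mode strings restored in the code above, with anything else right-justifying):

.. code-block:: python

    _justify(['a', 'bb'], max_len=4, mode='left')    # ['a   ', 'bb  ']
    _justify(['a', 'bb'], max_len=4, mode='center')  # [' a  ', ' bb ']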
377,807
def plot_info(self, dvs): axl, axc, axr = dvs.title() axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID), xy=(0.5, 0.5), xycoords=, ha=, va=, fontsize=18) axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" % (self.cdppr, self.cdpp), xy=(0.5, 0.2), xycoords=, ha=, va=, fontsize=8, color=, fontstyle=) axl.annotate("%s %s%02d: %s" % (self.mission.upper(), self._mission.SEASONCHAR, self.season, self.name), xy=(0.5, 0.5), xycoords=, ha=, va=, fontsize=12, color=) axl.annotate(self.aperture_name if len(self.neighbors) == 0 else "%s, %d neighbors" % (self.aperture_name, len(self.neighbors)), xy=(0.5, 0.2), xycoords=, ha=, va=, fontsize=8, color=, fontstyle=) axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag), xy=(0.5, 0.5), xycoords=, ha=, va=, fontsize=12, color=) if not np.isnan(self.cdppg) and self.cdppg > 0: axr.annotate(r"GP %.3f ppm" % (self.cdppg), xy=(0.5, 0.2), xycoords=, ha=, va=, fontsize=8, color=, fontstyle=)
Plots miscellaneous de-trending information on the data validation summary figure. :param dvs: A :py:class:`dvs.DVS` figure instance
377,808
def rupdate(source, target):
    for k, v in target.iteritems():
        if isinstance(v, Mapping):
            r = rupdate(source.get(k, {}), v)
            source[k] = r
        else:
            source[k] = target[k]
    return source
Recursively update nested dictionaries. See: http://stackoverflow.com/a/3233356/1289080
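A minimal usage sketch with hypothetical inputs (note the code above is Python 2 because of ``iteritems``):

.. code-block:: python

    source = {'a': {'x': 1}, 'b': 2}
    target = {'a': {'y': 3}}
    rupdate(source, target)
    # source is now {'a': {'x': 1, 'y': 3}, 'b': 2}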
377,809
def get_file_metadata(self, secure_data_path, version=None):
    if not version:
        version = "CURRENT"
    payload = {'versionId': str(version)}
    secret_resp = head_with_retry(
        str.join('/', [self.cerberus_url, 'v1/secure-file', secure_data_path]),
        params=payload, headers=self.HEADERS)
    throw_if_bad_response(secret_resp)
    return secret_resp.headers
Get just the metadata for a file, not the content
377,810
def crossref_paths(self):
    return set(
        [address.new(repo=x.repo, path=x.path) for x in self.crossrefs])
Just like crossrefs, but all the targets are munged to :all.
377,811
def add_method(self, loop, callback):
    f, obj = get_method_vars(callback)
    wrkey = (f, id(obj))
    self[wrkey] = obj
    self.event_loop_map[wrkey] = loop
Add a coroutine function Args: loop: The :class:`event loop <asyncio.BaseEventLoop>` instance on which to schedule callbacks callback: The :term:`coroutine function` to add
377,812
def gml_to_geojson(el): if el.get() not in (, None): if el.get() == : return _gmlv2_to_geojson(el) else: raise NotImplementedError("Unrecognized srsName %s" % el.get()) tag = el.tag.replace( % NS_GML, ) if tag == : coordinates = _reverse_gml_coords(el.findtext( % NS_GML))[0] elif tag == : coordinates = _reverse_gml_coords(el.findtext( % NS_GML)) elif tag == : coordinates = [] for ring in el.xpath(, namespaces=NSMAP) \ + el.xpath(, namespaces=NSMAP): coordinates.append(_reverse_gml_coords(ring.text)) elif tag in (, , ): single_type = tag[5:] member_tag = single_type[0].lower() + single_type[1:] + coordinates = [ gml_to_geojson(member)[] for member in el.xpath( % (member_tag, single_type), namespaces=NSMAP) ] else: raise NotImplementedError return { : tag, : coordinates }
Given an lxml Element of a GML geometry, returns a dict in GeoJSON format.
377,813
def convert_entry_to_path(path):
    if not isinstance(path, Mapping):
        raise TypeError("expecting a mapping, received {0!r}".format(path))
    if not any(key in path for key in ["file", "path"]):
        raise ValueError("missing path-like entry in supplied mapping {0!r}".format(path))
    if "file" in path:
        path = vistir.path.url_to_path(path["file"])
    elif "path" in path:
        path = path["path"]
    return path
Convert a pipfile entry to a string
377,814
def AddPathInfo(self, path_info):
    if self._path_type != path_info.path_type:
        message = "Incompatible path types: `%s` and `%s`"
        raise ValueError(message % (self._path_type, path_info.path_type))
    if self._components != path_info.components:
        message = "Incompatible path components: `%s` and `%s`"
        raise ValueError(message % (self._components, path_info.components))
    if path_info.timestamp in self._path_infos:
        raise ValueError("PathInfo with timestamp %r was added before."
                         % path_info.timestamp)
    new_path_info = path_info.Copy()
    if new_path_info.timestamp is None:
        new_path_info.timestamp = rdfvalue.RDFDatetime.Now()
    self._path_infos[new_path_info.timestamp] = new_path_info
Updates existing path information of the path record.
377,815
def main(args=None):
    retcode = 0
    try:
        ci = CliInterface()
        args = ci.parser.parse_args()
        result = args.func(args)
        if result is not None:
            print(result)
        retcode = 0
    except Exception:
        retcode = 1
        traceback.print_exc()
    sys.exit(retcode)
Call the CLI interface and wait for the result.
377,816
def write_json(self, chunk, code=None, headers=None):
    assert chunk is not None
    self.set_header("Content-Type", "application/json; charset=UTF-8")
    if isinstance(chunk, dict) or isinstance(chunk, list):
        chunk = self.json_encode(chunk)
    try:
        chunk = utf8(chunk)
    except Exception:
        app_log.error('%s' % repr(chunk))
        raise_exc_info(sys.exc_info())
    self.write(chunk)
    if code:
        self.set_status(code)
    if headers:
        for k, v in headers.items():
            self.set_header(k, v)
A convenience method that binds `chunk`, `code`, and `headers` together. `chunk` may be a str, dict, or list.
377,817
def distance_to_point(self, point):
    return np.abs(np.dot(self.normal_vector, point) + self.d)
Computes the absolute distance from the plane to the point :param point: Point for which distance is computed :return: Distance between the plane and the point
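Since the distance formula is |n . p + d|, the result is only a true Euclidean distance when ``normal_vector`` is a unit vector; a quick check with the plane z = 0 (hypothetical values):

.. code-block:: python

    import numpy as np

    normal_vector = np.array([0.0, 0.0, 1.0])  # unit normal of the plane z = 0
    d = 0.0
    point = np.array([1.0, 2.0, 3.0])
    np.abs(np.dot(normal_vector, point) + d)   # 3.0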
377,818
def create_file(self, path, message, content, branch=github.GithubObject.NotSet, committer=github.GithubObject.NotSet, author=github.GithubObject.NotSet): assert isinstance(path, (str, unicode)), \ assert isinstance(message, (str, unicode)), \ assert isinstance(content, (str, unicode, bytes)), \ assert branch is github.GithubObject.NotSet \ or isinstance(branch, (str, unicode)), \ assert author is github.GithubObject.NotSet \ or isinstance(author, github.InputGitAuthor), \ assert committer is github.GithubObject.NotSet \ or isinstance(committer, github.InputGitAuthor), \ if atLeastPython3: if isinstance(content, str): content = content.encode() content = b64encode(content).decode() else: if isinstance(content, unicode): content = content.encode() content = b64encode(content) put_parameters = {: message, : content} if branch is not github.GithubObject.NotSet: put_parameters[] = branch if author is not github.GithubObject.NotSet: put_parameters["author"] = author._identity if committer is not github.GithubObject.NotSet: put_parameters["committer"] = committer._identity headers, data = self._requester.requestJsonAndCheck( "PUT", self.url + "/contents/" + urllib.quote(path), input=put_parameters ) return {: github.ContentFile.ContentFile(self._requester, headers, data["content"], completed=False), : github.Commit.Commit(self._requester, headers, data["commit"], completed=True)}
Create a file in this repository. :calls: `PUT /repos/:owner/:repo/contents/:path <http://developer.github.com/v3/repos/contents#create-a-file>`_ :param path: string, (required), path of the file in the repository :param message: string, (required), commit message :param content: string, (required), the actual data in the file :param branch: string, (optional), branch to create the commit on. Defaults to the default branch of the repository :param committer: InputGitAuthor, (optional), if no information is given the authenticated user's information will be used. You must specify both a name and email. :param author: InputGitAuthor, (optional), if omitted this will be filled in with committer information. If passed, you must specify both a name and email. :rtype: { 'content': :class:`ContentFile <github.ContentFile.ContentFile>`:, 'commit': :class:`Commit <github.Commit.Commit>`}
377,819
def do_imageplaceholder(parser, token):
    name, params = parse_placeholder(parser, token)
    return ImagePlaceholderNode(name, **params)
Parses the imageplaceholder template tag.
377,820
def exec_action(module, action, module_parameter=None, action_parameter=None, state_only=False):
    out = __salt__['cmd.run'](
        'eselect --brief --colour=no {0} {1} {2} {3}'.format(
            module, module_parameter or '', action, action_parameter or ''),
        python_shell=False
    )
    out = out.strip().split('\n')
    if out[0].startswith('!!! Error'):
        return False
    if state_only:
        return True
    if not out:
        return False
    if len(out) == 1 and not out[0].strip():
        return False
    return out
Execute an arbitrary action on a module. module name of the module to be executed action name of the module's action to be run module_parameter additional params passed to the defined module action_parameter additional params passed to the defined action state_only don't return any output but only the success/failure of the operation CLI Example (updating the ``php`` implementation used for ``apache2``): .. code-block:: bash salt '*' eselect.exec_action php update action_parameter='apache2'
377,821
def read(self, length, timeout=None):
    data = b""
    while True:
        if timeout is not None:
            (rlist, _, _) = select.select([self._fd], [], [], timeout)
            if self._fd not in rlist:
                break
        try:
            data += os.read(self._fd, length - len(data))
        except OSError as e:
            raise SerialError(e.errno, "Reading serial port: " + e.strerror)
        if len(data) == length:
            break
    return data
Read up to `length` number of bytes from the serial port with an optional timeout. `timeout` can be positive for a timeout in seconds, 0 for a non-blocking read, or negative or None for a blocking read that will block until `length` number of bytes are read. Default is a blocking read. For a non-blocking or timeout-bound read, read() may return data whose length is less than or equal to the requested length. Args: length (int): length in bytes. timeout (int, float, None): timeout duration in seconds. Returns: bytes: data read. Raises: SerialError: if an I/O or OS error occurs.
377,822
def _external_request(self, method, url, *args, **kwargs):
    self.last_url = url
    if url in self.responses.keys() and method == 'get':
        return self.responses[url]
    headers = kwargs.pop('headers', None)
    custom = {'User-Agent': useragent}
    if headers:
        headers.update(custom)
        kwargs['headers'] = headers
    else:
        kwargs['headers'] = custom
    if self.timeout:
        kwargs['timeout'] = self.timeout
    start = datetime.datetime.now()
    response = getattr(requests, method)(url, verify=self.ssl_verify, *args, **kwargs)
    self.total_external_fetch_duration += datetime.datetime.now() - start
    if self.verbose:
        print("Got Response: %s (took %s)" % (url, (datetime.datetime.now() - start)))
    self.last_raw_response = response
    self.check_error(response)
    if method == 'get':
        self.responses[url] = response
    return response
Wrapper around the ``requests`` request methods with the user agent automatically set. Responses to GET requests are also cached.
377,823
def write_eps(matrix, version, out, scale=1, border=None, color=, background=None): import textwrap def write_line(writemeth, content): for line in textwrap.wrap(content, 254): writemeth(line) writemeth() def rgb_to_floats(clr): def to_float(c): if isinstance(c, float): if not 0.0 <= c <= 1.0: raise ValueError( .format(c)) return c return 1 / 255.0 * c if c != 1 else c return tuple([to_float(i) for i in colors.color_to_rgb(clr)]) check_valid_scale(scale) check_valid_border(border) with writable(out, ) as f: writeline = partial(write_line, f.write) border = get_border(version, border) width, height = get_symbol_size(version, scale, border) writeline() writeline(.format(CREATOR)) writeline(.format(time.strftime("%Y-%m-%d %H:%M:%S"))) writeline() writeline(.format(width, height)) writeline() writeline() stroke_color_is_black = colors.color_is_black(color) stroke_color = color if stroke_color_is_black else rgb_to_floats(color) if background is not None: writeline( .format(*rgb_to_floats(background))) if stroke_color_is_black: writeline() if not stroke_color_is_black: writeline(.format(*stroke_color)) if scale != 1: writeline(.format(scale)) writeline() y = get_symbol_size(version, scale=1, border=0)[1] + border - .5 line_iter = matrix_to_lines(matrix, border, y, incby=-1) (x1, y1), (x2, y2) = next(line_iter) coord = [.format(x1, y1, x2 - x1)] append_coord = coord.append x = x2 for (x1, y1), (x2, y2) in line_iter: append_coord(.format(x1 - x, int(y1 - y), x2 - x1)) x, y = x2, y2 writeline(.join(coord)) writeline() writeline()
Serializes the QR Code as EPS document. :param matrix: The matrix to serialize. :param int version: The (Micro) QR code version :param out: Filename or a file-like object supporting to write strings. :param scale: Indicates the size of a single module (default: 1 which corresponds to 1 point (1/72 inch) per module). :param int border: Integer indicating the size of the quiet zone. If set to ``None`` (default), the recommended border size will be used (``4`` for QR Codes, ``2`` for Micro QR Codes). :param color: Color of the modules (default: black). The color can be provided as ``(R, G, B)`` tuple (this method accepts floats as R, G, B values), as web color name (like "red") or in hexadecimal format (``#RGB`` or ``#RRGGBB``). :param background: Optional background color (default: ``None`` = no background color). See `color` for valid values.
377,824
def __insert_action(self, revision):
    revision["patch"]["_id"] = ObjectId(revision.get("master_id"))
    insert_response = yield self.collection.insert(revision.get("patch"))
    if not isinstance(insert_response, str):
        raise DocumentRevisionInsertFailed()
Handle the insert action type. Creates a new document in this collection, which allows you to stage the creation of an object. :param dict revision: The revision dictionary
377,825
def read_envvar_file(name, extension):
    envvar_file = environ.get('{}_config_file'.format(name).upper())
    if envvar_file:
        return loadf(envvar_file)
    else:
        return NotConfigured
Read values from a file provided as a environment variable ``NAME_CONFIG_FILE``. :param name: environment variable prefix to look for (without the ``_CONFIG_FILE``) :param extension: *(unused)* :return: a `.Configuration`, possibly `.NotConfigured`
377,826
def compactor_daemon(conf_file):
    eventlet.monkey_patch()
    conf = config.Config(conf_file=conf_file)
    compactor.compactor(conf)
Run the compactor daemon. :param conf_file: Name of the configuration file.
377,827
def flatten(d, reducer='tuple', inverse=False):
    if isinstance(reducer, str):
        reducer = REDUCER_DICT[reducer]
    flat_dict = {}

    def _flatten(d, parent=None):
        for key, value in six.viewitems(d):
            flat_key = reducer(parent, key)
            if isinstance(value, Mapping):
                _flatten(value, flat_key)
            else:
                if inverse:
                    flat_key, value = value, flat_key
                if flat_key in flat_dict:
                    raise ValueError("duplicated key {}".format(flat_key))
                flat_dict[flat_key] = value

    _flatten(d)
    return flat_dict
Flatten dict-like object. Parameters ---------- d: dict-like object The dict that will be flattened. reducer: {'tuple', 'path', function} (default: 'tuple') The key joining method. If a function is given, the function will be used to reduce. 'tuple': The resulting key will be tuple of the original keys 'path': Use ``os.path.join`` to join keys. inverse: bool (default: False) Whether you want invert the resulting key and value. Returns ------- flat_dict: dict
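A minimal usage sketch, assuming the 'tuple' and 'path' reducer names from the docstring:

.. code-block:: python

    d = {'a': {'b': 1, 'c': 2}}
    flatten(d)                  # {('a', 'b'): 1, ('a', 'c'): 2}
    flatten(d, reducer='path')  # {'a/b': 1, 'a/c': 2} on POSIX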
377,828
def getSimilarTermsForExpression(self, body, contextId=None, posType=None,
                                 getFingerprint=None, startIndex=0,
                                 maxResults=10, sparsity=1.0):
    return self._expressions.getSimilarTermsForExpressionContext(
        self._retina, body, contextId, posType, getFingerprint,
        startIndex, maxResults, sparsity)
Get similar terms for the contexts of an expression Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) contextId, int: The identifier of a context (optional) posType, str: Part of speech (optional) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: list of Term Raises: CorticalioException: if the request was not successful
377,829
def get_data(self, linesep=os.linesep):
    stream = BytesIO()
    self.store(stream, linesep)
    return stream.getvalue()
Serialize the section and return it as bytes :return bytes
377,830
def get(self, key, filepath):
    if not filepath:
        raise RuntimeError("Configuration file not given")
    if not self.__check_config_key(key):
        raise RuntimeError("%s parameter does not exists" % key)
    if not os.path.isfile(filepath):
        raise RuntimeError("%s config file does not exist" % filepath)
    section, option = key.split('.')
    config = configparser.SafeConfigParser()
    config.read(filepath)
    try:
        option = config.get(section, option)
        self.display('config.tmpl', key=key, option=option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        pass
    return CMD_SUCCESS
Get configuration parameter. Reads 'key' configuration parameter from the configuration file given in 'filepath'. Configuration parameter in 'key' must follow the schema <section>.<option> . :param key: key to get :param filepath: configuration file
377,831
def get_applicable_overlays(self, error_bundle): content_paths = self.get_triples(subject=) if not content_paths: return set() chrome_path = content_root_path = for path in content_paths: chrome_name = path[] if not path[]: continue path_location = path[].strip().split()[0] if path_location.startswith(): if not error_bundle.is_nested_package: continue if jar_path != error_bundle.package_stack[0]: continue chrome_path = self._url_chunk_join(chrome_name, package_path) break else: if error_bundle.is_nested_package: continue chrome_path = self._url_chunk_join(chrome_name, ) content_root_path = % path_location.strip() break if not chrome_path: return set() applicable_overlays = set() chrome_path = % self._url_chunk_join(chrome_path + ) for overlay in self.get_triples(subject=): if not overlay[]: error_bundle.error( err_id=(, , ), error=, description= , filename=overlay[], line=overlay[], context=self.context) continue overlay_url = overlay[].split()[0] if overlay_url.startswith(chrome_path): overlay_relative_path = overlay_url[len(chrome_path):] applicable_overlays.add( % self._url_chunk_join(content_root_path, overlay_relative_path)) return applicable_overlays
Given an error bundle, returns the set of overlays that are present in the current package or subpackage.
377,832
def timestamp_to_microseconds(timestamp):
    timestamp_str = datetime.datetime.strptime(timestamp, ISO_DATETIME_REGEX)
    epoch_time_secs = calendar.timegm(timestamp_str.timetuple())
    epoch_time_mus = epoch_time_secs * 1e6 + timestamp_str.microsecond
    return epoch_time_mus
Convert a timestamp string into a microseconds value. :param timestamp: timestamp string :return: time in microseconds
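A worked example, assuming ``ISO_DATETIME_REGEX`` is an ISO-style ``strptime`` format (hypothetical value):

.. code-block:: python

    import calendar
    import datetime

    ts = datetime.datetime.strptime('2019-01-01T00:00:00.500000',
                                    '%Y-%m-%dT%H:%M:%S.%f')
    calendar.timegm(ts.timetuple()) * 1e6 + ts.microsecond
    # 1546300800500000.0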
377,833
def _map_relation(c, language='any'):
    label = c.label(language)
    return {
        'id': c.id,
        'type': c.type,
        'uri': c.uri,
        'label': label.label if label else None
    }
Map related concept or collection, leaving out the relations. :param c: the concept or collection to map :param string language: Language to render the relation's label in :rtype: :class:`dict`
377,834
def fix_deplist(deps):
    deps = [
        ((dep.lower(),)
         if not isinstance(dep, (list, tuple))
         else tuple([dep_entry.lower() for dep_entry in dep]))
        for dep in deps
    ]
    return deps
Turn a dependency list into lowercase, and make sure all entries that are just a string become a tuple of strings
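A minimal usage sketch with hypothetical inputs:

.. code-block:: python

    fix_deplist(['Pillow', ('SQLAlchemy', 'sqlalchemy<2')])
    # [('pillow',), ('sqlalchemy', 'sqlalchemy<2')]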
377,835
def jpl_horizons_ephemeris( log, objectId, mjd, obscode=500, verbose=False): log.debug() if not isinstance(mjd, list): mjd = [str(mjd)] mjd = (" ").join(map(str, mjd)) if not isinstance(objectId, list): objectList = [objectId] else: objectList = objectId keys = ["jd", "solar_presence", "lunar_presence", "ra_deg", "dec_deg", "ra_arcsec_per_hour", "dec_arcsec_per_hour", "apparent_mag", "surface_brightness", "heliocentric_distance", "heliocentric_motion", "observer_distance", "observer_motion", "sun_obs_target_angle", "apparent_motion_relative_to_sun", "sun_target_obs_angle", "ra_3sig_error", "dec_3sig_error", "true_anomaly_angle", "phase_angle", "phase_angle_bisector_long", "phase_angle_bisector_lat"] if verbose == True: order = ["requestId", "objectId", "mjd", "ra_deg", "dec_deg", "ra_3sig_error", "dec_3sig_error", "ra_arcsec_per_hour", "dec_arcsec_per_hour", "apparent_mag", "heliocentric_distance", "heliocentric_motion", "observer_distance", "observer_motion", "phase_angle", "true_anomaly_angle", "surface_brightness", "sun_obs_target_angle", "sun_target_obs_angle", "apparent_motion_relative_to_sun", "phase_angle_bisector_long", "phase_angle_bisector_lat"] else: order = ["requestId", "objectId", "mjd", "ra_deg", "dec_deg", "ra_3sig_error", "dec_3sig_error", "ra_arcsec_per_hour", "dec_arcsec_per_hour", "apparent_mag", "heliocentric_distance", "observer_distance", "phase_angle"] params = { "COMMAND": "", "OBJ_DATA": "", "MAKE_EPHEM": "", "TABLE_TYPE": "", "CENTER": "" % locals(), "TLIST": mjd, "QUANTITIES": "", "REF_SYSTEM": "", "CAL_FORMAT": "", "ANG_FORMAT": "", "APPARENT": "", "TIME_DIGITS": "", "TIME_ZONE": "", "RANGE_UNITS": "", "SUPPRESS_RANGE_RATE": "", "SKIP_DAYLT": "", "EXTRA_PREC": "", "CSV_FORMAT": "", "batch": "1", } resultList = [] paramList = [] for objectId in objectList: requestId = objectId try: thisId = int(objectId) objectId = "%(thisId)s" % locals() except Exception as e: pass theseparams = copy.deepcopy(params) theseparams["COMMAND"] = + objectId + paramList.append(theseparams) rs = [grequests.get("https://ssd.jpl.nasa.gov/horizons_batch.cgi", params=p) for p in paramList] def exception_handler(request, exception): print "Request failed" print exception returns = grequests.map(rs, size=1, exception_handler=exception_handler) for result, requestId in zip(returns, objectList): r = result.content match = re.search( r, r, flags=re.S ) if not match: log.warning( "Horizons could not find a match for `%(requestId)s`" % locals()) try: import requests response = requests.get( url="https://ssd.jpl.nasa.gov/horizons_batch.cgi", params=theseparams, ) content = response.content status_code = response.status_code print response.url except requests.exceptions.RequestException: print() sys.exit(0) objectDict = {} for k in keys: v = None objectDict[k] = v objectDict["objectId"] = requestId + " - NOT FOUND" objectDict["requestId"] = requestId objectDict["mjd"] = None orderDict = collections.OrderedDict({}) for i in order: orderDict[i] = objectDict[i] resultList.append(orderDict) continue horizonsId = match.group(1).replace("(", "").replace(")", "").strip() match = re.search( r, r, flags=re.S ) keys2 = copy.deepcopy(keys) order2 = copy.deepcopy(order) if "S-brt," not in r: keys2.remove("surface_brightness") try: order2.remove("surface_brightness") except: pass lines = match.group(1).split("\n") for line in lines: vals = line.split(",") objectDict = {} for k, v in zip(keys2, vals): v = v.strip().replace("/", "") try: v = float(v) except: pass objectDict[k] = v objectDict["mjd"] = 
objectDict["jd"] - 2400000.5 objectDict["objectId"] = horizonsId objectDict["requestId"] = requestId orderDict = collections.OrderedDict({}) for i in order2: orderDict[i] = objectDict[i] resultList.append(orderDict) log.debug() return resultList
Given a known solar-system object ID (human-readable name, MPC number or MPC packed format) and one or more specific epochs, return the calculated ephemerides **Key Arguments:** - ``log`` -- logger - ``objectId`` -- human-readable name, MPC number or MPC packed format id of the solar-system object or list of names - ``mjd`` -- a single MJD, or a list of up to 10,000 MJDs to generate an ephemeris for - ``obscode`` -- the observatory code for the ephemeris generation. Default **500** (geocentric) - ``verbose`` -- return extra information with each ephemeris **Return:** - ``resultList`` -- a list of ordered dictionaries containing the returned ephemerides **Usage:** To generate an ephemeris for a single epoch, using ATLAS Haleakala as your observatory: .. code-block:: python from rockfinder import jpl_horizons_ephemeris eph = jpl_horizons_ephemeris( log=log, objectId=1, mjd=57916., obscode='T05' ) or to generate an ephemeris for multiple epochs: .. code-block:: python from rockfinder import jpl_horizons_ephemeris eph = jpl_horizons_ephemeris( log=log, objectId="ceres", mjd=[57916.1,57917.234,57956.34523], verbose=True ) Note that by passing `verbose=True` the essential ephemeris data is supplemented with some extra data. It's also possible to pass in an array of object IDs: .. code-block:: python from rockfinder import jpl_horizons_ephemeris eph = jpl_horizons_ephemeris( log=log, objectId=[1,5,03547,"Shikoku","K10B11A"], mjd=[57916.1,57917.234,57956.34523] )
377,836
def multiple_paths_parser(value):
    if isinstance(value, six.string_types):
        value = value.split(os.path.pathsep)
    return value
Parses data_path argument. Parameters ---------- value : str a string of data paths separated by ":". Returns ------- value : list a list of strings indicating each data paths.
377,837
def array_equivalent(left, right, strict_nan=False):
    left, right = np.asarray(left), np.asarray(right)

    # shapes must match
    if left.shape != right.shape:
        return False

    # object arrays can be non-homogeneous
    if is_string_dtype(left) or is_string_dtype(right):
        if not strict_nan:
            # isna considers NaN and None to be equivalent
            return lib.array_equivalent_object(
                ensure_object(left.ravel()), ensure_object(right.ravel()))
        for left_value, right_value in zip(left, right):
            if left_value is NaT and right_value is not NaT:
                return False
            elif isinstance(left_value, float) and np.isnan(left_value):
                if (not isinstance(right_value, float) or
                        not np.isnan(right_value)):
                    return False
            else:
                if left_value != right_value:
                    return False
        return True

    # NaNs can occur in float and complex arrays
    if is_float_dtype(left) or is_complex_dtype(left):
        # empty
        if not (np.prod(left.shape) and np.prod(right.shape)):
            return True
        return ((left == right) | (isna(left) & isna(right))).all()

    elif is_datetimelike_v_numeric(left, right):
        return False

    elif needs_i8_conversion(left) and needs_i8_conversion(right):
        if not is_dtype_equal(left.dtype, right.dtype):
            return False
        left = left.view('i8')
        right = right.view('i8')

    # if we have structured dtypes, compare first
    if (left.dtype.type is np.void or right.dtype.type is np.void):
        if left.dtype != right.dtype:
            return False

    return np.array_equal(left, right)
True if two arrays, left and right, have equal non-NaN elements, and NaNs in corresponding locations. False otherwise. It is assumed that left and right are NumPy arrays of the same dtype. The behavior of this function (particularly with respect to NaNs) is not defined if the dtypes are different. Parameters ---------- left, right : ndarrays strict_nan : bool, default False If True, consider NaN and None to be different. Returns ------- b : bool Returns True if the arrays are equivalent. Examples -------- >>> array_equivalent( ... np.array([1, 2, np.nan]), ... np.array([1, 2, np.nan])) True >>> array_equivalent( ... np.array([1, np.nan, 2]), ... np.array([1, 2, np.nan])) False
377,838
def stop(ctx, yes): user, project_name = get_project_or_local(ctx.obj.get()) group = ctx.obj.get() experiment = ctx.obj.get() if experiment: obj = .format(experiment) elif group: obj = .format(group) else: obj = .format(user, project_name) if not yes and not click.confirm("Are sure you want to stop tensorboard " "for {}".format(obj)): click.echo() sys.exit(1) if experiment: try: PolyaxonClient().experiment.stop_tensorboard( username=user, project_name=project_name, experiment_id=experiment) Printer.print_success() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error(.format(obj)) Printer.print_error(.format(e)) sys.exit(1) elif group: try: PolyaxonClient().experiment_group.stop_tensorboard( username=user, project_name=project_name, group_id=group) Printer.print_success() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error(.format(obj)) Printer.print_error(.format(e)) sys.exit(1) else: try: PolyaxonClient().project.stop_tensorboard( username=user, project_name=project_name) Printer.print_success() except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e: Printer.print_error(.format(obj)) Printer.print_error(.format(e)) sys.exit(1)
Stops the tensorboard deployment for project/experiment/experiment group if it exists. Uses [Caching](/references/polyaxon-cli/#caching) Examples: stopping project tensorboard \b ```bash $ polyaxon tensorboard stop ``` Examples: stopping experiment group tensorboard \b ```bash $ polyaxon tensorboard -g 1 stop ``` Examples: stopping experiment tensorboard \b ```bash $ polyaxon tensorboard -xp 112 stop ```
377,839
def _post_start(self):
    flags = fcntl.fcntl(self._process.stdout, fcntl.F_GETFL)
    fcntl.fcntl(self._process.stdout, fcntl.F_SETFL, flags | os.O_NONBLOCK)
Set stdout to non-blocking. VLC does not always return a newline when reading status, so in order to be lazy and still use the read API without caring about how much output there is, we switch stdout to non-blocking mode and just read a large chunk of data.
377,840
def paintEvent(self, event): x = 1 y = 1 w = self.width() h = self.height() clr_a = QColor(220, 220, 220) clr_b = QColor(190, 190, 190) grad = QLinearGradient() grad.setColorAt(0.0, clr_a) grad.setColorAt(0.6, clr_a) grad.setColorAt(1.0, clr_b) if self.position() & (self.Position.North | self.Position.South): h = self.minimumPixmapSize().height() + 6 if self.position() == self.Position.South: y = self.height() - h grad.setStart(0, y) grad.setFinalStop(0, self.height()) else: grad.setStart(0, 0) grad.setFinalStart(0, h) if self.position() & (self.Position.East | self.Position.West): w = self.minimumPixmapSize().width() + 6 if self.position() == self.Position.West: x = self.width() - w grad.setStart(x, 0) grad.setFinalStop(self.width(), 0) else: grad.setStart(0, 0) grad.setFinalStop(w, 0) with XPainter(self) as painter: painter.fillRect(x, y, w, h, grad) action = self.selectedAction() if action is not None and \ not self.currentAction() and \ not self._animating: for lbl in self.actionLabels(): if lbl.action() != action: continue geom = lbl.geometry() size = lbl.pixmapSize() if self.position() == self.Position.North: x = geom.left() y = 0 w = geom.width() h = size.height() + geom.top() + 2 elif self.position() == self.Position.East: x = 0 y = geom.top() w = size.width() + geom.left() + 2 h = geom.height() painter.setPen(QColor(140, 140, 40)) painter.setBrush(QColor(160, 160, 160)) painter.drawRect(x, y, w, h) break
Paints the background for the dock toolbar. :param event | <QPaintEvent>
377,841
def _drawForeground(self, scene, painter, rect): rect = scene.sceneRect() if scene == self.uiChartVIEW.scene(): self.renderer().drawForeground(painter, rect, self.showGrid(), self.showColumns(), self.showRows())
Draws the foreground for a particular scene within the charts. :param scene | <XChartScene> painter | <QPainter> rect | <QRectF>
377,842
def find_guests(names, path=None):
    ret = {}
    names = names.split(',')
    for data in _list_iter(path=path):
        host, stat = next(six.iteritems(data))
        for state in stat:
            for name in stat[state]:
                if name in names:
                    if host in ret:
                        ret[host].append(name)
                    else:
                        ret[host] = [name]
    return ret
Return a dict of hosts and named guests path path to the container parent default: /var/lib/lxc (system default) .. versionadded:: 2015.8.0
377,843
def parse_stream(self, stream: BytesIO, context=None):
    if context is None:
        context = Context()
    if not isinstance(context, Context):
        context = Context(context)
    try:
        return self._parse_stream(stream, context)
    except Error:
        raise
    except Exception as exc:
        raise ParsingError(str(exc))
Parse some python object from the stream. :param stream: Stream from which the data is read and parsed. :param context: Optional context dictionary.
377,844
def verify(self):
    if self._verify is None:
        from twilio.rest.verify import Verify
        self._verify = Verify(self)
    return self._verify
Access the Verify Twilio Domain :returns: Verify Twilio Domain :rtype: twilio.rest.verify.Verify
377,845
def normalize_so_name(name):
    if "cpython" in name:
        return os.path.splitext(os.path.splitext(name)[0])[0]
    if name == "timemodule.so":
        return "time"
    return os.path.splitext(name)[0]
Handle different types of python installations
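A few illustrative calls showing each branch:

.. code-block:: python

    normalize_so_name('math.cpython-37m-x86_64-linux-gnu.so')  # 'math'
    normalize_so_name('timemodule.so')                         # 'time'
    normalize_so_name('readline.so')                           # 'readline'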
377,846
def detect_branchings(self): logg.m(, self.n_branchings, + ( if self.n_branchings == 1 else )) indices_all = np.arange(self._adata.shape[0], dtype=int) self.detect_branching(segs, segs_tips, segs_connects, segs_undecided, segs_adjacency, iseg, tips3) self.segs = segs self.segs_tips = segs_tips self.segs_undecided = segs_undecided self.segs_adjacency = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=float) self.segs_connects = sp.sparse.lil_matrix((len(segs), len(segs)), dtype=int) for i, seg_adjacency in enumerate(segs_adjacency): self.segs_connects[i, seg_adjacency] = segs_connects[i] for i in range(len(segs)): for j in range(len(segs)): self.segs_adjacency[i, j] = self.distances_dpt[self.segs_connects[i, j], self.segs_connects[j, i]] self.segs_adjacency = self.segs_adjacency.tocsr() self.segs_connects = self.segs_connects.tocsr()
Detect all branchings up to `n_branchings`. Writes Attributes ----------------- segs : np.ndarray List of integer index arrays. segs_tips : np.ndarray List of indices of the tips of segments.
377,847
def chr(self):
    if len(self.exons) == 0:
        sys.stderr.write("WARNING can't return chromosome with nothing here\n")
        return None
    return self._rngs[0].chr
The reference chromosome. Greedily returns the chromosome of the first range in the exon array. :return: chromosome :rtype: string
377,848
def _check_rel(attrs, rel_whitelist, rel_blacklist):
    rels = attrs.get('rel', [None])
    if rel_blacklist:
        for rel in rels:
            if rel in rel_blacklist:
                return False
    if rel_whitelist:
        for rel in rels:
            if rel in rel_whitelist:
                return True
        return False
    return True
Check a link's relations against the whitelist or blacklist. First, this will reject based on blacklist. Next, if there is a whitelist, there must be at least one rel that matches. To explicitly allow links without a rel you can add None to the whitelist (e.g. ['in-reply-to',None])
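A minimal sketch of the four outcomes, assuming the 'rel' attribute key restored in the code above:

.. code-block:: python

    _check_rel({'rel': ['nofollow']}, None, ['nofollow'])        # False: blacklisted
    _check_rel({'rel': ['in-reply-to']}, ['in-reply-to'], None)  # True: whitelisted
    _check_rel({}, ['in-reply-to', None], None)                  # True: no rel matches None
    _check_rel({'rel': ['me']}, ['in-reply-to'], None)           # False: whitelist miss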
377,849
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
    pix1 = pixmapper(pt1)
    pix2 = pixmapper(pt2)
    (width, height) = image_shape(img)
    (ret, pix1, pix2) = cv2.clipLine((0, 0, width, height), pix1, pix2)
    if ret is False:
        if len(self._pix_points) == 0:
            self._pix_points.append(None)
        self._pix_points.append(None)
        return
    cv2.line(img, pix1, pix2, colour, linewidth)
    cv2.circle(img, pix2, linewidth * 2, colour)
    if len(self._pix_points) == 0:
        self._pix_points.append(pix1)
    self._pix_points.append(pix2)
    if self.arrow:
        xdiff = pix2[0] - pix1[0]
        ydiff = pix2[1] - pix1[1]
        if (xdiff * xdiff + ydiff * ydiff) > 400:
            SlipArrow(self.key, self.layer,
                      (int(pix1[0] + xdiff / 2.0), int(pix1[1] + ydiff / 2.0)),
                      self.colour, self.linewidth,
                      math.atan2(ydiff, xdiff) + math.pi / 2.0).draw(img)
draw a line on the image
377,850
def load_models(*chain, **kwargs): def inner(f): @wraps(f) def decorated_function(*args, **kw): permissions = None permission_required = kwargs.get() url_check_attributes = kwargs.get(, []) if isinstance(permission_required, six.string_types): permission_required = set([permission_required]) elif permission_required is not None: permission_required = set(permission_required) result = {} for models, attributes, parameter in chain: if not isinstance(models, (list, tuple)): models = (models,) item = None for model in models: query = model.query url_check = False url_check_paramvalues = {} for k, v in attributes.items(): if callable(v): query = query.filter_by(**{k: v(result, kw)}) else: if in v: first, attrs = v.split(, 1) val = result.get(first) for attr in attrs.split(): val = getattr(val, attr) else: val = result.get(v, kw.get(v)) query = query.filter_by(**{k: val}) if k in url_check_attributes: url_check = True url_check_paramvalues[k] = (v, val) item = query.first() if item is not None: return f(*args, kwargs=kw, **result) else: return f(*args, **result) return decorated_function return inner
Decorator to load a chain of models from the given parameters. This works just like :func:`load_model` and accepts the same parameters, with some small differences. :param chain: The chain is a list of tuples of (``model``, ``attributes``, ``parameter``). Lists and tuples can be used interchangeably. All retrieved instances are passed as parameters to the decorated function :param permission: Same as in :func:`load_model`, except :meth:`~coaster.sqlalchemy.PermissionMixin.permissions` is called on every instance in the chain and the retrieved permissions are passed as the second parameter to the next instance in the chain. This allows later instances to revoke permissions granted by earlier instances. As an example, if a URL represents a hierarchy such as ``/<page>/<comment>``, the ``page`` can assign ``edit`` and ``delete`` permissions, while the ``comment`` can revoke ``edit`` and retain ``delete`` if the current user owns the page but not the comment In the following example, load_models loads a Folder with a name matching the name in the URL, then loads a Page with a matching name and with the just-loaded Folder as parent. If the Page provides a 'view' permission to the current user, the decorated function is called:: @app.route('/<folder_name>/<page_name>') @load_models( (Folder, {'name': 'folder_name'}, 'folder'), (Page, {'name': 'page_name', 'parent': 'folder'}, 'page'), permission='view') def show_page(folder, page): return render_template('page.html', folder=folder, page=page)
377,851
def verify_item_signature(signature_attribute, encrypted_item, verification_key, crypto_config):
    signature = signature_attribute[Tag.BINARY.dynamodb_tag]
    verification_key.verify(
        algorithm=verification_key.algorithm,
        signature=signature,
        data=_string_to_sign(
            item=encrypted_item,
            table_name=crypto_config.encryption_context.table_name,
            attribute_actions=crypto_config.attribute_actions,
        ),
    )
Verify the item signature. :param dict signature_attribute: Item signature DynamoDB attribute value :param dict encrypted_item: Encrypted DynamoDB item :param DelegatedKey verification_key: DelegatedKey to use to calculate the signature :param CryptoConfig crypto_config: Cryptographic configuration
377,852
def get_overall_services_health(self) -> str:
    services_health_status = self.get_services_health()
    health_status = all(status == "Healthy"
                        for status in services_health_status.values())
    if health_status:
        overall_status = "Healthy"
    else:
        overall_status = "Unhealthy"
    return overall_status
Get the overall health of all the services. Returns: str, overall health status
377,853
def _wrap_handling(kwargs):
    _configure_logging(kwargs, extract=False)
    handler = kwargs['handler']
    graceful_exit = kwargs['graceful_exit']
    if graceful_exit:
        sigint_handling.start()
    handler.run()
Starts running a queue handler and creates a log file for the queue.
377,854
def OnApprove(self, event):
    if not self.main_window.safe_mode:
        return
    msg = _(u"You are going to approve and trust a file that\n"
            u"you have not created yourself.\n"
            u"After proceeding, the file is executed.\n \n"
            u"It may harm your system as any program can.\n"
            u"Please check all cells thoroughly before\nproceeding.\n \n"
            u"Proceed and sign this file as trusted?")
    short_msg = _("Security warning")
    if self.main_window.interfaces.get_warning_choice(msg, short_msg):
        self.main_window.grid.actions.leave_safe_mode()
        statustext = _("Safe mode deactivated.")
        post_command_event(self.main_window, self.main_window.StatusBarMsg,
                           text=statustext)
File approve event handler
377,855
def to_dict(self):
    return dict(
        addr=self.addr,
        protocol=self.protocol,
        weight=self.weight,
        last_checked=self.last_checked)
convert detailed proxy info into a dict Returns: dict: A dict with four keys: ``addr``, ``protocol``, ``weight`` and ``last_checked``
377,856
def http_adapter_kwargs():
    return dict(
        max_retries=Retry(
            total=3,
            status_forcelist=[r for r in Retry.RETRY_AFTER_STATUS_CODES if r != 429],
            respect_retry_after_header=False
        )
    )
Provides Zenpy's default HTTPAdapter args for those users providing their own adapter.
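One way these kwargs might be plugged into a session (a sketch; assumes ``Retry`` here is ``urllib3.util.retry.Retry``, which ``requests`` accepts as ``max_retries``):

.. code-block:: python

    import requests
    from requests.adapters import HTTPAdapter

    session = requests.Session()
    session.mount('https://', HTTPAdapter(**http_adapter_kwargs()))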
377,857
def timeline_list(self, id, max_id=None, min_id=None, since_id=None, limit=None):
    id = self.__unpack_id(id)
    return self.timeline('list/{0}'.format(id), max_id=max_id,
                         min_id=min_id, since_id=since_id, limit=limit)
Fetches a timeline containing all the toots by users in a given list. Returns a list of `toot dicts`_.
377,858
def parse(self, request):
    if request.method in ('POST', 'PUT', 'PATCH'):
        content_type = self.determine_content(request)
        if content_type:
            split = content_type.split(';', 1)
            if len(split) > 1:
                content_type = split[0]
            content_type = content_type.strip()
        parser = self._meta.parsers_dict.get(
            content_type, self._meta.default_parser)
        data = parser(self).parse(request)
        return dict() if isinstance(data, basestring) else data
    return dict()
Parse request content. :return dict: parsed data.
377,859
def _match_serializers_by_accept_headers(self, serializers, default_media_type):
    if len(request.accept_mimetypes) == 0:
        return serializers[default_media_type]
    best_quality = -1
    best = None
    has_wildcard = False
    for client_accept, quality in request.accept_mimetypes:
        if quality <= best_quality:
            continue
        if client_accept == '*/*':
            has_wildcard = True
        for s in serializers:
            if s in ['*/*', client_accept] and quality > 0:
                best_quality = quality
                best = s
    if best is None and has_wildcard:
        best = default_media_type
    if best is not None:
        return serializers[best]
    return None
Match serializer by `Accept` headers.
377,860
def _connected(self, sock):
    logger.debug()
    self.protocol = self.factory.build(self.loop)
    self.connection = Connection(self.loop, self.sock, self.addr,
                                 self.protocol, self)
    self.connector = None
    self.connect_deferred.callback(self.protocol)
When the socket is writable, the socket is ready to be used.
377,861
def check_permissions(self, request):
    objs = [None]
    if hasattr(self, 'get_perms_objects'):
        objs = self.get_perms_objects()
    else:
        if hasattr(self, 'get_object'):
            try:
                objs = [self.get_object()]
            except Http404:
                raise
            except:
                pass
        if objs == [None]:
            objs = self.get_queryset()
        if len(objs) == 0:
            objs = [None]
    if (hasattr(self, 'permission_filter_queryset') and
            self.permission_filter_queryset is not False and
            self.request.method == 'GET'):
        if objs != [None]:
            self.perms_filter_queryset(objs)
    else:
        has_perm = check_perms(self.request.user,
                               self.get_permission_required(),
                               objs, self.request.method)
        if not has_perm:
            msg = self.get_permission_denied_message(
                default="Permission denied.")
            if isinstance(msg, Sequence):
                msg = msg[0]
            self.permission_denied(request, message=msg)
Permission checking for DRF.
377,862
def _write_rigid_information(xml_file, rigid_bodies):
    if not all(body is None for body in rigid_bodies):
        xml_file.write('<body>\n')
        for body in rigid_bodies:
            if body is None:
                body = -1
            xml_file.write('{}\n'.format(int(body)))
        xml_file.write('</body>\n')
Write rigid body information. Parameters ---------- xml_file : file object The file object of the hoomdxml file being written rigid_bodies : list, len=n_particles The rigid body that each particle belongs to (-1 for none)
377,863
def is_address_valid(self, address):
    try:
        mbi = self.mquery(address)
    except WindowsError:
        e = sys.exc_info()[1]
        if e.winerror == win32.ERROR_INVALID_PARAMETER:
            return False
        raise
    return True
Determines if an address is a valid user mode address. @type address: int @param address: Memory address to query. @rtype: bool @return: C{True} if the address is a valid user mode address. @raise WindowsError: An exception is raised on error.
377,864
def p_NonAnyType_domString(p):
    p[0] = helper.unwrapTypeSuffix(
        model.SimpleType(type=model.SimpleType.DOMSTRING), p[2])
NonAnyType : DOMString TypeSuffix
377,865
def gather_categories(imap, header, categories=None):
    if categories is None:
        return {"default": DataCategory(set(imap.keys()), {})}
    cat_ids = [header.index(cat) for cat in categories
               if cat in header and "=" not in cat]
    table = OrderedDict()
    conditions = defaultdict(set)
    for i, cat in enumerate(categories):
        if "=" in cat and cat.split("=")[0] in header:
            cat_name = header[header.index(cat.split("=")[0])]
            conditions[cat_name].add(cat.split("=")[1])
    if not cat_ids and not conditions:
        return {"default": DataCategory(set(imap.keys()), {})}
    if cat_ids and not conditions:
        for sid, row in imap.items():
            cat_name = "_".join([row[cid] for cid in cat_ids])
            if cat_name not in table:
                table[cat_name] = DataCategory(set(), {})
            table[cat_name].sids.add(sid)
        return table
    cond_ids = set()
    for k in conditions:
        try:
            cond_ids.add(header.index(k))
        except ValueError:
            continue
    idx_to_test = set(cat_ids).union(cond_ids)
    for sid, row in imap.items():
        if all([row[header.index(c)] in conditions[c] for c in conditions]):
            key = "_".join([row[idx] for idx in idx_to_test])
            try:
                assert key in table.keys()
            except AssertionError:
                table[key] = DataCategory(set(), {})
            table[key].sids.add(sid)
    try:
        assert len(table) > 0
    except AssertionError:
        return {"default": DataCategory(set(imap.keys()), {})}
    else:
        return table
Find the user specified categories in the map and create a dictionary to contain the relevant data for each type within the categories. Multiple categories will have their types combined such that each possible combination will have its own entry in the dictionary. :type imap: dict :param imap: The input mapping file data keyed by SampleID :type header: list :param header: The header line from the input mapping file. This will be searched for the user-specified categories :type categories: list :param categories: The list of user-specified category column name from mapping file :rtype: dict :return: A sorted dictionary keyed on the combinations of all the types found within the user-specified categories. Each entry will contain an empty DataCategory namedtuple. If no categories are specified, a single entry with the key 'default' will be returned
377,866
def active_editor_buffer(self):
    if self.active_tab and self.active_tab.active_window:
        return self.active_tab.active_window.editor_buffer
The active EditorBuffer or None.
377,867
def update_range(self, share_name, directory_name, file_name, data, start_range, end_range, content_md5=None, timeout=None): _validate_not_none(, share_name) _validate_not_none(, file_name) _validate_not_none(, data) request = HTTPRequest() request.method = request.host = self._get_host() request.path = _get_path(share_name, directory_name, file_name) request.query = [ (, ), (, _int_to_str(timeout)), ] request.headers = [ (, _to_str(content_md5)), (, ), ] _validate_and_format_range_headers( request, start_range, end_range) request.body = _get_request_body_bytes_only(, data) self._perform_request(request)
Writes the bytes specified by the request body into the specified range. :param str share_name: Name of existing share. :param str directory_name: The path to the directory. :param str file_name: Name of existing file. :param bytes data: Content of the range. :param int start_range: Start of byte range to use for updating a section of the file. The range can be up to 4 MB in size. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param int end_range: End of byte range to use for updating a section of the file. The range can be up to 4 MB in size. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. :param str content_md5: An MD5 hash of the range content. This hash is used to verify the integrity of the range during transport. When this header is specified, the storage service compares the hash of the content that has arrived with the header value that was sent. If the two hashes do not match, the operation will fail with error code 400 (Bad Request). :param int timeout: The timeout parameter is expressed in seconds.
377,868
def fetchChildren(self):
    assert self._canFetchChildren, "canFetchChildren must be True"
    try:
        childItems = self._fetchAllChildren()
    finally:
        self._canFetchChildren = False
    return childItems
Fetches children. The actual work is done by _fetchAllChildren. Descendant classes should typically override that method instead of this one.
377,869
def set_chassis_datacenter(location, host=None, admin_username=None, admin_password=None):
    return set_general('cfgLocation', 'cfgLocationDatacenter', location,
                       host=host, admin_username=admin_username,
                       admin_password=admin_password)
Set the location of the chassis. location The name of the datacenter to be set on the chassis. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. CLI Example: .. code-block:: bash salt '*' dracr.set_chassis_datacenter datacenter-name host=111.222.333.444 admin_username=root admin_password=secret
377,870
def pca(U, centre=False):
    if centre:
        C = np.mean(U, axis=1, keepdims=True)
        U = U - C
    else:
        C = None
    B, S, _ = np.linalg.svd(U, full_matrices=False, compute_uv=True)
    return B, S**2, C
Compute the PCA basis for columns of input array `U`. Parameters ---------- U : array_like 2D data array with rows corresponding to different variables and columns corresponding to different observations centre : bool, optional (default False) Flag indicating whether to centre data Returns ------- B : ndarray A 2D array representing the PCA basis; each column is a PCA component. B.T is the analysis transform into the PCA representation, and B is the corresponding synthesis transform S : ndarray The eigenvalues of the PCA components C : ndarray or None None if centering is disabled, otherwise the mean of the data matrix subtracted in performing the centering
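A minimal round-trip sketch with random data, illustrating the analysis/synthesis transforms described above:

.. code-block:: python

    import numpy as np

    U = np.random.randn(3, 100)   # 3 variables, 100 observations
    B, S, C = pca(U, centre=True)
    U_pca = B.T.dot(U - C)        # analysis transform
    U_rec = B.dot(U_pca) + C      # synthesis transform
    assert np.allclose(U, U_rec)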
377,871
def CheckVlogArguments(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
        error(filename, linenum, 'runtime/vlog', 5,
              'VLOG() should be used with numeric verbosity level.  '
              'Use LOG() if you want symbolic severity level.')
Checks that VLOG() is only used for defining a logging level. For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and VLOG(FATAL) are not. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. error: The function to call with any errors found.
377,872
def get(self, name, default="", parent_search=False, multikeys_search=False, __settings_temp=None, __rank_recursion=0): if __settings_temp is None: __settings_temp = self.settings if name.startswith("/"): name = name[1:] if name.endswith("/"): name = name[:-1] indice_master = -1 indices_master = re.findall(r"\[\d+\]", name_master) if len(indices_master) > 0: try: indice_master = int(indices_master[0].replace("[", "").replace("]", "")) except: pass __settings_temp = __settings_temp[name_master] else: __settings_temp = __settings_temp[name_master][indice_master] if self.is_json else __settings_temp[name] name_split = name.split("/")[1:] search_path = "/".join(name_split) return_value = self.get( search_path, default, parent_search, multikeys_search, __settings_temp, __rank_recursion + 1) if len(name_split) > 1 and return_value is None: i = len(name_split) while i >= 0: i -= 1 new_search_path = "/".join(name_split[i-len(name_split):]) return_value = self.get( new_search_path, default, parent_search, multikeys_search, __settings_temp, __rank_recursion + 1) if not multikeys_search: break if not return_value is None: break if return_value is None and __rank_recursion == 0: name = name_split[-1] return_value = self.get( name, default, parent_search, multikeys_search, self.settings, 0) if return_value is None: return_value = default return return_value name = name.replace("[{}]".format(indice_master), "") if type(__settings_temp) is str or name not in __settings_temp.keys(): value = __settings_temp[name] else: value = __settings_temp[name][indice_master] if self.is_json else __settings_temp[name] if value is None: if parent_search: return None value = default if isinstance(value, str): value = value.strip() return value
Retrieve a configuration value.

The ``name`` parameter may be either a plain key or a path to the value
(using "/" as the separator).

``parent_search`` indicates whether to search for the value higher in
the hierarchy: if "/document/host/val" returns None, the lookup retries
"/document/val" and then "/val".

``multikeys_search`` indicates whether the lookup of a missing key is
also retried against the parents with multiple keys, e.g.
/graphic/output/logo/enable will also search /graphic/logo/enable.

``__settings_temp`` is the temporary dictionary passed through the
recursion (holding the current sub-configuration).

``__rank_recursion`` is the recursion depth, used to also search from
the root of the path when backtracking.

Example:
    value = self.settings("document/host/val", "my_default")
    value = self.settings("/document/host/val", "my_default")
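Because parts of the method above had to be reconstructed, the following is only a simplified, self-contained sketch of the parent-search idea described in the docstring: when a path misses, drop the parent segment closest to the leaf and retry (a/b/c, then a/c, then c). All names here are illustrative.

def lookup(settings, path, default=None):
    parts = [p for p in path.strip('/').split('/') if p]

    def descend(node, keys):
        for key in keys:
            if not isinstance(node, dict) or key not in node:
                return None
            node = node[key]
        return node

    # Full path first, then progressively remove ancestors above the leaf
    candidates = [parts]
    for i in range(len(parts) - 2, -1, -1):
        candidates.append(parts[:i] + parts[-1:])
    for keys in candidates:
        value = descend(settings, keys)
        if value is not None:
            return value
    return default

settings = {'document': {'val': 42, 'host': {'name': 'srv1'}}}
assert lookup(settings, '/document/host/val') == 42          # via /document/val
assert lookup(settings, 'missing', default='fb') == 'fb'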
377,873
def getCandScoresMapFromSamplesFile(self, profile, sampleFileName):
    wmg = profile.getWmg(True)
    utilities = dict()
    for cand in wmg.keys():
        utilities[cand] = 0.0

    sampleFile = open(sampleFileName)

    # Skip the metadata header and the burn-in samples
    for i in range(0, SAMPLESFILEMETADATALINECOUNT):
        sampleFile.readline()
    for i in range(0, self.burnIn):
        sampleFile.readline()

    numSamples = 0
    for i in range(0, self.n2 * self.n1):
        line = sampleFile.readline()
        # Thin the chain: keep only every n1-th sample
        if i % self.n1 != 0:
            continue
        sample = json.loads(line)
        for cand in wmg.keys():
            utilities[cand] += self.utilityFunction.getUtility([cand], sample)
        numSamples += 1
    sampleFile.close()

    # Average the accumulated utilities over the retained samples
    for key in utilities.keys():
        utilities[key] = utilities[key] / numSamples
    return utilities
Returns a dictionary that associates the integer representation of each
candidate with the Bayesian utilities we approximate from the samples
we generated into a file.

:ivar Profile profile: A Profile object that represents an election
    profile.
:ivar str sampleFileName: The name of the input file containing the
    sample data.
377,874
def update(self):
    current_time = int(time.time())
    last_refresh = 0 if self._last_refresh is None else self._last_refresh
    if current_time >= (last_refresh + self._refresh_rate):
        self.get_cameras_properties()
        self.get_ambient_sensor_data()
        self.get_camera_extended_properties()
        self._attrs = self._session.refresh_attributes(self.name)
        self._attrs = assert_is_dict(self._attrs)
        _LOGGER.debug("Called base station update of camera properties: "
                      "Scan Interval: %s, New Properties: %s",
                      self._refresh_rate, self.camera_properties)
Update object properties.
377,875
def base_prompt(self, prompt):
    if prompt is None:
        return None
    if not self.device.is_target:
        return prompt
    pattern = pattern_manager.pattern(
        self.platform, "prompt_dynamic", compiled=False)
    pattern = pattern.format(prompt="(?P<prompt>.*?)")
    result = re.search(pattern, prompt)
    if result:
        # The appended literal was lost in extraction; "#" is assumed
        # here as the usual prompt terminator
        base = result.group("prompt") + "#"
        self.log("base prompt: {}".format(base))
        return base
    else:
        self.log("Unable to extract the base prompt")
        return prompt
Extract the base prompt pattern.
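A small illustration of the extraction step, with a made-up dynamic-prompt pattern standing in for whatever `pattern_manager` would return for the platform; the prompt string and pattern are hypothetical:

import re

# Hypothetical pattern: a config-mode prompt like "router(config)#"
pattern = r"{prompt}\(config[^\)]*\)#".format(prompt="(?P<prompt>.*?)")
match = re.search(pattern, "router(config-if)#")
assert match and match.group("prompt") == "router"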
377,876
def updatePhysicalInterface(self, physicalInterfaceId, name, schemaId,
                            description=None):
    req = ApiClient.onePhysicalInterfacesUrl % (
        self.host, "/draft", physicalInterfaceId)
    body = {"name": name, "schemaId": schemaId}
    if description:
        body["description"] = description
    resp = requests.put(req, auth=self.credentials,
                        headers={"Content-Type": "application/json"},
                        data=json.dumps(body), verify=self.verify)
    if resp.status_code == 200:
        self.logger.debug("physical interface updated")
    else:
        raise ibmiotf.APIException(
            resp.status_code, "HTTP error updating physical interface", resp)
    return resp.json()
Update a physical interface.

Parameters:
  - physicalInterfaceId (string)
  - name (string)
  - schemaId (string)
  - description (string, optional)

Throws APIException on failure.
377,877
def dump_children(self, f, indent=''):
    for child in self.__order:
        # Indent each child one level deeper than the current section;
        # the two-space step is assumed (the original literal was lost)
        child.dump(f, indent + '  ')
Dump the children of the current section to a file-like object
377,878
def init_app(self, app):
    self.__app = app
    self.__app.before_first_request(self.__setup)
    # The config key name was lost in extraction; 'TRACER_ENABLED' is
    # assumed here
    self.tracer.enabled = self.__app.config.get(
        'TRACER_ENABLED', self.tracer.enabled)
Initialize this class with the specified :class:`flask.Flask` application :param app: The Flask application.
377,879
def sendPassword(self, password):
    # VNC authentication uses the password null-padded (or truncated)
    # to exactly 8 bytes as the DES key
    pw = (password + '\0' * 8)[:8]
    des = RFBDes(pw)
    response = des.encrypt(self._challenge)
    self.transport.write(response)
send password
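The only subtlety above is the key preparation: the protocol wants exactly 8 key bytes, so shorter passwords are null-padded and longer ones truncated. A quick check of that expression:

assert ('secret' + '\0' * 8)[:8] == 'secret\x00\x00'
assert ('longpassword' + '\0' * 8)[:8] == 'longpass'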
377,880
def restart(name, runas=None):
    if enabled(name):
        stop(name, runas=runas)
    start(name, runas=runas)
    return True
Unloads and reloads a launchd service. Raises an error if the service
fails to reload.

:param str name: Service label, file name, or full path
:param str runas: User to run launchctl commands
:return: ``True`` if successful
:rtype: bool

CLI Example:

.. code-block:: bash

    salt '*' service.restart org.cups.cupsd
377,881
def is_streamable(self):
    return bool(
        _number(
            _extract(
                self._request(self.ws_prefix + ".getInfo", True),
                "streamable")))
Returns True if the artist is streamable.
377,882
def connect(self, config):
    if isinstance(config, str):
        conn = dbutil.get_database(config_file=config)
    elif isinstance(config, dict):
        conn = dbutil.get_database(settings=config)
    else:
        raise ValueError("Configuration, '{}', must be a path to "
                         "a configuration file or dict".format(config))
    return conn
Connect to database with given configuration, which may be a dict or a path to a pymatgen-db configuration.
377,883
import numpy as np


def zero_pad(data, count, right=True):
    if len(data) == 0:
        return np.zeros(count)
    elif len(data) < count:
        padded = np.zeros(count)
        if right:
            # Align the data to the right end of the result
            # (zeros are prepended)
            padded[-len(data):] = data
        else:
            padded[:len(data)] = data
        return padded
    else:
        # Already long enough: return the data unchanged as an array
        return np.asanyarray(data)
Parameters
----------
data : (n,) 1D array
    Array to pad
count : int
    Minimum length of result array
right : bool, optional (default True)
    If True, the data is aligned to the right of the result
    (zeros are prepended); otherwise zeros are appended

Returns
-------
padded : (m,) 1D array
    Array where m >= count
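A quick demonstration of the padding behaviour, assuming the function and numpy import above:

import numpy as np

print(zero_pad(np.array([1.0, 2.0]), 4))               # [0. 0. 1. 2.]
print(zero_pad(np.array([1.0, 2.0]), 4, right=False))  # [1. 2. 0. 0.]
print(zero_pad(np.array([1.0, 2.0, 3.0]), 2))          # returned unchanged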
377,884
import numpy as np


def smoothing_window(data, window=[1, 1, 1]):
    # Iterate over every valid window start, inclusive of the last one
    # (the +1 is needed for the docstring example to work)
    for i in range(len(data) - sum(window) + 1):
        start_window_from = i
        start_window_to = i + window[0]
        end_window_from = start_window_to + window[1]
        end_window_to = end_window_from + window[2]
        # If the two border regions agree element-wise, overwrite the
        # whole window (borders and middle) with the border value
        if np.all(data[start_window_from:start_window_to] ==
                  data[end_window_from:end_window_to]):
            data[start_window_from:end_window_to] = data[start_window_from]
    return data
This is a smoothing function so we can fix misclassifications. It runs
a sliding window of the form [border, smoothing, border] over the
signal, and if the border elements are equal it changes the smoothing
elements to match the border. For example, with a window of [2, 1, 2]
the elements [1, 1, 0, 1, 1] are transformed into [1, 1, 1, 1, 1]: if
the border elements match, the middle (smoothing) region is set to the
same value as the border. A runnable demonstration follows below.

:param data array: One-dimensional array.
:param window array: Used to define the [border, smoothing, border] regions.
:return data array: The smoothed version of the original data.
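Reproducing the docstring's example with the function above (note that the array is modified in place):

import numpy as np

signal = np.array([1, 1, 0, 1, 1])
print(smoothing_window(signal, window=[2, 1, 2]))  # [1 1 1 1 1]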
377,885
def from_string(dir_string):
    dir_string = dir_string.upper()
    # The comparison literals were lost in extraction; comparing the
    # upper-cased input against the constant names is assumed
    if dir_string == 'UP':
        return UP
    elif dir_string == 'DOWN':
        return DOWN
    elif dir_string == 'LEFT':
        return LEFT
    elif dir_string == 'RIGHT':
        return RIGHT
    else:
        raise InvalidDirectionError(dir_string)
Returns the correct constant for a given string. @raises InvalidDirectionError
377,886
def dict_has_all_keys(self, keys):
    if not _is_non_string_iterable(keys):
        keys = [keys]
    with cython_context():
        return SArray(_proxy=self.__proxy__.dict_has_all_keys(keys))
Create a boolean SArray by checking the keys of an SArray of
dictionaries. An element of the output SArray is True if the
corresponding input element's dictionary has all of the given keys.
Fails on SArrays whose data type is not ``dict``.

Parameters
----------
keys : list
    A list of key values to check each dictionary against.

Returns
-------
out : SArray
    A SArray of int type, where each element indicates whether the
    input SArray element contains all keys in the input list.

See Also
--------
dict_has_any_keys

Examples
--------
>>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7},
...                         {"this": 2, "are": 1, "cat": 5}])
>>> sa.dict_has_all_keys(["is", "this"])
dtype: int
Rows: 2
[1, 0]
377,887
def auto_model_name_recognize(model_name):
    # Split on '-' (inferred from the docstring example:
    # site-user -> SiteUser) and capitalise the first letter of each part
    name_list = model_name.split('-')
    return ''.join(['%s%s' % (name[0].upper(), name[1:])
                    for name in name_list])
Automatically recognise a model name such as site-user as SiteUser.
:param model_name:
:return:
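Behaviour check for the reconstruction above:

assert auto_model_name_recognize('site-user') == 'SiteUser'
assert auto_model_name_recognize('order') == 'Order'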
377,888
def _describe_tree(self, prefix, with_transform):
    # The literal strings below were stripped in extraction; the
    # ': "name"', ' [Transform]' and ASCII tree-connector forms are
    # assumed
    extra = ': "%s"' % self.name if self.name is not None else ''
    if with_transform:
        extra += (' [%s]' % self.transform.__class__.__name__)
    output = ''
    if len(prefix) > 0:
        output += prefix[:-3]
        output += '  +--'
    output += '%s%s\n' % (self.__class__.__name__, extra)

    n_children = len(self.children)
    for ii, child in enumerate(self.children):
        sub_prefix = prefix + ('   ' if ii + 1 == n_children else '  |')
        output += child._describe_tree(sub_prefix, with_transform)
    return output
Helper function to actually construct the tree
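A self-contained miniature of the same prefix bookkeeping, using a stripped-down node class; the connector strings are the assumed ones from the reconstruction above:

class MiniNode:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

    def describe(self, prefix=''):
        out = ''
        if prefix:
            out += prefix[:-3] + '  +--'
        out += self.name + '\n'
        n = len(self.children)
        for ii, child in enumerate(self.children):
            # Last child gets a blank continuation, others a '|' rail
            out += child.describe(prefix + ('   ' if ii + 1 == n else '  |'))
        return out

root = MiniNode('Scene', [MiniNode('Camera'),
                          MiniNode('Mesh', [MiniNode('Texture')])])
print(root.describe(), end='')
# Scene
#   +--Camera
#   +--Mesh
#      +--Texture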
377,889
def date_added(self, date_added):
    # The date format is taken from the docstring; the field name
    # 'dateAdded' is assumed (the original literal was lost)
    date_added = self._utils.format_datetime(
        date_added, date_format='%Y-%m-%dT%H:%M:%SZ')
    self._data['dateAdded'] = date_added

    request = self._base_request
    request['dateAdded'] = date_added

    return self._tc_requests.update(request, owner=self.owner)
Updates the security label's date_added.

Args:
    date_added: Converted to %Y-%m-%dT%H:%M:%SZ date format
377,890
def _validate_data(self, data: dict):
    log.debug("validating provided data")
    e = best_match(self.validator.iter_errors(data))
    if e:
        custom_error_key = f"error_{e.validator}"
        msg = (
            e.schema[custom_error_key]
            if e.schema.get(custom_error_key)
            else e.message
        )
        raise BadArguments(validation_error=msg, provider=self.name, data=data)
Validates data against provider schema. Raises :class:`~notifiers.exceptions.BadArguments` if relevant :param data: Data to validate :raises: :class:`~notifiers.exceptions.BadArguments`
377,891
def _flatten_dicts(self, dicts):
    # The key passed to .get() was lost in extraction; a per-item
    # sub-dictionary key is assumed here
    list_of_dicts = [d.get('dict') for d in dicts or []]
    return {k: v for d in list_of_dicts for k, v in d.items()}
Flatten a dict :param dicts: Flatten a dict :type dicts: list(dict)
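The merge itself is just a dict comprehension where later dicts win on key collisions; a standalone equivalent, independent of the stripped key name above:

def flatten(list_of_dicts):
    return {k: v for d in list_of_dicts for k, v in d.items()}

merged = flatten([{'a': 1, 'b': 2}, {'b': 3}])
assert merged == {'a': 1, 'b': 3}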
377,892
def process_call(self, i2c_addr, register, value, force=None):
    self._set_address(i2c_addr, force=force)
    msg = i2c_smbus_ioctl_data.create(
        read_write=I2C_SMBUS_WRITE, command=register,
        size=I2C_SMBUS_PROC_CALL
    )
    msg.data.contents.word = value
    ioctl(self.fd, I2C_SMBUS, msg)
    return msg.data.contents.word
Executes a SMBus Process Call, sending a 16-bit value and receiving a
16-bit response.

:param i2c_addr: i2c address
:type i2c_addr: int
:param register: Register to read/write to
:type register: int
:param value: Word value to transmit
:type value: int
:param force:
:type force: Boolean
:rtype: int
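Hypothetical usage, assuming this method lives on an smbus2-style SMBus object and that a device at address 0x48 implements a process-call register; the bus number, address, and register are illustrative:

from smbus2 import SMBus

with SMBus(1) as bus:
    # Send 0x1234 to register 0x01 and read the 16-bit reply in one
    # combined transaction
    reply = bus.process_call(0x48, 0x01, 0x1234)
    print(hex(reply))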
377,893
def _get_enterprise_admin_users_batch(self, start, end):
    # The log message literal was stripped; a generic message is assumed
    LOGGER.info('Fetching new batch of enterprise admin users,'
                ' offsets: %s to %s', start, end)
    return User.objects.filter(
        groups__name=ENTERPRISE_DATA_API_ACCESS_GROUP,
        is_staff=False)[start:end]
Returns a batched queryset of User objects.
377,894
def text_antialias(self, flag=True):
    antialias = pgmagick.DrawableTextAntialias(flag)
    self.drawer.append(antialias)
text antialias :param flag: True or False. (default is True) :type flag: bool
377,895
def show_vcs_output_vcs_nodes_vcs_node_info_node_fabric_state(self, **kwargs):
    config = ET.Element("config")
    show_vcs = ET.Element("show_vcs")
    config = show_vcs
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_fabric_state = ET.SubElement(vcs_node_info, "node-fabric-state")
    node_fabric_state.text = kwargs.pop('node_fabric_state')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
377,896
def read(self, filename, min_length, slide_sec=0, buffer=0):
    self.__filename = filename
    # Skip comment lines; the '\A#' pattern is reconstructed from the
    # 'octothorpe' variable name
    octothorpe = re.compile(r'\A#')
    for line in open(filename):
        if not octothorpe.match(line) and int(line.split()[3]) >= min_length:
            (id, st, en, du) = map(int, line.split())

            # Apply the time slide
            if slide_sec > 0:
                st += slide_sec
            elif slide_sec < 0:
                en += slide_sec
            du -= abs(slide_sec)

            # Shrink the segment by the buffer on each end
            if buffer > 0:
                st += buffer
                en -= buffer
                du -= 2 * abs(buffer)

            x = ScienceSegment(tuple([id, st, en, du]))
            self.__sci_segs.append(x)
Parse the science segments from the segwizard output contained in file.

@param filename: input text file containing a list of science segments
    generated by segwizard.
@param min_length: only append science segments that are longer than
    min_length.
@param slide_sec: Slide each ScienceSegment by::

    delta > 0: [s,e] -> [s+delta,e].
    delta < 0: [s,e] -> [s,e-delta].

@param buffer: shrink the ScienceSegment::

    [s,e] -> [s+buffer,e-buffer]
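The slide and buffer arithmetic on a single [id, start, end, duration] row, spelled out as a standalone helper mirroring the code above:

def adjust(seg, slide_sec=0, buffer=0):
    id_, st, en, du = seg
    if slide_sec > 0:
        st += slide_sec
    elif slide_sec < 0:
        en += slide_sec
    du -= abs(slide_sec)
    if buffer > 0:
        st += buffer
        en -= buffer
        du -= 2 * abs(buffer)
    return (id_, st, en, du)

# A 100 s segment slid forward 10 s, then shrunk by a 5 s buffer
assert adjust((1, 1000, 1100, 100), slide_sec=10, buffer=5) == (1, 1015, 1095, 80)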
377,897
def get(self, prefix, url, schema_version=None):
    if not self.cache_dir:
        return None

    filename = self._get_cache_file(prefix, url)
    try:
        # Cached items are pickled, so the file must be opened in
        # binary mode
        with open(filename, 'rb') as file:
            item = pickle.load(file)
            if schema_version and schema_version != item.schema:
                LOGGER.debug("Cache get %s %s: Wanted schema %d, got %d",
                             prefix, url, schema_version, item.schema)
                return None
            return item
    except FileNotFoundError:
        pass
    except Exception:
        _, msg, _ = sys.exc_info()
        LOGGER.warning("Cache get %s %s failed: %s", prefix, url, msg)
    return None
Get the cached object
377,898
def related_linkage_states_and_scoped_variables(self, state_ids, scoped_variables):
    # The dictionary keys were stripped in extraction;
    # 'enclosed'/'ingoing'/'outgoing' are assumed
    related_transitions = {'enclosed': [], 'ingoing': [], 'outgoing': []}
    for t in self.transitions.values():
        if t.from_state in state_ids and t.to_state in state_ids:
            related_transitions['enclosed'].append(t)
        elif t.to_state in state_ids:
            related_transitions['ingoing'].append(t)
        elif t.from_state in state_ids:
            related_transitions['outgoing'].append(t)

    related_data_flows = {'enclosed': [], 'ingoing': [], 'outgoing': []}
    for df in self.data_flows.values():
        # Enclosed: both endpoints are inside the given states, or one
        # endpoint is one of the given scoped variables of this state
        if df.from_state in state_ids and df.to_state in state_ids or \
                df.from_state in state_ids and self.state_id == df.to_state and df.to_key in scoped_variables or \
                self.state_id == df.from_state and df.from_key in scoped_variables and df.to_state in state_ids:
            related_data_flows['enclosed'].append(df)
        elif df.to_state in state_ids or \
                self.state_id == df.to_state and df.to_key in scoped_variables:
            related_data_flows['ingoing'].append(df)
        elif df.from_state in state_ids or \
                self.state_id == df.from_state and df.from_key in scoped_variables:
            related_data_flows['outgoing'].append(df)

    return related_transitions, related_data_flows
Return the transitions and data flows related to the given states and
scoped variables, each grouped into 'enclosed' (fully inside the given
set), 'ingoing' and 'outgoing' linkage.

:param state_ids: State ids to check the linkage for.
:param scoped_variables: Scoped variable keys of this state.
:return: Tuple of two dicts (related_transitions, related_data_flows).
377,899
def rmfile(path):
    if osp.isfile(path):
        if is_win:
            # Read-only files cannot be removed on Windows; make the
            # file writable first
            os.chmod(path, 0o777)
        os.remove(path)
Ensure file deleted also on *Windows* where read-only files need special treatment.