Columns: `Unnamed: 0` (int64, 0–389k), `code` (string, lengths 26–79.6k), `docstring` (string, lengths 1–46.9k)
17,500
def _lt(field, value, document):
    try:
        return document.get(field, None) < value
    except TypeError:
        return False
Returns True if the value of a document field is less than a given value
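For illustration, a minimal usage sketch (the data here is hypothetical):

    doc = {'age': 30}
    _lt('age', 40, doc)      # True
    _lt('age', 20, doc)      # False
    _lt('missing', 5, doc)   # False: None < 5 raises TypeError, which is caught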
17,501
def data_lookup_method(fields_list, mongo_db_obj, hist, record, lookup_type):
    if hist is None:
        hist = {}
    for field in record:
        if record[field] != '' and record[field] is not None:
            if field in fields_list:
                # The original config-key literal was lost in extraction;
                # 'lookup' is a guess.
                if lookup_type in fields_list[field]['lookup']:
                    field_val_new, hist = DataLookup(
                        fieldVal=record[field],
                        db=mongo_db_obj,
                        lookupType=lookup_type,
                        fieldName=field,
                        histObj=hist)
                    record[field] = field_val_new
    return record, hist
Method to look up the replacement value given a single input value from the same field. :param dict fields_list: Field configurations :param MongoClient mongo_db_obj: MongoDB collection object :param dict hist: existing history object :param dict record: values to validate :param str lookup_type: Type of lookup
17,502
def get_settings(self, section=None, defaults=None):
    section = self._maybe_get_default_name(section)
    if self.filepath is None:
        return {}
    parser = self._get_parser(defaults)
    defaults = parser.defaults()
    try:
        raw_items = parser.items(section)
    except NoSectionError:
        return {}
    local_conf = OrderedDict()
    get_from_globals = {}
    for option, value in raw_items:
        if option.startswith("set "):
            name = option[4:].strip()
            defaults[name] = value
        elif option.startswith("get "):
            name = option[4:].strip()
            get_from_globals[name] = value
            local_conf[name] = None
        else:
            if option in defaults:
                continue
            local_conf[option] = value
    for option, global_option in get_from_globals.items():
        local_conf[option] = defaults[global_option]
    return ConfigDict(local_conf, defaults, self)
Gets a named section from the configuration source. :param section: a :class:`str` representing the section you want to retrieve from the configuration source. If ``None`` this will fallback to the :attr:`plaster.PlasterURL.fragment`. :param defaults: a :class:`dict` that will get passed to :class:`configparser.ConfigParser` and will populate the ``DEFAULT`` section. :return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.
17,503
def nt_commonpath(paths):
    from ntpath import splitdrive
    if not paths:
        # error-message literals below were stripped in extraction and are
        # reconstructed following CPython's ntpath.commonpath
        raise ValueError('commonpath() arg is an empty sequence')
    check_arg_types('commonpath', *paths)
    if isinstance(paths[0], bytes):
        sep, altsep, curdir = b'\\', b'/', b'.'
    else:
        sep, altsep, curdir = '\\', '/', '.'
    drivesplits = [splitdrive(p.replace(altsep, sep).lower()) for p in paths]
    split_paths = [p.split(sep) for d, p in drivesplits]
    try:
        # all paths must be either absolute or relative
        isabs, = set(p[:1] == sep for d, p in drivesplits)
    except ValueError:
        raise ValueError("Can't mix absolute and relative paths")
    drive, path = splitdrive(paths[0].replace(altsep, sep))
    common = path.split(sep)
    common = [c for c in common if c and c != curdir]
    split_paths = [[c for c in s if c and c != curdir] for s in split_paths]
    s_min = min(split_paths)
    s_max = max(split_paths)
    for i, run_c in enumerate(s_min):
        if run_c != s_max[i]:
            common = common[:i]
            break
    else:
        common = common[:len(s_min)]
    prefix = drive + sep if isabs else drive
    return prefix + sep.join(common)
Given a sequence of NT path names, return the longest common sub-path.
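For illustration, a quick check of the reconstructed function (hypothetical inputs, assuming its `check_arg_types` helper is in scope):

    nt_commonpath(['C:\\Windows\\System32', 'C:\\Windows\\Temp'])
    # -> 'C:\\Windows'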
17,504
def _kmedoids_run(X, n_clusters, distance, max_iter, tol, rng):
    membs = np.empty(shape=X.shape[0], dtype=int)
    # the original `method` string literal was stripped in extraction;
    # '' is a placeholder
    centers = kmeans._kmeans_init(X, n_clusters, method='', rng=rng)
    sse_last = 9999.9
    n_iter = 0
    for it in range(1, max_iter):
        membs = kmeans._assign_clusters(X, centers)
        centers, sse_arr = _update_centers(X, membs, n_clusters, distance)
        sse_total = np.sum(sse_arr)
        if np.abs(sse_total - sse_last) < tol:
            n_iter = it
            break
        sse_last = sse_total
    return centers, membs, sse_total, sse_arr, n_iter
Run a single trial of k-medoids clustering on dataset X with the given number of clusters
17,505
def seqids(args):
    p = OptionParser(seqids.__doc__)
    p.add_option("--maxn", default=100, type="int",
                 help="Maximum number of seqids")
    p.add_option("--prefix", help="Seqids must start with")
    p.add_option("--exclude", default="random",
                 help="Seqids should not contain")
    opts, args = p.parse_args(args)

    if len(args) < 1:
        sys.exit(not p.print_help())

    bedfile, = args
    pf = opts.prefix
    exclude = opts.exclude
    bed = Bed(bedfile)
    s = bed.seqids
    if pf:
        s = [x for x in s if x.startswith(pf)]
    if exclude:
        s = [x for x in s if exclude not in x]
    s = s[:opts.maxn]
    print(",".join(s))
%prog seqids bedfile Print out all seqids on one line. Useful for graphics.karyotype.
17,506
def references(self, criteria, publications='publications', column_name='publication_shortname', fetch=False):
    # NOTE: the default values, table/column names and format-string
    # literals in this function were stripped in extraction; the strings
    # below are best-guess reconstructions, not the exact originals.
    data_tables = dict()
    if isinstance(criteria, int) and column_name == 'publication_shortname':
        t = self.query("SELECT * FROM {} WHERE id={}".format(publications, criteria), fmt='table')
        if len(t) > 0:
            criteria = t[column_name][0]
        else:
            print('No records found matching {}'.format(criteria))
            return
    t = self.query("SELECT * FROM sqlite_master WHERE type='table'", fmt='table')
    all_tables = t['name'].tolist()
    for table in ['sources'] + [t for t in all_tables
                                if t not in ['publications', 'sources', 'sqlite_sequence']]:
        # `columns` and the query that populates `data` were also lost in
        # extraction; reconstructed here
        columns = self.query("PRAGMA table_info({})".format(table),
                             fmt='table')['name'].tolist()
        data = self.query("SELECT {} FROM {} WHERE {}='{}'".format(
            ','.join(columns), table, column_name, criteria), fmt='table')
        if data:
            if fetch:
                data_tables[table] = self.query(
                    "SELECT {} FROM {} WHERE {}='{}'".format(
                        ','.join(columns), table, column_name, criteria),
                    fmt='table', fetch=True)
            else:
                data = data[[c.lower() for c in columns]]
                pprint(data, title=table.upper())
    if fetch:
        return data_tables
Do a reverse lookup on the **publications** table. Will return every entry that matches that reference.

Parameters
----------
criteria: int or str
    The id from the PUBLICATIONS table whose data across all tables is to be printed.
publications: str
    Name of the publications table
column_name: str
    Name of the reference column in other tables
fetch: bool
    Return the results.

Returns
-------
data_tables: dict
    Returns a dictionary of astropy tables with the table name as the keys.
17,507
def _session():
    # NOTE: the config key, log messages and error messages below were
    # stripped in extraction and are reconstructed as plausible guesses.
    profile_name = _cfg('profile_name')
    if profile_name:
        log.info('Using the %s aws profile.', profile_name)
    else:
        log.info('aws_kms:profile_name is not set in the salt configuration; falling back on the default aws profile.')
    try:
        return boto3.Session(profile_name=profile_name)
    except botocore.exceptions.ProfileNotFound as orig_exc:
        err_msg = 'Boto3 could not find the "{}" profile.'.format(
            profile_name or 'default')
        config_error = salt.exceptions.SaltConfigurationError(err_msg)
        six.raise_from(config_error, orig_exc)
    except botocore.exceptions.NoRegionError as orig_exc:
        err_msg = ('Boto3 was unable to determine the AWS endpoint region '
                   'using the {} profile.').format(profile_name or 'default')
        config_error = salt.exceptions.SaltConfigurationError(err_msg)
        six.raise_from(config_error, orig_exc)
Return the boto3 session to use for the KMS client. If aws_kms:profile_name is set in the salt configuration, use that profile. Otherwise, fall back on the default aws profile. We use the boto3 profile system to avoid having to duplicate individual boto3 configuration settings in salt configuration.
17,508
def framesToFrameRanges(frames, zfill=0):
    _build = FrameSet._build_frange_part
    curr_start = None
    curr_stride = None
    curr_frame = None
    last_frame = None
    curr_count = 0
    for curr_frame in frames:
        if curr_start is None:
            curr_start = curr_frame
            last_frame = curr_frame
            curr_count += 1
            continue
        if curr_stride is None:
            curr_stride = abs(curr_frame - curr_start)
        new_stride = abs(curr_frame - last_frame)
        if curr_stride == new_stride:
            last_frame = curr_frame
            curr_count += 1
        elif curr_count == 2 and curr_stride != 1:
            yield _build(curr_start, curr_start, None, zfill)
            curr_start = last_frame
            curr_stride = new_stride
            last_frame = curr_frame
        else:
            yield _build(curr_start, last_frame, curr_stride, zfill)
            curr_stride = None
            curr_start = curr_frame
            last_frame = curr_frame
            curr_count = 1
    if curr_count == 2 and curr_stride != 1:
        yield _build(curr_start, curr_start, None, zfill)
        yield _build(curr_frame, curr_frame, None, zfill)
    else:
        yield _build(curr_start, curr_frame, curr_stride, zfill)
Converts a sequence of frames to a series of padded frame range strings. Args: frames (collections.Iterable): sequence of frames to process zfill (int): width for zero padding Yields: str:
17,509
def remove_comments_and_docstrings(source):
    io_obj = io.StringIO(source)
    out = ""
    prev_toktype = tokenize.INDENT
    last_lineno = -1
    last_col = 0
    for tok in tokenize.generate_tokens(io_obj.readline):
        token_type = tok[0]
        token_string = tok[1]
        start_line, start_col = tok[2]
        end_line, end_col = tok[3]
        if start_line > last_lineno:
            last_col = 0
        if start_col > last_col:
            out += (" " * (start_col - last_col))
        if token_type == tokenize.COMMENT:
            pass
        elif token_type == tokenize.STRING:
            if prev_toktype != tokenize.INDENT:
                out += token_string
        else:
            out += token_string
        prev_toktype = token_type
        last_col = end_col
        last_lineno = end_line
    return out
Returns *source* minus comments and docstrings.

.. note:: Uses Python's built-in tokenize module to great effect.

Example::

    def noop(): # This is a comment
        '''
        Does nothing.
        '''
        pass # Don't do anything

Will become::

    def noop():
        pass
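For illustration, a minimal usage sketch (self-contained apart from the function above; the input string is hypothetical):

    import io
    import tokenize

    src = (
        "def noop():  # a comment\n"
        "    '''docstring'''\n"
        "    pass\n"
    )
    print(remove_comments_and_docstrings(src))
    # prints roughly:
    # def noop():
    #
    #     pass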
17,510
def priority_run_or_raise(self, hosts, function, attempts=1):
    return self._run(hosts, function,
                     self.workqueue.priority_enqueue_or_raise, False,
                     attempts)
Like priority_run(), but if a host is already in the queue, the existing host is moved to the top of the queue instead of enqueuing the new one. :type hosts: string|list(string)|Host|list(Host) :param hosts: A hostname or Host object, or a list of them. :type function: function :param function: The function to execute. :type attempts: int :param attempts: The number of attempts on failure. :rtype: object :return: A task object, or None if all hosts were duplicates.
17,511
def pil_image(self):
    if not self._pil_image:
        if self._format == "SVG":
            raise VectorImageError("can't rasterise vector images")
        self._pil_image = PIL.Image.open(StringIO(self.contents))
    return self._pil_image
A :class:`PIL.Image.Image` instance containing the image data.
17,512
def unique_list_dicts(dlist, key):
    return list(dict((val[key], val) for val in dlist).values())
Return a list of dictionaries filtered to entries unique by the given key. :param dlist: :param key: :return list:
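A short usage sketch (hypothetical data); note that later duplicates win, because dict() keeps the last value seen for each key:

    rows = [{'id': 1, 'v': 'a'}, {'id': 2, 'v': 'b'}, {'id': 1, 'v': 'c'}]
    unique_list_dicts(rows, 'id')
    # -> [{'id': 1, 'v': 'c'}, {'id': 2, 'v': 'b'}]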
17,513
def _put_or_post_multipart(self, method, url, data):
    # NOTE: Python 2 code (httplib, file type). Header names, version
    # strings and the error format below were stripped in extraction and
    # are reconstructed as plausible guesses.
    fields = []
    files = []
    for key, value in data.items():
        if type(value) == file:
            files.append((key, value.name, value.read()))
        else:
            fields.append((key, value))
    content_type, body = _encode_multipart_formdata(fields, files)
    if self.parsed_endpoint.scheme == 'https':
        h = httplib.HTTPS(self.parsed_endpoint.netloc)
    else:
        h = httplib.HTTP(self.parsed_endpoint.netloc)
    h.putrequest(method, url)
    h.putheader('Content-Type', content_type)
    h.putheader('Content-Length', str(len(body)))
    h.putheader('Accept', 'application/json')
    h.putheader('User-Agent', USER_AGENT)
    h.putheader(API_TOKEN_HEADER_NAME, self.api_token)
    if self.api_version in ['1', '2']:  # version literals lost; placeholders
        h.putheader(API_VERSION_HEADER_NAME, self.api_version)
    h.endheaders()
    h.send(body)
    errcode, errmsg, headers = h.getreply()
    if errcode not in [200, 202]:
        raise IOError('%s to %s failed with HTTP %s: %s'
                      % (method, url, errcode, h.file.read()))
    return json.loads(h.file.read())
Encodes the data as a multipart form and PUTs or POSTs it to the url. The response is parsed as JSON and the resulting data structure is returned.
17,514
def get_planes(im, squeeze=True):
    x, y, z = (sp.array(im.shape) / 2).astype(int)
    planes = [im[x, :, :], im[:, y, :], im[:, :, z]]
    if not squeeze:
        imx = planes[0]
        planes[0] = sp.reshape(imx, [1, imx.shape[0], imx.shape[1]])
        imy = planes[1]
        planes[1] = sp.reshape(imy, [imy.shape[0], 1, imy.shape[1]])
        imz = planes[2]
        planes[2] = sp.reshape(imz, [imz.shape[0], imz.shape[1], 1])
    return planes
r""" Extracts three planar images from the volumetric image, one for each principle axis. The planes are taken from the middle of the domain. Parameters ---------- im : ND-array The volumetric image from which the 3 planar images are to be obtained squeeze : boolean, optional If True (default) the returned images are 2D (i.e. squeezed). If False, the images are 1 element deep along the axis where the slice was obtained. Returns ------- planes : list A list of 2D-images
17,515
def pathparse(value, sep=os.pathsep, os_sep=os.sep):
    # NOTE: the escape-sequence literals below were garbled in extraction
    # (only the '<ESCAPE-SQUOTE>' token survived); they are reconstructed
    # as plausible guesses.
    escapes = []
    normpath = ntpath.normpath if os_sep == '\\' else posixpath.normpath
    if '\\' not in (os_sep, sep):
        escapes.extend((
            ('\\\\', '<ESCAPE-ESCAPE>', '\\'),
            ('\\"', '<ESCAPE-DQUOTE>', '"'),
            ("\\'", '<ESCAPE-SQUOTE>', "'"),
            ('\\%s' % sep, '<ESCAPE-PATHSEP>', sep),
        ))
    for original, escape, unescape in escapes:
        value = value.replace(original, escape)
    for part in pathsplit(value, sep=sep):
        if part[-1:] == os_sep and part != os_sep:
            part = part[:-1]
        for original, escape, unescape in escapes:
            part = part.replace(escape, unescape)
        yield normpath(fsdecode(part))
Get environment PATH directories as list. This function cares about splitting, escapes and normalization of paths across OSes. :param value: path string, as given by os.environ['PATH'] :type value: str :param sep: PATH separator, defaults to os.pathsep :type sep: str :param os_sep: OS filesystem path separator, defaults to os.sep :type os_sep: str :yields: every path :ytype: str
17,516
def Matches(self, file_entry):
    if not self._date_time_ranges:
        return None
    for date_time_range in self._date_time_ranges:
        time_attribute = self._TIME_VALUE_MAPPINGS.get(
            date_time_range.time_value, None)
        if not time_attribute:
            continue
        timestamp = getattr(file_entry, time_attribute, None)
        if timestamp is None:
            continue
        if (date_time_range.start_date_time is not None and
                timestamp < date_time_range.start_date_time):
            return False
        if (date_time_range.end_date_time is not None and
                timestamp > date_time_range.end_date_time):
            return False
    return True
Compares the file entry against the filter. Args: file_entry (dfvfs.FileEntry): file entry to compare. Returns: bool: True if the file entry matches the filter, False if not or None if the filter does not apply.
17,517
def get_device_model(self) -> str:
    # argument literals reconstructed (an adb getprop call); the originals
    # were stripped in extraction
    output, _ = self._execute(
        '-s', self.device_sn, 'shell', 'getprop', 'ro.product.model')
    return output.strip()
Show device model.
17,518
def from_element(cls, element):
    # NOTE: the attribute-name and key literals below were stripped in
    # extraction; they are reconstructed following the DIDL-Lite <res>
    # element attributes.
    def _int_helper(name):
        result = element.get(name)
        if result is not None:
            try:
                return int(result)
            except ValueError:
                raise DIDLMetadataError(
                    'Could not convert {0} to an integer'.format(name))
        else:
            return None

    content = {}
    # required
    content['protocol_info'] = element.get('protocolInfo')
    if content['protocol_info'] is None:
        raise DIDLMetadataError(
            'Could not create Resource from Element: protocolInfo not found (required).')
    # optional
    content['import_uri'] = element.get('importUri')
    content['size'] = _int_helper('size')
    content['duration'] = element.get('duration')
    content['bitrate'] = _int_helper('bitrate')
    content['sample_frequency'] = _int_helper('sampleFrequency')
    content['bits_per_sample'] = _int_helper('bitsPerSample')
    content['nr_audio_channels'] = _int_helper('nrAudioChannels')
    content['resolution'] = element.get('resolution')
    content['color_depth'] = _int_helper('colorDepth')
    content['protection'] = element.get('protection')
    content['uri'] = element.text
    return cls(**content)
Set the resource properties from a ``<res>`` element. Args: element (~xml.etree.ElementTree.Element): The ``<res>`` element
17,519
def find_files(root, pattern):
    results = []
    for base, dirs, files in os.walk(root):
        matched = fnmatch.filter(files, pattern)
        results.extend(os.path.join(base, f) for f in matched)
    return results
Find all files matching the glob pattern recursively :param root: string :param pattern: string :return: list of matching file paths, joined onto root
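For illustration, a hypothetical call that gathers every Python file under src/:

    find_files('src', '*.py')
    # -> ['src/app.py', 'src/util/io.py', ...]  (paths are joined onto the root)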
17,520
def GetAvailableClaimTotal(self):
    coinrefs = [coin.Reference for coin in self.GetUnclaimedCoins()]
    bonus = Blockchain.CalculateBonusIgnoreClaimed(coinrefs, True)
    return bonus
Gets the total amount of Gas that this wallet is able to claim at a given moment. Returns: Fixed8: the amount of Gas available to claim as a Fixed8 number.
17,521
def update(self, campaign_id, budget, nick=None):
    # NOTE: the TOP request method name, request keys and field list were
    # stripped in extraction; the literals below are best-guess
    # reconstructions (the method name follows the docstring).
    request = TOPRequest('xxxxx.xxxxx.campaign.budget.update')
    request['campaign_id'] = campaign_id
    request['budget'] = budget
    if nick is not None:
        request['nick'] = nick
    self.create(self.execute(request),
                fields=['success', 'result', 'result_code', 'result_message'],
                models={'result': CampaignBudget})
    return self.result
xxxxx.xxxxx.campaign.budget.update
===================================
Update a campaign's daily budget limit.
17,522
def convert_date_to_iso(value):
    date_formats = ["%d %b %Y", "%Y/%m/%d"]
    for dformat in date_formats:
        try:
            date = datetime.strptime(value, dformat)
            return date.strftime("%Y-%m-%d")
        except ValueError:
            pass
    return value
Convert a date-value to the ISO date standard.
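A quick sketch of the two accepted formats (hypothetical values):

    convert_date_to_iso('29 Mar 2021')   # -> '2021-03-29'
    convert_date_to_iso('2021/03/29')    # -> '2021-03-29'
    convert_date_to_iso('tomorrow')      # -> 'tomorrow' (returned unchanged)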
17,523
def rotate(self, angle, axis, point=None, radians=False):
    q = Quaternion.angle_and_axis(angle=angle, axis=axis, radians=radians)
    self._vector = q.rotate_vector(v=self._vector, point=point)
    return
Rotates `Atom` by `angle`.

Parameters
----------
angle : float
    Angle that `Atom` will be rotated.
axis : 3D Vector (tuple, list, numpy.array)
    Axis about which the `Atom` will be rotated.
point : 3D Vector (tuple, list, numpy.array), optional
    Point that the `axis` lies upon. If `None` then the origin is used.
radians : bool, optional
    True if `angle` is defined in radians, False if in degrees.
17,524
def threshold_monitor_hidden_threshold_monitor_security_pause(self, **kwargs):
    config = ET.Element("config")
    threshold_monitor_hidden = ET.SubElement(
        config, "threshold-monitor-hidden",
        xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
    security = ET.SubElement(threshold_monitor, "security")
    pause = ET.SubElement(security, "pause")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
17,525
def create_instance(self, port):
    instance_type = self.get_instance_type(port)
    if not instance_type:
        return
    # the port-key literal was stripped in extraction; 'id' is a guess
    i_res = MechResource(port['id'], instance_type, a_const.CREATE)
    self.provision_queue.put(i_res)
Enqueue instance create
17,526
def _flatten_list(self, data):
    if data is None:
        return
    if isinstance(data, types.StringTypes):
        return data
    elif isinstance(data, (list, tuple)):
        # the join-separator literal was stripped in extraction; ' ' is a guess
        return ' '.join(self._flatten_list(x) for x in data)
Flattens nested lists into strings.
17,527
def get_geophysical_variables(ds):
    parameters = []
    for variable in ds.variables:
        if is_geophysical(ds, variable):
            parameters.append(variable)
    return parameters
Returns a list of variable names for the variables detected as geophysical variables. :param netCDF4.Dataset ds: An open netCDF dataset
17,528
def find_sparse_mode(self, core, additional, scaling, weights={}):
    if len(core) == 0:
        return
    self.lp7(core)
    k = set()
    for reaction_id in core:
        flux = self.get_flux(reaction_id)
        if self.is_flipped(reaction_id):
            flux *= -1
        if flux >= self._epsilon:
            k.add(reaction_id)
    if len(k) == 0:
        return
    self.lp10(k, additional, weights)
    for reaction_id in self._model.reactions:
        flux = self.get_flux(reaction_id)
        if abs(flux) >= self._epsilon / scaling:
            yield reaction_id
Find a sparse mode containing reactions of the core subset. Return an iterator of the support of a sparse mode that contains as many reactions from core as possible, and as few reactions from additional as possible (approximately). A dictionary of weights can be supplied which gives further penalties for including specific additional reactions.
17,529
def build_task(self, name):
    try:
        self._gettask(name).value = (
            self._gettask(name).task.resolve_and_build())
    except TaskExecutionException as e:
        perror(e.header, indent="+0")
        perror(e.message, indent="+4")
        self._gettask(name).value = e.payload
    except Exception as e:
        # format string and indent literal reconstructed; the originals
        # were stripped in extraction
        perror("error evaluating target %s (%s)"
               % (name, type(self._gettask(name).task)))
        perror(traceback.format_exc(e), indent="+4")
        self._gettask(name).value = None
    self._gettask(name).last_build_time = time.time()
Builds a task by name, resolving any dependencies on the way
17,530
def on_backward_begin(self, last_loss: Rank0Tensor, **kwargs: Any) -> Rank0Tensor:
    "Scale gradients up by `self.loss_scale` to prevent underflow."
    ret_loss = last_loss * self.loss_scale
    # the dict-key literal was stripped in extraction; 'last_loss' follows
    # the fastai callback convention
    return {'last_loss': ret_loss}
Scale gradients up by `self.loss_scale` to prevent underflow.
17,531
def CreateFromDocument(xml_text, default_namespace=None, location_base=None):
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement,
                             default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(
        fallback_namespace=default_namespace,
        location_base=location_base
    )
    handler = saxer.getContentHandler()
    xmld = xml_text
    if isinstance(xmld, pyxb.utils.six.text_type):
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
Parse the given XML and use the document element to create a Python instance. @param xml_text An XML document. This should be data (Python 2 str or Python 3 bytes), or a text (Python 2 unicode or Python 3 str) in the L{pyxb._InputEncoding} encoding. @keyword default_namespace The L{pyxb.Namespace} instance to use as the default namespace where there is no default namespace in scope. If unspecified or C{None}, the namespace of the module containing this function will be used. @keyword location_base: An object to be recorded as the base of all L{pyxb.utils.utility.Location} instances associated with events and objects handled by the parser. You might pass the URI from which the document was obtained.
17,532
def within(self, x, ctrs, kdtree=None):
    if kdtree is None:
        idxs = np.where(lalg.norm(ctrs - x, axis=1) <= self.radius)[0]
    else:
        idxs = kdtree.query_ball_point(x, self.radius, p=2.0, eps=0)
    return idxs
Check which balls `x` falls within. Uses a K-D Tree to perform the search if provided.
17,533
def minute_frame_to_session_frame(minute_frame, calendar):
    how = OrderedDict((c, _MINUTE_TO_SESSION_OHCLV_HOW[c])
                      for c in minute_frame.columns)
    labels = calendar.minute_index_to_session_labels(minute_frame.index)
    return minute_frame.groupby(labels).agg(how)
Resample a DataFrame with minute data into the frame expected by a BcolzDailyBarWriter.

Parameters
----------
minute_frame : pd.DataFrame
    A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `dt` (minute dts)
calendar : trading_calendars.trading_calendar.TradingCalendar
    A TradingCalendar on which session labels to resample from minute to session.

Return
------
session_frame : pd.DataFrame
    A DataFrame with the columns `open`, `high`, `low`, `close`, `volume`, and `day` (datetime-like).
17,534
def get_all_snapshots(self):
    data = self.get_data("snapshots/")
    return [
        Snapshot(token=self.token, **snapshot)
        for snapshot in data['snapshots']
    ]
This method returns a list of all Snapshots.
17,535
def actions_for_project(self, project):
    project.cflags = ["-O3", "-fno-omit-frame-pointer"]
    project.runtime_extension = time.RunWithTime(
        run.RuntimeExtension(project, self))
    return self.default_runtime_actions(project)
Compile & Run the experiment with -O3 enabled.
17,536
def update(self, *args, **kwargs):
    self._call_helper("Updating", self.real.update, *args, **kwargs)
This function updates a checkout of source code.
17,537
def makeEndOfPrdvFunc(self, EndOfPrdvP):
    vLvlNext = self.vFuncNext(self.mLvlNext, self.pLvlNext)
    EndOfPrdv = self.DiscFacEff * np.sum(vLvlNext * self.ShkPrbs_temp, axis=0)
    EndOfPrdvNvrs = self.uinv(EndOfPrdv)
    EndOfPrdvNvrsP = EndOfPrdvP * self.uinvP(EndOfPrdv)
    EndOfPrdvNvrs = np.concatenate(
        (np.zeros((self.pLvlGrid.size, 1)), EndOfPrdvNvrs), axis=1)
    # the attribute-name literal in this hasattr check was stripped in
    # extraction; 'BoroCnstNat_list' is a placeholder guess
    if hasattr(self, 'BoroCnstNat_list'):
        EndOfPrdvNvrsP = np.concatenate(
            (np.zeros((self.pLvlGrid.size, 1)), EndOfPrdvNvrsP), axis=1)
    else:
        EndOfPrdvNvrsP = np.concatenate(
            (np.reshape(EndOfPrdvNvrsP[:, 0], (self.pLvlGrid.size, 1)),
             EndOfPrdvNvrsP), axis=1)
    aLvl_temp = np.concatenate(
        (np.reshape(self.BoroCnstNat(self.pLvlGrid), (self.pLvlGrid.size, 1)),
         self.aLvlNow), axis=1)
    EndOfPrdvNvrsFunc_list = []
    for p in range(self.pLvlGrid.size):
        EndOfPrdvNvrsFunc_list.append(
            CubicInterp(aLvl_temp[p, :] - self.BoroCnstNat(self.pLvlGrid[p]),
                        EndOfPrdvNvrs[p, :], EndOfPrdvNvrsP[p, :]))
    EndOfPrdvNvrsFuncBase = LinearInterpOnInterp1D(EndOfPrdvNvrsFunc_list,
                                                   self.pLvlGrid)
    EndOfPrdvNvrsFunc = VariableLowerBoundFunc2D(EndOfPrdvNvrsFuncBase,
                                                 self.BoroCnstNat)
    self.EndOfPrdvFunc = ValueFunc2D(EndOfPrdvNvrsFunc, self.CRRA)
Construct the end-of-period value function for this period, storing it as an attribute of self for use by other methods.

Parameters
----------
EndOfPrdvP : np.array
    Array of end-of-period marginal value of assets corresponding to the asset values in self.aLvlNow x self.pLvlGrid.

Returns
-------
none
17,538
def _plot_posterior_op(
    values,
    var_name,
    selection,
    ax,
    bw,
    linewidth,
    bins,
    kind,
    point_estimate,
    round_to,
    credible_interval,
    ref_val,
    rope,
    ax_labelsize,
    xt_labelsize,
    **kwargs
):
    def format_as_percent(x, round_to=0):
        return "{0:.{1:d}f}%".format(100 * x, round_to)

    def display_ref_val():
        if ref_val is None:
            return
        elif isinstance(ref_val, dict):
            val = None
            for sel in ref_val.get(var_name, []):
                if all(
                    k in selection and selection[k] == v for k, v in sel.items() if k != "ref_val"
                ):
                    val = sel["ref_val"]
                    break
            if val is None:
                return
        elif isinstance(ref_val, Number):
            val = ref_val
        else:
            # message truncated in extraction; reconstructed
            raise ValueError(
                "Argument `ref_val` must be None, a constant, or a dictionary"
            )
        less_than_ref_probability = (values < val).mean()
        greater_than_ref_probability = (values >= val).mean()
        ref_in_posterior = "{} <{:g}< {}".format(
            format_as_percent(less_than_ref_probability, 1),
            val,
            format_as_percent(greater_than_ref_probability, 1),
        )
        ax.axvline(val, ymin=0.05, ymax=0.75, color="C1", lw=linewidth, alpha=0.65)
        ax.text(
            values.mean(),
            plot_height * 0.6,
            ref_in_posterior,
            size=ax_labelsize,
            color="C1",
            weight="semibold",
            horizontalalignment="center",
        )

    def display_rope():
        if rope is None:
            return
        elif isinstance(rope, dict):
            vals = None
            for sel in rope.get(var_name, []):
                if all(k in selection and selection[k] == v for k, v in sel.items() if k != "rope"):
                    vals = sel["rope"]
                    break
            if vals is None:
                return
        elif len(rope) == 2:
            vals = rope
        else:
            # message garbled in extraction; reconstructed
            raise ValueError(
                "Argument `rope` must be None, a dictionary, or an iterable of length 2"
            )
        ax.plot(
            vals,
            (plot_height * 0.02, plot_height * 0.02),
            lw=linewidth * 5,
            color="C2",
            solid_capstyle="round",
            zorder=0,
            alpha=0.7,
        )
        text_props = {"size": ax_labelsize, "horizontalalignment": "center", "color": "C2"}
        ax.text(vals[0], plot_height * 0.2, vals[0], weight="semibold", **text_props)
        ax.text(vals[1], plot_height * 0.2, vals[1], weight="semibold", **text_props)

    def display_point_estimate():
        if not point_estimate:
            return
        if point_estimate not in ("mode", "mean", "median"):
            # message literals reconstructed
            raise ValueError("Point estimate should be 'mode', 'mean' or 'median'")
        if point_estimate == "mean":
            point_value = values.mean()
        elif point_estimate == "mode":
            if isinstance(values[0], float):
                density, lower, upper = _fast_kde(values, bw=bw)
                x = np.linspace(lower, upper, len(density))
                point_value = x[np.argmax(density)]
            else:
                point_value = mode(values.round(round_to))[0][0]
        elif point_estimate == "median":
            point_value = np.median(values)
        point_text = "{}={:.{}f}".format(point_estimate, point_value, round_to)
        ax.text(
            point_value,
            plot_height * 0.8,
            point_text,
            size=ax_labelsize,
            horizontalalignment="center",
        )

    def display_hpd():
        hpd_intervals = hpd(values, credible_interval=credible_interval)
        ax.plot(
            hpd_intervals,
            (plot_height * 0.02, plot_height * 0.02),
            lw=linewidth * 2,
            color="k",
            solid_capstyle="round",
        )
        ax.text(
            hpd_intervals[0],
            plot_height * 0.07,
            hpd_intervals[0].round(round_to),
            size=ax_labelsize,
            horizontalalignment="center",
        )
        ax.text(
            hpd_intervals[1],
            plot_height * 0.07,
            hpd_intervals[1].round(round_to),
            size=ax_labelsize,
            horizontalalignment="center",
        )
        ax.text(
            (hpd_intervals[0] + hpd_intervals[1]) / 2,
            plot_height * 0.3,
            format_as_percent(credible_interval) + " HPD",
            size=ax_labelsize,
            horizontalalignment="center",
        )

    def format_axes():
        ax.yaxis.set_ticks([])
        ax.spines["top"].set_visible(False)
        ax.spines["right"].set_visible(False)
        ax.spines["left"].set_visible(False)
        ax.spines["bottom"].set_visible(True)
        ax.xaxis.set_ticks_position("bottom")
        ax.tick_params(
            axis="x", direction="out", width=1, length=3, color="0.5",
            labelsize=xt_labelsize
        )
        ax.spines["bottom"].set_color("0.5")

    if kind == "kde" and values.dtype.kind == "f":
        plot_kde(
            values,
            bw=bw,
            fill_kwargs={"alpha": kwargs.pop("fill_alpha", 0)},
            plot_kwargs={"linewidth": linewidth},
            ax=ax,
            rug=False,
        )
    else:
        if bins is None:
            if values.dtype.kind == "i":
                xmin = values.min()
                xmax = values.max()
                bins = range(xmin, xmax + 2)
                ax.set_xlim(xmin - 0.5, xmax + 0.5)
            else:
                bins = "auto"
        kwargs.setdefault("align", "left")
        kwargs.setdefault("color", "C0")
        ax.hist(values, bins=bins, alpha=0.35, **kwargs)

    plot_height = ax.get_ylim()[1]

    format_axes()
    display_hpd()
    display_point_estimate()
    display_ref_val()
    display_rope()
Artist to draw posterior.
17,539
def memoize(func=None, maxlen=None):
    if func is not None:
        cache = BoundedOrderedDict(maxlen=maxlen)

        @functools.wraps(func)
        def memo_target(candidates, args):
            fitness = []
            for candidate in candidates:
                lookup_value = pickle.dumps(candidate, 1)
                if lookup_value not in cache:
                    cache[lookup_value] = func([candidate], args)[0]
                fitness.append(cache[lookup_value])
            return fitness
        return memo_target
    else:
        def memoize_factory(func):
            return memoize(func, maxlen=maxlen)
        return memoize_factory
Cache a function's return value each time it is called.

This function serves as a function decorator to provide a caching of evaluated fitness values. If called later with the same arguments, the cached value is returned instead of being re-evaluated.

This decorator assumes that candidates are individually pickleable, and their pickled values are used for hashing into a dictionary. It should be used when evaluating an *expensive* fitness function to avoid costly re-evaluation of those fitnesses. The typical usage is as follows::

    @memoize
    def expensive_fitness_function(candidates, args):
        # Implementation of expensive fitness calculation
        pass

It is also possible to provide the named argument *maxlen*, which specifies the size of the memoization cache to use. (If *maxlen* is ``None``, then an unbounded cache is used.) Once the size of the cache has reached *maxlen*, the oldest element is replaced by the newest element in order to keep the size constant. This usage is as follows::

    @memoize(maxlen=100)
    def expensive_fitness_function(candidates, args):
        # Implementation of expensive fitness calculation
        pass

.. warning:: The ``maxlen`` parameter must be passed as a named keyword argument, or an ``AttributeError`` will be raised (e.g., saying ``@memoize(100)`` will cause an error).
17,540
def _mpv_coax_proptype(value, proptype=str):
    if type(value) is bytes:
        return value
    elif type(value) is bool:
        # b'yes'/b'no' follow mpv's flag convention; the original literals
        # were stripped in extraction
        return b'yes' if value else b'no'
    elif proptype in (str, int, float):
        return str(proptype(value)).encode()
    else:
        raise TypeError(
            'Cannot coax value of type {} into property type {}'.format(
                type(value), proptype))
Intelligently coax the given python value into something that can be understood as a proptype property.
17,541
def _parse_team_data(self, team_data):
    for field in self.__dict__:
        # the skipped-field literals were stripped in extraction; these
        # two names are guesses
        if field == '_rank' or \
           field == '_abbreviation':
            continue
        value = utils._parse_field(PARSING_SCHEME,
                                   team_data,
                                   str(field)[1:])
        setattr(self, field, value)
Parses a value for every attribute.

This function looks through every attribute and retrieves the value according to the parsing scheme and index of the attribute from the passed HTML data. Once the value is retrieved, the attribute's value is updated with the returned result.

Note that this method is called directly once Team is invoked and does not need to be called manually.

Parameters
----------
team_data : string
    A string containing all of the rows of stats for a given team. If multiple tables are being referenced, this will be comprised of multiple rows in a single string.
17,542
def CreateLink(target_path, link_path, override=True):
    _AssertIsLocal(target_path)
    _AssertIsLocal(link_path)

    if override and IsLink(link_path):
        DeleteLink(link_path)

    dirname = os.path.dirname(link_path)
    if dirname:
        CreateDirectory(dirname)

    # platform literal reconstructed; it was stripped in extraction
    if sys.platform != 'win32':
        return os.symlink(target_path, link_path)
    else:
        import jaraco.windows.filesystem
        return jaraco.windows.filesystem.symlink(target_path, link_path)

    # NOTE: the code below is unreachable in the original as well
    from ._easyfs_win32 import CreateSymbolicLink
    try:
        dw_flags = 0
        if target_path and os.path.isdir(target_path):
            dw_flags = 1
        return CreateSymbolicLink(target_path, link_path, dw_flags)
    except Exception as e:
        # error-message literal lost in extraction; reconstructed as a guess
        reraise(e, 'Creating link "%(link_path)s" pointing to "%(target_path)s"' % locals())
Create a symbolic link at `link_path` pointing to `target_path`. :param unicode target_path: Link target :param unicode link_path: Fullpath to link name :param bool override: If True and `link_path` already exists as a link, that link is overridden.
17,543
def fftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True):
    return phase(fft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad)
r""" Return the phase of the Fast Fourier Transform of a waveform. :param wave: Waveform :type wave: :py:class:`peng.eng.Waveform` :param npoints: Number of points to use in the transform. If **npoints** is less than the size of the independent variable vector the waveform is truncated; if **npoints** is greater than the size of the independent variable vector, the waveform is zero-padded :type npoints: positive integer :param indep_min: Independent vector start point of computation :type indep_min: integer or float :param indep_max: Independent vector stop point of computation :type indep_max: integer or float :param unwrap: Flag that indicates whether phase should change phase shifts to their :code:`2*pi` complement (True) or not (False) :type unwrap: boolean :param rad: Flag that indicates whether phase should be returned in radians (True) or degrees (False) :type rad: boolean :rtype: :py:class:`peng.eng.Waveform` .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]] .. Auto-generated exceptions documentation for .. peng.wave_functions.fftp :raises: * RuntimeError (Argument \`indep_max\` is not valid) * RuntimeError (Argument \`indep_min\` is not valid) * RuntimeError (Argument \`npoints\` is not valid) * RuntimeError (Argument \`rad\` is not valid) * RuntimeError (Argument \`unwrap\` is not valid) * RuntimeError (Argument \`wave\` is not valid) * RuntimeError (Incongruent \`indep_min\` and \`indep_max\` arguments) * RuntimeError (Non-uniform sampling) .. [[[end]]]
17,544
def count(expr):
    # the rename literals were stripped in extraction; 'count' is a guess
    if isinstance(expr, SequenceExpr):
        unique_input = _extract_unique_input(expr)
        if unique_input:
            return nunique(unique_input).rename('count')
        else:
            return Count(_value_type=types.int64, _input=expr)
    elif isinstance(expr, SequenceGroupBy):
        return GroupedCount(_data_type=types.int64, _input=expr.to_column(),
                            _grouped=expr.input)
    elif isinstance(expr, CollectionExpr):
        return Count(_value_type=types.int64, _input=expr).rename('count')
    elif isinstance(expr, GroupBy):
        return GroupedCount(_data_type=types.int64, _input=expr.input,
                            _grouped=expr).rename('count')
Value counts :param expr: :return:
17,545
def return_abs_path(directory, path):
    if directory is None or path is None:
        return
    directory = os.path.expanduser(directory)
    return os.path.abspath(os.path.join(directory, path))
Unfortunately, Python is not smart enough to return an absolute path with tilde expansion, so I am writing functionality to do this :param directory: :param path: :return:
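For illustration (the result shown assumes a hypothetical home directory):

    return_abs_path('~/projects', 'src/main.py')
    # -> '/home/alice/projects/src/main.py'
    return_abs_path(None, 'src/main.py')
    # -> None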
17,546
def main():
    plugins = DefaultPluginManager()
    plugins.load_plugins()
    parser, _ = create_argparser()
    for kls in updater_classes():
        kls.register_arguments(parser)
    for kls in detector_classes():
        kls.register_arguments(parser)
    from os import environ
    plugins.options(parser, environ)
    args = parser.parse_args()
    if args.debug:
        args.verbose_count = 5
    log_level = max(int(logging.WARNING / 10) - args.verbose_count, 0) * 10
    logging.basicConfig(level=log_level, format="%(levelname)s %(message)s")
    if args.version:
        from . import __version__
        print("dyndnsc %s" % __version__)
        return 0
    requests_log = logging.getLogger("requests")
    requests_log.setLevel(logging.WARNING)
    logging.debug(parser)
    cfg = get_configuration(args.config)
    if args.listpresets:
        list_presets(cfg)
        return 0
    if args.config:
        collected_configs = collect_config(cfg)
    else:
        parsed_args = parse_cmdline_args(args, updater_classes().union(detector_classes()))
        logging.debug("parsed_args %r", parsed_args)
        collected_configs = {
            "cmdline": {
                "interval": int(args.sleeptime)
            }
        }
        collected_configs["cmdline"].update(parsed_args)
    plugins.configure(args)
    plugins.initialize()
    logging.debug("collected_configs: %r", collected_configs)
    dyndnsclients = []
    for thisconfig in collected_configs:
        # format placeholder reconstructed; it was lost in extraction
        logging.debug("Initializing client for '%s'", thisconfig)
        dyndnsclient = getDynDnsClientForConfig(
            collected_configs[thisconfig], plugins=plugins)
        if dyndnsclient is None:
            return 1
        dyndnsclient.sync()
        dyndnsclients.append(dyndnsclient)
    run_forever_callable = partial(run_forever, dyndnsclients)
    if args.daemon:
        import daemonocle
        daemon = daemonocle.Daemon(worker=run_forever_callable)
        daemon.do_action("start")
        args.loop = True
    if args.loop:
        run_forever_callable()
    return 0
Run the main CLI program. Initializes the stack, parses command line arguments, and fires requested logic.
17,547
def parametrize_grid(self, debug=False):
    self._station.set_operation_voltage_level()
    self.default_branch_type,\
        self.default_branch_type_aggregated,\
        self.default_branch_type_settle = self.set_default_branch_type(debug)
    self.default_branch_kind_aggregated = self.default_branch_kind
    # literal reconstructed; per the docstring, only cables are assumed
    # within settlements
    self.default_branch_kind_settle = 'cable'
    self._station.select_transformers()
Performs parametrization of grid equipment: i) Sets voltage level of MV grid, ii) Operation voltage level and transformer of HV/MV station, iii) Default branch types (normal, aggregated, settlement)

Args
----
debug: bool, defaults to False
    If True, information is printed during process.

Notes
-----
It is assumed that only cables are used within settlements.
17,548
def _correct_qualimap_genome_results(samples):
    for s in samples:
        if verify_file(s.qualimap_genome_results_fpath):
            correction_is_needed = False
            with open(s.qualimap_genome_results_fpath, 'r') as f:
                content = f.readlines()
                metrics_started = False
                for line in content:
                    if ">> Reference" in line:
                        metrics_started = True
                    if metrics_started:
                        if line.find(',') != -1:
                            correction_is_needed = True
                            break
            if correction_is_needed:
                with open(s.qualimap_genome_results_fpath, 'w') as f:
                    metrics_started = False
                    for line in content:
                        if ">> Reference" in line:
                            metrics_started = True
                        if metrics_started:
                            if line.find(',') != -1:
                                line = line.replace(',', '')
                        f.write(line)
fixing java.lang.Double.parseDouble error on entries like "6,082.49"
17,549
def com_google_fonts_check_italic_angle(ttFont, style):
    failed = False
    value = ttFont["post"].italicAngle
    if value > 0:
        failed = True
        yield FAIL, Message("positive",
                            ("The value of post.italicAngle is positive, which"
                             " is likely a mistake and should become negative,"
                             " from {} to {}.").format(value, -value))
    if abs(value) > 30:
        failed = True
        yield FAIL, Message("over -30 degrees",
                            ("The value of post.italicAngle ({}) is very"
                             " high (over -30°!) and should be"
                             " confirmed.").format(value))
    elif abs(value) > 20:
        failed = True
        yield WARN, Message("over -20 degrees",
                            ("The value of post.italicAngle ({}) seems very"
                             " high (over -20°!) and should be"
                             " confirmed.").format(value))
    if "Italic" in style:
        if ttFont['post'].italicAngle == 0:
            failed = True
            yield FAIL, Message("zero-italic",
                                ("Font is italic, so post.italicAngle"
                                 " should be non-zero."))
    else:
        if ttFont["post"].italicAngle != 0:
            failed = True
            yield FAIL, Message("non-zero-normal",
                                ("Font is not italic, so post.italicAngle"
                                 " should be equal to zero."))
    if not failed:
        yield PASS, ("Value of post.italicAngle is {}"
                     " with style='{}'.").format(value, style)
Checking post.italicAngle value.
17,550
def get(self, name: str) -> Union[None, str, List[str]]:
    if name in self._headers:
        return self._headers[name]
    return None
Get a header value by name.
17,551
def produce_context(namespace, context_id, max_delay=None):
    # NOTE: the log/error message literals were stripped in extraction;
    # the format strings below are reconstructions.
    try:
        context_obj = get_context(namespace, context_id)
        logger.info("Found context %s:%s", namespace, context_id)
    except ContextError:
        logger.info("Context %s:%s not found", namespace, context_id)
        if max_delay is not None:
            max_delay = float(max_delay)
        logger.info("Context error handled with max_delay=%s", max_delay)
        if not max_delay \
                or arrival_delay_greater_than(context_id, max_delay):
            context_obj = {}
            logger.info(
                "Timeout: waited %s seconds for context %s",
                max_delay, context_id)
        else:
            msg = "Context {} not found: resorting to retry".format(context_id)
            raise OutOfOrderError(msg)
    return context_obj
Produce event context.
17,552
def numberOfConnectedProximalSynapses(self, cells=None):
    if cells is None:
        cells = xrange(self.numberOfCells())
    return _countWhereGreaterEqualInRows(self.proximalPermanences, cells,
                                         self.connectedPermanenceProximal)
Returns the number of proximal connected synapses on these cells. Parameters: ---------------------------- @param cells (iterable) Indices of the cells. If None return count for all cells.
17,553
def _ShouldPrintError(category, confidence, linenum):
    # NOTE: most of this function's body was lost in extraction; the
    # suppression/verbosity checks and the filter loop are reconstructed
    # to match the docstring ("confidence >= verbose, category passes
    # filter and is not suppressed").
    if _ShouldSuppressErrorCategory(category, linenum):
        return False
    if confidence < _cpplint_state.verbose_level:
        return False
    is_filtered = False
    for one_filter in _Filters():
        if one_filter.startswith('-'):
            if category.startswith(one_filter[1:]):
                is_filtered = True
        elif one_filter.startswith('+'):
            if category.startswith(one_filter[1:]):
                is_filtered = False
        else:
            assert False  # filters are validated elsewhere
    if is_filtered:
        return False
    return True
If confidence >= verbose, category passes filter and is not suppressed.
17,554
def on_service_departure(self, svc_ref):
    with self._lock:
        if svc_ref is self.reference:
            service = self._value
            self._current_ranking = None
            self._value = None
            self.reference = None
            if self.requirement.immediate_rebind:
                self._pending_ref = self._context.get_service_reference(
                    self.requirement.specification,
                    self.requirement.filter
                )
            else:
                self._pending_ref = None
            self._ipopo_instance.unbind(self, service, svc_ref)
Called when a service has been unregistered from the framework :param svc_ref: A service reference
17,555
def _(pymux, variables):
    " Go to previous active window. "
    w = pymux.arrangement.get_previous_active_window()
    if w:
        pymux.arrangement.set_active_window(w)
Go to previous active window.
17,556
def CRPS(label, pred):
    for i in range(pred.shape[0]):
        for j in range(pred.shape[1] - 1):
            if pred[i, j] > pred[i, j + 1]:
                pred[i, j + 1] = pred[i, j]
    return np.sum(np.square(label - pred)) / label.size
Custom evaluation metric on CRPS.
17,557
def update(self, *flags):
    super(Flags, self).update([(flag.name, flag) for flag in flags])
Update Flags registry with a list of :class:`Flag` instances.
17,558
def get_configuration(self):
    mapping = {}
    settings = self.get_settings()
    for record in self.context.getAnalyses():
        uid = record.get("service_uid")
        setting = settings.get(uid, {})
        config = {
            "partition": record.get("partition"),
            "hidden": setting.get("hidden", False),
        }
        mapping[uid] = config
    return mapping
Returns a mapping of UID -> configuration
17,559
def decorator(wrapped_decorator):
    def helper(_func=None, **options):
        def outer_wrapper(func):
            @wrapping(func)
            def inner_wrapper(*args, **kwds):
                return wrapped_decorator(func, args, kwds, **options)
            return inner_wrapper

        if _func is None:
            # Form (2), with options: return the wrapper to apply later.
            return outer_wrapper

        # Form (1), vanilla. The guard below is reconstructed; its raise
        # statement was lost in extraction.
        if options:
            raise TypeError('positional arguments not supported')
        return outer_wrapper(_func)

    helper.wrapped_decorator = wrapped_decorator
    return helper
Converts a function into a decorator that optionally accepts keyword arguments in its declaration.

Example usage::

    @utils.decorator
    def decorator(func, args, kwds, op1=None):
        ... apply op1 ...
        return func(*args, **kwds)

    # Form (1), vanilla
    @decorator
    foo(...)
        ...

    # Form (2), with options
    @decorator(op1=5)
    foo(...)
        ...

Args:
    wrapped_decorator: A function that accepts positional args (func, args, kwds) and any additional supported keyword arguments.

Returns:
    A decorator with an additional 'wrapped_decorator' property that is set to the original function.
17,560
def parse_variable_definition(lexer: Lexer) -> VariableDefinitionNode:
    start = lexer.token
    return VariableDefinitionNode(
        variable=parse_variable(lexer),
        type=expect_token(lexer, TokenKind.COLON) and parse_type_reference(lexer),
        default_value=parse_value_literal(lexer, True)
        if expect_optional_token(lexer, TokenKind.EQUALS)
        else None,
        directives=parse_directives(lexer, True),
        loc=loc(lexer, start),
    )
VariableDefinition: Variable: Type DefaultValue? Directives[Const]?
17,561
def distroinfo(cargs, version=__version__):
    # NOTE: the docopt key strings and a few literals were stripped in
    # extraction; the names below are best-guess reconstructions.
    code = 1
    args = docopt(__doc__, argv=cargs)
    try:
        if args['--version']:
            if not version:
                version = 'N/A'
            print(version)
            code = 0
        elif args['fetch']:
            code = fetch(
                info_url=args['<info-url>'],
                info_files=args['<info-file>'],
                cache_dir=args['--cache-dir'],
                fetcher=args['--fetcher'],
            )
        elif args['dump']:
            code = dump(
                info_url=args['<info-url>'],
                info_files=args['<info-file>'],
                yaml_out=args['--yaml-out'],
                json_out=args['--json-out'],
                cache_dir=args['--cache-dir'],
                fetcher=args['--fetcher'],
            )
    except (
            exception.InvalidInfoFormat,
            KeyboardInterrupt,
    ) as ex:
        code = getattr(ex, 'exit_code', code)
        print("")
        print(str(ex) or type(ex).__name__)
    return code
distroinfo Command-Line Interface
17,562
def _normalize_server_settings(**settings):
    ret = dict()
    settings = salt.utils.args.clean_kwargs(**settings)
    for setting in settings:
        if isinstance(settings[setting], dict):
            value_from_key = next(six.iterkeys(settings[setting]))
            ret[setting] = "{{{0}}}".format(value_from_key)
        else:
            ret[setting] = settings[setting]
    return ret
Convert setting values that have been improperly converted to a dict back to a string.
17,563
def returnOrderBook(self, currencyPair='all', depth=50):
    # default and method-name literals reconstructed from the Poloniex
    # public API; the originals were stripped in extraction
    return self._public('returnOrderBook', currencyPair=currencyPair,
                        depth=depth)
Returns the order book for a given market, as well as a sequence number for use with the Push API and an indicator specifying whether the market is frozen. You may set currencyPair to "all" to get the order books of all markets.
17,564
def input_dim(self):
    n_cont = len(self.get_continuous_dims())
    n_disc = len(self.get_discrete_dims())
    return n_cont + n_disc
Extracts the input dimension of the domain.
17,565
def AddEventAttribute(self, attribute_name, attribute_value):
    if attribute_name in self._extra_event_attributes:
        # message literal reconstructed; it was stripped in extraction
        raise KeyError('Event attribute {0:s} already set'.format(
            attribute_name))
    self._extra_event_attributes[attribute_name] = attribute_value
Adds an attribute that will be set on all events produced. Setting attributes using this method will cause events produced via this mediator to have an attribute with the provided name set with the provided value. Args: attribute_name (str): name of the attribute to add. attribute_value (str): value of the attribute to add. Raises: KeyError: if the event attribute is already set.
17,566
def compute_json(self, build_context):
    props = {}
    test_props = {}
    for prop in self.props:
        if prop in self._prop_json_blacklist:
            continue
        sig_spec = Plugin.builders[self.builder_name].sig.get(prop)
        if sig_spec is None:
            continue
        if prop in self._prop_json_testlist:
            test_props[prop] = process_prop(sig_spec.type, self.props[prop],
                                            build_context)
        else:
            props[prop] = process_prop(sig_spec.type, self.props[prop],
                                       build_context)
    json_dict = dict(
        name=self.name,
        builder_name=self.builder_name,
        deps=hashify_targets(self.deps, build_context),
        props=props,
        buildenv=hashify_targets(self.buildenv, build_context),
        tags=sorted(list(self.tags)),
        flavor=build_context.conf.flavor,
    )
    json_test_dict = dict(
        props=test_props,
    )
    self._json = json.dumps(json_dict, sort_keys=True, indent=4)
    self._test_json = json.dumps(json_test_dict, sort_keys=True, indent=4)
Compute and store a JSON serialization of this target for caching purposes.

The serialization includes:
- The build flavor
- The builder name
- Target tags
- Hashes of target dependencies & buildenv
- Processed props (where target props are replaced with their hashes, and file props are replaced with mapping from file name to its hash)

It specifically does NOT include:
- Artifacts produced by the target

The target name is currently included, although it would be better off to leave it out, and allow targets to be renamed without affecting their caching status (if it's just a rename). It is currently included because it's the easy way to account for the fact that when cached artifacts are restored, their path may be a function of the target name in non-essential ways (such as a workspace dir name).
17,567
def get_bandstructure(self):
    return LobsterBandStructureSymmLine(kpoints=self.kpoints_array,
                                        eigenvals=self.eigenvals,
                                        lattice=self.lattice,
                                        efermi=self.efermi,
                                        labels_dict=self.label_dict,
                                        structure=self.structure,
                                        projections=self.p_eigenvals)
returns a LobsterBandStructureSymmLine object which can be plotted with a normal BSPlotter
17,568
def pauli_expansion(
    val: Any,
    *,
    default: Union[value.LinearDict[str], TDefault] = RaiseTypeErrorIfNotProvided,
    atol: float = 1e-9
) -> Union[value.LinearDict[str], TDefault]:
    # the protocol-attribute and message literals were stripped in
    # extraction and are reconstructed here
    method = getattr(val, '_pauli_expansion_', None)
    expansion = NotImplemented if method is None else method()

    if expansion is not NotImplemented:
        return expansion.clean(atol=atol)

    matrix = unitary(val, default=None)
    if matrix is None:
        if default is RaiseTypeErrorIfNotProvided:
            raise TypeError(
                'No Pauli expansion for object {} of type {}'.format(
                    val, type(val)))
        return default

    num_qubits = matrix.shape[0].bit_length() - 1
    basis = operator_spaces.kron_bases(operator_spaces.PAULI_BASIS,
                                       repeat=num_qubits)

    expansion = operator_spaces.expand_matrix_in_orthogonal_basis(matrix, basis)
    return expansion.clean(atol=atol)
Returns coefficients of the expansion of val in the Pauli basis. Args: val: The value whose Pauli expansion is to returned. default: Determines what happens when `val` does not have methods that allow Pauli expansion to be obtained (see below). If set, the value is returned in that case. Otherwise, TypeError is raised. atol: Ignore coefficients whose absolute value is smaller than this. Returns: If `val` has a _pauli_expansion_ method, then its result is returned. Otherwise, if `val` has a small unitary then that unitary is expanded in the Pauli basis and coefficients are returned. Otherwise, if default is set to None or other value then default is returned. Otherwise, TypeError is raised. Raises: TypeError if `val` has none of the methods necessary to obtain its Pauli expansion and no default value has been provided.
17,569
def get_value(self):
    def get_element_value():
        # tag-name, attribute and message literals were stripped in
        # extraction; reconstructed as plausible guesses
        if self.tag_name() == 'input':
            return self.get_attribute('value')
        elif self.tag_name() == 'select':
            selected_options = self.element.all_selected_options
            if len(selected_options) > 1:
                raise ValueError(
                    'Multiple options are selected in {}'.format(self)
                )
            return selected_options[0].get_attribute('value')
        else:
            raise ValueError(
                'Cannot get value of element type: {}'.format(self.tag_name()))
    return self.execute_and_handle_webelement_exceptions(
        get_element_value, name_of_action='get value')
Gets the value of a select or input element @rtype: str @return: The value of the element @raise: ValueError if element is not of type input or select, or has multiple selected options
17,570
def to_header(self):
    d = dict(self)
    auth_type = d.pop("__auth_type__", None) or "basic"
    return "%s %s" % (
        auth_type.title(),
        ", ".join(
            [
                "%s=%s" % (
                    key,
                    quote_header_value(
                        value, allow_token=key not in self._require_quoting
                    ),
                )
                for key, value in iteritems(d)
            ]
        ),
    )
Convert the stored values into a WWW-Authenticate header.
17,571
def _analyze_single(self, reference, result):
    # NOTE: the report-key literals and the per-position bookkeeping inside
    # the loop were lost in extraction; this is a minimal reconstruction
    # that records mismatching positions.
    reference_str = str(reference)
    result_str = str(result)
    report = {'mismatches': [], 'insertions': [], 'deletions': []}
    for i, (ref, res) in enumerate(zip(reference_str, result_str)):
        if ref != res:
            report['mismatches'].append(i)
    return report
Report mismatches and indels for a single (aligned) reference and result.
17,572
def _fill_from_config(self, config):
    for k, v in config.items():
        if k in H2OConnectionConf.allowed_properties:
            setattr(self, k, v)
        else:
            raise H2OValueError(message="Unsupported name of property: %s!" % k,
                                var_name="config")
Fill this instance from given dictionary. The method only uses keys which correspond to properties of this class, and throws an exception on an unknown property name. :param config: dictionary of parameters :return: a new instance of this class filled with values from given dictionary :raises H2OValueError: if input config contains unknown property name.
17,573
def sources(self):
    data = clips.data.DataObject(self._env)
    lib.EnvSlotSources(self._env, self._cls, self._name, data.byref)
    return tuple(data.value) if isinstance(data.value, list) else ()
A tuple containing the names of the Class sources for this Slot. The Python equivalent of the CLIPS slot-sources function.
17,574
def s(self, *args, **kwargs) -> Partial[Stepwise]:
    return Partial(Stepwise, self.base, *self.rules, *args, **kwargs)
Create an unbound prototype of this class, partially applying arguments .. code:: python @stepwise def control(pool: Pool, interval): return 10 pipeline = control.s(interval=20) >> pool :note: The partial rules are sealed, and :py:meth:`~.UnboundStepwise.add` cannot be called on it.
17,575
def getdateByTimezone(cDateUTC, timezone=None):
    # format-string literals reconstructed; the originals were stripped in
    # extraction (input shaped like '2015-10-19T21:56:00-03:00')
    dt = cDateUTC[0:19]
    if timezone and len(cDateUTC) == 25:
        tz = cDateUTC[19:25]
        tz = int(tz.split(':')[0])
        dt = datetime.strptime(dt, '%Y-%m-%dT%H:%M:%S')
        dt = dt - timedelta(hours=tz)
        dt = pytz.utc.localize(dt)
        dt = timezone.normalize(dt)
        dt = dt.strftime('%Y-%m-%dT%H:%M:%S')
    cDt = dt[0:10].split('-')
    cDt.reverse()
    return '/'.join(cDt), dt[11:16]
This method adjusts the received date according to the user's timezone. Its return is split into two parts: 1) the date itself; 2) the hours. :param cDateUTC: string containing the date information :param timezone: the system user's timezone :return: date and time converted to the user's timezone
17,576
def change_dir():
    try:
        d = os.environ['HADOOPY_CHDIR']
        # message literals reconstructed; the originals were stripped
        sys.stderr.write('Changing to directory [%s]\n' % d)
    except KeyError:
        pass
    else:
        try:
            os.chdir(d)
        except OSError:
            sys.stderr.write('Unable to change to directory [%s]\n' % d)
Change the local directory if the HADOOPY_CHDIR environment variable is provided
17,577
def plot_energy(time, H, T, U):
    # title, axis-label, legend and color literals were stripped in
    # extraction; the strings below are placeholders
    T0 = T[0]
    H = H / T0
    T = T / T0
    U = U / T0
    fig, ax = plt.subplots(figsize=[16, 8])
    ax.set_title('Energy')
    ax.set_xlabel('time')
    ax.set_ylabel('energy (normalized)')
    ax.plot(time, T, label='kinetic', color='C0')
    ax.plot(time, U, label='potential', color='C1')
    ax.plot(time, H, label='total', color='C2')
    ax.legend()
    ax.grid()
    plt.show()
Plot kinetic and potential energy of system over time
17,578
def to_json(self):
    cursor = self._get_cursor()
    cursor_object = False
    if cursor and isinstance(cursor, datastore_query.Cursor):
        cursor = cursor.to_websafe_string()
        cursor_object = True
    return {"key_range": self._key_range.to_json(),
            "query_spec": self._query_spec.to_json(),
            "cursor": cursor,
            "cursor_object": cursor_object}
Serializes all states into json form. Returns: all states in json-compatible map.
17,579
def get_kabsch_rotation(Q, P):
    A = np.dot(np.transpose(P), Q)
    V, S, W = np.linalg.svd(A)
    W = W.T
    d = np.linalg.det(np.dot(W, V.T))
    return np.linalg.multi_dot((W, np.diag([1., 1., d]), V.T))
Calculate the optimal rotation from ``P`` unto ``Q``. Using the Kabsch algorithm the optimal rotation matrix for the rotation of ``other`` unto ``self`` is calculated. The algorithm is described very well in `wikipedia <http://en.wikipedia.org/wiki/Kabsch_algorithm>`_. Args: Q (:class:`~numpy.array`): Target coordinates. P (:class:`~numpy.array`): Coordinates to rotate onto ``Q``. Returns: :class:`~numpy.array`: Rotation matrix
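For illustration, a self-contained sketch that recovers a known rotation (hypothetical data; for real point clouds both sets are usually centered on their centroids first):

    import numpy as np

    P = np.eye(3)  # three reference points, one per row
    theta = np.pi / 4
    M = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0, 0.0, 1.0]])
    Q = P @ M.T    # the same points, rotated by M
    R = get_kabsch_rotation(Q, P)
    assert np.allclose(P @ R.T, Q)  # R recovers M exactly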
17,580
def get_params(self, param=""):
    fullcurdir = os.path.realpath(os.path.curdir)
    if not param:
        for index, (key, value) in enumerate(self.paramsdict.items()):
            if isinstance(value, str):
                value = value.replace(fullcurdir + "/", "./")
            sys.stdout.write("{}{:<4}{:<28}{:<45}\n"
                             .format(self._spacer, index, key, value))
    else:
        try:
            if int(param):
                return self.paramsdict.values()[int(param)]
        except (ValueError, TypeError, NameError, IndexError):
            try:
                return self.paramsdict[param]
            except KeyError:
                return
pretty prints params if called as a function
17,581
def remember_encrypted_identity(self, subject, encrypted):
    try:
        encoded = base64.b64encode(encrypted).decode()
        subject.web_registry.remember_me = encoded
    except AttributeError:
        msg = ("Subject argument is not an HTTP-aware instance. This "
               "is required to obtain a web registry in order to "
               "set the RememberMe cookie. Returning immediately "
               "and ignoring RememberMe operation.")
        logger.debug(msg)
Base64-encodes the specified serialized byte array and sets that base64-encoded String as the cookie value. The ``subject`` instance is expected to be a ``WebSubject`` instance with a web_registry handle so that an HTTP cookie may be set on an outgoing response. If it is not a ``WebSubject`` or that ``WebSubject`` does not have a web_registry handle, this implementation does nothing. :param subject: the Subject for which the identity is being serialized :param serialized: the serialized bytes to persist :type serialized: bytearray
17,582
def from_string(bnf: str, entry=None, *optional_inherit) -> Grammar:
    inherit = [Grammar] + list(optional_inherit)
    # the scope-key literals were stripped in extraction; these names are guesses
    scope = {'grammar': bnf, 'entry': entry}
    return build_grammar(tuple(inherit), scope)
Create a Grammar from a string
17,583
def users_update(self, user_id, **kwargs):
    return self.__call_api_post('users.update', userId=user_id, data=kwargs)
Update an existing user.
17,584
def as_yml(self):
    return YmlFileEvent(name=str(self.name), subfolder=str(self.subfolder))
Return yml compatible version of self
17,585
def build_permission_name(model_class, prefix):
    model_name = model_class._meta.object_name.lower()
    app_label = model_class._meta.app_label
    action_name = prefix
    perm = '%s.%s_%s' % (app_label, action_name, model_name)
    return perm
Build permission name for model_class (like 'app.add_model').
17,586
def create_attach_volumes(name, kwargs, call=None):
    # NOTE: the key names, messages and mode strings in this function were
    # stripped in extraction; the literals below are best-guess
    # reconstructions based on the docstring.
    if call != 'action':
        raise SaltCloudSystemExit(
            'The create_attach_volumes action must be called with '
            '-a or --action.'
        )
    volumes = literal_eval(kwargs['volumes'])
    node = kwargs['node']
    conn = get_conn()
    node_data = _expand_node(conn.ex_get_node(node))
    letter = ord('a') - 1
    for idx, volume in enumerate(volumes):
        volume_name = '{0}-sd{1}'.format(name, chr(letter + 2 + idx))
        volume_dict = {
            'disk_name': volume_name,
            'location': node_data['extra']['zone']['name'],
            'size': volume['size'],
            'type': volume.get('type', 'pd-standard'),
            'image': volume.get('image', None),
            'snapshot': volume.get('snapshot', None),
            'auto_delete': volume.get('auto_delete', False),
        }
        create_disk(volume_dict, 'function')
        attach_disk(name, volume_dict, 'action')
.. versionadded:: 2017.7.0

Create and attach multiple volumes to a node. The 'volumes' and 'node' arguments are required, where 'node' is a libcloud node, and 'volumes' is a list of maps, where each map contains:

size
    The size of the new disk in GB. Required.
type
    The disk type, either pd-standard or pd-ssd. Optional, defaults to pd-standard.
image
    An image to use for this new disk. Optional.
snapshot
    A snapshot to use for this new disk. Optional.
auto_delete
    An option(bool) to keep or remove the disk upon instance deletion. Optional, defaults to False.

Volumes are attached in the order in which they are given, thus on a new node the first volume will be /dev/sdb, the second /dev/sdc, and so on.
17,587
def axes(self):
    return tuple(i for i in range(self.domain.ndim)
                 if self.domain.shape[i] != self.range.shape[i])
Dimensions in which an actual resizing is performed.
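A self-contained illustration with hypothetical stand-ins for the operator's `domain` and `range` (anything exposing `.shape` and `.ndim` behaves the same way):

from collections import namedtuple

Space = namedtuple("Space", ["shape", "ndim"])
domain = Space(shape=(4, 5, 6), ndim=3)
range_ = Space(shape=(4, 8, 6), ndim=3)

axes = tuple(i for i in range(domain.ndim)
             if domain.shape[i] != range_.shape[i])
print(axes)  # (1,) -- only the middle axis is actually resized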
17,588
import numpy as np
from numpy import sin, cos

def get_point(theta_ik, theta_jk, Pi, Pj):
    # lines through Pi and Pj with direction angles theta_ik and theta_jk
    A = np.array([[sin(theta_ik), -cos(theta_ik)],
                  [sin(theta_jk), -cos(theta_jk)]])
    B = np.array([[sin(theta_ik), -cos(theta_ik), 0, 0],
                  [0, 0, sin(theta_jk), -cos(theta_jk)]])
    p = np.r_[Pi, Pj]
    # solve A @ Pk = B @ p for the intersection point Pk
    Pk = np.linalg.solve(A, np.dot(B, p))
    return Pk
Calculate coordinates of point Pk given two points Pi, Pj and inner angles. :param theta_ik: Inner angle at Pi to Pk. :param theta_jk: Inner angle at Pj to Pk. :param Pi: Coordinates of point Pi. :param Pj: Coordinates of point Pj. :return: Coordinate of point Pk.
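A quick numerical check; note that in this formulation the thetas act as direction angles measured from the positive x-axis in the global frame, so the ray from Pj toward a point above the segment has angle 135 degrees:

import numpy as np

Pi = np.array([0.0, 0.0])
Pj = np.array([1.0, 0.0])
# rays at 45 deg from Pi and 135 deg from Pj intersect at (0.5, 0.5)
Pk = get_point(np.pi / 4, 3 * np.pi / 4, Pi, Pj)
print(np.round(Pk, 3))  # [0.5 0.5]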
17,589
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
    bufsize = 4096
    try:
        chan = self.ssh.get_transport().open_session()
    except Exception as e:
        msg = "Failed to open session"
        if len(str(e)) > 0:
            msg += ": %s" % str(e)
        raise errors.AnsibleConnectionFailed(msg)

    chan.get_pty()

    if not self.runner.sudo or not sudoable:
        if executable:
            quoted_command = executable + ' -c ' + pipes.quote(cmd)
        else:
            quoted_command = cmd
        vvv("EXEC %s" % quoted_command, host=self.host)
        chan.exec_command(quoted_command)
    else:
        shcmd, prompt = utils.make_sudo_cmd(sudo_user, executable, cmd)
        vvv("EXEC %s" % shcmd, host=self.host)
        sudo_output = ''
        try:
            chan.exec_command(shcmd)
            if self.runner.sudo_pass:
                # feed characters back until the sudo password prompt appears
                while not sudo_output.endswith(prompt):
                    chunk = chan.recv(bufsize)
                    if not chunk:
                        if 'unknown user' in sudo_output:
                            raise errors.AnsibleError(
                                'user %s does not exist' % sudo_user)
                        else:
                            raise errors.AnsibleError('ssh connection ' +
                                'closed waiting for password prompt')
                    sudo_output += chunk
                chan.sendall(self.runner.sudo_pass + '\n')
        except socket.timeout:
            raise errors.AnsibleError('ssh timed out waiting for sudo.\n' + sudo_output)

    stdout = ''.join(chan.makefile('rb', bufsize))
    stderr = ''.join(chan.makefile_stderr('rb', bufsize))
    return (chan.recv_exit_status(), '', stdout, stderr)
run a command on the remote host
17,590
def run_evaluation(self, stream_name: str) -> None:
    def prediction():
        # log message text is assumed; the original strings were stripped
        logging.info('Running prediction on stream `%s`', stream_name)
        self._run_zeroth_epoch([stream_name])
        logging.info('Prediction done')

    self._try_run(prediction)
Run the main loop with the given stream in the prediction mode. :param stream_name: name of the stream to be evaluated
17,591
def joliet_vd_factory(joliet, sys_ident, vol_ident, set_size, seqnum,
                      log_block_size, vol_set_ident, pub_ident_str,
                      preparer_ident_str, app_ident_str, copyright_file,
                      abstract_file, bibli_file, vol_expire_date, app_use, xa):
    # the escape sequence in the SVD encodes the Joliet level (UCS-2 level 1-3)
    if joliet == 1:
        escape_sequence = b'%/@'
    elif joliet == 2:
        escape_sequence = b'%/C'
    elif joliet == 3:
        escape_sequence = b'%/E'
    else:
        raise pycdlibexception.PyCdlibInvalidInput('Invalid Joliet level; must be 1, 2, or 3')

    svd = PrimaryOrSupplementaryVD(VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY)
    svd.new(0, sys_ident, vol_ident, set_size, seqnum, log_block_size,
            vol_set_ident, pub_ident_str, preparer_ident_str, app_ident_str,
            copyright_file, abstract_file, bibli_file, vol_expire_date,
            app_use, xa, 1, escape_sequence)
    return svd
An internal function to create an Joliet Volume Descriptor. Parameters: joliet - The joliet version to use, one of 1, 2, or 3. sys_ident - The system identification string to use on the new ISO. vol_ident - The volume identification string to use on the new ISO. set_size - The size of the set of ISOs this ISO is a part of. seqnum - The sequence number of the set of this ISO. log_block_size - The logical block size to use for the ISO. While ISO9660 technically supports sizes other than 2048 (the default), this almost certainly doesn't work. vol_set_ident - The volume set identification string to use on the new ISO. pub_ident_str - The publisher identification string to use on the new ISO. preparer_ident_str - The preparer identification string to use on the new ISO. app_ident_str - The application identification string to use on the new ISO. copyright_file - The name of a file at the root of the ISO to use as the copyright file. abstract_file - The name of a file at the root of the ISO to use as the abstract file. bibli_file - The name of a file at the root of the ISO to use as the bibliographic file. vol_expire_date - The date that this ISO will expire at. app_use - Arbitrary data that the application can stuff into the primary volume descriptor of this ISO. xa - Whether to add the ISO9660 Extended Attribute extensions to this ISO. The default is False. Returns: The newly created Joliet Volume Descriptor.
17,592
def generate(cls, size, string, filetype="JPEG"):
    render_size = max(size, GenAvatar.MAX_RENDER_SIZE)
    image = Image.new('RGB', (render_size, render_size),
                      cls._background_color(string))
    draw = ImageDraw.Draw(image)
    font = cls._font(render_size)
    text = cls._text(string)
    draw.text(
        cls._text_position(render_size, text, font),
        text,
        fill=cls.FONT_COLOR,
        font=font)
    stream = BytesIO()
    image = image.resize((size, size), Image.ANTIALIAS)
    image.save(stream, format=filetype, optimize=True)
    return stream
Generates a squared avatar with random background color. :param size: size of the avatar, in pixels :param string: string to be used to print text and seed the random :param filetype: the file format of the image (i.e. JPEG, PNG)
17,593
def partition_version_classifiers(
        classifiers: t.Sequence[str],
        version_prefix: str = 'Programming Language :: Python :: ',
        only_suffix: str = ' :: Only') -> t.Tuple[t.List[str], t.List[str]]:
    versions_min, versions_only = [], []
    for classifier in classifiers:
        version = classifier.replace(version_prefix, '')
        versions = versions_min
        if version.endswith(only_suffix):
            version = version.replace(only_suffix, '')
            versions = versions_only
        try:
            versions.append(tuple(int(_) for _ in version.split('.')))
        except ValueError:
            # not a version classifier -- skip it
            pass
    return versions_min, versions_only
Find version number classifiers in given list and partition them into 2 groups.
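For example, assuming the default prefix and suffix reconstructed above:

classifiers = [
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3 :: Only",
]
minimum, only = partition_version_classifiers(classifiers)
print(minimum)  # [(3, 6)] -- minimum-version classifiers
print(only)     # [(3,)]  -- "N :: Only" classifiers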
17,594
def _create_source(self, src):
    if src['SpatialType'] == 'SkyDirFunction':
        pylike_src = pyLike.PointSource(self.like.logLike.observation())
        pylike_src.setDir(src.skydir.ra.deg, src.skydir.dec.deg,
                          False, False)
    elif src['SpatialType'] == 'SpatialMap':
        filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
        sm = pyLike.SpatialMap(filepath)
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'RadialProfile':
        filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
        sm = pyLike.RadialProfile(filepath)
        sm.setCenter(src['RAJ2000'], src['DEJ2000'])
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'RadialGaussian':
        sm = pyLike.RadialGaussian(src.skydir.ra.deg, src.skydir.dec.deg,
                                   src.spatial_pars['Sigma']['value'])
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'RadialDisk':
        sm = pyLike.RadialDisk(src.skydir.ra.deg, src.skydir.dec.deg,
                               src.spatial_pars['Radius']['value'])
        pylike_src = pyLike.DiffuseSource(sm, self.like.logLike.observation(), False)
    elif src['SpatialType'] == 'MapCubeFunction':
        filepath = str(utils.path_to_xmlpath(src['Spatial_Filename']))
        mcf = pyLike.MapCubeFunction2(filepath)
        pylike_src = pyLike.DiffuseSource(mcf, self.like.logLike.observation(), False)
    else:
        raise Exception('Unrecognized SpatialType: %s' % src['SpatialType'])

    if src['SpectrumType'] == 'FileFunction':
        fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
                                               src.spectral_pars)
        file_function = pyLike.FileFunction_cast(fn)
        filename = str(os.path.expandvars(src['Spectrum_Filename']))
        file_function.readFunction(filename)
    elif src['SpectrumType'] == 'DMFitFunction':
        fn = pyLike.DMFitFunction()
        fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
                                               src.spectral_pars, fn)
        filename = str(os.path.expandvars(src['Spectrum_Filename']))
        fn.readFunction(filename)
    else:
        fn = gtutils.create_spectrum_from_dict(src['SpectrumType'],
                                               src.spectral_pars)

    pylike_src.setSpectrum(fn)
    pylike_src.setName(str(src.name))
    return pylike_src
Create a pyLikelihood Source object from a `~fermipy.roi_model.Model` object.
17,595
def register_job(self, job_details):
    # if the job is already registered, just update its status
    try:
        job_details_old = self.get_details(job_details.jobname,
                                           job_details.jobkey)
        if job_details_old.status <= JobStatus.running:
            job_details_old.status = job_details.status
            job_details_old.update_table_row(
                self._table, job_details_old.dbkey - 1)
        job_details = job_details_old
    except KeyError:
        job_details.dbkey = len(self._table) + 1
        job_details.get_file_ids(
            self._file_archive, creator=job_details.dbkey)
        job_details.append_to_tables(self._table, self._table_ids)
        # 'file_id' column key is assumed; the original was stripped
        self._table_id_array = self._table_ids['file_id'].data
    self._cache[job_details.fullkey] = job_details
    return job_details
Register a job in this `JobArchive`
17,596
def update_vpnservice(self, vpnservice, body=None): return self.put(self.vpnservice_path % (vpnservice), body=body)
Updates a VPN service.
17,597
def nat_gateways(self):
    api_version = self._get_api_version('nat_gateways')
    if api_version == '2019-02-01':
        from .v2019_02_01.operations import NatGatewaysOperations as OperationClass
    else:
        raise NotImplementedError("APIVersion {} is not available".format(api_version))
    return OperationClass(self._client, self.config,
                          Serializer(self._models_dict(api_version)),
                          Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2019-02-01: :class:`NatGatewaysOperations<azure.mgmt.network.v2019_02_01.operations.NatGatewaysOperations>`
17,598
def conflicts_with(self, other): if isinstance(other, Requirement): if (self.name_ != other.name_) or (self.range is None) \ or (other.range is None): return False elif self.conflict: return False if other.conflict \ else self.range_.issuperset(other.range_) elif other.conflict: return other.range_.issuperset(self.range_) else: return not self.range_.intersects(other.range_) else: if (self.name_ != other.name_) or (self.range is None): return False if self.conflict: return (other.version_ in self.range_) else: return (other.version_ not in self.range_)
Returns True if this requirement conflicts with another `Requirement` or `VersionedObject`.
17,599
def make_regular_points_with_no_res(bounds, nb_points=10000):
    minlon, minlat, maxlon, maxlat = bounds
    # pad the bounding box by 1/8 of its extent on every side
    offset_lon = (maxlon - minlon) / 8
    offset_lat = (maxlat - minlat) / 8
    minlon -= offset_lon
    maxlon += offset_lon
    minlat -= offset_lat
    maxlat += offset_lat
    nb_x = int(nb_points**0.5)
    nb_y = int(nb_points**0.5)
    return (
        np.linspace(minlon, maxlon, nb_x),
        np.linspace(minlat, maxlat, nb_y),
        (nb_y, nb_x)
    )
Return a regular grid of points within `bounds` with the specified number of points (or a close approximate value). Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. nb_points : int, optional The desired number of points (default: 10000) Returns ------- xs : numpy.ndarray The regularly spaced x coordinates. ys : numpy.ndarray The regularly spaced y coordinates. shape : 2-ints tuple The number of points on each dimension, as (height, width).
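For instance, a roughly 100-point grid over a 10-by-5 bbox:

import numpy as np

xs, ys, shape = make_regular_points_with_no_res((0.0, 0.0, 10.0, 5.0),
                                                nb_points=100)
print(shape)          # (10, 10) -- int(sqrt(100)) points per axis
print(xs[0], xs[-1])  # -1.25 11.25 -- bbox padded by 1/8 on each side
grid_x, grid_y = np.meshgrid(xs, ys)  # full 2-D grid if needed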