Unnamed: 0 (int64): 0 to 389k
code (string): lengths 26 to 79.6k
docstring (string): lengths 1 to 46.9k
def _add(self, xer, primary, type): if xer not in primary: primary[xer] = TwistedSocketNotifier(None, self, xer, type)
Private method for adding a descriptor to the event loop. It takes care of adding it if new, or modifying it if it was already added for another state (read -> read/write, for example).
8,301
def _logged_in_successful(data): if re.match(r r r, data.strip()): return False else: return True
Test the login status from the communication returned by the server. :param data: bytes received from the server during login :type data: list of bytes :return: boolean, True when you are logged in.
8,302
async def FinishActions(self, results): _params = dict() msg = dict(type=, request=, version=5, params=_params) _params[] = results reply = await self.rpc(msg) return reply
results : typing.Sequence[~ActionExecutionResult] Returns -> typing.Sequence[~ErrorResult]
8,303
def fmap(order, aij, bij, x, y): u = np.zeros_like(x) v = np.zeros_like(y) k = 0 for i in range(order + 1): for j in range(i + 1): u += aij[k] * (x ** (i - j)) * (y ** j) v += bij[k] * (x ** (i - j)) * (y ** j) k += 1 return u, v
Evaluate the 2D polynomial transformation. u = sum[i=0:order]( sum[j=0:i]( a_ij * x**(i - j) * y**j )) v = sum[i=0:order]( sum[j=0:i]( b_ij * x**(i - j) * y**j )) Parameters ---------- order : int Order of the polynomial transformation. aij : numpy array Polynomial coefficients corresponding to a_ij. bij : numpy array Polynomial coefficients corresponding to b_ij. x : numpy array or float X coordinate values where the transformation is computed. Note that these values correspond to array indices. y : numpy array or float Y coordinate values where the transformation is computed. Note that these values correspond to array indices. Returns ------- u : numpy array or float U coordinate values. v : numpy array or float V coordinate values.
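A minimal usage sketch of the polynomial evaluation described above, assuming fmap is in scope; the coefficient values below are made up for illustration, not taken from any real distortion model:

import numpy as np

# For order=1 the coefficient arrays have three entries, ordered (i=0,j=0), (i=1,j=0), (i=1,j=1),
# i.e. the basis is [1, x, y].
aij = np.array([0.0, 1.0, 0.5])   # u = x + 0.5*y
bij = np.array([0.0, 0.0, 2.0])   # v = 2*y
x = np.array([0.0, 1.0, 2.0])
y = np.array([0.0, 1.0, 1.0])
u, v = fmap(1, aij, bij, x, y)    # expected: u = [0.0, 1.5, 2.5], v = [0.0, 2.0, 2.0]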
8,304
def register_magics(store_name=, ampl_object=None): from IPython.core.magic import ( Magics, magics_class, cell_magic, line_magic ) @magics_class class StoreAMPL(Magics): def __init__(self, shell=None, **kwargs): Magics.__init__(self, shell=shell, **kwargs) self._store = [] shell.user_ns[store_name] = self._store @cell_magic def ampl(self, line, cell): self._store.append(cell) @cell_magic def ampl_eval(self, line, cell): ampl_object.eval(cell) @line_magic def get_ampl(self, line): return self._store get_ipython().register_magics(StoreAMPL)
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``. Args: store_name: Name of the store where ``%%ampl`` cells will be stored. ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
8,305
def timelimit(timeout): def _1(function): def _2(*args, **kw): class Dispatch(threading.Thread): def __init__(self): threading.Thread.__init__(self) self.result = None self.error = None self.setDaemon(True) self.start() def run(self): try: self.result = function(*args, **kw) except: self.error = sys.exc_info() c = Dispatch() c.join(timeout) if c.isAlive(): raise TimeoutError, if c.error: raise c.error[0], c.error[1] return c.result return _2 return _1
borrowed from web.py
8,306
def to_df_CSV(self, tempfile: str=None, tempkeep: bool=False, **kwargs) -> : return self.to_df(method=, tempfile=tempfile, tempkeep=tempkeep, **kwargs)
Export this SAS Data Set to a Pandas Data Frame via CSV file :param tempfile: [optional] an OS path for a file to use for the local CSV file; by default a temporary file is used and cleaned up :param tempkeep: if you specify your own file to use with tempfile=, this controls whether it's cleaned up after using it :param kwargs: :return: Pandas data frame :rtype: 'pd.DataFrame'
8,307
def getvalue(x): if isrepeating(x): raise TypeError( "Ambiguous call to getvalue for %r which has more than one value." % x) for value in getvalues(x): return value
Return the single value of x or raise TypeError if it has more than one value.
8,308
def get_annotated_lines(self): lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)] if hasattr(self.code, ): lineno = self.code.co_firstlineno - 1 while lineno > 0: if _funcdef_re.match(lines[lineno].code): break lineno -= 1 try: offset = len(inspect.getblock([x.code + for x in lines[lineno:]])) except TokenError: offset = 0 for line in lines[lineno:lineno + offset]: line.in_frame = True try: lines[self.lineno - 1].current = True except IndexError: pass return lines
Helper function that returns lines with extra information.
8,309
def tweakback(drzfile, input=None, origwcs = None, newname = None, wcsname = None, extname=, force=False, verbose=False): print("TweakBack Version {:s}({:s}) started at: {:s}\n" .format(__version__,__version_date__,util._ptime()[0])) fltfiles = parseinput.parseinput(input)[0] if fltfiles is None or len(fltfiles) == 0: if not extlist: extlist = [0] wcsutil.altwcs.archiveWCS(imhdulist, extlist, reusekey=True) for ext in extlist: logstr = "Processing {:s}[{:s}]".format(imhdulist.filename(), ext2str(ext)) if verbose: print("\n{:s}\n".format(logstr)) else: log.info(logstr) chip_wcs = wcsutil.HSTWCS(imhdulist, ext=ext) update_chip_wcs(chip_wcs, orig_wcs, final_wcs, xrms=crderr1, yrms = crderr2) extnum = imhdulist.index(imhdulist[ext]) updatehdr.update_wcs(imhdulist, extnum, chip_wcs, wcsname=final_name, reusename=False, verbose=verbose) imhdulist.close()
Apply WCS solution recorded in drizzled file to distorted input images (``_flt.fits`` files) used to create the drizzled file. This task relies on the original WCS and updated WCS to be recorded in the drizzled image's header as the last 2 alternate WCSs. Parameters ---------- drzfile : str (Default = '') filename of undistorted image which contains the new WCS and WCS prior to being updated newname : str (Default = None) Value of ``WCSNAME`` to be used to label the updated solution in the output (e.g., ``_flt.fits``) files. If left blank or None, it will default to using the current ``WCSNAME`` value from the input drzfile. input : str (Default = '') filenames of distorted images to be updated using new WCS from 'drzfile'. These can be provided either as an ``@-file``, a comma-separated list of filenames or using wildcards. .. note:: A blank value will indicate that the task should derive the filenames from the 'drzfile' itself, if possible. The filenames will be derived from the ``D*DATA`` keywords written out by ``AstroDrizzle``. If they cannot be found, the task will quit. origwcs : str (Default = None) Value of ``WCSNAME`` keyword prior to the drzfile image being updated by ``TweakReg``. If left blank or None, it will default to using the second to last ``WCSNAME*`` keyword value found in the header. wcsname : str (Default = None) Value of WCSNAME for updated solution written out by ``TweakReg`` as specified by the `wcsname` parameter from ``TweakReg``. If this is left blank or `None`, it will default to the current ``WCSNAME`` value from the input drzfile. extname : str (Default = 'SCI') Name of extension in `input` files to be updated with new WCS force : bool (Default = False) This parameter specifies whether or not to force an update of the WCS even though a WCS with this solution or `wcsname` already exists. verbose : bool (Default = False) This parameter specifies whether or not to print out additional messages during processing. Notes ----- The algorithm used by this function is based on linearization of the exact compound operator that converts input image coordinates to the coordinates (in the input image) that would result in alignment with the new drizzled image WCS. If no input distorted files are specified as input, this task will attempt to generate the list of filenames from the drizzled input file's own header. EXAMPLES -------- An image named ``acswfc_mos2_drz.fits`` was created from 4 images using astrodrizzle. This drizzled image was then aligned to another image using tweakreg and the header was updated using the ``WCSNAME`` = ``TWEAK_DRZ``. The new WCS can then be used to update each of the 4 images that were combined to make up this drizzled image using: >>> from drizzlepac import tweakback >>> tweakback.tweakback('acswfc_mos2_drz.fits') If the same WCS should be applied to a specific set of images, those images can be updated using: >>> tweakback.tweakback('acswfc_mos2_drz.fits', ... input='img_mos2a_flt.fits,img_mos2e_flt.fits') See Also -------- stwcs.wcsutil.altwcs: Alternate WCS implementation
8,310
def failed_login_limit_reached(self): login_limit = 10 if self.failed_logins and self.failed_logins >= login_limit: return True else: return False
A boolean method to check whether the failed login limit has been reached.
8,311
def addarchive(self, name): with tarfile.open(name, ) as st: for member in st.getmembers(): self.tarfile.addfile(member, st.extractfile(member.name))
Add (i.e. copy) the contents of another tarball to this one. :param name: File path to the tar archive. :type name: unicode | str
8,312
def _filter(request, object_, tags=None, more=False, orderby=): res = Result() models = QUERY_MODELS idDict = {} objDict = {} data = {} modelmap = {} length = 75 for m in models: modelmap[m.model_class()] = m.model if object_: idDict[m.model] = m.model_class().objects.filter(gallery=object_) else: idDict[m.model] = m.model_class().objects.all() if idDict[m.model] is None: continue if tags: for bucket in tags: searchQuery = "" o = None for item in bucket: if item == 0: idDict[m.model].annotate(num_tags=Count()) if not o: o = Q() o |= Q(num_tags__lte=1) break elif isinstance(item, six.integer_types): if not o: o = Q() o |= Q(tags__id=item) else: searchQuery += item + if not HAYSTACK: if not o: o = Q() o |= Q(title__icontains=item) if HAYSTACK and searchQuery != "": searchIDs = search(searchQuery, m.model_class()) if searchIDs: if not o: o = Q() o |= Q(id__in=searchIDs) if o: idDict[m.model] = idDict[m.model].annotate(num_tags=Count()).filter(o) else: idDict[m.model] = idDict[m.model].none() idDict[m.model] = list(idDict[m.model].order_by(.format(orderby)).values_list(, flat=True)) lastid = request.session.get(.format(m.model), 0) if not idDict[m.model]: continue if not more: lastid = idDict[m.model][0] index = idDict[m.model].index(lastid) if more and lastid != 0: index += 1 idDict[m.model] = idDict[m.model][index:index + length] objDict[m.model] = m.model_class().objects.filter(id__in=idDict[m.model]) objDict[m.model] = objDict[m.model].select_related().prefetch_related().order_by(.format(orderby)) objDict[m.model] = list(objDict[m.model]) objects = _sortObjects(orderby, **objDict) if len(models) > 1 else objDict.values()[0] objects = objects[:length] lastids = {} for obj in objects: lastids[.format(modelmap[obj.__class__])] = obj.id for key, value in lastids.items(): request.session[key] = value for i in objects: res.append(i.json()) data[] = len(objects) if settings.DEBUG: data[] = connection.queries res.value = data return JsonResponse(res.asDict())
Filters Piece objects from self based on filters, search, and range :param tags: List of tag IDs to filter :type tags: list :param more: Return more of the same filtered set of images based on session range :type more: bool :return: Objects filtered :rtype: list
8,313
def post(self): self.set_header("Content-Type", "application/json") key = uuid.uuid4().hex metadata = json.loads(self.request.body.decode()) metadata["uuid"] = key self.database[key] = metadata result = json.dumps({"uuid": key}) self.write(result)
Register a new model (models)
8,314
def find_embedding(elt, embedding=None): result = [] module = getmodule(elt) if module is not None: visited = set() if embedding is None: embedding = module compounds = [embedding] while compounds: last_embedding = compounds[-1] if last_embedding == elt: result = compounds break else: for name in dir(last_embedding): embedded = getattr(last_embedding, name) try: if embedded not in visited: visited.add(embedded) else: continue except TypeError: pass else: embedded_module = getmodule(embedded) if embedded_module is module: compounds.append(embedded) break else: compounds.pop(-1) return result
Try to get elt embedding elements. :param embedding: embedding element. Must have a module. :return: a list of [module [,class]*] embedding elements which define elt. :rtype: list
8,315
def _plot_estimate( cls, estimate=None, confidence_intervals=None, loc=None, iloc=None, show_censors=False, censor_styles=None, ci_legend=False, ci_force_lines=False, ci_alpha=0.25, ci_show=True, at_risk_counts=False, **kwargs ): plot_estimate_config = PlotEstimateConfig( cls, estimate, confidence_intervals, loc, iloc, show_censors, censor_styles, **kwargs ) dataframe_slicer = create_dataframe_slicer(iloc, loc) if show_censors and cls.event_table["censored"].sum() > 0: cs = {"marker": "+", "ms": 12, "mew": 1} cs.update(plot_estimate_config.censor_styles) times = dataframe_slicer(cls.event_table.loc[(cls.event_table["censored"] > 0)]).index.values.astype(float) v = cls.predict(times) plot_estimate_config.ax.plot(times, v, linestyle="None", color=plot_estimate_config.colour, **cs) dataframe_slicer(plot_estimate_config.estimate_).rename( columns=lambda _: plot_estimate_config.kwargs.pop("label") ).plot(**plot_estimate_config.kwargs) if ci_show: if ci_force_lines: dataframe_slicer(plot_estimate_config.confidence_interval_).plot( linestyle="-", linewidth=1, color=[plot_estimate_config.colour], legend=ci_legend, drawstyle=plot_estimate_config.kwargs["drawstyle"], ax=plot_estimate_config.ax, alpha=0.6, ) else: x = dataframe_slicer(plot_estimate_config.confidence_interval_).index.values.astype(float) lower = dataframe_slicer(plot_estimate_config.confidence_interval_.filter(like="lower")).values[:, 0] upper = dataframe_slicer(plot_estimate_config.confidence_interval_.filter(like="upper")).values[:, 0] if plot_estimate_config.kwargs["drawstyle"] == "default": step = None elif plot_estimate_config.kwargs["drawstyle"].startswith("step"): step = plot_estimate_config.kwargs["drawstyle"].replace("steps-", "") plot_estimate_config.ax.fill_between( x, lower, upper, alpha=ci_alpha, color=plot_estimate_config.colour, linewidth=1.0, step=step ) if at_risk_counts: add_at_risk_counts(cls, ax=plot_estimate_config.ax) return plot_estimate_config.ax
Plots a pretty figure of {0}.{1} Matplotlib plot arguments can be passed in inside the kwargs, plus the following: Parameters ----------- show_censors: bool place markers at censorship events. Default: False censor_styles: dict If show_censors, this dictionary will be passed into the plot call. ci_alpha: float the transparency level of the confidence interval. Default: 0.25 ci_force_lines: bool force the confidence intervals to be line plots (versus default shaded areas). Default: False ci_show: bool show confidence intervals. Default: True ci_legend: bool if ci_force_lines is True, this is a boolean flag to add the lines' labels to the legend. Default: False at_risk_counts: bool show group sizes at time points. See function ``add_at_risk_counts`` for details. Default: False loc: slice specify a time-based subsection of the curves to plot, ex: >>> model.plot(loc=slice(0.,10.)) will plot the time values between t=0. and t=10. iloc: slice specify a location-based subsection of the curves to plot, ex: >>> model.plot(iloc=slice(0,10)) will plot the first 10 time points. Returns ------- ax: a pyplot axis object
8,316
def start_container(self): self.__container_lengths.append(self.current_container_length) self.current_container_length = 0 new_container_node = _Node() self.__container_node.add_child(new_container_node) self.__container_nodes.append(self.__container_node) self.__container_node = new_container_node
Add a node to the tree that represents the start of a container. Until end_container is called, any nodes added through add_scalar_value or start_container will be children of this new node.
8,317
def complete_task_from_id(self, task_id): if task_id is None: raise WorkflowException(self.spec, ) for task in self.task_tree: if task.id == task_id: return task.complete() msg = % task_id raise WorkflowException(self.spec, msg)
Runs the task with the given id. :type task_id: integer :param task_id: The id of the Task object.
8,318
def get_previous_next_published(self, date): previous_next = getattr(self, , None) if previous_next is None: date_year = datetime(date.year, 1, 1) date_month = datetime(date.year, date.month, 1) date_day = datetime(date.year, date.month, date.day) date_next_week = date_day + timedelta(weeks=1) previous_next = {: [None, None], : [None, None], : [None, None], : [None, None]} dates = self.get_queryset().datetimes( , , order=) for d in dates: d_year = datetime(d.year, 1, 1) d_month = datetime(d.year, d.month, 1) d_day = datetime(d.year, d.month, d.day) if d_year < date_year: previous_next[][0] = d_year.date() elif d_year > date_year and not previous_next[][1]: previous_next[][1] = d_year.date() if d_month < date_month: previous_next[][0] = d_month.date() elif d_month > date_month and not previous_next[][1]: previous_next[][1] = d_month.date() if d_day < date_day: previous_next[][0] = d_day.date() previous_next[][0] = d_day.date() - timedelta( days=d_day.weekday()) elif d_day > date_day and not previous_next[][1]: previous_next[][1] = d_day.date() if d_day > date_next_week and not previous_next[][1]: previous_next[][1] = d_day.date() - timedelta( days=d_day.weekday()) setattr(self, , previous_next) return previous_next
Returns a dict of the next and previous date periods with published entries.
8,319
def initialize(self): if not hasattr(self.application, ): log.debug() self.application.event_listener = EventListener( self.application.mod_opts, self.application.opts, ) if not hasattr(self, ): local_client = salt.client.get_local_client(mopts=self.application.opts) self.saltclients = { : local_client.run_job_async, : local_client.run_job_async, : salt.runner.RunnerClient(opts=self.application.opts).cmd_async, : None, } if not hasattr(self, ): self.ckminions = salt.utils.minions.CkMinions(self.application.opts)
Initialize the handler before requests are called
8,320
def change_quantiles(x, ql, qh, isabs, f_agg): if ql >= qh: raise ValueError("ql={} should be lower than qh={}".format(ql, qh)) div = np.diff(x) if isabs: div = np.abs(div) try: bin_cat = pd.qcut(x, [ql, qh], labels=False) bin_cat_0 = bin_cat == 0 except ValueError: return 0 ind = (bin_cat_0 & _roll(bin_cat_0, 1))[1:] if sum(ind) == 0: return 0 else: ind_inside_corridor = np.where(ind == 1) aggregator = getattr(np, f_agg) return aggregator(div[ind_inside_corridor])
First fixes a corridor given by the quantiles ql and qh of the distribution of x. Then calculates the average, absolute value of consecutive changes of the series x inside this corridor. Think about selecting a corridor on the y-Axis and only calculating the mean of the absolute change of the time series inside this corridor. :param x: the time series to calculate the feature of :type x: numpy.ndarray :param ql: the lower quantile of the corridor :type ql: float :param qh: the higher quantile of the corridor :type qh: float :param isabs: should the absolute differences be taken? :type isabs: bool :param f_agg: the aggregator function that is applied to the differences in the bin :type f_agg: str, name of a numpy function (e.g. mean, var, std, median) :return: the value of this feature :return type: float
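A hypothetical usage sketch, assuming change_quantiles and its helpers (numpy, pandas, _roll) are importable as in tsfresh; the series values are made up:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 1.5, 0.5, 3.0, 2.5, 1.0])
# Mean absolute change of x, counting only consecutive changes that start and end
# inside the 10%-90% quantile corridor.
value = change_quantiles(x, ql=0.1, qh=0.9, isabs=True, f_agg="mean")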
8,321
def revokeSystemPermission(self, login, user, perm): self.send_revokeSystemPermission(login, user, perm) self.recv_revokeSystemPermission()
Parameters: - login - user - perm
8,322
def T6(word, rules): offset = 0 try: WORD, rest = tuple(word.split(, 1)) for vvv in long_vowel_sequences(rest): i = vvv.start(2) vvv = vvv.group(2) i += (2 if phon.is_long(vvv[:2]) else 1) + offset rest = rest[:i] + + rest[i:] offset += 1 except ValueError: WORD = word for vvv in long_vowel_sequences(WORD): i = vvv.start(2) + 2 WORD = WORD[:i] + + WORD[i:] try: WORD += + rest except UnboundLocalError: pass rules += if word != WORD else return WORD, rules
If a VVV-sequence contains a long vowel, insert a syllable boundary between it and the third vowel. E.g. [kor.ke.aa], [yh.ti.öön], [ruu.an], [mää.yt.te].
8,323
def sky_fraction(self): pix_id = self._best_res_pixels() nb_pix_filled = pix_id.size return nb_pix_filled / float(3 << (2*(self.max_order + 1)))
Sky fraction covered by the MOC
8,324
def models(self): model_dict = self._build_all_dependencies() while True: model = self._get_model_without_dependencies(model_dict) if not model: break yield (model, self._models().get(model))
Generator that yields, for each model, a tuple of the model and its schema to create on AWS.
8,325
def tokenize(self, string): new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger) new_lexer.latest_newline = 0 new_lexer.string_value = None new_lexer.input(string) while True: t = new_lexer.token() if t is None: break t.col = t.lexpos - new_lexer.latest_newline yield t if new_lexer.string_value is not None: raise JsonPathLexerError()
Maps a string to an iterator over tokens. In other words: [char] -> [token]
8,326
def fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction=0): width = df_points.x.max() height = df_points.y.max() points_bbox = pd.Series([width, height], index=[, ]) fill_scale = 1 - 2 * padding_fraction assert(fill_scale > 0) scale = scale_to_fit_a_in_b(points_bbox, bounding_box) padded_scale = scale * fill_scale offset = .5 * (bounding_box - points_bbox * padded_scale) offset.index = [, ] return offset, padded_scale
Return offset and scale factor to scale ``x``, ``y`` columns of :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. Arguments --------- df_points : pandas.DataFrame A frame with at least the columns ``x`` and ``y``, containing one row per point. bounding_box: pandas.Series A `pandas.Series` containing numeric `width` and `height` values. padding_fraction : float Fraction of padding to add around points. Returns ------- (offset, scale) : (pandas.Series, float) Offset translation and scale required to fit all points in :data:`df_points` to fill :data:`bounding_box` while maintaining aspect ratio. :data:`offset` contains ``x`` and ``y`` values for the offset.
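A hypothetical usage sketch, assuming the function and its pandas dependencies are importable; the point cloud and box sizes are invented:

import pandas as pd

# Fit a 200x100 point cloud into a 50x50 bounding box with 5% padding on each side.
df_points = pd.DataFrame({'x': [0.0, 100.0, 200.0], 'y': [0.0, 50.0, 100.0]})
bounding_box = pd.Series({'width': 50.0, 'height': 50.0})
offset, scale = fit_points_in_bounding_box_params(df_points, bounding_box, padding_fraction=0.05)
# Per the docstring, the fitted coordinates would then be df_points[['x', 'y']] * scale + offset.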
8,327
def organize(dirs, config, run_info_yaml, sample_names=None, is_cwl=False, integrations=None): from bcbio.pipeline import qcsummary if integrations is None: integrations = {} logger.info("Using input YAML configuration: %s" % run_info_yaml) assert run_info_yaml and os.path.exists(run_info_yaml), \ "Did not find input sample YAML file: %s" % run_info_yaml run_details = _run_info_from_yaml(dirs, run_info_yaml, config, sample_names, is_cwl=is_cwl, integrations=integrations) remote_retriever = None for iname, retriever in integrations.items(): if iname in config: run_details = retriever.add_remotes(run_details, config[iname]) remote_retriever = retriever out = [] for item in run_details: item["dirs"] = dirs if "name" not in item: item["name"] = ["", item["description"]] elif isinstance(item["name"], six.string_types): description = "%s-%s" % (item["name"], clean_name(item["description"])) item["name"] = [item["name"], description] item["description"] = description item["resources"] = _add_remote_resources(item["resources"]) item["config"] = config_utils.update_w_custom(config, item) item.pop("algorithm", None) item = add_reference_resources(item, remote_retriever) item["config"]["algorithm"]["qc"] = qcsummary.get_qc_tools(item) item["config"]["algorithm"]["vcfanno"] = vcfanno.find_annotations(item, remote_retriever) tmp_dir = tz.get_in(["config", "resources", "tmp", "dir"], item) if tmp_dir: if os.path.expandvars(tmp_dir) == tmp_dir: tmp_dir = utils.safe_makedir(os.path.expandvars(tmp_dir)) tmp_dir = genome.abs_file_paths(tmp_dir, do_download=not integrations) item["config"]["resources"]["tmp"]["dir"] = tmp_dir out.append(item) out = _add_provenance(out, dirs, config, not is_cwl) return out
Organize run information from a passed YAML file or the Galaxy API. Creates the high level structure used for subsequent processing. sample_names is a list of samples to include from the overall file, for cases where we are running multiple pipelines from the same configuration file.
8,328
def normalize_full_name_true(decl): if decl.cache.normalized_full_name_true is None: decl.cache.normalized_full_name_true = normalize( declaration_utils.full_name(decl, with_defaults=True)) return decl.cache.normalized_full_name_true
Cached variant of normalize Args: decl (declaration.declaration_t): the declaration Returns: str: normalized name
8,329
def _set_default_resource_names(self): self.ip_config_name = .join([ self.running_instance_id, ]) self.nic_name = .join([self.running_instance_id, ]) self.public_ip_name = .join([self.running_instance_id, ])
Generate names for resources based on the running_instance_id.
8,330
def _match_filenames_w_dfs(filenames, lo_dfs): logger_dataframes.info("enter match_filenames_w_dfs") dfs = {} for filename in filenames: try: if filename in lo_dfs["chronData"]: dfs[filename] = lo_dfs["chronData"][filename] elif filename in lo_dfs["paleoData"]: dfs[filename] = lo_dfs["paleoData"][filename] except KeyError: logger_dataframes.info("filter_dfs: KeyError: missing data frames keys") logger_dataframes.info("exit match_filenames_w_dfs") return dfs
Match a list of filenames to their data frame counterparts. Return data frames :param list filenames: Filenames of data frames to retrieve :param dict lo_dfs: All data frames :return dict: Filenames and data frames (filtered)
8,331
def click(self): if self._click_extension is None: from .click_ext import ClickExtension self._click_extension = ClickExtension( config=self ) return self._click_extension
click extension Returns: ClickExtension
8,332
def register(self, resource, endpoint): if not issubclass(resource, Resource): raise TypeError('Not an instance of ``Resource`` subclass') self._registry[endpoint] = resource resource.connect_signal_receivers()
This method registers a resource with the router and connects all receivers to their respective signals :param resource: The resource class to register :type resource: A subclass of ``Resource`` class :param endpoint: the name of the resource's endpoint as it appears in the URL :type endpoint: str
8,333
def maybe_show_asm(showasm, tokens): if showasm: stream = showasm if hasattr(showasm, ) else sys.stdout for t in tokens: stream.write(str(t)) stream.write()
Show the asm based on the showasm flag (or file object), writing to the appropriate stream depending on the type of the flag. :param showasm: Flag which determines whether the ingested code is written to sys.stdout or not. (It is also possible to pass a file-like object, into which the asm will be written). :param tokens: The asm tokens to show.
8,334
def _cutoff(self, coeffs, vscale): bnd = self._threshold(vscale) inds = np.nonzero(abs(coeffs) >= bnd) if len(inds[0]): N = inds[0][-1] else: N = 0 return N+1
Compute cutoff index after which the coefficients are deemed negligible.
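A small numpy sketch of the cutoff rule above, with an arbitrarily chosen threshold: keep everything up to and including the last coefficient whose magnitude is at or above the threshold.

import numpy as np

coeffs = np.array([1.0, 0.5, 1e-3, 2e-2, 1e-16, 1e-17])
bnd = 1e-2
inds = np.nonzero(np.abs(coeffs) >= bnd)
N = inds[0][-1] if len(inds[0]) else 0
N + 1   # -> 4, i.e. coeffs[:4] are kept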
8,335
def size_as_bytes(size_, prefix): prefix = prefix.upper() assert prefix in si_prefixes exponent = si_prefixes.index(prefix) + 1 return int(size_ * (1024.0 ** exponent))
>>> size_as_bytes(7.5, 'T')
8246337208320
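The code above refers to a module-level si_prefixes sequence that is not shown. A plausible definition consistent with the doctest (an assumption, not the original source) would be:

# 'K' -> 1024**1, 'M' -> 1024**2, ..., 'T' -> 1024**4, matching the doctest above.
si_prefixes = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')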
8,336
def remove_from_group(self, group, user): data = {: group, : user} return self.post(, data)
Remove a user from a group :type user: str :param user: User's email :type group: str :param group: Group name :rtype: dict :return: an empty dictionary
8,337
def QA_util_date_stamp(date): datestr = str(date)[0:10] date = time.mktime(time.strptime(datestr, )) return date
Convert a date string such as '2018-01-01' into a float timestamp, of the same kind returned by time.time(). :param date: date string -- must be in the format 2018-01-01, length 10 :return: float
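A stand-alone sketch of the same conversion; the format string is an assumption, since the original strptime format is elided above (the docstring fixes the input to the 10-character form 2018-01-01):

import time

def date_to_stamp(date) -> float:
    # Assumes the elided format string was '%Y-%m-%d'.
    return time.mktime(time.strptime(str(date)[0:10], '%Y-%m-%d'))

date_to_stamp('2018-01-01')  # e.g. 1514736000.0 in UTC+8; the value depends on the local timezone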
8,338
def installed(name, env=None, saltenv=, user=None): ret = {: name, : {}, : , : True} packages = [] if os.path.exists(name) or name.startswith(): if name.startswith(): lines = __salt__[](name, saltenv) lines = lines.split() elif os.path.exists(name): f = open(name, mode=) lines = f.readlines() f.close() for line in lines: line = line.strip() if line != and not line.startswith(): line = line.split()[0].strip() packages.append(line) else: packages = [pkg.strip() for pkg in name.split()] conda_list = __salt__[](env=env, user=user) def extract_info(pkgname): pkgname, pkgversion = package, pkgname, pkgversion = (package.split()[0], package.split()[1] ) if in package else (package, pkgversion) pkgname, pkgversion = (package.split()[0], package.split()[1] ) if in package else (pkgname, pkgversion) pkgname, pkgversion = (package.split()[0], package.split()[1] ) if in package else (pkgname, pkgversion) return pkgname, pkgversion installed, failed, old = 0, 0, 0 for package in packages: pkgname, pkgversion = extract_info(package) conda_pkgname = pkgname + * (26 - len(pkgname)) + pkgversion if conda_pkgname not in conda_list: installation = __salt__[](package, env=env, user=user) if installation[] == 0: ret[][package] = installed += 1 else: ret[][package] = installation failed += 1 else: old += 1 comments = [] if installed > 0: comments.append(.format(installed)) if failed > 0: ret[] = False comments.append(.format(failed)) if old > 0: comments.append(.format(old)) ret[] = .join(comments) return ret
Installs a single package, list of packages (comma separated) or packages in a requirements.txt Checks if the package is already in the environment. The check occurs here so `conda list` and `pip freeze` only need to run once name name of the package(s) or path to the requirements.txt env : None environment name or path where to put the new environment if None (default) will use the default conda environment (`~/anaconda/bin`) saltenv : 'base' Salt environment. Useful when the name is a file using the Salt file system (e.g. `salt://.../reqs.txt`) user The user under which to run the commands
8,339
def set_chain_info(self, chain_id, chain_name, num_groups): self.chain_id_list.append(chain_id) self.chain_name_list.append(chain_name) self.groups_per_chain.append(num_groups)
Set the chain information. :param chain_id: the asym chain id from mmCIF :param chain_name: the auth chain id from mmCIF :param num_groups: the number of groups this chain has
8,340
def count_flag_reads(self, file_name, flag, paired_end): param = " -c -f" + str(flag) if file_name.endswith("sam"): param += " -S" return self.samtools_view(file_name, param=param)
Counts the number of reads with the specified flag. :param str file_name: name of reads file :param str flag: sam flag value to be read :param bool paired_end: This parameter is ignored; samtools automatically correctly responds depending on the data in the bamfile. We leave the option here just for consistency, since all the other counting functions require the parameter. This makes it easier to swap counting functions during pipeline development.
8,341
def generate_random_id(size=6, chars=string.ascii_uppercase + string.digits): return "".join(random.choice(chars) for x in range(size))
Generate random id numbers.
8,342
def class_get_trait_help(cls, trait, inst=None): assert inst is None or isinstance(inst, cls) lines = [] header = "--%s.%s=<%s>" % (cls.__name__, trait.name, trait.__class__.__name__) lines.append(header) if inst is not None: lines.append(indent( % getattr(inst, trait.name), 4)) else: try: dvr = repr(trait.get_default_value()) except Exception: dvr = None
Get the help string for a single trait. If `inst` is given, its current trait values will be used in place of the class default.
8,343
def fold_string(input_string, max_width): new_string = input_string if isinstance(input_string, six.string_types): if max_width < len(input_string): new_string = textwrap.fill(input_string, max_width) return new_string
Fold a string within a maximum width. Parameters: input_string: The string of data to go into the cell max_width: Maximum width of cell. Data is folded into multiple lines to fit into this width. Return: String representing the folded string
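The folding itself is just textwrap.fill, which inserts newlines so that no line exceeds the requested width; a quick illustration with made-up cell data:

import textwrap

print(textwrap.fill("a fairly long cell value that will not fit", 16))
# a fairly long
# cell value that
# will not fit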
8,344
def get_sun_times(dates, lon, lat, time_zone): df = pd.DataFrame(index=dates, columns=[, , , ]) doy = np.array([(d - d.replace(day=1, month=1)).days + 1 for d in df.index]) day_angle_b = np.deg2rad((360. / 365.25) * (doy - 79.346)) declination = np.deg2rad( 0.3723 + 23.2567 * np.sin(day_angle_b) - 0.7580 * np.cos(day_angle_b) + 0.1149 * np.sin(2*day_angle_b) + 0.3656 * np.cos(2*day_angle_b) - 0.1712 * np.sin(3*day_angle_b) + 0.0201 * np.cos(3*day_angle_b) ) day_angle_s = 2 * np.pi * (doy - 1) / 365. eq_time = 12. / np.pi * ( 0.000075 + 0.001868 * np.cos( day_angle_s) - 0.032077 * np.sin( day_angle_s) - 0.014615 * np.cos(2*day_angle_s) - 0.040849 * np.sin(2*day_angle_s) ) standard_meridian = time_zone * 15. delta_lat_time = (lon - standard_meridian) * 24. / 360. omega_nul_arg = -np.tan(np.deg2rad(lat)) * np.tan(declination) omega_nul = np.arccos(omega_nul_arg) sunrise = 12. * (1. - (omega_nul) / np.pi) - delta_lat_time - eq_time sunset = 12. * (1. + (omega_nul) / np.pi) - delta_lat_time - eq_time sunnoon = 12. * (1.) - delta_lat_time - eq_time pos = omega_nul_arg < -1 sunrise[pos] = sunnoon[pos] - 12 sunset[pos] = sunnoon[pos] + 12 pos = omega_nul_arg > 1 sunrise[pos] = sunnoon[pos] sunset[pos] = sunnoon[pos] daylength = sunset - sunrise sunrise[sunrise < 0] += 24 sunset[sunset > 24] -= 24 df.sunrise = sunrise df.sunnoon = sunnoon df.sunset = sunset df.daylength = daylength return df
Computes the times of sunrise, solar noon, and sunset for each day. Parameters ---- dates : datetime lon : longitude in DecDeg lat : latitude in DecDeg time_zone : timezone Returns ---- DataFrame: [sunrise, sunnoon, sunset, daylength] in decimal hours
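A hypothetical call, assuming the function is importable and the (elided) result columns are named as in the docstring; the coordinates are Berlin, time zone UTC+1:

import pandas as pd

dates = pd.date_range('2020-06-01', periods=7, freq='D')
sun = get_sun_times(dates, lon=13.4, lat=52.5, time_zone=1)
# sun[['sunrise', 'sunnoon', 'sunset', 'daylength']] holds decimal hours per day.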
8,345
def open_external_editor(filename=None, sql=None): message = None filename = filename.strip().split(, 1)[0] if filename else None sql = sql or MARKER = query = click.edit(u.format(sql=sql, marker=MARKER), filename=filename, extension=) if filename: try: with open(filename, encoding=) as f: query = f.read() except IOError: message = % filename if query is not None: query = query.split(MARKER, 1)[0].rstrip() else: query = sql return (query, message)
Open external editor, wait for the user to type in their query, return the query. :return: tuple of (query, message).
8,346
def script_post_save(model, os_path, contents_manager, **kwargs): from nbconvert.exporters.script import ScriptExporter if model[] != : return global _script_exporter if _script_exporter is None: _script_exporter = ScriptExporter(parent=contents_manager) log = contents_manager.log base, ext = os.path.splitext(os_path) script, resources = _script_exporter.from_filename(os_path) script_fname = base + resources.get(, ) log.info("Saving script /%s", to_api_path(script_fname, contents_manager.root_dir)) with io.open(script_fname, , encoding=) as f: f.write(script)
Convert notebooks to Python scripts after save with nbconvert. Replaces `ipython notebook --script`.
8,347
def _default_arguments(self, obj): if not (inspect.isfunction(obj) or inspect.ismethod(obj)): if inspect.isclass(obj): obj = (getattr(obj,,None) or getattr(obj,,None)) elif hasattr(obj, ): obj = obj.__call__ try: args,_,_1,defaults = inspect.getargspec(obj) if defaults: return args[-len(defaults):] except TypeError: pass return []
Return the list of default arguments of obj if it is callable, or empty list otherwise.
8,348
def _pairwise_chisq(self): return [ self._chi_squared( mr_subvar_proportions, self._margin[idx], self._opposite_axis_margin[idx] / np.sum(self._opposite_axis_margin[idx]), ) for (idx, mr_subvar_proportions) in enumerate(self._proportions) ]
Pairwise comparisons (Chi-Square) along axis, as numpy.ndarray. Returns a list of square and symmetric matrices of test statistics for the null hypothesis that each vector along *axis* is equal to each other.
8,349
def get_updated_data(self, old_data: Dict[str, LinkItem]) -> Dict[str, LinkItem]: if not self.download_data: return {} new_link_item_dict = {} for link, link_item in tqdm(self.download_data.items(), desc="Compare with save", unit="item", leave=True, mininterval=1, ncols=100, disable=dynamic_data.DISABLE_TQDM): if (link not in old_data) or (link_item.time > old_data[link].time): new_link_item_dict[link] = link_item return new_link_item_dict
Get the links which need to be downloaded by comparing the old and the new data. :param old_data: old data :type old_data: Dict[str, ~unidown.plugin.link_item.LinkItem] :return: data which is newer or does not exist in the old one :rtype: Dict[str, ~unidown.plugin.link_item.LinkItem]
8,350
def find_holes(db_module, db, table_name, column_name, _range, filter=None): if not filter: filter = {"match_all": {}} _range = wrap(_range) params = { "min": _range.min, "max": _range.max - 1, "column_name": db_module.quote_column(column_name), "table_name": db_module.quote_column(table_name), "filter": esfilter2sqlwhere(filter) } min_max = db.query(, params)[0] db.execute("SET @last={{min}}-1", {"min": _range.min}) ranges = db.query(, params) if ranges: ranges.append({"min": min_max.max, "max": _range.max}) else: if min_max.min: ranges.append({"min": _range.min, "max": min_max.min}) ranges.append({"min": min_max.max, "max": _range.max}) else: ranges.append(_range) return ranges
FIND HOLES IN A DENSE COLUMN OF INTEGERS RETURNS A LIST OF {"min": min, "max": max} OBJECTS
8,351
def get_search_results(portal_type=None, uid=None, **kw): if uid is not None: logger.info("UID found, returning the object immediately" % uid) return u.to_list(get_object_by_uid(uid)) include_portal = False if u.to_string(portal_type) == "Plone Site": include_portal = True if "Plone Site" in u.to_list(req.get("portal_type")): include_portal = True results = search(portal_type=portal_type, uid=uid, **kw) if include_portal: results = list(results) + u.to_list(get_portal()) return results
Search the catalog and return the results :returns: Catalog search results :rtype: iterable
8,352
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config, bins=251, annotator_id=0): if config["hier"]: ref_times, ref_labels, ref_levels = \ msaf.io.read_hier_references( ref_file, annotation_id=annotator_id, exclude_levels=["segment_salami_function"]) else: jam = jams.load(ref_file, validate=False) ann = jam.search(namespace=)[annotator_id] ref_inter, ref_labels = ann.to_interval_values() est_inter, est_labels = io.read_estimations(est_file, boundaries_id, labels_id, **config) logging.info("Evaluating %s" % os.path.basename(est_file)) if config["hier"]: assert len(est_inter) == len(est_labels), "Same number of levels " \ "are required in the boundaries and labels for the hierarchical " \ "evaluation." est_times = [] est_labels = [] est_inter = sorted(est_inter, key=lambda level: len(level)) for inter in est_inter: est_times.append(msaf.utils.intervals_to_times(inter)) est_labels.append(np.ones(len(est_times[-1]) - 1) * -1) utils.align_end_hierarchies(est_times, ref_times, thres=1) est_hier = [utils.times_to_intervals(times) for times in est_times] ref_hier = [utils.times_to_intervals(times) for times in ref_times] res = {} res["t_recall10"], res["t_precision10"], res["t_measure10"] = \ mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10) res["t_recall15"], res["t_precision15"], res["t_measure15"] = \ mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15) res["track_id"] = os.path.basename(est_file)[:-5] return res else: return compute_results(ref_inter, est_inter, ref_labels, est_labels, bins, est_file)
Computes the results by using the ground truth dataset identified by the annotator parameter. Return ------ results : dict Dictionary of the results (see function compute_results).
8,353
def validate_twilio_signature(func=None, backend_name=): def _dec(view_func): @functools.wraps(view_func, assigned=available_attrs(view_func)) def _wrapped_view(request, *args, **kwargs): backend = kwargs.get(, backend_name) config = settings.INSTALLED_BACKENDS[backend][] validator = RequestValidator(config[]) signature = request.META.get(, ) url = request.build_absolute_uri() body = {} if request.method == : body = request.POST require_validation = config.get(, True) if validator.validate(url, body, signature) or not require_validation: return view_func(request, *args, **kwargs) else: return HttpResponseBadRequest() return _wrapped_view if func is None: return _dec else: return _dec(func)
View decorator to validate requests from Twilio per http://www.twilio.com/docs/security.
8,354
def import_url(self,caseSensitiveNetworkCollectionKeys=None,\ caseSensitiveNetworkKeys=None,dataTypeList=None,\ DataTypeTargetForNetworkCollection=None,DataTypeTargetForNetworkList=None,\ delimiters=None,delimitersForDataList=None,firstRowAsColumnNames=None,\ KeyColumnForMapping=None,KeyColumnForMappingNetworkList=None,\ keyColumnIndex=None,newTableName=None,startLoadRow=None,\ TargetNetworkCollection=None,TargetNetworkList=None,url=None,\ WhereImportTable=None,verbose=None): PARAMS=set_param([,\ ,,,\ ,,,\ ,,,\ ,,,,\ ,,],[caseSensitiveNetworkCollectionKeys,\ caseSensitiveNetworkKeys,dataTypeList,DataTypeTargetForNetworkCollection,\ DataTypeTargetForNetworkList,delimiters,delimitersForDataList,\ firstRowAsColumnNames,KeyColumnForMapping,KeyColumnForMappingNetworkList,\ keyColumnIndex,newTableName,startLoadRow,TargetNetworkCollection,\ TargetNetworkList,url,WhereImportTable]) response=api(url=self.__url+"/import url", PARAMS=PARAMS, method="POST", verbose=verbose) return response
Similar to Import Table this uses a long list of input parameters to specify the attributes of the table, the mapping keys, and the destination table for the input. :param caseSensitiveNetworkCollectionKeys (string, optional): Determines whether capitalization is considered in matching and sorting :param caseSensitiveNetworkKeys (string, optional): Determines whether capitalization is considered in matching and sorting :param dataTypeList (string, optional): List of column data types ordered by column index (e.g. "string,int,long,double,boolean,intlist" or just "s,i,l,d,b,il") :param DataTypeTargetForNetworkCollection (string, optional): Select whether to import the data as Node Table Columns, Edge Table Columns, or Network Table Columns :param DataTypeTargetForNetworkList (string, optional): The data type of the targets :param delimiters (string, optional): The list of delimiters that separate columns in the table. :param delimitersForDataList (string, optional): The delimiters between elements of list columns in the table. :param firstRowAsColumnNames (string, optional): If the first imported row contains column names, set this to true. :param KeyColumnForMapping (string, optional): The column in the network to use as the merge key :param KeyColumnForMappingNetworkList (string, optional): The column in the network to use as the merge key :param keyColumnIndex (string, optional): The column that contains the key values for this import. These values will be used to match with the key values in the network. :param newTableName (string, optional): The title of the new table :param startLoadRow (string, optional): The first row of the input table to load. This allows the skipping of headers that are not part of the import. :param TargetNetworkCollection (string, optional): The network collection to use for the table import :param TargetNetworkList (string, optional): The list of networks into which the table is imported :param url (string): The URL of the file or resource that provides the table or network to be imported. :param WhereImportTable (string, optional): Determines what network(s) the imported table will be associated with (if any). A table can be imported into a Network Collection, Selected networks or to an unassigned table.
8,355
def get_texts_and_labels(sentence_chunk): words = sentence_chunk.split() texts = [] labels = [] for word in words: word = word.strip() if len(word) > 0: toks = word.split() texts.append(toks[0].strip()) labels.append(toks[-1].strip()) return texts, labels
Given a sentence chunk, extract original texts and labels.
8,356
def format_dateaxis(subplot, freq, index): if isinstance(index, ABCPeriodIndex): majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_locator(majlocator) subplot.xaxis.set_minor_locator(minlocator) majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=False, plot_obj=subplot) minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True, minor_locator=True, plot_obj=subplot) subplot.xaxis.set_major_formatter(majformatter) subplot.xaxis.set_minor_formatter(minformatter) subplot.format_coord = functools.partial(_format_coord, freq) elif isinstance(index, ABCTimedeltaIndex): subplot.xaxis.set_major_formatter( TimeSeries_TimedeltaFormatter()) else: raise TypeError() pylab.draw_if_interactive()
Pretty-formats the date axis (x-axis). Major and minor ticks are automatically set for the frequency of the current underlying series. As the dynamic mode is activated by default, changing the limits of the x axis will intelligently change the positions of the ticks.
8,357
def getActiveAxes(self): active = [] for i in range(len(self._axisId)): if self._menu.IsChecked(self._axisId[i]): active.append(i) return active
Return a list of the selected axes.
8,358
def add_param(self, param_key, param_val): self.params.append([param_key, param_val]) if param_key == : self.success = param_val
adds parameters as key value pairs
8,359
def find_elements_by_class_name(self, name): return self.find_elements(by=By.CLASS_NAME, value=name)
Finds elements by class name. :Args: - name: The class name of the elements to find. :Returns: - list of WebElement - a list with elements if any was found. An empty list if not :Usage: :: elements = driver.find_elements_by_class_name('foo')
8,360
def build_plan(description, graph, targets=None, reverse=False): if reverse: graph = graph.transposed() if targets: nodes = [] for target in targets: for k, step in graph.steps.items(): if step.name == target: nodes.append(step.name) graph = graph.filtered(nodes) return Plan(description=description, graph=graph)
Builds a plan from a list of steps. Args: description (str): an arbitrary string to describe the plan. graph (:class:`Graph`): the :class:`Graph` to execute. targets (list): an optional list of step names to filter the graph to. If provided, only these steps, and their transitive dependencies will be executed. If no targets are specified, every node in the graph will be executed. reverse (bool): If provided, the graph will be walked in reverse order (dependencies last).
8,361
async def _set_annotations(entity_tag, annotations, connection): log.debug(, entity_tag) facade = client.AnnotationsFacade.from_connection(connection) args = client.EntityAnnotations( entity=entity_tag, annotations=annotations, ) return await facade.Set([args])
Set annotations on the specified entity. :param annotations map[string]string: the annotations as key/value pairs.
8,362
def dict(self, **kwargs): return dict( time = self.timestamp, serial_number = self.serial_number, value = self.value, battery = self.battery, supervision = self.supervision, **kwargs )
Dictionary representation.
8,363
def get_desc2nts(self, **kws_usr): kws_nts = {k:v for k, v in kws_usr.items() if k in self.keys_nts} return self.get_desc2nts_fnc(**kws_nts)
Return grouped, sorted namedtuples in either format: flat, sections.
8,364
def cced(self, user, include=None): return self._query_zendesk(self.endpoint.cced, , id=user, include=include)
Retrieve the tickets this user is cc'd into. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id
8,365
def _NormalizeTime(self, time): if isinstance(time, rdfvalue.RDFDatetime): return time.AsMicrosecondsSinceEpoch() if isinstance(time, rdfvalue.Duration): return time.microseconds return int(time)
Normalize a time to be an int measured in microseconds.
8,366
def parse_config_file(config_path, verb=3): import json import unicodedata with open(config_path) as config_file: data = json.load(config_file, encoding="utf8") if verb > 2: print(" def get_if_exist(key, default): return data[key] if key in data else default return Bunch(testfiles=get_if_exist("testfiles", []), breakfailed=get_if_exist("breakfailed", True), remove_testfiles=get_if_exist("remove_testfiles", []), onlyfailed=get_if_exist("onlyfailed", False), verb=get_if_exist("verb", 3), dump=get_if_exist("dump", 0), crc=get_if_exist("crc", 1), scapy=get_if_exist("scapy", "scapy"), preexec=get_if_exist("preexec", {}), global_preexec=get_if_exist("global_preexec", ""), outfile=get_if_exist("outputfile", sys.stdout), local=get_if_exist("local", False), num=get_if_exist("num", None), modules=get_if_exist("modules", []), kw_ok=get_if_exist("kw_ok", []), kw_ko=get_if_exist("kw_ko", []), format=get_if_exist("format", "ansi"))
Parse provided json to get configuration Empty default json: { "testfiles": [], "breakfailed": true, "onlyfailed": false, "verb": 3, "dump": 0, "crc": true, "scapy": "scapy", "preexec": {}, "global_preexec": "", "outputfile": null, "local": true, "format": "ansi", "num": null, "modules": [], "kw_ok": [], "kw_ko": [] }
8,367
def set_attribute(self, name, value): if value is True: self.widget.set(name, name) elif value is False: del self.widget.attrib[name] else: self.widget.set(name, str(value))
Default handler for those not explicitly defined
8,368
def run_powerflow(self, session, method=, export_pypsa=False, debug=False): if method == : pypsa_io.delete_powerflow_tables(session) for grid_district in self.mv_grid_districts(): if export_pypsa: export_pypsa_dir = repr(grid_district.mv_grid) else: export_pypsa_dir = None grid_district.mv_grid.run_powerflow(session, method=, export_pypsa_dir=export_pypsa_dir, debug=debug) elif method == : for grid_district in self.mv_grid_districts(): if export_pypsa: export_pypsa_dir = repr(grid_district.mv_grid) else: export_pypsa_dir = None grid_district.mv_grid.run_powerflow(session, method=, export_pypsa_dir=export_pypsa_dir, debug=debug)
Performs power flow calculation for all MV grids Args: session : sqlalchemy.orm.session.Session Database session method: str Specify export method If method='db' grid data will be exported to database If method='onthefly' grid data will be passed to PyPSA directly (default) export_pypsa: bool If True PyPSA networks will be exported as csv to output/debug/grid/<MV-GRID_NAME>/ debug: bool, defaults to False If True, information is printed during process
8,369
def scroll_event(self, widget, event): x, y = event.x, event.y num_degrees = 0 direction = 0 self.last_win_x, self.last_win_y = x, y self.logger.debug("scroll deg=%f direction=%f" % ( num_degrees, direction)) data_x, data_y = self.check_cursor_location() return self.make_ui_callback(, direction, num_degrees, data_x, data_y)
Called when the mouse wheel is turned in the widget (and maybe for finger scrolling in the trackpad). Adjust method signature as appropriate for callback.
8,370
def _delete_file(configurator, path): path = os.path.join(configurator.target_directory, path) os.remove(path) try: os.removedirs(os.path.dirname(path)) except OSError: pass
Remove the file and remove its parent directories if empty
8,371
def statexml2pdb(topology, state, output=None): state = Restart.from_xml(state) system = SystemHandler.load(topology, positions=state.positions) if output is None: output = topology + system.write_pdb(output)
Given an OpenMM xml file containing the state of the simulation, generate a PDB snapshot for easy visualization.
8,372
def release(self, connection: Connection): assert not self._closed key = connection.key host_pool = self._host_pools[key] _logger.debug(, key) yield from host_pool.release(connection) force = self.count() > self._max_count yield from self.clean(force=force)
Put a connection back in the pool. Coroutine.
8,373
def define_simulation_graph(batch_env, algo_cls, config): step = tf.Variable(0, False, dtype=tf.int32, name=) is_training = tf.placeholder(tf.bool, name=) should_log = tf.placeholder(tf.bool, name=) do_report = tf.placeholder(tf.bool, name=) force_reset = tf.placeholder(tf.bool, name=) algo = algo_cls(batch_env, step, is_training, should_log, config) done, score, summary = tools.simulate( batch_env, algo, should_log, force_reset) message = tf.logging.info(message.format(tools.count_weights())) return tools.AttrDict(locals())
Define the algorithm and environment interaction. Args: batch_env: In-graph environments object. algo_cls: Constructor of a batch algorithm. config: Configuration object for the algorithm. Returns: Object providing graph elements via attributes.
8,374
def filter(self, *args, **kwargs): if in kwargs: kwargs = self.get_filter_args_with_path(False, **kwargs) return super(FileNodeManager, self).filter(*args, **kwargs)
Works just like the default Manager's :func:`filter` method, but you can pass an additional keyword argument named ``path`` specifying the full **path of the folder whose immediate child objects** you want to retrieve, e.g. ``"path/to/folder"``.
8,375
def getheaderAnim(self, im): bb = b bb += intToBin(im.size[0]) bb += intToBin(im.size[1]) bb += b return bb
getheaderAnim(im) Get animation header. To replace PIL's getheader()[0]
8,376
def publish_minions(self): log.debug() minions = {} log.debug() for minion, minion_info in six.iteritems(self.minions): log.debug(minion) curr_minion = {} curr_minion.update(minion_info) curr_minion.update({: minion}) minions[minion] = curr_minion log.debug() ret = {: minions} self.handler.write_message( salt.utils.json.dumps(ret) + str())
Publishes minions as a list of dicts.
8,377
def make_tx_signatures(txs_to_sign, privkey_list, pubkey_list): assert len(privkey_list) == len(pubkey_list) == len(txs_to_sign) signatures = [] for cnt, tx_to_sign in enumerate(txs_to_sign): sig = der_encode_sig(*ecdsa_raw_sign(tx_to_sign.rstrip(), privkey_list[cnt])) err_msg = % ( sig, tx_to_sign, pubkey_list[cnt], ) assert ecdsa_raw_verify(tx_to_sign, der_decode_sig(sig), pubkey_list[cnt]), err_msg signatures.append(sig) return signatures
Loops through txs_to_sign and makes signatures using privkey_list and pubkey_list Not sure what privkeys and pubkeys to supply? Use get_input_addresses() to return a list of addresses. Matching those addresses to keys is up to you and how you store your private keys. A future version of this library may handle this for you, but it is not trivial. Note that if spending multisig funds the process is significantly more complicated. Each tx_to_sign must be signed by *each* private key. In a 2-of-3 transaction, two of [privkey1, privkey2, privkey3] must sign each tx_to_sign http://dev.blockcypher.com/#multisig-transactions
8,378
def recursively_save_dict_contents_to_group(h5file, path, dic): for key, item in dic.items(): if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes, tuple, list)): h5file[path + str(key)] = item elif isinstance(item, dict): recursively_save_dict_contents_to_group(h5file, path + key + , item) else: raise ValueError( % type(item))
Parameters ---------- h5file: h5py file to be written to path: path within the h5py file to the saved dictionary dic: Python dictionary to be converted to hdf5 format
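A hypothetical usage sketch, assuming the function is importable and that the elided path separator in the recursive call is '/'; note that plain Python ints and floats are not in the accepted type list above, so numpy scalars are used:

import numpy as np
import h5py

data = {'signal': np.arange(5), 'meta': {'name': 'test', 'scale': np.float64(0.5)}}
with h5py.File('example.h5', 'w') as h5file:
    recursively_save_dict_contents_to_group(h5file, '/', data)
# Creates datasets /signal, /meta/name and /meta/scale.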
8,379
def clean(ctx): logger = logging.getLogger(__name__) dirnames = [, , , ] dirnames = [os.path.join(ctx.obj[], dirname) for dirname in dirnames] for dirname in dirnames: if os.path.isdir(dirname): shutil.rmtree(dirname) logger.debug(, dirname) else: logger.debug(, dirname)
Clean Sphinx build products. Use this command to clean out build products after a failed build, or in preparation for running a build from a clean state. This command removes the following directories from the ``pipelines_lsst_io`` directory: - ``_build`` (the Sphinx build itself) - ``modules`` (symlinks to the module doc directories of Stack packages) - ``packages`` (symlinks to the package doc directories of Stack packages) - ``py-api`` (pages created by automodapi for the Python API reference)
8,380
def directives(entrystream, type=None): for directive in entry_type_filter(entrystream, tag.Directive): if not type or type == directive.type: yield directive
Pull directives out of the specified entry stream. :param entrystream: a stream of entries :param type: retrieve only directives of the specified type; set to :code:`None` to retrieve all directives
8,381
def as_unicode(s, encoding=): if isinstance(s, six.text_type): return s elif isinstance(s, six.binary_type): return s.decode(encoding) else: raise ValueError(.format(six.text_type, six.binary_type))
Force conversion of given string to unicode type. Unicode is ``str`` type for Python 3.x and ``unicode`` for Python 2.x . If the string is already in unicode, then no conversion is done and the same string is returned. Parameters ---------- s: str or bytes (Python3), str or unicode (Python2) The string to convert to unicode. encoding: str The encoding of the input string (default: utf-8) Raises ------ ValueError In case an input of invalid type was passed to the function. Returns ------- ``str`` for Python3 or ``unicode`` for Python 2.
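A quick illustration of the two accepted input types, assuming the function and six are importable:

assert as_unicode(u'café') == u'café'                            # already unicode: returned unchanged
assert as_unicode(b'caf\xc3\xa9', encoding='utf-8') == u'café'   # bytes: decoded with the given encoding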
8,382
def block_ip(ip_address):
    if not ip_address:
        return  # no IP, nothing to block
    if config.DISABLE_IP_LOCKOUT:
        # no-op when IP lockout is disabled
        return
    key = get_ip_blocked_cache_key(ip_address)
    if config.COOLOFF_TIME:
        REDIS_SERVER.set(key, 'blocked', config.COOLOFF_TIME)
    else:
        REDIS_SERVER.set(key, 'blocked')
    send_ip_block_signal(ip_address)
Block the given IP address, honoring the configured cool-off time.
8,383
def add_section(self, section): if not issubclass(section.__class__, SubSection): raise TypeError("Argument should be a subclass of SubSection, \ not :" + str(section.__class__)) self.sections[section.name] = section return section
Add a section inside an Element; the section must be a subclass of SubSection. Because sections can be nested, this class can be used to represent a tree.
8,384
def datetime_utc_to_local(utc): ts = time.time() cur = datetime.datetime.fromtimestamp(ts) cur_utc = datetime.datetime.utcfromtimestamp(ts) offset = cur - cur_utc t = utc d = datetime.timedelta(hours = 2) while d > _MINUTE: local = t + offset tm = local.timetuple() tm = tm[0:8] + (0, ) ts = time.mktime(tm) u = datetime.datetime.utcfromtimestamp(ts) diff = u - utc if diff < _MINUTE and diff > -_MINUTE: break if diff > _NULLDELTA: offset -= d else: offset += d d //= 2 return local
An ugly hack to convert a naive :std:`datetime.datetime` object containing UTC time to a naive :std:`datetime.datetime` object with local time. It seems the standard Python 2.3 library doesn't provide any better way to do that.
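For example, converting the current UTC time yields the local wall-clock time; the difference is the local UTC offset:

import datetime

utc_now = datetime.datetime.utcnow()
local_now = datetime_utc_to_local(utc_now)
print(local_now - utc_now)   # e.g. 2:00:00 when the local zone is UTC+2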
8,385
def find_max(self, predicate, max_=None): if predicate(self.value): max_ = self.value next_node = self._greater else: next_node = self._lesser if next_node is None: return max_ return next_node.find_max(predicate, max_)
Return the largest item in or under this node that satisfies *predicate*.
8,386
def _parse_doc(doc): lines = doc.split("\n") descriptions = list(itertools.takewhile(_checker(_KEYWORDS), lines)) if len(descriptions) < 3: description = lines[0] else: description = "{0}\n\n{1}".format( lines[0], textwrap.dedent("\n".join(descriptions[2:]))) args = list(itertools.takewhile( _checker(_KEYWORDS_OTHERS), itertools.dropwhile(_checker(_KEYWORDS_ARGS), lines))) argmap = {} if len(args) > 1: for pair in args[1:]: kv = [v.strip() for v in pair.split(":")] if len(kv) >= 2: argmap[kv[0]] = ":".join(kv[1:]) return dict(headline=descriptions[0], description=description, args=argmap)
Parse a docstring.

Parse a docstring and extract three components: the headline, the description, and a map of argument names to help texts.

Args:
    doc: docstring.

Returns:
    a dictionary.
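For example, on a small Google-style docstring (expected results shown in comments, assuming the module's keyword lists recognize the Args/Returns headers):

doc = """Add two numbers.

Compute the sum of the inputs.

Args:
  a: first operand.
  b: second operand.

Returns:
  the sum.
"""
info = _parse_doc(doc)
print(info['headline'])   # 'Add two numbers.'
print(info['args'])       # {'a': 'first operand.', 'b': 'second operand.'}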
8,387
def process_from_webservice(id_val, id_type='pmcid', source='pmc',
                            with_grounding=True):
    if with_grounding:
        fmt = '%s.normed/%s/%s'
    else:
        fmt = '%s/%s/%s'
    resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val))
    if resp.status_code != 200:
        raise RLIMSP_Error("Bad status code: %d - %s"
                           % (resp.status_code, resp.reason))
    rp = RlimspProcessor(resp.json())
    rp.extract_statements()
    return rp
Return an output from RLIMS-p for the given PubMed ID or PMC ID. Parameters ---------- id_val : str A PMCID, with the prefix PMC, or pmid, with no prefix, of the paper to be "read". id_type : str Either 'pmid' or 'pmcid'. The default is 'pmcid'. source : str Either 'pmc' or 'medline', whether you want pmc fulltext or medline abstracts. with_grounding : bool The RLIMS-P web service provides two endpoints, one pre-grounded, the other not so much. The grounded endpoint returns far less content, and may perform some grounding that can be handled by the grounding mapper. Returns ------- :py:class:`indra.sources.rlimsp.processor.RlimspProcessor` An RlimspProcessor which contains a list of extracted INDRA Statements in its statements attribute.
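Typical use, with a placeholder PMC ID (substitute a real one):

rp = process_from_webservice('PMC0000000')   # placeholder ID for illustration
statements = rp.statements                   # the extracted INDRA Statements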
8,388
def write_ln(self, *text, sep=' '):
    if self.text and self.text[-1] != '\n':
        self.text += '\n'
    self.text += markdown.text(*text, sep) + '\n'
    return self
Write a line of text, making sure it starts on a fresh line and ends with a newline.

:param text: text fragments to join and write
:param sep: separator placed between the fragments
:return: self, allowing calls to be chained
8,389
def format_symbol(symbol):
    fixed = []
    s = symbol.strip()
    s = s[0].upper() + s[1:].lower()
    for c in s:
        if c.isalpha():
            fixed.append(' ' + c + ' ')
        elif c.isspace():
            fixed.append(' ')
        elif c.isdigit():
            fixed.append(c)
        elif c == '-':
            # rotoinversion marker, e.g. '-3'
            fixed.append(' ' + c)
        elif c == '/':
            # mirror-plane separator, e.g. '2/m'
            fixed.append(' ' + c)
    s = ''.join(fixed).strip()
    return ' '.join(s.split())
Returns a well formatted Hermann-Mauguin symbol as expected by the database, by correcting the case and adding missing or removing duplicated spaces.
8,390
def get_edges(self): edge_list = [[value, key] for key in self.variable_parents for value in self.variable_parents[key]] return edge_list
Returns the edges of the network Examples -------- >>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml") >>> reader.get_edges() [['family-out', 'light-on'], ['family-out', 'dog-out'], ['bowel-problem', 'dog-out'], ['dog-out', 'hear-bark']]
8,391
def roll_sparse(x, shift, axis=0):
    if not scipy.sparse.isspmatrix(x):
        return np.roll(x, shift, axis=axis)

    if axis not in [0, 1, -1]:
        raise ParameterError('axis must be one of (0, 1, -1)')

    shift = np.mod(shift, x.shape[axis])

    if shift == 0:
        return x.copy()

    fmt = x.format
    if axis == 0:
        x = x.tocsc()
    elif axis in (-1, 1):
        x = x.tocsr()

    # lil format supports the sliced assignment below
    x_r = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)

    idx_in = [slice(None)] * x.ndim
    idx_out = [slice(None)] * x_r.ndim

    idx_in[axis] = slice(0, -shift)
    idx_out[axis] = slice(shift, None)
    x_r[tuple(idx_out)] = x[tuple(idx_in)]

    idx_out[axis] = slice(0, shift)
    idx_in[axis] = slice(-shift, None)
    x_r[tuple(idx_out)] = x[tuple(idx_in)]

    return x_r.asformat(fmt)
Sparse matrix roll This operation is equivalent to ``numpy.roll``, but operates on sparse matrices. Parameters ---------- x : scipy.sparse.spmatrix or np.ndarray The sparse matrix input shift : int The number of positions to roll the specified axis axis : (0, 1, -1) The axis along which to roll. Returns ------- x_rolled : same type as `x` The rolled matrix, with the same format as `x` See Also -------- numpy.roll Examples -------- >>> # Generate a random sparse binary matrix >>> X = scipy.sparse.lil_matrix(np.random.randint(0, 2, size=(5,5))) >>> X_roll = roll_sparse(X, 2, axis=0) # Roll by 2 on the first axis >>> X_dense_r = roll_sparse(X.toarray(), 2, axis=0) # Equivalent dense roll >>> np.allclose(X_roll, X_dense_r.toarray()) True
8,392
def set(self, field, value):
    if field == 'uuid':
        raise ValueError("The 'uuid' field is immutable and cannot be set.")
    elif field == 'key':
        raise ValueError("The 'key' field is immutable; use 'reset_key' instead.")
    else:
        self.data[field] = value
Sets the value of an app field. :param str field: The name of the app field. Trying to set immutable fields ``uuid`` or ``key`` will raise a ValueError. :param value: The new value of the app field. :raises: ValueError
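Assuming ``app`` is an object exposing this method (the field name and values below are illustrative):

app.set('name', 'My Application')   # ordinary field: stored in app.data
try:
    app.set('key', 'abc-123')       # immutable field
except ValueError as err:
    print(err)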
8,393
def copy_children(self, foreign_id, existing_node): url = "{}/api/v2/pages/{}/".format(self.base_url, foreign_id) self.log( ACTION, "Copying Children", {"existing node type": str(type(existing_node))}) try: self.log(ACTION, "Requesting Data", {"url": url}) response = requests.get(url) content = json.loads(response.content) self.log(SUCCESS, "Data Fetched Successfully", {"url": url}) main_language_child_ids = content["meta"]["main_language_children"] if main_language_child_ids: for main_language_child_id in main_language_child_ids: self.copy_page_and_children( foreign_id=main_language_child_id, parent_id=existing_node.id, depth=1) else: self.log(SUCCESS, "No children to copy") except Exception as e: self.log(ERROR, "Copying Children", {"url": url, "exception": e})
Initiates copying of the tree, with existing_node acting as the root.
8,394
def nodes(self): if self._nodes is None: self._nodes = layout_nodes(self, only_nodes=True) return self._nodes
Computes the node positions the first time they are requested if no explicit node information was supplied.
8,395
def add_noise(Y, sigma): return Y + np.random.normal(0, sigma, Y.shape)
Adds zero-mean Gaussian noise with standard deviation sigma to Y.
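A quick numpy check of the behaviour:

import numpy as np

Y = np.zeros((1000, 2))
Y_noisy = add_noise(Y, sigma=0.1)
print(Y_noisy.std())   # close to 0.1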
8,396
def _cell_to_python(cell): data_type, value = cell.data_type, cell.value if type(cell) is EmptyCell: return None elif data_type == "f" and value == "=TRUE()": return True elif data_type == "f" and value == "=FALSE()": return False elif cell.number_format.lower() == "yyyy-mm-dd": return str(value).split(" 00:00:00")[0] elif cell.number_format.lower() == "yyyy-mm-dd hh:mm:ss": return str(value).split(".")[0] elif cell.number_format.endswith("%") and isinstance(value, Number): value = Decimal(str(value)) return "{:%}".format(value) elif value is None: return "" else: return value
Convert an openpyxl `Cell` object to the corresponding Python object.
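For instance, converting every cell of a workbook's active sheet (the file name is illustrative):

from openpyxl import load_workbook

wb = load_workbook('example.xlsx')
ws = wb.active
rows = [[_cell_to_python(cell) for cell in row] for row in ws.iter_rows()]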
8,397
def median(self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs): axis = self._get_axis_number(axis) if axis is not None else 0 if numeric_only is not None and not numeric_only: self._validate_dtypes(numeric_only=True) return self._reduce_dimension( self._query_compiler.median( axis=axis, skipna=skipna, level=level, numeric_only=numeric_only, **kwargs ) )
Computes median across the DataFrame. Args: axis (int): The axis to take the median on. skipna (bool): True to skip NA values, false otherwise. Returns: The median of the DataFrame. (Pandas series)
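In use this mirrors pandas, e.g. (assuming modin is installed):

import modin.pandas as pd

df = pd.DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 9.0]})
print(df.median())   # a: 2.0, b: 5.0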
8,398
def config(self, show_row_hdrs=True, show_col_hdrs=True,
           show_col_hdr_in_cell=False, auto_resize=True):
    self.show_row_hdrs = show_row_hdrs
    self.show_col_hdrs = show_col_hdrs
    self.show_col_hdr_in_cell = show_col_hdr_in_cell
    self.auto_resize = auto_resize
Override the in-class params:
@param show_row_hdrs : show row headers
@param show_col_hdrs : show column headers
@param show_col_hdr_in_cell : embed the column header in each cell
@param auto_resize : automatically resize to the size of the terminal
8,399
def slice(index, template): try: return re.match("^.*{[%i]}" % index, template).group() except AttributeError: raise ValueError("Index %i not found in template: %s" % (index, template))
Slice a template based on it's positional argument Arguments: index (int): Position at which to slice template (str): Template to slice Example: >>> slice(0, "{cwd}/{0}/assets/{1}/{2}") '{cwd}/{0}' >>> slice(1, "{cwd}/{0}/assets/{1}/{2}") '{cwd}/{0}/assets/{1}'