Dataset columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, length 26 to 79.6k), "docstring" (string, length 1 to 46.9k).
387,800
def getmap(self, path, query=None):
    code, data, ctype = self.get(path, query)
    if ctype != 'application/json':
        self.log.error("Expecting JSON from GET of '%s', got '%s'", self.lastpath, ctype)
        raise HttpError(code=400, content_type='text/plain',
                        content='Expected application/json, got ' + ctype)
    try:
        result = json.loads(data)
    except Exception as e:
        self.log.error("Could not load JSON content from GET %r -- %s", self.lastpath, e)
        raise HttpError(code=400, content_type='text/plain',
                        content='Could not load JSON content')
    return result
Performs a GET request where the response content type is required to be "application/json" and the content is a JSON-encoded data structure. The decoded structure is returned.
387,801
def iterfiles(self):
    try:
        for path in self.order:
            yield self.files[path]
    except:
        for winfile in self.files.values():
            yield winfile
Yield all WinFile objects.
387,802
def fov_for_height_and_distance(height, distance):
    vfov_deg = np.degrees(2.0 * np.arctan(height * 0.5 / distance))
    return vfov_deg
Calculate the FOV needed to get a given frustum height at a given distance.
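For illustration, a self-contained sketch of the same pinhole-geometry formula, vfov = 2 * arctan((h/2) / d) in degrees; the example numbers below are assumed, not taken from the original project.

import numpy as np

def fov_for_height_and_distance(height, distance):
    # vertical FOV (degrees) whose frustum spans `height` at `distance`
    return np.degrees(2.0 * np.arctan(height * 0.5 / distance))

# assumed example: a frustum 2 units tall viewed from 1 unit away needs a 90-degree vertical FOV
print(fov_for_height_and_distance(2.0, 1.0))  # 90.0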
387,803
def data(self, **query): data = self.gencloud.project_data(self.id) query[] = self.id ids = set(d[] for d in self.gencloud.api.dataid.get(**query)[]) return [d for d in data if d.id in ids]
Query for Data object annotation.
387,804
def __create_channel_run(self, channel, username, token): data = { : channel.get_node_id().hex, : self.__get_chef_name(), : __version__, : username, : token, : config.DOMAIN, } try: response = requests.post( config.sushi_bar_channel_runs_url(), data=data, auth=AUTH) response.raise_for_status() return response.json()[] except Exception as e: config.LOGGER.error( % e) return None
Sends a post request to create the channel run.
387,805
def download_extract(url): logger.info("Downloading %s", url) request = urllib2.Request(url) request.add_header(, ) opener = urllib2.build_opener() with tempfile.TemporaryFile(suffix=, dir=env.WEATHER_DATA_PATH) \ as local_file: logger.debug(, local_file.name) local_file.write(opener.open(request).read()) compressed_file = zipfile.ZipFile(local_file, ) logger.debug(, compressed_file) compressed_file.extractall(env.WEATHER_DATA_PATH) local_file.close()
Download and extract the file.
387,806
def save(self):
    with open(self.filename, 'w') as file:
        self.prune()
        self.data['version'] = self.version
        json.dump(self.data, file, sort_keys=True, indent=2)
Save data.
387,807
def _get_range(book, range_, sheet):
    filename = None
    if isinstance(book, str):
        filename = book
        book = opxl.load_workbook(book, data_only=True)
    elif isinstance(book, opxl.Workbook):
        pass
    else:
        raise TypeError

    if _is_range_address(range_):
        sheet_names = [name.upper() for name in book.sheetnames]
        index = sheet_names.index(sheet.upper())
        data = book.worksheets[index][range_]
    else:
        data = _get_namedrange(book, range_, sheet)
        if data is None:
            raise ValueError(
                "Named range '%s' not found in %s" % (range_, filename or book)
            )
    return data
Return a range as nested dict of openpyxl cells.
387,808
def forwards(apps, schema_editor): Event = apps.get_model(, ) Work = apps.get_model(, ) WorkRole = apps.get_model(, ) WorkSelection = apps.get_model(, ) for event in Event.objects.filter(kind=): work.slug = generate_slug(work.pk) work.save() WorkSelection.objects.create( event=event, work=work ) for role in event.roles.all(): WorkRole.objects.create( creator=role.creator, work=work, role_name=role.role_name, role_order=role.role_order ) role.delete()
Having added the new 'exhibition' Work type, we're going to assume that every Event of type 'museum' should actually have one Exhibition attached. So, we'll add one, with the same title as the Event. And we'll move all Creators from the Event to the Exhibition.
387,809
def extract_transformers_from_source(source):
    lines = source.split('\n')
    linenumbers = []
    for number, line in enumerate(lines):
        if FROM_EXPERIMENTAL.match(line):
            add_transformers(line)
            linenumbers.insert(0, number)
    # delete in reverse order so earlier indices stay valid
    for number in linenumbers:
        del lines[number]
    return '\n'.join(lines)
Scan a source for lines of the form "from __experimental__ import transformer1 [,...]" identifying transformers to be used. Each such line is passed to the add_transformers function, after which it is removed from the code to be executed.
387,810
def save_related(self, request, form, formsets, change):
    super(MenuItemAdmin, self).save_related(request, form, formsets, change)
    self.model.objects.rebuild()
Rebuilds the tree after saving items related to parent.
387,811
def _remove_redundancy_routers(self, context, router_ids, ports, delete_ha_groups=False): subnets_info = [{: port[][0][]} for port in ports] for r_id in router_ids: for i in range(len(subnets_info)): self.remove_router_interface(context, r_id, subnets_info[i]) LOG.debug("Removed interface on %(s_id)s to redundancy router " "with %(r_id)s", {: ports[i][], : r_id}) if delete_ha_groups and r_id == router_ids[0]: self._delete_ha_group(context, ports[i][]) self.delete_router(context, r_id) LOG.debug("Deleted redundancy router %s", r_id)
Deletes all interfaces of the specified redundancy routers and then the redundancy routers themselves.
387,812
def do_rename(argdict):
    site = make_site_obj(argdict)
    slug = argdict['slug']
    newtitle = argdict['newtitle']
    try:
        site.rename_page(slug, newtitle)
        print "Renamed page."
    except ValueError:
        print "Cannot rename. A page with the given slug does not exist."
        sys.exit()
Rename a page.
387,813
def define(self, value, lineno, namespace=None):
    if self.defined:
        error(lineno, "label '%s' already defined at line %i" % (self.name, self.lineno))
    self.value = value
    self.lineno = lineno
    self.namespace = NAMESPACE if namespace is None else namespace
Defines label value. It can be anything. Even an AST
387,814
def _genA(self):
    p, df = self._p, self.df
    A = np.zeros((p, p))
    for i in range(p):
        A[i, i] = sqrt(st.chi2.rvs(df - i))
    for j in range(p - 1):
        for i in range(j + 1, p):
            A[i, j] = np.random.randn()
    return A
Generate the matrix A in the Bartlett decomposition. A is a lower-triangular matrix with A(i, j) ~ sqrt(Chisq(df - i + 1)) when i == j, and A(i, j) ~ Normal(0, 1) when i > j.
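As a hedged usage sketch (not part of the original source), the Bartlett factor A can be combined with the Cholesky factor L of a scale matrix to draw one Wishart sample W = L A A^T L^T; the helper and the example scale matrix below are assumptions made for illustration.

import numpy as np
from scipy import stats

def bartlett_A(p, df, rng):
    # lower-triangular A: sqrt(chi2(df - i)) on the diagonal, N(0, 1) below it
    A = np.zeros((p, p))
    for i in range(p):
        A[i, i] = np.sqrt(stats.chi2.rvs(df - i, random_state=rng))
    for j in range(p - 1):
        for i in range(j + 1, p):
            A[i, j] = rng.standard_normal()
    return A

rng = np.random.default_rng(0)
scale = np.array([[2.0, 0.3], [0.3, 1.0]])   # assumed example scale matrix
L = np.linalg.cholesky(scale)
A = bartlett_A(p=2, df=5, rng=rng)
W = L @ A @ A.T @ L.T                        # one draw from Wishart(df=5, scale)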
387,815
def load_data(path, fmt=None, bg_data=None, bg_fmt=None,
              meta_data={}, holo_kw={}, as_type="float32"):
    path = pathlib.Path(path).resolve()
    # check for allowed meta data keys
    for kk in meta_data:
        if kk not in qpimage.meta.DATA_KEYS:
            msg = "Meta data key not allowed: {}".format(kk)
            raise ValueError(msg)
    # drop empty meta data values
    for kk in list(meta_data.keys()):
        if meta_data[kk] in [np.nan, None]:
            meta_data.pop(kk)
    # determine and verify the file format
    if fmt is None:
        fmt = guess_format(path)
    else:
        if not formats_dict[fmt].verify(path):
            msg = "Wrong file format '{}' for '{}'!".format(fmt, path)
            raise WrongFileFormatError(msg)
    dataobj = formats_dict[fmt](path=path,
                                meta_data=meta_data,
                                holo_kw=holo_kw,
                                as_type=as_type)
    # background data
    if bg_data is not None:
        if isinstance(bg_data, qpimage.QPImage):
            dataobj.set_bg(bg_data)
        else:
            bg_path = pathlib.Path(bg_data).resolve()
            if bg_fmt is None:
                bg_fmt = guess_format(bg_path)
            bgobj = formats_dict[bg_fmt](path=bg_path,
                                         meta_data=meta_data,
                                         holo_kw=holo_kw,
                                         as_type=as_type)
            dataobj.set_bg(bgobj)
    return dataobj
Load experimental data Parameters ---------- path: str Path to experimental data file or folder fmt: str The file format to use (see `file_formats.formats`). If set to `None`, the file format is guessed. bg_data: str Path to background data file or `qpimage.QPImage` bg_fmt: str The file format to use (see `file_formats.formats`) for the background. If set to `None`, the file format is guessed. meta_data: dict Meta data (see `qpimage.meta.DATA_KEYS`) as_type: str Defines the data type that the input data is cast to. The default is "float32" which saves memory. If high numerical accuracy is required (does not apply for a simple 2D phase analysis), set this to double precision ("float64"). Returns ------- dataobj: SeriesData or SingleData Object that gives lazy access to the experimental data.
387,816
def format(self, record):
    s = super(ANSIFormatter, self).format(record)
    if hasattr(self.context, 'ansi'):
        s = self.context.ansi(s, **self.get_sgr(record))
    return s
Overridden method that applies SGR codes to log messages.
387,817
def _register_view(self, app, resource, *urls, **kwargs): endpoint = kwargs.pop(, None) or resource.__name__.lower() self.endpoints.add(endpoint) if endpoint in getattr(app, , {}): existing_view_class = app.view_functions[endpoint].__dict__[] if existing_view_class != resource: raise ValueError( .format(endpoint, existing_view_class.__name__)) if not hasattr(resource, ): rule = self._make_url(url, None) app.add_url_rule(rule, view_func=resource_func, **kwargs)
Bind resources to the app. :param app: an actual :class:`flask.Flask` app :param resource: :param urls: :param endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower` Can be used to reference this route in :meth:`flask.url_for` :type endpoint: str Additional keyword arguments not specified above will be passed as-is to :meth:`flask.Flask.add_url_rule`. SIDE EFFECT Implements the one mentioned in add_resource
387,818
def rm(self, container_alias): title = % self.__class__.__name__ input_fields = { : container_alias } for key, value in input_fields.items(): object_title = % (title, key, str(value)) self.fields.validate(value, % key, object_title) sys_cmd = % container_alias output_lines = self.command(sys_cmd).split() return output_lines[0]
a method to remove an active container :param container_alias: string with name or id of container :return: string with container id
387,819
def _decode_embedded_list(src):
    output = []
    for elem in src:
        if isinstance(elem, dict):
            elem = _decode_embedded_dict(elem)
        elif isinstance(elem, list):
            elem = _decode_embedded_list(elem)
        elif isinstance(elem, bytes):
            try:
                elem = elem.decode()
            except UnicodeError:
                pass
        output.append(elem)
    return output
Convert embedded bytes to strings if possible. List helper.
387,820
def update_item(self, payload, last_modified=None):
    to_send = self.check_items([payload])[0]
    if last_modified is None:
        modified = payload["version"]
    else:
        modified = last_modified
    ident = payload["key"]
    headers = {"If-Unmodified-Since-Version": str(modified)}
    headers.update(self.default_headers())
    req = requests.patch(
        url=self.endpoint
        + "/{t}/{u}/items/{id}".format(
            t=self.library_type, u=self.library_id, id=ident
        ),
        headers=headers,
        data=json.dumps(to_send),
    )
    self.request = req
    try:
        req.raise_for_status()
    except requests.exceptions.HTTPError:
        error_handler(req)
    return True
Update an existing item Accepts one argument, a dict containing Item data
387,821
def from_local_name(acs, attr, name_format):
    for aconv in acs:
        if aconv.name_format == name_format:
            return aconv.to_format(attr)
    return attr
:param acs: List of AttributeConverter instances :param attr: attribute name as string :param name_format: Which name-format it should be translated to :return: An Attribute instance
387,822
def getFilename(name):
    name = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name)
    while ".." in name:
        name = name.replace("..", ".")
    while "__" in name:
        name = name.replace("__", "_")
    if name.startswith((".", "-")):
        name = name[1:]
    return name
Get a filename from given name without dangerous or incompatible characters.
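A minimal standalone sketch of the same sanitizer, restated for illustration; the function name and the input string are assumed example values.

import re

def get_filename(name):
    # replace disallowed characters, then collapse repeated "." and "_"
    name = re.sub(r"[^0-9a-zA-Z_\-\.]", "_", name)
    while ".." in name:
        name = name.replace("..", ".")
    while "__" in name:
        name = name.replace("__", "_")
    if name.startswith((".", "-")):
        name = name[1:]
    return name

print(get_filename("my file: draft??.txt"))  # my_file_draft_.txt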
387,823
def _get_config_value(profile, config_name):
    config = __salt__['config.option'](profile)
    if not config:
        raise CommandExecutionError(
            'Authentication information could not be found for the '
            '\'{0}\' profile.'.format(profile)
        )

    config_value = config.get(config_name)
    if config_value is None:
        raise CommandExecutionError(
            'The \'{0}\' parameter was not found in the \'{1}\' '
            'profile.'.format(
                config_name,
                profile
            )
        )

    return config_value
Helper function that returns a profile's configuration value based on the supplied configuration name. profile The profile name that contains configuration information. config_name The configuration item's name to use to return configuration values.
387,824
def _get_record_attrs(out_keys):
    if len(out_keys) == 1:
        attr = list(out_keys.keys())[0]
        if out_keys[attr]:
            return attr, out_keys[attr]
    return None, None
Check for records, a single key plus output attributes.
387,825
def AddClient(self, client):
    client_id, keywords = self.AnalyzeClient(client)
    self.AddKeywordsForName(client_id, keywords)
Adds a client to the index. Args: client: A VFSGRRClient record to add or update.
387,826
def pin(package, version, checks, marker, resolving, lazy, quiet): root = get_root() package_name = package.lower() version = version.lower() for check_name in sorted(os.listdir(root)): pinned_reqs_file = os.path.join(root, check_name, ) resolved_reqs_file = os.path.join(root, check_name, ) if os.path.isfile(pinned_reqs_file): pinned_packages = {package.name: package for package in read_packages(pinned_reqs_file)} if package not in pinned_packages and check_name not in checks: continue if resolving: pre_packages = list(read_packages(resolved_reqs_file)) else: pre_packages = list(itervalues(pinned_packages)) if not quiet: echo_info(.format(check_name)) if version == : del pinned_packages[package_name] else: pinned_packages[package_name] = Package(package_name, version, marker) package_list = sorted(itervalues(pinned_packages)) write_file_lines(pinned_reqs_file, (.format(package) for package in package_list)) if not quiet: echo_waiting() if resolving: result = resolve_requirements(pinned_reqs_file, resolved_reqs_file, lazy=lazy) if result.code: abort(result.stdout + result.stderr) if not quiet: post_packages = read_packages(resolved_reqs_file if resolving else pinned_reqs_file) display_package_changes(pre_packages, post_packages, indent=)
Pin a dependency for all checks that require it. This can also resolve transient dependencies. Setting the version to `none` will remove the package. You can specify an unlimited number of additional checks to apply the pin for via arguments.
387,827
def response_hook(self, r, **kwargs):
    if r.status_code == 401:
        # handle server authentication
        www_authenticate = r.headers.get('www-authenticate', '').lower()
        auth_type = _auth_type_from_header(www_authenticate)
        if auth_type is not None:
            return self.retry_using_http_NTLM_auth(
                'www-authenticate',
                'Authorization',
                r,
                auth_type,
                kwargs
            )
    elif r.status_code == 407:
        # handle proxy authentication
        proxy_authenticate = r.headers.get('proxy-authenticate', '').lower()
        auth_type = _auth_type_from_header(proxy_authenticate)
        if auth_type is not None:
            return self.retry_using_http_NTLM_auth(
                'proxy-authenticate',
                'Proxy-authorization',
                r,
                auth_type,
                kwargs
            )
    return r
The actual hook handler.
387,828
def disable_multicolor(self): for color in [, , ]: self.multicolorscales[color].config(state=tk.DISABLED, bg=) self.multicolorframes[color].config(bg=) self.multicolorlabels[color].config(bg=) self.multicolordropdowns[color].config(bg=, state=tk.DISABLED) self.multicolorminscale[color].config(bg=, state=tk.DISABLED) self.multicolormaxscale[color].config(bg=, state=tk.DISABLED) self.singlecolorscale.config(state=tk.NORMAL, bg=self.single_color_theme) self.singlecolorframe.config(bg=self.single_color_theme) self.singlecolorlabel.config(bg=self.single_color_theme) self.singlecolordropdown.config(bg=self.single_color_theme, state=tk.NORMAL) self.singlecolorminscale.config(bg=self.single_color_theme, state=tk.NORMAL) self.singlecolormaxscale.config(bg=self.single_color_theme, state=tk.NORMAL)
swap from the multicolor image to the single color image
387,829
def AssignTasksToClient(self, client_id): rules = self.Get(self.Schema.RULES) if not rules: return 0 if data_store.RelationalDBEnabled(): last_foreman_run = self._GetLastForemanRunTimeRelational(client_id) else: last_foreman_run = self._GetLastForemanRunTime(client_id) latest_rule = max(rule.created for rule in rules) if latest_rule <= last_foreman_run: return 0 if data_store.RelationalDBEnabled(): try: self._SetLastForemanRunTimeRelational(client_id, latest_rule) except db.UnknownClientError: pass if not data_store.RelationalDBEnabled(): self._SetLastForemanRunTime(client_id, latest_rule) relevant_rules = [] expired_rules = False now = time.time() * 1e6 for rule in rules: if rule.expires < now: expired_rules = True continue if rule.created <= int(last_foreman_run): continue relevant_rules.append(rule) if data_store.RelationalDBEnabled(): client_data = data_store.REL_DB.ReadClientFullInfo(client_id) if client_data is None: return else: client_data = aff4.FACTORY.Open(client_id, mode="rw", token=self.token) actions_count = 0 for rule in relevant_rules: if self._EvaluateRules(rule, client_data): actions_count += self._RunActions(rule, client_id) if expired_rules: self.ExpireRules() return actions_count
Examines our rules and starts up flows based on the client. Args: client_id: Client id of the client for tasks to be assigned. Returns: Number of assigned tasks.
387,830
def update_hacluster_dns_ha(service, relation_data, crm_ocf=): assert_charm_supports_dns_ha() settings = [, , , ] hostname_group = [] for setting in settings: hostname = config(setting) if hostname is None: log( .format(setting), DEBUG) continue m = re.search(, setting) if m: endpoint_type = m.group(1) raise DNSHAException(msg)
Configure DNS-HA resources based on provided configuration @param service: Name of the service being configured @param relation_data: Pointer to dictionary of relation data. @param crm_ocf: Corosync Open Cluster Framework resource agent to use for DNS HA
387,831
def binned_bitsets_from_list(list=[]):
    last_chrom = None
    last_bitset = None
    bitsets = dict()
    for l in list:
        chrom = l[0]
        if chrom != last_chrom:
            if chrom not in bitsets:
                bitsets[chrom] = BinnedBitSet(MAX)
            last_chrom = chrom
            last_bitset = bitsets[chrom]
        start, end = int(l[1]), int(l[2])
        last_bitset.set_range(start, end - start)
    return bitsets
Read a list into a dictionary of bitsets
387,832
def RemoveDevice(self, object_path):
    adapter = mockobject.objects[self.path]
    adapter.EmitSignal(ADAPTER_IFACE, 'DeviceRemoved', 'o', [object_path])
Remove (forget) a device
387,833
def is_contextfree(self):
    for lhs, rhs in self.rules:
        if len(lhs) != 1:
            return False
        if lhs[0] not in self.nonterminals:
            return False
    return True
Returns True iff the grammar is context-free.
387,834
def unregister(self, name):
    try:
        name = name.name
    except AttributeError:
        pass
    return self.pop(name, None)
Unregister function by name.
387,835
def check_load(grid, mode): crit_branches = {} crit_stations = [] if mode == : load_factor_mv_line_lc_normal = float(cfg_ding0.get(, )) load_factor_mv_cable_lc_normal = float(cfg_ding0.get(, )) load_factor_mv_line_fc_normal = float(cfg_ding0.get(, )) load_factor_mv_cable_fc_normal = float(cfg_ding0.get(, )) mw2kw = 1e3 kw2mw = 1e-3 elif mode == : raise NotImplementedError if crit_branches: logger.info(.format( len(crit_branches))) if crit_stations: logger.info(.format( len(crit_stations))) return crit_branches, crit_stations
Checks for over-loading of branches and transformers for MV or LV grid. Parameters ---------- grid : GridDing0 Grid identifier. mode : str Kind of grid ('MV' or 'LV'). Returns ------- :obj:`dict` Dict of critical branches with max. relative overloading, and the following format:: { branch_1: rel_overloading_1, ..., branch_n: rel_overloading_n } :any:`list` of :obj:`GridDing0` List of critical transformers with the following format:: [trafo_1, ..., trafo_m] Notes ----- Lines'/cables' max. capacity (load case and feed-in case) are taken from [#]_. References ---------- .. [#] dena VNS See Also -------- ding0.flexopt.reinforce_measures.reinforce_branches_current : ding0.flexopt.reinforce_measures.reinforce_branches_voltage :
387,836
def compile_relative_distances(self, sympy_accesses=None):
    if sympy_accesses is None:
        sympy_accesses = self.compile_sympy_accesses()

    sympy_distances = defaultdict(list)
    for var_name, accesses in sympy_accesses.items():
        for i in range(1, len(accesses)):
            sympy_distances[var_name].append((accesses[i-1] - accesses[i]).simplify())

    return sympy_distances
Return load and store distances between accesses. :param sympy_accesses: optionally restrict accesses, default from compile_sympy_accesses() e.g. if accesses are to [+N, +1, -1, -N], relative distances are [N-1, 2, N-1] returned is a dict of list of sympy expressions, for each variable
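A small hedged sketch of the docstring's example using sympy directly; the symbol name and access offsets below are assumed for illustration, not taken from the library.

import sympy as sp

N = sp.Symbol("N", positive=True)
accesses = [N, sp.Integer(1), sp.Integer(-1), -N]      # offsets [+N, +1, -1, -N]

# difference between each pair of consecutive accesses, as in the docstring
distances = [sp.simplify(a - b) for a, b in zip(accesses, accesses[1:])]
print(distances)  # [N - 1, 2, N - 1]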
387,837
def get_pending_withdrawals(self, currency=None):
    return self._api_query(path_dict={
        API_V2_0: '/key/balance/getpendingwithdrawals'
    }, options={'currencyname': currency} if currency else None,
        protection=PROTECTION_PRV)
Used to view your pending withdrawals Endpoint: 1.1 NO EQUIVALENT 2.0 /key/balance/getpendingwithdrawals :param currency: String literal for the currency (ie. BTC) :type currency: str :return: pending withdrawals in JSON :rtype : list
387,838
def new(cls, settings, *args, **kwargs):
    logger.debug('Creating new instance for cloud: %s' % settings['cloud'])
    cloud = settings['cloud']
    if cloud == 'bare':
        self = BareInstance(settings=settings, *args, **kwargs)
    elif cloud == 'aws':
        self = AWSInstance(settings=settings, *args, **kwargs)
    elif cloud == 'gcp':
        self = GCPInstance(settings=settings, *args, **kwargs)
    else:
        raise DSBException('Unknown cloud: %s' % cloud)
    return self
Create a new Cloud instance based on the Settings
387,839
def compile(self, source, name=None, filename=None, raw=False,
            defer_init=False):
    source_hint = None
    try:
        if isinstance(source, string_types):
            source_hint = source
            source = self._parse(source, name, filename)
        source = self._generate(source, name, filename,
                                defer_init=defer_init)
        if raw:
            return source
        if filename is None:
            filename = '<template>'
        else:
            filename = encode_filename(filename)
        return self._compile(source, filename)
    except TemplateSyntaxError:
        exc_info = sys.exc_info()
    self.handle_exception(exc_info, source_hint=source_hint)
Compile a node or template source code. The `name` parameter is the load name of the template after it was joined using :meth:`join_path` if necessary, not the filename on the file system. the `filename` parameter is the estimated filename of the template on the file system. If the template came from a database or memory this can be omitted. The return value of this method is a python code object. If the `raw` parameter is `True` the return value will be a string with python code equivalent to the bytecode returned otherwise. This method is mainly used internally. `defer_init` is use internally to aid the module code generator. This causes the generated code to be able to import without the global environment variable to be set. .. versionadded:: 2.4 `defer_init` parameter added.
387,840
def decorator_handle(tokens):
    defs = []
    decorates = []
    for i, tok in enumerate(tokens):
        if "simple" in tok and len(tok) == 1:
            decorates.append("@" + tok[0])
        elif "test" in tok and len(tok) == 1:
            varname = decorator_var + "_" + str(i)
            defs.append(varname + " = " + tok[0])
            decorates.append("@" + varname)
        else:
            raise CoconutInternalException("invalid decorator tokens", tok)
    return "\n".join(defs + decorates) + "\n"
Process decorators.
387,841
def _extract_coeffs(self, imt):
    C_HR = self.COEFFS_HARD_ROCK[imt]
    C_BC = self.COEFFS_BC[imt]
    C_SR = self.COEFFS_SOIL_RESPONSE[imt]
    SC = self.COEFFS_STRESS[imt]
    return C_HR, C_BC, C_SR, SC
Extract dictionaries of coefficients specific to required intensity measure type.
387,842
def jtype(c): ct = c[] return ct if ct != else .format(ct, c.get())
Return a string with the data type of a value, for JSON data.
387,843
def _bool_segments(array, start=0, delta=1, minlen=1):
    array = iter(array)
    i = 0
    while True:
        try:
            val = next(array)
        except StopIteration:
            return
        if val:
            n = 1
            try:
                while next(array):
                    n += 1
            except StopIteration:
                return
            finally:
                if n >= minlen:
                    yield (start + i * delta, start + (i + n) * delta)
            i += n
        i += 1
Yield segments of consecutive `True` values in a boolean array Parameters ---------- array : `iterable` An iterable of boolean-castable values. start : `float` The value of the first sample on the indexed axis (e.g.the GPS start time of the array). delta : `float` The step size on the indexed axis (e.g. sample duration). minlen : `int`, optional The minimum number of consecutive `True` values for a segment. Yields ------ segment : `tuple` ``(start + i * delta, start + (i + n) * delta)`` for a sequence of ``n`` consecutive True values starting at position ``i``. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3. The datatype of the values returned will be the larger of the types of ``start`` and ``delta``. Examples -------- >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1])) [(1, 2), (5, 8), (9, 10)] >>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1] ... start=100., delta=0.1)) [(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)]
387,844
def sticker_templates(): voc = DisplayList() stickers = getStickerTemplates() for sticker in stickers: voc.add(sticker.get(), sticker.get()) if voc.index == 0: logger.warning() return voc
It returns the registered stickers in the system. :return: a DisplayList object
387,845
def exclude(source, keys, *, transform=None):
    check = keys if callable(keys) else lambda key: key in keys
    return {key: transform(source[key]) if transform else source[key]
            for key in source if not check(key)}
Returns a dictionary excluding keys from a source dictionary. :source: a dictionary :keys: a set of keys, or a predicate function that accepting a key :transform: a function that transforms the values
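For illustration, the helper is small enough to restate as a self-contained sketch; the example dict and calls below are assumed values.

def exclude(source, keys, *, transform=None):
    # `keys` may be a container of keys or a predicate over keys
    check = keys if callable(keys) else (lambda key: key in keys)
    return {key: transform(source[key]) if transform else source[key]
            for key in source if not check(key)}

row = {"id": 7, "name": "ada", "password": "hunter2"}                 # assumed example
print(exclude(row, {"password"}))                                     # {'id': 7, 'name': 'ada'}
print(exclude(row, lambda k: k.startswith("pass"), transform=str))    # {'id': '7', 'name': 'ada'}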
387,846
def coerce(self, values):
    if isinstance(values, compat.basestring):
        values = tuple(value.strip() for value in values.split(','))
    opt_iter = tuple(copy.deepcopy(self._option) for value in values)
    for opt_obj, val in compat.zip(opt_iter, values):
        opt_obj.__set__(None, val)
    return opt_iter
Convert an iterable of literals to an iterable of options. Args: values (iterable or string): An iterable of raw values to convert into options. If the value is a string it is assumed to be a comma-separated list and will be split before processing. Returns: iterable: An iterable of option values initialized with the raw values from `values`. Raises: TypeError: If `values` is not iterable or string. TypeError: If the underlying option raises a TypeError. ValueError: If the underlying option raises a ValueError.
387,847
def params_of_mean(value=array([-.005, 1.]), tau=.1, rate=4.):

    def logp(value, tau, rate):
        if value[1] > 0 and value[1] + value[0] * 110 > 0:
            return normal_like(value[0], 0., tau) + \
                exponential_like(value[1], rate)
        else:
            return -Inf

    def random(tau, rate):
        val = zeros(2)
        val[0] = rnormal(0., tau)
        val[1] = rexponential(rate)
        while val[1] < 0 or val[1] + val[0] * 110 <= 0:
            val[0] = rnormal(0., tau)
            val[1] = rexponential(rate)
        return val
Intercept and slope of rate stochastic of poisson distribution Rate stochastic must be positive for t in [0,T] p(intercept, slope|tau,rate) = N(slope|0,tau) Exp(intercept|rate) 1(intercept>0) 1(intercept + slope * T>0)
387,848
def _get_color(self, r, g, b):
    clr = (r, g, b)
    return clr
Convert red, green and blue values specified in floats with range 0-1 to whatever the native widget color object is.
387,849
def wrap(msg, indent, indent_first=True):
    wrapper.width = 120
    wrapper.initial_indent = indent
    wrapper.subsequent_indent = indent
    msg = wrapper.fill(msg)
    return msg if indent_first else msg[len(indent):]
Helper function that wraps msg to 120-chars page width. All lines (except maybe 1st) will be prefixed with string {indent}. First line is prefixed only if {indent_first} is True. :param msg: string to indent :param indent: string that will be used for indentation :param indent_first: if True then the first line will be indented as well, otherwise not
387,850
def toposort(data):
    if len(data) == 0:
        return

    data = data.copy()

    for k, v in data.items():
        v.discard(k)
    extra_items_in_deps = reduce(set.union, data.values()) - set(data.keys())
    data.update(dict((item, set()) for item in extra_items_in_deps))
    while True:
        ordered = set(item for item, dep in data.items() if len(dep) == 0)
        if not ordered:
            break
        yield ordered
        data = dict((item, (dep - ordered))
                    for item, dep in data.items()
                    if item not in ordered)
    if len(data) != 0:
        raise CyclicDependency(data)
Dependencies are expressed as a dictionary whose keys are items and whose values are a set of dependent items. Output is a list of sets in topological order. The first set consists of items with no dependencies, each subsequent set consists of items that depend upon items in the preceding sets. :param data: :type data: :return: :rtype:
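A hedged usage sketch of the same layering idea, rewritten as a minimal self-contained generator; the function name and dependency dict are assumed example values.

from functools import reduce

def toposort_sketch(data):
    # data: {item: set of items it depends on}
    data = {k: set(v) - {k} for k, v in data.items()}
    extra = reduce(set.union, data.values(), set()) - set(data)
    data.update({item: set() for item in extra})
    while data:
        ready = {item for item, deps in data.items() if not deps}
        if not ready:
            raise ValueError("cyclic dependency: %r" % data)
        yield ready
        data = {item: deps - ready for item, deps in data.items()
                if item not in ready}

deps = {"app": {"lib", "cfg"}, "lib": {"cfg"}, "cfg": set()}   # assumed example
print(list(toposort_sketch(deps)))  # [{'cfg'}, {'lib'}, {'app'}]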
387,851
def int_list_packer(term, values): DENSITY = 10 MIN_RANGE = 20 singletons = set() ranges = [] exclude = set() sorted = jx.sort(values) last = sorted[0] curr_start = last curr_excl = set() for v in sorted[1::]: if v <= last + 1: pass elif v - last > 3: if last == curr_start: singletons.add(last) elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY): singletons |= set(range(curr_start, last + 1)) singletons -= curr_excl else: ranges.append({"gte": curr_start, "lte": last}) exclude |= curr_excl curr_start = v curr_excl = set() else: if 1 + last - curr_start >= len(curr_excl) * DENSITY: add_me = set(range(last + 1, v)) curr_excl |= add_me elif 1 + last - curr_start - len(curr_excl) < MIN_RANGE: new_singles = set(range(curr_start, last + 1)) - curr_excl singletons = singletons | new_singles curr_start = v curr_excl = set() else: ranges.append({"gte": curr_start, "lte": last}) exclude |= curr_excl curr_start = v curr_excl = set() last = v if last == curr_start: singletons.add(last) elif last - curr_start - len(curr_excl) < MIN_RANGE or ((last - curr_start) < len(curr_excl) * DENSITY): singletons |= set(range(curr_start, last + 1)) singletons -= curr_excl else: ranges.append({"gte": curr_start, "lte": last}) exclude |= curr_excl if ranges: r = {"or": [{"range": {term: r}} for r in ranges]} if exclude: r = {"and": [r, {"not": {"terms": {term: jx.sort(exclude)}}}]} if singletons: return {"or": [ {"terms": {term: jx.sort(singletons)}}, r ]} else: return r else: return {"terms": {term: values}}
return singletons, ranges and exclusions
387,852
def delete(self):
    if lib.EnvDeleteInstance(self._env, self._ist) != 1:
        raise CLIPSError(self._env)
Delete the instance.
387,853
def wrap_many(self, *args, strict=False): for arg in args: is_elem = arg and isinstance(arg, DOMElement) is_elem_iter = ( not is_elem and arg and isinstance(arg, Iterable) and isinstance(iter(arg).__next__(), DOMElement) ) if not (is_elem or is_elem_iter): raise WrongArgsError( self, "Argument {} is not DOMElement nor iterable of DOMElements".format( arg ), ) wcopies = [] failure = [] def wrap_next(tag, idx): nonlocal wcopies, failure next_copy = self.__copy__() try: return next_copy.wrap(tag) except TagError: failure.append(idx) return next_copy for arg_idx, arg in enumerate(args): if isinstance(arg, DOMElement): wcopies.append(wrap_next(arg, (arg_idx, -1))) else: iter_wcopies = [] for iter_idx, t in enumerate(arg): iter_wcopies.append(wrap_next(t, (arg_idx, iter_idx))) wcopies.append(type(arg)(iter_wcopies)) if failure and strict: raise TagError( self, "Wrapping in a non empty Tag is forbidden, failed on arguments " + ", ".join( list( map( lambda idx: str(idx[0]) if idx[1] == -1 else "[{1}] of {0}".format(*idx), failure, ) ) ), ) return wcopies
Wraps different copies of this element inside all empty tags listed in params or param's (non-empty) iterators. Returns list of copies of this element wrapped inside args or None if not succeeded, in the same order and same structure, i.e. args = (Div(), (Div())) -> value = (A(...), (A(...))) If on some args it must raise TagError, it will only if strict is True, otherwise it will do nothing with them and return Nones on their positions
387,854
def update(did): required_attributes = [, , , , , , ] required_metadata_base_attributes = [, , , , , , , ] required_metadata_curation_attributes = [, ] assert isinstance(request.json, dict), data = request.json if not data: logger.error(f) return 400 msg, status = check_required_attributes(required_attributes, data, ) if msg: return msg, status msg, status = check_required_attributes(required_metadata_base_attributes, _get_base_metadata(data[]), ) if msg: return msg, status msg, status = check_required_attributes(required_metadata_curation_attributes, _get_curation_metadata(data[]), ) if msg: return msg, status msg, status = check_no_urls_in_files(_get_base_metadata(data[]), ) if msg: return msg, status msg, status = validate_date_format(data[]) if msg: return msg, status _record = dict() _record = copy.deepcopy(data) _record[] = datetime.strptime(data[], ) try: if dao.get(did) is None: register() return _sanitize_record(_record), 201 else: for service in _record[]: service_id = int(service[]) if service[] == : _record[][service_id][][][] = _get_date( dao.get(did)[]) dao.update(_record, did) return Response(_sanitize_record(_record), 200, content_type=) except Exception as err: return f, 500
Update DDO of an existing asset --- tags: - ddo consumes: - application/json parameters: - in: body name: body required: true description: DDO of the asset. schema: type: object required: - "@context" - created - id - publicKey - authentication - proof - service properties: "@context": description: example: https://w3id.org/future-method/v1 type: string id: description: ID of the asset. example: did:op:123456789abcdefghi type: string created: description: date of ddo creation. example: "2016-02-08T16:02:20Z" type: string publicKey: type: array description: List of public keys. example: [{"id": "did:op:123456789abcdefghi#keys-1"}, {"type": "Ed25519VerificationKey2018"}, {"owner": "did:op:123456789abcdefghi"}, {"publicKeyBase58": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV"}] authentication: type: array description: List of authentication mechanisms. example: [{"type": "RsaSignatureAuthentication2018"}, {"publicKey": "did:op:123456789abcdefghi#keys-1"}] proof: type: dictionary description: Information about the creation and creator of the asset. example: {"type": "UUIDSignature", "created": "2016-02-08T16:02:20Z", "creator": "did:example:8uQhQMGzWxR8vw5P3UWH1ja", "signatureValue": "QNB13Y7Q9...1tzjn4w==" } service: type: array description: List of services. example: [{"type": "Access", "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/consume?pubKey=${ pubKey}&serviceId={serviceId}&url={url}"}, {"type": "Compute", "serviceEndpoint": "http://mybrizo.org/api/v1/brizo/services/compute?pubKey=${ pubKey}&serviceId={serviceId}&algo={algo}&container={container}"}, { "type": "Metadata", "serviceDefinitionId": "2", "serviceEndpoint": "http://myaquarius.org/api/v1/provider/assets/metadata/{did}", "metadata": { "base": { "name": "UK Weather information 2011", "type": "dataset", "description": "Weather information of UK including temperature and humidity", "dateCreated": "2012-02-01T10:55:11Z", "author": "Met Office", "license": "CC-BY", "copyrightHolder": "Met Office", "compression": "zip", "workExample": "stationId,latitude,longitude,datetime, temperature,humidity/n423432fsd,51.509865,-0.118092, 2011-01-01T10:55:11+00:00,7.2,68", "files": [{ "contentLength": "4535431", "contentType": "text/csv", "encoding": "UTF-8", "compression": "zip", "resourceId": "access-log2018-02-13-15-17-29-18386C502CAEA932" } ], "encryptedFiles": "0x098213xzckasdf089723hjgdasfkjgasfv", "links": [{ "name": "Sample of Asset Data", "type": "sample", "url": "https://foo.com/sample.csv" }, { "name": "Data Format Definition", "type": "format", "AssetID": "4d517500da0acb0d65a716f61330969334630363ce4a6a9d39691026ac7908ea" } ], "inLanguage": "en", "tags": "weather, uk, 2011, temperature, humidity", "price": 10, "checksum": "38803b9e6f04fce3fba4b124524672592264d31847182c689095a081c9e85262" }, "curation": { "rating": 0.93, "numVotes": 123, "schema": "Binary Voting" }, "additionalInformation": { "updateFrecuency": "yearly", "structuredMarkup": [{ "uri": "http://skos.um.es/unescothes/C01194/jsonld", "mediaType": "application/ld+json" }, { "uri": "http://skos.um.es/unescothes/C01194/turtle", "mediaType": "text/turtle" } ] } } }] responses: 200: description: Asset successfully updated. 201: description: Asset successfully registered. 400: description: One of the required attributes is missing. 404: description: Invalid asset data. 500: description: Error
387,855
def check_in(self, url: str, new_status: Status, increment_try_count: bool=True, url_result: Optional[URLResult]=None):
Update record for processed URL. Args: url: The URL. new_status: Update the item status to `new_status`. increment_try_count: Whether to increment the try counter for the URL. url_result: Additional values.
387,856
def cut_from_block(html_message):
    # handle the case when the "From:" block is enclosed in a tag
    block = html_message.xpath(
        ("//*[starts-with(mg:text_content(), 'From:')]|"
         "//*[starts-with(mg:text_content(), 'Date:')]"))
    if block:
        block = block[-1]
        parent_div = None
        while block.getparent() is not None:
            if block.tag == 'div':
                parent_div = block
                break
            block = block.getparent()
        if parent_div is not None:
            maybe_body = parent_div.getparent()
            parent_div_is_all_content = (
                maybe_body is not None and maybe_body.tag == 'body'
                and len(maybe_body.getchildren()) == 1)
            if not parent_div_is_all_content:
                parent = block.getparent()
                next_sibling = block.getnext()
                while next_sibling is not None:
                    parent.remove(block)
                    block = next_sibling
                    next_sibling = block.getnext()
                if block is not None:
                    parent.remove(block)
                return True
            else:
                return False

    # handle the case when the "From:" block is not enclosed in a tag
    block = html_message.xpath(
        ("//*[starts-with(mg:tail(), 'From:')]|"
         "//*[starts-with(mg:tail(), 'Date:')]"))
    if block:
        block = block[0]
        if RE_FWD.match(block.getparent().text or ''):
            return False
        while block.getnext() is not None:
            block.getparent().remove(block.getnext())
        block.getparent().remove(block)
        return True
Cuts div tag which wraps block starting with "From:".
387,857
def get_help(obj, env, subcmds): doc = txt.dedent(obj.__doc__ or "") env = env.copy() doc = doc.strip() if not re.search(r"^usage:\s*$", doc, flags=re.IGNORECASE | re.MULTILINE): doc += txt.dedent() help_line = (" %%-%ds %%s" % (max([5] + [len(a) for a in subcmds]), )) env["actions"] = "\n".join( help_line % ( name, get_help(subcmd, subcmd_env(env, name), {}).split("\n")[0]) for name, subcmd in subcmds.items()) env["actions_help"] = "" if not env["actions"] else ( "ACTION could be one of:\n\n" "%(actions)s\n\n" "See for more information " "on a specific command." % env) if "%(std_usage)s" in doc: env["std_usage"] = txt.indent( ("%(surcmd)s --help\n" "%(surcmd)s --version" + (("\n%(surcmd)s help [COMMAND]" "\n%(surcmd)s ACTION [ARGS...]") if subcmds else "")) % env, _find_prefix(doc, "%(std_usage)s"), first="") if "%(std_options)s" in doc: env["std_options"] = txt.indent( "--help Show this screen.\n" "--version Show version.", _find_prefix(doc, "%(std_options)s"), first="") if subcmds and "%(actions_help)s" not in doc: doc += "\n\n%(actions_help)s" try: output = doc % env except KeyError as e: msg.err("Doc interpolation of %s needed missing key %r" % (aformat(env["surcmd"], attrs=["bold", ]), e.args[0])) exit(1) except Exception as e: msg.err( "Documentation of %s is not valid. Please check it:\n%s" % (aformat(env["surcmd"], attrs=["bold", ]), doc)) exit(1) return output
Interpolate the complete help doc of a given object. Assumes that the given object has a specific interface: obj.__doc__ is the basic help text, and obj.get_actions_titles() returns the subcommands, if any.
387,858
def list_build_configuration_sets(page_size=200, page_index=0, sort="", q=""):
    data = list_build_configuration_sets_raw(page_size, page_index, sort, q)
    if data:
        return utils.format_json_list(data)
List all build configuration sets
387,859
def tf_idf(text):
    _raise_error_if_not_sarray(text, "text")
    if len(text) == 0:
        return _turicreate.SArray()
    dataset = _turicreate.SFrame({'docs': text})
    scores = _feature_engineering.TFIDF().fit_transform(dataset)
    return scores['docs']
Compute the TF-IDF scores for each word in each document. The collection of documents must be in bag-of-words format. .. math:: \mbox{TF-IDF}(w, d) = tf(w, d) * log(N / f(w)) where :math:`tf(w, d)` is the number of times word :math:`w` appeared in document :math:`d`, :math:`f(w)` is the number of documents word :math:`w` appeared in, :math:`N` is the number of documents, and we use the natural logarithm. Parameters ---------- text : SArray[str | dict | list] Input text data. Returns ------- out : SArray[dict] The same document corpus where each score has been replaced by the TF-IDF transformation. See Also -------- count_words, count_ngrams, tokenize, References ---------- - `Wikipedia - TF-IDF <https://en.wikipedia.org/wiki/TFIDF>`_ Examples -------- .. sourcecode:: python >>> import turicreate >>> docs = turicreate.SArray('https://static.turi.com/datasets/nips-text') >>> docs_tfidf = turicreate.text_analytics.tf_idf(docs)
387,860
def main(): import signal signal.signal(signal.SIGINT, signal.SIG_DFL) import argparse parser = argparse.ArgumentParser(description=) parser.add_argument(,,type=str, default=,help=) parser.add_argument(,type=str, help=) parser.add_argument(,type=str, help=) parser.add_argument(, , help=) parser.add_argument(, nargs=, help=) args = parser.parse_args() GObject.threads_init() default_config = {: args.parser} if args.filter: default_config[]=args.filter if args.separator: default_config[]=args.separator if args.headers: default_config[]=map(lambda s: s.strip(),args.headers.split()) inputs = [ (open(f,),default_config) for f in args.files ]
Main entry point
387,861
def get_all_publications(return_namedtuples=True):
    sources = [
        ben_cz.get_publications,
        grada_cz.get_publications,
        cpress_cz.get_publications,
        zonerpress_cz.get_publications,
    ]

    publications = []
    for source in sources:
        publications.extend(
            filters.filter_publications(source())
        )

    if return_namedtuples:
        publications = map(lambda x: x.to_namedtuple(), publications)

    return publications
Get list of publications from all available sources. Args: return_namedtuples (bool, default True): Convert :class:`.Publication` structures to namedtuples (used in AMQP communication). Returns: list: List of :class:`.Publication` structures converted to namedtuple.
387,862
def download_ts(self, path, chunk, process_last_line=True): import glob ret_chunk = [] partial_chunk = lines = chunk.strip().split() if not process_last_line: partial_chunk = lines.pop() for line in lines: if line.startswith(): ts = % (path, line.split()[0].split()[-1]) relative_ts = % ( path.split()[-1], line.split()[0].split()[-1]) if not os.path.exists(ts): gevent.spawn(ApiCall.save_url_to_file, line, ts).start() gevent.sleep(0) ret_chunk.append( + line) ret_chunk.append(relative_ts) else: ret_chunk = [] else: ret_chunk.append(line) if in chunk: self.repeat_needed = 0 gevent.sleep(0) elif chunk.strip(): self.repeat_needed = 1 + len(glob.glob(path + )) ret_chunk = ret_chunk and .join(ret_chunk) + or return ret_chunk, partial_chunk
This will look for a download ts link. It will then download that file and replace the link with the local file. :param process_last_line: :param path: str of the path to put the file :param chunk: str of the chunk file, note this could have partial lines :return: str of the chunk with the local file link
387,863
def c(*args, **kwargs):
    with Reflect.context(**kwargs) as r:
        kwargs["args"] = args
        instance = C_CLASS(r, stream, **kwargs)
        instance()
kind of like od -c on the command line, basically it dumps each character and info about that char since -- 2013-5-9 *args -- tuple -- one or more strings to dump
387,864
def differences_between(self, current_files, parent_files, changes, prefixes):
    parent_oid = None
    if any(is_tree for _, is_tree, _ in changes):
        if len(changes) == 1:
            wanted_path = list(changes)[0][0]
            parent_oid = frozenset([oid for path, is_tree, oid in parent_files
                                    if path == wanted_path and is_tree])
        else:
            parent_values = defaultdict(set)
            parent_changes = parent_files - current_files
            for path, is_tree, oid in parent_changes:
                if is_tree:
                    parent_values[path].add(oid)

    for path, is_tree, oid in changes:
        if is_tree and path not in prefixes:
            continue

        if not is_tree:
            yield path, None, True
        else:
            parent_oids = parent_oid if parent_oid is not None else parent_values.get(path, empty)
            cf_and_pf, changes = self.tree_structures_for(path, oid, parent_oids, prefixes)
            if changes:
                yield cf_and_pf, changes, False
yield (thing, changes, is_path) If is_path is true, changes is None and thing is the path as a tuple. If is_path is false, thing is the current_files and parent_files for that changed treeentry and changes is the difference between current_files and parent_files. The code here is written to squeeze as much performance as possible out of this operation.
387,865
def _rest_post(self, suburi, request_headers, request_body):
    return self._rest_op('POST', suburi, request_headers, request_body)
REST POST operation. The response body after the operation could be the new resource, or ExtendedError, or it could be empty.
387,866
def on_for_seconds(self, left_speed, right_speed, seconds, brake=True, block=True):
    if seconds < 0:
        raise ValueError("seconds is negative ({})".format(seconds))

    (left_speed_native_units, right_speed_native_units) = self._unpack_speeds_to_native_units(left_speed, right_speed)

    self.left_motor.speed_sp = int(round(left_speed_native_units))
    self.left_motor.time_sp = int(seconds * 1000)
    self.left_motor._set_brake(brake)
    self.right_motor.speed_sp = int(round(right_speed_native_units))
    self.right_motor.time_sp = int(seconds * 1000)
    self.right_motor._set_brake(brake)

    log.debug("%s: on_for_seconds %ss at left-speed %s, right-speed %s" %
              (self, seconds, left_speed, right_speed))

    self.left_motor.run_timed()
    self.right_motor.run_timed()

    if block:
        self._block()
Rotate the motors at 'left_speed & right_speed' for 'seconds'. Speeds can be percentages or any SpeedValue implementation.
387,867
def install_remote(self):
    package, pkg_folder = None, None
    try:
        package = self._download()
        pkg_folder = self._extract(package)
        napp_folder = self._get_local_folder(pkg_folder)
        dst = self._installed / self.user / self.napp
        self._check_module(dst.parent)
        shutil.move(str(napp_folder), str(dst))
    finally:
        if package:
            Path(package).unlink()
        if pkg_folder and pkg_folder.exists():
            shutil.rmtree(str(pkg_folder))
Download, extract and install NApp.
387,868
def getRelativePath(basepath, path):
    basepath = splitpath(os.path.abspath(basepath))
    path = splitpath(os.path.abspath(path))
    afterCommon = False
    for c in basepath:
        if afterCommon or path[0] != c:
            path.insert(0, os.path.pardir)
            afterCommon = True
        else:
            del path[0]
    return os.path.join(*path)
Get a path that is relative to the given base path.
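For comparison, the standard library's os.path.relpath computes the same kind of relative path; the paths below are assumed example values (POSIX separators).

import os

base = "/home/user/project/docs"            # assumed example paths
target = "/home/user/project/src/main.py"

# walk up from the common prefix, then down to the target
print(os.path.relpath(target, start=base))  # ../src/main.py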
387,869
def _from_dict(cls, _dict):
    args = {}
    if 'customizations' in _dict:
        args['customizations'] = [
            LanguageModel._from_dict(x) for x in (_dict.get('customizations'))
        ]
    else:
        raise ValueError(
            'Required property \'customizations\' not present in '
            'LanguageModels JSON')
    return cls(**args)
Initialize a LanguageModels object from a json dictionary.
387,870
def make_encoder(self,formula_dict,inter_list,param_dict): X_dict = {} Xcol_dict = {} encoder_dict = {} for key in formula_dict: encoding,arg = formula_dict[key] if in encoding: drop_name = arg deviation_encoder,X_sub,colnames_sub = _dev_encode(param_dict,drop_name,key) X_dict[key] = X_sub Xcol_dict[key] = colnames_sub encoder_dict[key] = deviation_encoder elif in encoding: ref_name = arg dummy_encoder,X_sub,colnames_sub = _dum_encode(param_dict,ref_name,key) X_dict[key] = X_sub Xcol_dict[key] = colnames_sub encoder_dict[key] = dummy_encoder elif in encoding: degree = arg polynomial_encoder,X_sub,colnames_sub = _poly_encode(param_dict,degree,key) X_dict[key] = X_sub Xcol_dict[key] = colnames_sub encoder_dict[key] = polynomial_encoder else: print encoding raise Exception("Encoding name error") for interaction in inter_list: if len(interaction) >= 3: raise Exception("Doesnthreewaytwowaytrimmed_columns'] = self._trimmed_columns return encoder_dict
make the encoder function
387,871
def associate_route_table(self, route_table_id, subnet_id):
    params = {
        'RouteTableId': route_table_id,
        'SubnetId': subnet_id
    }
    result = self.get_object('AssociateRouteTable', params, ResultSet)
    return result.associationId
Associates a route table with a specific subnet. :type route_table_id: str :param route_table_id: The ID of the route table to associate. :type subnet_id: str :param subnet_id: The ID of the subnet to associate with. :rtype: str :return: The ID of the association created
387,872
def export_throw_event_info(node_params, output_element):
    definitions = node_params[consts.Consts.event_definitions]
    for definition in definitions:
        definition_id = definition[consts.Consts.id]
        definition_type = definition[consts.Consts.definition_type]
        output_definition = eTree.SubElement(output_element, definition_type)
        if definition_id != "":
            output_definition.set(consts.Consts.id, definition_id)
Adds EndEvent or IntermediateThrowingEvent attributes to exported XML element :param node_params: dictionary with given intermediate throw event parameters, :param output_element: object representing BPMN XML 'intermediateThrowEvent' element.
387,873
def _rapRperiAxiEq(R, E, L, pot):
    return E - potentialAxi(R, pot) - L**2. / 2. / R**2.
The vr=0 equation that needs to be solved to find apo- and pericenter
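As a hedged illustration of the equation E - Phi(R) - L^2/(2 R^2) = 0, the sketch below solves it numerically for a Kepler (point-mass) potential, which is an assumed stand-in for the potential objects used by the library; the orbit parameters and bracketing intervals are example values.

import numpy as np
from scipy.optimize import brentq

def kepler_potential(R, GM=1.0):
    return -GM / R

def radial_eq(R, E, L):
    # E - Phi(R) - L^2 / (2 R^2); its roots are peri- and apocenter
    return E - kepler_potential(R) - L**2 / (2.0 * R**2)

E, L = -0.5, 0.9                                     # assumed bound orbit (GM = 1)
rperi = brentq(radial_eq, 1e-3, 1.0, args=(E, L))    # ~0.56
rapo = brentq(radial_eq, 1.0, 10.0, args=(E, L))     # ~1.44
print(rperi, rapo)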
387,874
def get_measurement_id_options(self):
    document = self._get_document_for_url(
        self._get_url_for_measurements()
    )
    measurement_ids = self._get_measurement_ids(document)
    return measurement_ids
Returns list of measurement choices.
387,875
def create_small_thumbnail(self, token, item_id): parameters = dict() parameters[] = token parameters[] = item_id response = self.request( , parameters) return response
Create a 100x100 small thumbnail for the given item. It is used for preview purpose and displayed in the 'preview' and 'thumbnails' sidebar sections. :param token: A valid token for the user in question. :type token: string :param item_id: The item on which to set the thumbnail. :type item_id: int | long :returns: The item object (with the new thumbnail id) and the path where the newly created thumbnail is stored. :rtype: dict
387,876
def on(self, event, listener, *user_args):
    self._listeners[event].append(
        _Listener(callback=listener, user_args=user_args))
Register a ``listener`` to be called on ``event``. The listener will be called with any extra arguments passed to :meth:`emit` first, and then the extra arguments passed to :meth:`on` last. If the listener function returns :class:`False`, it is removed and will not be called the next time the ``event`` is emitted.
387,877
def get_image_grad(net, image, class_id=None):
    return _get_grad(net, image, class_id, image_grad=True)
Get the gradients of the image. Parameters: ---------- net: Block Network to use for visualization. image: NDArray Preprocessed image to use for visualization. class_id: int Category ID this image belongs to. If not provided, network's prediction will be used.
387,878
def determine_if_whitespace(self):
    value = self.current.value
    if value == '\n':
        self.is_space = True
    else:
        self.is_space = False
        if (value == '' or regexes['whitespace'].match(value)):
            self.is_space = True
Set is_space if current token is whitespace Is space if value is: * Newline * Empty String * Something that matches regexes['whitespace']
387,879
def authorized_default_handler(resp, remote, *args, **kwargs):
    response_token_setter(remote, resp)
    db.session.commit()
    return redirect(url_for('oauthclient_settings.index'))
Store access token in session. Default authorized handler. :param remote: The remote application. :param resp: The response. :returns: Redirect response.
387,880
def init_registry_from_json(mongo, filename, clear_collection=False):
    with open(filename, 'r') as f:
        models = json.load(f)
    init_registry(mongo, models, clear_collection)
Initialize a model registry with a list of model definitions that are stored in a given file in Json format. Parameters ---------- mongo : scodata.MongoDBFactory Connector for MongoDB filename : string Path to file containing model definitions clear_collection : boolean If true, collection will be dropped before models are created
387,881
def write(self):
    self._check()
    cache = self._cache
    pristine_cache = self._pristine_cache
    self._pristine_cache = cache.copy()

    changes = []

    def apply_changes(content, stat):
        del changes[:]
        current = yaml.load(content) if content else {}
        missing = object()
        for key in set(pristine_cache).union(cache):
            old_value = pristine_cache.get(key, missing)
            new_value = cache.get(key, missing)
            if old_value != new_value:
                if new_value != missing:
                    current[key] = new_value
                    if old_value != missing:
                        changes.append(
                            ModifiedItem(key, old_value, new_value))
                    else:
                        changes.append(AddedItem(key, new_value))
                elif key in current:
                    del current[key]
                    changes.append(DeletedItem(key, old_value))
        return yaml.safe_dump(current)

    yield retry_change(self._client, self._path, apply_changes)
    returnValue(changes)
Write object state to Zookeeper. This will write the current state of the object to Zookeeper, taking the final merged state as the new one, and resetting any write buffers.
387,882
def wait_for_crm_operation(operation):
    logger.info("wait_for_crm_operation: "
                "Waiting for operation {} to finish...".format(operation))

    for _ in range(MAX_POLLS):
        result = crm.operations().get(name=operation["name"]).execute()
        if "error" in result:
            raise Exception(result["error"])

        if "done" in result and result["done"]:
            logger.info("wait_for_crm_operation: Operation done.")
            break

        time.sleep(POLL_INTERVAL)

    return result
Poll for cloud resource manager operation until finished.
387,883
def attention_mask_same_segment(
        query_segment, memory_segment=None, dtype=tf.float32):
    memory_segment = rename_length_to_memory_length(
        memory_segment or query_segment)
    return mtf.cast(mtf.not_equal(query_segment, memory_segment), dtype) * -1e9
Bias for attention where attention between segments is disallowed. Args: query_segment: a mtf.Tensor with shape [..., length_dim] memory_segment: a mtf.Tensor with shape [..., memory_length_dim] dtype: a tf.dtype Returns: a mtf.Tensor with shape [..., length_dim, memory_length_dim]
387,884
def _load(self):
    with open(self._pickle_file, 'rb') as source:
        pickler = pickle.Unpickler(source)
        for attribute in self._pickle_attributes:
            pickle_data = pickler.load()
            setattr(self, attribute, pickle_data)
Load data from a pickle file.
387,885
def get_nsing(self, epsilon=1.0e-4):
    mx = self.xtqx.shape[0]
    nsing = mx - np.searchsorted(
        np.sort((self.xtqx.s.x / self.xtqx.s.x.max())[:, 0]), epsilon)
    if nsing == mx:
        self.logger.warn("optimal nsing=npar")
        nsing = None
    return nsing
get the number of solution space dimensions given a ratio between the largest and smallest singular values Parameters ---------- epsilon: float singular value ratio Returns ------- nsing : float number of singular components above the epsilon ratio threshold Note ----- If nsing == nadj_par, then None is returned
387,886
def defaults(self): self.chart_style = {} self.chart_opts = {} self.style("color", " self.width(900) self.height(250)
Reset the chart options and style to defaults
387,887
def load_glove_df(filepath, **kwargs):
    pdkwargs = dict(index_col=0, header=None, sep=r'\s+', skiprows=[0],
                    verbose=False, engine='python')
    pdkwargs.update(kwargs)
    return pd.read_csv(filepath, **pdkwargs)
Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64
387,888
def conv2d_fixed_padding(inputs, filters, kernel_size, strides, data_format="channels_first", use_td=False, targeting_rate=None, keep_prob=None, is_training=None): if strides > 1: inputs = fixed_padding(inputs, kernel_size, data_format=data_format) if use_td: inputs_shape = common_layers.shape_list(inputs) if use_td == "weight": if data_format == "channels_last": size = kernel_size * kernel_size * inputs_shape[-1] else: size = kernel_size * kernel_size * inputs_shape[1] targeting_count = targeting_rate * tf.to_float(size) targeting_fn = common_layers.weight_targeting elif use_td == "unit": targeting_count = targeting_rate * filters targeting_fn = common_layers.unit_targeting else: raise Exception("Unrecognized targeted dropout type: %s" % use_td) y = common_layers.td_conv( inputs, filters, kernel_size, targeting_count, targeting_fn, keep_prob, is_training, do_prune=True, strides=strides, padding=("SAME" if strides == 1 else "VALID"), data_format=data_format, use_bias=False, kernel_initializer=tf.variance_scaling_initializer()) else: y = layers().Conv2D( filters=filters, kernel_size=kernel_size, strides=strides, padding=("SAME" if strides == 1 else "VALID"), use_bias=False, kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format)(inputs) return y
Strided 2-D convolution with explicit padding. The padding is consistent and is based only on `kernel_size`, not on the dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). Args: inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. filters: `int` number of filters in the convolution. kernel_size: `int` size of the kernel to be used in the convolution. strides: `int` strides of the convolution. data_format: `str` either "channels_first" for `[batch, channels, height, width]` or "channels_last for `[batch, height, width, channels]`. use_td: `str` one of "weight" or "unit". Set to False or "" to disable targeted dropout. targeting_rate: `float` proportion of weights to target with targeted dropout. keep_prob: `float` keep probability for targeted dropout. is_training: `bool` for whether the model is in training. Returns: A `Tensor` of shape `[batch, filters, height_out, width_out]`. Raises: Exception: if use_td is not valid.
387,889
def init_prior(self, R):
    centers, widths = self.init_centers_widths(R)
    prior = np.zeros(self.K * (self.n_dim + 1))
    self.set_centers(prior, centers)
    self.set_widths(prior, widths)
    self.set_prior(prior)
    return self
initialize prior for the subject

Returns
-------
TFA
    Returns the instance itself.
387,890
def printSequences(x, formatString="%d"):
    seqLen, numElements = x.shape
    for i in range(seqLen):
        s = ""
        for j in range(numElements):
            s += formatString % x[i][j]
        print(s)
Print a bunch of sequences stored in a 2D numpy array.
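The same row-by-row formatting can be illustrated with a couple of lines of numpy (made-up data):

import numpy as np

x = np.array([[1, 0, 1], [0, 1, 0]])
for row in x:
    print("".join("%d" % v for v in row))   # prints "101" then "010"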
387,891
async def execute_all_with_names(self, subprocesses, container=None,
                                 retnames=('',), forceclose=True):
    if not subprocesses:
        return []
    subprocesses = list(subprocesses)
    if len(subprocesses) == 1 and (container is None or container is self) and forceclose:
        return [await subprocesses[0]]
    if container is None:
        container = self
    delegates = [self.begin_delegate_other(p, container, retnames)
                 for p in subprocesses]
    matchers = [d[0] for d in delegates]
    try:
        _, eventdict = await self.wait_for_all(*matchers)
        events = [eventdict[m] for m in matchers]
        exceptions = [e.exception for e in events if hasattr(e, 'exception')]
        if exceptions:
            if len(exceptions) == 1:
                raise exceptions[0]
            else:
                raise MultipleException(exceptions)
        return [e.result for e in events]
    finally:
        if forceclose:
            for d in delegates:
                try:
                    container.terminate(d[1])
                except Exception:
                    pass
DEPRECATED

Execute all subprocesses and get the return values.

:param subprocesses: sequence of subroutines (coroutines)

:param container: if specified, run subprocesses in another container.

:param retnames: DEPRECATED get return value from container.(name) for each name in retnames. '' for return value (to be compatible with earlier versions)

:param forceclose: force close the routines on exit, so all the subprocesses are terminated on timeout if used with executeWithTimeout

:returns: a list of tuples, one for each subprocess, with value of retnames inside: `[('retvalue1',),('retvalue2',),...]`
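The "run them all, collect the results, raise if any failed" pattern is the same one plain asyncio provides; the analogy below uses asyncio.gather and is not the vlcp container API:

import asyncio

async def work(n):
    await asyncio.sleep(0.01)
    return n * n

async def main():
    results = await asyncio.gather(*(work(n) for n in range(3)))
    print(results)   # [0, 1, 4]

asyncio.run(main())

By default gather re-raises the first exception; passing return_exceptions=True collects them all, which is roughly what MultipleException does above.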
387,892
def _get_notifications_status(self, notifications):
    if notifications:
        size = len(notifications["activeNotifications"])
    else:
        size = 0
    status = self.status_notif if size > 0 else self.status_no_notif
    return (size, status)
Get the notifications status
387,893
def get_cached_moderated_reddits(self):
    if self._mod_subs is None:
        # The stripped literal is presumably the special 'mod' subreddit key.
        self._mod_subs = {'mod': self.reddit_session.get_subreddit('mod')}
        for sub in self.reddit_session.get_my_moderation(limit=None):
            self._mod_subs[six.text_type(sub).lower()] = sub
    return self._mod_subs
Return a cached dictionary of the user's moderated reddits. This list is used internally. Consider using the `get_my_moderation` function instead.
387,894
def contains(self, times, keep_inside=True, delta_t=DEFAULT_OBSERVATION_TIME):
    current_max_order = self.max_order
    new_max_order = TimeMOC.time_resolution_to_order(delta_t)
    if new_max_order > current_max_order:
        # NOTE: the original warning text was lost during extraction;
        # this message is a placeholder.
        message = 'delta_t is finer than the TMOC resolution of {0} s'.format(
            TimeMOC.order_to_time_resolution(current_max_order).sec)
        warnings.warn(message, UserWarning)

    rough_tmoc = self.degrade_to_order(new_max_order)

    pix_arr = (times.jd * TimeMOC.DAY_MICRO_SEC)
    pix_arr = pix_arr.astype(int)

    intervals_arr = rough_tmoc._interval_set._intervals
    inf_arr = np.vstack([pix_arr[i] >= intervals_arr[:, 0]
                         for i in range(pix_arr.shape[0])])
    sup_arr = np.vstack([pix_arr[i] <= intervals_arr[:, 1]
                         for i in range(pix_arr.shape[0])])

    if keep_inside:
        res = inf_arr & sup_arr
        filtered_rows = np.any(res, axis=1)
    else:
        res = ~inf_arr | ~sup_arr
        filtered_rows = np.all(res, axis=1)
    return filtered_rows
Get a mask array (e.g. a numpy boolean array) of times being inside (or outside) the TMOC instance.

Parameters
----------
times : `astropy.time.Time`
    astropy times to check whether they are contained in the TMOC or not.
keep_inside : bool, optional
    True by default. If so the filtered table contains only observations that are located in the MOC.
    If ``keep_inside`` is False, the filtered table contains all observations lying outside the MOC.
delta_t : `astropy.time.TimeDelta`, optional
    the duration of one observation. It is set to 30 min by default. This data is used to compute the
    more efficient TimeMOC order to represent the observations (Best order = the less precise order
    which is able to discriminate two observations separated by ``delta_t``).

Returns
-------
array : `~numpy.ndarray`
    A mask boolean array
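Stripped of the astropy machinery, the containment test converts each time to an integer pixel and checks whether it falls inside any [start, stop] interval. A standalone sketch with made-up intervals and pixels:

import numpy as np

intervals = np.array([[10, 20], [40, 50]])   # made-up TMOC intervals
pix = np.array([15, 30, 45])                 # made-up time pixels

inf = pix[:, None] >= intervals[None, :, 0]
sup = pix[:, None] <= intervals[None, :, 1]
inside = np.any(inf & sup, axis=1)           # [ True, False, True ]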
387,895
def lookup(self, pathogenName, sampleName):
    pathogenIndex = self._pathogens[pathogenName]
    sampleIndex = self._samples[sampleName]
    return self._readsFilenames[(pathogenIndex, sampleIndex)]
Look up a pathogen name, sample name combination and get its FASTA/FASTQ file name and unique read count.

This method should be used instead of C{add} in situations where you want an exception to be raised if a pathogen/sample combination has not already been passed to C{add}.

@param pathogenName: A C{str} pathogen name.
@param sampleName: A C{str} sample name.
@raise KeyError: If the pathogen name or sample name have not been seen, either individually or in combination.
@return: A (C{str}, C{int}) tuple retrieved from self._readsFilenames
387,896
def highlight_occurences(editor):
    format = editor.language.theme.get("accelerator.occurence")
    if not format:
        return False

    extra_selections = editor.extraSelections() or []
    if not editor.isReadOnly():
        word = editor.get_word_under_cursor()
        if not word:
            return False

        block = editor.document().findBlock(0)
        cursor = editor.document().find(word, block.position(),
                                        QTextDocument.FindCaseSensitively |
                                        QTextDocument.FindWholeWords)
        while block.isValid() and cursor.position() != -1:
            selection = QTextEdit.ExtraSelection()
            selection.format.setBackground(format.background())
            selection.cursor = cursor
            extra_selections.append(selection)
            cursor = editor.document().find(word, cursor.position(),
                                            QTextDocument.FindCaseSensitively |
                                            QTextDocument.FindWholeWords)
            block = block.next()
    editor.setExtraSelections(extra_selections)
    return True
Highlights occurrences of the word under the given editor's cursor.

:param editor: Document editor.
:type editor: QWidget
:return: Method success.
:rtype: bool
387,897
def list(self, full_properties=False, filter_args=None):
    resource_obj_list = []
    # Property name presumed from context; the original literal was stripped.
    uris = self.partition.get_property('virtual-function-uris')
    if uris:
        for uri in uris:
            resource_obj = self.resource_class(
                manager=self, uri=uri, name=None, properties=None)
            if self._matches_filters(resource_obj, filter_args):
                resource_obj_list.append(resource_obj)
                if full_properties:
                    resource_obj.pull_full_properties()
    self._name_uri_cache.update_from(resource_obj_list)
    return resource_obj_list
List the Virtual Functions of this Partition.

Authorization requirements:

* Object-access permission to this Partition.

Parameters:

    full_properties (bool): Controls whether the full set of resource properties should be retrieved, vs. only the short set as returned by the list operation.

    filter_args (dict): Filter arguments that narrow the list of returned resources to those that match the specified filter arguments. For details, see :ref:`Filtering`. `None` causes no filtering to happen, i.e. all resources are returned.

Returns:

    A list of :class:`~zhmcclient.VirtualFunction` objects.

Raises:

    :exc:`~zhmcclient.HTTPError`
    :exc:`~zhmcclient.ParseError`
    :exc:`~zhmcclient.AuthError`
    :exc:`~zhmcclient.ConnectionError`
387,898
def load_obs(self, mask_threshold=0.5):
    print("Loading obs ", self.run_date, self.model_name, self.forecast_variable)
    start_date = self.run_date + timedelta(hours=self.start_hour)
    end_date = self.run_date + timedelta(hours=self.end_hour)
    mrms_grid = MRMSGrid(start_date, end_date, self.mrms_variable, self.mrms_path)
    mrms_grid.load_data()
    if len(mrms_grid.data) > 0:
        self.raw_obs[self.mrms_variable] = np.where(mrms_grid.data > 100, 100, mrms_grid.data)
        self.period_obs[self.mrms_variable] = self.raw_obs[self.mrms_variable].max(axis=0)
        if self.obs_mask:
            mask_grid = MRMSGrid(start_date, end_date, self.mask_variable, self.mrms_path)
            mask_grid.load_data()
            self.raw_obs[self.mask_variable] = np.where(mask_grid.data >= mask_threshold, 1, 0)
            self.period_obs[self.mask_variable] = self.raw_obs[self.mask_variable].max(axis=0)
Loads observations and masking grid (if needed).

Args:
    mask_threshold: Values greater than the threshold are kept, others are masked.
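The capping and masking steps reduce to two np.where calls; a sketch on made-up data (not the MRMSGrid class itself):

import numpy as np

data = np.array([[5.0, 150.0], [0.2, 80.0]])
capped = np.where(data > 100, 100, data)    # [[5., 100.], [0.2, 80.]]
mask = np.where(data >= 0.5, 1, 0)          # [[1, 1], [0, 1]]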
387,899
def _normalise_weights(logZ, weights, ntrim=None):
    logZ -= logZ.max()
    Zs = numpy.exp(logZ)
    weights = [w / w.sum() * Z for w, Z in zip(weights, Zs)]
    wmax = max([w.max() for w in weights])
    weights = [w / wmax for w in weights]
    ntot = sum([w.sum() for w in weights])
    if ntrim is not None and ntrim < ntot:
        weights = [w * ntrim / ntot for w in weights]
    return logZ, weights
Correctly normalise the weights for trimming

This takes a list of log-evidences, and re-normalises the weights so that the largest weight across all samples is 1, and the total weight in each set of samples is proportional to the evidence.

Parameters
----------
logZ : array-like
    log-evidences to weight each set of weights by
weights : list of numpy.array
    weight arrays, not necessarily of equal length
ntrim : int, optional
    if given and smaller than the total weight, rescale the weights so that the total weight equals ntrim

Returns
-------
logZ : numpy.array
    evidences, renormalised so that max(logZ) = 0
weights : list of 1D numpy.array
    normalised weights
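A worked example of the renormalisation on two made-up weight sets, following the same steps as the function body:

import numpy as np

logZ = np.array([0.0, -1.0])
weights = [np.array([0.5, 1.0]), np.array([1.0, 1.0, 2.0])]

logZ = logZ - logZ.max()
Zs = np.exp(logZ)
weights = [w / w.sum() * Z for w, Z in zip(weights, Zs)]
wmax = max(w.max() for w in weights)
weights = [w / wmax for w in weights]
print(max(w.max() for w in weights))   # 1.0 by construction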