Dataset schema: Unnamed: 0 (int64, values 0 to 389k) · code (string, lengths 26 to 79.6k) · docstring (string, lengths 1 to 46.9k)
25,800
def lstm_posterior_builder(getter, name, *args, **kwargs):
    del args
    parameter_shapes = tfp.distributions.Normal.param_static_shapes(
        kwargs["shape"])
    prior_stddev = np.sqrt(
        FLAGS.prior_pi * np.square(FLAGS.prior_sigma1) +
        (1 - FLAGS.prior_pi) * np.square(FLAGS.prior_sigma2))
    loc_var = getter(
        "{}/posterior_loc".format(name),
        shape=parameter_shapes["loc"],
        initializer=kwargs.get("initializer"),
        dtype=tf.float32)
    scale_var = getter(
        "{}/posterior_scale".format(name),
        initializer=tf.random_uniform(
            minval=np.log(np.exp(prior_stddev / 4.0) - 1.0),
            maxval=np.log(np.exp(prior_stddev / 2.0) - 1.0),
            dtype=tf.float32,
            shape=parameter_shapes["scale"]))
    return tfp.distributions.Normal(
        loc=loc_var,
        scale=tf.nn.softplus(scale_var) + 1e-5,
        name="{}/posterior_dist".format(name))
A builder for a particular diagonal Gaussian posterior. Args: getter: The `getter` passed to a `custom_getter`. Please see the documentation for `tf.get_variable`. name: The `name` argument passed to `tf.get_variable`. *args: Positional arguments forwarded by `tf.get_variable`. **kwargs: Keyword arguments forwarded by `tf.get_variable`. Returns: An instance of `tfp.distributions.Distribution` representing the posterior distribution over the variable in question.
25,801
def _proxy(self):
    if self._context is None:
        self._context = FunctionVersionContext(
            self._version,
            # solution keys inferred from the keyword-argument names;
            # the string literals were elided in the source
            service_sid=self._solution['service_sid'],
            function_sid=self._solution['function_sid'],
            sid=self._solution['sid'],
        )
    return self._context
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: FunctionVersionContext for this FunctionVersionInstance :rtype: twilio.rest.serverless.v1.service.function.function_version.FunctionVersionContext
25,802
def _load_background_color(self):
    # the endpoint name and the response key were elided in the source
    url = self.build_url(self._endpoints.get())
    response = self.session.get(url)
    if not response:
        return None
    data = response.json()
    self._background_color = data.get(, None)
Loads the data related to the fill color
25,803
def create_relocate_package(cls, package_name, shade_prefix=None, recursive=True): return cls.create_relocate(from_pattern=cls._format_package_glob(package_name, recursive), shade_prefix=shade_prefix)
Convenience constructor for a package relocation rule. Essentially equivalent to just using ``shading_relocate('package_name.**')``. :param string package_name: Package name to shade (eg, ``org.pantsbuild.example``). :param string shade_prefix: Optional prefix to apply to the package. Defaults to ``__shaded_by_pants__.``. :param bool recursive: Whether to rename everything under any subpackage of ``package_name``, or just direct children of the package. (Defaults to True).
25,804
def get_words_iterable(letters, tamil_only=False):
    buf = []
    for idx, let in enumerate(letters):
        if not let.isspace():
            if istamil(let) or (not tamil_only):
                buf.append(let)
        else:
            if len(buf) > 0:
                yield u"".join(buf)
                buf = []
    if len(buf) > 0:
        yield u"".join(buf)
Given a list of UTF-8 letters, section them into words, grouping them at spaces.
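A doctest-style sketch of the intended behaviour (plain ASCII input, so istamil() from the same module simply returns False and the default tamil_only=False keeps every letter):

>>> list(get_words_iterable(u"hello world"))
['hello', 'world']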
25,805
def add_slices(self, dashboard_id):
    # 'data', 'slice_ids', and the return message were elided in the
    # source; the values below follow Superset's upstream code
    data = json.loads(request.form.get('data'))
    session = db.session()
    Slice = models.Slice
    dash = (
        session.query(models.Dashboard).filter_by(id=dashboard_id).first())
    check_ownership(dash, raise_if_false=True)
    new_slices = session.query(Slice).filter(
        Slice.id.in_(data['slice_ids']))
    dash.slices += new_slices
    session.merge(dash)
    session.commit()
    session.close()
    return 'SLICES ADDED'
Add and save slices to a dashboard
25,806
def rooms(self):
    if self._rooms is None:
        self._rooms = RoomList(self)
    return self._rooms
:rtype: twilio.rest.video.v1.room.RoomList
25,807
def runs_to_xml(self, runSet, runs, blockname=None):
    runsElem = util.copy_of_xml_element(self.xml_header)
    runsElem.set("options", " ".join(runSet.options))
    if blockname is not None:
        runsElem.set("block", blockname)
        runsElem.set("name", ((runSet.real_name + ".") if runSet.real_name else "") + blockname)
    elif runSet.real_name:
        runsElem.set("name", runSet.real_name)
    for run in runs:
        runsElem.append(run.xml)
    return runsElem
This function creates the XML structure for a list of runs
25,808
def p_If(p):
    if len(p) == 2:
        p[0] = If(p[1], None, None)
    else:
        p[0] = If(p[1], p[5], p[4])
If : IfBlock | IfBlock ELSE COLON Terminator Block
25,809
def generate(env):
    global GhostscriptAction
    # the string literals below were elided in the source; the values
    # shown follow SCons's stock Ghostscript tool and are inferred
    try:
        if GhostscriptAction is None:
            GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
        from SCons.Tool import pdf
        pdf.generate(env)
        bld = env['BUILDERS']['PDF']
        bld.add_action('.ps', GhostscriptAction)
    except ImportError as e:
        pass
    gsbuilder = SCons.Builder.Builder(action=SCons.Action.Action('$GSCOM', '$GSCOMSTR'))
    env['BUILDERS']['Gs'] = gsbuilder
    env['GS'] = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=png16m')
    env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
Add Builders and construction variables for Ghostscript to an Environment.
25,810
def image_undo():
    if len(image_undo_list) <= 0:
        print("no undos in memory")
        return
    [image, Z] = image_undo_list.pop(-1)
    image.set_array(Z)
    _pylab.draw()
Undoes the last coarsen or smooth command.
25,811
def isbn(self, fmt: Optional[ISBNFormat] = None, locale: str = 'en') -> str:
    # the default locale string was elided in the source; mimesis
    # defaults to its 'en' locale
    fmt_value = self._validate_enum(item=fmt, enum=ISBNFormat)
    mask = ISBN_MASKS[fmt_value].format(
        ISBN_GROUPS[locale])
    return self.random.custom_code(mask)
Generate ISBN for current locale. To change ISBN format, pass parameter ``fmt`` with needed value of the enum object :class:`~mimesis.enums.ISBNFormat` :param fmt: ISBN format. :param locale: Locale code. :return: ISBN. :raises NonEnumerableError: if fmt is not enum ISBNFormat.
25,812
def get(self, section, option, as_list=False):
    ret = super(GitConfigParser, self).get(section, option)
    if as_list and not isinstance(ret, list):
        ret = [ret]
    return ret
Adds an optional "as_list" argument to ensure a list is returned. This is helpful when iterating over an option which may or may not be a multivar.
25,813
def photo_url(self):
    # the tag name, id, attribute key, and replacement strings were
    # elided in the source
    img = self.soup.find(, id=).img[]
    return img.replace(, )
Get the URL of the topic's avatar image. :return: topic avatar URL :rtype: str
25,814
def do_size(self, w, h):
    if (w is None):
        self.sw = self.rw
        self.sh = self.rh
    else:
        self.sw = w
        self.sh = h
    image = Image.new("RGB", (self.sw, self.sh), self.gen.background_color)
    for y in range(0, self.sh):
        for x in range(0, self.sw):
            ix = int((x * self.rw) // self.sw + self.rx)
            iy = int((y * self.rh) // self.sh + self.ry)
            color = self.gen.pixel(ix, iy)
            if (color is not None):
                image.putpixel((x, y), color)
    self.image = image
Render the image at the requested size (native size if w is None), sampling each pixel from the generator.
25,815
def collect(config, pconn):
    # NOTE: many string literals were elided in the source. The target
    # dict keys and type names below are inferred from the surrounding
    # branches (following insights-client upstream); calls whose message
    # strings are unknown keep the gap.
    if config.analyze_file:
        logger.debug("Client analyzing a compress filesystem.")
        target = {'type': 'compressed_file',
                  'name': os.path.splitext(
                      os.path.basename(config.analyze_file))[0],
                  'location': config.analyze_file}
    elif config.analyze_mountpoint:
        logger.debug("Client analyzing a filesystem already mounted.")
        target = {'type': 'mountpoint',
                  'name': os.path.splitext(
                      os.path.basename(config.analyze_mountpoint))[0],
                  'location': config.analyze_mountpoint}
    elif config.analyze_image_id:
        logger.debug("Client running in image mode.")
        logger.debug("Scanning for matching image.")
        from .containers import get_targets
        targets = get_targets(config)
        if len(targets) == 0:
            sys.exit(constants.sig_kill_bad)
        target = targets[0]
    else:
        if config.analyze_container:
            logger.debug()
        else:
            logger.debug("Host selected as scanning target.")
        target = constants.default_target
    branch_info = get_branch_info(config, pconn)
    pc = InsightsUploadConf(config)
    tar_file = None
    collection_rules = pc.get_conf_file()
    rm_conf = pc.get_rm_conf()
    if rm_conf:
        logger.warn("WARNING: Excluding data from files")
    archive = None
    container_connection = None
    mp = None
    compressed_filesystem = None
    try:
        if target['type'] == 'docker_image':
            from .containers import open_image
            container_connection = open_image(target['name'])
            logging_name = + target['name']
            if container_connection:
                mp = container_connection.get_fs()
            else:
                logger.error(, logging_name)
                return False
        elif target['type'] == 'compressed_file':
            logging_name = + target['name'] + + target['location']
            from .compressed_file import InsightsCompressedFile
            compressed_filesystem = InsightsCompressedFile(target['location'])
            if compressed_filesystem.is_tarfile is False:
                logger.debug("Could not access compressed tar filesystem.")
                return False
            mp = compressed_filesystem.get_filesystem_path()
        elif target['type'] == 'mountpoint':
            logging_name = + target['name'] + + target['location']
            mp = config.analyze_mountpoint
        elif target['type'] == 'host':
            logging_name = determine_hostname()
        else:
            logger.error(, target['type'])
            return False
        archive = InsightsArchive(compressor=config.compressor,
                                  target_name=target['name'])
        atexit.register(_delete_archive_internal, config, archive)
        if target['type'] in ["mountpoint", "compressed_file"]:
            target_type = "docker_image"
        else:
            target_type = target['type']
        logger.debug("Inferring target_type for SPEC collection", target_type)
        logger.debug("Inferred from ", target['type'])
        dc = DataCollector(config, archive, mountpoint=mp)
        logger.info(, logging_name)
        dc.run_collection(collection_rules, rm_conf, branch_info)
        tar_file = dc.done(collection_rules, rm_conf)
    finally:
        if container_connection:
            container_connection.close()
    if config.analyze_file is not None and compressed_filesystem is not None:
        compressed_filesystem.cleanup_temp_filesystem()
    return tar_file
All the heavy lifting is done here.
25,816
def find_throat_facets(self, throats=None):
    if throats is None:
        throats = self.throats()
    temp = []
    # the throat-property name was elided in the source; 'throat.conns'
    # and the 'lil' sparse format are inferred from the usage below
    tvals = self[].astype(int)
    am = self.create_adjacency_matrix(weights=tvals, fmt='lil', drop_zeros=True)
    for t in throats:
        P12 = self['throat.conns'][t]
        Ps = list(set(am.rows[P12][0]).intersection(am.rows[P12][1]))
        temp.append(Ps)
    return sp.array(temp, dtype=object)
r""" Finds the indicies of the Voronoi nodes that define the facet or ridge between the Delaunay nodes connected by the given throat. Parameters ---------- throats : array_like The throats whose facets are sought. The given throats should be from the 'delaunay' network. If no throats are specified, all 'delaunay' throats are assumed. Notes ----- The method is not well optimized as it scans through each given throat inside a for-loop, so it could be slow for large networks.
25,817
def check_is_working(self):
    try:
        r = requests.post("http://{}/".format(LAUNDRY_DOMAIN), timeout=60, data={
            "locationid": "5faec7e9-a4aa-47c2-a514-950c03fac460",
            "email": "[email protected]",
            "washers": 0,
            "dryers": 0,
            "locationalert": "OK"
        })
        r.raise_for_status()
        return "The transaction log for database is full due to ." not in r.text
    except requests.exceptions.HTTPError:
        return False
Returns True if the wash alert web interface seems to be working properly, or False otherwise. >>> l.check_is_working()
25,818
def read_in_chunks(file_obj, chunk_size):
    offset = 0
    while True:
        data = file_obj.read(chunk_size)
        if not data:
            break
        yield data, offset
        offset += len(data)
Generator to read a file piece by piece.
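A quick doctest-style sketch of how the generator pairs each chunk with its byte offset (the in-memory file is illustrative):

>>> import io
>>> f = io.BytesIO(b"abcdefghij")
>>> [(chunk, off) for chunk, off in read_in_chunks(f, 4)]
[(b'abcd', 0), (b'efgh', 4), (b'ij', 8)]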
25,819
def image_id(self):
    try:
        image_id = self.data["ImageID"]
    except KeyError:
        image_id = self.metadata_get(["Image"])
    return image_id
this container is created from image with id...
25,820
def info(self):
    # the format strings and the join separator were elided in the source
    if self.descriptions is None:
        choice_list = [.format(choice) for choice in self.choices]
    else:
        choice_list = [
            .format(choice, self.descriptions[choice])
            for choice in self.choices
        ]
    if len(self.choices) == 2:
        return .format(choice_list[0], choice_list[1])
    return .format(.join(choice_list))
Formatted string to display the available choices
25,821
def get_all(self):
    if not self.vars:
        return self.parent
    if not self.parent:
        return self.vars
    return dict(self.parent, **self.vars)
Return the complete context as dict including the exported variables. For optimizations reasons this might not return an actual copy so be careful with using it.
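The merge on the last line is plain dict semantics: local variables shadow the parent's. A minimal illustration:

>>> parent = {'a': 1, 'b': 2}
>>> child = {'b': 20, 'c': 30}
>>> dict(parent, **child)
{'a': 1, 'b': 20, 'c': 30}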
25,822
def _find_newest_ckpt(ckpt_dir):
    full_paths = [
        os.path.join(ckpt_dir, fname) for fname in os.listdir(ckpt_dir)
        if fname.startswith("experiment_state") and fname.endswith(".json")
    ]
    return max(full_paths)
Returns path to most recently modified checkpoint.
25,823
def headers(self):
    if py3k:
        return dict((k.lower(), v) for k, v in self.getheaders())
    else:
        return dict(self.getheaders())
Response headers. Response headers is a dict with all keys in lower case. >>> import urlfetch >>> response = urlfetch.get("http://docs.python.org/") >>> response.headers { 'content-length': '8719', 'x-cache': 'MISS from localhost', 'accept-ranges': 'bytes', 'vary': 'Accept-Encoding', 'server': 'Apache/2.2.16 (Debian)', 'last-modified': 'Tue, 26 Jun 2012 19:23:18 GMT', 'connection': 'close', 'etag': '"13cc5e4-220f-4c36507ded580"', 'date': 'Wed, 27 Jun 2012 06:50:30 GMT', 'content-type': 'text/html', 'x-cache-lookup': 'MISS from localhost:8080' }
25,824
def get_boto_ses_connection():
    # the settings names were elided in the source; the values below
    # follow django-ses's upstream conventions
    access_key_id = getattr(
        settings, 'AWS_SES_ACCESS_KEY_ID',
        getattr(settings, 'AWS_ACCESS_KEY_ID', None))
    access_key = getattr(
        settings, 'AWS_SES_SECRET_ACCESS_KEY',
        getattr(settings, 'AWS_SECRET_ACCESS_KEY', None))
    region_name = getattr(
        settings, 'AWS_SES_REGION_NAME',
        getattr(settings, 'AWS_REGION_NAME', None))
    if region_name is not None:
        return boto.ses.connect_to_region(
            region_name,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=access_key,
        )
    else:
        return boto.connect_ses(
            aws_access_key_id=access_key_id,
            aws_secret_access_key=access_key,
        )
Shortcut for instantiating and returning a boto SESConnection object. :rtype: boto.ses.SESConnection :returns: A boto SESConnection object, from which email sending is done.
25,825
async def stream(self, event_type: Type[TStreamEvent],
                 num_events: Optional[int] = None) -> AsyncGenerator[TStreamEvent, None]:
    queue: asyncio.Queue = asyncio.Queue()
    if event_type not in self._queues:
        self._queues[event_type] = []
    self._queues[event_type].append(queue)
    i = None if num_events is None else 0
    while True:
        try:
            yield await queue.get()
        except GeneratorExit:
            self._queues[event_type].remove(queue)
            break
        except asyncio.CancelledError:
            self._queues[event_type].remove(queue)
            break
        else:
            if i is None:
                continue
            i += 1
            if i >= cast(int, num_events):
                self._queues[event_type].remove(queue)
                break
Stream all events that match the specified event type. This returns an ``AsyncIterable[BaseEvent]`` which can be consumed through an ``async for`` loop. An optional ``num_events`` parameter can be passed to stop streaming after a maximum number of events has been received.
25,826
def from_database(cls, database):
    if isinstance(database, PostgresqlDatabase):
        return PostgresqlMigrator(database)
    if isinstance(database, SqliteDatabase):
        return SqliteMigrator(database)
    if isinstance(database, MySQLDatabase):
        return MySQLMigrator(database)
    return super(SchemaMigrator, cls).from_database(database)
Initialize migrator by db.
25,827
def c_Duffy(z, m, h=h):
    z, m = _check_inputs(z, m)
    M_pivot = 2.e12 / h
    A = 5.71
    B = -0.084
    C = -0.47
    concentration = A * ((m / M_pivot)**B) * (1 + z)**C
    return concentration
Concentration from c(M) relation published in Duffy et al. (2008). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. References ---------- Results from N-body simulations using WMAP5 cosmology, presented in: A.R. Duffy, J. Schaye, S.T. Kay, and C. Dalla Vecchia, "Dark matter halo concentrations in the Wilkinson Microwave Anisotropy Probe year 5 cosmology," Monthly Notices of the Royal Astronomical Society, Volume 390, Issue 1, pp. L64-L68, 2008. This calculation uses the parameters corresponding to the NFW model, the '200' halo definition, and the 'full' sample of halos spanning z = 0-2. This means the values of fitted parameters (A,B,C) = (5.71, -0.084,-0.47) in Table 1 of Duffy et al. (2008).
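A self-contained restatement of the same relation, to show the arithmetic (the h value here is illustrative; the library's _check_inputs wrapper is omitted):

import numpy as np

def duffy_concentration(z, m, h=0.7):
    # c200 = A * (m / M_pivot)**B * (1 + z)**C with (A, B, C) from
    # Table 1 of Duffy et al. (2008)
    M_pivot = 2.e12 / h
    return 5.71 * (np.asarray(m) / M_pivot)**-0.084 * (1 + np.asarray(z))**-0.47

# e.g. a 1e14 Msun halo at z = 0.5
print(duffy_concentration(0.5, 1e14))  # ~3.5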
25,828
def _runOPF(self):
    if self.decommit:
        solver = UDOPF(self.case, dc=(self.locationalAdjustment == "dc"))
    elif self.locationalAdjustment == "dc":
        solver = OPF(self.case, dc=True)
    else:
        solver = OPF(self.case, dc=False, opt={"verbose": True})
    self._solution = solver.solve()
    return self._solution["converged"]
Computes dispatch points and LMPs using OPF.
25,829
def get_init(self):
    suffix = self._separator + "%s" % str(self._counter_init)
    return self._base_name + suffix
Return initial name.
25,830
def _get_dvs_portgroup(dvs, portgroup_name):
    for portgroup in dvs.portgroup:
        if portgroup.name == portgroup_name:
            return portgroup
    return None
Return a portgroup object corresponding to the portgroup name on the dvs :param dvs: DVS object :param portgroup_name: Name of portgroup to return :return: Portgroup object
25,831
def get_device_model():
    try:
        return apps.get_model(settings.GCM_DEVICE_MODEL)
    except ValueError:
        # message strings inferred from Django's swappable-model idiom;
        # the literals were elided in the source
        raise ImproperlyConfigured(
            "GCM_DEVICE_MODEL must be of the form 'app_label.model_name'")
    except LookupError:
        raise ImproperlyConfigured(
            "GCM_DEVICE_MODEL refers to model '%s' that has not been installed"
            % settings.GCM_DEVICE_MODEL
        )
Returns the Device model that is active in this project.
25,832
def from_shapely(geometry, label=None):
    import shapely.geometry
    if isinstance(geometry, shapely.geometry.MultiPolygon):
        return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms])
    elif isinstance(geometry, shapely.geometry.Polygon):
        return MultiPolygon([Polygon.from_shapely(geometry, label=label)])
    elif isinstance(geometry, shapely.geometry.collection.GeometryCollection):
        ia.do_assert(all([isinstance(poly, shapely.geometry.Polygon) for poly in geometry.geoms]))
        return MultiPolygon([Polygon.from_shapely(poly, label=label) for poly in geometry.geoms])
    else:
        # the %s placeholder is implied by the trailing % formatting
        raise Exception("Unknown datatype %s. Expected shapely.geometry.Polygon or "
                        "shapely.geometry.MultiPolygon or "
                        "shapely.geometry.collections.GeometryCollection." % (type(geometry),))
Create a MultiPolygon from a Shapely MultiPolygon, a Shapely Polygon or a Shapely GeometryCollection. This also creates all necessary Polygons contained by this MultiPolygon. Parameters ---------- geometry : shapely.geometry.MultiPolygon or shapely.geometry.Polygon\ or shapely.geometry.collection.GeometryCollection The object to convert to a MultiPolygon. label : None or str, optional A label assigned to all Polygons within the MultiPolygon. Returns ------- imgaug.MultiPolygon The derived MultiPolygon.
25,833
def from_masked_images(cls: Type[T], masked_images: Iterable[np.ndarray],
                       n_subjects: int) -> T:
    images_iterator = iter(masked_images)
    first_image = next(images_iterator)
    first_image_shape = first_image.T.shape
    result = np.empty((first_image_shape[0], first_image_shape[1],
                       n_subjects))
    for n_images, image in enumerate(itertools.chain([first_image],
                                                     images_iterator)):
        image = image.T
        if image.shape != first_image_shape:
            raise ValueError("Image {} has different shape from first "
                             "image: {} != {}".format(n_images, image.shape,
                                                      first_image_shape))
        result[:, :, n_images] = image
    n_images += 1
    if n_images != n_subjects:
        raise ValueError("n_subjects != number of images: {} != {}"
                         .format(n_subjects, n_images))
    return result.view(cls)
Create a new instance of MaskedMultiSubjectData from masked images. Parameters ---------- masked_images : iterator Images from multiple subjects to stack along 3rd dimension n_subjects : int Number of subjects; must match the number of images Returns ------- T A new instance of MaskedMultiSubjectData Raises ------ ValueError Images have different shapes. The number of images differs from n_subjects.
25,834
def store(self, name, data, version, size=0, compressed=False, digest=None, logical_size=None): with _exclusive_lock(self._lock_path(, name)): logger.debug(, name) link_path = self._link_path(name) if _path_exists(link_path) and _file_version(link_path) > version: logger.info( , name, version, _file_version(link_path)) return _file_version(link_path) logger.warning( ) with gzip.open( contents.current_path, ) as decompressed: digest = file_digest(decompressed) with gzip.open( contents.current_path, ) as decompressed: logical_size = _read_stream_for_size(decompressed) else: digest = file_digest(contents.current_path) logical_size = os.stat(contents.current_path).st_size blob_path = self._blob_path(digest) with _exclusive_lock(self._lock_path(, digest)): logger.debug(, digest) digest_bytes = digest.encode() with self._db_transaction() as txn: logger.debug() link_count = int(self.db.get(digest_bytes, 0, txn=txn)) new_count = str(link_count + 1).encode() self.db.put(digest_bytes, new_count, txn=txn) if link_count == 0: self.db.put( .format(digest).encode(), str(logical_size).encode(), txn=txn) logger.debug() logger.debug()
Adds a new file to the storage. If the file with the same name existed before, it's not guaranteed that the link for the old version will exist until the operation completes, but it's guaranteed that the link will never point to an invalid blob. Args: name: name of the file being stored. May contain slashes that are treated as path separators. data: binary file-like object with file contents. Files with unknown length are supported for compatibility with WSGI interface: ``size`` parameter should be passed in these cases. version: new file "version" Link modification time will be set to this timestamp. If the link exists, and its modification time is higher, the file is not overwritten. size: length of ``data`` in bytes If not 0, this takes priority over internal ``data`` size. compressed: whether ``data`` is gzip-compressed If True, the compression is skipped, and file is written as-is. Note that the current server implementation sends 'Content-Encoding' header anyway, mandating client to decompress the file. digest: SHA256 digest of the file before compression If specified, the digest will not be computed again, saving resources. logical_size: if ``data`` is gzip-compressed, this parameter has to be set to decompressed file size.
25,835
def play(self):
    if self._proc.state() == QProcess.Running:
        if self.isPlaying is False:
            self._execute("pause")
            self._changePlayingState(True)
    elif self._filePath is not None:
        self._kill()
        self._run(self._filePath)
        self._changePlayingState(True)
Starts playback.
25,836
def nfa_word_acceptance(nfa: dict, word: list) -> bool:
    # the dict keys were elided in the source; 'initial_states',
    # 'transitions', and 'accepting_states' follow the PySimpleAutomata
    # NFA layout referenced by the docstring
    current_level = set()
    current_level = current_level.union(nfa['initial_states'])
    next_level = set()
    for action in word:
        for state in current_level:
            if (state, action) in nfa['transitions']:
                next_level.update(nfa['transitions'][state, action])
        if len(next_level) < 1:
            return False
        current_level = next_level
        next_level = set()
    if current_level.intersection(nfa['accepting_states']):
        return True
    else:
        return False
Checks if a given word is accepted by an NFA. The word w is accepted by an NFA if there exists at least one accepting run on w. :param dict nfa: input NFA; :param list word: list of symbols ∈ nfa['alphabet']; :return: *(bool)*, True if the word is accepted, False otherwise.
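A tiny usage sketch, using the function above with the inferred key names (the key layout is an assumption noted in the code):

# NFA over {'a', 'b'} accepting words that end in 'b'
nfa = {
    'alphabet': {'a', 'b'},
    'states': {'q0', 'q1'},
    'initial_states': {'q0'},
    'accepting_states': {'q1'},
    'transitions': {
        ('q0', 'a'): {'q0'},
        ('q0', 'b'): {'q0', 'q1'},
    },
}
print(nfa_word_acceptance(nfa, ['a', 'b']))  # True
print(nfa_word_acceptance(nfa, ['b', 'a']))  # False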
25,837
def beginning_offsets(self, partitions):
    # the config key was elided in the source; kafka-python uses
    # 'request_timeout_ms' here
    offsets = self._fetcher.beginning_offsets(
        partitions, self.config['request_timeout_ms'])
    return offsets
Get the first offset for the given partitions. This method does not change the current consumer position of the partitions. Note: This method may block indefinitely if the partition does not exist. Arguments: partitions (list): List of TopicPartition instances to fetch offsets for. Returns: ``{TopicPartition: int}``: The earliest available offsets for the given partitions. Raises: UnsupportedVersionError: If the broker does not support looking up the offsets by timestamp. KafkaTimeoutError: If fetch failed in request_timeout_ms.
25,838
def write(path, pid, timestamp):
    with open(path, "w") as lockfile:
        print(pid, timestamp, file=lockfile)
Write the contents of a LockFile. Arguments: path (str): Path to lockfile. pid (int): The integer process ID. timestamp (datetime): The time the lock was acquired.
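Usage is straightforward; a doctest-style sketch (the lockfile path is illustrative):

>>> import os, datetime
>>> write("/tmp/demo.lock", os.getpid(), datetime.datetime.now())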
25,839
def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized=True):
    if normalized:
        if np.isscalar(size):
            norm = size
        else:
            norm = np.int32(np.prod(size))**(1./len(size))
        FUNC = "res+val/%s" % norm
    else:
        FUNC = "res+val"
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0"))
    res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
    return res
mean filter of given size Parameters ---------- data: 2 or 3 dimensional ndarray or OCLArray of type float32 input data size: scalar, tuple the size of the patch to consider res_g: OCLArray store result in buffer if given sub_blocks: perform over subblock tiling (only if data is ndarray) normalized: bool if True, the filter corresponds to mean if False, the filter corresponds to sum Returns ------- filtered image or None (if OCLArray)
25,840
def sendData(self, data):
    if self.client_log is None:
        self.client_log_buffer.append(data)
    else:
        self.client_log.write(data)
    self.transport.write(data)
Write data to server
25,841
def get_options(self):
    (options, args) = self.parser.parse_args()
    return options
Get the options that have been set. Called after the user has added all their own options and is ready to use the variables.
25,842
def CreateDevice(self, device_address):
    # the elided literals are inferred from the python-dbusmock BlueZ
    # template: 'dev_' + address with ':' mapped to '_'
    device_name = 'dev_' + device_address.replace(':', '_').upper()
    adapter_path = self.path
    path = adapter_path + '/' + device_name
    if path not in mockobject.objects:
        raise dbus.exceptions.DBusException(
            'No such device %s' % device_address,
            name='org.bluez.Error.NoSuchDevice')
    adapter = mockobject.objects[self.path]
    adapter.EmitSignal(ADAPTER_IFACE, 'DeviceCreated', 'o',
                       [dbus.ObjectPath(path, variant_level=1)])
    return dbus.ObjectPath(path, variant_level=1)
Create a new device
25,843
def set_policy_alert_threshold(self, policy_ids, alert_threshold):
    for policy_id in policy_ids:
        # the debug/error format strings were elided in the source;
        # 'OK' is the ZAP API's success status
        self.logger.debug(.format(policy_id, alert_threshold))
        result = self.zap.ascan.set_policy_alert_threshold(policy_id, alert_threshold)
        if result != 'OK':
            raise ZAPError(.format(policy_id, result))
Set the alert threshold for the given policies.
25,844
def support_autoupload_param_password(self, **kwargs):
    config = ET.Element("config")
    support = ET.SubElement(config, "support", xmlns="urn:brocade.com:mgmt:brocade-ras")
    autoupload_param = ET.SubElement(support, "autoupload-param")
    password = ET.SubElement(autoupload_param, "password")
    password.text = kwargs.pop('password')  # key inferred from the element name
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
25,845
def window_features(idx, window_size=100, overlap=10):
    overlap = window_size - overlap
    sh = (idx.size - window_size + 1, window_size)
    st = idx.strides * 2
    view = np.lib.stride_tricks.as_strided(idx, strides=st, shape=sh)[0::overlap]
    return view
Generate indexes for a sliding window with overlap :param array idx: The indexes that need to be windowed. :param int window_size: The size of the window. :param int overlap: How much should each window overlap. :return array view: The indexes for the windows with overlap.
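A quick sketch of what the strided view yields, using the function above (note it reinterprets ``overlap`` as the step ``window_size - overlap``):

import numpy as np

idx = np.arange(12)
w = window_features(idx, window_size=6, overlap=2)
# step = 6 - 2 = 4, so consecutive windows share 2 samples
print(w)
# [[0 1 2 3 4 5]
#  [4 5 6 7 8 9]]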
25,846
def make_future_info(first_sid, root_symbols, years, notice_date_func,
                     expiration_date_func, start_date_func, month_codes=None,
                     multiplier=500):
    if month_codes is None:
        month_codes = CMES_CODE_TO_MONTH
    year_strs = list(map(str, years))
    # the elided string literals ('UTC', the record keys, and the index
    # name) are inferred from the docstring and zipline conventions
    years = [pd.Timestamp(s, tz='UTC') for s in year_strs]
    contract_suffix_to_beginning_of_month = tuple(
        (month_code + year_str[-2:], year + MonthBegin(month_num))
        for ((year, year_str), (month_code, month_num)) in product(
            zip(years, year_strs),
            iteritems(month_codes),
        )
    )
    contracts = []
    parts = product(root_symbols, contract_suffix_to_beginning_of_month)
    for sid, (root_sym, (suffix, month_begin)) in enumerate(parts, first_sid):
        contracts.append({
            'sid': sid,
            'root_symbol': root_sym,
            'symbol': root_sym + suffix,
            'start_date': start_date_func(month_begin),
            'notice_date': notice_date_func(month_begin),
            'expiration_date': notice_date_func(month_begin),
            'multiplier': multiplier,
            'exchange': "TEST",
        })
    return pd.DataFrame.from_records(contracts, index='sid')
Create a DataFrame representing futures for `root_symbols` during `year`. Generates a contract per triple of (symbol, year, month) supplied to `root_symbols`, `years`, and `month_codes`. Parameters ---------- first_sid : int The first sid to use for assigning sids to the created contracts. root_symbols : list[str] A list of root symbols for which to create futures. years : list[int or str] Years (e.g. 2014), for which to produce individual contracts. notice_date_func : (Timestamp) -> Timestamp Function to generate notice dates from first of the month associated with asset month code. Return NaT to simulate futures with no notice date. expiration_date_func : (Timestamp) -> Timestamp Function to generate expiration dates from first of the month associated with asset month code. start_date_func : (Timestamp) -> Timestamp, optional Function to generate start dates from first of the month associated with each asset month code. Defaults to a start_date one year prior to the month_code date. month_codes : dict[str -> [1..12]], optional Dictionary of month codes for which to create contracts. Entries should be strings mapped to values from 1 (January) to 12 (December). Default is zipline.futures.CMES_CODE_TO_MONTH multiplier : int The contract multiplier. Returns ------- futures_info : pd.DataFrame DataFrame of futures data suitable for passing to an AssetDBWriter.
25,847
def __dbfHeader(self):
    if not self.dbf:
        raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
    dbf = self.dbf
    headerLength = self.__dbfHeaderLength()
    numFields = (headerLength - 33) // 32
    for field in range(numFields):
        fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
        name = 0
        idx = 0
        if b("\x00") in fieldDesc[name]:
            idx = fieldDesc[name].index(b("\x00"))
        else:
            idx = len(fieldDesc[name]) - 1
        fieldDesc[name] = fieldDesc[name][:idx]
        fieldDesc[name] = u(fieldDesc[name])
        fieldDesc[name] = fieldDesc[name].lstrip()
        fieldDesc[1] = u(fieldDesc[1])
        self.fields.append(fieldDesc)
    terminator = dbf.read(1)
    assert terminator == b("\r")
    # pyshp prepends a deletion-flag pseudo-field; the elided literals
    # are ('DeletionFlag', 'C', 1, 0) in the upstream source
    self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger
25,848
def _append_zeros_if_too_small(self, value):
    size_diff = len(value) - len(self.array)
    if size_diff:
        self.array = np.append(
            self.array, np.zeros(size_diff, dtype=self.array.dtype)
        )
Appends zeros to the points stored if the value we are trying to fit is bigger
25,849
def _check_module_attrs(self, node, module, module_names):
    assert isinstance(module, astroid.Module), module
    while module_names:
        name = module_names.pop(0)
        if name == "__dict__":
            module = None
            break
        try:
            module = next(module.getattr(name)[0].infer())
            if module is astroid.Uninferable:
                return None
        except astroid.NotFoundError:
            if module.name in self._ignored_modules:
                return None
            self.add_message(
                "no-name-in-module", args=(name, module.name), node=node
            )
            return None
        except astroid.InferenceError:
            return None
    if module_names:
        modname = module.name if module else "__dict__"
        self.add_message(
            "no-name-in-module", node=node, args=(".".join(module_names), modname)
        )
        return None
    if isinstance(module, astroid.Module):
        return module
    return None
Check that module_names (list of strings) are accessible through the given module; if the last accessed name corresponds to a module, return it.
25,850
def reorder(self, dst_order, arr, src_order=None):
    if dst_order is None:
        dst_order = self.viewer.rgb_order
    if src_order is None:
        src_order = self.rgb_order
    if src_order != dst_order:
        arr = trcalc.reorder_image(dst_order, arr, src_order)
    return arr
Reorder the output array to match that needed by the viewer.
25,851
def _hrf_kernel(hrf_model, tr, oversampling=50, fir_delays=None):
    # the model-name strings were elided in the source; the names below
    # are inferred from the kernel functions each branch builds
    acceptable_hrfs = [
        'spm', 'spm + derivative', 'spm + derivative + dispersion',
        'glover', 'glover + derivative',
        'glover + derivative + dispersion', 'fir', None]
    if hrf_model == 'spm':
        hkernel = [spm_hrf(tr, oversampling)]
    elif hrf_model == 'spm + derivative':
        hkernel = [spm_hrf(tr, oversampling),
                   spm_time_derivative(tr, oversampling)]
    elif hrf_model == 'spm + derivative + dispersion':
        hkernel = [spm_hrf(tr, oversampling),
                   spm_time_derivative(tr, oversampling),
                   spm_dispersion_derivative(tr, oversampling)]
    elif hrf_model == 'glover':
        hkernel = [glover_hrf(tr, oversampling)]
    elif hrf_model == 'glover + derivative':
        hkernel = [glover_hrf(tr, oversampling),
                   glover_time_derivative(tr, oversampling)]
    elif hrf_model == 'glover + derivative + dispersion':
        hkernel = [glover_hrf(tr, oversampling),
                   glover_time_derivative(tr, oversampling),
                   glover_dispersion_derivative(tr, oversampling)]
    elif hrf_model == 'fir':
        hkernel = [np.hstack((np.zeros(f * oversampling),
                              np.ones(oversampling)))
                   for f in fir_delays]
    elif hrf_model is None:
        hkernel = [np.hstack((1, np.zeros(oversampling - 1)))]
    else:
        raise ValueError('"{0}" is not a known hrf model. Use one of {1}'
                         .format(hrf_model, acceptable_hrfs))
    return hkernel
Given the specification of the hemodynamic model and time parameters, return the list of matching kernels Parameters ---------- hrf_model : string or None, identifier of the hrf model tr : float the repetition time in seconds oversampling : int, optional temporal oversampling factor to have a smooth hrf fir_delays : list of floats, list of delays for finite impulse response models Returns ------- hkernel : list of arrays samples of the hrf (the number depends on the hrf_model used)
25,852
def send_script_sync(self, conn_id, data, progress_callback):
    done = threading.Event()
    result = {}

    def send_script_done(conn_id, adapter_id, status, reason):
        # result keys taken from the docstring
        result['success'] = status
        result['failure_reason'] = reason
        done.set()

    self.send_script_async(conn_id, data, progress_callback, send_script_done)
    done.wait()
    return result
Synchronously send a script to this IOTile device. Args: conn_id (int): A unique identifier that will refer to this connection data (string): the script to send to the device progress_callback (callable): A function to be called with status on our progress, called as: progress_callback(done_count, total_count) Returns: dict: a dict with the following two entries set 'success': a bool indicating whether we received a response to our attempted RPC 'failure_reason': a string with the reason for the failure if success == False
25,853
def findWalkthrough(self, name):
    for walkthrough in self._walkthroughs:
        if walkthrough.name() == name:
            return walkthrough
    return None
Looks up the walkthrough based on the given name. :param name | <str>
25,854
def df_representative_structures(self):
    # the column-name literals and log message were elided in the source
    rep_struct_pre_df = []
    df_cols = [, , , , ]
    for g in self.genes_with_a_representative_structure:
        repdict = g.protein.representative_structure.get_dict(
            df_format=True, only_attributes=df_cols)
        repdict[] = g.id
        rep_struct_pre_df.append(repdict)
    df = pd.DataFrame.from_records(rep_struct_pre_df, columns=df_cols).set_index()
    if df.empty:
        log.warning()
        return df
    else:
        return ssbio.utils.clean_df(df)
DataFrame: Get a dataframe of representative protein structure information.
25,855
def r(self, **kwargs):
    # the 'default' key is implied by the docstring
    if 'default' in kwargs:
        default = kwargs.pop('default')
        if kwargs:
            raise TypeError(
                "Unexpected argument: {}".format(repr(next(iter(kwargs))))
            )
        return default
    else:
        raise JSaneException(
            "Key does not exist: {}".format(repr(self._key_name))
        )
Resolve the object. This returns default (if present) or fails on an Empty.
25,856
def delete(cls, cert_id, background=False):
    # the API method name and the echo format string were elided in the source
    result = cls.call(, cert_id)
    if background:
        return result
    cls.echo("Deleting your certificate.")
    cls.display_progress(result)
    cls.echo( % cert_id)
    return result
Delete a certificate.
25,857
def setModelData(self, editor, model, index):
    try:
        data = editor.getData()
    except InvalidInputError as ex:
        logger.warn(ex)
    else:
        logger.debug("ConfigItemDelegate.setModelData: {}".format(data))
        model.setData(index, data, Qt.EditRole)
Gets data from the editor widget and stores it in the specified model at the item index. Does this by calling getEditorValue of the config tree item at the index. :type editor: QWidget :type model: ConfigTreeModel :type index: QModelIndex Reimplemented from QStyledItemDelegate.
25,858
def get_customer_group_by_id(cls, customer_group_id, **kwargs):
    # kwarg keys inferred from the swagger-codegen idiom and the
    # docstring's async=True example
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs)
    else:
        (data) = cls._get_customer_group_by_id_with_http_info(customer_group_id, **kwargs)
        return data
Find CustomerGroup Return single instance of CustomerGroup by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_customer_group_by_id(customer_group_id, async=True) >>> result = thread.get() :param async bool :param str customer_group_id: ID of customerGroup to return (required) :return: CustomerGroup If the method is called asynchronously, returns the request thread.
25,859
def findTextBackward(self, block, column, needle):
    if column is not None:
        index = block.text()[:column].rfind(needle)
    else:
        index = block.text().rfind(needle)
    if index != -1:
        return block, index
    for block in self.iterateBlocksBackFrom(block.previous()):
        column = block.text().rfind(needle)
        if column != -1:
            return block, column
    raise ValueError('Not found')  # message inferred from the docstring
Search for a needle and return (block, column) Raise ValueError, if not found
25,860
def get_key_names(self):
    names = []
    for (k, _) in self.items():
        names.append(k)
    return names
Gets keys of all elements stored in this map. :return: a list with all map keys.
25,861
def user_add_link(self):
    # several dict keys, the role key, and the redirect target were
    # elided in the source
    if self.check_post_role()[]:
        pass
    else:
        return False
    post_data = self.get_post_data()
    post_data[] = self.get_current_user()
    cur_uid = tools.get_uudd(2)
    while MLink.get_by_uid(cur_uid):
        cur_uid = tools.get_uudd(2)
    MLink.create_link(cur_uid, post_data)
    self.redirect()
Create link by user.
25,862
def with_ascendants_for_slug(self, slug, **kwargs):
    if slug == "/":
        slugs = [home_slug()]
    else:
        parts = slug.split("/")
        slugs = ["/".join(parts[:i]) for i in range(1, len(parts) + 1)]
    pages_for_user = self.published(**kwargs)
    pages = list(pages_for_user.filter(slug__in=slugs).order_by("-slug"))
    if not pages:
        return []
    pages[0]._ascendants = []
    for i, page in enumerate(pages):
        try:
            parent = pages[i + 1]
        except IndexError:
            if page.parent_id:
                break
        else:
            if page.parent_id != parent.id:
                break
    else:
        pages[0]._ascendants = pages[1:]
    return pages
Given a slug, returns a list of pages from ascendants to descendants, that form the parent/child page relationships for that slug. The main concern is to do this in a single database query rather than querying the database for parents of a given page. Primarily used in ``PageMiddleware`` to provide the current page, which in the case of non-page views, won't match the slug exactly, but will likely match a page that has been created for linking to the entry point for the app, eg the blog page when viewing blog posts. Also used within ``Page.get_ascendants``, which gets called in the ``pages.views`` view, for building a list of possible templates that can be used for the page. If a valid chain of pages is found, we also assign the pages to the ``page._ascendants`` attr of the main/first/deepest page, so that when its ``get_ascendants`` method is called, the ascendants chain can be re-used without querying the database again. This occurs at least once, given the second use-case described above.
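The slug list is just every prefix of the path, which is what makes the single slug__in query possible. For example:

>>> parts = "blog/2020/my-post".split("/")
>>> ["/".join(parts[:i]) for i in range(1, len(parts) + 1)]
['blog', 'blog/2020', 'blog/2020/my-post']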
25,863
def scale(self, w=1.0, h=1.0):
    from types import FloatType  # Python 2
    w0, h0 = self.img.size
    if type(w) == FloatType:
        w = int(w*w0)
    if type(h) == FloatType:
        h = int(h*h0)
    self.img = self.img.resize((w, h), INTERPOLATION)
    self.w = w
    self.h = h
Resizes the layer to the given width and height. When width w or height h is a floating-point number, scales percentual, otherwise scales to the given size in pixels.
25,864
def base_style(self):
    basedOn = self.basedOn
    if basedOn is None:
        return None
    styles = self.getparent()
    base_style = styles.get_by_id(basedOn.val)
    if base_style is None:
        return None
    return base_style
Sibling CT_Style element this style is based on or |None| if no base style or base style not found.
25,865
def configure(configFile=None, baseConfig="ProductionConfig",
              port=8000, extraConfig={}):
    # numerous configuration-key string literals were elided in the
    # source and are left as gaps below
    file_handler = StreamHandler()
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)
    configStr = .format(baseConfig)
    app.config.from_object(configStr)
    if os.environ.get() is not None:
        app.config.from_envvar()
    if configFile is not None:
        app.config.from_pyfile(configFile)
    app.config.update(extraConfig.items())
    datamodel.fileHandleCache.setMaxCacheSize(
        app.config["FILE_HANDLE_CACHE_MAX_SIZE"])
    try:
        cors.CORS(app, allow_headers=)
    except AssertionError:
        pass
    app.serverStatus = ServerStatus()
    app.backend = _configure_backend(app)
    if app.config.get():
        app.secret_key = app.config[]
    elif app.config.get():
        raise exceptions.ConfigurationException()
    if app.config.get():
        app.cache_dir = app.config[]
    else:
        app.cache_dir =
    app.cache = FileSystemCache(
        app.cache_dir, threshold=5000, default_timeout=600, mode=384)
    network.initialize(
        app.config.get(), app.backend.getDataRepository(), app.logger)
    app.oidcClient = None
    app.myPort = port
    if app.config.get():
        emails = app.config.get(, ).split()
        [auth.authorize_email(e, app.cache) for e in emails]
    if "OIDC_PROVIDER" in app.config:
        app.oidcClient = oic.oic.Client(
            verify_ssl=( not in app.config))
        try:
            app.oidcClient.provider_config(app.config[])
        except requests.exceptions.ConnectionError:
            configResponse = message.ProviderConfigurationResponse(
                issuer=app.config[],
                authorization_endpoint=app.config[],
                token_endpoint=app.config[],
                revocation_endpoint=app.config[])
            app.oidcClient.handle_provider_config(configResponse,
                                                  app.config[])
        redirectUri = app.config.get()
        if redirectUri is None and app.config.get():
            redirectUri = .format(socket.gethostname(), app.myPort)
        app.oidcClient.redirect_uris = [redirectUri]
        if redirectUri is []:
            raise exceptions.ConfigurationException()
        if ( in app.oidcClient.provider_info and app.config.get()):
            app.oidcClient.register(
                app.oidcClient.provider_info["registration_endpoint"],
                redirect_uris=[redirectUri])
        else:
            response = message.RegistrationResponse(
                client_id=app.config[],
                client_secret=app.config[],
                redirect_uris=[redirectUri],
                verify_ssl=False)
            app.oidcClient.store_registration_info(response)
TODO Document this critical function! What does it do? What does it assume?
25,866
def force_atlas2_layout(graph, pos_list=None, node_masses=None, iterations=100,
                        outbound_attraction_distribution=False, lin_log_mode=False,
                        prevent_overlapping=False, edge_weight_influence=1.0,
                        jitter_tolerance=1.0, barnes_hut_optimize=False,
                        barnes_hut_theta=1.2, scaling_ratio=2.0,
                        strong_gravity_mode=False, multithread=False, gravity=1.0):
    assert isinstance(graph, networkx.classes.graph.Graph), "Not a networkx graph"
    assert isinstance(pos_list, dict) or (pos_list is None), "pos must be specified as a dictionary, as in networkx"
    assert multithread is False, "Not implemented yet"
    G = numpy.asarray(networkx.to_numpy_matrix(graph))
    pos = None
    if pos_list is not None:
        pos = numpy.asarray([pos_list[i] for i in graph.nodes()])
    masses = None
    if node_masses is not None:
        masses = numpy.asarray([node_masses[node] for node in graph.nodes()])
    assert G.shape == (G.shape[0], G.shape[0]), "G is not 2D square"
    assert numpy.all(G.T == G), "G is not symmetric."
    speed = 1
    speed_efficiency = 1
    nodes = []
    for i in range(0, G.shape[0]):
        n = Node()
        if node_masses is None:
            n.mass = 1 + numpy.count_nonzero(G[i])
        else:
            n.mass = masses[i]
        n.old_dx = 0
        n.old_dy = 0
        n.dx = 0
        n.dy = 0
        if pos is None:
            n.x = random.random()
            n.y = random.random()
        else:
            n.x = pos[i][0]
            n.y = pos[i][1]
        nodes.append(n)
    edges = []
    es = numpy.asarray(G.nonzero()).T
    for e in es:
        if e[1] <= e[0]:
            continue
        edge = Edge()
        edge.node1 = e[0]
        edge.node2 = e[1]
        edge.weight = G[tuple(e)]
        edges.append(edge)
    repulsion = get_repulsion(prevent_overlapping, scaling_ratio)
    if strong_gravity_mode:
        gravity_force = get_strong_gravity(scaling_ratio)
    else:
        gravity_force = repulsion
    if outbound_attraction_distribution:
        outbound_att_compensation = numpy.mean([n.mass for n in nodes])
    attraction_coef = outbound_att_compensation if outbound_attraction_distribution else 1
    attraction = get_attraction(lin_log_mode, outbound_attraction_distribution,
                                prevent_overlapping, attraction_coef)
    for _i in range(0, iterations):
        for n in nodes:
            n.old_dx = n.dx
            n.old_dy = n.dy
            n.dx = 0
            n.dy = 0
        root_region = None
        if barnes_hut_optimize:
            root_region = Quadtree(nodes)
            root_region.build()
        apply_repulsion(repulsion, nodes, barnes_hut_optimize=barnes_hut_optimize,
                        barnes_hut_theta=barnes_hut_theta, region=root_region)
        apply_gravity(gravity_force, nodes, gravity, scaling_ratio)
        apply_attraction(attraction, nodes, edges, edge_weight_influence)
        total_swinging = 0.0
        total_effective_traction = 0.0
        for n in nodes:
            swinging = math.sqrt((n.old_dx - n.dx) * (n.old_dx - n.dx) +
                                 (n.old_dy - n.dy) * (n.old_dy - n.dy))
            total_swinging += n.mass * swinging
            total_effective_traction += .5 * n.mass * math.sqrt(
                (n.old_dx + n.dx) * (n.old_dx + n.dx) +
                (n.old_dy + n.dy) * (n.old_dy + n.dy))
        estimated_optimal_jitter_tolerance = .05 * math.sqrt(len(nodes))
        min_jt = math.sqrt(estimated_optimal_jitter_tolerance)
        max_jt = 10
        jt = jitter_tolerance * max(min_jt,
                                    min(max_jt, estimated_optimal_jitter_tolerance *
                                        total_effective_traction / (len(nodes) ** 2)))
        min_speed_efficiency = 0.05
        if total_swinging / total_effective_traction > 2.0:
            if speed_efficiency > min_speed_efficiency:
                speed_efficiency *= .5
            jt = max(jt, jitter_tolerance)
        target_speed = jt * speed_efficiency * total_effective_traction / total_swinging
        if total_swinging > jt * total_effective_traction:
            if speed_efficiency > min_speed_efficiency:
                speed_efficiency *= .7
        elif speed < 1000:
            speed_efficiency *= 1.3
        max_rise = .5
        speed = speed + min(target_speed - speed, max_rise * speed)
        if prevent_overlapping:
            for n in nodes:
                swinging = n.mass * math.sqrt(
                    (n.old_dx - n.dx) * (n.old_dx - n.dx) +
                    (n.old_dy - n.dy) * (n.old_dy - n.dy))
                factor = 0.1 * speed / (1 + math.sqrt(speed * swinging))
                df = math.sqrt(math.pow(n.dx, 2) + n.dy ** 2)
                factor = min(factor * df, 10.) / df
                x = n.dx * factor
                y = n.dy * factor
                # applying the computed displacement; these two lines
                # appear to have been lost in extraction (they mirror
                # the else branch below)
                n.x = n.x + x
                n.y = n.y + y
        else:
            for n in nodes:
                swinging = n.mass * math.sqrt(
                    (n.old_dx - n.dx) * (n.old_dx - n.dx) +
                    (n.old_dy - n.dy) * (n.old_dy - n.dy))
                factor = speed / (1.0 + math.sqrt(speed * swinging))
                n.x = n.x + (n.dx * factor)
                n.y = n.y + (n.dy * factor)
    positions = [(n.x, n.y) for n in nodes]
    return dict(zip(graph.nodes(), positions))
Position nodes using ForceAtlas2 force-directed algorithm Parameters ---------- graph: NetworkX graph A position will be assigned to every node in G. pos_list : dict or None optional (default=None) Initial positions for nodes as a dictionary with node as keys and values as a coordinate list or tuple. If None, then use random initial positions. node_masses : dict or None optional (default=None) Predefined masses for nodes with node as keys and masses as values. If None, then use degree of nodes. iterations : int optional (default=100) Number of iterations outbound_attraction_distribution : boolean Distributes attraction along outbound edges. Hubs attract less and thus are pushed to the borders. This mode is meant to grant authorities (nodes with a high indegree) a more central position than hubs (nodes with a high outdegree). This is useful for social networks and web networks, where authorities are sometimes considered more important than hubs lin_log_mode: boolean Switch ForceAtlas model from lin-lin to lin-log (tribute to Andreas Noack). Makes clusters more tight prevent_overlapping: boolean With this mode enabled, the repulsion is modified so that the nodes do not overlap. The goal is to produce a more readable and aesthetically pleasing image. edge_weight_influence: float How much influence you give to the edges weight. 0 is “no influence” and 1 is “normal”. jitter_tolerance: float How much swinging you allow. Above 1 discouraged. Lower gives less speed and more precision barnes_hut_optimize: boolean Barnes Hut optimization: n² complexity to n.ln(n) ; allows larger graphs. barnes_hut_theta: float Theta of the Barnes Hut optimization scaling_ratio: float How much repulsion you want. More makes a more sparse graph. strong_gravity_mode: boolean The “Strong gravity” option sets a force that attracts the nodes that are distant from the center more (d is this distance). This force has the drawback of being so strong that it is sometimes stronger than the other forces. It may result in a biased placement of the nodes. However, its advantage is to force a very compact layout, which may be useful for certain purposes. multithread: boolean gravity: float Attracts nodes to the center. Prevents islands from drifting away. Returns ------- pos : dict A dictionary of positions keyed by node
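A minimal usage sketch (assumes force_atlas2_layout and its helper classes are importable; the graph and iteration count are illustrative):

import networkx

G = networkx.karate_club_graph()
pos = force_atlas2_layout(G, iterations=200)
# pos maps each node to an (x, y) tuple, e.g. for networkx.draw(G, pos)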
25,867
def _get_gecos(name, root=None):
    # the elided literals below follow Salt's upstream user module:
    # the AIX kernel check, the ',' GECOS separator, and the five
    # standard GECOS field names
    if root is not None and __grains__['kernel'] != 'AIX':
        getpwnam = functools.partial(_getpwnam, root=root)
    else:
        getpwnam = functools.partial(pwd.getpwnam)
    gecos_field = salt.utils.stringutils.to_unicode(
        getpwnam(_quote_username(name)).pw_gecos).split(',', 4)
    if not gecos_field:
        return {}
    else:
        while len(gecos_field) < 5:
            gecos_field.append('')
        return {'fullname': salt.utils.data.decode(gecos_field[0]),
                'roomnumber': salt.utils.data.decode(gecos_field[1]),
                'workphone': salt.utils.data.decode(gecos_field[2]),
                'homephone': salt.utils.data.decode(gecos_field[3]),
                'other': salt.utils.data.decode(gecos_field[4])}
Retrieve GECOS field info and return it in dictionary form
25,868
def encrypted_score(self, x):
    score = self.intercept
    _, idx = x.nonzero()
    for i in idx:
        score += x[0, i] * self.weights[i]
    return score
Compute the score of `x` by multiplying with the encrypted model, which is a vector of `paillier.EncryptedNumber`
25,869
def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
    valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
    ret = defaultdict(list)
    for item in iterable:
        key = keyfunc(item)
        value = valuefunc(item)
        ret[key].append(value)
    if reducefunc is not None:
        for key, value_list in ret.items():
            ret[key] = reducefunc(value_list)
    ret.default_factory = None
    return ret
Return a dictionary that maps the items in *iterable* to categories defined by *keyfunc*, transforms them with *valuefunc*, and then summarizes them by category with *reducefunc*. *valuefunc* defaults to the identity function if it is unspecified. If *reducefunc* is unspecified, no summarization takes place: >>> keyfunc = lambda x: x.upper() >>> result = map_reduce('abbccc', keyfunc) >>> sorted(result.items()) [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])] Specifying *valuefunc* transforms the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> result = map_reduce('abbccc', keyfunc, valuefunc) >>> sorted(result.items()) [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])] Specifying *reducefunc* summarizes the categorized items: >>> keyfunc = lambda x: x.upper() >>> valuefunc = lambda x: 1 >>> reducefunc = sum >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc) >>> sorted(result.items()) [('A', 1), ('B', 2), ('C', 3)] You may want to filter the input iterable before applying the map/reduce procedure: >>> all_items = range(30) >>> items = [x for x in all_items if 10 <= x <= 20] # Filter >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1 >>> categories = map_reduce(items, keyfunc=keyfunc) >>> sorted(categories.items()) [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])] >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum) >>> sorted(summaries.items()) [(0, 90), (1, 75)] Note that all items in the iterable are gathered into a list before the summarization step, which may require significant storage. The returned object is a :obj:`collections.defaultdict` with the ``default_factory`` set to ``None``, such that it behaves like a normal dictionary.
25,870
def substitute_placeholders(inputstring, placeholders):
    newst = inputstring.format(
        link=placeholders.link,
        filename=placeholders.filename,
        directory=placeholders.directory,
        fullpath=placeholders.fullpath,
        title=placeholders.title,
        filename_title=placeholders.filename_title,
        date=placeholders.date_string(),
        podcasttitle=placeholders.podcasttitle,
        filename_podcasttitle=placeholders.filename_podcasttitle,
        name=placeholders.name,
        subtitle=placeholders.sanitizedsubtitle,
        entrysummary=placeholders.entrysummary)
    return newst
Take a string with placeholders, and return the strings with substitutions.
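The substitution is plain str.format with named fields, so a template like the one below works (the values are illustrative):

>>> "{title} ({date}) -> {directory}/{filename}".format(
...     title="Episode 1", date="2020-01-01",
...     directory="podcasts", filename="ep1.mp3")
'Episode 1 (2020-01-01) -> podcasts/ep1.mp3'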
25,871
def get_ajd_bound(mesh):
    # the print/format strings and the mesh keys were elided in the source
    print()
    boundary_elements = []
    str_adj_boundaries =
    boundaries = mesh[][] + mesh[][]
    for boundary in boundaries:
        indices = [nr if (boundary[0] in x and boundary[1] in x) else np.nan
                   for (nr, x) in enumerate(mesh[][])]
        indices = np.array(indices)[~np.isnan(indices)]
        if(len(indices) != 1):
            print()
        elif(len(indices) == 0):  # unreachable: len == 0 also satisfies != 1
            print()
        boundary_elements.append(indices[0])
        str_adj_boundaries += .format(int(indices[0]) + 1)
    return str_adj_boundaries, boundary_elements
Determine the triangular elements adjacent to the boundary elements.
25,872
def serialize(self, obj, *args, **kwargs):
    data = super(Users, self).serialize(obj, *args, **kwargs)
    return data
Serialize user as per Meteor accounts serialization.
25,873
def list_device_data_sources(self, device_rid):
    # the header key and the URL path fragments were elided in the source
    headers = {
        : self.user_agent(),
    }
    headers.update(self.headers())
    r = requests.get(
        self.portals_url() + + device_rid + ,
        headers=headers,
        auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    else:
        print("Something went wrong: <{0}>: {1}".format(
            r.status_code, r.reason))
        return None
List data sources of a portal device with rid 'device_rid'. http://docs.exosite.com/portals/#list-device-data-source
25,874
def _expand_parameters(specification, parameters, original=None):
    expanded_specification = deepcopy(specification)
    try:
        # 'steps' and 'commands' are inferred from the REANA serial
        # workflow schema; the error message string remains elided
        for step_num, step in enumerate(expanded_specification['steps']):
            current_step = expanded_specification['steps'][step_num]
            for command_num, command in enumerate(step['commands']):
                current_step['commands'][command_num] = \
                    Template(command).substitute(parameters)
        if original:
            return specification
        else:
            return expanded_specification
    except KeyError as e:
        raise ValidationError(.format(params=str(e)))
Expand parameters inside commands for Serial workflow specifications. :param specification: Full valid Serial workflow specification. :param parameters: Parameters to be extended on a Serial specification. :param original: Flag which determines the type of specification to return. :returns: If 'original' parameter is set, a copy of the specification without expanded parameters will be returned. If 'original' is not set, a copy of the specification with expanded parameters (all $varname and ${varname} will be expanded with their value). Otherwise an error will be thrown if the parameters can not be expanded. :raises: jsonschema.ValidationError
25,875
def _set_data(self, action):
    # the action names were elided in the source and are inferred from
    # the methods each branch dispatches to
    if action == 'labels':
        self._set_labels()
    if action == 'wikidata':
        self._set_wikidata()
        self.get_labels(show=False)
capture Wikidata API response data
25,876
def optimize_for_size(self):
    # the optimization-level string was elided in the source (likely
    # "s" for -Os, per the linked AVR Freaks threads)
    self.optimization =
    self.relax = True
    self.gc_sections = True
    self.ffunction_sections = True
    self.fdata_sections = True
    self.fno_inline_small_functions = True
http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=90752 http://www.avrfreaks.net/index.php?name=PNphpBB2&file=viewtopic&t=69813
25,877
def make_chart(self):
    task_df = self.get_task_df()
    import altair as alt
    # the encoding field names were elided in the source; a Gantt-style
    # chart would typically use e.g. x='start', x2='end', y='task'
    chart = alt.Chart(task_df).mark_bar().encode(
        x=, x2=, y=,
    )
    return chart
Returns ------- altair.Chart
25,878
def update(self, z, R2=None):
    if z is None:
        self.z = np.array([[None]*self.dim_z]).T
        self.x_post = self.x.copy()
        self._P1_2_post = np.copy(self._P1_2)
        return
    if R2 is None:
        R2 = self._R1_2
    elif np.isscalar(R2):
        R2 = eye(self.dim_z) * R2
    dim_z = self.dim_z
    M = self.M
    M[0:dim_z, 0:dim_z] = R2.T
    M[dim_z:, 0:dim_z] = dot(self.H, self._P1_2).T
    M[dim_z:, dim_z:] = self._P1_2.T
    _, self.S = qr(M)
    self.K = self.S[0:dim_z, dim_z:].T
    N = self.S[0:dim_z, 0:dim_z].T
    self.y = z - dot(self.H, self.x)
    self.x += dot(self.K, pinv(N)).dot(self.y)
    self._P1_2 = self.S[dim_z:, dim_z:].T
    self.z = deepcopy(z)
    self.x_post = self.x.copy()
    self._P1_2_post = np.copy(self._P1_2)
Add a new measurement (z) to the Kalman filter. If z is None, nothing is changed. Parameters ---------- z : np.array measurement for this update. R2 : np.array, scalar, or None Sqrt of measurement noise. Optionally provide to override the measurement noise for this one call, otherwise self.R2 will be used.
25,879
def poster(self):
    user_model = get_user_model()
    return get_object_or_404(user_model, pk=self.kwargs[self.user_pk_url_kwarg])
Returns the considered user.
25,880
def equation(self):
    mat = np.empty((self.nunknowns, self.model.neq))
    rhs = np.zeros(self.nunknowns)
    for icp in range(self.ncp):
        istart = icp * self.nlayers
        ieq = 0
        for e in self.model.elementlist:
            if e.nunknowns > 0:
                fluxin = self.intflux(e.disvecinflayers,
                                      self.xcin[icp], self.ycin[icp],
                                      self.xcin[icp + 1], self.ycin[icp + 1],
                                      self.layers, aq=self.aqin)
                fluxout = self.intflux(e.disvecinflayers,
                                       self.xcout[icp], self.ycout[icp],
                                       self.xcout[icp + 1], self.ycout[icp + 1],
                                       self.layers, aq=self.aqout)
                mat[istart:istart + self.nlayers, ieq:ieq + e.nunknowns] = fluxin - fluxout
                ieq += e.nunknowns
            else:
                fluxin = self.intflux(e.disveclayers,
                                      self.xcin[icp], self.ycin[icp],
                                      self.xcin[icp + 1], self.ycin[icp + 1],
                                      self.layers, aq=self.aqin)
                fluxout = self.intflux(e.disveclayers,
                                       self.xcout[icp], self.ycout[icp],
                                       self.xcout[icp + 1], self.ycout[icp + 1],
                                       self.layers, aq=self.aqout)
                rhs[istart:istart + self.nlayers] -= fluxin - fluxout
    return mat, rhs
Mix-in class that returns matrix rows for the condition that the difference in head between inside and outside equals zero. Returns matrix part (nunknowns, neq) and rhs part (nunknowns).
25,881
def metaclass(*metaclasses):
    def _inner(cls):
        metabases = tuple(
            collections.OrderedDict(
                (c, None) for c in (metaclasses + (type(cls),))
            ).keys()
        )
        _Meta = metabases[0]
        for base in metabases[1:]:
            class _Meta(base, _Meta):
                pass
        return six.add_metaclass(_Meta)(cls)
    return _inner
Create the class using all metaclasses. Args: metaclasses: A tuple of metaclasses that will be used to generate and replace a specified class. Returns: A decorator that will recreate the class using the specified metaclasses.
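A small usage sketch with two toy metaclasses (class names invented for illustration):

class TraceMeta(type):
    def __new__(mcs, name, bases, namespace):
        print('creating class', name)
        return super(TraceMeta, mcs).__new__(mcs, name, bases, namespace)

class TagMeta(type):
    pass

@metaclass(TraceMeta, TagMeta)
class Widget(object):
    pass

# type(Widget) is a generated metaclass combining TraceMeta, TagMeta, and type,
# so creating Widget prints "creating class Widget" via TraceMeta.__new__.
print(type(Widget).__mro__)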
25,882
def get_import_status(self, sis_import): if not self._canvas_account_id: raise MissingAccountID() url = SIS_IMPORTS_API.format( self._canvas_account_id) + "/{}.json".format(sis_import.import_id) return SISImportModel(data=self._get_resource(url))
Get the status of an already created SIS import. https://canvas.instructure.com/doc/api/sis_imports.html#method.sis_imports_api.show
25,883
def register_iq_request_handler(self, type_, payload_cls, cb, *, with_send_reply=False): type_ = self._coerce_enum(type_, structs.IQType) if not type_.is_request: raise ValueError( "{!r} is not a request IQType".format(type_) ) key = type_, payload_cls if key in self._iq_request_map: raise ValueError("only one listener is allowed per tag") self._iq_request_map[key] = cb, with_send_reply self._logger.debug( "iq request coroutine registered: type=%r, payload=%r", type_, payload_cls)
Register a coroutine function or a function returning an awaitable to run when an IQ request is received.

:param type_: IQ type to react to (must be a request type).
:type type_: :class:`~aioxmpp.IQType`
:param payload_cls: Payload class to react to (subclass of :class:`~xso.XSO`)
:type payload_cls: :class:`~.XMLStreamClass`
:param cb: Function or coroutine function to invoke
:param with_send_reply: Whether to pass a function to send a reply to `cb` as second argument.
:type with_send_reply: :class:`bool`
:raises ValueError: if there is already a coroutine registered for this target
:raises ValueError: if `type_` is not a request IQ type
:raises ValueError: if `type_` is not a valid :class:`~.IQType` (and cannot be cast to a :class:`~.IQType`)

The callback `cb` will be called whenever an IQ stanza with the given `type_` and payload being an instance of the `payload_cls` is received. The callback must either be a coroutine function or otherwise return an awaitable. The awaitable must evaluate to a valid value for the :attr:`.IQ.payload` attribute. That value will be set as the payload attribute value of an IQ response (with type :attr:`~.IQType.RESULT`) which is generated and sent by the stream.

If the awaitable or the function raises an exception, it will be converted to a :class:`~.stanza.Error` object. That error object is then used as payload for an IQ response (with type :attr:`~.IQType.ERROR`) which is generated and sent by the stream. If the exception is a subclass of :class:`aioxmpp.errors.XMPPError`, it is converted to an :class:`~.stanza.Error` instance directly. Otherwise, it is wrapped in a :class:`aioxmpp.XMPPCancelError` with ``undefined-condition``.

For this to work, `payload_cls` *must* be registered using :meth:`~.IQ.as_payload_class`. Otherwise, the payload will not be recognised by the stream parser and the IQ is automatically responded to with a ``feature-not-implemented`` error.

.. warning:: When using a coroutine function for `cb`, there is no guarantee that concurrent IQ handlers and other coroutines will execute in any defined order. This implies that the strong ordering guarantees normally provided by XMPP XML Streams are lost when using coroutine functions for `cb`. For this reason, the use of non-coroutine functions is allowed.

.. note:: Using a non-coroutine function for `cb` will generally lead to less readable code. For the sake of readability, it is recommended to prefer coroutine functions when strong ordering guarantees are not needed.

.. versionadded:: 0.11 When the argument `with_send_reply` is true `cb` will be called with two arguments: the IQ stanza to handle and a unary function `send_reply(result=None)` that sends a response to the IQ request and prevents that an automatic response is sent. If `result` is an instance of :class:`~aioxmpp.XMPPError` an error result is generated. This is useful when the handler function needs to execute actions which happen after the IQ result has been sent, for example, sending other stanzas.

.. versionchanged:: 0.10 Accepts an awaitable as last argument in addition to coroutine functions. Renamed from :meth:`register_iq_request_coro`.

.. versionadded:: 0.6 If the stream is :meth:`stop`\\ -ped (only if SM is not enabled) or :meth:`close`\\ ed, running IQ response coroutines are :meth:`asyncio.Task.cancel`\\ -led. To protect against that, fork from your coroutine using :func:`asyncio.ensure_future`.

.. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.IQType` member.

.. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently.
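A minimal registration sketch (the payload class, namespace, and handler are illustrative; `stream` is assumed to be an established aioxmpp stanza stream):

import aioxmpp
import aioxmpp.xso as xso

@aioxmpp.IQ.as_payload_class
class Ping(xso.XSO):
    TAG = ('urn:example:ping', 'ping')  # hypothetical namespace

async def handle_ping(iq):
    return None  # None becomes an empty IQ result

# `stream` is assumed to exist and be connected
stream.register_iq_request_handler(aioxmpp.IQType.GET, Ping, handle_ping)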
25,884
def start_prompt(self): logger.show("Coconut Interpreter:") logger.show("(type 'exit()' or press Ctrl-D to end)") self.start_running() while self.running: try: code = self.get_input() if code: compiled = self.handle_input(code) if compiled: self.execute(compiled, use_eval=None) except KeyboardInterrupt: printerr("\nKeyboardInterrupt")
Start the interpreter.
25,885
def convert_padding(params, w_name, scope_name, inputs, layers, weights, names): print('Converting padding...') if params['mode'] == 'constant': if params['value'] != 0.0: raise AssertionError('Cannot convert non-zero padding') if names: tf_name = 'PADD' + random_string(4) else: tf_name = w_name + str(random.random()) padding_name = tf_name padding_layer = keras.layers.ZeroPadding2D( padding=((params['pads'][2], params['pads'][6]), (params['pads'][3], params['pads'][7])), name=padding_name ) layers[scope_name] = padding_layer(layers[inputs[0]]) elif params['mode'] == 'reflect': def target_layer(x, pads=params['pads']): layer = tf.pad(x, [[0, 0], [0, 0], [pads[2], pads[6]], [pads[3], pads[7]]], 'REFLECT') return layer lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
Convert padding layer. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
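For intuition, the constant-mode branch maps an 8-element NCHW pads vector onto Keras like this (a hedged standalone sketch; the pads values are invented):

from tensorflow import keras

pads = [0, 0, 1, 2, 0, 0, 3, 4]  # [n, c, top, left, n, c, bottom, right]
zero_pad = keras.layers.ZeroPadding2D(
    padding=((pads[2], pads[6]), (pads[3], pads[7])))  # rows padded (1, 3), cols (2, 4)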
25,886
def transform_hits(hits): packages = {} for hit in hits: name = hit['name'] summary = hit['summary'] version = hit['version'] score = hit['_pypi_ordering'] if score is None: score = 0 if name not in packages.keys(): packages[name] = { 'name': name, 'summary': summary, 'versions': [version], 'score': score, } else: packages[name]['versions'].append(version) if version == highest_version(packages[name]['versions']): packages[name]['summary'] = summary packages[name]['score'] = score package_list = sorted( packages.values(), key=lambda x: x['score'], reverse=True, ) return package_list
The list from pypi is really a list of versions. We want a list of packages with the list of versions stored inline. This converts the list from pypi into one we can use.
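A hedged before/after illustration (hit values invented; assumes transform_hits and highest_version from the surrounding module are in scope):

hits = [
    {'name': 'foo', 'summary': 'old summary', 'version': '0.1', '_pypi_ordering': 1},
    {'name': 'foo', 'summary': 'new summary', 'version': '0.2', '_pypi_ordering': 2},
]
print(transform_hits(hits))
# [{'name': 'foo', 'summary': 'new summary', 'versions': ['0.1', '0.2'], 'score': 2}]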
25,887
def _find_usage_vpc_links(self): logger.debug('Finding usage for VPC Links') link_count = 0 paginator = self.conn.get_paginator('get_vpc_links') for resp in paginator.paginate(): link_count += len(resp['items']) self.limits['VPC Links']._add_current_usage( link_count, aws_type='AWS::ApiGateway::VpcLink' )
Find usage on VPC Links. Update `self.limits`.
25,888
def fetch_search_document(self, *, index): assert self.pk, "Object must have a primary key before being indexed." client = get_client() return client.get(index=index, doc_type=self.search_doc_type, id=self.pk)
Fetch the object's document from a search index by id.
25,889
def enable_page_breakpoint(self, dwProcessId, address): p = self.system.get_process(dwProcessId) bp = self.get_page_breakpoint(dwProcessId, address) if bp.is_running(): self.__del_running_bp_from_all_threads(bp) bp.enable(p, None)
Enables the page breakpoint at the given address. @see: L{define_page_breakpoint}, L{has_page_breakpoint}, L{get_page_breakpoint}, L{enable_one_shot_page_breakpoint}, L{disable_page_breakpoint}, L{erase_page_breakpoint} @type dwProcessId: int @param dwProcessId: Process global ID. @type address: int @param address: Memory address of breakpoint.
25,890
def load(self): df = pd.read_csv(self.input_file, encoding=) df[] = df[].str.split().str[-1] df[] = df[].str.split().str[-1] df[] = (df[].str[:4].astype(float) / 10.).astype(int) * 10 df[] = df[] * df[] return df
Load the data file and do some basic type conversions.
25,891
def _deserialize_dict(cls, cls_target, dict_): instance = cls_target.__new__(cls_target) dict_deserialized = cls._deserialize_dict_attributes(cls_target, dict_) instance.__dict__ = cls._fill_default_values(cls_target, dict_deserialized) return instance
:type cls_target: T|type :type dict_: dict :rtype: T
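The key trick is allocating the instance without running __init__; the same pattern in isolation:

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

p = Point.__new__(Point)         # bypasses __init__ entirely
p.__dict__ = {'x': 1, 'y': 2}    # attributes restored straight from a dict
print(p.x, p.y)                  # 1 2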
25,892
def reset(self): self.bumpPhases = np.empty((2,0), dtype="float") self.phaseDisplacement = np.empty((0,2), dtype="float") self.cellsForActivePhases = np.empty(0, dtype="int") self.activeCells = np.empty(0, dtype="int") self.learningCells = np.empty(0, dtype="int") self.sensoryAssociatedCells = np.empty(0, dtype="int")
Clear the active cells.
25,893
def command_line_runner(): filename = sys.argv[-1] if not filename.endswith(".rst"): print("ERROR! Please enter a ReStructuredText filename!") sys.exit() print(rst_to_json(file_opener(filename)))
I run functions from the command-line!
25,894
def _AddSerializeToStringMethod(message_descriptor, cls): def SerializeToString(self): errors = [] if not self.IsInitialized(): raise message_mod.EncodeError( 'Message %s is missing required fields: %s' % ( self.DESCRIPTOR.full_name, ','.join(self.FindInitializationErrors()))) return self.SerializePartialToString() cls.SerializeToString = SerializeToString
Helper for _AddMessageMethods().
25,895
def get_or_search(self) -> List[GridQubit]: if not self._sequence: self._sequence = self._find_sequence() return self._sequence
Starts the search or gives previously calculated sequence. Returns: The linear qubit sequence found.
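The method is a lazy-cache wrapper; the same pattern in isolation (note the original re-runs the search whenever the cached value is falsy, e.g. an empty list, because it tests `not self._sequence` rather than `is None`):

from typing import List, Optional

class Searcher:
    def __init__(self) -> None:
        self._sequence: Optional[List[int]] = None

    def get_or_search(self) -> List[int]:
        if self._sequence is None:               # cache miss: run the search once
            self._sequence = self._find_sequence()
        return self._sequence

    def _find_sequence(self) -> List[int]:
        return [1, 2, 3]                         # stand-in for the expensive search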
25,896
def __get_category(self, category, name, vivify=False): namespace = foundations.namespace.get_namespace(name, root_only=True) name = foundations.namespace.remove_namespace(name, root_only=True) if namespace: if vivify and namespace not in category: category[namespace] = {} return self.__get_category(category[namespace], name, vivify) else: if vivify and name not in category: category[name] = {} return category[name]
Recursively gets the requested category; alternatively, if the **vivify** argument is set, the category will be created. :param category: Base category. :type category: dict :param name: Category to retrieve or vivify. :type name: unicode :param vivify: Vivify missing parents in the chain to the requested category. :type vivify: bool :return: Requested category. :rtype: dict
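A standalone illustration of the vivify walk; '.' is used here as a stand-in for whatever separator foundations.namespace actually uses:

categories = {}

def get_category(category, name, vivify=False):
    head, _, rest = name.partition('.')          # split off the leading namespace
    if rest:
        if vivify and head not in category:
            category[head] = {}                  # create the missing parent
        return get_category(category[head], rest, vivify)
    if vivify and name not in category:
        category[name] = {}                      # create the leaf category
    return category[name]

get_category(categories, 'ui.widgets.button', vivify=True)
print(categories)  # {'ui': {'widgets': {'button': {}}}}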
25,897
def accuracy(links_true, links_pred=None, total=None): if _isconfusionmatrix(links_true): confusion_matrix = links_true v = (confusion_matrix[0, 0] + confusion_matrix[1, 1]) \ / numpy.sum(confusion_matrix) else: tp = true_positives(links_true, links_pred) tn = true_negatives(links_true, links_pred, total) v = (tp + tn) / total return float(v)
accuracy(links_true, links_pred, total) Compute the accuracy. The accuracy is given by (TP+TN)/(TP+FP+TN+FN). Parameters ---------- links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series The true (or actual) collection of links. links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series The predicted collection of links. total: int, pandas.MultiIndex The count of all record pairs (both links and non-links). When the argument is a pandas.MultiIndex, the length of the index is used. Returns ------- float The accuracy
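A quick numeric check of the formula, assuming the recordlinkage library as host (index values invented): TP = 2, FP = 1, FN = 1, TN = total - 4 = 6, so accuracy = (2 + 6) / 10 = 0.8.

import pandas as pd
import recordlinkage as rl  # assumed host library

links_true = pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 2)])
links_pred = pd.MultiIndex.from_tuples([(0, 0), (1, 1), (3, 3)])
total = 10  # all candidate record pairs
print(rl.accuracy(links_true, links_pred, total))  # 0.8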
25,898
def wait_for_string(self, expected_string, timeout=60): events = [self.syntax_error_re, self.connection_closed_re, expected_string, self.press_return_re, self.more_re, pexpect.TIMEOUT, pexpect.EOF, self.buffer_overflow_re] events += self.device.get_previous_prompts() self.log("Expecting: {}".format(pattern_to_str(expected_string))) transitions = [ (self.syntax_error_re, [0], -1, CommandSyntaxError("Command unknown", self.device.hostname), 0), (self.connection_closed_re, [0], 1, a_connection_closed, 10), (pexpect.TIMEOUT, [0], -1, CommandTimeoutError("Timeout waiting for prompt", self.device.hostname), 0), (pexpect.EOF, [0, 1], -1, ConnectionError("Unexpected device disconnect", self.device.hostname), 0), (self.more_re, [0], 0, partial(a_send, " "), 10), (expected_string, [0, 1], -1, a_expected_prompt, 0), (self.press_return_re, [0], -1, a_stays_connected, 0), (self.buffer_overflow_re, [0], -1, CommandSyntaxError("Command too long", self.device.hostname), 0) ] for prompt in self.device.get_previous_prompts(): transitions.append((prompt, [0, 1], 0, a_unexpected_prompt, 0)) fsm = FSM("WAIT-4-STRING", self.device, events, transitions, timeout=timeout) return fsm.run()
Wait for string FSM.
25,899
def resetPassword(self, userId): self._setHeaders('resetPassword') return self._sforce.service.resetPassword(userId)
Changes a user's password to a system-generated value.