Columns: code (string, 59 to 3.37k characters) and docstring (string, 8 to 15.5k characters).
def remove_rows_matching(df, column, match):
    df = df.copy()
    mask = df[column].values != match
    return df.iloc[mask, :]
Return a ``DataFrame`` with the rows removed where `column` values match `match`. The selected `column` series of values from the supplied Pandas ``DataFrame`` is compared to `match`, and those rows that match are removed from the DataFrame. :param df: Pandas ``DataFrame`` :param column: Column indexer :param match: ``str`` match target :return: filtered Pandas ``DataFrame``
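A minimal usage sketch; the DataFrame contents are illustrative and it assumes the remove_rows_matching function above is in scope.

import pandas as pd

df = pd.DataFrame({'name': ['a', 'b', 'a'], 'value': [1, 2, 3]})
filtered = remove_rows_matching(df, 'name', 'a')
print(filtered)  # only the row where name == 'b' remains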
def ai(board, who='x'):
    return sorted(board.possible(), key=lambda b: value(b, who))[-1]
Return the best next board. >>> b = Board(); b._rows = [['x', 'o', ' '], ['x', 'o', ' '], [' ', ' ', ' ']] >>> ai(b) <Board |xo.xo.x..|>
def hsl2rgb(hsl):
    h, s, l = [float(v) for v in hsl]
    if not (0.0 - FLOAT_ERROR <= s <= 1.0 + FLOAT_ERROR):
        raise ValueError("Saturation must be between 0 and 1.")
    if not (0.0 - FLOAT_ERROR <= l <= 1.0 + FLOAT_ERROR):
        raise ValueError("Lightness must be between 0 and 1.")
    if s == 0:
        return l, l, l
    if l < 0.5:
        v2 = l * (1.0 + s)
    else:
        v2 = (l + s) - (s * l)
    v1 = 2.0 * l - v2
    r = _hue2rgb(v1, v2, h + (1.0 / 3))
    g = _hue2rgb(v1, v2, h)
    b = _hue2rgb(v1, v2, h - (1.0 / 3))
    return r, g, b
Convert HSL representation towards RGB :param h: Hue, position around the chromatic circle (h=1 equiv h=0) :param s: Saturation, color saturation (0=full gray, 1=full color) :param l: Lightness, overall lightness (0=full black, 1=full white) :rtype: 3-uple for RGB values in float between 0 and 1 Hue, Saturation and Lightness are each floats between 0 and 1. Note that Hue can be set to any value but as it is a rotation around the chromatic circle, any value above 1 or below 0 can be expressed by a value between 0 and 1 (note that h=0 is equiv to h=1). This algorithm came from: http://www.easyrgb.com/index.php?X=MATH&H=19#text19 Here are some quick examples of HSL to RGB conversion: >>> from colour import hsl2rgb With a lightness put at 0, RGB is always black >>> hsl2rgb((0.0, 0.0, 0.0)) (0.0, 0.0, 0.0) >>> hsl2rgb((0.5, 0.0, 0.0)) (0.0, 0.0, 0.0) >>> hsl2rgb((0.5, 0.5, 0.0)) (0.0, 0.0, 0.0) Same for lightness put at 1, RGB is always white >>> hsl2rgb((0.0, 0.0, 1.0)) (1.0, 1.0, 1.0) >>> hsl2rgb((0.5, 0.0, 1.0)) (1.0, 1.0, 1.0) >>> hsl2rgb((0.5, 0.5, 1.0)) (1.0, 1.0, 1.0) With saturation put at 0, the RGB should be equal to Lightness: >>> hsl2rgb((0.0, 0.0, 0.25)) (0.25, 0.25, 0.25) >>> hsl2rgb((0.5, 0.0, 0.5)) (0.5, 0.5, 0.5) >>> hsl2rgb((0.5, 0.0, 0.75)) (0.75, 0.75, 0.75) With saturation put at 1, and lightness put to 0.5, we can find normal full red, green, blue colors: >>> hsl2rgb((0 , 1.0, 0.5)) (1.0, 0.0, 0.0) >>> hsl2rgb((1 , 1.0, 0.5)) (1.0, 0.0, 0.0) >>> hsl2rgb((1.0/3 , 1.0, 0.5)) (0.0, 1.0, 0.0) >>> hsl2rgb((2.0/3 , 1.0, 0.5)) (0.0, 0.0, 1.0) Of course: >>> hsl2rgb((0.0, 2.0, 0.5)) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Saturation must be between 0 and 1. And: >>> hsl2rgb((0.0, 0.0, 1.5)) # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: Lightness must be between 0 and 1.
def getAnalyses(self, contentFilter=None, **kwargs):
    cf = contentFilter if contentFilter else {}
    cf.update(kwargs)
    analyses = []
    for ar in self.getAnalysisRequests():
        analyses.extend(ar.getAnalyses(**cf))
    return analyses
Return a list of all analyses against this sample.
def cors_options(self, request, **_):
    return create_response(
        request,
        headers=self.pre_flight_headers(request, request.supported_methods)
    )
CORS options response method. Broken out so that it can be customised if needed.
def load_kegg(self, kegg_id, kegg_organism_code=None, kegg_seq_file=None, kegg_metadata_file=None, set_as_representative=False, download=False, outdir=None, force_rerun=False): if download: if not outdir: outdir = self.sequence_dir if not outdir: raise ValueError('Output directory must be specified') if kegg_organism_code: kegg_id = kegg_organism_code + ':' + kegg_id if self.sequences.has_id(kegg_id): if force_rerun: existing = self.sequences.get_by_id(kegg_id) self.sequences.remove(existing) else: log.debug('{}: KEGG ID already present in list of sequences'.format(kegg_id)) kegg_prop = self.sequences.get_by_id(kegg_id) if not self.sequences.has_id(kegg_id): kegg_prop = KEGGProp(id=kegg_id, seq=None, fasta_path=kegg_seq_file, txt_path=kegg_metadata_file) if download: kegg_prop.download_seq_file(outdir, force_rerun) kegg_prop.download_metadata_file(outdir, force_rerun) if self.representative_sequence: if not self.representative_sequence.uniprot: if kegg_prop.equal_to(self.representative_sequence): self.representative_sequence.update(kegg_prop.get_dict(), only_keys=['sequence_path', 'metadata_path', 'kegg', 'description', 'taxonomy', 'id', 'pdbs', 'uniprot', 'seq_record', 'gene_name', 'refseq']) else: log.warning('{}: representative sequence does not match mapped KEGG sequence.'.format(self.id)) self.sequences.append(kegg_prop) if set_as_representative: self.representative_sequence = kegg_prop return self.sequences.get_by_id(kegg_id)
Load a KEGG ID, sequence, and metadata files into the sequences attribute. Args: kegg_id (str): KEGG ID kegg_organism_code (str): KEGG organism code to prepend to the kegg_id if not part of it already. Example: ``eco:b1244``, ``eco`` is the organism code kegg_seq_file (str): Path to KEGG FASTA file kegg_metadata_file (str): Path to KEGG metadata file (raw KEGG format) set_as_representative (bool): If this KEGG ID should be set as the representative sequence download (bool): If the KEGG sequence and metadata files should be downloaded if not provided outdir (str): Where the sequence and metadata files should be downloaded to force_rerun (bool): If ID should be reloaded and files redownloaded Returns: KEGGProp: object contained in the sequences attribute
def get_template(template_path):
    folder, fname = os.path.split(template_path)
    return create_env_by_folder(folder).get_template(fname)
Get a template object from an absolute path. For example, the template contains: .. code-block:: html {% macro add(a, b) -%} {{a + b}} {%- endmacro %} Then call the macro as a method of the template object: .. code-block:: python tpl = get_template('template.tpl') print(tpl.add(1, 2)) # prints 3 :param template_path: template absolute path. :return: template object
def include(self, node):
    result = None
    if isinstance(node, ScalarNode):
        result = Loader.include_file(self.construct_scalar(node))
    else:
        raise RuntimeError("Not supported !include on type %s" % type(node))
    return result
Include the defined yaml file.
def get_rna(self) -> Rna:
    if self.variants:
        raise InferCentralDogmaException('can not get rna for variant')
    return Rna(
        namespace=self.namespace,
        name=self.name,
        identifier=self.identifier
    )
Get the corresponding RNA or raise an exception if it's not the reference node. :raises: InferCentralDogmaException
def get_volumes_for_instance(self, arg, device=None):
    instance = self.get(arg)
    filters = {'attachment.instance-id': instance.id}
    if device is not None:
        filters['attachment.device'] = device
    return self.get_all_volumes(filters=filters)
Return all EC2 Volume objects attached to ``arg`` instance name or ID. May specify ``device`` to limit to the (single) volume attached as that device.
def fetchone(self):
    if self._state == self._STATE_NONE:
        raise Exception("No query yet")
    if not self._data:
        return None
    else:
        self._rownumber += 1
        return self._data.pop(0)
Fetch the next row of a query result set, returning a single sequence, or ``None`` when no more data is available.
def startup_script(self):
    script_file = self.script_file
    if script_file is None:
        return None
    try:
        with open(script_file, "rb") as f:
            return f.read().decode("utf-8", errors="replace")
    except OSError as e:
        raise VPCSError('Cannot read the startup script file "{}": {}'.format(script_file, e))
Returns the content of the current startup script
def post_build(self, container_builder, container):
    jinja = container.get('ioc.extra.jinja2')
    for id in container_builder.get_ids_by_tag('jinja2.filter'):
        definition = container_builder.get(id)
        for option in definition.get_tag('jinja2.filter'):
            if 'name' not in option:
                break
            if 'method' not in option:
                break
            jinja.filters[option['name']] = getattr(container.get(id), option['method'])
    for id in container_builder.get_ids_by_tag('jinja2.global'):
        definition = container_builder.get(id)
        for option in definition.get_tag('jinja2.global'):
            if 'name' not in option:
                break
            if 'method' not in option:
                break
            jinja.globals[option['name']] = getattr(container.get(id), option['method'])
Register filters and globals in the Jinja environment instance. IoC tags are: - jinja2.filter to register a filter; the tag must contain a name and a method option - jinja2.global to add a new global (here globals are functions); the tag must contain a name and a method option
def compileGSUB(self):
    from ufo2ft.util import compileGSUB

    compiler = self.context.compiler
    if compiler is not None:
        if hasattr(compiler, "_gsub"):
            return compiler._gsub
        glyphOrder = compiler.ttFont.getGlyphOrder()
    else:
        glyphOrder = sorted(self.context.font.keys())
    gsub = compileGSUB(self.context.feaFile, glyphOrder)
    if compiler and not hasattr(compiler, "_gsub"):
        compiler._gsub = gsub
    return gsub
Compile a temporary GSUB table from the current feature file.
def get_cached_image(self, width, height, zoom, parameters=None, clear=False): global MAX_ALLOWED_AREA if not parameters: parameters = {} if self.__compare_parameters(width, height, zoom, parameters) and not clear: return True, self.__image, self.__zoom while True: try: self.__limiting_multiplicator = 1 area = width * zoom * self.__zoom_multiplicator * height * zoom * self.__zoom_multiplicator if area > MAX_ALLOWED_AREA: self.__limiting_multiplicator = sqrt(MAX_ALLOWED_AREA / area) image = ImageSurface(self.__format, int(ceil(width * zoom * self.multiplicator)), int(ceil(height * zoom * self.multiplicator))) break except Error: MAX_ALLOWED_AREA *= 0.8 self.__set_cached_image(image, width, height, zoom, parameters) return False, self.__image, zoom
Get ImageSurface object, if possible, cached The method checks whether the image was already rendered. This is done by comparing the passed size and parameters with those of the last image. If they are equal, the cached image is returned. Otherwise, a new ImageSurface with the specified dimensions is created and returned. :param width: The width of the image :param height: The height of the image :param zoom: The current scale/zoom factor :param parameters: The parameters used for the image :param clear: If True, the cache is emptied, thus the image won't be retrieved from cache :returns: The flag is True when the image is retrieved from the cache, otherwise False; The cached image surface or a blank one with the desired size; The zoom parameter when the image was stored :rtype: bool, ImageSurface, float
def _erase_vm_info(name):
    try:
        vm_ = get_vm_info(name)
        if vm_['machine']:
            key = _build_machine_uri(vm_['machine'], vm_.get('cwd', '.'))
            try:
                __utils__['sdb.sdb_delete'](key, __opts__)
            except KeyError:
                __utils__['sdb.sdb_set'](key, None, __opts__)
    except Exception:
        pass
    uri = _build_sdb_uri(name)
    try:
        __utils__['sdb.sdb_delete'](uri, __opts__)
    except KeyError:
        __utils__['sdb.sdb_set'](uri, {}, __opts__)
    except Exception:
        pass
Erase the information for a VM that we are destroying. Some sdb drivers (such as the SQLite driver we expect to use) do not have a `delete` method, so if the delete fails, we have to replace the value with a blank entry.
def getHosts(filename=None, hostlist=None):
    if filename:
        return getHostsFromFile(filename)
    elif hostlist:
        return getHostsFromList(hostlist)
    elif getEnv() == "SLURM":
        return getHostsFromSLURM()
    elif getEnv() == "PBS":
        return getHostsFromPBS()
    elif getEnv() == "SGE":
        return getHostsFromSGE()
    else:
        return getDefaultHosts()
Return a list of hosts depending on the environment
def giant_text_sqltype(dialect: Dialect) -> str:
    if dialect.name == SqlaDialectName.SQLSERVER:
        return 'NVARCHAR(MAX)'
    elif dialect.name == SqlaDialectName.MYSQL:
        return 'LONGTEXT'
    else:
        raise ValueError("Unknown dialect: {}".format(dialect.name))
Returns the SQL column type used to make very large text columns for a given dialect. Args: dialect: a SQLAlchemy :class:`Dialect` Returns: the SQL data type of "giant text", typically 'LONGTEXT' for MySQL and 'NVARCHAR(MAX)' for SQL Server.
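A hedged usage sketch, assuming the giant_text_sqltype function above and the SqlaDialectName constants it compares against are importable alongside SQLAlchemy:

from sqlalchemy.dialects import mysql

print(giant_text_sqltype(mysql.dialect()))  # expected: 'LONGTEXT'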
def download_feed_posts(self, max_count: int = None, fast_update: bool = False,
                        post_filter: Optional[Callable[[Post], bool]] = None) -> None:
    self.context.log("Retrieving pictures from your feed...")
    count = 1
    for post in self.get_feed_posts():
        if max_count is not None and count > max_count:
            break
        name = post.owner_username
        if post_filter is not None and not post_filter(post):
            self.context.log("<pic by %s skipped>" % name, flush=True)
            continue
        self.context.log("[%3i] %s " % (count, name), end="", flush=True)
        count += 1
        with self.context.error_catcher('Download feed'):
            downloaded = self.download_post(post, target=':feed')
            if fast_update and not downloaded:
                break
Download pictures from the user's feed. Example to download up to the 20 pics the user last liked:: loader = Instaloader() loader.load_session_from_file('USER') loader.download_feed_posts(max_count=20, fast_update=True, post_filter=lambda post: post.viewer_has_liked) :param max_count: Maximum count of pictures to download :param fast_update: If true, abort when first already-downloaded picture is encountered :param post_filter: function(post), which returns True if given picture should be downloaded
def int_global_to_local(self, index, axis=0):
    if index >= self.__mask[axis].stop - self.__halos[1][axis]:
        return None
    if index < self.__mask[axis].start + self.__halos[0][axis]:
        return None
    return index - self.__mask[axis].start
Calculate local index from global index for integer input :param index: global index as integer :param axis: current axis to process :return: local index as integer, or None if the global index lies outside the local (non-halo) range
def flatten_dist_egginfo_json( source_dists, filename=DEFAULT_JSON, dep_keys=DEP_KEYS, working_set=None): working_set = working_set or default_working_set obj = {} depends = {dep: {} for dep in dep_keys} for dist in source_dists: obj = read_dist_egginfo_json(dist, filename) if not obj: continue logger.debug("merging '%s' for required '%s'", filename, dist) for dep in dep_keys: depends[dep].update(obj.get(dep, {})) if obj is None: return depends for dep in dep_keys: obj[dep] = {k: v for k, v in depends[dep].items() if v is not None} return obj
Flatten a distribution's egginfo json, with the depended keys to be flattened. Originally this was done for this: Resolve a distribution's (dev)dependencies through the working set and generate a flattened version package.json, returned as a dict, from the resolved distributions. Default working set is the one from pkg_resources. The generated package.json dict is done by grabbing all package.json metadata from all parent Python packages, starting from the highest level and down to the lowest. The current distribution's dependencies will be layered on top along with its other package information. This has the effect of child packages overriding node/npm dependencies which is by the design of this function. If nested dependencies are desired, just rely on npm only for all dependency management. Flat is better than nested.
def on(self, evt, func):
    if evt not in self._callbacks:
        raise NotImplementedError('callback "%s"' % evt)
    else:
        self._callbacks[evt] = func
Set a callback handler for a pubsub event
def unpack_small_tensors(tower_grads, packing): if not packing: return tower_grads new_tower_grads = [] num_devices = len(tower_grads) num_packed = len(packing.keys()) // num_devices for dev_idx, gv_list in enumerate(tower_grads): new_gv_list = gv_list[num_packed:] for i in xrange(0, num_packed): k = "%d:%d" % (dev_idx, i) gpt = packing[k] gv = unpack_grad_tuple(gv_list[i], gpt) for gi, idx in enumerate(gpt.indices): assert idx == gpt.indices[gi] new_gv_list.insert(idx, gv[gi]) new_tower_grads.append(new_gv_list) return new_tower_grads
Undo the structure alterations to tower_grads done by pack_small_tensors. Args: tower_grads: List of List of (grad, var) tuples. packing: A dict generated by pack_small_tensors describing the changes it made to tower_grads. Returns: new_tower_grads: identical to tower_grads except that concatenations of small tensors have been split apart and returned to their original positions, paired with their original variables.
def ExportMigrations():
    from django.db.migrations.executor import MigrationExecutor
    if 'default' in connections and (
            type(connections['default']) == DatabaseWrapper):
        return
    for alias in connections.databases:
        executor = MigrationExecutor(connections[alias])
        ExportMigrationsForDatabase(alias, executor)
Exports counts of unapplied migrations. This is meant to be called during app startup, ideally by django_prometheus.apps.AppConfig.
def off(self, event):
    'Remove an event handler'
    try:
        self._once_events.remove(event)
    except KeyError:
        pass
    self._callback_by_event.pop(event, None)
Remove an event handler
def check_points_type(self, type_class):
    for point in self.points:
        if (point.value is not None
                and not isinstance(point.value, type_class)):
            return False
    return True
Check that each point's value is an instance of `type_class`. `type_class` should typically be a Value type, i.e. one that extends :class: `opencensus.metrics.export.value.Value`. :type type_class: type :param type_class: Type to check against. :rtype: bool :return: Whether all points are instances of `type_class`.
def rename_pipe(self, old_name, new_name):
    if old_name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=old_name, opts=self.pipe_names))
    if new_name in self.pipe_names:
        raise ValueError(Errors.E007.format(name=new_name, opts=self.pipe_names))
    i = self.pipe_names.index(old_name)
    self.pipeline[i] = (new_name, self.pipeline[i][1])
Rename a pipeline component. old_name (unicode): Name of the component to rename. new_name (unicode): New name of the component. DOCS: https://spacy.io/api/language#rename_pipe
def taskdir(self):
    return os.path.join(self.BASE, self.TAG, self.task_family)
Return the directory under which all artefacts are stored.
def intersection(L1, L2):
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False
Intersects two lines given as coefficient triples Args: L1 ([float, float, float]): coefficients (a, b, c) of the first line a*x + b*y = c L2 ([float, float, float]): coefficients (a, b, c) of the second line Returns: (float, float): x and y of the intersection point, if the lines intersect bool: False if the lines are parallel
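For illustration, a small hedged example, assuming the intersection function above is in scope; each line is the coefficient triple (a, b, c) of a*x + b*y = c:

line1 = (1, 0, 1)   # x = 1
line2 = (0, 1, 2)   # y = 2
print(intersection(line1, line2))          # (1.0, 2.0)
print(intersection((1, 0, 1), (1, 0, 3)))  # False -- parallel lines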
def onMarkedSeen(
    self, threads=None, seen_ts=None, ts=None, metadata=None, msg=None
):
    log.info(
        "Marked messages as seen in threads {} at {}s".format(
            [(x[0], x[1].name) for x in threads], seen_ts / 1000
        )
    )
Called when the client is listening, and the client has successfully marked threads as seen :param threads: The threads that were marked :param seen_ts: A timestamp of when the threads were seen :param ts: A timestamp of the action :param metadata: Extra metadata about the action :param msg: A full set of the data received
def build_model(self):
    with tf.variable_scope(
            "model", reuse=None, initializer=self.initializer):
        self._create_placeholders()
        self._create_rnn_cells()
        self._create_initstate_and_embeddings()
        self._create_rnn_architecture()
        self._create_optimizer_node()
Build the model's computational graph.
def load(self):
    if self._modules_loaded is True:
        return
    self.load_modules_from_python(routes.ALL_ROUTES)
    self.aliases.update(routes.ALL_ALIASES)
    self._load_modules_from_entry_points('softlayer.cli')
    self._modules_loaded = True
Loads all modules.
def get_config(workflow):
    try:
        workspace = workflow.plugin_workspace[ReactorConfigPlugin.key]
        return workspace[WORKSPACE_CONF_KEY]
    except KeyError:
        conf = ReactorConfig()
        workspace = workflow.plugin_workspace.get(ReactorConfigPlugin.key, {})
        workspace[WORKSPACE_CONF_KEY] = conf
        workflow.plugin_workspace[ReactorConfigPlugin.key] = workspace
        return conf
Obtain configuration object Does not fail :return: ReactorConfig instance
def generate_ical_file(generator): global events ics_fname = generator.settings['PLUGIN_EVENTS']['ics_fname'] if not ics_fname: return ics_fname = os.path.join(generator.settings['OUTPUT_PATH'], ics_fname) log.debug("Generating calendar at %s with %d events" % (ics_fname, len(events))) tz = generator.settings.get('TIMEZONE', 'UTC') tz = pytz.timezone(tz) ical = icalendar.Calendar() ical.add('prodid', '-//My calendar product//mxm.dk//') ical.add('version', '2.0') DEFAULT_LANG = generator.settings['DEFAULT_LANG'] curr_events = events if not localized_events else localized_events[DEFAULT_LANG] for e in curr_events: ie = icalendar.Event( summary=e.metadata['summary'], dtstart=e.dtstart, dtend=e.dtend, dtstamp=e.metadata['date'], priority=5, uid=e.metadata['title'] + e.metadata['summary'], ) if 'event-location' in e.metadata: ie.add('location', e.metadata['event-location']) ical.add_component(ie) with open(ics_fname, 'wb') as f: f.write(ical.to_ical())
Generate an iCalendar file
def generate_password(self) -> list:
    characterset = self._get_password_characters()
    if (
        self.passwordlen is None
        or not characterset
    ):
        raise ValueError("Can't generate password: character set is "
                         "empty or passwordlen isn't set")
    password = []
    for _ in range(0, self.passwordlen):
        password.append(randchoice(characterset))
    self.last_result = password
    return password
Generate a list of random characters.
def Add(self,ports,source_restrictions=None,private_ip=None): payload = {'ports': []} for port in ports: if 'port_to' in port: payload['ports'].append({'protocol':port['protocol'], 'port':port['port'], 'portTo':port['port_to']}) else: payload['ports'].append({'protocol':port['protocol'], 'port':port['port']}) if source_restrictions: payload['sourceRestrictions'] = source_restrictions if private_ip: payload['internalIPAddress'] = private_ip return(clc.v2.Requests(clc.v2.API.Call('POST','servers/%s/%s/publicIPAddresses' % (self.server.alias,self.server.id), json.dumps(payload),session=self.session), alias=self.server.alias,session=self.session))
Add new public_ip. Specify one or more ports using a list of dicts with the following keys: protocol - TCP, UDP, or ICMP port - int 0-65534 port_to - (optional) if specifying a range of ports then the range end. int 0-65534 Optionally specify one or more source restrictions using a list of dicts with the following keys: cidr - string with CIDR notation for the subnet (e.g. "132.200.20.0/24") private_ip is the existing private IP address to NAT to (optional) # New public IP with single port >>> p = clc.v2.Server(alias='BTDI',id='WA1BTDIX03').PublicIPs() >>> p.Add(ports=[{"protocol": "TCP","port":5}]).WaitUntilComplete() 0 # New public IP with port range >>> p.Add([{"protocol": "UDP","port":10,"port_to":50}]).WaitUntilComplete() 0 # Map existing private IP to single port >>> p.Add(ports=[{"protocol": "TCP","port":22}], source_restrictions=[{'cidr': "132.200.20.0/24"}], private_ip="10.80.148.13").WaitUntilComplete() 0 * Note this API is subject to revision to make ports and source restrictions access parallel to that used for accessors. * public_ips.public_ips will not be updated to reflect this addition. Recreate the object after the request completes to access new info, including the IP itself
def reset(self):
    if self.__row_number > self.__sample_size:
        self.__parser.reset()
        self.__extract_sample()
        self.__extract_headers()
    self.__row_number = 0
Resets the stream pointer to the beginning of the file.
def posterior_to_xarray(self): columns = self.posterior[0].columns posterior_predictive = self.posterior_predictive if posterior_predictive is None or ( isinstance(posterior_predictive, str) and posterior_predictive.lower().endswith(".csv") ): posterior_predictive = [] elif isinstance(posterior_predictive, str): posterior_predictive = [ col for col in columns if posterior_predictive == col.split(".")[0] ] else: posterior_predictive = [ col for col in columns if any(item == col.split(".")[0] for item in posterior_predictive) ] log_likelihood = self.log_likelihood if log_likelihood is None: log_likelihood = [] else: log_likelihood = [col for col in columns if log_likelihood == col.split(".")[0]] invalid_cols = posterior_predictive + log_likelihood valid_cols = [col for col in columns if col not in invalid_cols] data = _unpack_dataframes([item[valid_cols] for item in self.posterior]) return dict_to_dataset(data, coords=self.coords, dims=self.dims)
Extract posterior samples from output csv.
def WriteTaskStart(self):
    self._RaiseIfNotWritable()
    if self._storage_type != definitions.STORAGE_TYPE_TASK:
        raise IOError('Unsupported storage type.')
    task_start = self._task.CreateTaskStart()
    self._storage_file.WriteTaskStart(task_start)
Writes task start information. Raises: IOError: if the storage type is not supported or when the storage writer is closed. OSError: if the storage type is not supported or when the storage writer is closed.
def copy_target_dir(cls, orig_dir, dest_dir, roi_baseline, extracopy):
    try:
        os.makedirs(dest_dir)
    except OSError:
        pass
    copyfiles = ['%s.fits' % roi_baseline,
                 '%s.npy' % roi_baseline,
                 '%s_*.xml' % roi_baseline] + cls.copyfiles
    if isinstance(extracopy, list):
        copyfiles += extracopy
    cls.copy_analysis_files(orig_dir, dest_dir, copyfiles)
Create and populate directories for target analysis
def _reset_on_error(self, server, func, *args, **kwargs):
    try:
        return func(*args, **kwargs)
    except NetworkTimeout:
        raise
    except ConnectionFailure:
        self.__reset_server(server.description.address)
        raise
Execute an operation. Reset the server on network error. Returns func()'s return value on success. On error, clears the server's pool and marks the server Unknown. Re-raises any exception thrown by func().
async def turn_off(self, switch=None):
    if switch is not None:
        switch = codecs.decode(switch.rjust(2, '0'), 'hex')
        packet = self.protocol.format_packet(b"\x10" + switch + b"\x02")
    else:
        packet = self.protocol.format_packet(b"\x0b")
    states = await self._send(packet)
    return states
Turn off relay.
def to_glyphs(ufos_or_designspace, glyphs_module=classes, minimize_ufo_diffs=False):
    if hasattr(ufos_or_designspace, "sources"):
        builder = GlyphsBuilder(
            designspace=ufos_or_designspace,
            glyphs_module=glyphs_module,
            minimize_ufo_diffs=minimize_ufo_diffs,
        )
    else:
        builder = GlyphsBuilder(
            ufos=ufos_or_designspace,
            glyphs_module=glyphs_module,
            minimize_ufo_diffs=minimize_ufo_diffs,
        )
    return builder.font
Take a list of UFOs or a single DesignspaceDocument with attached UFOs and convert it into a GSFont object. The GSFont object is in-memory; it's up to the user to write it to disk if needed. This should be the inverse function of `to_ufos` and `to_designspace`, so we should have to_glyphs(to_ufos(font)) == font and also to_glyphs(to_designspace(font)) == font
def list_devices(self, project_id, conditions=None, params=None):
    default_params = {'per_page': 1000}
    if params:
        default_params.update(params)
    data = self.api('projects/%s/devices' % project_id, params=default_params)
    devices = []
    for device in self.filter(conditions, data['devices']):
        devices.append(packet.Device(device, self.manager))
    return devices
Retrieve list of devices in a project by one or more conditions.
def _from_dict(cls, _dict): args = {} if 'configuration_id' in _dict: args['configuration_id'] = _dict.get('configuration_id') if 'name' in _dict: args['name'] = _dict.get('name') else: raise ValueError( 'Required property \'name\' not present in Configuration JSON') if 'created' in _dict: args['created'] = string_to_datetime(_dict.get('created')) if 'updated' in _dict: args['updated'] = string_to_datetime(_dict.get('updated')) if 'description' in _dict: args['description'] = _dict.get('description') if 'conversions' in _dict: args['conversions'] = Conversions._from_dict( _dict.get('conversions')) if 'enrichments' in _dict: args['enrichments'] = [ Enrichment._from_dict(x) for x in (_dict.get('enrichments')) ] if 'normalizations' in _dict: args['normalizations'] = [ NormalizationOperation._from_dict(x) for x in (_dict.get('normalizations')) ] if 'source' in _dict: args['source'] = Source._from_dict(_dict.get('source')) return cls(**args)
Initialize a Configuration object from a json dictionary.
def make_mapcube_source(name, Spatial_Filename, spectrum):
    data = dict(Spatial_Filename=Spatial_Filename)
    if spectrum is not None:
        data.update(spectrum)
    return roi_model.MapCubeSource(name, data)
Construct and return a `fermipy.roi_model.MapCubeSource` object
def signature(self, node, frame, extra_kwargs=None): kwarg_workaround = False for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): if is_python_keyword(kwarg): kwarg_workaround = True break for arg in node.args: self.write(', ') self.visit(arg, frame) if not kwarg_workaround: for kwarg in node.kwargs: self.write(', ') self.visit(kwarg, frame) if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): self.write(', %s=%s' % (key, value)) if node.dyn_args: self.write(', *') self.visit(node.dyn_args, frame) if kwarg_workaround: if node.dyn_kwargs is not None: self.write(', **dict({') else: self.write(', **{') for kwarg in node.kwargs: self.write('%r: ' % kwarg.key) self.visit(kwarg.value, frame) self.write(', ') if extra_kwargs is not None: for key, value in iteritems(extra_kwargs): self.write('%r: %s, ' % (key, value)) if node.dyn_kwargs is not None: self.write('}, **') self.visit(node.dyn_kwargs, frame) self.write(')') else: self.write('}') elif node.dyn_kwargs is not None: self.write(', **') self.visit(node.dyn_kwargs, frame)
Writes a function call to the stream for the current node. A leading comma is added automatically. The extra keyword arguments may not include python keywords otherwise a syntax error could occur. The extra keyword arguments should be given as python dict.
def oauth2callback(self, view_func): @wraps(view_func) def decorated(*args, **kwargs): params = {} if 'state' in request.args: params.update(**self.parse_state(request.args.get('state'))) if params.pop('sig', None) != make_secure_token(**params): return self.login_manager.unauthorized() code = request.args.get('code') if code: token = self.exchange_code( code, url_for( request.endpoint, _external=True, _scheme=self.redirect_scheme, ), ) userinfo = self.get_userinfo(token['access_token']) params.update(token=token, userinfo=userinfo) else: if params: params.update(dict(request.args.items())) else: return return view_func(**params) return decorated
Decorator for OAuth2 callback. Calls `GoogleLogin.login` then passes results to `view_func`.
def merge_layouts(layouts):
    layout = layouts[0].clone()
    for l in layouts[1:]:
        layout.files.update(l.files)
        layout.domains.update(l.domains)
        for k, v in l.entities.items():
            if k not in layout.entities:
                layout.entities[k] = v
            else:
                layout.entities[k].files.update(v.files)
    return layout
Utility function for merging multiple layouts. Args: layouts (list): A list of BIDSLayout instances to merge. Returns: A BIDSLayout containing merged files and entities. Notes: Layouts will be merged in the order of the elements in the list. I.e., the first Layout will be updated with all values in the 2nd Layout, then the result will be updated with values from the 3rd Layout, etc. This means that order matters: in the event of entity or filename conflicts, later layouts will take precedence.
def is_website_affected(self, website):
    if self.domain is None:
        return True
    if not self.include_subdomains:
        return self.domain in website['subdomains']
    else:
        dotted_domain = "." + self.domain
        for subdomain in website['subdomains']:
            if subdomain == self.domain or subdomain.endswith(dotted_domain):
                return True
        return False
Tell if the website is affected by the domain change
def register_from_fields(self, *args):
    names = []
    for field in args:
        widget = self.resolve_widget(field)
        self.register(widget.config_name)
        if widget.config_name not in names:
            names.append(widget.config_name)
    return names
Register config name from field widgets Arguments: *args: Fields that contains widget :class:`djangocodemirror.widget.CodeMirrorWidget`. Returns: list: List of registered config names from fields.
def write_root(histogram: HistogramBase, hfile: uproot.write.TFile.TFileUpdate, name: str):
    hfile[name] = histogram
Write histogram to an open ROOT file. Parameters ---------- histogram : Any histogram hfile : Updateable uproot file object name : The name of the histogram inside the file
def sso(user, desired_username, name, email, profile_fields=None):
    if not user:
        if not settings.REGISTRATION_OPEN:
            raise SSOError('Account registration is closed')
        user = _create_desired_user(desired_username)
    _configure_user(user, name, email, profile_fields)
    if not user.is_active:
        raise SSOError('Account disabled')
    user.backend = settings.AUTHENTICATION_BACKENDS[0]
    return user
Create a user, if the provided `user` is None, from the parameters. Then log the user in, and return it.
def column_coordinates(self, X): utils.validation.check_is_fitted(self, 'V_') _, _, _, col_names = util.make_labels_and_names(X) if isinstance(X, pd.SparseDataFrame): X = X.to_coo() elif isinstance(X, pd.DataFrame): X = X.to_numpy() if self.copy: X = X.copy() if isinstance(X, np.ndarray): X = X.T / X.T.sum(axis=1)[:, None] else: X = X.T / X.T.sum(axis=1) return pd.DataFrame( data=X @ sparse.diags(self.row_masses_.to_numpy() ** -0.5) @ self.U_, index=col_names )
The column principal coordinates.
def hsts_header(self):
    hsts_policy = 'max-age={0}'.format(self.hsts_age)
    if self.hsts_include_subdomains:
        hsts_policy += '; includeSubDomains'
    return hsts_policy
Returns the proper HSTS policy.
async def send(self, data: Union[bytes, str], final: bool = True):
    MsgType = TextMessage if isinstance(data, str) else BytesMessage
    data = MsgType(data=data, message_finished=final)
    data = self._connection.send(event=data)
    await self._sock.send_all(data)
Sends some data down the connection.
def options_from_file(args): logger.debug("updating options from config files") updated_from_file = [] for config_file in CONFIG_FILES: logger.debug("updating from: %s", config_file) parser = ConfigParser() parser.read(config_file) try: items = parser.items('fades') except NoSectionError: continue for config_key, config_value in items: if config_value in ['true', 'false']: config_value = config_value == 'true' if config_key in MERGEABLE_CONFIGS: current_value = getattr(args, config_key, []) if current_value is None: current_value = [] current_value.append(config_value) setattr(args, config_key, current_value) if not getattr(args, config_key, False) or config_key in updated_from_file: setattr(args, config_key, config_value) updated_from_file.append(config_key) logger.debug("updating %s to %s from file settings", config_key, config_value) return args
Get an argparse.Namespace and return it updated with options from config files. Config files will be parsed with priority equal to their order in CONFIG_FILES.
def render(self, doc, context=None, math_option=False, img_path='', css_path=CSS_PATH):
    if self.wait():
        self.doc = doc
        self.context = context
        self.math_option = math_option
        self.img_path = img_path
        self.css_path = css_path
        self.start()
Start thread to render a given documentation
def asyncPipeUnion(context=None, _INPUT=None, conf=None, **kwargs):
    _input = yield _INPUT
    _OUTPUT = get_output(_input, **kwargs)
    returnValue(_OUTPUT)
An operator that asynchronously merges multiple sources together. Not loopable. Parameters ---------- context : pipe2py.Context object _INPUT : asyncPipe like object (twisted Deferred iterable of items) conf : unused Keyword arguments ----------------- _OTHER1 : asyncPipe like object _OTHER2 : etc. Returns ------- _OUTPUT : twisted.internet.defer.Deferred generator of items
def _get_struct_fillstylearray(self, shape_number):
    obj = _make_object("FillStyleArray")
    obj.FillStyleCount = count = unpack_ui8(self._src)
    if count == 0xFF:
        obj.FillStyleCountExtended = count = unpack_ui16(self._src)
    obj.FillStyles = [self._get_struct_fillstyle(shape_number)
                      for _ in range(count)]
    return obj
Get the values for the FILLSTYLEARRAY record.
def name(self):
    if self._name:
        return self._name
    return self.code.replace('_', ' ').capitalize()
Give back the tab name if it is set, else generate the name from the code.
def replace_complexes(self, linked_stmts=None):
    if linked_stmts is None:
        linked_stmts = self.infer_complexes(self.statements)
    new_stmts = []
    for stmt in self.statements:
        if not isinstance(stmt, Complex):
            new_stmts.append(stmt)
            continue
        found = False
        for linked_stmt in linked_stmts:
            if linked_stmt.refinement_of(stmt, hierarchies):
                found = True
        if not found:
            new_stmts.append(stmt)
        else:
            logger.info('Removing complex: %s' % stmt)
    self.statements = new_stmts
Remove Complex Statements that can be inferred out. This function iterates over self.statements and looks for Complex Statements that either match or are refined by inferred Complex Statements that were linked (provided as the linked_stmts argument). It removes Complex Statements from self.statements that can be explained by the linked statements. Parameters ---------- linked_stmts : Optional[list[indra.mechlinker.LinkedStatement]] A list of linked statements, optionally passed from outside. If None is passed, the MechLinker runs self.infer_complexes to infer Complexes and obtain a list of LinkedStatements that are then used for removing existing Complexes in self.statements.
def main_production(self):
    for rule in self.productions:
        if rule.leftside[0] == self._initialsymbol:
            return rule
    raise IndexError
Returns main rule
def _conversion_checks(item, keys, box_config, check_only=False, pre_check=False): if box_config['box_duplicates'] != 'ignore': if pre_check: keys = list(keys) + [item] key_list = [(k, _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix'] )) for k in keys] if len(key_list) > len(set(x[1] for x in key_list)): seen = set() dups = set() for x in key_list: if x[1] in seen: dups.add("{0}({1})".format(x[0], x[1])) seen.add(x[1]) if box_config['box_duplicates'].startswith("warn"): warnings.warn('Duplicate conversion attributes exist: ' '{0}'.format(dups)) else: raise BoxError('Duplicate conversion attributes exist: ' '{0}'.format(dups)) if check_only: return for k in keys: if item == _safe_attr(k, camel_killer=box_config['camel_killer_box'], replacement_char=box_config['box_safe_prefix']): return k
Internal use for checking if a duplicate safe attribute already exists :param item: Item to see if a dup exists :param keys: Keys to check against :param box_config: Easier to pass in than ask for specific items :param check_only: Don't bother doing the conversion work :param pre_check: Need to add the item to the list of keys to check :return: the original unmodified key, if exists and not check_only
def url_info(request):
    return {
        'MEDIA_URL': core_settings.MEDIA_URL,
        'STATIC_URL': core_settings.STATIC_URL,
        'VERSION': core_settings.VERSION,
        'SERVER_INFO': core_settings.SERVER_INFO,
        'SITE_NAME': current_site_name,
        'CURRENT_SITE': current_site,
    }
Make MEDIA_URL and current HttpRequest object available in template code.
def _register_extensions(self, namespace):
    extmanager = ExtensionManager(
        'extensions.classes.{}'.format(namespace),
        propagate_map_exceptions=True
    )
    if extmanager.extensions:
        extmanager.map(util.register_extension_class, base=self)
    extmanager = ExtensionManager(
        'extensions.methods.{}'.format(namespace),
        propagate_map_exceptions=True
    )
    if extmanager.extensions:
        extmanager.map(util.register_extension_method, base=self)
Register any extensions under the given namespace.
def lowerbound(self, axis=0):
    if not 0 <= axis < self.GetDimension():
        raise ValueError(
            "axis must be a non-negative integer less than "
            "the dimensionality of the histogram")
    if axis == 0:
        return self.xedges(1)
    if axis == 1:
        return self.yedges(1)
    if axis == 2:
        return self.zedges(1)
    raise TypeError("axis must be an integer")
Get the lower bound of the binning along an axis
def gauge(name, value, rate=1, tags=None):
    client().gauge(name, value, rate, tags)
Set the value for a gauge. >>> import statsdecor >>> statsdecor.gauge('my.metric', 10)
def preprocess_cell( self, cell: "NotebookNode", resources: dict, cell_index: int ) -> Tuple["NotebookNode", dict]: output_files_dir = resources.get("output_files_dir", None) if not isinstance(resources["outputs"], dict): resources["outputs"] = {} for name, attach in cell.get("attachments", {}).items(): orig_name = name name = re.sub(r"%[\w\d][\w\d]", "-", name) for mime, data in attach.items(): if mime not in self.extract_output_types: continue if mime in {"image/png", "image/jpeg", "application/pdf"}: data = a2b_base64(data) elif sys.platform == "win32": data = data.replace("\n", "\r\n").encode("UTF-8") else: data = data.encode("UTF-8") filename = self.output_filename_template.format( cell_index=cell_index, name=name, unique_key=resources.get("unique_key", ""), ) if output_files_dir is not None: filename = os.path.join(output_files_dir, filename) if name.endswith(".gif") and mime == "image/png": filename = filename.replace(".gif", ".png") resources["outputs"][filename] = data attach_str = "attachment:" + orig_name if attach_str in cell.source: cell.source = cell.source.replace(attach_str, filename) return cell, resources
Apply a transformation on each cell. Parameters ---------- cell : NotebookNode cell Notebook cell being processed resources : dictionary Additional resources used in the conversion process. Allows preprocessors to pass variables into the Jinja engine. cell_index : int Index of the cell being processed (see base.py)
def init_db(): db.drop_all() db.create_all() title = "de Finibus Bonorum et Malorum - Part I" text = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \ incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \ exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \ dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \ Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \ mollit anim id est laborum." post = Post(title=title, text=text) db.session.add(post) db.session.commit()
Populate a small db with some example entries.
def kill(pid_file):
    try:
        with open(pid_file) as f:
            os.kill(int(f.read()), 9)
        os.remove(pid_file)
    except (IOError, OSError):
        return False
    return True
Attempts to shut down a previously started daemon.
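A hypothetical invocation, assuming the kill function above is in scope and that a daemon previously wrote its PID to the illustrative path below:

if kill('/tmp/mydaemon.pid'):
    print('daemon stopped and PID file removed')
else:
    print('no running daemon found')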
def add_listener(self, callback, event_type=None):
    listener_uid = uuid4()
    self.listeners.append(
        {
            'uid': listener_uid,
            'callback': callback,
            'event_type': event_type
        }
    )
    return listener_uid
Add a listener that will send a callback when the client receives an event. Args: callback (func(roomchunk)): Callback called when an event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener.
def merge_two_dictionaries(a, b, merge_lists=False): key = None try: if a is None or isinstance(a, (six.string_types, six.text_type, six.integer_types, float)): a = b elif isinstance(a, list): if isinstance(b, list): if merge_lists: a.extend(b) else: a = b else: a.append(b) elif isinstance(a, (dict, UserDict)): if isinstance(b, (dict, UserDict)): for key in b: if key in a: a[key] = merge_two_dictionaries(a[key], b[key], merge_lists=merge_lists) else: a[key] = b[key] else: raise ValueError('Cannot merge non-dict "%s" into dict "%s"' % (b, a)) else: raise ValueError('NOT IMPLEMENTED "%s" into "%s"' % (b, a)) except TypeError as e: raise ValueError('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a)) return a
Merges b into a and returns merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen Args: a (DictUpperBound): dictionary to merge into b (DictUpperBound): dictionary to merge from merge_lists (bool): Whether to merge lists (True) or replace lists (False). Default is False. Returns: DictUpperBound: Merged dictionary
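A minimal sketch of the merge behaviour, assuming the merge_two_dictionaries function above is in scope (note that `a` is modified in place and also returned):

a = {'x': 1, 'nested': {'keep': True}, 'tags': ['a']}
b = {'y': 2, 'nested': {'extra': 3}, 'tags': ['b']}
merged = merge_two_dictionaries(a, b, merge_lists=True)
print(merged)  # {'x': 1, 'nested': {'keep': True, 'extra': 3}, 'tags': ['a', 'b'], 'y': 2}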
def pull_dependencies(collector, image, **kwargs):
    for dep in image.commands.dependent_images:
        kwargs["image"] = dep
        pull_arbitrary(collector, **kwargs)
Pull an image's dependent images
def move_group_in_parent(self, group = None, index = None): if group is None or index is None: raise KPError("group and index must be set") elif type(group) is not v1Group or type(index) is not int: raise KPError("group must be a v1Group-instance and index " "must be an integer.") elif group not in self.groups: raise KPError("Given group doesn't exist") elif index < 0 or index >= len(group.parent.children): raise KPError("index must be a valid index if group.parent.groups") else: group_at_index = group.parent.children[index] pos_in_parent = group.parent.children.index(group) pos_in_groups = self.groups.index(group) pos_in_groups2 = self.groups.index(group_at_index) group.parent.children[index] = group group.parent.children[pos_in_parent] = group_at_index self.groups[pos_in_groups2] = group self.groups[pos_in_groups] = group_at_index if group.children: self._move_group_helper(group) if group_at_index.children: self._move_group_helper(group_at_index) group.last_mod = datetime.now().replace(microsecond=0) return True
Move group to another position in group's parent. index must be a valid index of group.parent.groups
def bind_to_storage_buffer(self, binding=0, *, offset=0, size=-1) -> None:
    self.mglo.bind_to_storage_buffer(binding, offset, size)
Bind the buffer to a shader storage buffer. Args: binding (int): The shader storage binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
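A hedged ModernGL sketch; it assumes a GPU/driver that can create a standalone context supporting shader storage buffers, and the buffer size is arbitrary:

import moderngl

ctx = moderngl.create_standalone_context(require=430)
buf = ctx.buffer(reserve=1024)          # 1 KiB scratch buffer
buf.bind_to_storage_buffer(binding=0)   # visible to shader code as binding 0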
def _configure_shell(config): config.has_section('shell') or config.add_section('shell') logger.info( "What shells or environments would you like sprinter to work with?\n" "(Sprinter will not try to inject into environments not specified here.)\n" "If you specify 'gui', sprinter will attempt to inject it's state into graphical programs as well.\n" "i.e. environment variables sprinter set will affect programs as well, not just shells\n" "WARNING: injecting into the GUI can be very dangerous. it usually requires a restart\n" " to modify any environmental configuration." ) environments = list(enumerate(sorted(SHELL_CONFIG), start=1)) logger.info("[0]: All, " + ", ".join(["[%d]: %s" % (index, val) for index, val in environments])) desired_environments = lib.prompt("type the environment, comma-separated", default="0") for index, val in environments: if str(index) in desired_environments or "0" in desired_environments: config.set('shell', val, 'true') else: config.set('shell', val, 'false')
Checks and queries values for the shell
def add_tmpltbank_from_hdf_file(self, hdf_fp, vary_fupper=False):
    mass1s = hdf_fp['mass1'][:]
    mass2s = hdf_fp['mass2'][:]
    spin1zs = hdf_fp['spin1z'][:]
    spin2zs = hdf_fp['spin2z'][:]
    for idx in xrange(len(mass1s)):
        self.add_point_by_masses(mass1s[idx], mass2s[idx],
                                 spin1zs[idx], spin2zs[idx],
                                 vary_fupper=vary_fupper)
This function will take a pointer to an open HDF File object containing a list of templates and add them into the partitioned template bank object. Parameters ----------- hdf_fp : h5py.File object The template bank in HDF5 format. vary_fupper : False If given also include the additional information needed to compute distances with a varying upper frequency cutoff.
def set_until(self, frame, lineno=None):
    if lineno is None:
        lineno = frame.f_lineno + 1
    self._set_stopinfo(frame, lineno)
Stop when the current line number in frame is greater than lineno or when returning from frame.
def get_cookie_string(cls, url, user_agent=None, **kwargs):
    tokens, user_agent = cls.get_tokens(url, user_agent=user_agent, **kwargs)
    return "; ".join("=".join(pair) for pair in tokens.items()), user_agent
Convenience function for building a Cookie HTTP header value.
def beta_diversity(self, metric="braycurtis", rank="auto"):
    if metric not in ("jaccard", "braycurtis", "cityblock"):
        raise OneCodexException(
            "For beta diversity, metric must be one of: jaccard, braycurtis, cityblock"
        )
    if self._guess_normalized():
        raise OneCodexException("Beta diversity requires unnormalized read counts.")
    df = self.to_df(rank=rank, normalize=False)
    counts = []
    for c_id in df.index:
        counts.append(df.loc[c_id].tolist())
    return skbio.diversity.beta_diversity(metric, counts, df.index.tolist())
Calculate the diversity between two communities. Parameters ---------- metric : {'jaccard', 'braycurtis', 'cityblock'} The distance metric to calculate. rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- skbio.stats.distance.DistanceMatrix, a distance matrix.
def add_package(package_name, package_path='templates', encoding='utf-8'):
    if not _has_jinja:
        raise RuntimeError(_except_text)
    _jload.add_loader(PackageLoader(package_name, package_path, encoding))
Adds the given package to the template search routine
def cause_mip(self, mechanism, purview):
    return self.find_mip(Direction.CAUSE, mechanism, purview)
Return the irreducibility analysis for the cause MIP. Alias for |find_mip()| with ``direction`` set to |CAUSE|.
def remove(parent, idx):
    if isinstance(parent, dict):
        del parent[idx]
    elif isinstance(parent, list):
        del parent[int(idx)]
    else:
        raise JSONPathError("Invalid path for operation")
Remove a value from a dict or list.
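A brief usage sketch, assuming the remove function above is in scope; note that list indices arrive as strings and are coerced to integers:

doc = {'items': ['a', 'b', 'c'], 'meta': {'count': 3}}
remove(doc['items'], '1')     # drops 'b'
remove(doc['meta'], 'count')
print(doc)  # {'items': ['a', 'c'], 'meta': {}}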
def getUnionLocations(encoder, x, y, r, step=1):
    output = np.zeros(encoder.getWidth(), dtype=defaultDtype)
    locations = set()
    for dx in range(-r, r + 1, step):
        for dy in range(-r, r + 1, step):
            if dx * dx + dy * dy <= r * r:
                e = encodeLocation(encoder, x + dx, y + dy, output)
                locations = locations.union(set(e))
    return locations
Return a union of location encodings that correspond to the union of all locations within the specified circle.
def handle(self, *args, **options): self.senate_class = options["senate_class"] writefile = open("bootstrap.json", "w") elex_args = [ "elex", "results", options["election_date"], "-o", "json", "--national-only", ] if options["test"]: elex_args.append("-t") subprocess.run(elex_args, stdout=writefile) with open("bootstrap.json", "r") as readfile: data = json.load(readfile) candidates = collections.defaultdict(list) for d in data: key = "{0} {1}: {2}, {3}".format( d["officename"], d["statename"], d["last"], d["first"] ) candidates[key].append(d) for candidate_races in tqdm( candidates.values(), desc="Candidates" ): tqdm.write( "{0} {1}: {2}, {3}".format( candidate_races[0]["statename"], candidate_races[0]["officename"], candidate_races[0]["last"], candidate_races[0]["first"], ) ) for race in tqdm( candidate_races, desc="Contests", leave=False ): if race["level"] == geography.DivisionLevel.TOWNSHIP: continue if not race.get("level", None): continue if race["is_ballot_measure"]: continue self.process_row(race)
This management command gets data for a given election date from elex. Then, it loops through each row of the data and calls `process_row`. In order for this command to work, you must have bootstrapped all of the dependent apps: entity, geography, government, election, vote, and almanac.
def _pb_attr_value(val): if isinstance(val, datetime.datetime): name = "timestamp" value = _datetime_to_pb_timestamp(val) elif isinstance(val, Key): name, value = "key", val.to_protobuf() elif isinstance(val, bool): name, value = "boolean", val elif isinstance(val, float): name, value = "double", val elif isinstance(val, six.integer_types): name, value = "integer", val elif isinstance(val, six.text_type): name, value = "string", val elif isinstance(val, six.binary_type): name, value = "blob", val elif isinstance(val, Entity): name, value = "entity", val elif isinstance(val, dict): entity_val = Entity(key=None) entity_val.update(val) name, value = "entity", entity_val elif isinstance(val, list): name, value = "array", val elif isinstance(val, GeoPoint): name, value = "geo_point", val.to_protobuf() elif val is None: name, value = "null", struct_pb2.NULL_VALUE else: raise ValueError("Unknown protobuf attr type", type(val)) return name + "_value", value
Given a value, return the protobuf attribute name and proper value. The Protobuf API uses different attribute names based on value types rather than inferring the type. This function simply determines the proper attribute name based on the type of the value provided and returns the attribute name as well as a properly formatted value. Certain value types need to be coerced into a different type (such as a `datetime.datetime` into an integer timestamp, or a `google.cloud.datastore.key.Key` into a Protobuf representation. This function handles that for you. .. note:: Values which are "text" ('unicode' in Python2, 'str' in Python3) map to 'string_value' in the datastore; values which are "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'. For example: >>> _pb_attr_value(1234) ('integer_value', 1234) >>> _pb_attr_value('my_string') ('string_value', 'my_string') :type val: :class:`datetime.datetime`, :class:`google.cloud.datastore.key.Key`, bool, float, integer, bytes, str, unicode, :class:`google.cloud.datastore.entity.Entity`, dict, list, :class:`google.cloud.datastore.helpers.GeoPoint`, NoneType :param val: The value to be scrutinized. :rtype: tuple :returns: A tuple of the attribute name and proper value type.
def create_album(self, title=None, description=None, images=None, cover=None): url = self._base_url + "/3/album/" payload = {'ids': images, 'title': title, 'description': description, 'cover': cover} resp = self._send_request(url, params=payload, method='POST') return Album(resp, self, has_fetched=False)
Create a new Album. :param title: The title of the album. :param description: The album's description. :param images: A list of the images that will be added to the album after it's created. Can be Image objects, ids or a combination of the two. Images that you cannot add (non-existing or not owned by you) will not cause exceptions, but fail silently. :param cover: The id of the image you want as the album's cover image. :returns: The newly created album.
def rotate(name, pattern=None, conf_file=default_conf, **kwargs): kwargs = salt.utils.args.clean_kwargs(**kwargs) if 'entryname' not in kwargs and name and not name.startswith('/'): kwargs['entryname'] = name if 'log_file' not in kwargs: if pattern and pattern.startswith('/'): kwargs['log_file'] = pattern elif name and name.startswith('/'): kwargs['log_file'] = name log.debug("logadm.rotate - kwargs: %s", kwargs) command = "logadm -f {}".format(conf_file) for arg, val in kwargs.items(): if arg in option_toggles.values() and val: command = "{} {}".format( command, _arg2opt(arg), ) elif arg in option_flags.values(): command = "{} {} {}".format( command, _arg2opt(arg), _quote_args(six.text_type(val)) ) elif arg != 'log_file': log.warning("Unknown argument %s, don't know how to map this!", arg) if 'log_file' in kwargs: if 'entryname' not in kwargs: command = "{} -w {}".format(command, _quote_args(kwargs['log_file'])) else: command = "{} {}".format(command, _quote_args(kwargs['log_file'])) log.debug("logadm.rotate - command: %s", command) result = __salt__['cmd.run_all'](command, python_shell=False) if result['retcode'] != 0: return dict(Error='Failed in adding log', Output=result['stderr']) return dict(Result='Success')
Set up pattern for logging. name : string alias for entryname pattern : string alias for log_file conf_file : string optional path to alternative configuration file kwargs : boolean|string|int optional additional flags and parameters .. note:: ``name`` and ``pattern`` were kept for backwards compatibility reasons. ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias for ``log_file``. These aliases will only be used if the ``entryname`` and ``log_file`` arguments are not passed. For a full list of arguments see ```logadm.show_args```. CLI Example: .. code-block:: bash salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7 salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700'
def parse_debug_object(response):
    "Parse the results of Redis's DEBUG OBJECT command into a Python dict"
    response = nativestr(response)
    response = 'type:' + response
    response = dict([kv.split(':') for kv in response.split()])
    int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
    for field in int_fields:
        if field in response:
            response[field] = int(response[field])
    return response
Parse the results of Redis's DEBUG OBJECT command into a Python dict
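An illustrative call, assuming parse_debug_object above and its nativestr helper are in scope; the input string mimics a raw DEBUG OBJECT reply:

raw = 'Value at:0x7f refcount:1 encoding:embstr serializedlength:6 lru:123 lru_seconds_idle:10'
print(parse_debug_object(raw))
# {'type': 'Value', 'at': '0x7f', 'refcount': 1, 'encoding': 'embstr',
#  'serializedlength': 6, 'lru': 123, 'lru_seconds_idle': 10}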
def save_chkpt_vars(dic, path):
    logger.info("Variables to save to {}:".format(path))
    keys = sorted(list(dic.keys()))
    logger.info(pprint.pformat(keys))
    assert not path.endswith('.npy')
    if path.endswith('.npz'):
        np.savez_compressed(path, **dic)
    else:
        with tf.Graph().as_default(), \
                tf.Session() as sess:
            for k, v in six.iteritems(dic):
                k = get_op_tensor_name(k)[0]
                _ = tf.Variable(name=k, initial_value=v)
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.save(sess, path, write_meta_graph=False)
Save variables in dic to path.

Args:
    dic: {name: value}
    path: save as npz if the name ends with '.npz', otherwise save as a checkpoint.
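A small stand-alone sketch of the '.npz' branch only (the checkpoint branch needs a TensorFlow session); the variable names and shapes are placeholders.

import numpy as np

# Hypothetical variable dict; names and shapes are invented.
variables = {
    "conv1/W": np.zeros((3, 3, 3, 16), dtype=np.float32),
    "conv1/b": np.zeros((16,), dtype=np.float32),
}

np.savez_compressed("model.npz", **variables)  # one array per variable name

loaded = np.load("model.npz")                  # lazy-loading archive
print(sorted(loaded.files))                    # ['conv1/W', 'conv1/b']
print(loaded["conv1/W"].shape)                 # (3, 3, 3, 16)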
def get_model(self, model, model_id):
    return self._store.find_record(self._get_model_class(model), int(model_id))
Get a single model from the server.

Args:
    model (string): The class as a string.
    model_id (string): The integer ID as a string.

Returns:
    :class:`cinder_data.model.CinderModel`: An instance of the model.
def sort_trigger_set(triggers, exclude_previous=True, say=None):
    if say is None:
        say = lambda x: x

    trigger_object_list = []
    for index, trig in enumerate(triggers):
        if exclude_previous and trig[1]["previous"]:
            continue

        pattern = trig[0]

        match, weight = re.search(RE.weight, trig[0]), 0
        if match:
            weight = int(match.group(1))

        match = re.search(RE.inherit, pattern)
        if match:
            inherit = int(match.group(1))
            say("\t\t\tTrigger belongs to a topic which inherits other topics: level=" + str(inherit))
            triggers[index][0] = pattern = re.sub(RE.inherit, "", pattern)
        else:
            inherit = sys.maxsize

        trigger_object_list.append(TriggerObj(pattern, index, weight, inherit))

    sorted_list = sorted(trigger_object_list,
                         key=attrgetter('weight', 'inherit', 'is_empty', 'star',
                                        'pound', 'under', 'option', 'wordcount',
                                        'len', 'alphabet'))
    return [triggers[item.index] for item in sorted_list]
Sort a group of triggers in optimal sorting order.

The optimal sorting order is, briefly:

* Atomic triggers (containing nothing but plain words and alternation
  groups) are on top, with triggers containing the most words coming
  first. Triggers with equal word counts are sorted by length, and then
  alphabetically if they have the same length.
* Triggers containing optionals are sorted next, by word count like
  atomic triggers.
* Triggers containing wildcards are next, with ``_`` (alphabetic)
  wildcards on top, then ``#`` (numeric) and finally ``*``.
* At the bottom of the sorted list are triggers consisting of only a
  single wildcard, in the order: ``_``, ``#``, ``*``.

Triggers that have ``{weight}`` tags are grouped together by weight
value and sorted amongst themselves. Higher weighted groups are then
ordered before lower weighted groups regardless of the normal sorting
algorithm.

Triggers that come from topics which inherit other topics are also
sorted with higher priority than triggers from the inherited topics.

Arguments:
    triggers ([]str): Array of triggers to sort.
    exclude_previous (bool): Create a sort buffer for 'previous' triggers.
    say (function): A reference to ``RiveScript._say()`` or provide your
        own function.
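A reduced sketch of the multi-key ordering idea using operator.attrgetter on a stand-in record; the fields, sample patterns and the sign convention are invented for illustration, and the real TriggerObj may arrange its attributes differently.

from collections import namedtuple
from operator import attrgetter

# Negating weight and word count is one way to get "higher first" out of an
# ascending sort over an attribute tuple.
Trig = namedtuple("Trig", "pattern neg_weight neg_wordcount")

trigs = [
    Trig("*",            neg_weight=0,  neg_wordcount=0),
    Trig("hello bot",    neg_weight=0,  neg_wordcount=-2),
    Trig("hello",        neg_weight=0,  neg_wordcount=-1),
    Trig("{weight=5}hi", neg_weight=-5, neg_wordcount=-1),
]

ordered = sorted(trigs, key=attrgetter("neg_weight", "neg_wordcount"))
print([t.pattern for t in ordered])
# ['{weight=5}hi', 'hello bot', 'hello', '*']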
def get_etag(storage, path, prefixed_path):
    cache_key = get_cache_key(path)
    etag = cache.get(cache_key, False)
    if etag is False:
        etag = get_remote_etag(storage, prefixed_path)
        cache.set(cache_key, etag)
    return etag
Get etag of path from cache or S3 - in that order.
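A stand-alone sketch of the same "cache first, then remote" pattern with a plain dict; the fake fetch function stands in for the S3 lookup, and False is the sentinel so a legitimately cached None (no remote ETag) is not re-fetched.

_etag_cache = {}

def fake_remote_etag(path):
    # Placeholder for the real S3 head-object call.
    print("remote lookup for", path)
    return None if path.endswith(".missing") else '"abc123"'

def cached_etag(path):
    etag = _etag_cache.get(path, False)   # False means "not cached yet"
    if etag is False:
        etag = fake_remote_etag(path)
        _etag_cache[path] = etag          # caches None results too
    return etag

cached_etag("static/app.js")        # hits the remote
cached_etag("static/app.js")        # served from the cache
cached_etag("static/gone.missing")  # the None result is cached as well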
def permutation_entropy(x, n, tau):
    PeSeq = []
    Em = embed_seq(x, tau, n)

    for i in range(0, len(Em)):
        r = []
        z = []

        for j in range(0, len(Em[i])):
            z.append(Em[i][j])

        for j in range(0, len(Em[i])):
            z.sort()
            r.append(z.index(Em[i][j]))
            z[z.index(Em[i][j])] = -1

        PeSeq.append(r)

    RankMat = []

    while len(PeSeq) > 0:
        RankMat.append(PeSeq.count(PeSeq[0]))
        x = PeSeq[0]
        for j in range(0, PeSeq.count(PeSeq[0])):
            PeSeq.pop(PeSeq.index(x))

    RankMat = numpy.array(RankMat)
    RankMat = numpy.true_divide(RankMat, RankMat.sum())
    EntropyMat = numpy.multiply(numpy.log2(RankMat), RankMat)
    PE = -1 * EntropyMat.sum()

    return PE
Compute Permutation Entropy of a given time series x, specified by
permutation order n and embedding lag tau.

Parameters
----------
x : list
    a time series
n : integer
    permutation order
tau : integer
    embedding lag

Returns
-------
PE : float
    permutation entropy

Notes
-----
Suppose the given time series is X = [x(1), x(2), x(3), ..., x(N)].
We first build the embedding matrix Em, of dimension (N-n+1) * n, such
that the i-th row of Em is x(i), x(i+1), ..., x(i+n-1). Hence the
embedding lag and the embedding dimension are 1 and n respectively. We
build this matrix from a given time series, X, by calling the pyeeg
function embed_seq(x, 1, n).

We then transform each row of the embedding matrix into a new sequence,
comprising a set of integers in the range 0, ..., n-1. The order in which
the integers are placed within a row is the same as that of the original
elements: 0 is placed where the smallest element of the row was and n-1
replaces the largest element of the row.

To calculate the permutation entropy, we calculate the entropy of PeSeq.
In doing so, we count the number of occurrences of each permutation in
PeSeq and write it in a sequence, RankMat. We then use this sequence to
calculate the entropy by using Shannon's entropy formula.

Permutation entropy is usually calculated with n in the range of 3 and 7.

References
----------
Bandt, Christoph, and Bernd Pompe. "Permutation entropy: a natural
complexity measure for time series." Physical Review Letters 88.17
(2002): 174102.

Examples
--------
>>> import pyeeg
>>> x = [1,2,4,5,12,3,4,5]
>>> pyeeg.permutation_entropy(x,5,1)
2.0
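A compact stand-alone sketch of the same computation using NumPy ordinal patterns; it reproduces the docstring example, though ties may be broken slightly differently than in the rank loop above (which does not matter for this input).

import numpy as np
from collections import Counter

def perm_entropy_sketch(x, n, tau):
    x = np.asarray(x)
    # Embedding windows: x[i], x[i+tau], ..., x[i+(n-1)*tau]
    windows = np.array([x[i:i + (n - 1) * tau + 1:tau]
                        for i in range(len(x) - (n - 1) * tau)])
    # Each window becomes an ordinal pattern; a stable sort keeps ties in order.
    patterns = [tuple(np.argsort(w, kind="stable")) for w in windows]
    counts = np.array(list(Counter(patterns).values()), dtype=float)
    p = counts / counts.sum()
    return -(p * np.log2(p)).sum()

print(perm_entropy_sketch([1, 2, 4, 5, 12, 3, 4, 5], n=5, tau=1))  # 2.0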
def check_if_ready(self):
    try:
        results = self.manager.check(self.results_id)
    except exceptions.ResultsNotReady as e:
        self._is_ready = False
        self._not_ready_exception = e
    except exceptions.ResultsExpired as e:
        self._is_ready = True
        self._expired_exception = e
    else:
        failures = self.get_failed_requests(results)
        members = self.get_new_members(results)
        self.results = self.__class__.Results(list(members), list(failures))
        self._is_ready = True
        self._not_ready_exception = None
Check for and fetch the results if ready.
def sudo(self, command, **kwargs):
    runner = self.config.runners.remote(self)
    return self._sudo(runner, command, **kwargs)
Execute a shell command, via ``sudo``, on the remote end.

This method is identical to `invoke.context.Context.sudo` in every way,
except in that -- like `run` -- it honors per-host/per-connection
configuration overrides in addition to the generic/global ones. Thus, for
example, per-host sudo passwords may be configured.

.. versionadded:: 2.0
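A hedged usage sketch of per-connection sudo configuration with Fabric 2-style Connection/Config objects; the host, user and password are placeholders and would normally come from inventory and a secrets store rather than literals.

from fabric import Config, Connection

config = Config(overrides={"sudo": {"password": "s3cr3t"}})  # placeholder
conn = Connection("web1.example.com", user="deploy", config=config)

# Runs under sudo on the remote host using the per-connection password.
result = conn.sudo("systemctl restart myapp", hide=True)
print(result.ok)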
def activate():
    global PathFinder, FileFinder, ff_path_hook
    path_hook_index = len(sys.path_hooks)
    sys.path_hooks.append(ff_path_hook)
    sys.path_importer_cache.clear()
    pathfinder_index = len(sys.meta_path)
    sys.meta_path.append(PathFinder)
    return path_hook_index, pathfinder_index
Install the path-based import components.
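A self-contained sketch of the general mechanism being used here: append a finder to sys.meta_path, clear the importer cache, and remember the index so the finder can be removed later. The logging finder is purely illustrative.

import sys
from importlib.abc import MetaPathFinder

class LoggingFinder(MetaPathFinder):
    """Illustrative finder: logs lookups and defers to the other finders."""
    def find_spec(self, fullname, path=None, target=None):
        print("import requested:", fullname)
        return None  # returning None lets the remaining finders handle it

def install():
    index = len(sys.meta_path)
    sys.meta_path.append(LoggingFinder())
    sys.path_importer_cache.clear()  # drop cached finders so hooks re-run
    return index

idx = install()
import secrets  # noqa: F401  -- prints a message unless already in sys.modules
del sys.meta_path[idx]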
def _periodic_callback(self):
    try:
        self.notify(self._state)
    except Exception:
        self._error_callback(*sys.exc_info())

    if self._subscriptions:
        self._call_later_handle = \
            self._loop.call_later(self._interval, self._periodic_callback)
    else:
        self._state = NONE
        self._call_later_handle = None
Periodic callback, started on the first emit; it reschedules itself while subscriptions remain.
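A minimal self-contained sketch of the same re-arm-with-call_later pattern on an asyncio loop; the interval and stop condition are invented for illustration.

import asyncio

def make_ticker(loop, interval=0.2, remaining=3):
    state = {"remaining": remaining, "handle": None}

    def _tick():
        print("tick, remaining =", state["remaining"])
        state["remaining"] -= 1
        if state["remaining"] > 0:
            # Re-arm the callback, like the method above does while
            # subscriptions still exist.
            state["handle"] = loop.call_later(interval, _tick)
        else:
            state["handle"] = None  # stop condition reached

    state["handle"] = loop.call_later(interval, _tick)
    return state

async def main():
    make_ticker(asyncio.get_running_loop())
    await asyncio.sleep(1)  # keep the loop alive long enough to see the ticks

asyncio.run(main())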
def build_vocab(self, *args, **kwargs):
    counter = Counter()
    sources = []
    for arg in args:
        if isinstance(arg, Dataset):
            sources += [getattr(arg, name) for name, field in
                        arg.fields.items() if field is self]
        else:
            sources.append(arg)
    for data in sources:
        for x in data:
            if not self.sequential:
                x = [x]
            try:
                counter.update(x)
            except TypeError:
                counter.update(chain.from_iterable(x))
    specials = list(OrderedDict.fromkeys(
        tok for tok in [self.unk_token, self.pad_token, self.init_token,
                        self.eos_token] + kwargs.pop('specials', [])
        if tok is not None))
    self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)
Construct the Vocab object for this field from one or more datasets.

Arguments:
    Positional arguments: Dataset objects or other iterable data
        sources from which to construct the Vocab object that
        represents the set of possible values for this field. If
        a Dataset object is provided, all columns corresponding
        to this field are used; individual columns can also be
        provided directly.
    Remaining keyword arguments: Passed to the constructor of Vocab.
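A simplified, library-free sketch of the idea: count tokens from the data sources, then build an index where deduplicated special tokens come first; the example tokens and specials are placeholders.

from collections import Counter, OrderedDict
from itertools import chain

# Invented toy "column" of tokenized examples standing in for a Dataset field.
examples = [["the", "cat", "sat"], ["the", "dog", "sat", "down"]]

counter = Counter(chain.from_iterable(examples))

# Deduplicate specials while preserving their order, as the method above does.
specials = list(OrderedDict.fromkeys(["<unk>", "<pad>"]))

# Specials first, then remaining tokens by frequency (ties alphabetically).
itos = specials + [tok for tok, _ in
                   sorted(counter.items(), key=lambda kv: (-kv[1], kv[0]))]
stoi = {tok: i for i, tok in enumerate(itos)}

print(itos)  # ['<unk>', '<pad>', 'sat', 'the', 'cat', 'dog', 'down']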