code: string (51 to 2.38k characters)
docstring: string (4 to 15.2k characters)
def _zero_mantissa(dval):
    bb = _double_as_bytes(dval)
    return ((bb[1] & 0x0f) | reduce(operator.or_, bb[2:])) == 0
Determine whether the mantissa bits of the given double are all zero.
def cube2matrix(data_cube):
    return data_cube.reshape(
        [data_cube.shape[0]] + [np.prod(data_cube.shape[1:])]).T
r"""Cube to Matrix This method transforms a 3D cube to a 2D matrix Parameters ---------- data_cube : np.ndarray Input data cube, 3D array Returns ------- np.ndarray 2D matrix Examples -------- >>> from modopt.base.transform import cube2matrix >>> a = np.arange(16).reshape((4, 2, 2)) >>> cube2matrix(a) array([[ 0, 4, 8, 12], [ 1, 5, 9, 13], [ 2, 6, 10, 14], [ 3, 7, 11, 15]])
def inspect_node(node):
    node_information = {}
    ssh = node.connect()
    if not ssh:
        log.error("Unable to connect to node %s", node.name)
        return
    (_in, _out, _err) = ssh.exec_command(
        "(type >& /dev/null -a srun && echo slurm) \
        || (type >& /dev/null -a qconf && echo sge) \
        || (type >& /dev/null -a pbsnodes && echo pbs) \
        || echo UNKNOWN")
    node_information['type'] = _out.read().strip()
    (_in, _out, _err) = ssh.exec_command("arch")
    node_information['architecture'] = _out.read().strip()
    if node_information['type'] == 'slurm':
        inspect_slurm_cluster(ssh, node_information)
    elif node_information['type'] == 'sge':
        inspect_sge_cluster(ssh, node_information)
    ssh.close()
    return node_information
This function accepts an `elasticluster.cluster.Node` instance, connects to the node, and tries to discover the kind of batch system installed, along with some other information.
def iptag_get(self, iptag, x, y):
    ack = self._send_scp(x, y, 0, SCPCommands.iptag,
                         int(consts.IPTagCommands.get) << 16 | iptag,
                         1, expected_args=0)
    return IPTag.from_bytestring(ack.data)
Get the value of an IPTag.

Parameters
----------
iptag : int
    Index of the IPTag to get

Returns
-------
:py:class:`.IPTag`
    The IPTag returned from SpiNNaker.
def bh_fdr(pval):
    pval_array = np.array(pval)
    sorted_order = np.argsort(pval_array)
    original_order = np.argsort(sorted_order)
    pval_array = pval_array[sorted_order]
    n = float(len(pval))
    pval_adj = np.zeros(int(n))
    i = np.arange(1, int(n) + 1, dtype=float)[::-1]
    pval_adj = np.minimum(1, cummin(n / i * pval_array[::-1]))[::-1]
    return pval_adj[original_order]
A Python implementation of the Benjamini-Hochberg FDR method. This code
should always give precisely the same answer as using
p.adjust(pval, method="BH") in R.

Parameters
----------
pval : list or array
    list/array of p-values

Returns
-------
pval_adj : np.array
    adjusted p-values according to the Benjamini-Hochberg method
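A minimal usage sketch with hypothetical p-values, assuming the module's `cummin` helper behaves like ``np.minimum.accumulate``:

>>> bh_fdr([0.01, 0.02, 0.03, 0.5])
array([0.04, 0.04, 0.04, 0.5 ])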
def run_jobs(delete_completed=False, ignore_errors=False, now=None):
    if ScheduledJob.objects.filter(status='running'):
        raise ValueError('jobs in progress found; aborting')
    if now is None:
        now = datetime.datetime.now()
    expire_jobs(now)
    schedule_sticky_jobs()
    start_scheduled_jobs(now, delete_completed, ignore_errors)
Run scheduled jobs. You may specify a date to be treated as the current time.
def _get_results_from_api(identifiers, endpoints, api_key, api_secret):
    if api_key is not None and api_secret is not None:
        client = housecanary.ApiClient(api_key, api_secret)
    else:
        client = housecanary.ApiClient()
    wrapper = getattr(client, endpoints[0].split('/')[0])
    if len(endpoints) > 1:
        return wrapper.component_mget(identifiers, endpoints)
    else:
        return wrapper.fetch_identifier_component(endpoints[0], identifiers)
Use the HouseCanary API Python Client to access the API
def is_atlas_enabled(blockstack_opts):
    if not blockstack_opts['atlas']:
        log.debug("Atlas is disabled")
        return False
    if 'zonefiles' not in blockstack_opts:
        log.debug("Atlas is disabled: no 'zonefiles' path set")
        return False
    if 'atlasdb_path' not in blockstack_opts:
        log.debug("Atlas is disabled: no 'atlasdb_path' path set")
        return False
    return True
Can we do atlas operations?
async def spawn_slaves(self, spawn_cmd, ports=None, **ssh_kwargs):
    pool = multiprocessing.Pool(len(self.nodes))
    rets = []
    for i, node in enumerate(self.nodes):
        server, server_port = node
        port = ports[node] if ports is not None else self.port
        mgr_addr = "tcp://{}:{}/0".format(server, port)
        self._manager_addrs.append(mgr_addr)
        if type(spawn_cmd) in [list, tuple]:
            cmd = spawn_cmd[i]
        else:
            cmd = spawn_cmd
        args = [server, cmd]
        ssh_kwargs_cp = ssh_kwargs.copy()
        ssh_kwargs_cp['port'] = server_port
        ret = pool.apply_async(ssh_exec_in_new_loop,
                               args=args,
                               kwds=ssh_kwargs_cp,
                               error_callback=logger.warning)
        rets.append(ret)
    self._pool = pool
    self._r = rets
Spawn multi-environments on the nodes through SSH-connections.

:param spawn_cmd: str or list, command(s) used to spawn the environment on
    each node. If *list*, it must contain one command for each node in
    :attr:`nodes`. If *str*, the same command is used for each node.
:param ports: Optional. If not ``None``, must be a mapping from nodes
    (``(server, port)``-tuples) to ports which are used for the spawned
    multi-environments' master manager environments. If ``None``, then the
    same port is used to derive the master manager addresses as was used to
    initialize this distributed environment's managing environment (port in
    :attr:`addr`).
:param ssh_kwargs: Any additional SSH-connection arguments, as specified by
    :meth:`asyncssh.connect`. See `asyncssh documentation
    <http://asyncssh.readthedocs.io/en/latest/api.html#connect>`_ for
    details.

Nodes are spawned by creating a multiprocessing pool where each node has
its own subprocess. These subprocesses then use SSH-connections to spawn
the multi-environments on the nodes. The SSH-connections in the pool are
kept alive until the nodes are stopped, i.e. this distributed environment
is destroyed.
def set_uppercase(self, uppercase):
    for row in self.rows:
        for key in row.keys:
            if type(key) == VKey:
                if uppercase:
                    key.value = key.value.upper()
                else:
                    key.value = key.value.lower()
Sets layout uppercase state. :param uppercase: True if uppercase, False otherwise.
def dns_resolution(self):
    if not self.can_update():
        self._tcex.handle_error(910, [self.type])
    return self.tc_requests.dns_resolution(
        self.api_type, self.api_sub_type, self.unique_id, owner=self.owner
    )
Updates the Host DNS resolution.

Returns: the result of the DNS resolution request.
def find_all(self, **names):
    values = names.items()
    if len(values) != 1:
        raise ValueError('Only one query is allowed at a time')
    name, value = values[0]
    for item in self:
        if item.get(name) == value:
            yield item
Find all items with matching extra values.

:param \*\*names: Extra values to match.
:rtype: ``Iterable[EnumItem]``
def append_executable(self, executable):
    if isinstance(executable, str) and not isinstance(executable, unicode):
        executable = unicode(executable)
    if not isinstance(executable, unicode):
        raise TypeError("expected executable name as str, not {}".format(
            executable.__class__.__name__))
    self._executables.append(executable)
Append an executable OS command to the list to be called.

Argument:
    executable (str): os callable executable.
def fetch_credentials(auth_id, auth_token):
    if not (auth_id and auth_token):
        try:
            auth_id = os.environ['PLIVO_AUTH_ID']
            auth_token = os.environ['PLIVO_AUTH_TOKEN']
        except KeyError:
            raise AuthenticationError('The Plivo Python SDK '
                                      'could not find your auth credentials.')
    if not (is_valid_mainaccount(auth_id) or is_valid_subaccount(auth_id)):
        raise AuthenticationError('Invalid auth_id supplied: %s' % auth_id)
    return AuthenticationCredentials(auth_id=auth_id, auth_token=auth_token)
Fetches the right credentials, either from the params or from the environment.
def _on_model_save(sender, **kwargs):
    instance = kwargs.pop("instance")
    update_fields = kwargs.pop("update_fields")
    for index in instance.search_indexes:
        try:
            _update_search_index(
                instance=instance, index=index, update_fields=update_fields
            )
        except Exception:
            logger.exception("Error handling 'on_save' signal for %s",
                             instance)
Update document in search index post_save.
def compile_path(self, path, write=True, package=None, *args, **kwargs):
    path = fixpath(path)
    if not isinstance(write, bool):
        write = fixpath(write)
    if os.path.isfile(path):
        if package is None:
            package = False
        destpath = self.compile_file(path, write, package, *args, **kwargs)
        return [destpath] if destpath is not None else []
    elif os.path.isdir(path):
        if package is None:
            package = True
        return self.compile_folder(path, write, package, *args, **kwargs)
    else:
        raise CoconutException("could not find source path", path)
Compile a path and return the paths to the compiled files.
def template_subst(template, subs, delims=('<', '>')):
    subst_text = template
    for (k, v) in subs.items():
        subst_text = subst_text.replace(delims[0] + k + delims[1], v)
    return subst_text
Perform substitution of content into tagged string.

For substitutions into template input files for external computational
packages, no checks for valid syntax are performed.

Each key in `subs` corresponds to a delimited substitution tag to be
replaced in `template` by the entire text of the value of that key. For
example, the dict ``{"ABC": "text"}`` would convert ``The <ABC> is
working`` to ``The text is working``, using the default delimiters of
'<' and '>'.

Substitutions are performed in iteration order from `subs`; recursive
substitution as the tag parsing proceeds is thus feasible if an
:class:`~collections.OrderedDict` is used and substitution key/value
pairs are added in the proper order.

Start and end delimiters for the tags are modified by `delims`. For
example, to substitute a tag of the form **{\|TAG\|}**, the tuple
``("{|", "|}")`` should be passed to `delims`. Any elements in `delims`
past the second are ignored. No checking is performed for whether the
delimiters are "sensible" or not.

Parameters
----------
template
    |str| -- Template containing tags delimited by `delims`, with tag
    names and substitution contents provided in `subs`

subs
    |dict| of |str| -- Each item's key and value are the tag name and
    corresponding content to be substituted into the provided template.

delims
    iterable of |str| -- Iterable containing the 'open' and 'close'
    strings used to mark tags in the template, which are drawn from
    elements zero and one, respectively. Any elements beyond these are
    ignored.

Returns
-------
subst_text
    |str| -- String generated from the parsed template, with all tag
    substitutions performed.
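A short doctest-style sketch of both the default and custom delimiters (values are illustrative only):

>>> template_subst("The <ABC> is working", {"ABC": "text"})
'The text is working'
>>> template_subst("x = {|X|}", {"X": "42"}, delims=("{|", "|}"))
'x = 42'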
def __could_edit(self, slug):
    page_rec = MWiki.get_by_uid(slug)
    if not page_rec:
        return False
    if self.check_post_role()['EDIT']:
        return True
    elif page_rec.user_name == self.userinfo.user_name:
        return True
    else:
        return False
Test if the user could edit the page.
def add_to_grid(self, agent):
    for i in range(len(self.grid)):
        for j in range(len(self.grid[0])):
            if self.grid[i][j] is None:
                x = self.origin[0] + i
                y = self.origin[1] + j
                self.grid[i][j] = agent
                return (x, y)
    # fixed: the original called .format() with arguments on a message
    # containing no placeholders
    raise ValueError("Trying to add an agent to a full grid.")
Add agent to the next available spot in the grid.

:returns: (x, y) of the agent in the grid. This is the agent's overall
    coordinate in the grand grid (i.e. the actual coordinate of the agent
    w.r.t. **origin**).
:raises: `ValueError` if the grid is full.
def slackbuild(self, name, sbo_file):
    return URL(self.sbo_url + name + sbo_file).reading()
Read SlackBuild file
def check_trajectory_id(self, dataset):
    results = []
    exists_ctx = TestCtx(BaseCheck.MEDIUM,
                         'Variable defining "trajectory_id" exists')
    trajectory_ids = dataset.get_variables_by_attributes(
        cf_role='trajectory_id')
    exists_ctx.assert_true(
        trajectory_ids,
        'variable defining cf_role="trajectory_id" exists')
    if not trajectory_ids:
        return exists_ctx.to_result()
    results.append(exists_ctx.to_result())
    test_ctx = TestCtx(BaseCheck.MEDIUM,
                       'Recommended attributes for the {} variable'.format(
                           trajectory_ids[0].name))
    test_ctx.assert_true(
        getattr(trajectory_ids[0], 'long_name', '') != "",
        "long_name attribute should exist and not be empty"
    )
    results.append(test_ctx.to_result())
    return results
Checks that if a variable exists for the trajectory id it has the
appropriate attributes.

:param netCDF4.Dataset dataset: An open netCDF dataset
def readGyroRange(self):
    raw_data = self._readByte(self.REG_GYRO_CONFIG)
    raw_data = (raw_data | 0xE7) ^ 0xE7
    return raw_data
! Read range of gyroscope.

@return an int value. It should be one of the following values
    (GYRO_RANGE_250DEG, GYRO_RANGE_500DEG, GYRO_RANGE_1KDEG,
    GYRO_RANGE_2KDEG).

@see GYRO_RANGE_250DEG
@see GYRO_RANGE_500DEG
@see GYRO_RANGE_1KDEG
@see GYRO_RANGE_2KDEG
def _calculate_checksum(value):
    polynomial = 0x131
    crc = 0xFF
    for byteCtr in [ord(x) for x in struct.pack(">H", value)]:
        crc ^= byteCtr
        for bit in range(8, 0, -1):
            if crc & 0x80:
                crc = (crc << 1) ^ polynomial
            else:
                crc = (crc << 1)
    return crc
4.12 Checksum Calculation from an unsigned short input
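A hedged check: this loop implements CRC-8 with polynomial 0x31 and init 0xFF over the big-endian bytes of the input (the Sensirion SHT3x scheme), under Python 2 where iterating a packed string yields characters. The datasheet test vector 0xBEEF -> 0x92 agrees under that assumption:

>>> hex(_calculate_checksum(0xBEEF))
'0x92'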
def countByValue(self):
    return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x + y)
Return a new DStream in which each RDD contains the counts of each distinct value in each RDD of this DStream.
def find_data_files(source, target, patterns):
    if glob.has_magic(source) or glob.has_magic(target):
        raise ValueError("Magic not allowed in src, target")
    ret = {}
    for pattern in patterns:
        pattern = os.path.join(source, pattern)
        for filename in glob.glob(pattern):
            if os.path.isfile(filename):
                targetpath = os.path.join(
                    target, os.path.relpath(filename, source)
                )
                path = os.path.dirname(targetpath)
                ret.setdefault(path, []).append(filename)
    return sorted(ret.items())
Locates the specified data-files and returns the matches in a data_files
compatible format.

source is the root of the source data tree. Use '' or '.' for current
directory. target is the root of the target data tree. Use '' or '.' for
the distribution directory. patterns is a sequence of glob-patterns for
the files you want to copy.
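A hypothetical call for a setup.py data_files list, assuming a source tree containing data/icons/app.png:

>>> find_data_files('data', 'share/myapp', ['icons/*.png'])
[('share/myapp/icons', ['data/icons/app.png'])]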
def home_two_point_field_goal_percentage(self):
    result = float(self.home_two_point_field_goals) / \
        float(self.home_two_point_field_goal_attempts)
    return round(float(result), 3)
Returns a ``float`` of the number of two point field goals made divided by the number of two point field goal attempts by the home team. Percentage ranges from 0-1.
def get_widget(self, index=None, path=None, tabs=None):
    if (index and tabs) or (path and tabs):
        return tabs.widget(index)
    elif self.plugin:
        return self.get_plugin_tabwidget(self.plugin).currentWidget()
    else:
        return self.plugins_tabs[0][0].currentWidget()
Get widget by index. If no tabs or index is specified, the currently active widget is returned.
def prt_gos_flat(self, prt):
    prtfmt = self.datobj.kws['fmtgo']
    _go2nt = self.sortobj.grprobj.go2nt
    go2nt = {go: _go2nt[go] for go in self.go2nt}
    prt.write("\n{N} GO IDs:\n".format(N=len(go2nt)))
    _sortby = self._get_sortgo()
    for ntgo in sorted(go2nt.values(), key=_sortby):
        prt.write(prtfmt.format(**ntgo._asdict()))
Print flat GO list.
def _function_handler(function, args, kwargs, pipe):
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    result = process_execute(function, *args, **kwargs)
    send_result(pipe, result)
Runs the actual function in a separate process and sends back its result through the pipe.
def score_braycurtis(self, term1, term2, **kwargs):
    t1_kde = self.kde(term1, **kwargs)
    t2_kde = self.kde(term2, **kwargs)
    return 1 - distance.braycurtis(t1_kde, t2_kde)
Compute a weighting score based on the Bray-Curtis distance between the
kernel density estimates of two terms.

Args:
    term1 (str)
    term2 (str)

Returns: float
def rarlognormal(a, sigma, rho, size=1):
    f = utils.ar1
    if np.isscalar(a):
        r = f(rho, 0, sigma, size)
    else:
        n = len(a)
        r = [f(rho, 0, sigma, n) for i in range(size)]
    if size == 1:
        r = r[0]
    return a * np.exp(r)
R""" Autoregressive normal random variates. If a is a scalar, generates one series of length size. If a is a sequence, generates size series of the same length as a.
def block(self, to_block):
    to_block = list(map(lambda obj: self.idpool.id(obj), to_block))
    new_obj = list(filter(lambda vid: vid not in self.oracle.vmap.e2i,
                          to_block))
    self.oracle.add_clause([-vid for vid in to_block])
    for vid in new_obj:
        self.oracle.add_clause([-vid], 1)
The method serves for imposing a constraint forbidding the hitting set
solver to compute a given hitting set. Each set to block is encoded as a
hard clause in the MaxSAT problem formulation, which is then added to the
underlying oracle.

:param to_block: a set to block
:type to_block: iterable(obj)
def focus_parent(self):
    mid = self.get_selected_mid()
    newpos = self._tree.parent_position(mid)
    if newpos is not None:
        newpos = self._sanitize_position((newpos,))
        self.body.set_focus(newpos)
move focus to parent of currently focused message
def get_next_asset(self):
    try:
        next_object = next(self)
    except StopIteration:
        raise IllegalState('no more elements available in this list')
    except Exception:
        raise OperationFailed()
    else:
        return next_object
Gets the next Asset in this list.

return: (osid.repository.Asset) - the next Asset in this list. The
    has_next() method should be used to test that a next Asset is
    available before calling this method.
raise: IllegalState - no more elements available in this list
raise: OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
def tobinary(images, path, prefix="image", overwrite=False,
             credentials=None):
    from thunder.writers import get_parallel_writer

    def tobuffer(kv):
        key, img = kv
        fname = prefix + "-" + "%05d.bin" % int(key)
        return fname, img.copy()

    writer = get_parallel_writer(path)(path, overwrite=overwrite,
                                       credentials=credentials)
    images.foreach(lambda x: writer.write(tobuffer(x)))
    config(path, list(images.value_shape), images.dtype,
           overwrite=overwrite)
Write out images as binary files.

See also
--------
thunder.data.images.tobinary
def _pfp__restore_snapshot(self, recurse=True):
    super(Struct, self)._pfp__restore_snapshot(recurse=recurse)
    if recurse:
        for child in self._pfp__children:
            child._pfp__restore_snapshot(recurse=recurse)
Restore the snapshotted value without triggering any events
def array_bytes(shape, dtype):
    return np.product(shape) * np.dtype(dtype).itemsize
Estimates the memory in bytes required for an array of the supplied shape and dtype
def compute_master_secret(self, pre_master_secret, client_random,
                          server_random):
    seed = client_random + server_random
    if self.tls_version < 0x0300:
        return None
    elif self.tls_version == 0x0300:
        return self.prf(pre_master_secret, seed, 48)
    else:
        return self.prf(pre_master_secret, b"master secret", seed, 48)
Return the 48-byte master_secret, computed from pre_master_secret, client_random and server_random. See RFC 5246, section 6.3.
def find_first_in_list(txt: str, str_list: [str]) -> int:
    start = len(txt) + 1
    for item in str_list:
        if start > txt.find(item) > -1:
            start = txt.find(item)
    return start if len(txt) + 1 > start > -1 else -1
Returns the index of the earliest occurrence of an item from a list in a
string.

Ex: find_first_in_list('foobar', ['bar', 'fin']) -> 3
def get_tasks():
    from paver.tasks import environment
    for tsk in environment.get_tasks():
        print(tsk.shortname)
Get all paver-defined tasks.
def handle(self, *args, **options):
    logger.info("Build started")
    self.set_options(*args, **options)
    if not options.get("keep_build_dir"):
        self.init_build_dir()
    if not options.get("skip_static"):
        self.build_static()
    if not options.get("skip_media"):
        self.build_media()
    self.build_views()
    logger.info("Build finished")
Making it happen.
def action(args):
    log.info('loading reference package')
    r = refpkg.Refpkg(args.refpkg, create=False)
    q = r.contents
    for i in range(args.n):
        if q['rollback'] is None:
            log.error('Cannot rollback {} changes; '
                      'refpkg only records {} changes.'.format(args.n, i))
            return 1
        else:
            q = q['rollback']
    for i in range(args.n):
        r.rollback()
    return 0
Roll back commands on a refpkg. *args* should be an argparse object with fields refpkg (giving the path to the refpkg to operate on) and n (giving the number of operations to roll back).
def add_request(self, request):
    queue_item = QueueItem(request, Response(request.url))
    self.add(queue_item)
    return queue_item
Add a request to the queue.

Args:
    request (:class:`nyawc.http.Request`): The request to add.

Returns:
    :class:`nyawc.QueueItem`: The created queue item.
def name(self, value):
    if isinstance(value, string_types):
        match = Parameter._PARAM_NAME_COMPILER_MATCHER(value)
        if match is None or match.group() != value:
            value = re_compile(value)
    self._name = value
Set parameter name.

:param str value: name value.
def get_note(self, note_id):
    index = 0
    while True:
        # my_notes pages through notes 100 at a time, sorted by noteId
        notes = self.my_notes(start_index=index, sort_by='noteId')
        if notes['result'] != 'success':
            break
        # past the target ID without finding it
        if notes['loans'][0]['noteId'] > note_id:
            break
        # target ID falls within this page
        if notes['loans'][-1]['noteId'] >= note_id:
            for note in notes['loans']:
                if note['noteId'] == note_id:
                    return note
        index += 100
    return False
Get a loan note that you've invested in by ID.

Parameters
----------
note_id : int
    The note ID

Returns
-------
dict
    A dictionary representing the matching note or False

Examples
--------
>>> from lendingclub import LendingClub
>>> lc = LendingClub(email='[email protected]', password='secret123')
>>> lc.authenticate()
True
>>> notes = lc.my_notes()                 # Get the first 100 loan notes
>>> len(notes['loans'])
100
>>> notes['total']          # See the total number of loan notes you have
630
>>> notes = lc.my_notes(start_index=100)  # Get the next 100 loan notes
>>> len(notes['loans'])
100
>>> notes = lc.my_notes(get_all=True)     # Get all notes in one request
>>> len(notes['loans'])
630
def get_parameters(self):
    d = {}
    for k in ['label', 'verbose_name', 'required', 'hint', 'placeholder',
              'choices', 'default', 'validators', 'max_length']:
        d[k] = getattr(self, k)
    return d
Get common attributes; they'll be used for the Model.relationship clone process.
def CreateWeightTableLDAS(in_ldas_nc,
                          in_nc_lon_var,
                          in_nc_lat_var,
                          in_catchment_shapefile,
                          river_id,
                          in_connectivity_file,
                          out_weight_table,
                          area_id=None,
                          file_geodatabase=None):
    data_ldas_nc = Dataset(in_ldas_nc)
    variables_list = data_ldas_nc.variables.keys()
    if in_nc_lon_var not in variables_list:
        raise Exception("Invalid longitude variable. Choose from: {0}"
                        .format(variables_list))
    if in_nc_lat_var not in variables_list:
        raise Exception("Invalid latitude variable. Choose from: {0}"
                        .format(variables_list))
    ldas_lon = data_ldas_nc.variables[in_nc_lon_var][:]
    ldas_lat = data_ldas_nc.variables[in_nc_lat_var][:]
    data_ldas_nc.close()
    rtree_create_weight_table(ldas_lat, ldas_lon,
                              in_catchment_shapefile, river_id,
                              in_connectivity_file, out_weight_table,
                              file_geodatabase, area_id)
Create Weight Table for NLDAS, GLDAS grids as well as for 2D Joules or
LIS grids.

Parameters
----------
in_ldas_nc: str
    Path to the land surface model NetCDF grid.
in_nc_lon_var: str
    The variable name in the NetCDF file for the longitude.
in_nc_lat_var: str
    The variable name in the NetCDF file for the latitude.
in_catchment_shapefile: str
    Path to the Catchment shapefile.
river_id: str
    The name of the field with the river ID
    (Ex. 'DrainLnID' or 'LINKNO').
in_connectivity_file: str
    The path to the RAPID connectivity file.
out_weight_table: str
    The path to the output weight table file.
area_id: str, optional
    The name of the field with the area of each catchment stored in
    meters squared. Default is to calculate the area.
file_geodatabase: str, optional
    Path to the file geodatabase. If you use this option,
    in_drainage_line is the name of the stream network feature class.
    (WARNING: Not always stable with GDAL.)

Example:

.. code:: python

    from RAPIDpy.gis.weight import CreateWeightTableLDAS

    CreateWeightTableLDAS(
        in_ldas_nc='/path/to/runoff_grid.nc',
        in_nc_lon_var="lon_110",
        in_nc_lat_var="lat_110",
        in_catchment_shapefile='/path/to/catchment.shp',
        river_id='LINKNO',
        in_connectivity_file='/path/to/rapid_connect.csv',
        out_weight_table='/path/to/ldas_weight.csv',
    )
def after(self, i, sibling, name=None):
    self.parent._insert(sibling, idx=self._own_index + 1 + i, name=name)
    return self
Adds siblings after the current tag.
def get_named_parent(decl):
    if not decl:
        return None
    parent = decl.parent
    while parent and (not parent.name or parent.name == '::'):
        parent = parent.parent
    return parent
Returns a reference to a named parent declaration.

Args:
    decl (declaration_t): the child declaration

Returns:
    declaration_t: the declaration or None if not found.
def exec_background(controller, cmd, *args):
    controller.logger.info("Executing in the background: {0} {1}",
                           cmd, " ".join(args))
    try:
        subprocess.Popen([cmd] + list(args),
                         stdout=open(os.devnull, "wb"),
                         stderr=open(os.devnull, "wb"))
    except OSError as err:
        controller.logger.error("Failed to execute process: {0}", err)
Executes a subprocess in the background.
def quantiles(data, nbins_or_partition_bounds):
    return apply_along_axis(
        qcut,
        1,
        data,
        q=nbins_or_partition_bounds,
        labels=False,
    )
Compute rowwise array quantiles on an input.
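A small sketch, assuming ``apply_along_axis`` is numpy's and ``qcut`` is pandas':

>>> import numpy as np
>>> quantiles(np.array([[1.0, 2.0, 3.0, 4.0]]), 2)
array([[0, 0, 1, 1]])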
def get_config(config_spec):
    config_file = None
    if config_spec.startswith("http"):
        config_file = urllib.urlopen(config_spec)
    else:
        config_file = open(config_spec)
    config = json.load(config_file)
    try:
        config_file.close()
    except:
        pass
    return config
Like get_json_config but does not parse result as JSON
def func_str(func, args=[], kwargs={}, type_aliases=[], packed=False,
             packkw=None, truncate=False):
    import utool as ut
    truncatekw = {}
    argrepr_list = ([] if args is None else
                    ut.get_itemstr_list(args, nl=False, truncate=truncate,
                                        truncatekw=truncatekw))
    kwrepr_list = ([] if kwargs is None else
                   ut.dict_itemstr_list(kwargs, explicit=True, nl=False,
                                        truncate=truncate,
                                        truncatekw=truncatekw))
    repr_list = argrepr_list + kwrepr_list
    argskwargs_str = ', '.join(repr_list)
    _str = '%s(%s)' % (meta_util_six.get_funcname(func), argskwargs_str)
    if packed:
        packkw_ = dict(textwidth=80, nlprefix=' ', break_words=False)
        if packkw is not None:
            packkw_.update(packkw)  # fixed: originally updated with itself
        _str = packstr(_str, **packkw_)
    return _str
string representation of function definition

Returns:
    str: a representation of func with args, kwargs, and type_aliases

Args:
    func (function):
    args (list): argument values (default = [])
    kwargs (dict): kwargs values (default = {})
    type_aliases (list): (default = [])
    packed (bool): (default = False)
    packkw (None): (default = None)

Returns:
    str: func_str

CommandLine:
    python -m utool.util_str --exec-func_str

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_str import *  # NOQA
    >>> func = byte_str
    >>> args = [1024, 'MB']
    >>> kwargs = dict(precision=2)
    >>> type_aliases = []
    >>> packed = False
    >>> packkw = None
    >>> _str = func_str(func, args, kwargs, type_aliases, packed, packkw)
    >>> result = _str
    >>> print(result)
    byte_str(1024, 'MB', precision=2)
def run(self):
    self.timer = t.Thread(target=self.report_spans)
    self.timer.daemon = True
    self.timer.name = "Instana Span Reporting"
    self.timer.start()
Spawn a background thread to periodically report queued spans.
def speckleRange(self, value):
    if value >= 0:
        self._speckle_range = value
    else:
        raise InvalidSpeckleRangeError("Speckle range cannot be negative.")
    self._replace_bm()
Set private ``_speckle_range`` and reset ``_block_matcher``.
def direct(ctx, path):
    try:
        url = make_url(ctx.obj['RWS'].base_url, path)
        resp = requests.get(url,
                            auth=HTTPBasicAuth(ctx.obj['USERNAME'],
                                               ctx.obj['PASSWORD']))
        click.echo(resp.text)
    except RWSException as e:
        click.echo(e.message)
    except requests.exceptions.HTTPError as e:
        click.echo(e.message)
Make direct call to RWS, bypassing rwslib
def register_func_list(self, func_and_handler):
    for func, handler in func_and_handler:
        self._function_dispatch.register(func, handler)
    self.dispatch.cache_clear()
register a function to determine if the handler should be used for the type
def remove_from_gallery(self):
    url = self._imgur._base_url + "/3/gallery/{0}".format(self.id)
    self._imgur._send_request(url, needs_auth=True, method='DELETE')
    if isinstance(self, Image):
        item = self._imgur.get_image(self.id)
    else:
        item = self._imgur.get_album(self.id)
    _change_object(self, item)
    return self
Remove this image from the gallery.
def add_crs(op, element, **kwargs):
    return element.map(
        lambda x: convert_to_geotype(x, kwargs.get('crs')), Element)
Converts any elements in the input to their equivalent geotypes if given a coordinate reference system.
def is_multifile_object_without_children(self, location: str) -> bool:
    if isdir(location):
        return len(self.find_multifile_object_children(location)) == 0
    else:
        if exists(location):
            return True
        else:
            return False
Returns True if an item with this location is present as a multifile
object without children. For this implementation, this means that there
is a file with the appropriate name but without extension.

:param location:
:return:
def update(self, new_email_address, name, access_level, password=None):
    params = {"email": self.email_address}
    body = {
        "EmailAddress": new_email_address,
        "Name": name,
        "AccessLevel": access_level,
        "Password": password}
    response = self._put("/clients/%s/people.json" % self.client_id,
                         body=json.dumps(body), params=params)
    self.email_address = new_email_address
Updates the details for a person. Password is optional and is only updated if supplied.
def get_balance(self):
    self.br.open(self.MOBILE_WEB_URL % {'accountno': self.account})
    try:
        self.br.find_link(text='Register')
        raise InvalidAccountException
    except mechanize.LinkNotFoundError:
        pass
    self.br.follow_link(text='My sarafu')
    self.br.follow_link(text='Balance Inquiry')
    self.br.select_form(nr=0)
    self.br['pin'] = self.pin
    r = self.br.submit().read()
    if re.search(r'Invalid PIN', r):
        raise AuthDeniedException
    if re.search(r'Error occured', r):
        raise RequestErrorException
    match = re.search(r'Your balance is TSH (?P<balance>[\d\.]+)', r)
    if match:
        return match.group('balance')
Retrieves the balance for the configured account
def spark_string(ints):
    ticks = u'▁▂▃▅▆▇'
    ints = [i for i in ints if type(i) == int]
    if len(ints) == 0:
        return ""
    step = (max(ints) / float(len(ticks) - 1)) or 1
    return u''.join(
        ticks[int(round(i / step))] if type(i) == int else u'.'
        for i in ints)
Returns a spark string from given iterable of ints.
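A quick illustration (hypothetical values; the maximum maps to the top tick):

>>> print(spark_string([1, 5, 22, 13, 5]))
▁▂▇▅▂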
def _fix_lsm_bitspersample(self, parent):
    if self.code != 258 or self.count != 2:
        return
    log.warning('TiffTag %i: correcting LSM bitspersample tag', self.code)
    value = struct.pack('<HH', *self.value)
    self.valueoffset = struct.unpack('<I', value)[0]
    parent.filehandle.seek(self.valueoffset)
    self.value = struct.unpack('<HH', parent.filehandle.read(4))
Correct LSM bitspersample tag. Old LSM writers may use a separate region for two 16-bit values, although they fit into the tag value element of the tag.
def _add_flags(flags, new_flags):
    flags = _get_flags(flags)
    new_flags = _get_flags(new_flags)
    return flags | new_flags
Combine ``flags`` and ``new_flags``
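A hedged sketch, assuming ``_get_flags`` normalizes flag names or ints to ``re`` flag values (its exact contract isn't shown here):

>>> import re
>>> _add_flags('IGNORECASE', 'MULTILINE') == re.IGNORECASE | re.MULTILINE
True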
def add_intf_router(self, rout_id, tenant_id, subnet_lst):
    try:
        for subnet_id in subnet_lst:
            body = {'subnet_id': subnet_id}
            intf = self.neutronclient.add_interface_router(rout_id,
                                                           body=body)
            intf.get('port_id')
    except Exception as exc:
        LOG.error("Failed to create router intf ID %(id)s, Exc %(exc)s",
                  {'id': rout_id, 'exc': str(exc)})
        return False
    return True
Add the interfaces to a router.
def open(self):
    self._geometry.lid_status = self._module.open()
    self._ctx.deck.recalculate_high_z()
    return self._geometry.lid_status
Opens the lid
def absolute(parser, token):
    node = url(parser, token)
    return AbsoluteUrlNode(
        view_name=node.view_name,
        args=node.args,
        kwargs=node.kwargs,
        asvar=node.asvar
    )
Returns a full absolute URL based on the request host. This template tag takes exactly the same parameters as the url template tag.
def read(tex):
    if isinstance(tex, str):
        tex = tex
    else:
        tex = ''.join(itertools.chain(*tex))
    buf, children = Buffer(tokenize(tex)), []
    while buf.hasNext():
        content = read_tex(buf)
        if content is not None:
            children.append(content)
    return TexEnv('[tex]', children), tex
Read and parse all LaTeX source.

:param Union[str,iterable] tex: LaTeX source
:return TexEnv: the global environment
def splunk(cmd, user='admin', passwd='changeme'):
    return sudo('/opt/splunkforwarder/bin/splunk {c} -auth {u}:{p}'
                .format(c=cmd, u=user, p=passwd))
Authenticated call to splunk
def _sample_actions(
        self,
        state: Sequence[tf.Tensor]
) -> Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]:
    default = self.compiler.compile_default_action(self.batch_size)
    bound_constraints = self.compiler.compile_action_bound_constraints(
        state)
    action = self._sample_action(bound_constraints, default)
    n, action, checking = self._check_preconditions(
        state, action, bound_constraints, default)
    return action, n, checking
Returns sampled action fluents and tensors related to the sampling.

Args:
    state (Sequence[tf.Tensor]): A list of state fluents.

Returns:
    Tuple[Sequence[tf.Tensor], tf.Tensor, tf.Tensor]: A tuple with
    action fluents, an integer tensor for the number of samples, and a
    boolean tensor for checking all action preconditions.
def parse_partlist(str):
    lines = str.strip().splitlines()
    lines = filter(len, lines)
    hind = header_index(lines)
    if hind is None:
        log.debug('empty partlist found')
        return ([], [])
    header_line = lines[hind]
    header = header_line.split(' ')
    header = filter(len, header)
    positions = [header_line.index(x) for x in header]
    header = [x.strip().split()[0].lower() for x in header]
    data_lines = lines[hind + 1:]

    def parse_data_line(line):
        y = [(h, line[pos1:pos2].strip())
             for h, pos1, pos2 in zip(header, positions,
                                      positions[1:] + [1000])]
        return dict(y)

    data = [parse_data_line(x) for x in data_lines]
    return (header, data)
parse partlist text delivered by eagle. header is converted to lowercase

:param str: input string
:rtype: tuple of header list and dict list:
    (['part', 'value', ..], [{'part': 'C1', 'value': '1n'}, ..])
def _parse_features(cls, feat_response):
    features = {}
    if feat_response.split("-")[0] == "211":
        for line in feat_response.splitlines():
            if line.startswith(" "):
                key, _, value = line[1:].partition(" ")
                features[key] = value
    return features
Parse a dict of features from FTP feat response.
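A doctest-style sketch with a typical 211 FEAT reply (the server output is hypothetical; ``cls`` is unused, so ``None`` is passed when exercising the raw function):

>>> resp = '211-Features:\n MDTM\n REST STREAM\n SIZE\n211 End'
>>> _parse_features(None, resp)
{'MDTM': '', 'REST': 'STREAM', 'SIZE': ''}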
def _on_hid_pnp(self, w_param, l_param):
    "Process WM_DEVICECHANGE system messages"
    new_status = "unknown"
    if w_param == DBT_DEVICEARRIVAL:
        notify_obj = None
        if int(l_param):
            notify_obj = DevBroadcastDevInterface.from_address(l_param)
        if notify_obj and \
                notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE:
            new_status = "connected"
    elif w_param == DBT_DEVICEREMOVECOMPLETE:
        notify_obj = None
        if int(l_param):
            notify_obj = DevBroadcastDevInterface.from_address(l_param)
        if notify_obj and \
                notify_obj.dbcc_devicetype == DBT_DEVTYP_DEVICEINTERFACE:
            new_status = "disconnected"
    if new_status != "unknown" and new_status != self.current_status:
        self.current_status = new_status
        self.on_hid_pnp(self.current_status)
    return True
Process WM_DEVICECHANGE system messages
def check_token_auth(self, token):
    serializer = self.get_signature()
    try:
        data = serializer.loads(token)
    except BadSignature:
        log.warning('Received bad token signature')
        return False, None
    if data['username'] not in self.users.users():
        log.warning('Token auth signed message, but invalid user %s',
                    data['username'])
        return False, None
    if data['hashhash'] != self.get_hashhash(data['username']):
        log.warning('Token and password do not match, %s '
                    'needs to regenerate token', data['username'])
        return False, None
    return True, data['username']
Check to see who this is and if their token gets them into the system.
def reduce_alias_table(alias_table):
    for alias in alias_table.sections():
        if alias_table.has_option(alias, 'command'):
            yield (alias, alias_table.get(alias, 'command'))
Reduce the alias table to a tuple that contains the alias and the command
that the alias points to.

Args:
    alias_table: The alias table to be reduced.

Yields:
    A tuple that contains the alias and the command that the alias
    points to.
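A hedged sketch treating the alias table as a standard ``configparser.ConfigParser``, which matches the ``sections()``/``has_option()``/``get()`` calls above:

>>> import configparser
>>> table = configparser.ConfigParser()
>>> table.read_string('[ll]\ncommand = list --long\n')
>>> list(reduce_alias_table(table))
[('ll', 'list --long')]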
def get_activities(self, count=10, since=None, style='summary',
                   limit=None):
    params = {}
    if since:
        params.update(fromDate=to_timestamp(since))
    parts = ['my', 'activities', 'search']
    if style != 'summary':
        parts.append(style)
    url = self._build_url(*parts)
    return islice(self._iter(url, count, **params), limit)
Iterate over all activities, from newest to oldest.

:param count: The number of results to retrieve per page. If set to
    ``None``, pagination is disabled.
:param since: Return only activities since this date. Can be either a
    timestamp or a datetime object.
:param style: The type of records to return. May be one of 'summary',
    'briefs', 'ids', or 'extended'.
:param limit: The maximum number of activities to return for the given
    query.
def _get_minutes(self, duration):
    if isinstance(duration, datetime.datetime):
        from_now = (duration - datetime.datetime.now()).total_seconds()
        from_now = math.ceil(from_now / 60)
        if from_now > 0:
            return from_now
        return
    return duration
Calculate the number of minutes with the given duration.

:param duration: The duration
:type duration: int or datetime

:rtype: int or None
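An illustrative sketch (``cache`` is a hypothetical owning instance; on Python 3, ``math.ceil`` returns an int):

>>> cache._get_minutes(10)   # plain int durations pass through
10
>>> import datetime
>>> cache._get_minutes(datetime.datetime.now()
...                    + datetime.timedelta(minutes=5))
5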
def is_union(declaration):
    if not is_class(declaration):
        return False
    decl = class_traits.get_declaration(declaration)
    return decl.class_type == class_declaration.CLASS_TYPES.UNION
Returns True if declaration represents a C++ union.

Args:
    declaration (declaration_t): the declaration to be checked.

Returns:
    bool: True if declaration represents a C++ union
def parse_object(self, data):
    for key, value in data.items():
        if isinstance(value, (str, type(u''))) and \
                self.strict_iso_match.match(value):
            data[key] = dateutil.parser.parse(value)
    return data
Look for datetime-looking strings.
def is_suitable(self, request):
    if self.key_type:
        validation = KEY_TYPE_VALIDATIONS.get(self.get_type())
        return validation(request) if validation else None
    return True
Checks if key is suitable for given request according to key type and request's user agent.
def from_mmap(cls, fname):
    memmaped = joblib.load(fname, mmap_mode="r+")
    return cls(vocab=memmaped.vocab, vectors=memmaped.vectors)
Create a WordVectors class from a memory map.

Parameters
----------
fname : path to file

Returns
-------
WordVectors instance
def _get_business_hours_by_sec(self):
    if self._get_daytime_flag:
        # start and end fall on the same day
        dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
        until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
        return (until - dtstart).total_seconds()
    else:
        # the business day crosses midnight, so end on the next day
        dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
        until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
        return (until - dtstart).total_seconds()
Return business hours in a day by seconds.
def _keys_to_lower(self):
    for k in list(self.keys()):
        val = super(CaseInsensitiveDict, self).__getitem__(k)
        super(CaseInsensitiveDict, self).__delitem__(k)
        self.__setitem__(CaseInsensitiveStr(k), val)
Convert key set to lowercase.
def _build_resource(self):
    resource = {"name": self.name}
    if self.dns_name is not None:
        resource["dnsName"] = self.dns_name
    if self.description is not None:
        resource["description"] = self.description
    if self.name_server_set is not None:
        resource["nameServerSet"] = self.name_server_set
    return resource
Generate a resource for ``create`` or ``update``.
def toSparse(self):
    if self.isTransposed:
        values = np.ravel(self.toArray(), order='F')
    else:
        values = self.values
    indices = np.nonzero(values)[0]
    colCounts = np.bincount(indices // self.numRows)
    colPtrs = np.cumsum(np.hstack(
        (0, colCounts, np.zeros(self.numCols - colCounts.size))))
    values = values[indices]
    rowIndices = indices % self.numRows
    return SparseMatrix(self.numRows, self.numCols, colPtrs, rowIndices,
                        values)
Convert to SparseMatrix
def _get_clean_parameters(kwargs):
    return dict((k, v) for k, v in kwargs.items() if v is not None)
Clean the parameters by filtering out any parameters that have a None value.
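A one-line doctest illustration; note that falsy-but-not-None values such as 0 are kept:

>>> _get_clean_parameters({'a': 1, 'b': None, 'c': 0})
{'a': 1, 'c': 0}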
def get(self):
    if not self.thread_local_data:
        self.thread_local_data = threading.local()
    if not hasattr(self.thread_local_data, 'context'):
        self.thread_local_data.context = OrderedDict()
    return self.thread_local_data.context
Return a reference to a thread-specific context
def initinfo(self) -> Tuple[Union[float, int, bool], bool]:
    init = self.INIT
    if (init is not None) and hydpy.pub.options.usedefaultvalues:
        with Parameter.parameterstep('1d'):
            return self.apply_timefactor(init), True
    return variabletools.TYPE2MISSINGVALUE[self.TYPE], False
The actual initial value of the given parameter.

Some |Parameter| subclasses define another value for class attribute
`INIT` than |None| to provide a default value.

Let's define a parameter test class and prepare a function for
initialising it and connecting the resulting instance to a
|SubParameters| object:

>>> from hydpy.core.parametertools import Parameter, SubParameters
>>> class Test(Parameter):
...     NDIM = 0
...     TYPE = float
...     TIME = None
...     INIT = 2.0
>>> class SubGroup(SubParameters):
...     CLASSES = (Test,)
>>> def prepare():
...     subpars = SubGroup(None)
...     test = Test(subpars)
...     test.__hydpy__connect_variable2subgroup__()
...     return test

By default, making use of the `INIT` attribute is disabled:

>>> test = prepare()
>>> test
test(?)

Enable it through setting |Options.usedefaultvalues| to |True|:

>>> from hydpy import pub
>>> pub.options.usedefaultvalues = True
>>> test = prepare()
>>> test
test(2.0)

When no `INIT` attribute is defined, enabling
|Options.usedefaultvalues| has no effect, of course:

>>> del Test.INIT
>>> test = prepare()
>>> test
test(?)

For time-dependent parameter values, the `INIT` attribute is assumed
to be related to a |Parameterstep| of one day:

>>> test.parameterstep = '2d'
>>> test.simulationstep = '12h'
>>> Test.INIT = 2.0
>>> Test.TIME = True
>>> test = prepare()
>>> test
test(4.0)
>>> test.value
1.0
def wns_send_bulk_message(
        uri_list, message=None, xml_data=None, raw_data=None,
        application_id=None, **kwargs):
    res = []
    if uri_list:
        for uri in uri_list:
            r = wns_send_message(
                uri=uri, message=message, xml_data=xml_data,
                raw_data=raw_data, application_id=application_id, **kwargs)
            res.append(r)
    return res
WNS doesn't support bulk notification, so we loop through each uri.

:param uri_list: list: A list of uris the notification will be sent to.
:param message: str: The notification data to be sent.
:param xml_data: dict: A dictionary containing data to be converted to
    an xml tree.
:param raw_data: str: Data to be sent via a `raw` notification.
def server_reboot(host=None, admin_username=None, admin_password=None,
                  module=None):
    return __execute_cmd('serveraction powercycle',
                         host=host,
                         admin_username=admin_username,
                         admin_password=admin_password,
                         module=module)
Issues a power-cycle operation on the managed server. This action is
similar to pressing the power button on the system's front panel to
power down and then power up the system.

host
    The chassis host.
admin_username
    The username used to access the chassis.
admin_password
    The password used to access the chassis.
module
    The element to reboot on the chassis such as a blade. If not
    provided, the chassis will be rebooted.

CLI Example:

.. code-block:: bash

    salt dell dracr.server_reboot
    salt dell dracr.server_reboot module=server-1
def to_dataframe(self):
    keys = self.data[0].keys()
    column_list = []
    for k in keys:
        key_list = []
        for i in xrange(0, len(self.data)):
            key_list.append(self.data[i][k])
        column_list.append(key_list)
    df = DataFrame(np.asarray(column_list).transpose(), columns=keys)
    for i in xrange(0, df.shape[1]):
        if is_number(df.iloc[:, i]):
            df.iloc[:, i] = df.iloc[:, i].astype(float)
    return df
Reads the common format self.data and writes out to a dataframe.
def get_job_logs_from_workflow(workflow_id):
    query_result = (
        db.session.query(
            models.CrawlerJob.logs,
        )
        .join(
            models.CrawlerWorkflowObject,
            models.CrawlerJob.job_id == models.CrawlerWorkflowObject.job_id,
        )
        .filter(models.CrawlerWorkflowObject.object_id == workflow_id)
        .one_or_none()
    )
    if query_result is None:
        click.secho(
            "Workflow %s was not found, maybe it's not a crawl workflow?"
            % workflow_id,
            fg='yellow',
        )
        sys.exit(1)
    _show_file(
        file_path=query_result[0],
        header_name='Log',
    )
Retrieve the crawl logs from the workflow id.
def aggregate_detail(slug_list, with_data_table=False):
    r = get_r()
    metrics_data = []
    granularities = r._granularities()
    keys = ['seconds', 'minutes', 'hours', 'day', 'week', 'month', 'year']
    key_mapping = {gran: key for gran, key in zip(GRANULARITIES, keys)}
    keys = [key_mapping[gran] for gran in granularities]
    for slug, data in r.get_metrics(slug_list):
        values = [data[t] for t in keys]
        metrics_data.append((slug, values))
    return {
        'chart_id': "metric-aggregate-{0}".format("-".join(slug_list)),
        'slugs': slug_list,
        'metrics': metrics_data,
        'with_data_table': with_data_table,
        'granularities': [g.title() for g in keys],
    }
Template Tag to display multiple metrics.

* ``slug_list`` -- A list of slugs to display
* ``with_data_table`` -- if True, prints the raw data in a table.
def _coerce_consumer_group(consumer_group):
    if not isinstance(consumer_group, string_types):
        raise TypeError('consumer_group={!r} must be text'.format(
            consumer_group))
    if not isinstance(consumer_group, text_type):
        consumer_group = consumer_group.decode('utf-8')
    return consumer_group
Ensure that the consumer group is a text string.

:param consumer_group: :class:`bytes` or :class:`str` instance

:raises TypeError:
    when `consumer_group` is not :class:`bytes` or :class:`str`
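A doctest-style sketch (reprs shown as on Python 2, since the six-style ``string_types``/``text_type`` names suggest 2/3 compatibility):

>>> _coerce_consumer_group(b'my-group')   # bytes are decoded to text
u'my-group'
>>> _coerce_consumer_group(u'my-group')
u'my-group'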
def get_option_labels(self, typ, element):
    inter = self.get_typ_interface(typ)
    return inter.get_option_labels(element)
Return labels for each level of the option model.

The options returned by :meth:`RefobjInterface.fetch_options` is a
treemodel with ``n`` levels. Each level should get a label to describe
what it displays.

E.g. if you organize your options, so that the first level shows the
tasks, the second level shows the descriptors and the third one shows
the versions, then your labels should be:
``["Task", "Descriptor", "Version"]``.

:param typ: the typ of options. E.g. Asset, Alembic, Camera etc
:type typ: str
:param element: The element for which the options should be fetched.
:type element: :class:`jukeboxcore.djadapter.models.Asset` |
    :class:`jukeboxcore.djadapter.models.Shot`
:returns: label strings for all levels
:rtype: list
:raises: None
def _align_method_SERIES(left, right, align_asobject=False):
    if isinstance(right, ABCSeries):
        if not left.index.equals(right.index):
            if align_asobject:
                left = left.astype(object)
                right = right.astype(object)
            left, right = left.align(right, copy=False)
    return left, right
align lhs and rhs Series
def add_token_without_limits(
        self,
        token_address: TokenAddress,
) -> Address:
    return self._add_token(
        token_address=token_address,
        additional_arguments=dict(),
    )
Register token of `token_address` with the token network. This applies for versions prior to 0.13.0 of raiden-contracts, since limits were hardcoded into the TokenNetwork contract.
def add_to_class(self, cls, name):
    self.model_class = cls
    setattr(cls, name, PhoneNumberDescriptor(self))
    self._bound = True
Overrides the base class to add a PhoneNumberDescriptor rather than the standard FieldDescriptor.
def join(self, timeout=None):
    return super(_StoppableDaemonThread, self).join(
        timeout or self.JOIN_TIMEOUT)
Joins with a default timeout exposed on the class.