text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
def hash32(data: Any, seed=0) -> int: """ Non-cryptographic, deterministic, fast hash. Args: data: data to hash seed: seed Returns: signed 32-bit integer """ with MultiTimerContext(timer, TIMING_HASH): c_data = to_str(data) if mmh3: return mmh3.hash(c_data, seed=seed) py_data = to_bytes(c_data) py_unsigned = murmur3_x86_32(py_data, seed=seed) return twos_comp_to_signed(py_unsigned, n_bits=32)
0.002012
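A minimal usage sketch for the hash above, assuming the mmh3 package is installed; mmh3.hash returns a signed 32-bit integer by default, which matches the contract in the docstring.

import mmh3  # assumed installed: pip install mmh3

# Deterministic: the same input and seed always give the same value.
h1 = mmh3.hash("hello world", seed=0)
h2 = mmh3.hash("hello world", seed=0)
assert h1 == h2
assert -2**31 <= h1 < 2**31  # signed 32-bit range
print(h1)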
def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params={}): """ Checks parameters for pairwise alignment, returns alignment. Code from Greg Caporaso. """ seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False) try: s1, s2 = seqs.values() except ValueError: raise ValueError( "Pairwise aligning of seqs requires exactly two seqs.") try: gap_open = params['gap_open'] except KeyError: gap_open = 5 try: gap_extend = params['gap_extend'] except KeyError: gap_extend = 2 try: score_matrix = params['score_matrix'] except KeyError: score_matrix = make_dna_scoring_dict( match=1, transition=-1, transversion=-1) return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)
0.001188
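The chain of try/except KeyError blocks above can be written more compactly with dict.get; a behavior-preserving sketch (function name hypothetical), keeping the score matrix lazy so make_dna_scoring_dict is only built when the key is absent:

# Sketch: dict.get replaces the try/except KeyError chains above.
def get_alignment_params(params):
    gap_open = params.get('gap_open', 5)
    gap_extend = params.get('gap_extend', 2)
    score_matrix = params.get('score_matrix')  # build the default lazily if None
    return gap_open, gap_extend, score_matrix

print(get_alignment_params({'gap_open': 10}))  # (10, 2, None)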
def get_pltpat(self, plt_ext="svg"): """Return plot filename pattern for the given extension: {BASE}.{ext} {BASE}_pruned.{ext} {BASE}_upper_pruned.{ext}""" if self.ntplt.desc == "": return ".".join(["{BASE}", plt_ext]) return "".join(["{BASE}_", self.ntplt.desc, ".", plt_ext])
0.010989
def run(func): """Execute the provided function if there are no subcommands""" @defaults.command(help='Run the service') @click.pass_context def runserver(ctx, *args, **kwargs): if (ctx.parent.invoked_subcommand and ctx.command.name != ctx.parent.invoked_subcommand): return # work around the fact that tornado's parse_command_line can't # cope with having subcommands / positional arguments. sys.argv = [sys.argv[0]] + [a for a in sys.argv if a[0] == '-'] sys.exit(func()) return runserver
0.001718
def list_jobs(config, *, status=JobStatus.Active, filter_by_type=None, filter_by_worker=None): """ Return a list of Celery jobs. Args: config (Config): Reference to the configuration object from which the settings are retrieved. status (JobStatus): The status of the jobs that should be returned. filter_by_type (list): Restrict the returned jobs to the types in this list. filter_by_worker (list): Only return jobs that were registered, reserved or are running on the workers given in this list of worker names. Using this option will increase the performance. Returns: list: A list of JobStats. """ celery_app = create_app(config) # option to filter by the worker (improves performance) if filter_by_worker is not None: inspect = celery_app.control.inspect( destination=filter_by_worker if isinstance(filter_by_worker, list) else [filter_by_worker]) else: inspect = celery_app.control.inspect() # get active, registered or reserved jobs if status == JobStatus.Active: job_map = inspect.active() elif status == JobStatus.Registered: job_map = inspect.registered() elif status == JobStatus.Reserved: job_map = inspect.reserved() elif status == JobStatus.Scheduled: job_map = inspect.scheduled() else: job_map = None if job_map is None: return [] result = [] for worker_name, jobs in job_map.items(): for job in jobs: try: job_stats = JobStats.from_celery(worker_name, job, celery_app) if (filter_by_type is None) or (job_stats.type == filter_by_type): result.append(job_stats) except JobStatInvalid: pass return result
0.002132
def render_children(self, element): """ Recursively renders child elements. Joins the rendered strings with no space in between. If newlines / spaces are needed between elements, add them in their respective templates, or override this function in the renderer subclass, so that whitespace won't seem to appear magically for anyone reading your program. :param element: a branch node that has a children attribute. """ rendered = [self.render(child) for child in element.children] return ''.join(rendered)
0.003378
def query(self, w, ed=1): # Can only handle ed=1 """ Finds the fuzzy matches (within edit distance 1) of w from words """ assert ed <= self._ed if ed == 0: return [w] if w in self._L else [''] w = str(w) n = len(w) prefix, suffix = w[:n // 2], w[n // 2:][::-1] options_w_prefix = self._L.keys(prefix) options_w_suffix = [x[::-1] for x in self._R.iterkeys(suffix)] return [ _w for _w in set(itertools.chain(options_w_prefix, options_w_suffix)) if abs(len(_w) - len(w)) <= 1 and lvdistance(str(_w), str(w), 1) <= 1 ]
0.006061
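The half-split above relies on a standard edit-distance-1 fact: a single edit leaves either the first half of w or the second half of w untouched, so candidates can be gathered from a prefix index and a reversed-suffix index and then verified. A self-contained sketch using plain sets in place of the tries:

def ed1_candidates(w, words):
    # One edit leaves the first half of w intact (shared prefix) or the
    # second half intact (shared suffix); collect both candidate pools.
    n = len(w)
    prefix, suffix = w[:n // 2], w[n // 2:]
    with_prefix = {x for x in words if x.startswith(prefix)}
    with_suffix = {x for x in words if x.endswith(suffix)}
    return with_prefix | with_suffix  # still needs an exact distance check

print(sorted(ed1_candidates("cat", {"cap", "cut", "dog", "scat"})))
# ['cap', 'cut', 'scat']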
def delete(config, username, type): """Delete an LDAP user.""" client = Client() client.prepare_connection() user_api = API(client) user_api.delete(username, type)
0.009852
def comment_unvote(self, comment_id): """Lets you unvote a specific comment (Requires login). Parameters: comment_id (int): """ return self._get('posts/{0}/unvote.json'.format(comment_id), method='POST', auth=True)
0.007042
def lower_band(close_data, high_data, low_data, period): """ Lower Band. Formula: LB = CB - BW """ cb = center_band(close_data, high_data, low_data, period) bw = band_width(high_data, low_data, period) lb = cb - bw return lb
0.003831
def checkReference(self, reference): """ Check the reference for security. Tries to avoid any characters necessary for doing a script injection. """ pattern = re.compile(r'[\s,;"\'&\\]') if pattern.findall(reference.strip()): return False return True
0.006289
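The check above boils down to a single character-class regex; a standalone demonstration of which references pass:

import re

pattern = re.compile(r'[\s,;"\'&\\]')

# Rejects anything carrying characters usable for script injection.
for ref in ("ENSG00000139618", "x; rm -rf /", 'a"b'):
    print(ref, "->", not pattern.findall(ref.strip()))
# ENSG00000139618 -> True, the other two -> False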
def intervalsubtract(left, right, lstart='start', lstop='stop', rstart='start', rstop='stop', lkey=None, rkey=None, include_stop=False): """ Subtract intervals in the right hand table from intervals in the left hand table. """ assert (lkey is None) == (rkey is None), \ 'facet key field must be provided for both or neither table' return IntervalSubtractView(left, right, lstart=lstart, lstop=lstop, rstart=rstart, rstop=rstop, lkey=lkey, rkey=rkey, include_stop=include_stop)
0.005
def binPack(self, jobShapes): """Pack a list of jobShapes into the fewest nodes reasonable. Can be run multiple times.""" # TODO: Check for redundancy with batchsystems.mesos.JobQueue() sorting logger.debug('Running bin packing for node shapes %s and %s job(s).', self.nodeShapes, len(jobShapes)) # Sort in descending order from largest to smallest. The FFD like-strategy will pack the # jobs in order from longest to shortest. jobShapes.sort() jobShapes.reverse() assert len(jobShapes) == 0 or jobShapes[0] >= jobShapes[-1] for jS in jobShapes: self.addJobShape(jS)
0.005961
def string_avg(strings, binary=True): """ Takes a list of strings of equal length and returns a string containing the most common value from each index in the string. Optional argument: binary - a boolean indicating whether or not to treat strings as binary numbers (fill in leading zeros if lengths differ). """ if binary: # Assume this is a binary number and fill leading zeros strings = deepcopy(strings) longest = len(max(strings, key=len)) for i in range(len(strings)): while len(strings[i]) < longest: split_string = strings[i].split("b") strings[i] = "0b0" + split_string[1] avg = "" for i in (range(len(strings[0]))): opts = [] for s in strings: opts.append(s[i]) avg += max(set(opts), key=opts.count) return avg
0.001149
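The core of string_avg is a column-wise majority vote; a standalone sketch (assumes equal-length inputs, which the zero-padding above guarantees for binary strings):

def column_majority(strings):
    # For each index, keep the most common character across all strings.
    avg = ""
    for column in zip(*strings):
        avg += max(set(column), key=column.count)
    return avg

print(column_majority(["0b101", "0b001", "0b111"]))  # 0b101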
def get_steam(): """ Returns a Steam object representing the current Steam installation on the user's computer. If the user doesn't have Steam installed, returns None. """ # Helper function which checks if the potential userdata directory exists # and returns a new Steam instance with that userdata directory if it does. # If the directory doesn't exist it returns None instead helper = lambda udd: Steam(udd) if os.path.exists(udd) else None # For both OS X and Linux, Steam stores its userdata in a consistent # location. plat = platform.system() if plat == 'Darwin': return helper(paths.default_osx_userdata_path()) if plat == 'Linux': return helper(paths.default_linux_userdata_path()) # Windows is a bit trickier. The userdata directory is stored in the Steam # installation directory, meaning that theoretically it could be anywhere. # Luckily, Valve stores the installation directory in the registry, so it's # still possible for us to figure out automatically if plat == 'Windows': possible_dir = winutils.find_userdata_directory() # Unlike the others, `possible_dir` might be None (if something odd # happened with the registry) return helper(possible_dir) if possible_dir is not None else None # This should never be hit. Windows, OS X, and Linux should be the only # supported platforms. # TODO: Add logging here so that the user (developer) knows that something # odd happened. return None
0.014946
def connect(*, dsn, autocommit=False, ansi=False, timeout=0, loop=None, executor=None, echo=False, after_created=None, **kwargs): """Accepts an ODBC connection string and returns a new Connection object. The connection string can be passed as the string `str`, as a list of keywords, or a combination of the two. Any keywords except autocommit, ansi, and timeout are simply added to the connection string. :param autocommit bool: False or zero, the default; if True or non-zero, the connection is put into ODBC autocommit mode and statements are committed automatically. :param ansi bool: By default, pyodbc first attempts to connect using the Unicode version of SQLDriverConnectW. If the driver returns IM001 indicating it does not support the Unicode version, the ANSI version is tried. :param timeout int: An integer login timeout in seconds, used to set the SQL_ATTR_LOGIN_TIMEOUT attribute of the connection. The default is 0 which means the database's default timeout, if any, is used. :param after_created callable: supports customized configuration after the connection is established. Must be an async unary function, or leave it as None. """ return _ContextManager(_connect(dsn=dsn, autocommit=autocommit, ansi=ansi, timeout=timeout, loop=loop, executor=executor, echo=echo, after_created=after_created, **kwargs))
0.000656
def lindbladR(self,OmegaP,m=2,**kwargs): """ NAME: lindbladR PURPOSE: calculate the radius of a Lindblad resonance INPUT: OmegaP - pattern speed (can be Quantity) m= order of the resonance (as in m(O-Op)=kappa (negative m for outer) use m='corotation' for corotation +scipy.optimize.brentq xtol,rtol,maxiter kwargs OUTPUT: radius of Lindblad resonance, None if there is no resonance HISTORY: 2011-10-09 - Written - Bovy (IAS) """ if _APY_LOADED and isinstance(OmegaP,units.Quantity): OmegaP= OmegaP.to(1/units.Gyr).value/freq_in_Gyr(self._vo,self._ro) return lindbladR(self,OmegaP,m=m,use_physical=False,**kwargs)
0.026936
def writeMNIST(sc, input_images, input_labels, output, format, num_partitions): """Writes MNIST image/label vectors into parallelized files on HDFS""" # load MNIST gzip into memory with open(input_images, 'rb') as f: images = numpy.array(mnist.extract_images(f)) with open(input_labels, 'rb') as f: if format == "csv2": labels = numpy.array(mnist.extract_labels(f, one_hot=False)) else: labels = numpy.array(mnist.extract_labels(f, one_hot=True)) shape = images.shape print("images.shape: {0}".format(shape)) # 60000 x 28 x 28 print("labels.shape: {0}".format(labels.shape)) # 60000 x 10 # create RDDs of vectors imageRDD = sc.parallelize(images.reshape(shape[0], shape[1] * shape[2]), num_partitions) labelRDD = sc.parallelize(labels, num_partitions) output_images = output + "/images" output_labels = output + "/labels" # save RDDs as specific format if format == "pickle": imageRDD.saveAsPickleFile(output_images) labelRDD.saveAsPickleFile(output_labels) elif format == "csv": imageRDD.map(toCSV).saveAsTextFile(output_images) labelRDD.map(toCSV).saveAsTextFile(output_labels) elif format == "csv2": imageRDD.map(toCSV).zip(labelRDD).map(lambda x: str(x[1]) + "|" + x[0]).saveAsTextFile(output) else: # format == "tfr": tfRDD = imageRDD.zip(labelRDD).map(lambda x: (bytearray(toTFExample(x[0], x[1])), None)) # requires: --jars tensorflow-hadoop-1.0-SNAPSHOT.jar tfRDD.saveAsNewAPIHadoopFile(output, "org.tensorflow.hadoop.io.TFRecordFileOutputFormat", keyClass="org.apache.hadoop.io.BytesWritable", valueClass="org.apache.hadoop.io.NullWritable")
0.01451
def get_utc_iso_date(date_str): """Convert date str into an iso-formatted UTC date str, i.e.: yyyymmddhhmmss :type date_str: str :param date_str: date string to be parsed. :rtype: str :returns: iso-formatted UTC date str. """ try: utc_tuple = dateutil.parser.parse(date_str).utctimetuple() except ValueError: try: date_str = ' '.join(date_str.split(' ')[:-1]) utc_tuple = dateutil.parser.parse(date_str).utctimetuple() except ValueError: date_str = ''.join(date_str.split('(')[:-1]).strip(')') utc_tuple = dateutil.parser.parse(date_str).utctimetuple() date_object = datetime.datetime.fromtimestamp(time.mktime(utc_tuple)) utc_date_str = ''.join([x for x in date_object.isoformat() if x not in '-T:']) return utc_date_str
0.002372
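A sketch of the happy path above using dateutil, formatting the UTC time tuple directly instead of round-tripping through time.mktime (which interprets the tuple in local time):

import dateutil.parser

dt = dateutil.parser.parse("2019-03-01T12:30:45+02:00")
utc = dt.utctimetuple()  # normalize to UTC fields
print("%04d%02d%02d%02d%02d%02d" % utc[:6])  # 20190301103045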
def sc_dist(self, viewer, event, msg=True): """Interactively change the color distribution algorithm by scrolling. """ direction = self.get_direction(event.direction) self._cycle_dist(viewer, msg, direction=direction) return True
0.00722
def aggregate(self): """ Aggregate all merges into the target branch. If the target_dir doesn't exist, create an empty git repo; otherwise clean it, add all remotes, and merge all merges. """ logger.info('Start aggregation of %s', self.cwd) target_dir = self.cwd is_new = not os.path.exists(target_dir) if is_new: self.init_repository(target_dir) self._switch_to_branch(self.target['branch']) for r in self.remotes: self._set_remote(**r) self.fetch() merges = self.merges if not is_new: # reset to the first merge origin = merges[0] merges = merges[1:] self._reset_to(origin["remote"], origin["ref"]) for merge in merges: self._merge(merge) self._execute_shell_command_after() logger.info('End aggregation of %s', self.cwd)
0.002137
def from_variant_and_transcript( cls, variant, transcript, context_size): """ Extracts the reference sequence around a variant locus on a particular transcript and determines the reading frame at the start of that sequence context. Parameters ---------- variant : varcode.Variant transcript : pyensembl.Transcript context_size : int Returns SequenceKeyWithReadingFrame object or None if Transcript lacks coding sequence, protein sequence or annotated start/stop codons. """ if not transcript.contains_start_codon: logger.info( "Expected transcript %s for variant %s to have start codon", transcript.name, variant) return None if not transcript.contains_stop_codon: logger.info( "Expected transcript %s for variant %s to have stop codon", transcript.name, variant) return None if not transcript.protein_sequence: logger.info( "Expected transcript %s for variant %s to have protein sequence", transcript.name, variant) return None sequence_key = ReferenceSequenceKey.from_variant_and_transcript( variant=variant, transcript=transcript, context_size=context_size) if sequence_key is None: logger.info( "No sequence key for variant %s on transcript %s", variant, transcript.name) return None return cls.from_variant_and_transcript_and_sequence_key( variant=variant, transcript=transcript, sequence_key=sequence_key)
0.001603
def channels_twitter_ticket_create(self, data, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/twitter_channel#create-ticket-from-tweet" api_path = "/api/v2/channels/twitter/tickets.json" return self.call(api_path, method="POST", data=data, **kwargs)
0.010453
def com_google_fonts_check_repo_dirname_match_nameid_1(fonts, gfonts_repo_structure): """Directory name in GFonts repo structure must match NameID 1 of the regular.""" from fontTools.ttLib import TTFont from fontbakery.utils import (get_name_entry_strings, get_absolute_path, get_regular) regular = get_regular(fonts) if not regular: yield FAIL, "The font seems to lack a regular." entry = get_name_entry_strings(TTFont(regular), NameID.FONT_FAMILY_NAME)[0] expected = entry.lower() expected = "".join(expected.split(' ')) expected = "".join(expected.split('-')) license, familypath, filename = get_absolute_path(regular).split(os.path.sep)[-3:] if familypath == expected: yield PASS, "OK" else: yield FAIL, (f"Family name on the name table ('{entry}') does not match" f" directory name in the repo structure ('{familypath}')." f" Expected '{expected}'.")
0.013321
def needsquoting(c, quotetabs, header): """Decide whether a particular character needs to be quoted. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521. """ if c in ' \t': return quotetabs # if header, we have to escape _ because _ is used to escape space if c == '_': return header return c == ESCAPE or not (' ' <= c <= '~')
0.002062
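A standalone version of the rule above; in the stdlib quopri module ESCAPE is '=', so '=' and anything outside printable ASCII must be quoted:

ESCAPE = '='

def needs_quoting(c, quotetabs=False, header=False):
    if c in ' \t':
        return quotetabs
    if c == '_':
        return header  # '_' encodes space in headers
    return c == ESCAPE or not (' ' <= c <= '~')

print([c for c in 'a=b\tÿ' if needs_quoting(c)])  # ['=', 'ÿ']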
def get_belapi_handle(client, username=None, password=None): """Get BEL API arango db handle""" (username, password) = get_user_creds(username, password) sys_db = client.db("_system", username=username, password=password) # Create a new database named "belapi" try: if username and password: belapi_db = sys_db.create_database( name=belapi_db_name, users=[{"username": username, "password": password, "active": True}], ) else: belapi_db = sys_db.create_database(name=belapi_db_name) except arango.exceptions.DatabaseCreateError: if username and password: belapi_db = client.db(belapi_db_name, username=username, password=password) else: belapi_db = client.db(belapi_db_name) try: belapi_db.create_collection(belapi_settings_name) except Exception: pass try: belapi_db.create_collection(belapi_statemgmt_name) except Exception: pass return belapi_db
0.002846
def handle_error(self, error: Exception) -> None: """ Populates :data:`chess.pgn.Game.errors` with encountered errors and logs them. """ LOGGER.exception("error during pgn parsing") self.game.errors.append(error)
0.007692
def nextCmd(snmpEngine, authData, transportTarget, contextData, *varBinds, **options): """Performs SNMP GETNEXT query. Based on passed parameters, prepares SNMP GETNEXT packet (:RFC:`1905#section-4.2.2`) and schedules its transmission by :mod:`twisted` I/O framework at a later point of time. Parameters ---------- snmpEngine : :class:`~pysnmp.hlapi.SnmpEngine` Class instance representing SNMP engine. authData : :class:`~pysnmp.hlapi.CommunityData` or :class:`~pysnmp.hlapi.UsmUserData` Class instance representing SNMP credentials. transportTarget : :class:`~pysnmp.hlapi.twisted.UdpTransportTarget` or :class:`~pysnmp.hlapi.twisted.Udp6TransportTarget` Class instance representing transport type along with SNMP peer address. contextData : :class:`~pysnmp.hlapi.ContextData` Class instance representing SNMP ContextEngineId and ContextName values. \*varBinds : :class:`~pysnmp.smi.rfc1902.ObjectType` One or more class instances representing MIB variables to place into SNMP request. Other Parameters ---------------- \*\*options : Request options: * `lookupMib` - load MIB and resolve response MIB variables at the cost of slightly reduced performance. Default is `True`. * `ignoreNonIncreasingOid` - continue iteration even if response MIB variables (OIDs) are not greater than request MIB variables. Be aware that setting it to `True` may cause an infinite loop between SNMP management and agent applications. Default is `False`. Returns ------- deferred : :class:`~twisted.internet.defer.Deferred` Twisted Deferred object representing work-in-progress. User is expected to attach their own `success` and `error` callback functions to the Deferred object through :meth:`~twisted.internet.defer.Deferred.addCallbacks` method. Raises ------ PySnmpError Or its derivative indicating that an error occurred while performing SNMP operation. Notes ----- User `success` callback is called with the following tuple as its first argument: * errorStatus (str) : True value indicates SNMP PDU error. * errorIndex (int) : Non-zero value refers to `varBinds[errorIndex-1]` * varBinds (tuple) : A sequence of sequences (e.g. 2-D array) of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances representing a table of MIB variables returned in SNMP response. Inner sequences represent table rows and are ordered exactly the same as `varBinds` in request. Response to GETNEXT always contains a single row. User `error` callback is called with `errorIndication` object wrapped in :class:`~twisted.python.failure.Failure` object. Examples -------- >>> from twisted.internet.task import react >>> from pysnmp.hlapi.twisted import * >>> >>> def success(args): ... (errorStatus, errorIndex, varBindTable) = args ... print(errorStatus, errorIndex, varBindTable) ... >>> def failure(errorIndication): ... print(errorIndication) ... >>> def run(reactor): ... d = nextCmd(SnmpEngine(), ... CommunityData('public'), ... UdpTransportTarget(('demo.snmplabs.com', 161)), ... ContextData(), ... ObjectType(ObjectIdentity('SNMPv2-MIB', 'system'))) ... d.addCallback(success).addErrback(failure) ... return d ... >>> react(run) (0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]]) """ def __cbFun(snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBindTable, cbCtx): lookupMib, deferred = cbCtx if (options.get('ignoreNonIncreasingOid', False) and errorIndication and isinstance(errorIndication, errind.OidNotIncreasing)): errorIndication = None if errorIndication: deferred.errback(Failure(errorIndication)) else: try: varBindTable = [ VB_PROCESSOR.unmakeVarBinds(snmpEngine.cache, varBindTableRow, lookupMib) for varBindTableRow in varBindTable ] except Exception as e: deferred.errback(Failure(e)) else: deferred.callback((errorStatus, errorIndex, varBindTable)) addrName, paramsName = LCD.configure( snmpEngine, authData, transportTarget, contextData.contextName) varBinds = VB_PROCESSOR.makeVarBinds(snmpEngine.cache, varBinds) deferred = Deferred() cmdgen.NextCommandGenerator().sendVarBinds( snmpEngine, addrName, contextData.contextEngineId, contextData.contextName, varBinds, __cbFun, (options.get('lookupMib', True), deferred)) return deferred
0.001945
def gen_front_term(self, x, dmp_num): """Generates the front term on the forcing term. For rhythmic DMPs it's non-diminishing, so this function is just a placeholder to return 1. x float: the current value of the canonical system dmp_num int: the index of the current dmp """ if isinstance(x, np.ndarray): return np.ones(x.shape) return 1
0.009412
def read(self, nrml_file, validate=False, simple_fault_spacing=1.0, complex_mesh_spacing=5.0, mfd_spacing=0.1): """ Build the source model from nrml format """ self.source_file = nrml_file if validate: converter = SourceConverter(1.0, simple_fault_spacing, complex_mesh_spacing, mfd_spacing, 10.0) converter.fname = nrml_file root = nrml.read(nrml_file) if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4': sg_nodes = [root.sourceModel.nodes] else: # NRML 0.5 sg_nodes = root.sourceModel.nodes sources = [] for sg_node in sg_nodes: for no, src_node in enumerate(sg_node, 1): if validate: print("Validating Source %s" % src_node.attrib["id"]) converter.convert_node(src_node) sources.append(src_node) return SourceModel(sources)
0.003636
def find_method_params(self): """Return the method params :returns: tuple (args, kwargs) that will be passed as *args, **kwargs """ req = self.request args = req.controller_info["method_args"] kwargs = req.controller_info["method_kwargs"] return args, kwargs
0.006349
def auto_set_dir(action=None, name=None): """ Use :func:`logger.set_logger_dir` to set log directory to "./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running""" mod = sys.modules['__main__'] basename = os.path.basename(mod.__file__) auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')]) if name: auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name set_logger_dir(auto_dirname, action=action)
0.003906
def participant_names(self): '''The names of the RTObjects participating in this context.''' with self._mutex: return [obj.get_component_profile().instance_name \ for obj in self._participants]
0.012448
def sweHouses(jd, lat, lon, hsys): """ Returns lists of houses and angles. """ hsys = SWE_HOUSESYS[hsys] hlist, ascmc = swisseph.houses(jd, lat, lon, hsys) # Add first house to the end of 'hlist' so that we # can compute house sizes with an iterator hlist += (hlist[0],) houses = [ { 'id': const.LIST_HOUSES[i], 'lon': hlist[i], 'size': angle.distance(hlist[i], hlist[i+1]) } for i in range(12) ] angles = [ {'id': const.ASC, 'lon': ascmc[0]}, {'id': const.MC, 'lon': ascmc[1]}, {'id': const.DESC, 'lon': angle.norm(ascmc[0] + 180)}, {'id': const.IC, 'lon': angle.norm(ascmc[1] + 180)} ] return (houses, angles)
0.005391
def make_python_xref_nodes_for_type(py_type, state, hide_namespace=False): """Make docutils nodes containing a cross-reference to a Python object, given the object's type. Parameters ---------- py_type : `obj` Type of an object. For example ``mypackage.mymodule.MyClass``. If you have an instance of the type, use ``type(myinstance)``. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes(numpy.sin, self.state) See also -------- `make_python_xref_nodes` """ if py_type.__module__ == 'builtins': typestr = py_type.__name__ else: typestr = '.'.join((py_type.__module__, py_type.__name__)) return make_python_xref_nodes(typestr, state, hide_namespace=hide_namespace)
0.000753
def toggle_keyboard(cls, flag=HIDE_IMPLICIT_ONLY): """ Toggle the keyboard on and off Parameters ---------- flag: int Flag to send to toggleSoftInput Returns -------- result: future Resolves when the toggle is complete """ app = AndroidApplication.instance() f = app.create_future() def on_ready(ims): ims.toggleSoftInput(flag, 0) f.set_result(True) cls.get().then(on_ready) return f
0.008591
def angsep2(lon_1, lat_1, lon_2, lat_2): """ Angular separation (deg) between two sky coordinates. """ import healpy v10, v11, v12 = healpy.ang2vec(np.radians(90. - lat_1), np.radians(lon_1)).transpose() v20, v21, v22 = healpy.ang2vec(np.radians(90. - lat_2), np.radians(lon_2)).transpose() val = (v10 * v20) + (v11 * v21) + (v12 * v22) val = np.clip(val, -1., 1.) return np.degrees(np.arccos(val))
0.006897
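The same dot-product formula without the healpy dependency, building the unit vectors directly from lon/lat (a sketch; all angles in degrees):

import numpy as np

def angsep(lon1, lat1, lon2, lat2):
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    v1 = np.array([np.cos(lat1) * np.cos(lon1),
                   np.cos(lat1) * np.sin(lon1),
                   np.sin(lat1)])
    v2 = np.array([np.cos(lat2) * np.cos(lon2),
                   np.cos(lat2) * np.sin(lon2),
                   np.sin(lat2)])
    # clip guards arccos against floating-point overshoot at +/-1
    return np.degrees(np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0)))

print(angsep(0.0, 0.0, 90.0, 0.0))  # 90.0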
def unregister(self, bucket, name): """ Remove the function from the registry by name """ assert bucket in self, 'Bucket %s is unknown' % bucket if name not in self[bucket]: raise NotRegistered('The function %s is not registered' % name) del self[bucket][name]
0.009375
def process_seq(seq, material): '''Validate and process sequence inputs. :param seq: input sequence :type seq: str :param material: DNA, RNA, or peptide :type: str :returns: Uppercase version of `seq` with the alphabet checked by check_alphabet(). :rtype: str ''' check_alphabet(seq, material) seq = seq.upper() return seq
0.002618
def close(self): '''Stop running timers.''' if self._call_later_handle: self._call_later_handle.cancel() self._running = False
0.01227
def scan(cls, result_key, func): """ Define computed fields based on a string to "grep for". This is preferred to utilizing raw log lines in plugins because computed fields will be serialized, whereas raw log lines will not. """ if result_key in cls.scanner_keys: raise ValueError("'%s' is already a registered scanner key" % result_key) def scanner(self): result = func(self) setattr(self, result_key, result) cls.scanners.append(scanner) cls.scanner_keys.add(result_key)
0.005137
def not_present(subset=None, show_ip=False, show_ipv4=None): ''' .. versionadded:: 2015.5.0 .. versionchanged:: 2019.2.0 The 'show_ipv4' argument has been renamed to 'show_ip' as it now includes IPv6 addresses for IPv6-connected minions. Print a list of all minions that are NOT up according to Salt's presence detection (no commands will be sent) subset : None Pass in a CIDR range to filter minions by IP address. show_ip : False Also show the IP address each minion is connecting from. CLI Example: .. code-block:: bash salt-run manage.not_present ''' show_ip = _show_ip_migration(show_ip, show_ipv4) return list_not_state(subset=subset, show_ip=show_ip)
0.001332
def restore_layout(self, name, *args): """ Restores given layout. :param name: Layout name. :type name: unicode :param \*args: Arguments. :type \*args: \* :return: Method success. :rtype: bool """ layout = self.__layouts.get(name) if not layout: raise umbra.exceptions.LayoutExistError("{0} | '{1}' layout isn't registered!".format( self.__class__.__name__, name)) LOGGER.debug("> Restoring layout '{0}'.".format(name)) for component, profile in self.__container.components_manager: if profile.category == "QWidget" and component not in self.__container.visible_components: interface = self.__container.components_manager.get_interface(component) interface and interface.hide() self.__current_layout = name self.__container.centralWidget().setVisible( self.__settings.get_key("Layouts", "{0}_central_widget".format(name)).toBool()) self.__container.restoreState( self.__settings.get_key("Layouts", "{0}_window_state".format(name)).toByteArray()) self.__restore_geometry_on_layout_change and \ self.__container.restoreGeometry( self.__settings.get_key("Layouts", "{0}_geometry".format(name)).toByteArray()) self.layout_restored.emit(self.__current_layout) return True
0.008339
def _initialize_workflow(self): """ **Purpose**: Initialize the PST of the workflow with a uid and type checks """ try: self._prof.prof('initializing workflow', uid=self._uid) for p in self._workflow: p._assign_uid(self._sid) self._prof.prof('workflow initialized', uid=self._uid) except Exception, ex: self._logger.exception( 'Fatal error while initializing workflow: %s' % ex) raise
0.005758
def express_route_cross_connections(self): """Instance depends on the API version: * 2018-02-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_02_01.operations.ExpressRouteCrossConnectionsOperations>` * 2018-04-01: :class:`ExpressRouteCrossConnectionsOperations<azure.mgmt.network.v2018_04_01.operations.ExpressRouteCrossConnectionsOperations>` """ api_version = self._get_api_version('express_route_cross_connections') if api_version == '2018-02-01': from .v2018_02_01.operations import ExpressRouteCrossConnectionsOperations as OperationClass elif api_version == '2018-04-01': from .v2018_04_01.operations import ExpressRouteCrossConnectionsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
0.007715
def parseXRDS(text): """Parse the given text as an XRDS document. @return: ElementTree containing an XRDS document @raises XRDSError: When there is a parse error or the document does not contain an XRDS. """ try: element = ElementTree.XML(text) except XMLError, why: exc = XRDSError('Error parsing document as XML') exc.reason = why raise exc else: tree = ElementTree.ElementTree(element) if not isXRDS(tree): raise XRDSError('Not an XRDS document') return tree
0.001761
def make_catalog_sources(catalog_roi_model, source_names): """Construct and return dictionary of sources that are a subset of sources in catalog_roi_model. Parameters ---------- catalog_roi_model : dict or `fermipy.roi_model.ROIModel` Input set of sources source_names : list Names of sourcs to extract Returns dict mapping source_name to `fermipy.roi_model.Source` object """ sources = {} for source_name in source_names: sources[source_name] = catalog_roi_model[source_name] return sources
0.001776
def configure_logging(self, context): """ Configure logging for the application. :param context: The guacamole context object. This method attaches a :py:class:logging.StreamHandler` with a subclass of :py:class:`logging.Formatter` to the root logger. The specific subclass is :class:`ANSIFormatter` and it adds basic ANSI formatting (colors and some styles) to logging messages so that they stand out from normal output. """ fmt = "%(name)-12s: %(levelname)-8s %(message)s" formatter = ANSIFormatter(context, fmt) handler = logging.StreamHandler() handler.setFormatter(formatter) logging.root.addHandler(handler)
0.002706
def note(name, source=None, contents=None, **kwargs): ''' Add content to a document generated using `highstate_doc.render`. This state does not perform any tasks on the host. It is only used in highstate_doc lowstate processors to include extra documents. .. code-block:: yaml {{sls}} example note: highstate_doc.note: - name: example note - require_in: - pkg: somepackage - contents: | example `highstate_doc.note` ------------------ This state does not do anything to the system! It is only used by a `processor`; you can use `requisites` and `order` to move your docs around the rendered file. .. this message appears above the `pkg: somepackage` state. - source: salt://{{tpldir}}/also_include_a_file.md {{sls}} extra help: highstate_doc.note: - name: example - order: 0 - source: salt://{{tpldir}}/HELP.md ''' comment = '' if source: comment += 'include file: {0}\n'.format(source) if contents and len(contents) < 200: comment += contents return {'name': name, 'result': True, 'comment': comment, 'changes': {}}
0.002967
def adjust_tuning(input_file, output_file): '''Load audio, estimate tuning, apply pitch correction, and save.''' print('Loading ', input_file) y, sr = librosa.load(input_file) print('Separating harmonic component ... ') y_harm = librosa.effects.harmonic(y) print('Estimating tuning ... ') # Just track the pitches associated with high magnitude tuning = librosa.estimate_tuning(y=y_harm, sr=sr) print('{:+0.2f} cents'.format(100 * tuning)) print('Applying pitch-correction of {:+0.2f} cents'.format(-100 * tuning)) y_tuned = librosa.effects.pitch_shift(y, sr, -tuning) print('Saving tuned audio to: ', output_file) librosa.output.write_wav(output_file, y_tuned, sr)
0.001387
def PathToComponents(path): """Converts a canonical path representation to a list of components. Args: path: A canonical MySQL path representation. Returns: A sequence of path components. """ precondition.AssertType(path, Text) if path and not path.startswith("/"): raise ValueError("Path '{}' is not absolute".format(path)) if path: return tuple(path.split("/")[1:]) else: return ()
0.014218
def _get_question(self, qcount): """Read the next I{qcount} records from the wire data and add them to the question section. @param qcount: the number of questions in the message @type qcount: int""" if self.updating and qcount > 1: raise dns.exception.FormError for i in xrange(0, qcount): (qname, used) = dns.name.from_wire(self.wire, self.current) if not self.message.origin is None: qname = qname.relativize(self.message.origin) self.current = self.current + used (rdtype, rdclass) = \ struct.unpack('!HH', self.wire[self.current:self.current + 4]) self.current = self.current + 4 self.message.find_rrset(self.message.question, qname, rdclass, rdtype, create=True, force_unique=True) if self.updating: self.zone_rdclass = rdclass
0.00385
def draw(self): """ Draw the figure using the renderer """ if __debug__: verbose.report('FigureCanvasAgg.draw', 'debug-annoying') self.renderer = self.get_renderer(cleared=True) # acquire a lock on the shared font cache RendererAgg.lock.acquire() try: self.figure.draw(self.renderer) finally: RendererAgg.lock.release()
0.007194
def projection(self, plain_src_name): """Return the projection for the given source namespace.""" mapped = self.lookup(plain_src_name) if not mapped: return None fields = mapped.include_fields or mapped.exclude_fields if fields: include = 1 if mapped.include_fields else 0 return dict((field, include) for field in fields) return None
0.004785
def register_id(self, cmd_type, obj): """Registers an object (through its integration id) to receive update notifications. This is the core mechanism how Output and Keypad objects get notified when the controller sends status updates.""" ids = self._ids.setdefault(cmd_type, {}) if obj.id in ids: raise IntegrationIdExistsError self._ids[cmd_type][obj.id] = obj
0.005115
def get_matrix(self, x1=None, x2=None, include_diagonal=None, include_general=None): """ Get the covariance matrix at given independent coordinates Args: x1 (Optional[array[n1]]): The first set of independent coordinates. If this is omitted, ``x1`` will be assumed to be equal to ``x`` from a previous call to :func:`GP.compute`. x2 (Optional[array[n2]]): The second set of independent coordinates. If this is omitted, ``x2`` will be assumed to be ``x1``. include_diagonal (Optional[bool]): Should the white noise and ``yerr`` terms be included on the diagonal? (default: ``False``) """ if x1 is None and x2 is None: if self._t is None or not self.computed: raise RuntimeError("you must call 'compute' first") K = self.kernel.get_value(self._t[:, None] - self._t[None, :]) if include_diagonal is None or include_diagonal: K[np.diag_indices_from(K)] += ( self._yerr**2 + self.kernel.jitter ) if (include_general is None or include_general) and len(self._A): K[np.diag_indices_from(K)] += self._A K += np.tril(np.dot(self._U.T, self._V), -1) K += np.triu(np.dot(self._V.T, self._U), 1) return K incl = False x1 = np.ascontiguousarray(x1, dtype=float) if x2 is None: x2 = x1 incl = include_diagonal is not None and include_diagonal K = self.kernel.get_value(x1[:, None] - x2[None, :]) if incl: K[np.diag_indices_from(K)] += self.kernel.jitter return K
0.001665
def _load_credentials(self): """(Re-)loads the credentials from the file.""" if not self._file: return loaded_credentials = _load_credentials_file(self._file) self._credentials.update(loaded_credentials) logger.debug('Read credential file')
0.006803
def plot_xtf(fignum, XTF, Fs, e, b): """ function to plot series of chi measurements as a function of temperature, holding field constant and varying frequency """ plt.figure(num=fignum) plt.xlabel('Temperature (K)') plt.ylabel('Susceptibility (m^3/kg)') k = 0 Flab = [] for freq in XTF: T, X = [], [] for xt in freq: X.append(xt[0]) T.append(xt[1]) plt.plot(T, X) plt.text(T[-1], X[-1], str(int(Fs[k])) + ' Hz') # Flab.append(str(int(Fs[k]))+' Hz') k += 1 plt.title(e + ': B = ' + '%8.1e' % (b) + ' T')
0.003257
def start( context: typing.Optional[typing.Mapping] = None, banner: typing.Optional[str] = None, shell: typing.Type[Shell] = AutoShell, prompt: typing.Optional[str] = None, output: typing.Optional[str] = None, context_format: str = "full", **kwargs: typing.Any, ) -> None: """Start up the konch shell. Takes the same parameters as Shell.__init__. """ logger.debug(f"Using shell: {shell!r}") if banner is None: banner = speak() # Default to global config context_ = context or _cfg["context"] banner_ = banner or _cfg["banner"] if isinstance(shell, type) and issubclass(shell, Shell): shell_ = shell else: shell_ = SHELL_MAP.get(shell or _cfg["shell"], _cfg["shell"]) prompt_ = prompt or _cfg["prompt"] output_ = output or _cfg["output"] context_format_ = context_format or _cfg["context_format"] shell_( context=context_, banner=banner_, prompt=prompt_, output=output_, context_format=context_format_, **kwargs, ).start()
0.000929
def xadd(self, name, fields, id='*', maxlen=None, approximate=True): """ Add to a stream. name: name of the stream fields: dict of field/value pairs to insert into the stream id: Location to insert this record. By default it is appended. maxlen: truncate old stream members beyond this size approximate: actual stream length may be slightly more than maxlen """ pieces = [] if maxlen is not None: if not isinstance(maxlen, (int, long)) or maxlen < 1: raise DataError('XADD maxlen must be a positive integer') pieces.append(Token.get_token('MAXLEN')) if approximate: pieces.append(Token.get_token('~')) pieces.append(str(maxlen)) pieces.append(id) if not isinstance(fields, dict) or len(fields) == 0: raise DataError('XADD fields must be a non-empty dict') for pair in iteritems(fields): pieces.extend(pair) return self.execute_command('XADD', name, *pieces)
0.001862
def step(self, x): r"""perform a single Brownian dynamics step""" return x - self.coeff_A * self.gradient(x) \ + self.coeff_B * np.random.normal(size=self.dim)
0.015789
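The update above is an Euler-Maruyama step for overdamped Brownian dynamics, x <- x - A*grad(x) + B*xi. A self-contained sketch on the quadratic potential U(x) = x**2/2, with coefficients chosen as A = dt and B = sqrt(2*dt), a conventional unit-temperature choice rather than values taken from the source:

import numpy as np

rng = np.random.default_rng(0)
dt = 1e-3
x = np.ones(2)
for _ in range(10000):
    # gradient of U(x) = x**2 / 2 is x
    x = x - dt * x + np.sqrt(2 * dt) * rng.normal(size=x.shape)
print(x)  # after many steps, x is distributed roughly as exp(-x**2/2)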
def eleventh(note): """Build an eleventh chord on note. Example: >>> eleventh('C') ['C', 'G', 'Bb', 'F'] """ return [note, intervals.perfect_fifth(note), intervals.minor_seventh(note), intervals.perfect_fourth(note)]
0.003953
def _apply_to_sets(self, func, operation, keys, *args): """Helper function for sdiff, sinter, and sunion""" keys = self._list_or_args(keys, args) if not keys: raise TypeError("{} takes at least two arguments".format(operation.lower())) left = self._get_set(keys[0], operation) or set() for key in keys[1:]: right = self._get_set(key, operation) or set() left = func(left, right) return left
0.006329
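The left fold above, written against plain Python sets with functools.reduce, shows the three Redis-style operations it backs:

import functools

sets = [{1, 2, 3}, {2, 3, 4}, {3, 4, 5}]
print(functools.reduce(set.intersection, sets))  # {3} (SINTER)
print(functools.reduce(set.union, sets))         # {1, 2, 3, 4, 5} (SUNION)
print(functools.reduce(set.difference, sets))    # {1} (SDIFF)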
def _is_epsilon_nash(x, g, epsilon, indptr=None): """ Determine whether `x` is an `epsilon`-Nash equilibrium of `g`. Parameters ---------- x : array_like(float, ndim=1) Array of flattened mixed action profile of length equal to n_0 + ... + n_N-1, where `out[indptr[i]:indptr[i+1]]` contains player i's mixed action. g : NormalFormGame epsilon : scalar(float) indptr : array_like(int, ndim=1), optional(default=None) Array of index pointers of length N+1, where `indptr[0] = 0` and `indptr[i+1] = indptr[i] + n_i`. Created internally if None. Returns ------- bool """ if indptr is None: indptr = np.empty(g.N+1, dtype=int) indptr[0] = 0 indptr[1:] = np.cumsum(g.nums_actions) action_profile = _get_action_profile(x, indptr) return g.is_nash(action_profile, tol=epsilon)
0.001112
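How the index-pointer array used above is built and consumed; player i's mixed action occupies x[indptr[i]:indptr[i+1]] (action counts below are hypothetical):

import numpy as np

nums_actions = [3, 2, 4]  # three players with 3, 2, and 4 actions
indptr = np.empty(len(nums_actions) + 1, dtype=int)
indptr[0] = 0
indptr[1:] = np.cumsum(nums_actions)
print(indptr)  # [0 3 5 9]

x = np.arange(9)
print([x[indptr[i]:indptr[i + 1]] for i in range(len(nums_actions))])
# [array([0, 1, 2]), array([3, 4]), array([5, 6, 7, 8])]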
def _stmt_location_to_agents(stmt, location): """Apply an event location to the Agents in the corresponding Statement. If a Statement is in a given location we represent that by requiring all Agents in the Statement to be in that location. """ if location is None: return agents = stmt.agent_list() for a in agents: if a is not None: a.location = location
0.002427
def get_facet_serializer_class(self): """ Return the class to use for serializing facets. Defaults to using ``self.facet_serializer_class``. """ if self.facet_serializer_class is None: raise AttributeError( "%(cls)s should either include a `facet_serializer_class` attribute, " "or override %(cls)s.get_facet_serializer_class() method." % {"cls": self.__class__.__name__} ) return self.facet_serializer_class
0.005682
def _process_rval_components(self): """This is suspiciously similar to _process_macro_default_arg, probably want to figure out how to merge the two. Process the rval of an assignment statement or a do-block """ while True: match = self._expect_match( 'do block component', # you could have a string, though that would be weird STRING_PATTERN, # a quote or an open/close parenthesis NON_STRING_DO_BLOCK_MEMBER_PATTERN, # a tag close TAG_CLOSE_PATTERN ) matchgroups = match.groupdict() self.advance(match.end()) if matchgroups.get('string') is not None: continue elif matchgroups.get('quote') is not None: self.rewind() # now look for a string match = self._expect_match('any string', STRING_PATTERN) self.advance(match.end()) elif matchgroups.get('open'): self._parenthesis_stack.append(True) elif matchgroups.get('close'): self._parenthesis_stack.pop() elif matchgroups.get('tag_close'): if self._parenthesis_stack: msg = ('Found "%}", expected ")"') dbt.exceptions.raise_compiler_error(msg) return
0.001386
def vectors(self, direction="all", failed=False): """Get vectors that connect at this node. Direction can be "incoming", "outgoing" or "all" (default). Failed can be True, False or all """ # check direction if direction not in ["all", "incoming", "outgoing"]: raise ValueError( "{} is not a valid vector direction. " "Must be all, incoming or outgoing.".format(direction)) if failed not in ["all", False, True]: raise ValueError("{} is not a valid vector failed".format(failed)) # get the vectors if failed == "all": if direction == "all": return Vector.query\ .filter(or_(Vector.destination_id == self.id, Vector.origin_id == self.id))\ .all() if direction == "incoming": return Vector.query\ .filter_by(destination_id=self.id)\ .all() if direction == "outgoing": return Vector.query\ .filter_by(origin_id=self.id)\ .all() else: if direction == "all": return Vector.query\ .filter(and_(Vector.failed == failed, or_(Vector.destination_id == self.id, Vector.origin_id == self.id)))\ .all() if direction == "incoming": return Vector.query\ .filter_by(destination_id=self.id, failed=failed)\ .all() if direction == "outgoing": return Vector.query\ .filter_by(origin_id=self.id, failed=failed)\ .all()
0.001087
def jsonify_status_code(status_code, *args, **kw): """Returns a jsonified response with the specified HTTP status code. The positional and keyword arguments are passed directly to the :func:`flask.jsonify` function which creates the response. """ is_batch = kw.pop('is_batch', False) if is_batch: response = flask_make_response(json.dumps(*args, **kw)) response.mimetype = 'application/json' response.status_code = status_code return response response = jsonify(*args, **kw) response.status_code = status_code return response
0.001684
def _cwl_workflow_template(inputs, top_level=False): """Retrieve CWL inputs shared amongst different workflows. """ ready_inputs = [] for inp in inputs: cur_inp = copy.deepcopy(inp) for attr in ["source", "valueFrom", "wf_duplicate"]: cur_inp.pop(attr, None) if top_level: cur_inp = workflow._flatten_nested_input(cur_inp) cur_inp = _clean_record(cur_inp) ready_inputs.append(cur_inp) return {"class": "Workflow", "cwlVersion": "v1.0", "hints": [], "requirements": [{"class": "EnvVarRequirement", "envDef": [{"envName": "MPLCONFIGDIR", "envValue": "."}]}, {"class": "ScatterFeatureRequirement"}, {"class": "SubworkflowFeatureRequirement"}], "inputs": ready_inputs, "outputs": [], "steps": []}
0.002137
def get_current_branch(): """ Return the current branch """ cmd = ["git", "rev-parse", "--abbrev-ref", "HEAD"] output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) return output.strip().decode("utf-8")
0.004237
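The same call, runnable directly from inside any git checkout:

import subprocess

out = subprocess.check_output(
    ["git", "rev-parse", "--abbrev-ref", "HEAD"], stderr=subprocess.STDOUT)
print(out.strip().decode("utf-8"))  # e.g. 'main'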
def get_source_variable(self, source_id, variable): """ Get the current value of a source variable. If the variable is not in the cache it will be retrieved from the controller. """ source_id = int(source_id) try: return self._retrieve_cached_source_variable( source_id, variable) except UncachedVariable: return (yield from self._send_cmd("GET S[%d].%s" % ( source_id, variable)))
0.004149
def run_toy_DistilledSGLD(gpu_id): """Run DistilledSGLD on toy dataset""" X, Y, X_test, Y_test = load_toy() minibatch_size = 1 teacher_noise_precision = 1.0 teacher_net = get_toy_sym(True, teacher_noise_precision) student_net = get_toy_sym(False) data_shape = (minibatch_size,) + X.shape[1::] teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id)), 'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev(gpu_id))} student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev(gpu_id))} teacher_initializer = mx.init.Uniform(0.07) student_initializer = mx.init.Uniform(0.07) student_grad_f = lambda student_outputs, teacher_pred: \ regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision) student_exe, student_params, _ = \ DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net, teacher_data_inputs=teacher_data_inputs, student_data_inputs=student_data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000, teacher_initializer=teacher_initializer, student_initializer=student_initializer, teacher_learning_rate=1E-4, student_learning_rate=0.01, # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5), student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8), student_grad_f=student_grad_f, teacher_prior_precision=0.1, student_prior_precision=0.001, perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression', dev=dev(gpu_id))
0.005042
def get_link_url(self, datum=None): """Returns the final URL based on the value of ``url``. If ``url`` is callable it will call the function. If not, it will then try to call ``reverse`` on ``url``. Failing that, it will simply return the value of ``url`` as-is. When called for a row action, the current row data object will be passed as the first parameter. """ if not self.url: raise NotImplementedError('A LinkAction class must have a ' 'url attribute or define its own ' 'get_link_url method.') if callable(self.url): return self.url(datum, **self.kwargs) try: if datum: obj_id = self.table.get_object_id(datum) return urls.reverse(self.url, args=(obj_id,)) else: return urls.reverse(self.url) except urls.NoReverseMatch as ex: LOG.info('No reverse found for "%(url)s": %(exception)s', {'url': self.url, 'exception': ex}) return self.url
0.001741
def matrix(mat): """Convert a ROOT TMatrix into a NumPy matrix. Parameters ---------- mat : ROOT TMatrixT A ROOT TMatrixD or TMatrixF Returns ------- mat : numpy.matrix A NumPy matrix Examples -------- >>> from root_numpy import matrix >>> from ROOT import TMatrixD >>> a = TMatrixD(4, 4) >>> a[1][2] = 2 >>> matrix(a) matrix([[ 0., 0., 0., 0.], [ 0., 0., 2., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]]) """ import ROOT if isinstance(mat, (ROOT.TMatrixD, ROOT.TMatrixDSym)): return _librootnumpy.matrix_d(ROOT.AsCObject(mat)) elif isinstance(mat, (ROOT.TMatrixF, ROOT.TMatrixFSym)): return _librootnumpy.matrix_f(ROOT.AsCObject(mat)) raise TypeError( "unable to convert object of type {0} " "into a numpy matrix".format(type(mat)))
0.001098
def prepare_message(self, message_data, delivery_mode, priority=None, content_type=None, content_encoding=None): """Encapsulate data into a AMQP message.""" properties = pika.BasicProperties(priority=priority, content_type=content_type, content_encoding=content_encoding, delivery_mode=delivery_mode) return message_data, properties
0.006073
def is_modified(self) -> bool: """ Find whether the files on the left and right are different. Note, modified implies the contents of the file have changed, which is predicated on the file existing on both the left and right. Therefore this will be false if the file on the left has been deleted, or the file on the right is new. :return: Whether the file has been modified. """ if self.is_new or self.is_deleted: return False return self.left.md5 != self.right.md5
0.003584
def target_group_exists(name, region=None, key=None, keyid=None, profile=None): ''' Check to see if an target group exists. CLI example: .. code-block:: bash salt myminion boto_elbv2.target_group_exists arn:aws:elasticloadbalancing:us-west-2:644138682826:targetgroup/learn1give1-api/414788a16b5cf163 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: if name.startswith('arn:aws:elasticloadbalancing'): alb = conn.describe_target_groups(TargetGroupArns=[name]) else: alb = conn.describe_target_groups(Names=[name]) if alb: return True else: log.warning('The target group does not exist in region %s', region) return False except ClientError as error: log.warning('target_group_exists check for %s returned: %s', name, error) return False
0.002959
def is_parseable (self): """ Check if content is parseable for recursion. @return: True if content is parseable @rtype: bool """ if not self.valid: return False # some content types must be validated with the page content if self.content_type in ("application/xml", "text/xml"): rtype = mimeutil.guess_mimetype_read(self.get_content) if rtype is not None: # XXX side effect self.content_type = rtype if self.content_type not in self.ContentMimetypes: log.debug(LOG_CHECK, "URL with content type %r is not parseable", self.content_type) return False return True
0.005457
def read(filename,ext=None,swapyz=False): """ NAME: read PURPOSE: read a NEMO snapshot file consisting of mass,position,velocity INPUT: filename - name of the file ext= if set, 'nemo' for NEMO binary format, otherwise assumed ASCII; if not set, gleaned from extension swapyz= (False) if True, swap the y and z axes in the output (only for position and velocity) OUTPUT: snapshots [nbody,ndim,nt] HISTORY: 2015-11-18 - Written - Bovy (UofT) """ if ext is None and filename.split('.')[-1] == 'nemo': ext= 'nemo' elif ext is None: ext= 'dat' # Convert to ASCII if necessary if ext.lower() == 'nemo': file_handle, asciifilename= tempfile.mkstemp() os.close(file_handle) stderr= open('/dev/null','w') try: subprocess.check_call(['s2a',filename,asciifilename])#,stderr=stderr) except subprocess.CalledProcessError: os.remove(asciifilename) finally: stderr.close() else: asciifilename= filename # Now read out= numpy.loadtxt(asciifilename,comments='#') if ext.lower() == 'nemo': os.remove(asciifilename) if swapyz: out[:,[2,3]]= out[:,[3,2]] out[:,[5,6]]= out[:,[6,5]] # Get the number of snapshots nt= (_wc(asciifilename)-out.shape[0])//13 # 13 comments/snapshot out= numpy.reshape(out,(nt,out.shape[0]//nt,out.shape[1])) return numpy.swapaxes(numpy.swapaxes(out,0,1),1,2)
0.025675
def get_server(self, key, **kwds): """ Get a new or existing server for this key. :param int key: key for the server to use """ kwds = dict(self.kwds, **kwds) server = self.servers.get(key) if server: # Make sure it's the right server. server.check_keywords(self.constructor, kwds) else: # Make a new server server = _CachedServer(self.constructor, key, kwds) self.servers[key] = server return server
0.003745
def parseString(txt, cip=True): """ Parse string `txt` and return DOM tree consisting of single linked :class:`.HTMLElement`. Args: txt (str): HTML/XML string, which will be parsed to DOM. cip (bool, default True): Case Insensitive Parameters. Use special dictionary to store :attr:`.HTMLElement.params` as case insensitive. Returns: obj: Single container HTML element with blank tag, which has the whole DOM\ in its :attr:`.HTMLElement.childs` property. This element can be\ queried using :meth:`.HTMLElement.find` functions. """ if isinstance(txt, HTMLElement): return txt # remove UTF BOM (prettify fails if not) if len(txt) > 3 and txt[:3] == u"\xef\xbb\xbf": txt = txt[3:] if not cip: htmlelement.html_parser.SpecialDict = dict elif isinstance(htmlelement.html_parser.SpecialDict, dict): htmlelement.html_parser.SpecialDict = specialdict.SpecialDict container = HTMLElement() container.childs = _parseDOM([ HTMLElement(x) for x in _raw_split(txt) ]) return container
0.000871
def check_filemode(filepath, mode): """Return True if 'file' matches ('permission') which should be entered in octal. """ filemode = stat.S_IMODE(os.stat(filepath).st_mode) return (oct(filemode) == mode)
0.009091
def parse_glob(path, included): """Parse a glob.""" files = glob.glob(path, recursive=True) array = [] for file in files: file = os.path.abspath(file) if file not in included: array.append(file) included += array return array
0.041494
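A quick demonstration of the recursive glob used above; the '**' pattern only recurses when recursive=True (Python 3.5+):

import glob
import os

files = [os.path.abspath(f) for f in glob.glob("**/*.py", recursive=True)]
print(files[:5])  # absolute paths, like the entries appended above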
def guest_register(self, userid, meta, net_set): """DB operation for migrate vm from another z/VM host in same SSI :param userid: (str) the userid of the vm to be relocated or tested :param meta: (str) the metadata of the vm to be relocated or tested :param net_set: (str) the net_set of the vm, default is 1. """ userid = userid.upper() if not zvmutils.check_userid_exist(userid): LOG.error("User directory '%s' does not exist." % userid) raise exception.SDKObjectNotExistError( obj_desc=("Guest '%s'" % userid), modID='guest') else: action = "list all guests in database which has been migrated." with zvmutils.log_and_reraise_sdkbase_error(action): guests = self._GuestDbOperator.get_migrated_guest_list() if userid in str(guests): """change comments for vm""" comments = self._GuestDbOperator.get_comments_by_userid( userid) comments['migrated'] = 0 action = "update guest '%s' in database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.update_guest_by_userid(userid, comments=comments) else: """add one record for new vm""" action = "add guest '%s' to database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.add_guest_migrated(userid, meta, net_set) action = "add switches of guest '%s' to database" % userid info = self._vmops.get_definition_info(userid) user_direct = info['user_direct'] for nic_info in user_direct: if nic_info.startswith('NICDEF'): nic_list = nic_info.split() interface = nic_list[1] switch = nic_list[6] with zvmutils.log_and_reraise_sdkbase_error(action): self._NetworkDbOperator.switch_add_record_migrated( userid, interface, switch) LOG.info("Guest %s registered." % userid)
0.001223
def from_path_by_size(dir_path, min_size=0, max_size=1 << 40): """Create a new FileCollection, and select all files that size in a range:: dir_path = "your/path" # select by file size larger than 100MB fc = FileCollection.from_path_by_size( dir_path, min_size=100*1024*1024) # select by file size smaller than 100MB fc = FileCollection.from_path_by_size( dir_path, max_size=100*1024*1024) # select by file size from 1MB to 100MB fc = FileCollection.from_path_by_size( dir_path, min_size=1024*1024, max_size=100*1024*1024) """ def filter(winfile): if (winfile.size_on_disk >= min_size) and \ (winfile.size_on_disk <= max_size): return True else: return False return FileCollection.from_path_by_criterion( dir_path, filter, keepboth=False)
0.006667
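The size criterion can be reproduced with nothing but the standard library; this sketch uses st_size rather than the size_on_disk attribute of the package's winfile objects, which may differ for sparse or compressed files.

import os

def files_in_size_range(dir_path, min_size=0, max_size=1 << 40):
    for entry in os.scandir(dir_path):
        if entry.is_file() and min_size <= entry.stat().st_size <= max_size:
            yield entry.path

for path in files_in_size_range('.', min_size=1024 * 1024):
    print(path)  # files of at least 1MB in the current directory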
def get_crystal_system(self): """ Get the crystal system for the structure, e.g., (triclinic, orthorhombic, cubic, etc.). Returns: (str): Crystal system for structure or None if system cannot be detected. """ n = self._space_group_data["number"] cs = {"triclinic": (1, 2), "monoclinic": (3, 15), "orthorhombic": (16, 74), "tetragonal": (75, 142), "trigonal": (143, 167), "hexagonal": (168, 194), "cubic": (195, 230)} crystal_system = None for k, (lo, hi) in cs.items(): if lo <= n <= hi: crystal_system = k break return crystal_system
0.005442
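The space-group-number lookup is easy to verify standalone (pymatgen itself is not needed for this part).

def crystal_system_from_number(n):
    cs = {"triclinic": (1, 2), "monoclinic": (3, 15),
          "orthorhombic": (16, 74), "tetragonal": (75, 142),
          "trigonal": (143, 167), "hexagonal": (168, 194),
          "cubic": (195, 230)}
    for name, (lo, hi) in cs.items():
        if lo <= n <= hi:
            return name
    return None

print(crystal_system_from_number(225))  # 'cubic' (e.g. rock salt, Fm-3m)
print(crystal_system_from_number(14))   # 'monoclinic' (P2_1/c)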
def _sanitize_instance_name(name, max_length): """Instance names must start with a lowercase letter. All following characters must be a dash, lowercase letter, or digit. """ name = str(name).lower() # make all letters lowercase name = re.sub(r'[^-a-z0-9]', '', name) # remove invalid characters # remove non-lowercase letters from the beginning name = re.sub(r'^[^a-z]+', '', name) name = name[:max_length] name = re.sub(r'-+$', '', name) # remove hyphens from the end return name
0.001825
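Worked examples of the sanitizer, matching GCE-style rules (lowercase first character, bounded length, no trailing hyphen); `re` must be imported for the function to run.

print(_sanitize_instance_name('3-My_Instance-', 63))  # 'myinstance'
print(_sanitize_instance_name('Web Server #2', 63))   # 'webserver2'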
def close_socket(self): """Correctly closes the underlying docker-py socket.""" try: self.docker_py_sock._sock.close() # pylint: disable=protected-access except AttributeError: pass self.docker_py_sock.close()
0.010753
def DbPutDeviceAttributeProperty(self, argin): """ Create/Update device attribute property(ies) in database :param argin: Str[0] = Device name Str[1] = Number of attributes Str[2] = Attribute name Str[3] = Number of properties Str[4] = Property name Str[5] = Property value ..... :type: tango.DevVarStringArray :return: :rtype: tango.DevVoid """ self._log.debug("In DbPutDeviceAttributeProperty()") device_name = argin[0] nb_attributes = int(argin[1]) self.db.put_device_attribute_property(device_name, nb_attributes, argin[2:])
0.004688
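Shape of the argin array per the docstring; device and attribute names are illustrative, and the command_inout call is the assumed client-side entry point, shown commented.

argin = [
    'sys/tg_test/1',   # device name
    '1',               # number of attributes
    'double_scalar',   # attribute name
    '1',               # number of properties for this attribute
    'min_alarm',       # property name
    '-100',            # property value
]
# db_device.command_inout('DbPutDeviceAttributeProperty', argin)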
def config(self, body): """Configure the email provider. Args: body (dict): Please see: https://auth0.com/docs/api/v2#!/Emails/post_provider """ return self.client.post(self._url(), data=body)
0.012658
def watch(limit): """watch scan rates across the cluster""" period = 5.0 prev = db.db() prev_totals = None while True: click.clear() time.sleep(period) cur = db.db() cur.data['gkrate'] = {} progress = [] prev_buckets = {b.bucket_id: b for b in prev.buckets()} totals = {'scanned': 0, 'krate': 0, 'lrate': 0, 'bucket_id': 'totals'} for b in cur.buckets(): if not b.scanned: continue totals['scanned'] += b.scanned totals['krate'] += b.krate totals['lrate'] += b.lrate if b.bucket_id not in prev_buckets: b.data['gkrate'][b.bucket_id] = b.scanned / period elif b.scanned == prev_buckets[b.bucket_id].scanned: continue else: b.data['gkrate'][b.bucket_id] = ( b.scanned - prev_buckets[b.bucket_id].scanned) / period progress.append(b) if prev_totals is None: totals['gkrate'] = '...' else: totals['gkrate'] = (totals['scanned'] - prev_totals['scanned']) / period prev = cur prev_totals = totals progress = sorted(progress, key=lambda x: x.gkrate, reverse=True) if limit: progress = progress[:limit] progress.insert(0, Bag(totals)) format_plain( progress, None, explicit_only=True, keys=['bucket_id', 'scanned', 'gkrate', 'lrate', 'krate'])
0.001295
def _init(): """Dynamically import engines that initialize successfully.""" import importlib import os import re filenames = os.listdir(os.path.dirname(__file__)) module_names = set() for filename in filenames: match = re.match(r'^(?P<name>[A-Z_a-z]\w*)\.py[co]?$', filename) if match: module_names.add(match.group('name')) for module_name in module_names: try: module = importlib.import_module('.' + module_name, __name__) except ImportError: continue for name, member in module.__dict__.items(): if not isinstance(member, type): # skip non-new-style classes continue if not issubclass(member, Engine): # skip non-subclasses of Engine continue if member is Engine: # skip "abstract" class Engine continue try: handle = member.handle except AttributeError: continue engines[handle] = member
0.000906
def ask(question, default=True, exact=False): """Ask the question in y/n form and return True/False. If you don't want a default 'yes', set default to None (or to False if you want a default 'no'). With exact=True, we want to get a literal 'yes' or 'no', at least when it does not match the default. """ if AUTO_RESPONSE: if default is None: msg = ("The question '%s' requires a manual answer, but " "we're running in --no-input mode.") msg = msg % question raise RuntimeError(msg) logger.debug("Auto-responding '%s' to the question below." % ( default and "yes" or "no")) logger.debug(question) return default while True: yn = 'y/n' if default is True: yn = 'Y/n' if default is False: yn = 'y/N' q = question + " (%s)? " % yn answer = input(q) if not answer and default is not None: return default if exact and answer.lower() not in ('yes', 'no'): print("Please explicitly answer yes/no in full " "(or accept the default)") continue if answer: answer = answer[0].lower() if answer == 'y': return True if answer == 'n': return False # We really want an answer. print('Please explicitly answer y/n')
0.00064
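ask() can be exercised non-interactively by stubbing input(); this sketch assumes the module-level AUTO_RESPONSE flag is falsy.

import builtins

answers = iter(['', 'q', 'n'])
builtins.input = lambda prompt='': next(answers)
print(ask('Continue'))           # '' -> accepts the default -> True
print(ask('Delete everything'))  # 'q' is rejected, then 'n' -> False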
def rename_agents(self, stmts): """Return a list of mapped statements with updated agent names. Creates a new list of statements without modifying the original list. The agents in a statement should be renamed if the grounding map has updated their db_refs. If an agent contains a FamPlex grounding, the FamPlex ID is used as a name. Otherwise if it contains a Uniprot ID, an attempt is made to find the associated HGNC gene name. If one can be found it is used as the agent name and the associated HGNC ID is added as an entry to the db_refs. If neither a FamPlex ID nor an HGNC name can be found, falls back to the original name. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` List of statements whose Agents need their names updated. Returns ------- mapped_stmts : list of :py:class:`indra.statements.Statement` A new list of Statements with updated Agent names """ # Make a copy of the stmts mapped_stmts = deepcopy(stmts) # Iterate over the statements for stmt in mapped_stmts: # Iterate over the agents for agent in stmt.agent_list(): if agent is None: continue # If there's a FamPlex ID, prefer that for the name if agent.db_refs.get('FPLX'): agent.name = agent.db_refs.get('FPLX') # Take a HGNC name from Uniprot next elif agent.db_refs.get('UP'): # Try for the gene name gene_name = uniprot_client.get_gene_name( agent.db_refs.get('UP'), web_fallback=False) if gene_name: agent.name = gene_name hgnc_id = hgnc_client.get_hgnc_id(gene_name) if hgnc_id: agent.db_refs['HGNC'] = hgnc_id # Falling back to the TEXT db_ref for the name is # intentionally disabled; if no grounding is found, # the original agent name is kept. return mapped_stmts
0.001575
async def get_xy_address(self, xy): '''Get address of the agent residing in *xy* coordinate, or ``None`` if no such agent is in this multi-environment. ''' manager_addr = self.get_xy_environment(xy) if manager_addr is None: return None else: r_agent = await self._env.connect(manager_addr) xy_addr = await r_agent.get_xy_address(xy) return xy_addr
0.004515
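A hedged async usage sketch; menv stands for an already-constructed multi-environment instance, which is not shown here.

import asyncio

async def main(menv):
    addr = await menv.get_xy_address((2, 3))
    print(addr)  # None if no agent occupies (2, 3)

# asyncio.run(main(menv))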
def delete_room(room, reason=''): """Deletes a MUC room from the XMPP server.""" if room.custom_server: # rooms hosted on a custom server are not managed here return def _delete_room(xmpp): muc = xmpp.plugin['xep_0045'] muc.destroy(room.jid, reason=reason) current_plugin.logger.info('Deleting room %s', room.jid) _execute_xmpp(_delete_room) delete_logs(room)
0.002817
def toc(tt, return_msg=False, write_msg=True, verbose=None): """ Similar to MATLAB's toc. SeeAlso: ut.tic """ if verbose is not None: write_msg = verbose (msg, start_time) = tt elapsed = (default_timer() - start_time) if (not return_msg) and write_msg and msg is not None: sys.stdout.write('...toc(%.4fs, ' % elapsed + '"' + str(msg) + '"' + ')\n') if return_msg: return msg else: return elapsed
0.004202
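A tic/toc pairing sketch; toc expects the (msg, start_time) tuple that ut.tic presumably returns, so one is built by hand here.

from timeit import default_timer

tt = ('heavy computation', default_timer())
sum(i * i for i in range(10**6))
elapsed = toc(tt)   # prints: ...toc(0.XXXXs, "heavy computation")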
def write_new_config(self, updates): """ Given a list of updates, write the updates out to the provided configuration file. Args: updates (list): List of Update objects. """ with open(self._new_config, 'w') as config_file: for update in updates: line = '{0}=={1} # The installed version is: {2}\n'.format( update.name, update.new_version, update.current_version ) config_file.write(line)
0.003509
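A minimal sketch of driving write_new_config; the Update stand-in and the _new_config attribute are assumptions read off the method body, not the real classes.

from collections import namedtuple

Update = namedtuple('Update', 'name new_version current_version')
updates = [Update('requests', '2.31.0', '2.25.1')]
# writer._new_config = 'requirements.new.txt'
# writer.write_new_config(updates)
# -> requests==2.31.0 # The installed version is: 2.25.1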
def get_api_publisher(self, social_user): """Return a callable that posts on the user's behalf via the VK API, e.g. wall.post and other methods: https://vk.com/dev.php?method=wall.post """ def _post(**kwargs): api = self.get_api(social_user) # other calls are possible too, e.g.: # api.group.getInfo(uids='your_group_id', fields='members_count') response = api.wall.post(**kwargs) return response return _post
0.011547