def min_count(self, n=1):
    word_count = {w: c for w, c in iteritems(self.word_count) if c >= n}
    return CountedVocabulary(word_count=word_count)
Returns a vocabulary after eliminating the words that appear fewer than `n` times.

Args:
    n (integer): specifies the minimum word frequency allowed.
def is_transition_matrix(T, tol=1e-10):
    if T.ndim != 2:
        return False
    if T.shape[0] != T.shape[1]:
        return False
    dim = T.shape[0]
    X = np.abs(T) - T
    x = np.sum(T, axis=1)
    return np.abs(x - np.ones(dim)).max() < dim * tol and X.max() < 2.0 * tol
Tests whether T is a transition matrix

Parameters
----------
T : ndarray shape=(n, n)
    matrix to test
tol : float
    tolerance to check with

Returns
-------
Truth value : bool
    True, if all elements are in interval [0, 1] and each row of T sums up to 1.
    False, otherwise
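A minimal doctest-style sketch of the check above (assuming numpy is imported as ``np``, as the function body requires):

>>> import numpy as np
>>> T = np.array([[0.9, 0.1],
...               [0.4, 0.6]])
>>> is_transition_matrix(T)
True
>>> is_transition_matrix(np.array([[0.9, 0.2], [0.4, 0.6]]))
False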
def InteractiveShell(self, cmd=None, strip_cmd=True, delim=None, strip_delim=True):
    conn = self._get_service_connection(b'shell:')
    return self.protocol_handler.InteractiveShellCommand(
        conn, cmd=cmd, strip_cmd=strip_cmd, delim=delim, strip_delim=strip_delim)
Get stdout from the currently open interactive shell and optionally run a command
on the device, returning all output.

Args:
    cmd: Optional. Command to run on the target.
    strip_cmd: Optional (default True). Strip command name from stdout.
    delim: Optional. Delimiter to look for in the output to know when to stop
        expecting more output (usually the shell prompt).
    strip_delim: Optional (default True). Strip the provided delimiter from the output.

Returns:
    The stdout from the shell command.
def _expire(self):
    del self.map.addr[self.name]
    self.map.notify("addrmap_expired", *[self.name], **{})
Expiry callback, invoked via callLater.
def apply(self, reboot=False):
    self.root.use_virtual_addresses = True
    self.root.manage.manage = True
    self.root.mode = 'new'
    self.root.init_boot = reboot
    self.client.set_profile(self.root.get_json())
Apply the configuration to iRMC.
def format_field(self, field):
    if field is None:
        return "NULL"
    elif isinstance(field, TypeError):
        return "TypeError"
    elif isinstance(field, Decimal):
        if field % 1 == 0:
            return str(int(field))
        return str(float(field))
    elif isinstance(field, set):
        return "(" + ", ".join([self.format_field(v) for v in field]) + ")"
    elif isinstance(field, datetime):
        return field.isoformat()
    elif isinstance(field, timedelta):
        rd = relativedelta(seconds=int(field.total_seconds()),
                           microseconds=field.microseconds)
        return delta_to_str(rd)
    elif isinstance(field, Binary):
        return "<Binary %d>" % len(field.value)
    pretty = repr(field)
    if pretty.startswith("u'"):
        return pretty[1:]
    return pretty
Format a single Dynamo value
def fit_transform(self, X, y=None, **fit_params):
    # ``X`` holds the labels to encode (sklearn-style signature); the original
    # assigned the checked array to an unused ``y`` and then encoded raw ``X``.
    X = column_or_1d(X, warn=True)
    _check_numpy_unicode_bug(X)
    self.classes_, X = np.unique(X, return_inverse=True)
    return X
Fit label encoder and return encoded labels

Parameters
----------
X : array-like of shape [n_samples]
    Target values.

Returns
-------
X : array-like of shape [n_samples]
    Encoded labels.
def generate_hypergraph(num_nodes, num_edges, r=0):
    random_graph = hypergraph()

    nodes = list(map(str, range(num_nodes)))
    random_graph.add_nodes(nodes)

    edges = list(map(str, range(num_nodes, num_nodes + num_edges)))
    random_graph.add_hyperedges(edges)

    if r == 0:
        for e in edges:
            for n in nodes:
                if choice([True, False]):
                    random_graph.link(n, e)
    else:
        for e in edges:
            shuffle(nodes)
            for i in range(r):
                random_graph.link(nodes[i], e)

    return random_graph
Create a random hypergraph.

@type  num_nodes: number
@param num_nodes: Number of nodes.
@type  num_edges: number
@param num_edges: Number of edges.
@type  r: number
@param r: If nonzero, every edge links exactly r nodes (uniform edges of size r);
    if 0, node-edge links are chosen at random.
def updateDynamics(self):
    history_vars_string = ''
    arg_names = list(getArgNames(self.calcDynamics))
    if 'self' in arg_names:
        arg_names.remove('self')
    for name in arg_names:
        history_vars_string += ' \'' + name + '\' : self.' + name + '_hist,'
    update_dict = eval('{' + history_vars_string + '}')
    dynamics = self.calcDynamics(**update_dict)
    for var_name in self.dyn_vars:
        this_obj = getattr(dynamics, var_name)
        for this_type in self.agents:
            setattr(this_type, var_name, this_obj)
    return dynamics
Calculates a new "aggregate dynamic rule" using the history of variables named in
track_vars, and distributes this rule to AgentTypes in agents.

Parameters
----------
None

Returns
-------
dynamics : instance
    The new "aggregate dynamic rule" that agents believe in and act on.
    Should have attributes named in dyn_vars.
def stop_condition(self, condition):
    for cond_format in self._known_conditions:
        try:
            cond = cond_format.FromString(condition)
            self.stop_conditions.append(cond)
            return
        except ArgumentError:
            continue

    raise ArgumentError("Stop condition could not be processed by any known StopCondition type",
                        condition=condition,
                        suggestion="It may be mistyped or otherwise invalid.")
Add a stop condition to this simulation.

Stop conditions are specified as strings and parsed into the appropriate
internal structures.

Args:
    condition (str): a string description of the stop condition
def _get_jar_fp(self):
    if os.path.exists(self._command):
        return self._command
    elif 'RDP_JAR_PATH' in environ:
        return getenv('RDP_JAR_PATH')
    else:
        return None
Returns the full path to the JAR file. If the JAR file cannot be found in the current directory and the environment variable RDP_JAR_PATH is not set, returns None.
def _get_lb(self, lb_or_id):
    if isinstance(lb_or_id, CloudLoadBalancer):
        ret = lb_or_id
    else:
        ret = self.get(lb_or_id)
    return ret
Accepts either a loadbalancer or the ID of a loadbalancer, and returns the CloudLoadBalancer instance.
def IsFile(self):
    if self._stat_object is None:
        self._stat_object = self._GetStat()
    if self._stat_object is not None:
        self.entry_type = self._stat_object.type
    return self.entry_type == definitions.FILE_ENTRY_TYPE_FILE
Determines if the file entry is a file.

Returns:
    bool: True if the file entry is a file.
def update_utxoset(self, transaction):
    spent_outputs = [spent_output for spent_output in transaction.spent_outputs]
    if spent_outputs:
        self.delete_unspent_outputs(*spent_outputs)
    self.store_unspent_outputs(
        *[utxo._asdict() for utxo in transaction.unspent_outputs])
Update the UTXO set given ``transaction``. That is, remove the outputs that the
given ``transaction`` spends, and add the outputs that the given ``transaction``
creates.

Args:
    transaction (:obj:`~bigchaindb.models.Transaction`): A new transaction
        incoming into the system for which the UTXO set needs to be updated.
def score(ID, sign, lon):
    info = getInfo(sign, lon)
    dignities = [dign for (dign, objID) in info.items() if objID == ID]
    return sum([SCORES[dign] for dign in dignities])
Returns the score of an object on a sign and longitude.
def phrase_replace(self, replace_dict):
    def r(tokens):
        text = ' ' + ' '.join(tokens)
        for k, v in replace_dict.items():
            text = text.replace(" " + k + " ", " " + v + " ")
        return text.split()

    self.stems = list(map(r, self.stems))
Replace phrases with a single token; the mapping is defined in replace_dict.
def add_f77_to_env(env):
    try:
        F77Suffixes = env['F77FILESUFFIXES']
    except KeyError:
        F77Suffixes = ['.f77']

    try:
        F77PPSuffixes = env['F77PPFILESUFFIXES']
    except KeyError:
        F77PPSuffixes = []

    DialectAddToEnv(env, "F77", F77Suffixes, F77PPSuffixes)
Add Builders and construction variables for f77 to an Environment.
def run_id(self):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
Run name without whitespace
def logon():
    session = requests.session()
    payload = {"jsonrpc": "2.0",
               "id": "ID0",
               "method": "login",
               "params": [DETAILS['username'], DETAILS['password'], DETAILS['auth'], True]}
    logon_response = session.post(DETAILS['url'], data=json.dumps(payload), verify=False)
    if logon_response.status_code != 200:
        log.error("Error logging into proxy. HTTP Error code: %s",
                  logon_response.status_code)
        raise salt.exceptions.CommandExecutionError(
            "Did not receive a valid response from host.")
    try:
        cookies = {'sslng_csrf_token': logon_response.cookies['sslng_csrf_token'],
                   'sslng_session_id': logon_response.cookies['sslng_session_id']}
        csrf_token = logon_response.cookies['sslng_csrf_token']
    except KeyError:
        log.error("Unable to authenticate to the bluecoat_sslv proxy.")
        raise salt.exceptions.CommandExecutionError(
            "Did not receive a valid response from host.")
    return session, cookies, csrf_token
Logs into the bluecoat_sslv device and returns the session cookies.
def percentile(values=None, percentile=None):
    if values in [None, tuple(), []] or len(values) < 1:
        raise InsufficientData(
            "Expected a sequence of at least 1 integer, got {0!r}".format(values))
    if percentile is None:
        raise ValueError("Expected a percentile choice, got {0}".format(percentile))

    sorted_values = sorted(values)
    rank = len(values) * percentile / 100

    if rank > 0:
        index = rank - 1
        if index < 0:
            return sorted_values[0]
    else:
        index = rank

    if index % 1 == 0:
        return sorted_values[int(index)]
    else:
        fractional = index % 1
        integer = int(index - fractional)
        lower = sorted_values[integer]
        higher = sorted_values[integer + 1]
        return lower + fractional * (higher - lower)
Calculates a simplified weighted average percentile
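A doctest-style usage sketch (the input values are illustrative):

>>> percentile([15, 20, 35, 40, 50], 30)
17.5
>>> percentile([15, 20, 35, 40, 50], 40)
20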
def validate_type(prop, value, expected):
    if value is not None and not isinstance(value, expected):
        _validation_error(prop, type(value).__name__, None, expected)
Default validation for all types
def structs2pandas(structs):
    try:
        import pandas
        records = list(structs2records(structs))
        df = pandas.DataFrame.from_records(records)
        if 'id' in df:
            df["id"] = df["id"].apply(str.rstrip)
        return df
    except ImportError:
        return structs
convert ctypes structure or structure array to pandas data frame
def _influxdb_url(self):
    url = "{0}/db/{1}/series".format(self.influxdb.url.rstrip('/'),
                                     self.config.dbname)
    if self.influxdb.user and self.influxdb.password:
        url += "?u={0}&p={1}".format(self.influxdb.user, self.influxdb.password)
    return url
Return REST API URL to access time series.
def Bmatrix(C):
    L, Q = eigh(C)
    # force very small eigenvalues to have some minimum non-zero value
    minL = 1e-9 * L[-1]
    L[L < minL] = minL
    S = np.diag(1 / np.sqrt(L))
    B = Q.dot(S)
    return B
Calculate a matrix which is effectively the square root of the correlation matrix C

Parameters
----------
C : 2d array
    A covariance matrix

Returns
-------
B : 2d array
    A matrix B such that B.dot(B') = inv(C)
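A quick numerical check of the property in the docstring; a sketch assuming ``eigh`` is ``numpy.linalg.eigh`` (the original module may pull it from scipy instead):

>>> import numpy as np
>>> from numpy.linalg import eigh, inv
>>> C = np.array([[2.0, 0.5],
...               [0.5, 1.0]])
>>> B = Bmatrix(C)
>>> np.allclose(B.dot(B.T), inv(C))
True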
def get_comments(self, sharekey=None):
    if not sharekey:
        raise Exception(
            "You must specify a sharekey of the file whose "
            "comments you want to retrieve.")

    endpoint = '/api/sharedfile/{0}/comments'.format(sharekey)
    data = self._make_request("GET", endpoint=endpoint)
    return [Comment.NewFromJSON(c) for c in data['comments']]
Retrieve comments on a SharedFile

Args:
    sharekey (str): Sharekey for the file from which you want to return
        the set of comments.

Returns:
    List of Comment objects.
def dependencies_as_list(self):
    dependencies = []
    for dependency in self.dependencies:
        dependencies.append(dependency.name)
    return dependencies
Returns a list of dependency names.
def get_gains_losses(changes):
    res = {'gains': [], 'losses': []}
    for change in changes:
        if change > 0:
            res['gains'].append(change)
        else:
            res['losses'].append(change * -1)
    logger.debug('Gains: {0}'.format(res['gains']))
    logger.debug('Losses: {0}'.format(res['losses']))
    return res
Categorizes changes into gains and losses

Args:
    changes: List of floats of price changes between entries in JSON.

Returns:
    Dict of changes with keys 'gains' and 'losses'.
    All values are positive.
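A doctest-style usage sketch (the price changes are illustrative):

>>> get_gains_losses([0.5, -1.25, 2.0])
{'gains': [0.5, 2.0], 'losses': [1.25]}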
def urlretrieve(url, dest, write_mode="w"):
    response = urllib2.urlopen(url)
    mkdir_recursive(os.path.dirname(dest))
    with open(dest, write_mode) as f:
        # The ``with`` block closes the file; the original redundant
        # ``f.close()`` has been dropped.
        f.write(response.read())
Save a file to disk from a given URL.
def upload(client, source_dir):
    print('')
    print('upload store listings')
    print('---------------------')

    listings_folder = os.path.join(source_dir, 'listings')
    langfolders = filter(os.path.isdir, list_dir_abspath(listings_folder))
    for language_dir in langfolders:
        language = os.path.basename(language_dir)
        with open(os.path.join(language_dir, 'listing.json')) as listings_file:
            listing = json.load(listings_file)
        listing_response = client.update(
            'listings', language=language, body=listing)
        print(' Listing for language %s was updated.'
              % listing_response['language'])
Upload listing files found in the source_dir folder hierarchy.
def from_coffeescript(cls, func, v_func, args={}):
    compiled = nodejs_compile(func, lang="coffeescript", file="???")
    if "error" in compiled:
        raise CompilationError(compiled.error)

    v_compiled = nodejs_compile(v_func, lang="coffeescript", file="???")
    if "error" in v_compiled:
        raise CompilationError(v_compiled.error)

    return cls(func=compiled.code, v_func=v_compiled.code, args=args)
Create a ``CustomJSTransform`` instance from a pair of CoffeeScript snippets.
The function bodies are translated to JavaScript functions using node and
therefore require return statements.

The ``func`` snippet namespace will contain the variable ``x`` (the untransformed
value) at render time. The ``v_func`` snippet namespace will contain the variable
``xs`` (the untransformed vector) at render time.

Example:

.. code-block:: coffeescript

    func = "return Math.cos(x)"
    v_func = "return [Math.cos(x) for x in xs]"

    transform = CustomJSTransform.from_coffeescript(func, v_func)

Args:
    func (str) : a coffeescript snippet to transform a single ``x`` value
    v_func (str) : a coffeescript snippet function to transform a vector ``xs``

Returns:
    CustomJSTransform
def copy(self):
    doppel = type(self)(
        self.unpack, self.apply, self.collect, self.reduce,
        apply_empty_slots=self.apply_empty_slots,
        extraneous=self.extraneous,
        ignore_empty_string=self.ignore_empty_string,
        ignore_none=self.ignore_none,
        visit_filter=self.visit_filter,
    )
    for x in self.cue:
        doppel.push(x)
    doppel.seen = self.seen
    return doppel
Be sure to implement this method when sub-classing, otherwise you will lose any specialization context.
def _related(self, concept):
    return (concept.hypernyms() +
            concept.hyponyms() +
            concept.member_meronyms() +
            concept.substance_meronyms() +
            concept.part_meronyms() +
            concept.member_holonyms() +
            concept.substance_holonyms() +
            concept.part_holonyms() +
            concept.attributes() +
            concept.also_sees() +
            concept.similar_tos())
Returns related concepts for a concept.
def update_team(self, slug):
    if self._org:
        if not self._org.has_team(slug):
            return self._org.update()
        return self._org.update_team(slug)
    return False
Trigger update and cache invalidation for the team identified by the given
`slug`, if any. Returns `True` if the update was successful, `False` otherwise.

:param slug: GitHub 'slug' name for the team to be updated.
def get_right_word(self, cursor=None):
    if cursor is None:
        cursor = self._editor.textCursor()
    cursor.movePosition(QtGui.QTextCursor.WordRight,
                        QtGui.QTextCursor.KeepAnchor)
    return cursor.selectedText().strip()
Gets the word on the right of the text cursor.

:param cursor: QTextCursor where the search will start.
:return: The word that is on the right of the text cursor.
def run(self, args):
    jlink = self.create_jlink(args)

    if args.downgrade:
        if not jlink.firmware_newer():
            print('DLL firmware is not older than J-Link firmware.')
        else:
            jlink.invalidate_firmware()
            try:
                jlink.update_firmware()
            except pylink.JLinkException as e:
                jlink = self.create_jlink(args)
            print('Firmware Downgraded: %s' % jlink.firmware_version)
    elif args.upgrade:
        if not jlink.firmware_outdated():
            print('DLL firmware is not newer than J-Link firmware.')
        else:
            try:
                jlink.update_firmware()
            except pylink.JLinkException as e:
                jlink = self.create_jlink(args)
            print('Firmware Updated: %s' % jlink.firmware_version)

    return None
Runs the firmware command.

Args:
    self (FirmwareCommand): the ``FirmwareCommand`` instance
    args (Namespace): arguments to parse

Returns:
    ``None``
def max_zoom(self):
    zoom_levels = [map_layer.max_zoom for map_layer in self.layers]
    return max(zoom_levels)
Get the maximal zoom level of all layers.

Returns:
    int: the maximum of all zoom levels of all layers

Raises:
    ValueError: if no layers exist
async def _cancel_payloads(self):
    for task in self._tasks:
        task.cancel()
        await asyncio.sleep(0)
    for task in self._tasks:
        while not task.done():
            await asyncio.sleep(0.1)
            task.cancel()
Cancel all remaining payloads
def ensure_float_vector(F, require_order=False):
    if is_float_vector(F):
        return F
    elif is_float(F):
        return np.array([F])
    elif is_iterable_of_float(F):
        return np.array(F)
    elif isinstance(F, set):
        if require_order:
            raise TypeError('Argument is an unordered set, but I require an ordered array of floats')
        else:
            lF = list(F)
            if is_list_of_float(lF):
                return np.array(lF)
    else:
        raise TypeError('Argument is not of a type that is convertible to an array of floats.')
Ensures that F is a numpy array of floats

If F is already a numpy array of floats, F is returned (not copied!).
Otherwise, checks if the argument can be converted to an array of floats and does that.

Parameters
----------
F : float, or iterable of float
require_order : bool
    If False (default), an unordered set is accepted. If True, a set is not accepted.

Returns
-------
arr : ndarray(n)
    numpy array with the floats contained in the argument
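A doctest-style usage sketch (assumes the ``is_float``/``is_float_vector``/``is_iterable_of_float`` helper predicates are available alongside the function):

>>> ensure_float_vector(1.5)
array([1.5])
>>> ensure_float_vector([0.1, 0.2, 0.3])
array([0.1, 0.2, 0.3])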
def moveto(self, x, y, scale=1):
    self.root.set("transform", "translate(%s, %s) scale(%s) %s" %
                  (x, y, scale, self.root.get("transform") or ''))
Move and scale element.

Parameters
----------
x, y : float
    displacement in x and y coordinates in user units ('px').
scale : float
    scaling factor. To scale down use scale < 1, to scale up use scale > 1.
    For no scaling use scale = 1.
def custom_objective(self, objective_function, *args):
    result = sco.minimize(
        objective_function,
        x0=self.initial_guess,
        args=args,
        method="SLSQP",
        bounds=self.bounds,
        constraints=self.constraints,
    )
    self.weights = result["x"]
    return dict(zip(self.tickers, self.weights))
Optimise some objective function. While an implicit requirement is that the
function can be optimised via a quadratic optimiser, this is not enforced.
Thus there is a decent chance of silent failure.

:param objective_function: function which maps (weight, args) -> cost
:type objective_function: function with signature (np.ndarray, args) -> float
:return: asset weights that optimise the custom objective
:rtype: dict
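A hedged usage sketch: ``ef`` stands in for an already-constructed optimizer object with ``tickers``, ``initial_guess``, ``bounds``, and ``constraints`` set; the variance objective and ``cov_matrix`` are illustrative, not part of the source:

import numpy as np

def total_variance(weights, cov_matrix):
    # Simple quadratic objective: w' * Sigma * w.
    return np.dot(weights, np.dot(cov_matrix, weights))

weights = ef.custom_objective(total_variance, cov_matrix)  # hypothetical ef, cov_matrix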
def extract_fields(cls, schema):
    for part in parse.PARSE_RE.split(schema):
        if not part or part == '{{' or part == '}}':
            continue
        elif part[0] == '{':
            yield cls.parse(part)
Extract fields in a parse expression schema.

:param schema: Parse expression schema/format to use (as string).
:return: Generator for fields in schema (as Field objects).
def dummynum(character, name):
    num = 0
    for nodename in character.node:
        nodename = str(nodename)
        if not nodename.startswith(name):
            continue
        try:
            # Take the numeric suffix after the prefix. (The original used
            # ``lstrip(name)``, which strips by character set, not by prefix.)
            nodenum = int(nodename[len(name):])
        except ValueError:
            continue
        num = max((nodenum, num))
    return num
Scan the character's nodes whose names start with ``name`` and return the
highest numeric suffix found.
def remove_stage_from_deployed_values(key, filename):
    final_values = {}
    try:
        with open(filename, 'r') as f:
            final_values = json.load(f)
    except IOError:
        return
    try:
        del final_values[key]
        with open(filename, 'wb') as f:
            data = serialize_to_json(final_values)
            f.write(data.encode('utf-8'))
    except KeyError:
        pass
Delete a top level key from the deployed JSON file.
def install():
    ceph_dir = "/etc/ceph"
    if not os.path.exists(ceph_dir):
        os.mkdir(ceph_dir)
    apt_install('ceph-common', fatal=True)
Basic Ceph client installation.
def _print_pgfplot_libs_message(data):
    pgfplotslibs = ",".join(list(data["pgfplots libs"]))
    tikzlibs = ",".join(list(data["tikz libs"]))

    print(70 * "=")
    print("Please add the following lines to your LaTeX preamble:\n")
    print("\\usepackage[utf8]{inputenc}")
    print("\\usepackage{fontspec} % This line only for XeLaTeX and LuaLaTeX")
    print("\\usepackage{pgfplots}")
    if tikzlibs:
        print("\\usetikzlibrary{" + tikzlibs + "}")
    if pgfplotslibs:
        print("\\usepgfplotslibrary{" + pgfplotslibs + "}")
    print(70 * "=")
    return
Prints message to screen indicating the use of PGFPlots and its libraries.
def get_signing_policy(signing_policy_name):
    signing_policy = _get_signing_policy(signing_policy_name)
    if not signing_policy:
        return 'Signing policy {0} does not exist.'.format(signing_policy_name)

    if isinstance(signing_policy, list):
        dict_ = {}
        for item in signing_policy:
            dict_.update(item)
        signing_policy = dict_

    try:
        del signing_policy['signing_private_key']
    except KeyError:
        pass

    try:
        signing_policy['signing_cert'] = get_pem_entry(
            signing_policy['signing_cert'], 'CERTIFICATE')
    except KeyError:
        log.debug('Unable to get "certificate" PEM entry')

    return signing_policy
Returns the details of a named signing policy, including the text of the public
key that will be used to sign it. Does not return the private key.

CLI Example:

.. code-block:: bash

    salt '*' x509.get_signing_policy www
def get_computers(self, filterTerm=None, domain=None):
    cur = self.conn.cursor()

    if self.is_computer_valid(filterTerm):
        cur.execute("SELECT * FROM computers WHERE id=? LIMIT 1", [filterTerm])
    elif filterTerm == 'dc':
        if domain:
            cur.execute("SELECT * FROM computers WHERE dc=1 AND LOWER(domain)=LOWER(?)", [domain])
        else:
            cur.execute("SELECT * FROM computers WHERE dc=1")
    elif filterTerm and filterTerm != "":
        cur.execute("SELECT * FROM computers WHERE ip LIKE ? OR LOWER(hostname) LIKE LOWER(?)",
                    ['%{}%'.format(filterTerm), '%{}%'.format(filterTerm)])
    else:
        cur.execute("SELECT * FROM computers")

    results = cur.fetchall()
    cur.close()
    return results
Return hosts from the database.
def getConstraints(self):
    constraints = lock_and_call(
        lambda: self._impl.getConstraints(),
        self._lock
    )
    return EntityMap(constraints, Constraint)
Get all the constraints declared.
def end_of_directory(self, succeeded=True, update_listing=False, cache_to_disc=True):
    self._update_listing = update_listing
    if not self._end_of_directory:
        self._end_of_directory = True
        return xbmcplugin.endOfDirectory(self.handle, succeeded,
                                         update_listing, cache_to_disc)
    assert False, 'Already called endOfDirectory.'
Wrapper for xbmcplugin.endOfDirectory. Records state in self._end_of_directory. Typically it is not necessary to call this method directly, as calling :meth:`~xbmcswift2.Plugin.finish` will call this method.
def new_port():
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    for i in range(12042, 16042):
        try:
            s.bind(('127.0.0.1', i))
            s.close()
            return i
        except socket.error:
            pass
    raise Exception('No local port available')
Find a free local port. Note that the socket is closed again before returning,
so another process could grab the port in the meantime.
def set_parameter_vector(self, vector, include_frozen=False):
    v = self.parameter_vector
    if include_frozen:
        v[:] = vector
    else:
        v[self.unfrozen_mask] = vector
    self.parameter_vector = v
    self.dirty = True
Set the parameter values to the given vector

Args:
    vector (array[vector_size] or array[full_size]): The target parameter
        vector. This must be in the same order as ``parameter_names`` and it
        should only include frozen parameters if ``include_frozen`` is ``True``.
    include_frozen (Optional[bool]): Should the frozen parameters be included
        in the given vector? (default: ``False``)
def printmp(msg):
    filler = (80 - len(msg)) * ' '
    print(msg + filler, end='\r')
    sys.stdout.flush()
Print temporarily, until next print overrides it.
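A small usage sketch (the loop and messages are illustrative):

import time

for i in range(5):
    printmp('Processing item {0} of 5'.format(i + 1))  # overwrites the previous message
    time.sleep(0.2)
print('Done.' + ' ' * 74)  # pad so the last temporary message is fully cleared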
def remove_zero_normals(self):
    points_of_interest = np.where(np.linalg.norm(self._data, axis=0) != 0.0)[0]
    self._data = self._data[:, points_of_interest]
Removes normal vectors with a zero magnitude.

Note
----
This returns nothing and updates the NormalCloud in-place.
def get_model(name, **kwargs):
    models = {'resnet18_v1': resnet18_v1, 'resnet34_v1': resnet34_v1,
              'resnet50_v1': resnet50_v1, 'resnet101_v1': resnet101_v1,
              'resnet152_v1': resnet152_v1, 'resnet18_v2': resnet18_v2,
              'resnet34_v2': resnet34_v2, 'resnet50_v2': resnet50_v2,
              'resnet101_v2': resnet101_v2, 'resnet152_v2': resnet152_v2,
              'vgg11': vgg11, 'vgg13': vgg13, 'vgg16': vgg16, 'vgg19': vgg19,
              'vgg11_bn': vgg11_bn, 'vgg13_bn': vgg13_bn,
              'vgg16_bn': vgg16_bn, 'vgg19_bn': vgg19_bn,
              'alexnet': alexnet,
              'densenet121': densenet121, 'densenet161': densenet161,
              'densenet169': densenet169, 'densenet201': densenet201,
              'squeezenet1.0': squeezenet1_0, 'squeezenet1.1': squeezenet1_1,
              'inceptionv3': inception_v3,
              'mobilenet1.0': mobilenet1_0, 'mobilenet0.75': mobilenet0_75,
              'mobilenet0.5': mobilenet0_5, 'mobilenet0.25': mobilenet0_25,
              'mobilenetv2_1.0': mobilenet_v2_1_0, 'mobilenetv2_0.75': mobilenet_v2_0_75,
              'mobilenetv2_0.5': mobilenet_v2_0_5, 'mobilenetv2_0.25': mobilenet_v2_0_25}
    name = name.lower()
    if name not in models:
        raise ValueError(
            'Model %s is not supported. Available options are\n\t%s' % (
                name, '\n\t'.join(sorted(models.keys()))))
    return models[name](**kwargs)
Returns a pre-defined model by name

Parameters
----------
name : str
    Name of the model.
pretrained : bool
    Whether to load the pretrained weights for model.
classes : int
    Number of classes for the output layer.
ctx : Context, default CPU
    The context in which to load the pretrained weights.
root : str, default '$MXNET_HOME/models'
    Location for keeping the model parameters.

Returns
-------
HybridBlock
    The model.
def _agate_to_schema(self, agate_table, column_override):
    bq_schema = []
    for idx, col_name in enumerate(agate_table.column_names):
        inferred_type = self.convert_agate_type(agate_table, idx)
        type_ = column_override.get(col_name, inferred_type)
        bq_schema.append(
            google.cloud.bigquery.SchemaField(col_name, type_))
    return bq_schema
Convert agate.Table with column names to a list of bigquery schemas.
def tag_syntax_maltparser(self):
    if not self.__syntactic_parser or not isinstance(self.__syntactic_parser, MaltParser):
        self.__syntactic_parser = MaltParser()
    return self.tag_syntax()
Changes default syntactic parser to MaltParser, performs syntactic analysis, and stores the results in the layer named LAYER_CONLL.
def read_until(self, expected_commands, timeout):
    msg = timeouts.loop_until_timeout_or_valid(
        timeout, lambda: self.read_message(timeout),
        lambda m: m.command in expected_commands, 0)
    if msg.command not in expected_commands:
        raise usb_exceptions.AdbTimeoutError(
            'Timed out establishing connection, waiting for: %s',
            expected_commands)
    return msg
Read AdbMessages from this transport until we get an expected command.

The ADB protocol specifies that before a successful CNXN handshake, any
other packets must be ignored, so this method provides the ability to
ignore unwanted commands. It's primarily used during the initial
connection to the device. See Read() for more details, including more
exceptions that may be raised.

Args:
    expected_commands: Iterable of expected command responses, like
        ('CNXN', 'AUTH').
    timeout: timeouts.PolledTimeout object to use for timeout.

Returns:
    The ADB message received that matched one of expected_commands.

Raises:
    AdbProtocolError: If timeout expires between reads, this can happen
        if we are getting spammed with unexpected commands.
def match_route(self, reqpath):
    route_dicts = [routes for _, routes in self.api.http.routes.items()][0]
    routes = [route for route, _ in route_dicts.items()]

    if reqpath in routes:
        return reqpath

    for route in routes:
        # Turn path parameters such as '/{id}' into a '\w+' wildcard before
        # matching (the replacement must be escaped as r'/\\w+' so re.sub
        # emits a literal backslash-w into the pattern).
        if re.match(re.sub(r'/{[^{}]+}', r'/\\w+', route) + '$', reqpath):
            return route

    return reqpath
Match a request with parameters to its corresponding route.
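An illustrative doctest-style sketch; the ``router`` object and registered routes shown are hypothetical:

>>> router.match_route('/users/42')    # matches the '/users/{id}' template
'/users/{id}'
>>> router.match_route('/health')      # exact route names are returned as-is
'/health'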
def providerIsAuthoritative(providerID, canonicalID):
    lastbang = canonicalID.rindex('!')
    parent = canonicalID[:lastbang]
    return parent == providerID
Is this provider ID authoritative for this XRI?

@returntype: bool
def iflatten(L):
    for sublist in L:
        if hasattr(sublist, '__iter__'):
            for item in iflatten(sublist):
                yield item
        else:
            yield sublist
Flatten arbitrarily nested iterables, implemented as a recursive generator.
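A doctest-style usage sketch (note that strings also define ``__iter__`` in Python 3, so structures containing strings would be descended into as well):

>>> list(iflatten([1, [2, [3, 4]], 5]))
[1, 2, 3, 4, 5]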
def coders(self):
    return (PrimitiveTypeCoder, TensorFlowCoder, FunctionCoder, ListCoder,
            DictCoder, SliceCoder, ParameterCoder, ParamListCoder,
            ParameterizedCoder, TransformCoder, PriorCoder)
List of default supported coders. The first coder in the list has the highest priority.
def visit_GpxModel(self, gpx_model, *args, **kwargs):
    result = OrderedDict()

    put_scalar = lambda name, json_name=None: self.optional_attribute_scalar(
        result, gpx_model, name, json_name)
    put_list = lambda name, json_name=None: self.optional_attribute_list(
        result, gpx_model, name, json_name)

    put_scalar('creator')
    put_scalar('metadata')
    put_list('waypoints')
    put_list('routes')
    put_list('tracks')
    put_list('extensions')
    return result
Render a GPXModel as a single JSON structure.
def get_execution(self, id_execution, access_token=None, user_id=None):
    if access_token:
        self.req.credential.set_token(access_token)
    if user_id:
        self.req.credential.set_user_id(user_id)
    if not self.check_credentials():
        raise CredentialsError('credentials invalid')
    execution = self.req.get('/Executions/' + id_execution)
    if "codeId" in execution:
        execution['code'] = self.get_code(execution["codeId"])
    return execution
Get an execution by its id.
def lint(fmt='colorized'):
    if fmt == 'html':
        outfile = 'pylint_report.html'
        local('pylint -f %s davies > %s || true' % (fmt, outfile))
        local('open %s' % outfile)
    else:
        local('pylint -f %s davies || true' % fmt)
Run verbose PyLint on source. Optionally specify fmt=html for HTML output.
def getconnections(self, vhost=None):
    "Return accepted connections, optionally filtered by vhost"
    if vhost is None:
        return list(self.managed_connections)
    else:
        return [c for c in self.managed_connections if c.protocol.vhost == vhost]
Return accepted connections, optionally filtered by vhost
def _select_default_algorithm(analysis):
    if not analysis or analysis == "Standard":
        return "Standard", {"aligner": "bwa", "platform": "illumina",
                            "quality_format": "Standard",
                            "recalibrate": False, "realign": False,
                            "mark_duplicates": True, "variantcaller": False}
    elif "variant" in analysis:
        try:
            config, _ = template.name_to_config(analysis)
        except ValueError:
            config, _ = template.name_to_config("freebayes-variant")
        return "variant", config["details"][0]["algorithm"]
    else:
        return analysis, {}
Provide default algorithm sections from templates or standard
def get_config_load_path(conf_path=None):
    if conf_path is None:
        if os.path.isfile('andes.conf'):
            conf_path = 'andes.conf'
        home_dir = os.path.expanduser('~')
        if os.path.isfile(os.path.join(home_dir, '.andes', 'andes.conf')):
            conf_path = os.path.join(home_dir, '.andes', 'andes.conf')

    if conf_path is not None:
        logger.debug('Found config file at {}.'.format(conf_path))

    return conf_path
Return config file load path

Priority:

1. conf_path
2. current directory
3. home directory

Parameters
----------
conf_path : str, optional
    An explicit path to the config file.

Returns
-------
str or None
    The resolved config file path, or None if no config file is found.
def badge_width(self):
    return self.get_text_width(' ' + ' ' * int(float(self.num_padding_chars) * 2.0)) \
        + self.label_width + self.value_width
The total width of badge.

>>> badge = Badge('pylint', '5', font_name='DejaVu Sans,Verdana,Geneva,sans-serif',
...               font_size=11)
>>> badge.badge_width
91
def __folder_size(self, path):
    ret = 0
    for f in scandir(path):
        # The original condition ``(f.name != '.' or f.name != '..')`` was
        # always true; check both names properly.
        if f.is_dir() and f.name not in ('.', '..'):
            ret += self.__folder_size(os.path.join(path, f.name))
        else:
            try:
                ret += f.stat().st_size
            except OSError:
                pass
    return ret
Return the size of the directory given by path.

path: <string>
def listen(cls, event, func):
    signal(event).connect(func, sender=cls)
Add a callback for a signal against the class
def get_forecast(self, latitude, longitude):
    reply = self.http_get(self.url_builder(latitude, longitude))
    self.forecast = json.loads(reply)
    for item in self.forecast.keys():
        setattr(self, item, self.forecast[item])
Gets the weather data from darksky api and stores it in the respective dictionaries if available. This function should be used to fetch weather information.
def load_configuration_from_file(directory, args):
    args = copy.copy(args)

    directory_or_file = directory
    if args.config is not None:
        directory_or_file = args.config

    options = _get_options(directory_or_file, debug=args.debug)

    args.report = options.get('report', args.report)
    threshold_dictionary = docutils.frontend.OptionParser.thresholds
    args.report = int(threshold_dictionary.get(args.report, args.report))

    args.ignore_language = get_and_split(
        options, 'ignore_language', args.ignore_language)
    args.ignore_messages = options.get(
        'ignore_messages', args.ignore_messages)
    args.ignore_directives = get_and_split(
        options, 'ignore_directives', args.ignore_directives)
    args.ignore_substitutions = get_and_split(
        options, 'ignore_substitutions', args.ignore_substitutions)
    args.ignore_roles = get_and_split(
        options, 'ignore_roles', args.ignore_roles)

    return args
Return new ``args`` with configuration loaded from file.
def mda_count(self):
    self.open()
    mda = lvm_pv_get_mda_count(self.handle)
    self.close()
    return mda
Returns the physical volume mda count.
def _check(self):
    import time
    if self.expires_in is None or self.authenticated is None:
        return False
    current = time.time()
    expire_time = self.authenticated + self.expires_in
    return expire_time > current
Check whether the access token is still valid, i.e. not yet expired.
def _check_unit(new_unit, old_unit):
    try:
        new_unit.physical_type
    except AttributeError:
        raise UnitMismatch("The provided unit (%s) has no physical type. Was expecting a unit for %s"
                           % (new_unit, old_unit.physical_type))

    if new_unit.physical_type != old_unit.physical_type:
        raise UnitMismatch("Physical type mismatch: you provided a unit for %s instead of a unit for %s"
                           % (new_unit.physical_type, old_unit.physical_type))
Check that the new unit is compatible with the old unit for the quantity described.

:param new_unit: instance of astropy.units.Unit
:param old_unit: instance of astropy.units.Unit
:return: nothing
def get_trips(self, authentication_info, start, end):
    import requests

    if (authentication_info is None or
            not authentication_info.is_valid()):
        return []

    data_url = "https://api.ritassist.nl/api/trips/GetTrips"
    query = f"?equipmentId={self.identifier}&from={start}&to={end}&extendedInfo=True"
    header = authentication_info.create_header()
    response = requests.get(data_url + query, headers=header)
    trips = response.json()

    result = []
    for trip_json in trips:
        trip = Trip(trip_json)
        result.append(trip)
    return result
Get trips for this device between start and end.
def block_comment(solver, start, end):
    text, pos = solver.parse_state
    length = len(text)
    startlen = len(start)
    endlen = len(end)
    if pos == length:
        return
    if not text[pos:].startswith(start):
        return
    level = 1
    p = pos + 1
    while p < length:
        if text[p:].startswith(end):
            level -= 1
            p += endlen
            if level == 0:
                break
        elif text[p:].startswith(start):
            level += 1
            p += startlen
        else:
            p += 1
    else:
        return
    solver.parse_state = text, p
    yield cont, text[pos:p]
    solver.parse_state = text, pos
Embeddable block comment (nesting-aware).
def register_hit_type(self, title, description, reward, duration_hours,
                      keywords, qualifications):
    reward = str(reward)
    duration_secs = int(datetime.timedelta(hours=duration_hours).total_seconds())
    hit_type = self.mturk.create_hit_type(
        Title=title,
        Description=description,
        Reward=reward,
        AssignmentDurationInSeconds=duration_secs,
        Keywords=",".join(keywords),
        AutoApprovalDelayInSeconds=0,
        QualificationRequirements=qualifications,
    )
    return hit_type["HITTypeId"]
Register HIT Type for this HIT and return the type's ID, which is required for creating a HIT.
def delete_leaderboard_named(self, leaderboard_name):
    pipeline = self.redis_connection.pipeline()
    pipeline.delete(leaderboard_name)
    pipeline.delete(self._member_data_key(leaderboard_name))
    pipeline.delete(self._ties_leaderboard_key(leaderboard_name))
    pipeline.execute()
Delete the named leaderboard.

@param leaderboard_name [String] Name of the leaderboard.
def get_dir_meta(fp, atts):
    atts.reverse()
    dirname = os.path.split(fp)[0]
    meta = dirname.split('/')
    res = {}
    try:
        for key in atts:
            res[key] = meta.pop()
    except IndexError:
        raise PathError(dirname)
    return res
Pop path information and map to supplied atts
def normalizeToTag(val):
    try:
        val = val.upper()
    except AttributeError:
        raise KeyError("{} is not a tag or name string".format(val))
    if val not in tagsAndNameSetUpper:
        raise KeyError("{} is not a tag or name string".format(val))
    else:
        try:
            return fullToTagDictUpper[val]
        except KeyError:
            return val
Converts tags or full names to 2 character tags, case insensitive

# Parameters

_val_: `str`

> A two character string giving the tag or its full name

# Returns

`str`

> The short name of _val_
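A hedged doctest-style sketch; the concrete mapping depends on the module-level ``fullToTagDictUpper`` table, so the outputs shown are assumptions:

>>> normalizeToTag('title')   # full name, case insensitive
'TI'
>>> normalizeToTag('TI')      # already a tag
'TI'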
def numRegisteredForRole(self, role, includeTemporaryRegs=False):
    count = self.eventregistration_set.filter(
        cancelled=False, dropIn=False, role=role).count()
    if includeTemporaryRegs:
        count += self.temporaryeventregistration_set.filter(
            dropIn=False, role=role).exclude(
            registration__expirationDate__lte=timezone.now()).count()
    return count
Accepts a DanceRole object and returns the number of registrations of that role.
def _cb_inform_interface_change(self, msg):
    self._logger.debug('cb_inform_interface_change(%s)', msg)
    self._interface_changed.set()
Update the sensors and requests available.
def filterlet(function=bool, iterable=None):
    if iterable is None:
        return _filterlet(function=function)
    else:
        return iterlet(elem for elem in iterable if function(elem))
Filter chunks of data from an iterable or a chain

:param function: callable selecting valid elements
:type function: callable
:param iterable: object providing chunks via iteration
:type iterable: iterable or None

For any chunk in ``iterable`` or the chain, it is passed on only if
``function(chunk)`` returns true.

.. code::

    chain = iterlet(range(10)) >> filterlet(lambda chunk: chunk % 2 == 0)

    for value in chain:
        print(value)  # prints 0, 2, 4, 6, 8
def benchmark_forward(self):
    self._setup()

    def f():
        self._forward()
        self.mod_ext.synchronize(**self.ext_kwargs)

    f()
    self.forward_stat = self._calc_benchmark_stat(f)
Benchmark forward execution.
def emphasis(node):
    o = nodes.emphasis()
    for n in MarkDown(node):
        o += n
    return o
An italicized section
def cube(data, xcoords=None, ycoords=None, chcoords=None,
         scalarcoords=None, datacoords=None, attrs=None, name=None):
    cube = xr.DataArray(data, dims=('x', 'y', 'ch'), attrs=attrs, name=name)
    cube.dcc._initcoords()

    if xcoords is not None:
        cube.coords.update({key: ('x', xcoords[key]) for key in xcoords})
    if ycoords is not None:
        cube.coords.update({key: ('y', ycoords[key]) for key in ycoords})
    if chcoords is not None:
        cube.coords.update({key: ('ch', chcoords[key]) for key in chcoords})
    if datacoords is not None:
        cube.coords.update(
            {key: (('x', 'y', 'ch'), datacoords[key]) for key in datacoords})
    if scalarcoords is not None:
        cube.coords.update(scalarcoords)

    return cube
Create a cube as an instance of xarray.DataArray with Decode accessor.

Args:
    data (numpy.ndarray): 3D (x x y x channel) array.
    xcoords (dict, optional): Dictionary of arrays that label x axis.
    ycoords (dict, optional): Dictionary of arrays that label y axis.
    chcoords (dict, optional): Dictionary of arrays that label channel axis.
    scalarcoords (dict, optional): Dictionary of values that don't label any axes (point-like).
    datacoords (dict, optional): Dictionary of arrays that label x, y, and channel axes.
    attrs (dict, optional): Dictionary of attributes to add to the instance.
    name (str, optional): String that names the instance.

Returns:
    decode cube (decode.cube): Decode cube.
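A minimal usage sketch (assuming the decode package is installed so the ``dcc`` accessor is registered on xarray; the coordinate values are illustrative):

import numpy as np

data = np.zeros((10, 10, 4))
c = cube(data, chcoords={'fsig': np.arange(4)}, name='example')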
def call(self, method, *args):
    try:
        response = getattr(self.client.service, method)(*args)
    except (URLError, SSLError) as e:
        log.exception('Failed to connect to responsys service')
        raise ConnectError("Request to service timed out")
    except WebFault as web_fault:
        fault_name = getattr(web_fault.fault, 'faultstring', None)
        error = str(web_fault.fault.detail)
        if fault_name == 'TableFault':
            raise TableFault(error)
        if fault_name == 'ListFault':
            raise ListFault(error)
        if fault_name == 'API_LIMIT_EXCEEDED':
            raise ApiLimitError(error)
        if fault_name == 'AccountFault':
            raise AccountFault(error)
        raise ServiceError(web_fault.fault, web_fault.document)
    return response
Calls the service method defined with the arguments provided
def success_rate(self):
    if self.successes + self.fails == 0:
        success_rate = 0
    else:
        total_attempts = self.successes + self.fails
        success_rate = (self.successes * 100 / total_attempts)
    return success_rate
Returns a float with the rate of success from all the logged results.
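A worked doctest-style sketch with a hypothetical tracker instance ``stats`` (3 successes out of 4 attempts yields 75.0 under Python 3 true division):

>>> stats.successes, stats.fails = 3, 1
>>> stats.success_rate()
75.0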
def __populate_archive_files(self):
    self.archive_files = []

    for _ptr in _bfd.archive_list_files(self._ptr):
        try:
            self.archive_files.append(Bfd(_ptr))
        except BfdException as err:  # modernized from Python 2 except syntax
            pass
Store the list of files inside an archive file.
def domain_unblock(self, domain=None):
    params = self.__generate_params(locals())
    self.__api_request('DELETE', '/api/v1/domain_blocks', params)
Remove a domain block for the logged-in user.
def get_max_distance_from_start(latlon_track):
    latlon_list = []
    for idx, point in enumerate(latlon_track):
        lat = latlon_track[idx][1]
        lon = latlon_track[idx][2]
        alt = latlon_track[idx][3]
        latlon_list.append([lat, lon, alt])
    start_position = latlon_list[0]
    max_distance = 0
    for position in latlon_list:
        distance = gps_distance(start_position, position)
        if distance > max_distance:
            max_distance = distance
    return max_distance
Returns the radius of an entire GPS track. Used to calculate whether or not the
entire sequence was just stationary video. Takes a sequence of points as input.
def _get_url(cls, administration_id: int, resource_path: str):
    url = urljoin(cls.base_url, '%s/' % cls.version)
    if administration_id is not None:
        url = urljoin(url, '%s/' % administration_id)
    url = urljoin(url, '%s.json' % resource_path)
    return url
Builds the URL to the API endpoint specified by the given parameters.

:param administration_id: The ID of the administration (may be None).
:param resource_path: The path to the resource.
:return: The absolute URL to the endpoint.
def sum(self, only_valid=True) -> ErrorValue:
    if not only_valid:
        mask = 1
    else:
        mask = self.mask
    return ErrorValue((self.intensity * mask).sum(),
                      ((self.error * mask) ** 2).sum() ** 0.5)
Calculate the sum of pixels, not counting the masked ones if only_valid is True.
def getNextSample(self, V):
    W, WProb = self.drawRankingPlakettLuce(V)
    VProb = self.calcProbOfVFromW(V, W)
    acceptanceRatio = self.calcAcceptanceRatio(V, W)
    prob = min(1.0, acceptanceRatio * (VProb / WProb))
    if random.random() <= prob:
        V = W
    return V
Given a ranking over the candidates, generate a new ranking by assigning each
candidate at position i a Plackett-Luce weight of phi^i and drawing a new ranking.

:ivar list<int> V: Contains integer representations of each candidate in order
    of their ranking in a vote, from first to last.
def sync(remote='origin', branch='master'):
    pull(branch, remote)
    push(branch, remote)
    print(cyan("Git Synced!"))
git pull and then push commits
def cmd_list_identities(self, *args):
    identities = self._get_available_identities()
    print('Available identities:')
    for x in identities:
        print(' - {}'.format(x))
List the available identities to use for signing.
def squeeze(self, axis=None):
    axis = (self._AXIS_NAMES if axis is None else
            (self._get_axis_number(axis),))
    try:
        return self.iloc[
            tuple(0 if i in axis and len(a) == 1 else slice(None)
                  for i, a in enumerate(self.axes))]
    except Exception:
        return self
Squeeze 1 dimensional axis objects into scalars.

Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a Series.
Otherwise the object is unchanged.

This method is most useful when you don't know if your object is a Series
or DataFrame, but you do know it has just a single column. In that case
you can safely call `squeeze` to ensure you have a Series.

Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
    A specific axis to squeeze. By default, all length-1 axes are squeezed.

    .. versionadded:: 0.20.0

Returns
-------
DataFrame, Series, or scalar
    The projection after squeezing `axis` or all the axes.

See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a single-column DataFrame.

Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])

Slicing might produce a Series with a single value:

>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0    2
dtype: int64

>>> even_primes.squeeze()
2

Squeezing objects with more than one value in every axis does nothing:

>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1    3
2    5
3    7
dtype: int64

>>> odd_primes.squeeze()
1    3
2    5
3    7
dtype: int64

Squeezing is even more effective when used with DataFrames.

>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
   a  b
0  1  2
1  3  4

Slicing a single column will produce a DataFrame with the columns
having only one value:

>>> df_a = df[['a']]
>>> df_a
   a
0  1
1  3

So the columns can be squeezed down, resulting in a Series:

>>> df_a.squeeze('columns')
0    1
1    3
Name: a, dtype: int64

Slicing a single row from a single column will produce a single
scalar DataFrame:

>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
   a
0  1

Squeezing the rows produces a single scalar Series:

>>> df_0a.squeeze('rows')
a    1
Name: 0, dtype: int64

Squeezing all axes will project directly into a scalar:

>>> df_0a.squeeze()
1
def is_deletion(reference_bases, alternate_bases):
    if len(alternate_bases) > 1:
        return False

    if is_indel(reference_bases, alternate_bases):
        alt_allele = alternate_bases[0]
        if alt_allele is None:
            return True
        if len(reference_bases) > len(alt_allele):
            return True
        else:
            return False
    else:
        return False
Return whether or not the INDEL is a deletion
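A doctest-style sketch (assumes the companion ``is_indel`` helper, which the function delegates to, treats these length changes as INDELs):

>>> is_deletion('ACT', ['A'])   # REF longer than ALT: a deletion
True
>>> is_deletion('A', ['ACT'])   # REF shorter than ALT: an insertion
False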
def show_help(self):
    self.main_stacked_widget.setCurrentIndex(0)

    header = html_header()
    footer = html_footer()

    string = header

    message = impact_report_help()
    string += message.to_html()
    string += footer

    self.help_web_view.setHtml(string)
Show usage info to the user. .. versionadded: 4.3.0