Dataset columns: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, values 52.6k–1.16M)
Load a text file as an array of lines. Args: filename: Path to the input file. Returns: An array of strings, each representing an individual line.
def load_lines(filename):
    with open(filename, 'r', encoding='utf-8') as f:
        return [line.rstrip('\n') for line in f.readlines()]
869,103
Save an array of lines to a file. Args: lines: An array of strings that will be saved as individual lines. filename: Path to the output file.
def save_lines(lines, filename):
    with open(filename, 'w', encoding='utf-8') as f:
        f.write('\n'.join(lines))
869,104
r""" Args: sourcecode (str): Returns: str: json formatted ipython notebook code cell CommandLine: python -m ibeis.templates.generate_notebook --exec-code_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> sourcecode = notebook_cells.timestamp_distribution[1] >>> sourcecode = notebook_cells.initialize[1] >>> result = code_cell(sourcecode) >>> print(result)
def code_cell(sourcecode):
    import utool as ut
    sourcecode = ut.remove_codeblock_syntax_sentinals(sourcecode)
    cell_header = ut.codeblock(  # (cell template string elided)
    )
    cell_footer = ut.codeblock(  # (cell template string elided)
    )
    if sourcecode is None:
        source_line_repr = ' []\n'
    else:
        lines = sourcecode.split('\n')
        line_list = [line + '\n' if count < len(lines) else line
                     for count, line in enumerate(lines, start=1)]
        repr_line_list = [repr_single_for_md(line) for line in line_list]
        source_line_repr = ut.indent(',\n'.join(repr_line_list), ' ' * 2)
        source_line_repr = ' [\n' + source_line_repr + '\n ]\n'
    return cell_header + source_line_repr + cell_footer
869,194
r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> markdown = '# Title' >>> result = markdown_cell(markdown) >>> print(result)
def markdown_cell(markdown):
    import utool as ut
    markdown_header = ut.codeblock(  # (cell template string elided)
    )
    markdown_footer = ut.codeblock(  # (cell template string elided)
    )
    return (markdown_header + '\n' +
            ut.indent(repr_single_for_md(markdown), ' ' * 2) +
            '\n' + markdown_footer)
869,195
r""" Args: s (str): Returns: str: str_repr CommandLine: python -m ibeis.templates.generate_notebook --exec-repr_single_for_md --show Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> s = '#HTML(\'<iframe src="%s" width=700 height=350></iframe>\' % pdf_fpath)' >>> result = repr_single_for_md(s) >>> print(result)
def repr_single_for_md(s):
    import utool as ut
    import re
    str_repr = ut.reprfunc(s)
    if str_repr.startswith('\''):
        dq = ut.DOUBLE_QUOTE
        sq = ut.SINGLE_QUOTE
        bs = ut.BACKSLASH
        dq_, sq_, bs_ = list(map(re.escape, [dq, sq, bs]))
        no_bs = ut.negative_lookbehind(bs_)
        inside = str_repr[1:-1]
        # Escape double quotes
        inside = re.sub(no_bs + r'"', r'\\"', inside)
        # Unescape single quotes
        inside = re.sub(no_bs + bs_ + r"'", r"'", inside)
        # Append external double quotes
        str_repr = '"' + inside + '"'
        return str_repr
    else:
        return '"' + ut.reprfunc('\'' + s)[2:]
869,197
Check if all required services are provided Args: services: List with the service names which are required Returns: List with missing services
def get_missing_services(self, services):
    required_services = set(services)
    provided_services = set(self._services.keys())
    missing_services = required_services.difference(provided_services)
    return sorted(missing_services)
869,291
Description: Calculates the Mean Squared Error (MSE) of an estimation on flat numpy ndarrays. Parameters: mean: actual value (numpy ndarray) estimator: estimated value of the mean (numpy ndarray)
def mse(mean, estimator):
    return np.mean((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0)
869,382
Description: Calculates the Sum of Squared Errors (SSE) of an estimation on flat numpy ndarrays. Parameters: mean: actual value (numpy ndarray) estimator: estimated value of the mean (numpy ndarray)
def sse(mean, estimator):
    return np.sum((np.asarray(estimator) - np.asarray(mean)) ** 2, axis=0)
869,383
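A quick numeric check of the two error metrics above (a sketch; both functions assume numpy is imported as np):

import numpy as np

actual = np.array([1.0, 2.0, 3.0])
estimate = np.array([1.5, 2.0, 2.0])

# squared errors are [0.25, 0.0, 1.0]
print(sse(actual, estimate))  # 1.25   (sum)
print(mse(actual, estimate))  # 0.4166...  (mean: 1.25 / 3)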
Description: Computes an additional value for the objective function value when used in an unconstrained optimization formulation. Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) c: constant multiplier scaling factor of the returned term
def uncons_term(params, c):
    return (c * ((np.sum(params[1:5]) - 1) ** 2)) + (c * ((np.sum(params[5:]) - 1) ** 2))
869,404
Description: Top 2 alternatives 12 moment conditions objective function Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) moments: values of the moment conditions from the data (numpy ndarray)
def top2_reduced(params, moments):
    params = np.asarray(params)
    alpha = params[0]
    a = params[1:5]
    b = params[5:]
    p = np.asarray(moments)
    p1 = alpha * a + (1 - alpha) * b - p[:4]
    p21 = alpha * a[0] * a[2:] / (1 - a[0]) + (1 - alpha) * b[0] * b[2:] / (1 - b[0]) - p[4:6]
    p22 = alpha * a[1] * np.hstack((a[0], a[3])) / (1 - a[1]) + (1 - alpha) * b[1] * np.hstack((b[0], b[3])) / (1 - b[1]) - p[6:8]
    p23 = alpha * a[2] * a[:2] / (1 - a[2]) + (1 - alpha) * b[2] * b[:2] / (1 - b[2]) - p[8:10]
    p24 = alpha * a[3] * a[1:3] / (1 - a[3]) + (1 - alpha) * b[3] * b[1:3] / (1 - b[3]) - p[10:]
    allp = np.concatenate((p1, p21, p22, p23, p24))
    return np.sum(allp ** 2)
869,405
Description: Top 3 alternatives 16 moment conditions objective function Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) moments: values of the moment conditions from the data (numpy ndarray)
def top3_reduced(params, moments):
    params = np.asarray(params)
    alpha = params[0]
    a = params[1:5]
    b = params[5:]
    p = np.asarray(moments)
    p1 = alpha * a + (1 - alpha) * b - p[:4]
    p21 = alpha * a[0] * a[2:] / (1 - a[0]) + (1 - alpha) * b[0] * b[2:] / (1 - b[0]) - p[4:6]
    p22 = alpha * a[1] * np.hstack((a[0], a[3])) / (1 - a[1]) + (1 - alpha) * b[1] * np.hstack((b[0], b[3])) / (1 - b[1]) - p[6:8]
    p23 = alpha * a[2] * a[:2] / (1 - a[2]) + (1 - alpha) * b[2] * b[:2] / (1 - b[2]) - p[8:10]
    p24 = alpha * a[3] * a[1:3] / (1 - a[3]) + (1 - alpha) * b[3] * b[1:3] / (1 - b[3]) - p[10:12]
    p3 = np.array([
        alpha * a[0] * a[2] * a[3] / (1 - a[2]) / (a[0] + a[1]) + (1 - alpha) * b[0] * b[2] * b[3] / (1 - b[2]) / (b[0] + b[1]) - p[12],
        alpha * a[0] * a[1] * a[3] / (1 - a[3]) / (a[1] + a[2]) + (1 - alpha) * b[0] * b[1] * b[3] / (1 - b[3]) / (b[1] + b[2]) - p[13],
        alpha * a[0] * a[1] * a[2] / (1 - a[0]) / (a[3] + a[2]) + (1 - alpha) * b[0] * b[1] * b[2] / (1 - b[0]) / (b[3] + b[2]) - p[14],
        alpha * a[2] * a[1] * a[3] / (1 - a[1]) / (a[0] + a[3]) + (1 - alpha) * b[2] * b[1] * b[3] / (1 - b[1]) / (b[0] + b[3]) - p[15]
    ])
    allp = np.concatenate((p1, p21, p22, p23, p24, p3))
    return np.sum(allp ** 2)
869,406
Description: Top m - 1 alternatives m(m - 1) + 2m moment conditions objective function Parameters: params: all parameters for the Plackett-Luce mixture model (numpy ndarray) moments: values of the moment conditions from the data (numpy ndarray)
def top3_full(params, moments):
    params = np.asarray(params)        # convert to array
    alpha = params[0]                  # first parameter is the alpha value
    half = int((len(params) - 1) / 2)  # assuming 2 mixtures
    a = params[1:half + 1]             # first mixture
    b = params[half + 1:]              # second mixture
    p = np.asarray(moments)
    p1 = list(alpha * a + (1 - alpha) * b - p[:half])
    p2 = []
    for i in range(0, half):
        # alpha times the score of a given point in mixture one, multiplied by
        # each of the other scores, divided by the sum of the other values.
        # Each of these top-two Plackett-Luce values is added to the same
        # values from the other mixture, then the moment value is subtracted
        # for that top-two event of the vote.
        p1 += list(alpha * a[i] * np.hstack((a[:i], a[i + 1:])) / (1 - a[i])
                   + (1 - alpha) * b[i] * np.hstack((b[:i], b[i + 1:])) / (1 - b[i])
                   - p[half + (half - 1) * i:half + (half - 1) * (i + 1)])
    # iterate through each value in each mixture
    for i in range(0, half):
        # begin with the alpha values for the given mixture
        num_a = alpha
        num_b = 1 - alpha
        for j in range(0, half):
            # this eventually multiplies all values into its alpha
            num_a *= a[j]
            num_b *= b[j]
            # divide by the sum of the other values
            if j > i:
                num_a /= np.sum(np.concatenate((a[j:], a[:i])))
                num_b /= np.sum(np.concatenate((b[j:], b[:i])))
            elif j < i:
                num_a /= np.sum(a[j:i])
                num_b /= np.sum(b[j:i])
        p2.append(num_a + num_b - p[half + (half * (half - 1)) + i])
    p3 = np.array(p2)
    allp = np.concatenate((p1, p3))
    return np.sum(allp ** 2)
869,407
Rename the root timer (regardless of current timing level). Args: name (any): Identifier, passed through str() Returns: str: Implemented identifier.
def rename_root(name):
    name = str(name)
    f.root.name = name
    f.root.times.name = name
    return name
869,418
Adjust the root timer save_itrs setting, such as for use in multiprocessing, when a root timer may become a parallel subdivision (see subdivide()). Args: setting (bool): Save individual iterations data, passed through bool() Returns: bool: Implemented setting value.
def set_save_itrs_root(setting):
    setting = bool(setting)
    f.root.times.save_itrs = setting
    return setting
869,419
Register stamps with the root timer (see subdivision()). Args: rgstr_stamps (list, tuple): Collection of identifiers, passed through set(), then each is passed through str(). Returns: list: Implemented registered stamp collection.
def rgstr_stamps_root(rgstr_stamps):
    rgstr_stamps = sanitize_rgstr_stamps(rgstr_stamps)
    f.root.rgstr_stamps = rgstr_stamps
    return rgstr_stamps
869,420
Description: Generate a Plackett-Luce dataset and save it to disk. Parameters: n: number of votes to generate m: number of alternatives outfile: open file object to which the dataset is written useDirichlet: boolean flag to use the Dirichlet distribution
def _generate_pl_dataset(n, m, outfile, useDirichlet):
    gamma, votes = generate_pl_dataset(n, m, useDirichlet)
    outfile.write(str(len(gamma)) + ',' + str(len(votes)) + '\n')
    outfile.write(','.join(map(str, gamma)) + '\n')
    for vote in votes:
        outfile.write(','.join(map(str, vote)) + '\n')
    return (gamma, votes)
869,427
Description: Generate a Plackett-Luce dataset and return the parameters and votes Parameters: n: number of votes to generate m: number of alternatives useDirichlet: boolean flag to use the Dirichlet distribution
def generate_pl_dataset(n, m, useDirichlet=True):
    gamma = None
    if useDirichlet:
        gamma = np.random.dirichlet(np.ones(m))
    else:
        gamma = np.random.rand(m)
        gamma /= np.sum(gamma)  # normalize sum to 1.0 (not needed for Dirichlet)
    votes = []
    for i in range(n):  # generate a vote for every agent
        votes.append(draw_pl_vote(m, gamma))
    return (gamma, votes)
869,428
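A minimal usage sketch (assuming numpy is available and draw_pl_vote, shown further below, is in scope):

import numpy as np

np.random.seed(0)  # for reproducibility
gamma, votes = generate_pl_dataset(n=5, m=4)
print(gamma)     # 4 strengths summing to 1.0
print(votes[0])  # one full ranking of the alternatives, e.g. [2, 0, 3, 1]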
Description: Read from disk a Plackett-Luce dataset. Parameters: infile: open file object from which to read the dataset
def read_pl_dataset(infile):
    m, n = [int(i) for i in infile.readline().split(',')]
    gamma = np.array([float(f) for f in infile.readline().split(',')])
    if len(gamma) != m:
        infile.close()
        raise ValueError("malformed file: len(gamma) != m")
    votes = []
    i = 0
    for line in infile:
        vote = [int(v) for v in line.split(',')]
        if len(vote) != m:
            infile.close()
            raise ValueError("malformed file: len(vote) != m")
        votes.append(vote)
        i += 1
    infile.close()
    if i != n:
        raise ValueError("malformed file: number of votes != n")
    return (gamma, np.array(votes))
869,429
Description: Generate a Plackett-Luce vote given the model parameters. Parameters: m: number of alternatives gamma: parameters of the Plackett-Luce model
def draw_pl_vote(m, gamma):
    localgamma = np.copy(gamma)  # work on a copy of gamma
    localalts = np.arange(m)     # enumeration of the candidates
    vote = []
    for j in range(m):  # generate a position in the vote for every alternative
        # transform local gamma into intervals up to 1.0
        localgammaintervals = np.copy(localgamma)
        prev = 0.0
        for k in range(len(localgammaintervals)):
            localgammaintervals[k] += prev
            prev = localgammaintervals[k]
        selection = np.random.random()  # pick a random number
        # selection will fall into one of the gamma intervals
        for l in range(len(localgammaintervals)):  # determine the position
            if selection <= localgammaintervals[l]:
                vote.append(localalts[l])
                localgamma = np.delete(localgamma, l)  # remove that gamma
                localalts = np.delete(localalts, l)    # remove the alternative
                localgamma /= np.sum(localgamma)       # renormalize
                break
    return vote
869,430
Description: Generate a Mixture of 2 Plackett-Luce models dataset and save it to disk. Parameters: n: number of votes to generate m: number of alternatives outfile: open file object to which the dataset is written useDirichlet: boolean flag to use the Dirichlet distribution
def _generate_mix2pl_dataset(n, m, outfile, useDirichlet=True):
    params, votes = generate_mix2pl_dataset(n, m, useDirichlet)
    outfile.write(str(m) + ',' + str(n) + '\n')
    outfile.write(','.join(map(str, params)) + '\n')
    for vote in votes:
        outfile.write(','.join(map(str, vote)) + '\n')
    return (params, votes)
869,431
Description: Read from disk a Mixture of 2 Plackett-Luce models dataset. Parameters: infile: open file object from which to read the dataset numVotes: number of votes to read from the file or all if None
def read_mix2pl_dataset(infile, numVotes=None):
    m, n = [int(i) for i in infile.readline().split(',')]
    if numVotes is not None and n < numVotes:
        raise ValueError("invalid number of votes to read: exceeds file amount")
    params = np.array([float(f) for f in infile.readline().split(',')])
    if len(params) != (2 * m + 1):
        infile.close()
        raise ValueError("malformed file: len(params) != 2*m + 1")
    votes = []
    i = 0
    for line in infile:
        # guard against numVotes=None before comparing (reading all votes)
        if numVotes is not None and i >= numVotes:
            break
        vote = [int(v) for v in line.split(',')]
        if len(vote) != m:
            infile.close()
            raise ValueError("malformed file: len(vote) != m")
        votes.append(vote)
        i += 1
    infile.close()
    return (params, np.array(votes))
869,432
Description: Generate a mixture of 2 Plackett-Luce models dataset and return the parameters and votes. Parameters: n: number of votes to generate m: number of alternatives useDirichlet: boolean flag to use the Dirichlet distribution
def generate_mix2pl_dataset(n, m, useDirichlet=True):
    alpha = np.random.rand()
    gamma1 = None
    gamma2 = None
    if useDirichlet:
        gamma1 = np.random.dirichlet(np.ones(m))
        gamma2 = np.random.dirichlet(np.ones(m))
    else:
        gamma1 = np.random.rand(m)
        gamma1 /= np.sum(gamma1)  # normalize sum to 1.0 (not needed for Dirichlet)
        gamma2 = np.random.rand(m)
        gamma2 /= np.sum(gamma2)  # normalize gamma2 by its own sum
    votes = []
    for i in range(n):
        vote = None
        draw = np.random.rand()
        if draw <= alpha:
            vote = draw_pl_vote(m, gamma1)
        else:  # draw > alpha
            vote = draw_pl_vote(m, gamma2)
        votes.append(vote)
    params = np.hstack((alpha, gamma1, gamma2))
    return (params, votes)
869,433
Description: Initializes the aggregator with the set of alternatives and the number of candidates Parameters: alts_list: the set of integer alternatives (a.k.a candidates)
def __init__(self, alts_list):
    self.alts = alts_list
    self.alts_set = set(alts_list)
    self.m = len(alts_list)
    if len(self.alts) != len(self.alts_set):
        raise ValueError("Alternatives must not contain duplicates")
    self.alts_to_ranks = None  # Maps alternatives to rankings (projective)
    self.ranks_to_alts = None
869,578
Description: Returns the ranking of a given alternative in the computed aggregate ranking. An error is thrown if the alternative does not exist. The ranking is the index in the aggregate ranking, which is 0-indexed. Parameters: alt: the key that represents an alternative
def get_ranking(self, alt):
    if self.alts_to_ranks is None:
        raise ValueError("Aggregate ranking must be created first")
    try:
        rank = self.alts_to_ranks[alt]
        return rank
    except KeyError:
        raise KeyError("No alternative \"{}\" found in ".format(str(alt)) +
                       "the aggregate ranking")
869,579
Description: Takes in the scores of the alternatives in the form alt:score and generates the dictionaries mapping alternatives to rankings and rankings to alternatives. Parameters: alt_scores: dictionary of the scores of every alternative
def create_rank_dicts(self, alt_scores):
    self.alts_to_ranks = dict()
    cur_score = max(alt_scores.values())
    cur_rank = 0
    self.ranks_to_alts = {cur_rank: []}
    for i in sorted(alt_scores.keys(), key=lambda x: -alt_scores[x]):
        if alt_scores[i] == cur_score:
            self.ranks_to_alts[cur_rank].append(i)
        elif alt_scores[i] < cur_score:
            cur_rank += 1
            cur_score = alt_scores[i]
            self.ranks_to_alts[cur_rank] = [i]
        self.alts_to_ranks[i] = cur_rank
869,581
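For illustration, tied scores share a rank (a sketch, assuming agg is an instance of the aggregator class above):

agg.create_rank_dicts({'a': 0.5, 'b': 0.3, 'c': 0.3, 'd': 0.1})
print(agg.alts_to_ranks)  # {'a': 0, 'b': 1, 'c': 1, 'd': 2}
print(agg.ranks_to_alts)  # {0: ['a'], 1: ['b', 'c'], 2: ['d']}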
Handles Datastore response errors according to their documentation. Parameters: error(dict) Returns: int or None: The max number of times this error should be retried or None if it shouldn't. See also: https://cloud.google.com/datastore/docs/concepts/errors
def _max_retries_for_error(self, error):
    status = error.get("status")
    if status == "ABORTED" and get_transactions() > 0:
        # Avoids retrying Conflicts when inside a transaction.
        return None
    return self._MAX_RETRIES.get(status)
869,635
Initialize services without authenticating to Globus Auth. Note: Clients may have reduced functionality without authentication. Arguments: services (str or list of str): The services to initialize clients for. Returns: dict: The clients requested, indexed by service name.
def anonymous_login(services):
    if isinstance(services, str):
        services = [services]

    clients = {}
    # Initialize valid services
    for serv in services:
        try:
            clients[serv] = KNOWN_CLIENTS[serv](http_timeout=STD_TIMEOUT)
        except KeyError:  # No known client
            print("Error: No known client for '{}' service.".format(serv))
        except Exception:  # Other issue, probably auth
            print("Error: Unable to create client for '{}' service.\n"
                  "Anonymous access may not be allowed.".format(serv))
    return clients
869,642
Remove ALL tokens in the token directory. This will force re-authentication to all services. Arguments: token_dir (str): The path to the directory to save tokens in and look for credentials by default. If this argument was given to a ``login()`` function, the same value must be given here to properly logout. **Default**: ``DEFAULT_CRED_PATH``.
def logout(token_dir=DEFAULT_CRED_PATH):
    for f in os.listdir(token_dir):
        if f.endswith("tokens.json"):
            try:
                os.remove(os.path.join(token_dir, f))
            except OSError as e:
                # Eat ENOENT (no such file/dir, tokens already deleted) only,
                # raise any other issue (bad permissions, etc.)
                if e.errno != errno.ENOENT:
                    raise
869,643
Translate a known Globus Search index into the index UUID. The UUID is the proper way to access indices, and will eventually be the only way. This method will return names it cannot disambiguate. Arguments: index_name (str): The name of the index. Returns: str: The UUID of the index. If the index is not known and is not unambiguous, this will be the ``index_name`` unchanged instead.
def translate_index(index_name):
    uuid = SEARCH_INDEX_UUIDS.get(index_name.strip().lower())
    if not uuid:
        try:
            index_info = globus_sdk.SearchClient().get_index(index_name).data
            if not isinstance(index_info, dict):
                raise ValueError("Multiple UUIDs possible")
            uuid = index_info.get("id", index_name)
        except Exception:
            uuid = index_name
    return uuid
869,647
Generate a filename like Google for a song based on metadata. Parameters: metadata (~collections.abc.Mapping): A metadata dict. Returns: str: A filename string without an extension.
def suggest_filename(metadata):
    if 'title' in metadata and 'track_number' in metadata:  # Music Manager.
        suggested_filename = f"{metadata['track_number']:0>2} {metadata['title']}"
    elif 'title' in metadata and 'trackNumber' in metadata:  # Mobile.
        suggested_filename = f"{metadata['trackNumber']:0>2} {metadata['title']}"
    elif 'title' in metadata and 'tracknumber' in metadata:  # audio-metadata/mutagen.
        track_number = _split_number_field(
            list_to_single_value(
                metadata['tracknumber']
            )
        )
        title = list_to_single_value(metadata['title'])
        suggested_filename = f"{track_number:0>2} {title}"
    else:
        suggested_filename = f"00 {list_to_single_value(metadata.get('title', ['']))}"

    return _replace_invalid_characters(suggested_filename)
869,700
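For example, with Music Manager-style keys (a sketch; assumes _replace_invalid_characters leaves this input unchanged):

print(suggest_filename({'title': 'Take Five', 'track_number': 3}))
# -> '03 Take Five'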
Clean up a query string for searching. Removes unmatched parentheses and joining operators. Arguments: q (str): Query string to be cleaned Returns: str: The clean query string.
def _clean_query_string(q):
    q = q.replace("()", "").strip()
    if q.endswith("("):
        q = q[:-1].strip()
    # Remove misplaced AND/OR/NOT at end
    if q[-3:] == "AND" or q[-3:] == "NOT":
        q = q[:-3]
    elif q[-2:] == "OR":
        q = q[:-2]

    # Balance parentheses
    while q.count("(") > q.count(")"):
        q += ")"
    while q.count(")") > q.count("("):
        q = "(" + q

    return q.strip()
869,793
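A before/after sketch of the cleanup:

print(_clean_query_string("foo AND"))       # dangling operator removed -> 'foo'
print(_clean_query_string("foo AND bar)"))  # unmatched ')' balanced -> '(foo AND bar)'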
Validate and clean up a query to be sent to Search. Cleans the query string, removes unneeded parameters, and validates for correctness. Does not modify the original argument. Raises an Exception on invalid input. Arguments: query (dict): The query to validate. Returns: dict: The validated query.
def _validate_query(query):
    query = deepcopy(query)
    # q is always required
    if query["q"] == BLANK_QUERY["q"]:
        raise ValueError("No query specified.")
    query["q"] = _clean_query_string(query["q"])

    # limit should be set to an appropriate default if not specified
    if query["limit"] is None:
        query["limit"] = SEARCH_LIMIT if query["advanced"] else NONADVANCED_LIMIT
    # If specified, the limit should not be greater than the Search maximum
    elif query["limit"] > SEARCH_LIMIT:
        warnings.warn('Reduced result limit from {} to the Search maximum: {}'
                      .format(query["limit"], SEARCH_LIMIT), RuntimeWarning)
        query["limit"] = SEARCH_LIMIT

    # Remove all blank/default values
    for key, val in BLANK_QUERY.items():
        # Default for get is NaN so comparison is always False
        if query.get(key, float('nan')) == val:
            query.pop(key)
    # Remove unsupported fields
    to_remove = [field for field in query.keys() if field not in BLANK_QUERY.keys()]
    for field in to_remove:
        query.pop(field)
    return query
869,794
Add a term to the query. Arguments: term (str): The term to add. Returns: SearchHelper: Self
def _term(self, term):
    # All terms must be strings for Elasticsearch
    term = str(term)
    if term:
        self.__query["q"] += term
    return self
869,796
Add a ``field:value`` term to the query. Matches will have the ``value`` in the ``field``. Note: This method triggers advanced mode. Arguments: field (str): The field to check for the value, in Elasticsearch dot syntax. value (str): The value to match. Returns: SearchHelper: Self
def _field(self, field, value):
    # Fields and values must be strings for Elasticsearch
    field = str(field)
    value = str(value)
    # Check if quotes are required and allowed, and quotes are not present.
    # If the user adds improper double-quotes, this will not fix them.
    if (any([char in value for char in QUOTE_LIST]) and '"' not in value
            and not any([char in value for char in UNQUOTE_LIST])):
        value = '"' + value + '"'
    # Cannot add field:value if one is blank
    if field and value:
        self.__query["q"] += field + ":" + value
        # Field matches are advanced queries
        self.__query["advanced"] = True
    return self
869,797
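A sketch of how the helper builds up a query (field names are hypothetical; assumes sh is a helper instance and the module's default QUOTE_LIST/UNQUOTE_LIST, where a space in the value triggers quoting):

sh._field("mdf.source_name", "oqmd")   # q gains 'mdf.source_name:oqmd'
sh._field("dc.title", "band gap")      # space triggers quoting:
                                       # q gains 'dc.title:"band gap"'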
Sort the search results by a certain field. If this method is called multiple times, the later sort fields are given lower priority, and will only be considered when the earlier fields have the same value. Arguments: field (str): The field to sort by, in Elasticsearch dot syntax. ascending (bool): Sort in ascending order? **Default**: ``True``. Returns: SearchHelper: Self
def _add_sort(self, field, ascending=True):
    # Fields must be strings for Elasticsearch
    field = str(field)
    # No-op on blank sort field
    if field:
        self.__query["sort"].append({
            'field_name': field,
            'order': 'asc' if ascending else 'desc'
        })
    return self
869,801
Retrieve and return the mapping for the given metadata block. Arguments: block (str): The top-level field to fetch the mapping for (for example, ``"mdf"``), or the special values ``None`` for everything or ``"top"`` for just the top-level fields. **Default:** ``None``. index (str): The Search index to map. **Default:** The current index. Returns: dict: ``field:datatype`` pairs.
def show_fields(self, block=None):
    mapping = self._mapping()
    if block is None:
        return mapping
    elif block == "top":
        blocks = set()
        for key in mapping.keys():
            blocks.add(key.split(".")[0])
        block_map = {}
        for b in blocks:
            block_map[b] = "object"
    else:
        block_map = {}
        for key, value in mapping.items():
            if key.startswith(block):
                block_map[key] = value
    return block_map
869,814
Description: Top 2 alternatives 12 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top2_reduced(votes):
    res = np.zeros(12)
    for vote in votes:
        # the top-ranked alternative is in vote[0][0], the second in vote[1][0]
        if vote[0][0] == 0:  # i.e. the first alt is ranked first
            res[0] += 1
            if vote[1][0] == 2:
                res[4] += 1
            elif vote[1][0] == 3:
                res[5] += 1
        elif vote[0][0] == 1:
            res[1] += 1
            if vote[1][0] == 0:
                res[6] += 1
            elif vote[1][0] == 3:
                res[7] += 1
        elif vote[0][0] == 2:
            res[2] += 1
            if vote[1][0] == 0:
                res[8] += 1
            elif vote[1][0] == 1:
                res[9] += 1
        elif vote[0][0] == 3:
            res[3] += 1
            if vote[1][0] == 1:
                res[10] += 1
            elif vote[1][0] == 2:
                res[11] += 1
    res /= len(votes)
    return res
869,908
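The 12 values computed here line up index-for-index with the residuals in the top2_reduced objective earlier; a hedged pairing sketch (in the source both functions share the name top2_reduced in different modules, so the module names below are hypothetical):

from empirical_moments import top2_reduced as top2_moments   # hypothetical module name
from gmm_objectives import top2_reduced as top2_objective    # hypothetical module name

moments = top2_moments(votes)            # 12 empirical moments from the data
loss = top2_objective(params, moments)   # squared model-vs-data deviation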
Description: Top 2 alternatives 16 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top2_full(votes):
    res = np.zeros(16)
    for vote in votes:
        # the top-ranked alternative is in vote[0][0], the second in vote[1][0]
        if vote[0][0] == 0:  # i.e. the first alt is ranked first
            res[0] += 1
            if vote[1][0] == 1:  # i.e. the second alt is ranked second
                res[4] += 1
            elif vote[1][0] == 2:
                res[5] += 1
            elif vote[1][0] == 3:
                res[6] += 1
        elif vote[0][0] == 1:
            res[1] += 1
            if vote[1][0] == 0:
                res[7] += 1
            elif vote[1][0] == 2:
                res[8] += 1
            elif vote[1][0] == 3:
                res[9] += 1
        elif vote[0][0] == 2:
            res[2] += 1
            if vote[1][0] == 0:
                res[10] += 1
            elif vote[1][0] == 1:
                res[11] += 1
            elif vote[1][0] == 3:
                res[12] += 1
        elif vote[0][0] == 3:
            res[3] += 1
            if vote[1][0] == 0:
                res[13] += 1
            elif vote[1][0] == 1:
                res[14] += 1
            elif vote[1][0] == 2:
                res[15] += 1
    res /= len(votes)
    return res
869,909
Description: Top 3 alternatives 16 moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top3_reduced(votes):
    res = np.zeros(16)
    for vote in votes:
        # the top-ranked alternative is in vote[0][0], the second in vote[1][0]
        if vote[0][0] == 0:  # i.e. the first alt is ranked first
            res[0] += 1
            if vote[1][0] == 2:
                res[4] += 1
            elif vote[1][0] == 3:
                res[5] += 1
            elif vote[1][0] == 1 and vote[2][0] == 2:
                res[14] += 1
        elif vote[0][0] == 1:
            res[1] += 1
            if vote[1][0] == 0:
                res[6] += 1
            elif vote[1][0] == 3:
                res[7] += 1
            elif vote[1][0] == 2 and vote[2][0] == 3:
                res[15] += 1
        elif vote[0][0] == 2:
            res[2] += 1
            if vote[1][0] == 0:
                res[8] += 1
            elif vote[1][0] == 1:
                res[9] += 1
            elif vote[1][0] == 3 and vote[2][0] == 0:
                res[12] += 1
        elif vote[0][0] == 3:
            res[3] += 1
            if vote[1][0] == 1:
                res[10] += 1
            elif vote[1][0] == 2:
                res[11] += 1
            elif vote[1][0] == 0 and vote[2][0] == 1:
                res[13] += 1
    res /= len(votes)
    return res
869,910
Description: Top m - 1 alternatives q = m(m - 1) + 2m moment conditions values calculation Parameters: votes: ordinal preference data (numpy ndarray of integers)
def top3_full(votes):
    # create an array of zeros, length = q
    res = np.zeros(2 * len(votes[0]) + (len(votes[0]) * (len(votes[0]) - 1)))
    # iterate through each vote
    for vote in votes:
        ver = True  # verification flag
        # check if the vote belongs to the c1 < c2 < c3, c2 < c3 < c1, ... moment
        for i in range(0, len(votes[0])):
            if vote[i][0] != vote[i - 1][0] + 1 and vote[i][0] != 0:
                ver = False
                break
        if ver:
            res[len(votes[0]) + (len(votes[0]) * (len(votes[0]) - 1)) + vote[0][0]] += 1
        # increment the moment of the top-ranked choice being ranked at the top
        res[vote[0][0]] += 1
        # top-two moment
        add = 0
        if vote[0][0] > vote[1][0]:
            add = 1
        res[(vote[0][0] + 1) * (len(votes[0]) - 1) + add + vote[1][0]] += 1
    res /= len(votes)  # normalize moments
    return res
869,911
Produce a formatted record of a times data structure. Args: times (Times, optional): If not provided, uses the current root timer. Returns: str: Timer tree hierarchy in a formatted string. Raises: TypeError: If provided argument is not a Times object.
def write_structure(times=None):
    if times is None:
        return report_loc.write_structure(f.root.times)
    else:
        if not isinstance(times, Times):
            raise TypeError("Expected Times instance for param 'times' (default is root).")
        return report_loc.write_structure(times)
870,007
Description: Returns the first index of the array (vector) x containing the value i. Parameters: x: one-dimensional array i: search value
def get_index_nested(x, i):
    for ind in range(len(x)):
        if i == x[ind]:
            return ind
    return -1
870,694
Serialize and/or save a Times data object using pickle (cPickle). Args: filename (str, optional): Filename to dump to. If not provided, returns the serialized object. times (Times, optional): Object to dump. If none provided, uses the current root. Returns: pkl: Pickled Times data object, only if no filename provided. Raises: TypeError: If 'times' is not a Times object or a list or tuple of them.
def save_pkl(filename=None, times=None):
    if times is None:
        if not f.root.stopped:
            times = collapse.collapse_times()
        else:
            times = f.root.times
    else:
        if isinstance(times, (list, tuple)):
            for t in times:
                if not isinstance(t, Times):
                    raise TypeError("Expected single Times instance or list/tuple of Times instances for param 'times'.")
        elif not isinstance(times, Times):
            raise TypeError("Expected single Times instance or list/tuple of Times instances for param 'times'.")
    if filename is not None:
        with open(str(filename), 'wb') as file:
            pickle.dump(times, file)
    else:
        return pickle.dumps(times)
871,280
Unpickle file contents. Args: filenames (str): Can be one or a list or tuple of filenames to retrieve. Returns: Times: A single object, or from a collection of filenames, a list of Times objects. Raises: TypeError: If any loaded object is not a Times object.
def load_pkl(filenames):
    if not isinstance(filenames, (list, tuple)):
        filenames = [filenames]
    times = []
    for name in filenames:
        name = str(name)
        with open(name, 'rb') as file:
            loaded_obj = pickle.load(file)
            if not isinstance(loaded_obj, Times):
                raise TypeError("At least one loaded object is not a Times data object.")
            times.append(loaded_obj)
    return times if len(times) > 1 else times[0]
871,281
Create the main window. Args: size (tuple): The width and height of the window. samples (int): The number of samples. Keyword Args: fullscreen (bool): Fullscreen? title (str): The title of the window. threaded (bool): Threaded? Returns: Window: The main window.
def create_window(size=None, samples=16, *, fullscreen=False, title=None, threaded=True) -> Window:
    if size is None:
        width, height = 1280, 720
    else:
        width, height = size

    if samples < 0 or (samples & (samples - 1)) != 0:
        raise Exception('Invalid number of samples: %d' % samples)

    window = Window.__new__(Window)
    window.wnd = glwnd.create_window(width, height, samples, fullscreen, title, threaded)
    return window
871,407
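The samples & (samples - 1) test above is the usual power-of-two check, since multisample counts must be powers of two; a quick illustration:

for s in (0, 1, 2, 3, 8, 12, 16):
    print(s, (s & (s - 1)) == 0)  # True only for 0 and exact powers of two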
r"""Provides a way for each connection wrapper to handle error responses. Parameters: response(Response): An instance of :class:`.requests.Response`. retries(int): The number of times :meth:`.request` has been called so far. \**kwargs: The parameters with which :meth:`.request` was called. The `retries` parameter is excluded from `kwargs` intentionally. Returns: requests.Response
def _handle_response_error(self, response, retries, **kwargs):
    error = self._convert_response_to_error(response)
    if error is None:
        return response

    max_retries = self._max_retries_for_error(error)
    if max_retries is None or retries >= max_retries:
        return response

    backoff = min(0.0625 * 2 ** retries, 1.0)
    self.logger.warning("Sleeping for %r before retrying failed request...", backoff)
    time.sleep(backoff)

    retries += 1
    self.logger.warning("Retrying failed request. Attempt %d/%d.", retries, max_retries)
    return self.request(retries=retries, **kwargs)
871,430
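The sleep above is capped exponential backoff; a quick sketch of the schedule it produces:

for retries in range(6):
    print(retries, min(0.0625 * 2 ** retries, 1.0))
# 0 0.0625 | 1 0.125 | 2 0.25 | 3 0.5 | 4 1.0 | 5 1.0 (capped)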
Subclasses may override this method in order to influence how errors are parsed from the response. Parameters: response(Response): The response object. Returns: object or None: Any object for which a max retry count can be retrieved or None if the error cannot be handled.
def _convert_response_to_error(self, response):
    content_type = response.headers.get("content-type", "")
    if "application/x-protobuf" in content_type:
        self.logger.debug("Decoding protobuf response.")
        data = status_pb2.Status.FromString(response.content)
        status = self._PB_ERROR_CODES.get(data.code)
        error = {"status": status}
        return error

    elif "application/json" in content_type:
        self.logger.debug("Decoding json response.")
        data = response.json()
        error = data.get("error")
        if not error or not isinstance(error, dict):
            self.logger.warning("Unexpected error response: %r", data)
            return None
        return error

    self.logger.warning("Unexpected response: %r", response.text)
    return None
871,431
Receives a list of events and transmits them to Riemann Arguments: events -- list of `tensor.objects.Event`
def eventsReceived(self, events):
    # Make sure queue isn't oversized
    if (self.maxsize < 1) or (len(self.events) < self.maxsize):
        self.events.extend(events)
871,671
Description: Full breaking Parameters: k: not used
def _full(self, k):
    G = np.ones((self.m, self.m))
    # np.fill_diagonal(G, 0)  # erroneous code from prefpy
    return G
871,778
Description: Top k breaking Parameters: k: the number of alternatives to break from highest rank
def _top(self, k):
    if k > self.m:
        raise ValueError("k larger than the number of alternatives")
    G = np.ones((self.m, self.m))
    # np.fill_diagonal(G, 0)  # erroneous code from prefpy
    for i in range(self.m):
        for j in range(self.m):
            if i == j:
                continue
            if i > k and j > k:
                G[i][j] = 0
    return G
871,779
Description: Bottom k breaking Parameters: k: the number of alternatives to break from lowest rank
def _bot(self, k):
    if k < 2:
        raise ValueError("k smaller than 2")
    G = np.ones((self.m, self.m))
    np.fill_diagonal(G, 0)
    for i in range(self.m):
        for j in range(self.m):
            if i == j:
                continue
            if i <= k and j <= k:
                G[i][j] = 0
    return G
871,780
Description: Position k breaking Parameters: k: position k is used for the breaking
def _pos(self, k):
    if k < 2:
        raise ValueError("k smaller than 2")
    G = np.zeros((self.m, self.m))
    for i in range(self.m):
        for j in range(self.m):
            if i == j:
                continue
            if i < k or j < k:
                continue
            if i == k or j == k:
                G[i][j] = 1
    return G
871,782
Description: Takes in a set of rankings and computes the Plackett-Luce model aggregate ranking. Parameters: rankings: set of rankings to aggregate breaking: type of breaking to use k: number to be used for top, bottom, and position breakings
def aggregate(self, rankings, breaking="full", k=None):
    breakings = {
        "full": self._full,
        "top": self._top,
        "bottom": self._bot,
        "adjacent": self._adj,
        "position": self._pos
    }
    if k is None and breaking not in ("full", "position"):
        raise ValueError("k cannot be None for non-full or non-position breaking")
    break_mat = breakings[breaking](k)
    P = np.zeros((self.m, self.m))
    for ranking in rankings:
        localP = np.zeros((self.m, self.m))
        for ind1, alt1 in enumerate(self.alts):
            for ind2, alt2 in enumerate(self.alts):
                if ind1 == ind2:
                    continue
                alt1_rank = util.get_index_nested(ranking, alt1)
                alt2_rank = util.get_index_nested(ranking, alt2)
                if alt1_rank < alt2_rank:  # alt 1 is ranked higher
                    localP[ind1][ind2] = 1
        for ind, alt in enumerate(self.alts):
            localP[ind][ind] = -1 * (np.sum(localP.T[ind][:ind]) +
                                     np.sum(localP.T[ind][ind + 1:]))
        localP *= break_mat
        P += localP / len(rankings)
    # the last right-singular vector spans the (approximate) null space of P
    U, S, V = np.linalg.svd(P)
    gamma = np.abs(V[-1])
    gamma /= np.sum(gamma)
    alt_scores = {cand: gamma[ind] for ind, cand in enumerate(self.alts)}
    self.P = P
    self.create_rank_dicts(alt_scores)
    return gamma
871,783
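A usage sketch (the class name is hypothetical; rankings use the nested one-element-tuple format that util.get_index_nested expects):

agg = GMMPLAggregator([0, 1, 2, 3])  # hypothetical name for the class above
rankings = [
    [(0,), (1,), (2,), (3,)],
    [(1,), (0,), (2,), (3,)],
]
gamma = agg.aggregate(rankings, breaking="full")
print(gamma)  # estimated Plackett-Luce strengths, normalized to sum to 1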
Description: Minorization-Maximization algorithm which returns an estimate of the ground-truth parameters, gamma for the given data. Parameters: rankings: set of rankings to aggregate epsilon: convergence condition value, set to None for iteration only max_iters: maximum number of iterations of MM algorithm
def aggregate(self, rankings, epsilon, max_iters):
    # compute the matrix w, the numbers of pairwise wins:
    w = np.zeros((self.m, self.m))
    for ranking in rankings:
        localw = np.zeros((self.m, self.m))
        for ind1, alt1 in enumerate(self.alts):
            for ind2, alt2 in enumerate(self.alts):
                if ind1 == ind2:
                    continue
                alt1_rank = util.get_index_nested(ranking, alt1)
                alt2_rank = util.get_index_nested(ranking, alt2)
                if alt1_rank < alt2_rank:  # alt 1 is ranked higher
                    localw[ind1][ind2] = 1
        w += localw
    W = w.sum(axis=1)

    # gamma_t is the value of gamma at time = t
    # gamma_t1 is the value of gamma at time t = t+1 (the next iteration)
    # initial arbitrary value for gamma:
    gamma_t = np.ones(self.m) / self.m
    gamma_t1 = np.empty(self.m)

    for f in range(max_iters):
        for i in range(self.m):
            s = 0  # sum of the updating function
            for j in range(self.m):
                if j != i:
                    s += (w[j][i] + w[i][j]) / (gamma_t[i] + gamma_t[j])
            gamma_t1[i] = W[i] / s
        gamma_t1 /= np.sum(gamma_t1)
        if epsilon is not None and np.all(np.absolute(gamma_t1 - gamma_t) < epsilon):
            alt_scores = {cand: gamma_t1[ind] for ind, cand in enumerate(self.alts)}
            self.create_rank_dicts(alt_scores)
            return gamma_t1  # convergence reached before max_iters
        # copy, so the next iteration does not overwrite gamma_t in place
        gamma_t = np.copy(gamma_t1)

    alt_scores = {cand: gamma_t1[ind] for ind, cand in enumerate(self.alts)}
    self.create_rank_dicts(alt_scores)
    return gamma_t1
871,817
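And the analogous sketch for the MM estimator (hypothetical class name; same ranking format as above):

rankings = [[(0,), (1,), (2,), (3,)], [(1,), (0,), (2,), (3,)]]
mm = MMPLAggregator([0, 1, 2, 3])  # hypothetical name for the class above
gamma = mm.aggregate(rankings, epsilon=1e-6, max_iters=100)
print(gamma)  # MM estimate of the Plackett-Luce parameters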
Usage: var list var delete NAMES var NAME=VALUE var NAME Arguments: NAME Name of the variable NAMES Names of the variable separated by spaces VALUE VALUE to be assigned special vars date and time are defined
def do_var(self, arg, arguments):
    if arguments['list'] or arg == '' or arg is None:
        self._list_variables()
        return
    elif arguments['NAME=VALUE'] and "=" in arguments["NAME=VALUE"]:
        (variable, value) = arg.split('=', 1)
        if value == "time" or value == "now":
            value = datetime.datetime.now().strftime("%H:%M:%S")
        elif value == "date":
            value = datetime.datetime.now().strftime("%Y-%m-%d")
        self._add_variable(variable, value)
        return
    elif arguments['NAME=VALUE']:  # bare NAME: look up and print its value
        try:
            v = arguments['NAME=VALUE']
            Console.ok(str(self.variables[v]))
        except KeyError:
            Console.error('variable {:} not defined'.format(arguments['NAME=VALUE']))
    elif arg.startswith('delete'):
        variable = arg.split(' ')[1]
        self._delete_variable(variable)
        return
871,897
cm. Usage: cm [-q] help cm [-v] [-b] [--file=SCRIPT] [-i] [COMMAND ...] Arguments: COMMAND A command to be executed Options: --file=SCRIPT -f SCRIPT Executes the script -i After start keep the shell interactive, otherwise quit [default: False] -b suppress the printing of the banner [default: False]
def main():
    echo = False
    try:
        arguments = docopt(main.__doc__, help=True)
        # fixing the help parameter parsing
        if arguments['help']:
            arguments['COMMAND'] = ['help']
            arguments['help'] = 'False'
        script_file = arguments['--file']
        interactive = arguments['-i']
        echo = arguments['-v']
        if echo:
            pprint(arguments)
    except:
        script_file = None
        interactive = False
        arguments = {'-b': True,
                     'COMMAND': [' '.join(sys.argv[1:])]}

    plugins = []
    plugins.append(dict(get_plugins_from_dir("sys", "cmd3")))

    create_cmd3_yaml_file(force=False, verbose=False)
    filename = path_expand("~/.cloudmesh/cmd3.yaml")
    try:
        module_config = ConfigDict(filename=filename)
        modules = module_config["cmd3"]["modules"]
        properties = module_config["cmd3"]["properties"]
    except:
        modules = ['cloudmesh_cmd3.plugins']
        properties = {"debug": False}  # fallback so set_debug below has a value

    for module_name in modules:
        try:
            plugins.append(dict(get_plugins_from_module(module_name)))
        except:
            # print("WARNING: could not find", module_name)
            pass

    for plugin in plugins:
        sys.path.append(os.path.expanduser(plugin['dir']))
    sys.path.append("../..")
    sys.path.append(".")
    sys.path.append("..")

    for plugin in plugins:
        plugin['class'] += ".plugins"

    name = "CmCli"
    # not yet quite what i want, but falling back to a flat array
    (cmd, plugin_objects) = DynamicCmd(name, plugins)

    cmd.set_verbose(echo)
    cmd.activate()
    cmd.set_verbose(echo)
    cmd.set_debug(properties["debug"])
    if arguments['-b']:
        cmd.set_banner("")
    if script_file is not None:
        cmd.do_exec(script_file)

    if len(arguments['COMMAND']) > 0:
        try:
            user_cmd = " ".join(arguments['COMMAND'])
            if echo:
                print(">", user_cmd)
            cmd.onecmd(user_cmd)
        except Exception as e:
            Console.error("")
            Console.error("ERROR: executing command '{0}'".format(user_cmd))
            Console.error("")
            print(70 * "=")
            print(e)
            print(70 * "=")
            print(traceback.format_exc())
        if interactive:
            cmd.cmdloop()
    elif not script_file or interactive:
        cmd.cmdloop()
871,958
Constructor. Initializes various variables, sets up the HTTP handler, and stores all values. Args: prefix: The prefix of the urls. Raises: AttributeError: if not all values for parameters in `url_fields` are passed
def __init__(self, prefix='/api/2/', **url_values):
    self._http = registry.http_handler
    self._prefix = prefix
    self._modified_fields = {}
    self._populated_fields = {}
    for field in url_values:
        if field in self.url_fields:
            setattr(self, field, url_values[field])
        else:
            self._handle_wrong_field(field, ATTR_TYPE_URL)
    # From now on, only specific attributes can be set on this object:
    # a) one of the instance variables set above
    # b) one of the attributes found in `self.writable_fields`
    self._is_initialized = True
871,986
Resolve a value from :attr:`resolver_dict` based on the :attr:`data_format`. Args: data_format (:class:`~.DataFormat` or str): The data format; must be a member of :class:`~.DataFormat` or a string equivalent. resolver_dict (dict): the resolving dict. Can hold any value for any of the valid :attr:`data_format` strings Returns: The value of the key in :attr:`resolver_dict` that matches :attr:`data_format`
def _data_format_resolver(data_format, resolver_dict):
    try:
        data_format = DataFormat(data_format)
    except ValueError:
        supported_formats = ', '.join(
            ["'{}'".format(f.value) for f in DataFormat])
        raise ValueError(("'data_format' must be one of {formats}. Given "
                          "'{value}'.").format(formats=supported_formats,
                                               value=data_format))
    return (resolver_dict.get(data_format) or
            resolver_dict.get(data_format.value))
872,030
:: Usage: man COMMAND man [--noheader] Options: --noheader no rst header Arguments: COMMAND the command to be printed Description: man Prints out the help pages man COMMAND Prints out the help page for a specific command
def do_man(self, args, arguments):
    if arguments['COMMAND'] is None:
        print()
        print("Commands")
        print(70 * "=")
        commands = [k for k in dir(self) if k.startswith("do_")]
        commands.sort()
    else:
        print(arguments)
        commands = [arguments['COMMAND']]
    for command in commands:
        what = command.replace("do_", "")
        try:
            if not arguments["--noheader"]:
                print(what)
                print(70 * "-")
            self._print_rst(what)
        except:
            print("\n Command documentation %s missing, help_%s" % (what, what))
            print()
872,119
Return text in camelCase style. Args: text: input string to convert case acronyms: a list of acronyms to detect >>> camelcase("hello world") 'helloWorld' >>> camelcase("HELLO_HTML_WORLD", ["HTML"]) 'helloHTMLWorld'
def camelcase(text, acronyms=None):
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    if words:
        words[0] = words[0].lower()
    return ''.join(words)
872,455
Return text in PascalCase style (aka MixedCase). Args: text: input string to convert case acronyms: a list of acronyms to detect >>> pascalcase("hello world") 'HelloWorld' >>> pascalcase("HELLO_HTML_WORLD", ["HTML"]) 'HelloHTMLWorld'
def pascalcase(text, acronyms=None):
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    return ''.join(words)
872,456
Return text in CONST_CASE style (aka SCREAMING_SNAKE_CASE). Args: text: input string to convert case acronyms: a list of acronyms to detect >>> constcase("hello world") 'HELLO_WORLD' >>> constcase("helloHTMLWorld", ["HTML"]) 'HELLO_HTML_WORLD'
def constcase(text, acronyms=None):
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    return '_'.join([w.upper() for w in words])
872,457
Return text in dot.case style. Args: text: input string to convert case acronyms: a list of acronyms to detect >>> dotcase("hello world") 'hello.world' >>> dotcase("helloHTMLWorld", ["HTML"]) 'hello.html.world'
def dotcase(text, acronyms=None):
    words, _case, _sep = case_parse.parse_case(text, acronyms)
    return '.'.join([w.lower() for w in words])
872,458
Return text in "seperate words" style. Args: text: input string to convert case detect_acronyms: should attempt to detect acronyms acronyms: a list of acronyms to detect >>> separate_words("HELLO_WORLD") 'HELLO WORLD' >>> separate_words("helloHTMLWorld", True, ["HTML"]) 'hello HTML World'
def separate_words(text, acronyms=None):
    words, _case, _sep = case_parse.parse_case(text, acronyms, preserve_case=True)
    return ' '.join(words)
872,459
Segment string on separator into list of words. Arguments: string -- the string we want to process Returns: words -- list of words the string got minced to separator -- the separator character found between words was_upper -- whether the string happened to be upper-case
def _separate_words(string):
    words = []
    separator = ""

    # Index of current character. Initially 1 because we don't want to check
    # if the 0th character is a boundary.
    i = 1
    # Index of the first character in a sequence
    s = 0
    # Previous character.
    p = string[0:1]

    # Treat an all-caps string as lower-case, so that every letter isn't
    # counted as a boundary.
    was_upper = False
    if string.isupper():
        string = string.lower()
        was_upper = True

    # Iterate over each character, checking for boundaries, or places where
    # the string should be divided.
    while i <= len(string):
        c = string[i:i + 1]

        split = False
        if i < len(string):
            # Detect upper-case letter as boundary.
            if UPPER.match(c):
                split = True
            # Detect transition from separator to not separator.
            elif NOTSEP.match(c) and SEP.match(p):
                split = True
            # Detect transition from not separator to separator.
            elif SEP.match(c) and NOTSEP.match(p):
                split = True
        else:
            # The loop goes one extra iteration so that it can handle the
            # remaining text after the last boundary.
            split = True

        if split:
            if NOTSEP.match(p):
                words.append(string[s:i])
            else:
                # The string contains at least one separator.
                # Use the first one as the string's primary separator.
                if not separator:
                    separator = string[s:s + 1]

                # Use None to indicate a separator in the word list.
                words.append(None)
                # If separators weren't included in the list, then breaks
                # between upper-case sequences ("AAA_BBB") would be
                # disregarded; the letter-run detector would count them as one
                # sequence ("AAABBB").
            s = i

        i += 1
        p = c

    return words, separator, was_upper
872,541
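A sketch of its output (assuming the module-level UPPER/SEP/NOTSEP regexes treat letters as non-separators and characters like '_' as separators):

print(_separate_words("helloWorld"))
# -> (['hello', 'World'], '', False)
print(_separate_words("HELLO_WORLD"))
# -> (['hello', None, 'world'], '_', True)   None marks the separator slot;
#                                            all-caps input is lower-cased first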
:: Usage: edit FILENAME Edits the file with the given name Arguments: FILENAME the file to edit
def do_edit(self, arg, arguments):
    def _create_file(filename):
        if not os.path.exists(filename):
            open(filename, 'w+').close()

    def _edit(prefix, editors, filename):
        for editor in editors:
            if os.path.exists(editor):
                _create_file(filename)
                os.system("{:} {:} {:}".format(prefix, editor, filename))
                return True
        return False

    filename = arg
    what = platform.system().lower()
    prefix = ""
    print(what)
    if 'darwin' in what:
        editors = ["/Applications/Aquamacs.app", "/Applications/Emacs.app"]
        prefix = "open -a "
    elif "linux" in what:
        editors = ["/usr/bin/emacs", "/usr/bin/vi", "/usr/bin/vim", "/usr/bin/nano"]
    elif "windows" in what:
        editors = ["emacs", "vi", "vim", "nano", "notepad", "notepad++"]
    else:
        Console.error("Please contact the developers to add an "
                      "editor for your platform")
        return
    if not _edit(prefix, editors, filename):
        Console.error("Could not find working editor in {0}"
                      .format(str(editors)))
872,665
:: Usage: graphviz FILENAME Opens the given file in Graphviz (macOS). Arguments: FILENAME The filename
def do_graphviz(self, args, arguments):
    filename = arguments['FILENAME']
    if platform.system() == 'Darwin':
        if os.path.isfile(filename):
            os.system("open -a '/Applications/Graphviz.app' " + filename)
872,818
:: Usage: dot2 FILENAME FORMAT Converts the given dot file to the specified output format. Arguments: FILENAME The filename FORMAT the export format: pdf, png, ...
def do_dot2(self, args, arguments):
    filename = arguments['FILENAME']
    output_format = arguments['FORMAT']
    base = filename.replace(".dot", "")
    out = base + "." + output_format
    if output_format == "pdf":
        exec_command = "dot -Tps %s | epstopdf --filter --outfile=%s" % (filename, out)
    else:
        exec_command = "dot -T%s %s -o %s 2>/tmp/err" % (output_format, filename, out)
    os.system(exec_command)
872,819
Start sirbot. Configures sirbot and starts the aiohttp.web.Application. Args: host (str): host port (int): port
def run(self, host: str = '0.0.0.0', port: int = 8080):
    self._loop.run_until_complete(self._configure_plugins())
    web.run_app(self._app, host=host, port=port)
872,998
Table Constructor todo:: make sure this is memory efficient Args: index (Index): An Index object with a valid .query method and a .columns attribute. Returns: A table object Usage example >>> Table(ind)
def __init__(self, index, port=8081):
    self.index = index
    self.server = None
    self.port = port if port else find_free_port()
    self.settings = index.columns
    self.docs = index.docs
    self._create_settings()
    self.html_path = get_cur_path() + '/data/table/'
    # set to true if we want to delete the viz directory
    self.cleanup_flag = False
872,999
Initializer for the base class. Save the hostname to use for all requests as well as any authentication info needed. Args: hostname: The host for the requests. auth: The authentication info needed for any requests.
def __init__(self, hostname, auth=AnonymousAuth()):
    self._hostname = self._construct_full_hostname(hostname)
    _logger.debug("Hostname is %s" % self._hostname)
    self._auth_info = auth
873,136
Create a full (scheme included) hostname from the argument given. Only HTTP and HTTP+SSL protocols are allowed. Args: hostname: The hostname to use. Returns: The full hostname. Raises: ValueError: A not supported protocol is used.
def _construct_full_hostname(self, hostname):
    if hostname.startswith(('http://', 'https://', )):
        return hostname
    if '://' in hostname:
        protocol, host = hostname.split('://', 1)
        raise ValueError('Protocol %s is not supported.' % protocol)
    return '://'.join([self.default_scheme, hostname, ])
873,137
Add the authentication info to the supplied dictionary. We use the `requests.HTTPBasicAuth` class as the `auth` param. Args: `request_args`: The arguments that will be passed to the request. Returns: The updated arguments for the request.
def populate_request_data(self, request_args):
    request_args['auth'] = HTTPBasicAuth(self._username, self._password)
    return request_args
873,321
:: Usage: exec FILENAME executes the commands in the file. See also the script command. Arguments: FILENAME The name of the file
def do_exec(self, filename):
    if not filename:
        Console.error("the command requires a filename as parameter")
        return
    if os.path.exists(filename):
        with open(filename, "r") as f:
            for line in f:
                Console.ok("> {:}".format(str(line)))
                self.onecmd(line)
    else:
        Console.error('file "{:}" does not exist.'.format(filename))
        sys.exit()
873,600
MathList Constructor todo:: share a port among lists. Or maybe close the server after serving from it? Args: lst (list): A list of LaTeX math to be rendered by KaTeX Returns: A math list object Usage example >>> lst = [r"\int x = y", "x + 6"] >>> MathList(lst) ... see nicely formatted math.
def __init__(self, lst):
    list.__init__(self, lst)
    self.server = None
    self.port = find_free_port()
    self.html_path = get_cur_path() + '/data/math_list/index.html'
873,719
:: Usage: open FILENAME ARGUMENTS: FILENAME the file to open in the cwd if . is specified. If the file is in the cwd you must specify it with ./FILENAME Opens the given URL in a browser window.
def do_open(self, args, arguments):
    filename = arguments['FILENAME']
    filename = self._expand_filename(filename)
    Console.ok("open {0}".format(filename))
    if not (filename.startswith("file:") or filename.startswith("http:")):
        try:
            with open(filename):
                pass
            filename = "file://" + filename  # prepend the URL scheme
        except:
            Console.error("unsupported browser format in file {0}".format(filename))
            return
    try:
        webbrowser.open("%s" % filename)
    except:
        Console.error("can not open browser with file {0}".format(filename))
873,868
Creates an entity in Mambu This method must be implemented in child classes Args: data (dictionary): dictionary with data to send, this dictionary is specific for each Mambu entity
def create(self, data, *args, **kwargs):
    # if the module of the function is different from the module of the
    # object, create is not implemented in the child class
    if self.create.__func__.__module__ != self.__module__:
        raise Exception("Child method not implemented")
    self._MambuStruct__method = "POST"
    self._MambuStruct__data = data
    self.connect(*args, **kwargs)
    self._MambuStruct__method = "GET"
    self._MambuStruct__data = None
873,886
Make a request. Use the `requests` module to actually perform the request. Args: `method`: The method to use. `path`: The path to the resource. `data`: Any data to send (for POST and PUT requests). `kwargs`: Other parameters for `requests`. Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
def _make_request(self, method, path, data=None, **kwargs):
    _logger.debug("Method for request is %s" % method)
    url = self._construct_full_url(path)
    _logger.debug("URL for request is %s" % url)
    self._auth_info.populate_request_data(kwargs)
    _logger.debug("The arguments are %s" % kwargs)
    # Add custom headers for the request
    if self._auth_info._headers:
        kwargs.setdefault('headers', {}).update(self._auth_info._headers)
    res = requests.request(method, url, data=data, **kwargs)
    if res.ok:
        _logger.debug("Request was successful.")
        return res.content.decode('utf-8')
    if hasattr(res, 'content'):
        _logger.debug("Response was %s:%s", res.status_code, res.content)
        raise self._exception_for(res.status_code)(
            res.content, http_code=res.status_code
        )
    else:
        msg = "No response from URL: %s" % res.request.url
        _logger.error(msg)
        raise NoResponseError(msg)
874,033
Send data to a remote server, either with a POST or a PUT request. Args: `method`: The method (POST or PUT) to use. `path`: The path to the resource. `data`: The data to send. `filename`: The filename of the file to send (if any). Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
def _send(self, method, path, data, filename):
    if filename is None:
        return self._send_json(method, path, data)
    else:
        return self._send_file(method, path, data, filename)
874,034
Make a application/json request. Args: `method`: The method of the request (POST or PUT). `path`: The path to the resource. `data`: The JSON-encoded data. Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
def _send_json(self, method, path, data):
    headers = {'Content-type': 'application/json'}
    return self._make_request(method, path, data=data, headers=headers)
874,035
Make a multipart/form-encoded request. Args: `method`: The method of the request (POST or PUT). `path`: The path to the resource. `data`: The JSON-encoded data. `filename`: The filename of the file to send. Returns: The content of the response. Raises: An exception depending on the HTTP status code of the response.
def _send_file(self, method, path, data, filename):
    with open(filename, 'r') as f:
        return self._make_request(method, path, data=data, files=[f, ])
874,036
Execute rcon command on server and fetch result Args: command --- executed command timeout --- read timeout Returns: bytes response
def execute(self, command, timeout=1):
    self.send(command)
    return self.read_untill(timeout)
874,301
Extract index and watch from :class:`Blocking` Parameters: obj (Blocking): the blocking object Returns: tuple: index and watch
def extract_blocking(obj):
    if isinstance(obj, tuple):
        try:
            a, b = obj
        except ValueError:
            raise TypeError("Not a Blocking object")
    else:
        a, b = obj, None
    return extract_attr(a, keys=["Index"]), b
874,394
Perform a request.

Args:
    request_method: HTTP method for this request.
    api_method: API method name for this request.
    *args: Extra arguments to pass to the request.
    **kwargs: Extra keyword arguments to pass to the request.

Returns:
    A dict containing the request response data.

Raises:
    RequestFailedError: Raised when BearyChat's OpenAPI responds with a
        status code != 2xx

def request(self, request_method, api_method, *args, **kwargs):
    url = self._build_url(api_method)
    resp = requests.request(request_method, url, *args, **kwargs)

    try:
        rv = resp.json()
    except ValueError:
        raise RequestFailedError(resp, 'not a json body')

    if not resp.ok:
        raise RequestFailedError(resp, rv.get('error'))
    return rv
874,754
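A usage sketch, assuming a client object exposing this `request` method with `_build_url` pointed at BearyChat's OpenAPI; the constructor and payload below are illustrative:

client = OpenAPIClient(token="...")  # hypothetical constructor
data = client.request("POST", "message.create",
                      json={"vchannel_id": "...", "text": "hello"})
print(data)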
Returns the specified key or prefix.

Parameters:
    path (str): Key or prefix to fetch
    dc (str): Specify datacenter that will be used.
        Defaults to the agent's local datacenter.
    watch (Blocking): Do a blocking query
    consistency (Consistency): Force consistency

The remaining keyword arguments (``raw``, ``recurse``, ``separator``,
``keys``) are passed straight through as query parameters to ``/v1/kv``.

async def _read(self, path, *, raw=None, recurse=None, dc=None,
                separator=None, keys=None, watch=None, consistency=None):
    response = await self._api.get("/v1/kv", path, params={
        "raw": raw,
        "dc": dc,
        "recurse": recurse,
        "separator": separator,
        "keys": keys
    }, watch=watch, consistency=consistency)
    return response
875,078
Returns the specified key

Parameters:
    key (str): Key to fetch
    dc (str): Specify datacenter that will be used.
        Defaults to the agent's local datacenter.
    watch (Blocking): Do a blocking query
    consistency (Consistency): Force consistency
Returns:
    ObjectMeta: where value is the raw value

async def raw(self, key, *, dc=None, watch=None, consistency=None):
    response = await self._read(key, dc=dc, raw=True,
                                watch=watch, consistency=consistency)
    return consul(response)
875,081
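A usage sketch; the client construction is hypothetical, but `raw=True` maps to Consul's `?raw` query parameter, so the body comes back without the usual JSON envelope:

import asyncio

async def main():
    client = Consul("http://127.0.0.1:8500")  # hypothetical constructor
    meta = await client.kv.raw("config/db/host")
    print(meta)  # ObjectMeta wrapping the raw value

asyncio.run(main())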
Sets the key to the given value.

Parameters:
    key (str): Key to set
    value (Payload): Value to set, it will be encoded by flags
    flags (int): Flags to set with value
Returns:
    bool: ``True`` on success

async def set(self, key, value, *, flags=None):
    value = encode_value(value, flags)
    response = await self._write(key, value, flags=flags)
    return response.body is True
875,083
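A write sketch; `flags` is an opaque integer Consul stores alongside the value, which `encode_value` also consults when encoding (the `client` object here is assumed):

async def store_config(client):
    ok = await client.kv.set("config/db/host", "10.0.0.12")
    assert ok
    # Attach application-defined flags to the entry
    await client.kv.set("config/db/port", "5432", flags=1)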
Locks the Key with the given Session.

Parameters:
    key (str): Key to set
    value (Payload): Value to set, it will be encoded by flags
    session (ObjectID): Session ID
    flags (int): Flags to set with value
Returns:
    bool: ``True`` on success

The Key will only obtain the lock if the Session is valid, and no
other session has it locked.

async def lock(self, key, value, *, flags=None, session):
    value = encode_value(value, flags)
    session_id = extract_attr(session, keys=["ID"])
    response = await self._write(key, value, flags=flags,
                                 acquire=session_id)
    return response.body is True
875,085
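`lock`/`unlock` map onto Consul's `?acquire=`/`?release=` KV semantics; a sketch of the usual pattern, where the session accessor is a hypothetical counterpart on the same client:

async def do_leader_work(client):
    session = await client.session.create()  # hypothetical helper
    acquired = await client.kv.lock("service/leader", b"me", session=session)
    if not acquired:
        return  # another session holds the lock
    try:
        pass  # do work while holding the lock
    finally:
        await client.kv.unlock("service/leader", b"me", session=session)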
Unlocks the Key with the given Session.

Parameters:
    key (str): Key to set
    value (Payload): Value to set, it will be encoded by flags
    session (ObjectID): Session ID
    flags (int): Flags to set with value
Returns:
    bool: ``True`` on success

The Key will only release the lock if the Session is valid and
currently has it locked.

async def unlock(self, key, value, *, flags=None, session):
    value = encode_value(value, flags)
    session_id = extract_attr(session, keys=["ID"])
    response = await self._write(key, value, flags=flags,
                                 release=session_id)
    return response.body is True
875,086
Deletes the Key.

Parameters:
    key (str): Key to delete
Returns:
    bool: ``True`` on success

async def delete(self, key):
    response = await self._discard(key)
    return response.body is True
875,088
Deletes all keys with a prefix of Key.

Parameters:
    prefix (str): Prefix to delete
    separator (str): Delete only up to a given separator
Returns:
    bool: ``True`` on success

async def delete_tree(self, prefix, *, separator=None):
    response = await self._discard(prefix, recurse=True,
                                   separator=separator)
    return response.body is True
875,089
Deletes the Key with check-and-set semantics.

Parameters:
    key (str): Key to delete
    index (ObjectIndex): Index ID
Returns:
    bool: ``True`` on success

The Key will only be deleted if its current modify index matches the
supplied Index.

async def delete_cas(self, key, *, index):
    index = extract_attr(index, keys=["ModifyIndex", "Index"])
    response = await self._discard(key, cas=index)
    return response.body is True
875,090
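Check-and-set deletes guard against racing writers: read the entry, then delete only if its ModifyIndex is unchanged. A sketch, where the read helper is hypothetical; note that `extract_attr` lets the whole entry dict stand in for the index:

async def delete_if_unchanged(client, key):
    entry = await client.kv.get(key)  # hypothetical read returning the KV dict
    ok = await client.kv.delete_cas(key, index=entry)
    if not ok:
        print("key was modified concurrently; not deleted")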
Sets the Key to the given Value

Parameters:
    key (str): Key to set
    value (Payload): Value to set, it will be encoded by flags
    flags (int): Flags to set with value

def set(self, key, value, *, flags=None):
    self.append({
        "Verb": "set",
        "Key": key,
        "Value": encode_value(value, flags, base64=True).decode("utf-8"),
        "Flags": flags
    })
    return self
875,093
Sets the Key to the given Value with check-and-set semantics

Parameters:
    key (str): Key to set
    value (Payload): Value to set, it will be encoded by flags
    index (ObjectIndex): Index ID
    flags (int): Flags to set with value

The Key will only be set if its current modify index matches the
supplied Index

def cas(self, key, value, *, flags=None, index):
    self.append({
        "Verb": "cas",
        "Key": key,
        "Value": encode_value(value, flags, base64=True).decode("utf-8"),
        "Flags": flags,
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"])
    })
    return self
875,094
Locks the Key with the given Session

Parameters:
    key (str): Key to set
    value (Payload): Value to set, it will be encoded by flags
    flags (int): Flags to set with value
    session (ObjectID): Session ID

The Key will only obtain the lock if the Session is valid, and no
other session has it locked

def lock(self, key, value, *, flags=None, session):
    self.append({
        "Verb": "lock",
        "Key": key,
        "Value": encode_value(value, flags, base64=True).decode("utf-8"),
        "Flags": flags,
        "Session": extract_attr(session, keys=["ID"])
    })
    return self
875,095
Fails the transaction if Key does not have a modify index equal to Index

Parameters:
    key (str): Key to check
    index (ObjectIndex): Index ID

def check_index(self, key, *, index):
    self.append({
        "Verb": "check-index",
        "Key": key,
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"])
    })
    return self
875,096
Fails the transaction if Key is not currently locked by Session

Parameters:
    key (str): Key to check
    session (ObjectID): Session ID

def check_session(self, key, *, session=None):
    self.append({
        "Verb": "check-session",
        "Key": key,
        "Session": extract_attr(session, keys=["ID"])
    })
    return self
875,097
Deletes the Key with check-and-set semantics.

Parameters:
    key (str): Key to delete
    index (ObjectIndex): Index ID

The Key will only be deleted if its current modify index matches the
supplied Index

def delete_cas(self, key, *, index):
    self.append({
        "Verb": "delete-cas",
        "Key": key,
        "Index": extract_attr(index, keys=["ModifyIndex", "Index"])
    })
    return self
875,098
Execute stored operations

Parameters:
    dc (str): Specify datacenter that will be used.
        Defaults to the agent's local datacenter.
    token (ObjectID): Token ID
Returns:
    Collection: Results of operations.
Raises:
    TransactionError: Transaction failed

async def execute(self, dc=None, token=None):
    token_id = extract_attr(token, keys=["ID"])
    try:
        response = await self._api.put("/v1/txn",
                                       data=self.operations,
                                       params={"dc": dc,
                                               "token": token_id})
    except ConflictError as error:
        errors = {elt["OpIndex"]: elt for elt in error.value["Errors"]}
        operations = [op["KV"] for op in self.operations]
        meta = error.meta
        raise TransactionError(errors, operations, meta) from error
    else:
        # Clear the queued operations once they have been applied
        self.operations[:] = []

    results = []
    for result in response.body["Results"]:
        data = result["KV"]
        if data["Value"] is not None:
            data["Value"] = decode_value(data["Value"], data["Flags"])
        results.append(data)
    return results
875,099
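Each verb method returns `self`, so operations chain naturally; a sketch of building and executing one atomic transaction (the `txn` accessor is an assumption about how this operations list is obtained):

async def update_atomically(client, entry):
    txn = client.kv.txn()  # hypothetical accessor
    txn.set("config/a", "1") \
       .set("config/b", "2") \
       .check_index("config/c", index=entry)
    try:
        return await txn.execute()
    except TransactionError as err:
        print("transaction rolled back:", err)
        return None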
Destroys a given session

Parameters:
    session (ObjectID): Session ID
    dc (str): Specify datacenter that will be used.
        Defaults to the agent's local datacenter.
Returns:
    bool: ``True`` on success

async def destroy(self, session, *, dc=None):
    session_id = extract_attr(session, keys=["ID"])
    response = await self._api.put("/v1/session/destroy", session_id,
                                   params={"dc": dc})
    return response.body is True
875,191
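A sketch of the session lifecycle, assuming a `create` counterpart on the same endpoint (hypothetical here):

async def with_session(client):
    session = await client.session.create()  # hypothetical
    try:
        pass  # use the session for locks, TTL renewal, etc.
    finally:
        destroyed = await client.session.destroy(session)
        assert destroyed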