Dataset columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, values 52.6k-1.16M). Each record below gives a docstring, the corresponding function source, and its index.
Find a program in the resolved environment. Args: cmd: String name of the program to find. parent_environ: Environment to interpret the context within, defaults to os.environ if None. fallback: If True, and the program is not found in the context, the current environment will then be searched. Returns: Path to the program, or None if the program was not found.
def which(self, cmd, parent_environ=None, fallback=False):
    env = self.get_environ(parent_environ=parent_environ)
    path = which(cmd, env=env)
    if fallback and path is None:
        path = which(cmd)
    return path
233,423
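A usage sketch for the method above, assuming the standard rez API (`ResolvedContext` from `rez.resolved_context`); the requested package list is illustrative.

from rez.resolved_context import ResolvedContext

# Resolve an environment containing python, then locate the interpreter within it.
ctx = ResolvedContext(["python"])           # hypothetical package request
exe = ctx.which("python", fallback=True)    # falls back to os.environ if not found in the context
print(exe)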
Convert context to dict containing only builtin types. Args: fields (list of str): If present, only write these fields into the dict. This can be used to avoid constructing expensive fields (such as 'graph') for some cases. Returns: dict: Dictified context.
def to_dict(self, fields=None):
    data = {}

    def _add(field):
        return (fields is None or field in fields)

    if _add("resolved_packages"):
        resolved_packages = []
        for pkg in (self._resolved_packages or []):
            resolved_packages.append(pkg.handle.to_dict())
        data["resolved_packages"] = resolved_packages

    if _add("serialize_version"):
        data["serialize_version"] = \
            '.'.join(map(str, ResolvedContext.serialize_version))

    if _add("patch_locks"):
        data["patch_locks"] = dict((k, v.name)
                                   for k, v in self.patch_locks.iteritems())

    if _add("package_orderers"):
        package_orderers = [package_order.to_pod(x)
                            for x in (self.package_orderers or [])]
        data["package_orderers"] = package_orderers or None

    if _add("package_filter"):
        data["package_filter"] = self.package_filter.to_pod()

    if _add("graph"):
        if self.graph_string and self.graph_string.startswith('{'):
            graph_str = self.graph_string  # already in compact format
        else:
            g = self.graph()
            graph_str = write_compacted(g)
        data["graph"] = graph_str

    data.update(dict(
        timestamp=self.timestamp,
        requested_timestamp=self.requested_timestamp,
        building=self.building,
        caching=self.caching,
        implicit_packages=map(str, self.implicit_packages),
        package_requests=map(str, self._package_requests),
        package_paths=self.package_paths,
        default_patch_lock=self.default_patch_lock.name,
        rez_version=self.rez_version,
        rez_path=self.rez_path,
        user=self.user,
        host=self.host,
        platform=self.platform,
        arch=self.arch,
        os=self.os,
        created=self.created,
        parent_suite_path=self.parent_suite_path,
        suite_context_name=self.suite_context_name,
        status=self.status_.name,
        failure_description=self.failure_description,
        from_cache=self.from_cache,
        solve_time=self.solve_time,
        load_time=self.load_time,
        num_loaded_packages=self.num_loaded_packages
    ))

    if fields:
        data = dict((k, v) for k, v in data.iteritems() if k in fields)

    return data
233,427
Load a `ResolvedContext` from a dict. Args: d (dict): Dict containing context data. identifier_str (str): String identifying the context, this is only used to display in an error string if a serialization version mismatch is detected. Returns: `ResolvedContext` object.
def from_dict(cls, d, identifier_str=None):
    # check serialization version
    def _print_version(value):
        return '.'.join(str(x) for x in value)

    toks = str(d["serialize_version"]).split('.')
    load_ver = tuple(int(x) for x in toks)
    curr_ver = ResolvedContext.serialize_version

    if load_ver[0] > curr_ver[0]:
        msg = ["The context"]
        if identifier_str:
            msg.append("in %s" % identifier_str)
        msg.append("was written by a newer version of Rez. The load may "
                   "fail (serialize version %s > %s)"
                   % (_print_version(load_ver), _print_version(curr_ver)))
        print >> sys.stderr, ' '.join(msg)

    # create and init the context
    r = ResolvedContext.__new__(ResolvedContext)
    r.load_path = None
    r.pre_resolve_bindings = None
    r.timestamp = d["timestamp"]
    r.building = d["building"]
    r.caching = d["caching"]
    r.implicit_packages = [PackageRequest(x) for x in d["implicit_packages"]]
    r._package_requests = [PackageRequest(x) for x in d["package_requests"]]
    r.package_paths = d["package_paths"]

    r.rez_version = d["rez_version"]
    r.rez_path = d["rez_path"]
    r.user = d["user"]
    r.host = d["host"]
    r.platform = d["platform"]
    r.arch = d["arch"]
    r.os = d["os"]
    r.created = d["created"]
    r.verbosity = d.get("verbosity", 0)

    r.status_ = ResolverStatus[d["status"]]
    r.failure_description = d["failure_description"]

    r.solve_time = d["solve_time"]
    r.load_time = d["load_time"]

    r.graph_string = d["graph"]
    r.graph_ = None

    r._resolved_packages = []
    for d_ in d["resolved_packages"]:
        variant_handle = d_
        if load_ver < (4, 0):
            # -- SINCE SERIALIZE VERSION 4.0
            from rez.utils.backcompat import convert_old_variant_handle
            variant_handle = convert_old_variant_handle(variant_handle)

        variant = get_variant(variant_handle)
        variant.set_context(r)
        r._resolved_packages.append(variant)

    # -- SINCE SERIALIZE VERSION 1
    r.requested_timestamp = d.get("requested_timestamp", 0)

    # -- SINCE SERIALIZE VERSION 2
    r.parent_suite_path = d.get("parent_suite_path")
    r.suite_context_name = d.get("suite_context_name")

    # -- SINCE SERIALIZE VERSION 3
    r.default_patch_lock = PatchLock[d.get("default_patch_lock", "no_lock")]
    patch_locks = d.get("patch_locks", {})
    r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks.iteritems())

    # -- SINCE SERIALIZE VERSION 4.0
    r.from_cache = d.get("from_cache", False)

    # -- SINCE SERIALIZE VERSION 4.1
    data = d.get("package_filter", [])
    r.package_filter = PackageFilterList.from_pod(data)

    # -- SINCE SERIALIZE VERSION 4.2
    data = d.get("package_orderers")
    if data:
        r.package_orderers = [package_order.from_pod(x) for x in data]
    else:
        r.package_orderers = None

    # -- SINCE SERIALIZE VERSION 4.3
    r.num_loaded_packages = d.get("num_loaded_packages", -1)

    # track context usage
    if config.context_tracking_host:
        data = dict((k, v) for k, v in d.iteritems()
                    if k in config.context_tracking_context_fields)
        r._track_context(data, action="sourced")

    return r
233,428
Create a config. Args: filepaths (list of str): List of config files to load. overrides (dict): A dict containing settings that override all others. Nested settings are overridden with nested dicts. locked: If True, settings overrides in environment variables are ignored.
def __init__(self, filepaths, overrides=None, locked=False):
    self.filepaths = filepaths
    self._sourced_filepaths = None
    self.overrides = overrides or {}
    self.locked = locked
233,595
Runs a python subproc to calculate a package attribute. Args: attr (str): Name of package attribute being created. src (list of str): Python code to execute, will be converted into semicolon-delimited single line of code. Returns: str: Output of python process.
def exec_python(attr, src, executable="python"):
    import subprocess

    if isinstance(src, basestring):
        src = [src]

    p = popen([executable, "-c", "; ".join(src)],
              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()

    if p.returncode:
        from rez.exceptions import InvalidPackageError
        raise InvalidPackageError(
            "Error determining package attribute '%s':\n%s" % (attr, err))

    return out.strip()
233,635
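A usage sketch for the helper above; the attribute name and code snippet are illustrative, and exec_python is assumed to be importable from rez's package-utility module.

# Run a short snippet in a subprocess and capture its stdout as the attribute value.
version_str = exec_python(
    attr="version",
    src=["import sys", "print(sys.version.split()[0])"],
)
print(version_str)  # e.g. "2.7.18"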
Escape the <, >, ^, and & special characters reserved by Windows. Args: value (str/EscapedString): String or already escaped string. Returns: str: The value escaped for Windows.
def escape_string(self, value):
    if isinstance(value, EscapedString):
        return value.formatted(self._escaper)
    return self._escaper(value)
233,675
Invoke a diff editor to show the difference between the source of two packages. Args: pkg1 (`Package`): Package to diff. pkg2 (`Package`): Package to diff against. If None, the next most recent package version is used.
def diff_packages(pkg1, pkg2=None):
    if pkg2 is None:
        it = iter_packages(pkg1.name)
        pkgs = [x for x in it if x.version < pkg1.version]
        if not pkgs:
            raise RezError("No package to diff with - %s is the earliest "
                           "package version" % pkg1.qualified_name)
        pkgs = sorted(pkgs, key=lambda x: x.version)
        pkg2 = pkgs[-1]

    def _check_pkg(pkg):
        if not (pkg.vcs and pkg.revision):
            raise RezError("Cannot diff package %s: it is a legacy format "
                           "package that does not contain enough information"
                           % pkg.qualified_name)

    _check_pkg(pkg1)
    _check_pkg(pkg2)

    path = mkdtemp(prefix="rez-pkg-diff")
    paths = []
    for pkg in (pkg1, pkg2):
        print "Exporting %s..." % pkg.qualified_name
        path_ = os.path.join(path, pkg.qualified_name)
        vcs_cls_1 = plugin_manager.get_plugin_class("release_vcs", pkg1.vcs)
        vcs_cls_1.export(revision=pkg.revision, path=path_)
        paths.append(path_)

    difftool = config.difftool
    print "Opening diff viewer %s..." % difftool
    proc = Popen([difftool] + paths)
    proc.wait()
233,734
Test the validity of a package name string. Args: name (str): Name to test. raise_error (bool): If True, raise an exception on failure Returns: bool.
def is_valid_package_name(name, raise_error=False):
    is_valid = PACKAGE_NAME_REGEX.match(name)
    if raise_error and not is_valid:
        raise PackageRequestError("Not a valid package name: %r" % name)
    return is_valid
233,778
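A quick sketch of the validator above; exactly which names PACKAGE_NAME_REGEX accepts is an assumption (alphanumerics and underscores are typical).

print(bool(is_valid_package_name("my_package")))      # True for typical names
print(bool(is_valid_package_name("not a package!")))  # False

# Ask for an exception instead of a falsy return value:
try:
    is_valid_package_name("not a package!", raise_error=True)
except PackageRequestError as e:
    print(e)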
Expand abbreviations in a format string. If an abbreviation does not match a field, or matches multiple fields, it is left unchanged. Example: >>> fields = ("hey", "there", "dude") >>> expand_abbreviations("hello {d}", fields) 'hello dude' Args: txt (str): Format string. fields (list of str): Fields to expand to. Returns: Expanded string.
def expand_abbreviations(txt, fields):
    def _expand(matchobj):
        s = matchobj.group("var")
        if s not in fields:
            matches = [x for x in fields if x.startswith(s)]
            if len(matches) == 1:
                s = matches[0]
        return "{%s}" % s
    return re.sub(FORMAT_VAR_REGEX, _expand, txt)
233,779
Expand shell variables of form $var and ${var}. Unknown variables are left unchanged. Args: text (str): String to expand. environ (dict): Environ dict to use for expansions, defaults to os.environ. Returns: The expanded string.
def expandvars(text, environ=None):
    if '$' not in text:
        return text

    i = 0
    if environ is None:
        environ = os.environ

    while True:
        m = ENV_VAR_REGEX.search(text, i)
        if not m:
            break
        i, j = m.span(0)
        name = m.group(1)
        if name.startswith('{') and name.endswith('}'):
            name = name[1:-1]
        if name in environ:
            tail = text[j:]
            text = text[:i] + environ[name]
            i = len(text)
            text += tail
        else:
            i = j
    return text
233,780
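A usage sketch for expandvars above; the environ dict is illustrative.

env = {"HOME": "/home/alice", "SHELL": "/bin/bash"}

print(expandvars("$HOME/bin", env))        # "/home/alice/bin"
print(expandvars("shell=${SHELL}", env))   # "shell=/bin/bash"
print(expandvars("$UNKNOWN stays", env))   # unknown variables are left unchanged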
Like `columnise`, but with colored rows. Args: printer (`colorize.Printer`): Printer object. Note: The last entry in each row is the row color, or None for no coloring.
def print_colored_columns(printer, rows, padding=2):
    rows_ = [x[:-1] for x in rows]
    colors = [x[-1] for x in rows]
    for col, line in zip(colors, columnise(rows_, padding=padding)):
        printer(line, col)
233,783
Create a formatter. Args: instance: The object to format with. pretty: If True, references to non-string attributes such as lists are converted to basic form, with characters such as brackets and parentheses removed. expand: `StringFormatType`.
def __init__(self, instance, pretty=False, expand=StringFormatType.error):
    self.instance = instance
    self.pretty = pretty
    self.expand = expand
233,790
Create a Version object. Args: ver_str: Version string. make_token: Callable that creates a VersionToken subclass from a string.
def __init__(self, ver_str='', make_token=AlphanumericVersionToken):
    self.tokens = []
    self.seps = []
    self._str = None
    self._hash = None

    if ver_str:
        toks = re_token.findall(ver_str)
        if not toks:
            raise VersionError(ver_str)

        seps = re_token.split(ver_str)
        if seps[0] or seps[-1] or max(len(x) for x in seps) > 1:
            raise VersionError("Invalid version syntax: '%s'" % ver_str)

        for tok in toks:
            try:
                self.tokens.append(make_token(tok))
            except VersionError as e:
                raise VersionError("Invalid version '%s': %s"
                                   % (ver_str, str(e)))

        self.seps = seps[1:-1]
233,808
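A usage sketch for the constructor above; the import path assumes the version module vendored by rez (rez.vendor.version.version) and may differ in other installs.

from rez.vendor.version.version import Version

v = Version("1.2.alpha3")
print(str(v))              # "1.2.alpha3"
print(Version("1.2") < v)  # True -- tokens compare left to right
print(Version() < v)       # True -- the empty version is the smallest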
Return a copy of the version, possibly with less tokens. Args: len_ (int): New version length. If >= current length, an unchanged copy of the version is returned.
def trim(self, len_):
    other = Version(None)
    other.tokens = self.tokens[:len_]
    other.seps = self.seps[:len_ - 1]
    return other
233,810
Create a VersionRange object. Args: range_str: Range string, such as "3", "3+<4.5", "2|6+". The range will be optimised, so the string representation of this instance may not match range_str. For example, "3+<6|4+<8" == "3+<8". make_token: Version token class to use. invalid_bound_error (bool): If True, raise an exception if an impossible range is given, such as '3+<2'.
def __init__(self, range_str='', make_token=AlphanumericVersionToken,
             invalid_bound_error=True):
    self._str = None
    self.bounds = []  # note: kept in ascending order
    if range_str is None:
        return

    try:
        parser = _VersionRangeParser(range_str, make_token,
                                     invalid_bound_error=invalid_bound_error)
        bounds = parser.bounds
    except ParseException as e:
        raise VersionError("Syntax error in version range '%s': %s"
                           % (range_str, str(e)))
    except VersionError as e:
        raise VersionError("Invalid version range '%s': %s"
                           % (range_str, str(e)))

    if bounds:
        self.bounds = self._union(bounds)
    else:
        self.bounds.append(_Bound.any)
233,837
OR together version ranges. Calculates the union of this range with one or more other ranges. Args: other: VersionRange object (or list of) to OR with. Returns: New VersionRange object representing the union.
def union(self, other):
    if not hasattr(other, "__iter__"):
        other = [other]

    bounds = self.bounds[:]
    for range in other:
        bounds += range.bounds

    bounds = self._union(bounds)
    range = VersionRange(None)
    range.bounds = bounds
    return range
233,838
AND together version ranges. Calculates the intersection of this range with one or more other ranges. Args: other: VersionRange object (or list of) to AND with. Returns: New VersionRange object representing the intersection, or None if no ranges intersect.
def intersection(self, other):
    if not hasattr(other, "__iter__"):
        other = [other]

    bounds = self.bounds
    for range in other:
        bounds = self._intersection(bounds, range.bounds)
        if not bounds:
            return None

    range = VersionRange(None)
    range.bounds = bounds
    return range
233,839
Create a range from lower_version..upper_version. Args: lower_version: Version object representing lower bound of the range. upper_version: Version object representing upper bound of the range. Returns: `VersionRange` object.
def as_span(cls, lower_version=None, upper_version=None,
            lower_inclusive=True, upper_inclusive=True):
    lower = (None if lower_version is None
             else _LowerBound(lower_version, lower_inclusive))
    upper = (None if upper_version is None
             else _UpperBound(upper_version, upper_inclusive))
    bound = _Bound(lower, upper)

    range = cls(None)
    range.bounds = [bound]
    return range
233,842
Create a range from a version. Args: version: Version object. This is used as the upper/lower bound of the range. op: Operation as a string. One of 'gt'/'>', 'gte'/'>=', 'lt'/'<', 'lte'/'<=', 'eq'/'=='. If None, a bounded range will be created that contains the version superset. Returns: `VersionRange` object.
def from_version(cls, version, op=None):
    lower = None
    upper = None

    if op is None:
        lower = _LowerBound(version, True)
        upper = _UpperBound(version.next(), False)
    elif op in ("eq", "=="):
        lower = _LowerBound(version, True)
        upper = _UpperBound(version, True)
    elif op in ("gt", ">"):
        lower = _LowerBound(version, False)
    elif op in ("gte", ">="):
        lower = _LowerBound(version, True)
    elif op in ("lt", "<"):
        upper = _UpperBound(version, False)
    elif op in ("lte", "<="):
        upper = _UpperBound(version, True)
    else:
        raise VersionError("Unknown bound operation '%s'" % op)

    bound = _Bound(lower, upper)
    range = cls(None)
    range.bounds = [bound]
    return range
233,843
Create a range from a list of versions. This method creates a range that contains only the given versions and no other. Typically the range looks like (for eg) "==3|==4|==5.1". Args: versions: List of Version objects. Returns: `VersionRange` object.
def from_versions(cls, versions):
    range = cls(None)
    range.bounds = []

    for version in dedup(sorted(versions)):
        lower = _LowerBound(version, True)
        upper = _UpperBound(version, True)
        bound = _Bound(lower, upper)
        range.bounds.append(bound)

    return range
233,844
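A usage sketch for the classmethod above; the import path assumes the rez-vendored version module and the membership test assumes VersionRange supports the `in` operator for Version objects.

from rez.vendor.version.version import Version, VersionRange

r = VersionRange.from_versions([Version("3"), Version("5.1"), Version("4")])
print(str(r))             # "==3|==4|==5.1" -- only the given versions are in the range
print(Version("4") in r)  # True
print(Version("4.5") in r)  # False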
Internal helper that extracts a data set from the API's JSON response. Args: json_inp (json): JSON returned by the API call. ndx (int): index of the result set within the API's JSON. Returns: If pandas is present: DataFrame (pandas.DataFrame): data set from ndx within the API's JSON. Otherwise: a list of dictionaries pairing headers with values from the page.
def _api_scrape(json_inp, ndx):
    try:
        headers = json_inp['resultSets'][ndx]['headers']
        values = json_inp['resultSets'][ndx]['rowSet']
    except KeyError:
        # This is so ugly but this is what you get when your data comes out
        # in not a standard format
        try:
            headers = json_inp['resultSet'][ndx]['headers']
            values = json_inp['resultSet'][ndx]['rowSet']
        except KeyError:
            # Added for results that only include one set (ex. LeagueLeaders)
            headers = json_inp['resultSet']['headers']
            values = json_inp['resultSet']['rowSet']

    if HAS_PANDAS:
        return DataFrame(values, columns=headers)
    else:
        # Taken from www.github.com/bradleyfay/py-goldsberry
        return [dict(zip(headers, value)) for value in values]
234,340
Internal helper that performs the API request and returns the parsed JSON. Args: endpoint (str): endpoint to be called from the API params (dict): parameters to be passed to the API Raises: HTTPError: if the request returns a status code != 200 Returns: json (json): json object for selected API call
def _get_json(endpoint, params, referer='scores'):
    h = dict(HEADERS)
    h['referer'] = 'http://stats.nba.com/{ref}/'.format(ref=referer)
    _get = get(BASE_URL.format(endpoint=endpoint), params=params, headers=h)
    # print _get.url
    _get.raise_for_status()
    return _get.json()
234,341
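A sketch of how the two helpers above fit together; the endpoint name, parameters, and result-set index are illustrative, and the live stats.nba.com API may expect different values.

# Fetch a scoreboard-style endpoint and convert its first result set.
json_inp = _get_json(endpoint='scoreboard',
                     params={'GameDate': '2017-01-01', 'LeagueID': '00', 'DayOffset': '0'},
                     referer='scores')
data = _api_scrape(json_inp, ndx=0)  # DataFrame if pandas is available, else a list of dicts
print(data)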
Return dict of name -> object pairs from a list of objects with unique names. Args: object_list: list of objects, each X of which has a unique name accessible as X.name.value Returns: dict, { X.name.value: X for x in object_list } If the list is empty or None, returns an empty dict.
def get_uniquely_named_objects_by_name(object_list):
    if not object_list:
        return dict()

    result = dict()
    for obj in object_list:
        name = obj.name.value
        if name in result:
            raise GraphQLCompilationError(u'Found duplicate object key: '
                                          u'{} {}'.format(name, object_list))
        result[name] = obj

    return result
235,096
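A small sketch of the helper above using namedtuple stand-ins for GraphQL AST nodes (the real inputs are graphql-core objects whose .name.value holds the name).

from collections import namedtuple

Name = namedtuple('Name', ['value'])
Node = namedtuple('Node', ['name'])

objects = [Node(Name('foo')), Node(Name('bar'))]
by_name = get_uniquely_named_objects_by_name(objects)
print(sorted(by_name.keys()))  # ['bar', 'foo']

# A repeated name raises GraphQLCompilationError:
# get_uniquely_named_objects_by_name([Node(Name('foo')), Node(Name('foo'))])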
Return a list of vertex fields, and a list of property fields, for the given AST node. Also verifies that all property fields for the AST node appear before all vertex fields, raising GraphQLCompilationError if that is not the case. Args: ast: GraphQL AST node, obtained from the graphql library Returns: tuple of two lists - the first list contains ASTs for vertex fields - the second list contains ASTs for property fields
def _get_fields(ast):
    if not ast.selection_set:
        # There are no child fields.
        return [], []

    property_fields = []
    vertex_fields = []
    seen_field_names = set()
    switched_to_vertices = False  # Ensures that all property fields are before all vertex fields.
    for field_ast in ast.selection_set.selections:
        if not isinstance(field_ast, Field):
            # We are getting Fields only, ignore everything else.
            continue

        name = get_ast_field_name(field_ast)
        if name in seen_field_names:
            # If we ever allow repeated field names,
            # then we have to change the Location naming scheme to reflect the repetitions
            # and disambiguate between Recurse and Traverse visits to a Location.
            raise GraphQLCompilationError(u'Encountered repeated field name: {}'.format(name))
        seen_field_names.add(name)

        # Vertex fields start with 'out_' or 'in_', denoting the edge direction to that vertex.
        if is_vertex_field_name(name):
            switched_to_vertices = True
            vertex_fields.append(field_ast)
        else:
            if switched_to_vertices:
                raise GraphQLCompilationError(u'Encountered property field {} '
                                              u'after vertex fields!'.format(name))
            property_fields.append(field_ast)

    return vertex_fields, property_fields
235,132
Perform type checks on the enclosing type and the recursed type for a recurse directive. Args: current_schema_type: GraphQLType, the schema type at the current location field_schema_type: GraphQLType, the schema type at the inner scope context: dict, various per-compilation data (e.g. declared tags, whether the current block is optional, etc.). May be mutated in-place in this function!
def _validate_recurse_directive_types(current_schema_type, field_schema_type, context):
    # Get the set of all allowed types in the current scope.
    type_hints = context['type_equivalence_hints'].get(field_schema_type)
    type_hints_inverse = context['type_equivalence_hints_inverse'].get(field_schema_type)
    allowed_current_types = {field_schema_type}

    if type_hints and isinstance(type_hints, GraphQLUnionType):
        allowed_current_types.update(type_hints.types)

    if type_hints_inverse and isinstance(type_hints_inverse, GraphQLUnionType):
        allowed_current_types.update(type_hints_inverse.types)

    # The current scope must be of the same type as the field scope, or an acceptable subtype.
    current_scope_is_allowed = current_schema_type in allowed_current_types

    is_implemented_interface = (
        isinstance(field_schema_type, GraphQLInterfaceType) and
        isinstance(current_schema_type, GraphQLObjectType) and
        field_schema_type in current_schema_type.interfaces
    )

    if not any((current_scope_is_allowed, is_implemented_interface)):
        raise GraphQLCompilationError(u'Edges expanded with a @recurse directive must either '
                                      u'be of the same type as their enclosing scope, a supertype '
                                      u'of the enclosing scope, or be of an interface type that is '
                                      u'implemented by the type of their enclosing scope. '
                                      u'Enclosing scope type: {}, edge type: '
                                      u'{}'.format(current_schema_type, field_schema_type))
235,137
Construct the final ConstructResult basic block that defines the output format of the query. Args: outputs: dict, output name (string) -> output data dict, specifying the location from where to get the data, and whether the data is optional (and therefore may be missing); missing optional data is replaced with 'null' Returns: a ConstructResult basic block that constructs appropriate outputs for the query
def _compile_output_step(outputs):
    if not outputs:
        raise GraphQLCompilationError(u'No fields were selected for output! Please mark at least '
                                      u'one field with the @output directive.')

    output_fields = {}
    for output_name, output_context in six.iteritems(outputs):
        location = output_context['location']
        optional = output_context['optional']
        graphql_type = output_context['type']

        expression = None
        existence_check = None  # pylint: disable=redefined-variable-type
        if isinstance(location, FoldScopeLocation):
            if optional:
                raise AssertionError(u'Unreachable state reached, optional in fold: '
                                     u'{}'.format(output_context))

            if location.field == COUNT_META_FIELD_NAME:
                expression = expressions.FoldCountContextField(location)
            else:
                expression = expressions.FoldedContextField(location, graphql_type)
        else:
            expression = expressions.OutputContextField(location, graphql_type)

            if optional:
                existence_check = expressions.ContextFieldExistence(location.at_vertex())

        if existence_check:
            expression = expressions.TernaryConditional(
                existence_check, expression, expressions.NullLiteral)
        # pylint: enable=redefined-variable-type

        output_fields[output_name] = expression

    return blocks.ConstructResult(output_fields)
235,144
Construct a new Variable object for the given variable name. Args: variable_name: string, should start with '$' and then obey variable naming rules (see validate_safe_string()) inferred_type: GraphQL type object, specifying the inferred type of the variable Returns: new Variable object
def __init__(self, variable_name, inferred_type):
    variable_name = ensure_unicode_string(variable_name)
    super(Variable, self).__init__(variable_name, inferred_type)
    self.variable_name = variable_name
    self.inferred_type = inferred_type
    self.validate()
235,161
Construct a new GlobalContextField object that references a field at a given location. Args: location: Location, specifying where the field was declared. Returns: new GlobalContextField object
def __init__(self, location, field_type):
    super(GlobalContextField, self).__init__(location, field_type)
    self.location = location
    self.field_type = field_type
    self.validate()
235,168
Construct a new OutputContextField object for the field at the given location. Args: location: Location, specifying where the field was declared. The Location must point to a property, and that property's value is output as the result. field_type: GraphQL type object, specifying the type of the field being output Returns: new OutputContextField object
def __init__(self, location, field_type):
    super(OutputContextField, self).__init__(location, field_type)
    self.location = location
    self.field_type = field_type
    self.validate()
235,174
Construct a new FoldedContextField object for this folded field. Args: fold_scope_location: FoldScopeLocation specifying the location of the context field being output. field_type: GraphQL type object, specifying the type of the field being output. Since the field is folded, this must be a GraphQLList of some kind. Returns: new FoldedContextField object
def __init__(self, fold_scope_location, field_type):
    super(FoldedContextField, self).__init__(fold_scope_location, field_type)
    self.fold_scope_location = fold_scope_location
    self.field_type = field_type
    self.validate()
235,179
Construct a new FoldCountContextField object for this fold. Args: fold_scope_location: FoldScopeLocation specifying the fold whose size is being output. Returns: new FoldCountContextField object
def __init__(self, fold_scope_location):
    super(FoldCountContextField, self).__init__(fold_scope_location)
    self.fold_scope_location = fold_scope_location
    self.validate()
235,183
Construct a new ContextFieldExistence object for a vertex field from the global context. Args: location: Location, specifying where the field was declared. Must point to a vertex. Returns: new ContextFieldExistence expression which evaluates to True iff the vertex exists
def __init__(self, location):
    super(ContextFieldExistence, self).__init__(location)
    self.location = location
    self.validate()
235,186
Construct an expression that connects two expressions with an operator. Args: operator: unicode, the operator used to connect the left and right expressions left: Expression on the left side of the binary operator right: Expression on the right side of the binary operator Returns: new BinaryComposition object
def __init__(self, operator, left, right):
    super(BinaryComposition, self).__init__(operator, left, right)
    self.operator = operator
    self.left = left
    self.right = right
    self.validate()
235,193
Construct an expression that evaluates a predicate and returns one of two results. Args: predicate: Expression to evaluate, and based on which to choose the returned value if_true: Expression to return if the predicate was true if_false: Expression to return if the predicate was false Returns: new TernaryConditional object
def __init__(self, predicate, if_true, if_false):
    super(TernaryConditional, self).__init__(predicate, if_true, if_false)
    self.predicate = predicate
    self.if_true = if_true
    self.if_false = if_false
    self.validate()
235,198
Assert that IR blocks originating from the frontend do not have nonsensical structure. Args: ir_blocks: list of BasicBlocks representing the IR to sanity-check Raises: AssertionError, if the IR has unexpected structure. If the IR produced by the front-end cannot be successfully and correctly used to generate MATCH or Gremlin due to a bug, this is the method that should catch the problem.
def sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table):
    if not ir_blocks:
        raise AssertionError(u'Received no ir_blocks: {}'.format(ir_blocks))

    _sanity_check_fold_scope_locations_are_unique(ir_blocks)
    _sanity_check_no_nested_folds(ir_blocks)
    _sanity_check_query_root_block(ir_blocks)
    _sanity_check_output_source_follower_blocks(ir_blocks)
    _sanity_check_block_pairwise_constraints(ir_blocks)
    _sanity_check_mark_location_preceding_optional_traverse(ir_blocks)
    _sanity_check_every_location_is_marked(ir_blocks)
    _sanity_check_coerce_type_outside_of_fold(ir_blocks)
    _sanity_check_all_marked_locations_are_registered(ir_blocks, query_metadata_table)
    _sanity_check_registered_locations_parent_locations(query_metadata_table)
235,203
Attempt to get a column by name from the selectable. Args: column_name: str, name of the column to retrieve. node: SqlNode, the node the column is being retrieved for. context: CompilationContext, compilation specific metadata. Returns: Optional[column], the SQLAlchemy column if found, None otherwise.
def try_get_column(column_name, node, context):
    selectable = get_node_selectable(node, context)
    if not hasattr(selectable, 'c'):
        raise AssertionError(
            u'Selectable "{}" does not have a column collection. Context is {}.'.format(
                selectable, context))
    return selectable.c.get(column_name, None)
235,242
Get a column by name from the selectable. Args: column_name: str, name of the column to retrieve. node: SqlNode, the node the column is being retrieved for. context: CompilationContext, compilation specific metadata. Returns: column, the SQLAlchemy column if found. Raises an AssertionError otherwise.
def get_column(column_name, node, context):
    column = try_get_column(column_name, node, context)
    if column is None:
        selectable = get_node_selectable(node, context)
        raise AssertionError(
            u'Column "{}" not found in selectable "{}". Columns present are {}. '
            u'Context is {}.'.format(column_name, selectable.original,
                                     [col.name for col in selectable.c], context))
    return column
235,243
Return a dict of directive name to directive object for the given AST node. Any directives that are allowed to exist more than once on any AST node are ignored. For any directive that may appear at most once, we verify that it is not duplicated, raising GraphQLCompilationError if it appears more than once on the AST node. Args: ast: GraphQL AST node, obtained from the graphql library Returns: dict of string to directive object
def get_unique_directives(ast):
    if not ast.directives:
        return dict()

    result = dict()
    for directive_obj in ast.directives:
        directive_name = directive_obj.name.value
        if directive_name in ALLOWED_DUPLICATED_DIRECTIVES:
            pass  # We don't return these.
        elif directive_name in result:
            raise GraphQLCompilationError(u'Directive was unexpectedly applied twice in the same '
                                          u'location: {} {}'.format(directive_name,
                                                                    ast.directives))
        else:
            result[directive_name] = directive_obj

    return result
235,244
Insert the arguments into the compiled MATCH query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a MATCH query with inserted argument data
def insert_arguments_into_match_query(compilation_result, arguments):
    if compilation_result.language != MATCH_LANGUAGE:
        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))

    base_query = compilation_result.query
    argument_types = compilation_result.input_metadata

    # The arguments are assumed to have already been validated against the query.
    sanitized_arguments = {
        key: _safe_match_argument(argument_types[key], value)
        for key, value in six.iteritems(arguments)
    }

    return base_query.format(**sanitized_arguments)
235,255
Split a list of IR blocks into per-location MATCH steps. Args: pruned_ir_blocks: list of IR basic block objects that have gone through a lowering step. Returns: list of MatchStep namedtuples, each of which contains all basic blocks that correspond to a single MATCH step.
def _split_ir_into_match_steps(pruned_ir_blocks):
    output = []
    current_tuple = None

    for block in pruned_ir_blocks:
        if isinstance(block, OutputSource):
            # OutputSource blocks do not require any MATCH code, and only serve to help
            # optimizations and debugging. Simply omit them at this stage.
            continue
        elif isinstance(block, root_block_types):
            if current_tuple is not None:
                output.append(current_tuple)
            current_tuple = (block,)
        elif isinstance(block, (CoerceType, Filter, MarkLocation)):
            current_tuple += (block,)
        else:
            raise AssertionError(u'Unexpected block type when converting to MATCH query: '
                                 u'{} {}'.format(block, pruned_ir_blocks))

    if current_tuple is None:
        raise AssertionError(u'current_tuple was unexpectedly None: {}'.format(pruned_ir_blocks))
    output.append(current_tuple)

    return [_per_location_tuple_to_step(x) for x in output]
235,259
Insert the arguments into the compiled SQL query to form a complete query. Args: compilation_result: CompilationResult, compilation result from the GraphQL compiler. arguments: Dict[str, Any], parameter name -> value, for every parameter the query expects. Returns: SQLAlchemy Selectable, an executable SQL query with parameters bound.
def insert_arguments_into_sql_query(compilation_result, arguments):
    if compilation_result.language != SQL_LANGUAGE:
        raise AssertionError(u'Unexpected query output language: {}'.format(compilation_result))
    base_query = compilation_result.query
    return base_query.params(**arguments)
235,263
Construct an expression that is true when the field value is within the given bounds. Args: field: LocalField Expression, denoting the field in consideration lower_bound: lower bound constraint for given field upper_bound: upper bound constraint for given field Returns: a new BetweenClause object
def __init__(self, field, lower_bound, upper_bound):
    super(BetweenClause, self).__init__(field, lower_bound, upper_bound)
    self.field = field
    self.lower_bound = lower_bound
    self.upper_bound = upper_bound
    self.validate()
235,271
Insert a path of optional Locations into the tree. Each OptionalTraversalTree object contains child Location objects as keys mapping to other OptionalTraversalTree objects. Args: optional_root_locations_path: list of optional root Locations; all except the last must be present in complex_optional_roots
def insert(self, optional_root_locations_path):
    encountered_simple_optional = False
    parent_location = self._root_location
    for optional_root_location in optional_root_locations_path:
        if encountered_simple_optional:
            raise AssertionError(u'Encountered simple optional root location {} in path, but '
                                 u'further locations are present. This should not happen: {}'
                                 .format(optional_root_location, optional_root_locations_path))

        if optional_root_location not in self._location_to_children:
            # Simple optionals are ignored.
            # There should be no complex optionals after a simple optional.
            encountered_simple_optional = True
        else:
            self._location_to_children[parent_location].add(optional_root_location)
            parent_location = optional_root_location
235,276
Return a SQLAlchemy Query from a passed SqlQueryTree. Args: sql_query_tree: SqlQueryTree, tree representation of the query to emit. compiler_metadata: SqlMetadata, SQLAlchemy specific metadata. Returns: SQLAlchemy Query
def emit_code_from_ir(sql_query_tree, compiler_metadata):
    context = CompilationContext(
        query_path_to_selectable=dict(),
        query_path_to_location_info=sql_query_tree.query_path_to_location_info,
        query_path_to_output_fields=sql_query_tree.query_path_to_output_fields,
        query_path_to_filters=sql_query_tree.query_path_to_filters,
        query_path_to_node=sql_query_tree.query_path_to_node,
        compiler_metadata=compiler_metadata,
    )
    return _query_tree_to_query(sql_query_tree.root, context)
235,278
Create an aliased table for a SqlNode. Updates the relevant Selectable global context. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Table, the newly aliased SQLAlchemy table.
def _create_table_and_update_context(node, context):
    schema_type_name = sql_context_helpers.get_schema_type_name(node, context)
    table = context.compiler_metadata.get_table(schema_type_name).alias()
    context.query_path_to_selectable[node.query_path] = table
    return table
235,279
Create a query from a SqlNode. Args: node: SqlNode, the current node. context: CompilationContext, global compilation state and metadata. Returns: Selectable, selectable of the generated query.
def _create_query(node, context):
    visited_nodes = [node]
    output_columns = _get_output_columns(visited_nodes, context)
    filters = _get_filters(visited_nodes, context)
    selectable = sql_context_helpers.get_node_selectable(node, context)
    query = select(output_columns).select_from(selectable).where(and_(*filters))
    return query
235,280
Get the output columns for a list of SqlNodes. Args: nodes: List[SqlNode], the nodes to get output columns from. context: CompilationContext, global compilation state and metadata. Returns: List[Column], list of SqlAlchemy Columns to output for this query.
def _get_output_columns(nodes, context):
    columns = []
    for node in nodes:
        for sql_output in sql_context_helpers.get_outputs(node, context):
            field_name = sql_output.field_name
            column = sql_context_helpers.get_column(field_name, node, context)
            column = column.label(sql_output.output_name)
            columns.append(column)
    return columns
235,281
Get filters to apply to a list of SqlNodes. Args: nodes: List[SqlNode], the SqlNodes to get filters for. context: CompilationContext, global compilation state and metadata. Returns: List[Expression], list of SQLAlchemy expressions.
def _get_filters(nodes, context):
    filters = []
    for node in nodes:
        for filter_block in sql_context_helpers.get_filters(node, context):
            filter_sql_expression = _transform_filter_to_sql(filter_block, node, context)
            filters.append(filter_sql_expression)
    return filters
235,282
Transform a Filter block to its corresponding SQLAlchemy expression. Args: filter_block: Filter, the Filter block to transform. node: SqlNode, the node Filter block applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression equivalent to the Filter.predicate expression.
def _transform_filter_to_sql(filter_block, node, context):
    expression = filter_block.predicate
    return _expression_to_sql(expression, node, context)
235,283
Recursively transform a Filter block predicate to its SQLAlchemy expression representation. Args: expression: expression, the compiler expression to transform. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy Expression equivalent to the passed compiler expression.
def _expression_to_sql(expression, node, context):
    _expression_transformers = {
        expressions.LocalField: _transform_local_field_to_expression,
        expressions.Variable: _transform_variable_to_expression,
        expressions.Literal: _transform_literal_to_expression,
        expressions.BinaryComposition: _transform_binary_composition_to_expression,
    }
    expression_type = type(expression)
    if expression_type not in _expression_transformers:
        raise NotImplementedError(
            u'Unsupported compiler expression "{}" of type "{}" cannot be converted to SQL '
            u'expression.'.format(expression, type(expression)))
    return _expression_transformers[expression_type](expression, node, context)
235,284
Transform a BinaryComposition compiler expression into a SQLAlchemy expression. Recursively calls _expression_to_sql to convert its left and right sub-expressions. Args: expression: expression, BinaryComposition compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
def _transform_binary_composition_to_expression(expression, node, context):
    if expression.operator not in constants.SUPPORTED_OPERATORS:
        raise NotImplementedError(
            u'Filter operation "{}" is not supported by the SQL backend.'.format(
                expression.operator))
    sql_operator = constants.SUPPORTED_OPERATORS[expression.operator]
    left = _expression_to_sql(expression.left, node, context)
    right = _expression_to_sql(expression.right, node, context)
    if sql_operator.cardinality == constants.CARDINALITY_UNARY:
        left, right = _get_column_and_bindparam(left, right, sql_operator)
        clause = getattr(left, sql_operator.name)(right)
        return clause
    elif sql_operator.cardinality == constants.CARDINALITY_BINARY:
        clause = getattr(sql_expressions, sql_operator.name)(left, right)
        return clause
    elif sql_operator.cardinality == constants.CARDINALITY_LIST_VALUED:
        left, right = _get_column_and_bindparam(left, right, sql_operator)
        # ensure that SQLAlchemy treats the right bind parameter as list valued
        right.expanding = True
        clause = getattr(left, sql_operator.name)(right)
        return clause
    raise AssertionError(u'Unreachable, operator cardinality {} for compiler expression {} is '
                         u'unknown'.format(sql_operator.cardinality, expression))
235,285
Transform a Variable compiler expression into its SQLAlchemy expression representation. Args: expression: expression, Variable compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
def _transform_variable_to_expression(expression, node, context):
    variable_name = expression.variable_name
    if not variable_name.startswith(u'$'):
        raise AssertionError(u'Unexpectedly received variable name {} that is not '
                             u'prefixed with "$"'.format(variable_name))
    return bindparam(variable_name[1:])
235,287
Transform a LocalField compiler expression into its SQLAlchemy expression representation. Args: expression: expression, LocalField compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
def _transform_local_field_to_expression(expression, node, context):
    column_name = expression.field_name
    column = sql_context_helpers.get_column(column_name, node, context)
    return column
235,288
Optimize comparisons of a boolean binary comparison expression against a boolean literal. Rewriting example: BinaryComposition( '=', BinaryComposition('!=', something, NullLiteral) False) The above is rewritten into: BinaryComposition('=', something, NullLiteral) Args: ir_blocks: list of basic block objects Returns: a new list of basic block objects, with the optimization applied
def optimize_boolean_expression_comparisons(ir_blocks):
    operator_inverses = {
        u'=': u'!=',
        u'!=': u'=',
    }

    def visitor_fn(expression):
        if not isinstance(expression, BinaryComposition):
            return expression

        left_is_binary_composition = isinstance(expression.left, BinaryComposition)
        right_is_binary_composition = isinstance(expression.right, BinaryComposition)

        if not left_is_binary_composition and not right_is_binary_composition:
            # Nothing to rewrite, return the expression as-is.
            return expression

        identity_literal = None  # The boolean literal for which we just use the inner expression.
        inverse_literal = None  # The boolean literal for which we negate the inner expression.
        if expression.operator == u'=':
            identity_literal = TrueLiteral
            inverse_literal = FalseLiteral
        elif expression.operator == u'!=':
            identity_literal = FalseLiteral
            inverse_literal = TrueLiteral
        else:
            return expression

        expression_to_rewrite = None
        if expression.left == identity_literal and right_is_binary_composition:
            return expression.right
        elif expression.right == identity_literal and left_is_binary_composition:
            return expression.left
        elif expression.left == inverse_literal and right_is_binary_composition:
            expression_to_rewrite = expression.right
        elif expression.right == inverse_literal and left_is_binary_composition:
            expression_to_rewrite = expression.left

        if expression_to_rewrite is None:
            # We couldn't find anything to rewrite, return the expression as-is.
            return expression
        elif expression_to_rewrite.operator not in operator_inverses:
            # We can't rewrite the inner expression since we don't know its inverse operator.
            return expression
        else:
            return BinaryComposition(
                operator_inverses[expression_to_rewrite.operator],
                expression_to_rewrite.left,
                expression_to_rewrite.right)

    new_ir_blocks = []
    for block in ir_blocks:
        new_block = block.visit_and_update_expressions(visitor_fn)
        new_ir_blocks.append(new_block)

    return new_ir_blocks
235,291
Extract all @fold data from the IR blocks, and cut the folded IR blocks out of the IR. Args: ir_blocks: list of IR blocks to extract fold data from Returns: tuple (folds, remaining_ir_blocks): - folds: dict of FoldScopeLocation -> list of IR blocks corresponding to that @fold scope. The list does not contain Fold or Unfold blocks. - remaining_ir_blocks: list of IR blocks that were not part of a Fold-Unfold section.
def extract_folds_from_ir_blocks(ir_blocks):
    folds = dict()
    remaining_ir_blocks = []
    current_folded_blocks = []
    in_fold_location = None

    for block in ir_blocks:
        if isinstance(block, Fold):
            if in_fold_location is not None:
                raise AssertionError(u'in_fold_location was not None at a Fold block: {} {} '
                                     u'{}'.format(current_folded_blocks, remaining_ir_blocks,
                                                  ir_blocks))
            in_fold_location = block.fold_scope_location
        elif isinstance(block, Unfold):
            if in_fold_location is None:
                raise AssertionError(u'in_fold_location was None at an Unfold block: {} {} '
                                     u'{}'.format(current_folded_blocks, remaining_ir_blocks,
                                                  ir_blocks))
            folds[in_fold_location] = current_folded_blocks
            current_folded_blocks = []
            in_fold_location = None
        else:
            if in_fold_location is not None:
                current_folded_blocks.append(block)
            else:
                remaining_ir_blocks.append(block)

    return folds, remaining_ir_blocks
235,292
If location A translates to B, and B to C, then make A translate directly to C. Args: location_translations: dict of Location -> Location, where the key translates to the value. Mutated in place for efficiency and simplicity of implementation.
def _flatten_location_translations(location_translations):
    sources_to_process = set(six.iterkeys(location_translations))

    def _update_translation(source):
        destination = location_translations[source]
        if destination not in location_translations:
            # "destination" cannot be translated, no further flattening required.
            return destination
        else:
            # "destination" can itself be translated -- do so,
            # and then flatten "source" to the final translation as well.
            sources_to_process.discard(destination)
            final_destination = _update_translation(destination)
            location_translations[source] = final_destination
            return final_destination

    while sources_to_process:
        _update_translation(sources_to_process.pop())
235,302
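The helper above relies only on dict operations, so plain strings can stand in for Location objects in a quick sketch:

translations = {'A': 'B', 'B': 'C', 'D': 'C'}
_flatten_location_translations(translations)  # mutated in place
print(translations)  # {'A': 'C', 'B': 'C', 'D': 'C'} -- A now maps directly to C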
Return a dict mapping location -> list of filters applied at that location. Args: match_query: MatchQuery object from which to extract location -> filters dict Returns: dict mapping each location in match_query to a list of Filter objects applied at that location
def _construct_location_to_filter_list(match_query):
    # For each location, all filters for that location should be applied at the first instance.
    # This function collects a list of all filters corresponding to each location
    # present in the given MatchQuery.
    location_to_filters = {}
    for match_traversal in match_query.match_traversals:
        for match_step in match_traversal:
            current_filter = match_step.where_block
            if current_filter is not None:
                current_location = match_step.as_block.location
                location_to_filters.setdefault(current_location, []).append(current_filter)

    return location_to_filters
235,311
Lower BinaryCompositions involving non-existent ContextFields to True. Args: present_locations: set of all locations in the current MatchQuery that have not been pruned expression: BinaryComposition with at least one ContextField operand Returns: TrueLiteral iff either ContextField operand is not in `present_locations`, and the original expression otherwise
def _update_context_field_binary_composition(present_locations, expression):
    if not any((isinstance(expression.left, ContextField),
                isinstance(expression.right, ContextField))):
        raise AssertionError(u'Received a BinaryComposition {} without any ContextField '
                             u'operands. This should never happen.'.format(expression))

    if isinstance(expression.left, ContextField):
        context_field = expression.left
        location_name, _ = context_field.location.get_location_name()
        if location_name not in present_locations:
            return TrueLiteral

    if isinstance(expression.right, ContextField):
        context_field = expression.right
        location_name, _ = context_field.location.get_location_name()
        if location_name not in present_locations:
            return TrueLiteral

    return expression
235,315
Return a simplified BinaryComposition if either operand is a TrueLiteral. Args: expression: BinaryComposition without any ContextField operand(s) Returns: simplified expression if the given expression is a disjunction/conjunction and one of its operands is a TrueLiteral, and the original expression otherwise
def _simplify_non_context_field_binary_composition(expression):
    if any((isinstance(expression.left, ContextField),
            isinstance(expression.right, ContextField))):
        raise AssertionError(u'Received a BinaryComposition {} with a ContextField '
                             u'operand. This should never happen.'.format(expression))

    if expression.operator == u'||':
        if expression.left == TrueLiteral or expression.right == TrueLiteral:
            return TrueLiteral
        else:
            return expression
    elif expression.operator == u'&&':
        if expression.left == TrueLiteral:
            return expression.right
        if expression.right == TrueLiteral:
            return expression.left
        else:
            return expression
    else:
        return expression
235,316
Insert the arguments into the compiled GraphQL query to form a complete query. Args: compilation_result: a CompilationResult object derived from the GraphQL compiler arguments: dict, mapping argument name to its value, for every parameter the query expects. Returns: string, a query in the appropriate output language, with inserted argument data
def insert_arguments_into_query(compilation_result, arguments):
    _ensure_arguments_are_provided(compilation_result.input_metadata, arguments)

    if compilation_result.language == MATCH_LANGUAGE:
        return insert_arguments_into_match_query(compilation_result, arguments)
    elif compilation_result.language == GREMLIN_LANGUAGE:
        return insert_arguments_into_gremlin_query(compilation_result, arguments)
    elif compilation_result.language == SQL_LANGUAGE:
        return insert_arguments_into_sql_query(compilation_result, arguments)
    else:
        raise AssertionError(u'Unrecognized language in compilation result: '
                             u'{}'.format(compilation_result))
235,356
Construct a ConstructResult object that maps the given field names to their expressions. Args: fields: dict, variable name string -> Expression see rules for variable names in validate_safe_string(). Returns: new ConstructResult object
def __init__(self, fields):
    self.fields = {
        ensure_unicode_string(key): value
        for key, value in six.iteritems(fields)
    }

    # All key values are normalized to unicode before being passed to the parent constructor,
    # which saves them to enable human-readable printing and other functions.
    super(ConstructResult, self).__init__(self.fields)
    self.validate()
235,362
Create a new MarkLocation at the specified Location. Args: location: Location object, must not be at a property field in the query Returns: new MarkLocation object
def __init__(self, location):
    super(MarkLocation, self).__init__(location)
    self.location = location
    self.validate()
235,369
Create a new Traverse block in the given direction and across the given edge. Args: direction: string, 'in' or 'out' edge_name: string obeying variable name rules (see validate_safe_string). optional: optional bool, specifying whether the traversal to the given location is optional (i.e. non-filtering) or mandatory (filtering). Returns: new Traverse object
def __init__(self, direction, edge_name, optional=False, within_optional_scope=False):
    super(Traverse, self).__init__(
        direction, edge_name, optional=optional,
        within_optional_scope=within_optional_scope)
    self.direction = direction
    self.edge_name = edge_name
    self.optional = optional
    # Denotes whether the traversal is occurring after a prior @optional traversal
    self.within_optional_scope = within_optional_scope
    self.validate()
235,371
Create a new Recurse block which traverses the given edge up to "depth" times. Args: direction: string, 'in' or 'out'. edge_name: string obeying variable name rules (see validate_safe_string). depth: int, always greater than or equal to 1. Returns: new Recurse object
def __init__(self, direction, edge_name, depth, within_optional_scope=False):
    super(Recurse, self).__init__(
        direction, edge_name, depth,
        within_optional_scope=within_optional_scope)
    self.direction = direction
    self.edge_name = edge_name
    self.depth = depth
    # Denotes whether the traversal is occurring after a prior @optional traversal
    self.within_optional_scope = within_optional_scope
    self.validate()
235,374
Create a new Backtrack block, returning to the given location in the query. Args: location: Location object, specifying where to backtrack to optional: optional bool, specifying whether the steps between the current location and the location to which Backtrack is returning were optional or not Returns: new Backtrack object
def __init__(self, location, optional=False):
    super(Backtrack, self).__init__(location, optional=optional)
    self.location = location
    self.optional = optional
    self.validate()
235,377
Create a map from each query path to a LocationInfo at that path. Args: query_metadata_table: QueryMetadataTable, object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). Returns: Dict[Tuple[str], LocationInfo], dictionary mapping query path to LocationInfo at that path.
def _map_query_path_to_location_info(query_metadata_table):
    query_path_to_location_info = {}
    for location, location_info in query_metadata_table.registered_locations:
        if not isinstance(location, Location):
            continue

        if location.query_path in query_path_to_location_info:
            # make sure the stored location information equals the new location information
            # for the fields the SQL backend requires.
            equivalent_location_info = query_path_to_location_info[location.query_path]
            if not _location_infos_equal(location_info, equivalent_location_info):
                raise AssertionError(
                    u'Differing LocationInfos at query_path {} between {} and {}. Expected '
                    u'parent_location.query_path, optional_scopes_depth, recursive_scopes_depth '
                    u'and types to be equal for LocationInfos sharing the same query path.'.format(
                        location.query_path, location_info, equivalent_location_info))

        query_path_to_location_info[location.query_path] = location_info
    return query_path_to_location_info
235,385
Return True if LocationInfo objects are equivalent for the SQL backend, False otherwise. LocationInfo objects are considered equal for the SQL backend iff the optional scopes depth, recursive scopes depth, types and parent query paths are equal. Args: left: LocationInfo, left location info object to compare. right: LocationInfo, right location info object to compare. Returns: bool, True if LocationInfo objects equivalent, False otherwise.
def _location_infos_equal(left, right):
    if not isinstance(left, LocationInfo) or not isinstance(right, LocationInfo):
        raise AssertionError(
            u'Unsupported LocationInfo comparison between types {} and {} '
            u'with values {}, {}'.format(type(left), type(right), left, right))

    optional_scopes_depth_equal = (left.optional_scopes_depth == right.optional_scopes_depth)

    parent_query_paths_equal = (
        (left.parent_location is None and right.parent_location is None) or
        (left.parent_location.query_path == right.parent_location.query_path))

    recursive_scopes_depths_equal = (left.recursive_scopes_depth == right.recursive_scopes_depth)

    types_equal = left.type == right.type

    return all([
        optional_scopes_depth_equal,
        parent_query_paths_equal,
        recursive_scopes_depths_equal,
        types_equal,
    ])
235,386
Approximate the 95% confidence interval for Student's T distribution. Given the degrees of freedom, returns an approximation to the 95% confidence interval for the Student's T distribution. Args: df: An integer, the number of degrees of freedom. Returns: A float.
def tdist95conf_level(df):
    df = int(round(df))
    highest_table_df = len(_T_DIST_95_CONF_LEVELS)
    if df >= 200:
        return 1.960
    if df >= 100:
        return 1.984
    if df >= 80:
        return 1.990
    if df >= 60:
        return 2.000
    if df >= 50:
        return 2.009
    if df >= 40:
        return 2.021
    if df >= highest_table_df:
        return _T_DIST_95_CONF_LEVELS[highest_table_df - 1]
    return _T_DIST_95_CONF_LEVELS[df]
235,446
Find the pooled sample variance for two samples. Args: sample1: one sample. sample2: the other sample. Returns: Pooled sample variance, as a float.
def pooled_sample_variance(sample1, sample2):
    deg_freedom = len(sample1) + len(sample2) - 2
    mean1 = statistics.mean(sample1)
    squares1 = ((x - mean1) ** 2 for x in sample1)
    mean2 = statistics.mean(sample2)
    squares2 = ((x - mean2) ** 2 for x in sample2)

    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
235,447
Calculate a t-test score for the difference between two samples. Args: sample1: one sample. sample2: the other sample. Returns: The t-test score, as a float.
def tscore(sample1, sample2):
    if len(sample1) != len(sample2):
        raise ValueError("different number of values")
    error = pooled_sample_variance(sample1, sample2) / len(sample1)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(error * 2)
235,448
Determine whether two samples differ significantly. This uses a Student's two-sample, two-tailed t-test with alpha=0.95. Args: sample1: one sample. sample2: the other sample. Returns: (significant, t_score) where significant is a bool indicating whether the two samples differ significantly; t_score is the score from the two-sample T test.
def is_significant(sample1, sample2):
    deg_freedom = len(sample1) + len(sample2) - 2
    critical_value = tdist95conf_level(deg_freedom)
    t_score = tscore(sample1, sample2)
    return (abs(t_score) >= critical_value, t_score)
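A minimal usage sketch for the significance helpers above, assuming `statistics`, `math`, and the `_T_DIST_95_CONF_LEVELS` lookup table are available in the same module; the two timing samples are invented for illustration.

baseline = [1.21, 1.19, 1.22, 1.20, 1.23]   # seconds per run, interpreter A
candidate = [1.10, 1.12, 1.09, 1.11, 1.13]  # seconds per run, interpreter B

significant, t = is_significant(baseline, candidate)
print("t=%.2f, significant at the 95%% level: %s" % (t, significant))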
235,449
N-Queens solver. Args: queen_count: the number of queens to solve for. This is also the board size. Yields: Solutions to the problem. Each yielded value looks like (3, 8, 2, 1, 4, ..., 6) where each number is the column position for the queen, and the index into the tuple indicates the row.
def n_queens(queen_count):
    cols = range(queen_count)
    for vec in permutations(cols):
        if (queen_count == len(set(vec[i] + i for i in cols))
                        == len(set(vec[i] - i for i in cols))):
            yield vec
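A quick usage sketch, assuming `from itertools import permutations` is present at module level as the generator requires; a 4x4 board has exactly two solutions.

print(list(n_queens(4)))        # [(1, 3, 0, 2), (2, 0, 3, 1)]
print(len(list(n_queens(6))))   # 4 solutions on a 6x6 board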
235,509
Filter out benchmarks not supported by both Pythons. Args: benchmarks: a set() of benchmark names bench_funcs: dict mapping benchmark names to functions base_ver: version tuple of the baseline interpreter Returns: The filtered set of benchmark names
def filter_benchmarks(benchmarks, bench_funcs, base_ver):
    for bm in list(benchmarks):
        func = bench_funcs[bm]
        if getattr(func, '_python2_only', False) and (3, 0) <= base_ver:
            benchmarks.discard(bm)
            logging.info("Skipping Python2-only benchmark %s; "
                         "not compatible with Python %s" % (bm, base_ver))
            continue
    return benchmarks
235,548
Recursively expand benchmark names. Args: bm_name: string naming a benchmark or benchmark group. bench_groups: dict mapping group names to lists of benchmark or group names. Yields: Names of actual benchmarks, with all group names fully expanded.
def expand_benchmark_name(bm_name, bench_groups):
    expansion = bench_groups.get(bm_name)
    if expansion:
        for name in expansion:
            for name in expand_benchmark_name(name, bench_groups):
                yield name
    else:
        yield bm_name
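A usage sketch for the expansion generator above; the `groups` mapping is invented for illustration.

groups = {'all': ['math', 'apps'], 'apps': ['2to3', 'html5lib']}

print(list(expand_benchmark_name('all', groups)))    # ['math', '2to3', 'html5lib']
print(list(expand_benchmark_name('2to3', groups)))   # ['2to3']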
235,549
Check whether a connection to the provided ``host`` and ``port`` is possible. Args: host (str): Hostname for the host to check connection. port (int): Port of the host to check connection on. Returns: bool: True if the connection succeeds, False otherwise.
def _can_connect(host, port=22):  # type: (str, int) -> bool
    try:
        logger.debug('Testing connection to host %s', host)
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, port=port)
        client.close()
        logger.info('Can connect to host %s', host)
        return True
    except Exception as e:
        logger.info('Cannot connect to host %s', host)
        logger.info('Connection failed with exception: \n %s', str(e))
        return False
237,149
Add a signal-based timeout to any block of code. If multiple time units are specified, they will be added together to determine time limit. Usage: with timeout(seconds=5): my_slow_function(...) Args: - seconds: The time limit, in seconds. - minutes: The time limit, in minutes. - hours: The time limit, in hours.
@contextmanager  # required so the generator can be used in the documented `with timeout(...)` form
def timeout(seconds=0, minutes=0, hours=0):
    limit = seconds + 60 * minutes + 3600 * hours

    def handler(signum, frame):  # pylint: disable=W0613
        raise TimeoutError('timed out after {} seconds'.format(limit))

    try:
        signal.signal(signal.SIGALRM, handler)
        signal.setitimer(signal.ITIMER_REAL, limit)
        yield
    finally:
        signal.alarm(0)
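A short usage sketch; it assumes `signal`, `contextlib.contextmanager`, and a `TimeoutError` are importable in the module, and that the code runs in the main thread of a Unix process, since the limit is enforced with SIGALRM.

import time

try:
    with timeout(seconds=1):
        time.sleep(5)          # interrupted after roughly one second
except TimeoutError as exc:
    print(exc)                 # timed out after 1 seconds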
237,156
Prepare a Python script (or module) to be imported as a module. If the script does not contain a setup.py file, it creates a minimal setup. Args: path (str): path to directory with the script or module. name (str): name of the script or module.
def prepare(path, name):  # type: (str, str) -> None
    setup_path = os.path.join(path, 'setup.py')
    if not os.path.exists(setup_path):
        # NOTE: the triple-quoted template strings passed to textwrap.dedent() were
        # lost in extraction and are not reproduced here.
        data = textwrap.dedent( % name)  # elided setup.py template

        logger.info('Module %s does not provide a setup.py. \nGenerating setup.py' % name)
        _files.write_file(setup_path, data)

        data = textwrap.dedent()  # elided setup.cfg template
        logger.info('Generating setup.cfg')
        _files.write_file(os.path.join(path, 'setup.cfg'), data)

        data = textwrap.dedent()  # elided MANIFEST.in template
        logger.info('Generating MANIFEST.in')
        _files.write_file(os.path.join(path, 'MANIFEST.in'), data)
237,157
Install a Python module in the executing Python environment. Args: path (str): Real path location of the Python module. capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors.
def install(path, capture_error=False):  # type: (str, bool) -> None
    cmd = '%s -m pip install -U . ' % _process.python_executable()

    if has_requirements(path):
        cmd += '-r requirements.txt'

    logger.info('Installing module with the following command:\n%s', cmd)
    _process.check_error(shlex.split(cmd), _errors.InstallModuleError, cwd=path,
                         capture_error=capture_error)
237,158
Install the user provided entry point to be executed as follows: - add the path to sys.path - if the user entry point is a command, give exec permissions to the script Args: name (str): name of the script or module. dst (str): path to directory with the script or module. capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors.
def install(name, dst, capture_error=False):
    if dst not in sys.path:
        sys.path.insert(0, dst)

    entrypoint_type = _entry_point_type.get(dst, name)
    if entrypoint_type is _entry_point_type.PYTHON_PACKAGE:
        _modules.install(dst, capture_error)
    if entrypoint_type is _entry_point_type.COMMAND:
        os.chmod(os.path.join(dst, name), 511)  # 511 == 0o777: make the command executable
237,170
Set logger configuration. Args: level (int): Logger level format (str): Logger format
def configure_logger(level, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s'):
    # type: (int, str) -> None
    logging.basicConfig(format=format, level=level)

    if level >= logging.INFO:
        logging.getLogger('boto3').setLevel(logging.INFO)
        logging.getLogger('s3transfer').setLevel(logging.INFO)
        logging.getLogger('botocore').setLevel(logging.WARN)
237,171
Transform a dictionary into a dictionary of env vars. Example: >>>env_vars = mapping.to_env_vars({'model_dir': '/opt/ml/model', 'batch_size': 25}) >>> >>>print(env_vars) {'SM_MODEL_DIR': '/opt/ml/model', 'SM_BATCH_SIZE': '25'} Args: mapping (dict[str, object]): A Python mapping. Returns: (dict): Dictionary of env vars
def to_env_vars(mapping):  # type: (dict) -> dict
    def format_key(key):
        if key:
            decoded_name = 'SM_%s' % str(key).upper()
            return decoded_name
        else:
            return ''

    def format_value(_mapping):
        if six.PY3 and isinstance(_mapping, six.binary_type):
            # transforms a byte string (b'') in unicode
            return _mapping.decode('latin1')
        elif _mapping is None:
            return ''
        elif isinstance(_mapping, six.string_types):
            return str(_mapping)
        else:
            return json.dumps(_mapping, sort_keys=True, separators=(',', ':'), ensure_ascii=True)

    return {format_key(k): format_value(v) for k, v in mapping.items()}
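A sketch of how non-scalar values come out of `to_env_vars`, assuming `six` and `json` are imported as in the module; the channel path is made up.

env = to_env_vars({'model_dir': '/opt/ml/model',
                   'channels': {'training': '/opt/ml/input/data/training'}})
print(env['SM_MODEL_DIR'])   # /opt/ml/model
print(env['SM_CHANNELS'])    # {"training":"/opt/ml/input/data/training"}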
237,182
Transform a dictionary into a list of cmd arguments. Example: >>>args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25}) >>> >>>print(args) ['--batch_size', '25', '--model_dir', '/opt/ml/model'] Args: mapping (dict[str, object]): A Python mapping. Returns: (list): List of cmd arguments
def to_cmd_args(mapping):  # type: (dict) -> list
    sorted_keys = sorted(mapping.keys())

    def arg_name(obj):
        string = _decode(obj)
        if string:
            return u'--%s' % string if len(string) > 1 else u'-%s' % string
        else:
            return u''

    arg_names = [arg_name(argument) for argument in sorted_keys]

    def arg_value(value):
        if hasattr(value, 'items'):
            map_items = ['%s=%s' % (k, v) for k, v in sorted(value.items())]
            return ','.join(map_items)
        return _decode(value)

    arg_values = [arg_value(mapping[key]) for key in sorted_keys]

    items = zip(arg_names, arg_values)

    return [item for item in itertools.chain.from_iterable(items)]
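A sketch of the flattening behavior for nested mappings, assuming `itertools`, `six`, and the `_decode` helper below are available; the S3 paths are placeholders.

args = to_cmd_args({'lr': 0.1,
                    'channels': {'test': 's3://bucket/test', 'train': 's3://bucket/train'}})
print(args)
# ['--channels', 'test=s3://bucket/test,train=s3://bucket/train', '--lr', '0.1']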
237,183
Decode an object to unicode. Args: obj (bytes or str or unicode or anything serializable): object to be decoded Returns: object decoded in unicode.
def _decode(obj):  # type: (bytes or str or unicode or object) -> unicode  # noqa ignore=F821
    if obj is None:
        return u''
    if six.PY3 and isinstance(obj, six.binary_type):
        # transforms a byte string (b'') in unicode
        return obj.decode('latin1')
    elif six.PY3:
        # PY3 strings are unicode.
        return str(obj)
    elif isinstance(obj, six.text_type):
        # returns itself if it is unicode
        return obj
    else:
        # decodes a Py2 string to unicode
        return str(obj).decode('utf-8')
237,184
Serialize the prediction for the response. Args: prediction (obj): prediction returned by predict_fn. accept (str): accept content-type expected by the client. Returns: (worker.Response): a Flask response object whose body is the prediction serialized to the accepted content type and whose mimetype is that content type.
def default_output_fn(prediction, accept):
    return _worker.Response(response=_encoders.encode(prediction, accept), mimetype=accept)
237,189
Write the dictionary env_vars to the process environment as environment variables. PYTHONPATH is always set from the current sys.path. Args: env_vars (dict): mapping of environment variable names to their values. Returns: None
def write_env_vars(env_vars=None):  # type: (dict) -> None
    env_vars = env_vars or {}
    env_vars['PYTHONPATH'] = ':'.join(sys.path)

    for name, value in env_vars.items():
        os.environ[name] = value
237,205
Convert an array like object to the NPY format. To understand better what an array like object is see: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays Args: array_like (np.array or Iterable or int or float): array like object to be converted to NPY. Returns: (obj): NPY array.
def array_to_npy(array_like):  # type: (np.array or Iterable or int or float) -> object
    buffer = BytesIO()
    np.save(buffer, array_like)
    return buffer.getvalue()
237,210
Convert an NPY array into numpy. Args: npy_array (npy array): to be converted to numpy array Returns: (np.array): converted numpy array.
def npy_to_numpy(npy_array):  # type: (object) -> np.array
    stream = BytesIO(npy_array)
    return np.load(stream, allow_pickle=True)
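A round-trip sketch for the two NPY helpers, assuming `numpy as np` and `io.BytesIO` are imported as the functions require.

import numpy as np

original = np.array([[1, 2], [3, 4]])
payload = array_to_npy(original)    # bytes in NPY format
restored = npy_to_numpy(payload)
assert np.array_equal(original, restored)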
237,211
Convert an array like object to JSON. To understand better what an array like object is see: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays Args: array_like (np.array or Iterable or int or float): array like object to be converted to JSON. Returns: (str): object serialized to JSON
def array_to_json(array_like):  # type: (np.array or Iterable or int or float) -> str
    def default(_array_like):
        if hasattr(_array_like, 'tolist'):
            return _array_like.tolist()
        return json.JSONEncoder().default(_array_like)

    return json.dumps(array_like, default=default)
237,212
Convert a JSON object to a numpy array. Args: string_like (str): JSON string. dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. This argument can only be used to 'upcast' the array. For downcasting, use the .astype(t) method. Returns: (np.array): numpy array
def json_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
    data = json.loads(string_like)
    return np.array(data, dtype=dtype)
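The matching round trip for the JSON helpers, under the same assumptions about module-level imports (`json`, `numpy as np`).

import numpy as np

payload = array_to_json(np.array([[1.5, 2.5], [3.5, 4.5]]))   # '[[1.5, 2.5], [3.5, 4.5]]'
restored = json_to_numpy(payload, dtype=np.float32)           # dtype applied on load
print(restored.dtype, restored.shape)                         # float32 (2, 2)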
237,213
Convert a CSV object to a numpy array. Args: string_like (str): CSV string. dtype (dtype, optional): Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. This argument can only be used to 'upcast' the array. For downcasting, use the .astype(t) method. Returns: (np.array): numpy array
def csv_to_numpy(string_like, dtype=None):  # type: (str) -> np.array
    stream = StringIO(string_like)
    return np.genfromtxt(stream, dtype=dtype, delimiter=',')
237,214
Convert an array like object to CSV. To understand better what an array like object is see: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays Args: array_like (np.array or Iterable or int or float): array like object to be converted to CSV. Returns: (str): object serialized to CSV
def array_to_csv(array_like):  # type: (np.array or Iterable or int or float) -> str
    stream = StringIO()
    np.savetxt(stream, array_like, delimiter=',', fmt='%s')
    return stream.getvalue()
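And the CSV round trip, again assuming `numpy as np` and `io.StringIO` are imported as in the module.

import numpy as np

payload = array_to_csv(np.array([[1, 2], [3, 4]]))   # '1,2\n3,4\n'
restored = csv_to_numpy(payload, dtype=int)
print(restored)                                      # [[1 2] [3 4]]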
237,215
Decode an object from one of the default content types into a numpy array. Args: obj (object): object to be decoded. content_type (str): content type to be used. Returns: np.array: decoded object.
def decode(obj, content_type):  # type: (np.array or Iterable or int or float, str) -> np.array
    try:
        decoder = _decoders_map[content_type]
        return decoder(obj)
    except KeyError:
        raise _errors.UnsupportedFormatError(content_type)
237,216
Encode an array-like object into the requested content type. To understand better what an array like object is see: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays Args: array_like (np.array or Iterable or int or float): the data to be encoded. content_type (str): content type to be used. Returns: (str or bytes): the object serialized in the requested content type.
def encode(array_like, content_type):  # type: (np.array or Iterable or int or float, str) -> np.array
    try:
        encoder = _encoders_map[content_type]
        return encoder(array_like)
    except KeyError:
        raise _errors.UnsupportedFormatError(content_type)
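The `_encoders_map` and `_decoders_map` lookups are not shown in this excerpt; a plausible wiring, keyed by MIME type and reusing the converters above, might look like the sketch below (the content-type strings are assumptions, not confirmed by the source).

_encoders_map = {
    'application/x-npy': array_to_npy,
    'application/json': array_to_json,
    'text/csv': array_to_csv,
}

_decoders_map = {
    'application/x-npy': npy_to_numpy,
    'application/json': json_to_numpy,
    'text/csv': csv_to_numpy,
}

# Example dispatch: serialize to CSV, then parse it back.
body = encode([[1, 2], [3, 4]], 'text/csv')
print(decode(body, 'text/csv'))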
237,217
Create a file named 'failure'. If training fails, the failure description should be written to this file after all algorithm output (for example, logging) completes. In a DescribeTrainingJob response, Amazon SageMaker returns the first 1024 characters from this file as FailureReason. See: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html Args: failure_msg: The description of failure
def write_failure_file(failure_msg):  # type: (str) -> None
    file_path = os.path.join(_env.output_dir, 'failure')
    write_file(file_path, failure_msg)
237,223
Write data to a file. Args: path (str): path to the file. data (str): data to be written to the file. mode (str): mode which the file will be open.
def write_file(path, data, mode='w'):  # type: (str, str, str) -> None
    with open(path, mode) as f:
        f.write(data)
237,225
Download, prepare and install a compressed tar file from S3 or a local directory as an entry point. The SageMaker Python SDK saves the user-provided entry points as compressed tar files in S3. Args: name (str): name of the entry point. uri (str): the location of the entry point. path (str): the path where the script will be installed. Nothing is downloaded or installed if the path already contains the user entry point.
def download_and_extract(uri, name, path):  # type: (str, str, str) -> None
    if not os.path.exists(path):
        os.makedirs(path)
    if not os.listdir(path):
        with tmpdir() as tmp:
            if uri.startswith('s3://'):
                dst = os.path.join(tmp, 'tar_file')
                s3_download(uri, dst)

                with tarfile.open(name=dst, mode='r:gz') as t:
                    t.extractall(path=path)

            elif os.path.isdir(uri):
                if uri == path:
                    return
                if os.path.exists(path):
                    shutil.rmtree(path)
                shutil.move(uri, path)
            else:
                shutil.copy2(uri, os.path.join(path, name))
237,226
Download a file from S3. Args: url (str): the s3 url of the file. dst (str): the destination where the file will be saved.
def s3_download(url, dst):  # type: (str, str) -> None
    url = parse.urlparse(url)

    if url.scheme != 's3':
        raise ValueError("Expecting 's3' scheme, got: %s in %s" % (url.scheme, url))

    bucket, key = url.netloc, url.path.lstrip('/')

    region = os.environ.get('AWS_REGION', os.environ.get(_params.REGION_NAME_ENV))
    s3 = boto3.resource('s3', region_name=region)

    s3.Bucket(bucket).download_file(key, dst)
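A usage sketch for `s3_download`; the bucket, key, and destination path are hypothetical, and the call assumes `boto3`, `urllib.parse`, and valid AWS credentials/region are available.

# Downloads s3://my-training-bucket/sourcedir.tar.gz to a local temp path.
s3_download('s3://my-training-bucket/sourcedir.tar.gz', '/tmp/sourcedir.tar.gz')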
237,227
Wrap function fn in a try/except block that re-raises error_class. Args: fn (function): function to be wrapped. error_class (Exception): error class to be re-raised. Returns: (object): fn wrapped in a try/except.
def error_wrapper(fn, error_class):  # type: (Callable or None, Exception) -> ...
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as e:
            six.reraise(error_class, error_class(e), sys.exc_info()[2])

    return wrapper
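A small sketch of how `error_wrapper` might be used, assuming `six` and `sys` are imported as in the module; `InstallModuleError` here stands in for any project-specific exception class.

class InstallModuleError(Exception):
    pass

def flaky():
    raise RuntimeError('pip failed')

safe_flaky = error_wrapper(flaky, InstallModuleError)
try:
    safe_flaky()
except InstallModuleError as exc:
    print(exc)  # the original RuntimeError is preserved via the re-raised traceback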
237,230
Represents a Tuya device. Args: dev_id (str): The device id. address (str): The network address. local_key (str, optional): The encryption key. Defaults to None. dev_type (str, optional): The device type. It will be used as key for lookups in payload_dict. Defaults to None. Attributes: port (int): The port to connect to.
def __init__(self, dev_id, address, local_key=None, dev_type=None, connection_timeout=10):
    self.id = dev_id
    self.address = address
    # Encode the key for payload encryption; guard the None default so the
    # constructor does not crash when no local_key is supplied.
    self.local_key = local_key.encode('latin1') if local_key else local_key
    self.dev_type = dev_type
    self.connection_timeout = connection_timeout

    self.port = 6668
237,677
Send single buffer `payload` and receive a single buffer. Args: payload(bytes): Data to send.
def _send_receive(self, payload):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    s.settimeout(self.connection_timeout)
    s.connect((self.address, self.port))
    s.send(payload)
    data = s.recv(1024)
    s.close()
    return data
237,678