Print a variant. If a result file is provided, the variant will be appended to the file; otherwise it is printed to stdout. Args: variant_line (str): A variant line outfile (FileHandle): An opened file handle silent (bool): If True, nothing is printed to stdout.
def print_variant(variant_line, outfile=None, silent=False):
    variant_line = variant_line.rstrip()
    if not variant_line.startswith('#'):
        if outfile:
            outfile.write(variant_line + '\n')
        else:
            if not silent:
                print(variant_line)
    return
1,038,783
Create a list of file lines from a given filepath. Args: path (str): File path as_interned (bool): List of "interned" strings (default False) encoding (str): File encoding passed to io.open (default None) Returns: strings (list): File line list
def lines_from_file(path, as_interned=False, encoding=None):
    lines = None
    with io.open(path, encoding=encoding) as f:
        if as_interned:
            lines = [sys.intern(line) for line in f.read().splitlines()]
        else:
            lines = f.read().splitlines()
    return lines
1,038,784
Create a list of file lines from a given file stream. Args: f (io.TextIOWrapper): File stream as_interned (bool): List of "interned" strings (default False) Returns: strings (list): File line list
def lines_from_stream(f, as_interned=False):
    if as_interned:
        return [sys.intern(line) for line in f.read().splitlines()]
    return f.read().splitlines()
1,038,785
Create a list of file lines from a given string. Args: string (str): File string as_interned (bool): List of "interned" strings (default False) Returns: strings (list): File line list
def lines_from_string(string, as_interned=False):
    if as_interned:
        return [sys.intern(line) for line in string.splitlines()]
    return string.splitlines()
1,038,786
Perform formatting and write the formatted string to a file or stdout. Optional arguments can be used to format the editor's contents. If no file path is given, prints to standard output. Args: path (str): Full file path (default None, prints to stdout) *args: Positional arguments to format the editor with **kwargs: Keyword arguments to format the editor with
def write(self, path=None, *args, **kwargs):
    if path is None:
        print(self.format(*args, **kwargs))
    else:
        with io.open(path, 'w', newline="") as f:
            f.write(self.format(*args, **kwargs))
1,038,787
Format the string representation of the editor. Args: inplace (bool): If True, overwrite editor's contents with formatted contents
def format(self, *args, **kwargs):
    inplace = kwargs.pop("inplace", False)
    if not inplace:
        return str(self).format(*args, **kwargs)
    self._lines = str(self).format(*args, **kwargs).splitlines()
1,038,788
Display the top of the file. Args: n (int): Number of lines to display
def head(self, n=10):
    r = self.__repr__().split('\n')
    print('\n'.join(r[:n]), end=' ')
1,038,789
Insert lines into the editor. Note: To insert before the first line, use :func:`~exa.core.editor.Editor.prepend` (or key 0); to insert after the last line use :func:`~exa.core.editor.Editor.append`. Args: lines (dict): Dictionary of lines of form (lineno, string) pairs
def insert(self, lines=None):
    for i, (key, line) in enumerate(lines.items()):
        n = key + i
        first_half = self._lines[:n]
        last_half = self._lines[n:]
        self._lines = first_half + [line] + last_half
1,038,791
Delete all lines with given line numbers. Args: lines (list): List of integers corresponding to line numbers to delete
def delete_lines(self, lines):
    # offsetting by k assumes `lines` is sorted in ascending order
    for k, i in enumerate(lines):
        del self[i - k]
1,038,794
From the editor's current cursor position find the next instance of the given string. Args: strings (iterable): String or strings to search for Returns: tup (tuple): Tuple of cursor position and line or None if not found Note: This function cycles the entire editor (i.e. cursor to length of editor to zero and back to cursor position).
def find_next(self, *strings, **kwargs):
    start = kwargs.pop("start", None)
    keys_only = kwargs.pop("keys_only", False)
    search_start = start if start is not None else self.cursor
    for start, stop in [(search_start, len(self)), (0, search_start)]:
        for i in range(start, stop):
            for string in strings:
                if string in self[i]:
                    tup = (i, self[i])
                    self.cursor = i + 1
                    if keys_only:
                        return i
                    return tup
1,038,796
Search the editor for lines matching the regular expression. re.MULTILINE is not currently supported. Args: \*patterns: Regular expressions to search each line for keys_only (bool): Only return keys flags (re.FLAG): flags passed to re.search Returns: results (dict): Dictionary of pattern keys, line values (or groups - default)
def regex(self, *patterns, **kwargs):
    start = kwargs.pop("start", 0)
    stop = kwargs.pop("stop", None)
    keys_only = kwargs.pop("keys_only", False)
    flags = kwargs.pop("flags", 0)
    results = {pattern: [] for pattern in patterns}
    # default to the full length; a default of -1 would silently drop the last line
    stop = stop if stop is not None else len(self)
    # start=start so the reported indices are absolute line numbers, not slice offsets
    for i, line in enumerate(self[start:stop], start=start):
        for pattern in patterns:
            grps = re.search(pattern, line, flags=flags)
            if grps and keys_only:
                results[pattern].append(i)
            elif grps and grps.groups():
                for group in grps.groups():
                    results[pattern].append((i, group))
            elif grps:
                results[pattern].append((i, line))
    if len(patterns) == 1:
        return results[patterns[0]]
    return results
1,038,797
Replace all instances of a pattern with a replacement. Args: pattern (str): Pattern to replace replacement (str): Text to insert
def replace(self, pattern, replacement):
    for i, line in enumerate(self):
        if pattern in line:
            self[i] = line.replace(pattern, replacement)
1,038,798
Returns the result of tab-separated pandas.read_csv on a subset of the file. Args: start (int): line number where structured data starts stop (int): line number where structured data stops ncol (int or list): the number of columns in the structured data or a list of that length with column names Returns: pd.DataFrame: structured data
def pandas_dataframe(self, start, stop, ncol, **kwargs):
    try:
        int(start)
        int(stop)
    except TypeError:
        print('start and stop must be ints')
    try:
        ncol = int(ncol)
        return pd.read_csv(six.StringIO('\n'.join(self[start:stop])),
                           delim_whitespace=True, names=range(ncol), **kwargs)
    except TypeError:
        try:
            ncol = list(ncol)
            return pd.read_csv(six.StringIO('\n'.join(self[start:stop])),
                               delim_whitespace=True, names=ncol, **kwargs)
        except TypeError:
            print('Cannot pandas_dataframe if ncol is {}, must be int or list'.format(type(ncol)))
1,038,799
Determines whether a type is a List[...]. How to do this varies for different Python versions, due to the typing library not having a stable API. This function smooths over the differences. Args: type_: The type to check. Returns: True iff it's a List[...something...].
def is_generic_list(type_: Type) -> bool:
    if hasattr(typing, '_GenericAlias'):
        # 3.7
        return (isinstance(type_, typing._GenericAlias) and  # type: ignore
                type_.__origin__ is list)
    else:
        # 3.6 and earlier
        return (isinstance(type_, typing.GenericMeta) and
                type_.__origin__ is List)
1,038,808
Determines whether a type is a Dict[...]. How to do this varies for different Python versions, due to the typing library not having a stable API. This function smooths over the differences. Args: type_: The type to check. Returns: True iff it's a Dict[...something...].
def is_generic_dict(type_: Type) -> bool:
    if hasattr(typing, '_GenericAlias'):
        # 3.7
        return (isinstance(type_, typing._GenericAlias) and  # type: ignore
                type_.__origin__ is dict)
    else:
        # 3.6 and earlier
        return (isinstance(type_, typing.GenericMeta) and
                type_.__origin__ is Dict)
1,038,809
Determines whether a type is a Union[...]. How to do this varies for different Python versions, due to the typing library not having a stable API. This function smooths over the differences. Args: type_: The type to check. Returns: True iff it's a Union[...something...].
def is_generic_union(type_: Type) -> bool:
    if hasattr(typing, '_GenericAlias'):
        # 3.7
        return (isinstance(type_, typing._GenericAlias) and  # type: ignore
                type_.__origin__ is Union)
    if hasattr(typing, '_Union'):
        # 3.6
        return isinstance(type_, typing._Union)  # type: ignore
    if hasattr(typing, 'UnionMeta'):
        # 3.5 and earlier (?)
        return isinstance(type_, typing.UnionMeta)  # type: ignore
    # restructured so this error is actually reachable; in the original it was dead code
    raise RuntimeError('Could not determine whether type is a Union. Is this'
                       ' a YAtiML-supported Python version?')
1,038,810
Gets the type argument list for the given generic type. If you give this function List[int], it will return [int], and if you give it Union[int, str] it will give you [int, str]. Note that on Python < 3.7, Union[int, bool] collapses to Union[int] and then to int; this is already done by the time this function is called, so it does not help with that. Args: type_: The type to get the arguments list of. Returns: A list of Type objects.
def generic_type_args(type_: Type) -> List[Type]:
    if hasattr(type_, '__union_params__'):
        # 3.5 Union
        return list(type_.__union_params__)
    return list(type_.__args__)
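For a quick sense of how the three helpers above behave together, a sketch (exact results depend on the typing module version; shown here assuming Python 3.7+):
from typing import Dict, List, Union
# is_generic_list(List[int])           -> True
# is_generic_dict(Dict[str, int])      -> True
# is_generic_union(Union[int, str])    -> True
# generic_type_args(List[int])         -> [int]
# generic_type_args(Union[int, str])   -> [int, str]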
1,038,811
Convert a type to a human-readable description. This is used for generating nice error messages. We want users to see a nice readable text, rather than something like "typing.List<~T>[str]". Args: type_: The type to represent. Returns: A human-readable description.
def type_to_desc(type_: Type) -> str:
    scalar_type_to_str = {
        str: 'string',
        int: 'int',
        float: 'float',
        bool: 'boolean',
        None: 'null value',
        type(None): 'null value'
    }
    if type_ in scalar_type_to_str:
        return scalar_type_to_str[type_]
    if is_generic_union(type_):
        return 'union of {}'.format([type_to_desc(t) for t in generic_type_args(type_)])
    if is_generic_list(type_):
        return 'list of ({})'.format(type_to_desc(generic_type_args(type_)[0]))
    if is_generic_dict(type_):
        return 'dict of string to ({})'.format(
            type_to_desc(generic_type_args(type_)[1]))
    return type_.__name__
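Example outputs, as implied by the branches above (hypothetical values, not taken from the library's tests):
# type_to_desc(str)              -> 'string'
# type_to_desc(List[int])        -> 'list of (int)'
# type_to_desc(Dict[str, float]) -> 'dict of string to (float)'
# type_to_desc(Union[int, str])  -> "union of ['int', 'string']"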
1,038,812
Set the type corresponding to the whole document. Args: loader_cls: The loader class to set the document type for. type_: The type the loader should process the document into.
def set_document_type(loader_cls: Type, type_: Type) -> None:
    loader_cls.document_type = type_
    if not hasattr(loader_cls, '_registered_classes'):
        loader_cls._registered_classes = dict()
1,038,848
Registers one or more classes with a YAtiML loader. Once a class has been registered, it can be recognized and constructed when reading a YAML text. Args: loader_cls: The loader to register the classes with. classes: The class(es) to register; a plain Python class or a list of them.
def add_to_loader(loader_cls: Type, classes: List[Type]) -> None:
    if not isinstance(classes, list):
        classes = [classes]  # type: ignore
    for class_ in classes:
        tag = '!{}'.format(class_.__name__)
        if issubclass(class_, enum.Enum):
            loader_cls.add_constructor(tag, EnumConstructor(class_))
        elif issubclass(class_, str) or issubclass(class_, UserString):
            loader_cls.add_constructor(tag, UserStringConstructor(class_))
        else:
            loader_cls.add_constructor(tag, Constructor(class_))
        if not hasattr(loader_cls, '_registered_classes'):
            loader_cls._registered_classes = dict()
        loader_cls._registered_classes[tag] = class_
1,038,849
Convert a type to the corresponding YAML tag. Args: type_: The type to convert Returns: A string containing the YAML tag.
def __type_to_tag(self, type_: Type) -> str:
    if type_ in scalar_type_to_tag:
        return scalar_type_to_tag[type_]
    if is_generic_list(type_):
        return 'tag:yaml.org,2002:seq'
    if is_generic_dict(type_):
        return 'tag:yaml.org,2002:map'
    if type_ in self._registered_classes.values():
        return '!{}'.format(type_.__name__)
    raise RuntimeError((  # pragma: no cover
        'Unknown type {} in type_to_tag,'
        ' please report a YAtiML bug.').format(type_))
1,038,852
Removes syntactic sugar from the node. This calls yatiml_savorize(), first on the class's base classes, then on the class itself. Args: node: The node to modify. expected_type: The type to assume this node is.
def __savorize(self, node: yaml.Node, expected_type: Type) -> yaml.Node:
    logger.debug('Savorizing node assuming type {}'.format(
        expected_type.__name__))
    for base_class in expected_type.__bases__:
        if base_class in self._registered_classes.values():
            node = self.__savorize(node, base_class)
    if hasattr(expected_type, 'yatiml_savorize'):
        logger.debug('Calling {}.yatiml_savorize()'.format(
            expected_type.__name__))
        cnode = Node(node)
        expected_type.yatiml_savorize(cnode)
        node = cnode.yaml_node
    return node
1,038,853
Processes a node. This is the main function that implements YAtiML's functionality. It figures out how to interpret this node (recognition), then applies syntactic sugar, and finally recurses into the subnodes, if any. Args: node: The node to process. expected_type: The type we expect this node to be. Returns: The transformed node, or a transformed copy.
def __process_node(self, node: yaml.Node, expected_type: Type) -> yaml.Node:
    logger.info('Processing node {} expecting type {}'.format(
        node, expected_type))
    # figure out how to interpret this node
    recognized_types, message = self.__recognizer.recognize(
        node, expected_type)
    if len(recognized_types) != 1:
        raise RecognitionError(message)
    recognized_type = recognized_types[0]
    # remove syntactic sugar
    logger.debug('Savorizing node {}'.format(node))
    if recognized_type in self._registered_classes.values():
        node = self.__savorize(node, recognized_type)
    logger.debug('Savorized, now {}'.format(node))
    # process subnodes
    logger.debug('Recursing into subnodes')
    if is_generic_list(recognized_type):
        if node.tag != 'tag:yaml.org,2002:seq':
            raise RecognitionError('{}{}Expected a {} here'.format(
                node.start_mark, os.linesep, type_to_desc(expected_type)))
        for item in node.value:
            self.__process_node(item, generic_type_args(recognized_type)[0])
    elif is_generic_dict(recognized_type):
        if node.tag != 'tag:yaml.org,2002:map':
            raise RecognitionError('{}{}Expected a {} here'.format(
                node.start_mark, os.linesep, type_to_desc(expected_type)))
        for _, value_node in node.value:
            self.__process_node(value_node,
                                generic_type_args(recognized_type)[1])
    elif recognized_type in self._registered_classes.values():
        if (not issubclass(recognized_type, enum.Enum)
                and not issubclass(recognized_type, str)
                and not issubclass(recognized_type, UserString)):
            for attr_name, type_, _ in class_subobjects(recognized_type):
                cnode = Node(node)
                if cnode.has_attribute(attr_name):
                    subnode = cnode.get_attribute(attr_name)
                    new_subnode = self.__process_node(
                        subnode.yaml_node, type_)
                    cnode.set_attribute(attr_name, new_subnode)
    else:
        logger.debug('Not a generic class or a user-defined class, not'
                     ' recursing')
    node.tag = self.__type_to_tag(recognized_type)
    logger.debug('Finished processing node {}'.format(node))
    return node
1,038,854
Initialize. Args: campfire (:class:`Campfire`): Campfire instance id (str): Room ID
def __init__(self, campfire, id):
    super(Room, self).__init__(campfire)
    self._load(id)
1,039,071
Set the room name. Args: name (str): Name Returns: bool. Success
def set_name(self, name):
    if not self._campfire.get_user().admin:
        return False
    result = self._connection.put("room/%s" % self.id, {"room": {"name": name}})
    if result["success"]:
        self._load()
    return result["success"]
1,039,076
Set the room topic. Args: topic (str): Topic Returns: bool. Success
def set_topic(self, topic):
    if not topic:
        topic = ''
    result = self._connection.put("room/%s" % self.id, {"room": {"topic": topic}})
    if result["success"]:
        self._load()
    return result["success"]
1,039,077
Post a message. Args: message (:class:`Message` or string): Message Returns: bool. Success
def speak(self, message):
    campfire = self.get_campfire()
    if not isinstance(message, Message):
        message = Message(campfire, message)
    result = self._connection.post(
        "room/%s/speak" % self.id,
        {"message": message.get_data()},
        parse_data=True,
        key="message"
    )
    if result["success"]:
        return Message(campfire, result["data"])
    return result["success"]
1,039,078
Parses a unit file and updates self._data['options'] Args: file_handle (file): a file-like object (supporting read()) containing a unit Returns: True: The file was successfully parsed and options were updated Raises: IOError: from_file was specified and it does not exist ValueError: The unit contents specified in from_string or from_file are not valid
def _set_options_from_file(self, file_handle):
    # TODO: Find a library to handle this unit file parsing.
    # Can't use configparser: it doesn't handle multiple entries
    # for the same key in the same section.
    # This is terribly naive.

    # build our output here
    options = []
    # keep track of line numbers to report when parsing problems happen
    line_number = 0
    # the section we are currently in
    section = None
    for line in file_handle.read().splitlines():
        line_number += 1
        # clear any extra white space
        orig_line = line
        line = line.strip()
        # ignore comments, and blank lines
        if not line or line.startswith('#'):
            continue
        # is this a section header? If so, update our variable and continue.
        # Section headers look like: [Section]
        if line.startswith('[') and line.endswith(']'):
            section = line.strip('[]')
            continue
        # We encountered a non-blank line outside of a section, this is a problem
        if not section:
            raise ValueError(
                'Unable to parse unit file; '
                'Unexpected line outside of a section: {0} (line: {1})'.format(
                    line, line_number))
        # Attempt to parse a line inside a section.
        # Lines should look like: name=value \
        #                         continuation
        continuation = False
        try:
            # if the previous value ends with \ then we are a continuation,
            # so remove the \, and set the flag so we'll append to it below
            if options[-1]['value'].endswith('\\'):
                options[-1]['value'] = options[-1]['value'][:-1]
                continuation = True
        except IndexError:
            pass
        try:
            # if we are a continuation, then just append our value to the previous line
            if continuation:
                options[-1]['value'] += orig_line
                continue
            # else we are a normal line, so split and get our name / value
            name, value = line.split('=', 1)
            options.append({
                'section': section,
                'name': name,
                'value': value
            })
        except ValueError:
            raise ValueError(
                'Unable to parse unit file; '
                'Malformed line in section {0}: {1} (line: {2})'.format(
                    section, line, line_number))
    # update our internal structure
    self._data['options'] = options
    return True
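To illustrate the shape this parser produces, a minimal sketch (the unit text and the `unit` instance are hypothetical):
import io
unit_text = (
    '[Service]\n'
    'ExecStart=/usr/bin/sleep 1d\n'
    'ExecStart=/bin/true\n'  # duplicate keys in one section are why configparser is avoided
)
# unit._set_options_from_file(io.StringIO(unit_text)) would set unit._data['options'] to:
# [{'section': 'Service', 'name': 'ExecStart', 'value': '/usr/bin/sleep 1d'},
#  {'section': 'Service', 'name': 'ExecStart', 'value': '/bin/true'}]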
1,039,269
Add an option to a section of the unit file. Args: section (str): The name of the section. If it doesn't exist, it will be created. name (str): The name of the option to add value (str): The value of the option Returns: True: The item was added
def add_option(self, section, name, value):
    # Don't allow updating units we loaded from fleet, it's not supported
    if self._is_live():
        raise RuntimeError('Submitted units cannot update their options')
    option = {
        'section': section,
        'name': name,
        'value': value
    }
    self._data['options'].append(option)
    return True
1,039,270
Remove an option from a unit. Args: section (str): The section to remove from. name (str): The item to remove. value (str, optional): If specified, only the option matching this value will be removed. If not specified, all options with ``name`` in ``section`` will be removed. Returns: True: At least one item was removed False: The item requested to remove was not found
def remove_option(self, section, name, value=None):
    # Don't allow updating units we loaded from fleet, it's not supported
    if self._is_live():
        raise RuntimeError('Submitted units cannot update their options')
    removed = 0
    # iterate through a copy of the options
    for option in list(self._data['options']):
        # if it's in our section
        if option['section'] == section:
            # and it matches our name
            if option['name'] == name:
                # and they didn't give us a value, or it matches
                if value is None or option['value'] == value:
                    # nuke it from the source
                    self._data['options'].remove(option)
                    removed += 1
    if removed > 0:
        return True
    return False
1,039,271
Update the desired state of a unit. Args: state (str): The desired state for the unit, must be one of ``_STATES`` Returns: str: The updated state Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400 ValueError: An invalid value for ``state`` was provided
def set_desired_state(self, state):
    if state not in self._STATES:
        raise ValueError(
            'state must be one of: {0}'.format(self._STATES))
    # update our internal structure
    self._data['desiredState'] = state
    # if we have a name, then we came from the server
    # and we have a handle to an active client,
    # so update ourselves on the server
    if self._is_live():
        self._update('_data', self._client.set_unit_desired_state(self.name, self.desiredState))
    # Return the state
    return self._data['desiredState']
1,039,273
Initializes a BIP32 wallet. Addresses returned by the wallet are of the form ``(path, address)``. Args: password (bytes): Master secret for the wallet. The password can also be passed as a string (``str``). testnet (bool): Whether to use the bitcoin testnet or mainnet. Defaults to ``False``.
def __init__(self, password, testnet=False):
    netcode = 'XTN' if testnet else 'BTC'
    if isinstance(password, str):
        password = password.encode()
    self.wallet = BIP32Node.from_master_secret(password, netcode=netcode)
    self.root_address = ('', self.wallet.address())
1,039,466
Walk over a scope tree and mangle symbol names. Args: toplevel: Defines if global scope should be mangled or not.
def mangle_scope_tree(root, toplevel):
    def mangle(scope):
        # don't mangle global scope if not specified otherwise
        if scope.get_enclosing_scope() is None and not toplevel:
            return
        for name in scope.symbols:
            mangled_name = scope.get_next_mangled_name()
            scope.mangled[name] = mangled_name
            scope.rev_mangled[mangled_name] = name

    def visit(node):
        mangle(node)
        for child in node.children:
            visit(child)

    visit(root)
1,039,810
Actually serialize input. Args: struct: structure to serialize to fmt: format to serialize to encoding: encoding to use while serializing Returns: encoded serialized structure Raises: various sorts of errors raised by libraries while serializing
def _do_serialize(struct, fmt, encoding):
    res = None
    _check_lib_installed(fmt, 'serialize')
    if fmt == 'ini':
        config = configobj.ConfigObj(encoding=encoding)
        for k, v in struct.items():
            config[k] = v
        res = b'\n'.join(config.write())
    elif fmt in ['json', 'json5']:
        # specify separators to get rid of trailing whitespace;
        # specify ensure_ascii to make sure unicode is serialized in \x... sequences,
        # not in \u sequences
        res = (json if fmt == 'json' else json5).dumps(
            struct, indent=2, separators=(',', ': '), ensure_ascii=False).encode(encoding)
    elif fmt == 'toml':
        if not _is_utf8(encoding):
            raise AnyMarkupError('toml must always be utf-8 encoded according to specification')
        res = toml.dumps(struct).encode(encoding)
    elif fmt == 'xml':
        # passing encoding argument doesn't encode, just sets the xml property
        res = xmltodict.unparse(struct, pretty=True, encoding='utf-8').encode('utf-8')
    elif fmt == 'yaml':
        res = yaml.safe_dump(struct, encoding='utf-8', default_flow_style=False)
    else:
        # a bare `raise` here would fail with "no active exception";
        # raising the library's own error type is an assumed fix
        raise AnyMarkupError('unknown format: {0}'.format(fmt))
    return res
1,040,014
Try to guess format of given bytestring. Args: inp: byte string to guess format of Returns: guessed format
def _guess_fmt_from_bytes(inp):
    stripped = inp.strip()
    fmt = None
    ini_section_header_re = re.compile(br'^\[([\w-]+)\]')
    if len(stripped) == 0:
        # this can be anything, so choose yaml, for example
        fmt = 'yaml'
    else:
        if stripped.startswith(b'<'):
            fmt = 'xml'
        else:
            for l in stripped.splitlines():
                line = l.strip()
                # there are C-style comments in json5, but we don't auto-detect it,
                # so it doesn't matter here
                if not line.startswith(b'#') and line:
                    break
            # json, ini or yaml => skip comments and then determine type
            if ini_section_header_re.match(line):
                fmt = 'ini'
            else:
                # we assume that yaml is superset of json
                # TODO: how do we figure out it's not yaml?
                fmt = 'yaml'
    return fmt
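Rough behavior of the guesser on small inputs, sketched from the branches above (illustrative values):
# _guess_fmt_from_bytes(b'<root/>')           -> 'xml'
# _guess_fmt_from_bytes(b'[section]\nkey=1')  -> 'ini'
# _guess_fmt_from_bytes(b'{"a": 1}')          -> 'yaml'  (json is treated as a yaml subset)
# _guess_fmt_from_bytes(b'')                  -> 'yaml'  (empty input defaults to yaml)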
1,040,018
Wrapper for all errors that occur during anymarkup calls. Args: cause: either a reraised exception or a string with cause
def __init__(self, cause, original_tb=''):
    super(AnyMarkupError, self).__init__()
    self.cause = cause
    self.original_tb = original_tb
1,040,022
Find the closest station from the new(er) list. Warning: There may be some errors with smaller non-US stations. Args: latitude (float) longitude (float) Returns: tuple (station_code (str), station_name (str))
def closest_eere(latitude, longitude):
    with open(env.SRC_PATH + '/eere_meta.csv') as eere_meta:
        stations = csv.DictReader(eere_meta)
        d = 9999
        station_code = ''
        station_name = ''
        for station in stations:
            new_dist = great_circle(
                (latitude, longitude),
                (float(station['latitude']), float(station['longitude']))).miles
            if new_dist <= d:
                d = new_dist
                station_code = station['station_code']
                station_name = station['weather_station']
        # in the original, the raise below was unreachable dead code after the
        # return; only raise when no station was actually found
        if station_code:
            return station_code, station_name
    raise KeyError('station not found')
1,040,471
Station information. Args: station_code (str): station code. Returns (dict): station information
def eere_station(station_code):
    with open(env.SRC_PATH + '/eere_meta.csv') as eere_meta:
        stations = csv.DictReader(eere_meta)
        for station in stations:
            if station['station_code'] == station_code:
                return station
    raise KeyError('station not found')
1,040,472
Set up an HTTP connection over an already connected socket. Args: host: ignored (exists for compatibility with parent) port: ignored (exists for compatibility with parent) strict: ignored (exists for compatibility with parent) timeout: ignored (exists for compatibility with parent) proxy_info (SSHTunnelProxyInfo): A SSHTunnelProxyInfo instance.
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
    # do the needful
    httplib.HTTPConnection.__init__(self, host, port)
    # looks like the python2 and python3 versions of httplib differ:
    # python2 executes any callable and returns the result as proxy_info;
    # python3 passes the callable directly to this function :(
    if hasattr(proxy_info, '__call__'):
        proxy_info = proxy_info(None)
    # make sure we have a valid socket before we stash it
    if not proxy_info or not isinstance(proxy_info, SSHTunnelProxyInfo) or not proxy_info.sock:
        raise ValueError('This Connection must be supplied an SSHTunnelProxyInfo via the proxy_info arg')
    # keep it
    self.sock = proxy_info.sock
1,040,480
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel):
    self.Classify = channel.unary_unary(
        '/tensorflow.serving.PredictionService/Classify',
        request_serializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_classification__pb2.ClassificationResponse.FromString,
    )
    self.Regress = channel.unary_unary(
        '/tensorflow.serving.PredictionService/Regress',
        request_serializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_regression__pb2.RegressionResponse.FromString,
    )
    self.Predict = channel.unary_unary(
        '/tensorflow.serving.PredictionService/Predict',
        request_serializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_predict__pb2.PredictResponse.FromString,
    )
    self.GetModelMetadata = channel.unary_unary(
        '/tensorflow.serving.PredictionService/GetModelMetadata',
        request_serializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataRequest.SerializeToString,
        response_deserializer=tensorflow__serving_dot_apis_dot_get__model__metadata__pb2.GetModelMetadataResponse.FromString,
    )
1,040,624
Open a connection to host:port via an ssh tunnel. Args: host (str): The host to connect to. port (int): The port to connect to. Returns: A socket-like object that is connected to the provided host:port.
def forward_tcp(self, host, port):
    return self.transport.open_channel(
        'direct-tcpip',
        (host, port),
        self.transport.getpeername()
    )
1,041,131
Split a string in the format '<host>:<port>' into its component parts. default_port will be used if a port is not included in the string. Args: hostport (str): A string of the form '<host>' or '<host>:<port>' Returns: two item tuple: (host, port) Raises: ValueError: The string was in an invalid format
def _split_hostport(self, hostport, default_port=None):
    try:
        (host, port) = hostport.split(':', 1)
    except ValueError:
        # no colon in the string so make our own port
        host = hostport
        if default_port is None:
            raise ValueError('No port found in hostport, and default_port not provided.')
        port = default_port
    try:
        port = int(port)
        if port < 1 or port > 65535:
            raise ValueError()
    except ValueError:
        raise ValueError("{0} is not a valid TCP port".format(port))
    return (host, port)
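Expected behavior, sketched from the logic above (the `client` instance is hypothetical):
# client._split_hostport('example.com:8080')             -> ('example.com', 8080)
# client._split_hostport('example.com', default_port=80) -> ('example.com', 80)
# client._split_hostport('example.com')                  -> ValueError (no port, no default)
# client._split_hostport('example.com:99999')            -> ValueError (not a valid TCP port)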
1,041,133
Convert a URL into a host / port, or into a path to a unix domain socket. Args: endpoint (str): A URL parsable by urlparse Returns: 3 item tuple: (host, port, path). host and port will be None, and path will be set, if a unix domain socket URL is passed; path will be None if a normal TCP-based URL is passed
def _endpoint_to_target(self, endpoint):
    parsed = urlparse.urlparse(endpoint)
    scheme = parsed[0]
    hostport = parsed[1]
    if 'unix' in scheme:
        return (None, None, unquote(hostport))
    if scheme == 'https':
        target_port = 443
    else:
        target_port = 80
    (target_host, target_port) = self._split_hostport(hostport, default_port=target_port)
    return (target_host, target_port, None)
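And the corresponding target resolution (again a sketch, with a hypothetical `client` and URLs):
# client._endpoint_to_target('https://fleet.example.com')             -> ('fleet.example.com', 443, None)
# client._endpoint_to_target('http://fleet.example.com:49153')        -> ('fleet.example.com', 49153, None)
# client._endpoint_to_target('http+unix://%2Fvar%2Frun%2Ffleet.sock') -> (None, None, '/var/run/fleet.sock')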
1,041,134
Generate a ProxyInfo class from a connected SSH transport Args: _ (None): Ignored. This is just here as the ProxyInfo spec requires it. Returns: SSHTunnelProxyInfo: A ProxyInfo with an active socket tunneled through SSH
def _get_proxy_info(self, _=None):
    # parse the fleet endpoint url, to establish a tunnel to that host
    (target_host, target_port, target_path) = self._endpoint_to_target(self._endpoint)
    # implement the proxy_info interface from httplib which requires
    # that we accept a scheme, and return a ProxyInfo object
    # we do :P
    # This is called once per request, so we keep this here
    # so that we can keep one ssh connection open, and allocate
    # new channels as needed per-request
    sock = None
    if target_path:
        sock = self._ssh_tunnel.forward_unix(path=target_path)
    else:
        sock = self._ssh_tunnel.forward_tcp(target_host, port=target_port)
    # Return a ProxyInfo class with this socket
    return SSHTunnelProxyInfo(sock=sock)
1,041,135
Make a single request to the fleet API endpoint Args: method (str): A dot delimited string indicating the method to call. Example: 'Machines.List' *args: Passed directly to the method being called. **kwargs: Passed directly to the method being called. Returns: dict: The response from the method called. Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def _single_request(self, method, *args, **kwargs):
    # The auto-generated client bindings require instantiating each object you
    # want to call a method on. For example, to request the list of machines
    # from /machines you would do:
    #   self._service.Machines().List(**kwargs)
    # This code iterates through the tokens in `method` and instantiates each
    # object, passing `*args` and `**kwargs` to the final method listed.

    # Start here
    _method = self._service
    # iterate over each token in the requested method
    for item in method.split('.'):
        # if it's the end of the line, pass our arguments
        if method.endswith(item):
            _method = getattr(_method, item)(*args, **kwargs)
        else:
            # otherwise, just create an instance and move on
            _method = getattr(_method, item)()
    # Discovered endpoints look like r'$ENDPOINT/path/to/method' which isn't a valid URI.
    # Per the fleet API documentation:
    # "Note that this discovery document intentionally ships with an unusable rootUrl;
    # clients must initialize this as appropriate."
    # So we follow the documentation, and replace the token with our actual endpoint.
    _method.uri = _method.uri.replace('$ENDPOINT', self._endpoint)
    # Execute the method and return its output directly
    try:
        return _method.execute(http=self._http)
    except googleapiclient.errors.HttpError as exc:
        response = json.loads(exc.content.decode('utf-8'))['error']
        raise APIError(code=response['code'], message=response['message'], http_error=exc)
1,041,136
Delete a unit from the cluster Args: unit (str, Unit): The Unit, or name of the unit to delete Returns: True: The unit was deleted Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def destroy_unit(self, unit):
    # if we are given an object, grab its name property;
    # otherwise, convert to string
    if isinstance(unit, Unit):
        unit = unit.name
    else:
        unit = str(unit)
    self._single_request('Units.Delete', unitName=unit)
    return True
1,041,140
Retrieve a specific unit from the fleet cluster by name. Args: name (str): The name of the unit to retrieve Returns: Unit: The unit identified by ``name`` in the fleet cluster Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def get_unit(self, name):
    return Unit(client=self, data=self._single_request('Units.Get', unitName=name))
1,041,142
Return the current UnitState for the fleet cluster Args: machine_id (str): filter all UnitState objects to those originating from a specific machine unit_name (str): filter all UnitState objects to those related to a specific unit Yields: UnitState: The next UnitState in the cluster Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def list_unit_states(self, machine_id=None, unit_name=None):
    for page in self._request('UnitState.List', machineID=machine_id, unitName=unit_name):
        for state in page.get('states', []):
            yield UnitState(data=state)
1,041,143
Basename for USAF base. Args: usaf (str): USAF code Returns: (str)
def tmybasename(usaf):
    url_file = open(env.SRC_PATH + '/tmy3.csv')
    for line in url_file.readlines():
        # `is not -1` compared identity, not value; use != for the find() sentinel
        if line.find(usaf) != -1:
            return line.rstrip().partition(',')[0]
1,042,281
Change a TMY3 date to an arbitrary year. Args: tmy_date (datetime): date to mangle. year (int): desired year. Returns: (None)
def normalize_date(tmy_date, year):
    month = tmy_date.month
    day = tmy_date.day - 1
    hour = tmy_date.hour
    # hack to get around 24:00 notation
    # (== instead of `is`: identity comparison on ints is unreliable)
    if month == 1 and day == 0 and hour == 0:
        year = year + 1
    return datetime.datetime(year, month, 1) + \
        datetime.timedelta(days=day, hours=hour, minutes=0)
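A worked example of the 24:00 rollover handling (dates are illustrative):
import datetime
# An ordinary record maps straight into the target year:
# normalize_date(datetime.datetime(1988, 1, 1, 1), 2015) -> datetime.datetime(2015, 1, 1, 1, 0)
# The month=1, day=1, hour=0 case (TMY3's "24:00" of Dec 31) rolls into the next year:
# normalize_date(datetime.datetime(1988, 1, 1, 0), 2015) -> datetime.datetime(2016, 1, 1, 0, 0)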
1,042,282
Initialize. Args: usaf (str): USAF station code Returns: (object)
def __init__(self, usaf):
    filename = env.WEATHER_DATA_PATH + '/' + usaf + 'TYA.csv'
    self.csvfile = None
    try:
        self.csvfile = open(filename)
    except IOError:
        logger.info("%s not found", filename)
        download(_tmy_url(usaf), filename)
        self.csvfile = open(filename)
    logging.debug('opened %s', self.csvfile.name)
    header = self.csvfile.readline().split(',')
    self.tmy_data = csv.DictReader(self.csvfile)
    self.latitude = float(header[4])
    self.longitude = float(header[5])
    self.tz = float(header[3])
1,042,284
Create a connection with given settings. Args: settings (dict): A dictionary of settings Returns: :class:`Connection`. The connection
def create_from_settings(settings):
    return Connection(
        settings["url"],
        settings["base_url"],
        settings["user"],
        settings["password"],
        authorizations=settings["authorizations"],
        debug=settings["debug"]
    )
1,042,609
Parses a response. Args: text (str): Text to parse Kwargs: key (str): Key to look for, if any Returns: Parsed value Raises: ValueError
def parse(self, text, key=None):
    try:
        data = json.loads(text)
    except ValueError as e:
        raise ValueError("%s: Value: [%s]" % (e, text))
    if data and key:
        if key not in data:
            raise ValueError("Invalid response (key %s not found): %s" % (key, data))
        data = data[key]
    return data
1,042,615
Initialize. Args: campfire (:class:`Campfire`): Campfire instance data (dict or str): If string, message type will be set to either paste or text
def __init__(self, campfire, data):
    dataType = type(data)
    if dataType == types.StringType or dataType == types.UnicodeType:
        messageType = self._TYPE_PASTE if data.find("\n") >= 0 else self._TYPE_TEXT
        if messageType == self._TYPE_TEXT:
            matches = re.match(r"^https?://(www\.)?twitter\.com/([^/]+)/status/(\d+)", data)
            if matches:
                messageType = self._TYPE_TWEET
        data = {
            "type": messageType,
            "body": data
        }
    super(Message, self).__init__(campfire)
    self.set_data(data, ["created_at"])
    self.user = None
    self.room = None
    if "user_id" in data and data["user_id"]:
        self.user = self._campfire.get_user(data["user_id"])
    if "room_id" in data and data["room_id"]:
        self.room = self._campfire.get_room(data["room_id"])
    if self.is_upload():
        self.upload = self._connection.get(
            "room/%s/messages/%s/upload" % (self.room.id, self.id), key="upload")
        if "full_url" in self.upload:
            self.upload["url"] = self.upload["full_url"]
            del self.upload["full_url"]
    if self.is_tweet():
        # Tweet formats may be different if the streaming is line, or transcript based (I know, I know...)
        matches = re.match(r"(.+)\s+--\s+@([^,]+),\s*(.+)$", self.body)
        if matches:
            self.tweet = {
                "tweet": matches.group(1),
                "user": matches.group(2),
                "url": matches.group(3)
            }
        else:
            tweet_data = {}
            if re.match("^---", self.body):
                for line in self.body.split("\n")[1:]:
                    matches = re.match(r'^:([^:]+):\s*"?(.+)"?$', line)
                    if matches:
                        tweet_data[matches.group(1)] = matches.group(2)
            if tweet_data and "author_username" in tweet_data and "message" in tweet_data and "id" in tweet_data:
                self.tweet = {
                    "tweet": tweet_data["message"],
                    "user": tweet_data["author_username"],
                    "url": "http://twitter.com/%s/status/%s" % (tweet_data["author_username"], tweet_data["id"])
                }
            else:
                self.type = self._TYPE_TEXT
1,042,684
Initialize. Args: subdomain (str): Campfire subdomain username (str): User password (str): Password Kwargs: ssl (bool): Whether SSL is enabled currentUser (:class:`User`): If specified, don't auto load current user, use this one instead
def __init__(self, subdomain, username, password, ssl=False, currentUser=None):
    self.base_url = "http%s://%s.campfirenow.com" % ("s" if ssl else "", subdomain)
    self._settings = {
        "subdomain": subdomain,
        "username": username,
        "password": password,
        "ssl": ssl
    }
    self._user = currentUser
    self._users = {}
    self._rooms = {}
    if not self._user:
        _connection = Connection(url="%s/users/me" % self.base_url, user=username, password=password)
        user = _connection.get(key="user")
    self._connection = Connection(
        base_url=self.base_url,
        user=self._user.token if self._user else user["api_auth_token"],
        password="x"
    )
    if self._user:
        self._user.set_connection(self._connection)
    else:
        self._user = User(self, user["id"], current=True)
        self._user.token = user["api_auth_token"]
1,042,954
Search transcripts. Args: terms (str): Terms for search Returns: array. Messages
def search(self, terms):
    messages = self._connection.get("search/%s" % urllib.quote_plus(terms), key="messages")
    if messages:
        messages = [Message(self, message) for message in messages]
    return messages
1,042,960
Initialize. Args: campfire (:class:`Campfire`): Campfire instance id (str): User ID Kwargs: current (bool): Whether user is current user, or not
def __init__(self, campfire, id, current=False):
    super(User, self).__init__(campfire)
    self.set_data(self._connection.get("users/%s" % id, key="user"))
    self.current = current
1,043,870
Attach an observer. Args: observer (func): A function to be called when new messages arrive Returns: :class:`Stream`. Current instance to allow chaining
def attach(self, observer):
    if observer not in self._observers:
        self._observers.append(observer)
    return self
1,044,088
Called when incoming messages arrive. Args: messages (tuple): Messages (each message is a dict)
def incoming(self, messages):
    if self._observers:
        campfire = self._room.get_campfire()
        for message in messages:
            for observer in self._observers:
                observer(Message(campfire, message))
1,044,089
Initialize. Args: settings (dict): Settings used to create a :class:`Connection` instance room_id (int): Room ID Kwargs: pause (int): Pause in seconds between requests
def __init__(self, settings, room_id, pause=1):
    Process.__init__(self)
    self._pause = pause
    self._room_id = room_id
    self._callback = None
    self._queue = None
    self._connection = Connection.create_from_settings(settings)
    self._last_message_id = None
1,044,091
Called when new messages arrive. Args: messages (tuple): Messages
def received(self, messages):
    if messages:
        if self._queue:
            self._queue.put_nowait(messages)
        if self._callback:
            self._callback(messages)
1,044,094
Initialize. Args: settings (dict): Settings used to create a :class:`Connection` instance room_id (int): Room ID
def __init__(self, settings, room_id):
    StreamProcess.__init__(self, settings, room_id)
    self._reactor = self._connection.get_twisted_reactor()
    self._protocol = None
1,044,095
Callback issued by twisted when new line arrives. Args: line (str): Incoming line
def lineReceived(self, line):
    # the while/break/else construct runs the header branch at most once;
    # the else clause runs only after the headers have been consumed
    while self._in_header:
        if line:
            self._headers.append(line)
        else:
            http, status, message = self._headers[0].split(" ", 2)
            status = int(status)
            if status == 200:
                self.factory.get_stream().connected()
            else:
                self.factory.continueTrying = 0
                self.transport.loseConnection()
                self.factory.get_stream().disconnected(RuntimeError(status, message))
                return
            self._in_header = False
        break
    else:
        try:
            self._len_expected = int(line, 16)
            self.setRawMode()
        except ValueError:
            # not a chunk-size line (narrowed from a bare except)
            pass
1,044,100
Process data. Args: data (str): Incoming data
def rawDataReceived(self, data):
    if self._len_expected is not None:
        data, extra = data[:self._len_expected], data[self._len_expected:]
        self._len_expected -= len(data)
    else:
        extra = ""
    self._buffer += data
    if self._len_expected == 0:
        data = self._buffer.strip()
        if data:
            lines = data.split("\r")
            for line in lines:
                try:
                    message = self.factory.get_stream().get_connection().parse(line)
                    if message:
                        self.factory.get_stream().received([message])
                except ValueError:
                    pass
        self._buffer = ""
        self._len_expected = None
        self.setLineMode(extra)
1,044,101
Constructs a :class:`Spoolverb` instance from the string representation of the given verb. Args: verb (str): representation of the verb e.g.: ``'ASCRIBESPOOL01LOAN12/150526150528'``. Can also be in binary format (:obj:`bytes`): ``b'ASCRIBESPOOL01PIECE'``. Returns: :class:`Spoolverb` instance.
def from_verb(cls, verb):
    pattern = r'^(?P<meta>[A-Z]+)(?P<version>\d+)(?P<action>[A-Z]+)(?P<arg1>\d+)?(\/(?P<arg2>\d+))?$'
    try:
        verb = verb.decode()
    except AttributeError:
        pass
    match = re.match(pattern, verb)
    if not match:
        raise SpoolverbError('Invalid spoolverb: {}'.format(verb))
    data = match.groupdict()
    meta = data['meta']
    version = data['version']
    action = data['action']
    if action == 'EDITIONS':
        num_editions = data['arg1']
        return cls(meta=meta, version=version, action=action, num_editions=int(num_editions))
    elif action == 'LOAN':
        # TODO Review. Workaround for piece loans
        try:
            edition_num = int(data['arg1'])
        except TypeError:
            edition_num = 0
        loan_start = data['arg2'][:6]
        loan_end = data['arg2'][6:]
        return cls(meta=meta, version=version, action=action, edition_num=int(edition_num),
                   loan_start=loan_start, loan_end=loan_end)
    elif action in ['FUEL', 'PIECE', 'CONSIGNEDREGISTRATION']:
        # no edition number for these verbs
        return cls(meta=meta, version=version, action=action)
    else:
        edition_num = data['arg1']
        return cls(meta=meta, version=version, action=action, edition_num=int(edition_num))
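For example, following the regex above (parsed values shown as comments; attribute layout is illustrative):
# Spoolverb.from_verb('ASCRIBESPOOL01PIECE')
#   -> meta='ASCRIBESPOOL', version='01', action='PIECE'
# Spoolverb.from_verb('ASCRIBESPOOL01LOAN12/150526150528')
#   -> action='LOAN', edition_num=12, loan_start='150526', loan_end='150528'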
1,044,679
Selects the inputs for the spool transaction. Args: address (str): bitcoin address to select inputs for nfees (int): number of fees ntokens (int): number of tokens min_confirmations (Optional[int]): minimum number of required confirmations; defaults to 6
def select_inputs(self, address, nfees, ntokens, min_confirmations=6):
    unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']
    unspents = [u for u in unspents if u not in self._spents.queue]
    if len(unspents) == 0:
        raise Exception("No spendable outputs found")
    fees = [u for u in unspents if u['amount'] == self.fee][:nfees]
    tokens = [u for u in unspents if u['amount'] == self.token][:ntokens]
    if len(fees) != nfees or len(tokens) != ntokens:
        raise SpoolFundsError("Not enough outputs to spend. Refill your wallet")
    if self._spents.qsize() > self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens):
        [self._spents.get() for i in
         range(self._spents.qsize() + nfees + ntokens - self.SPENTS_QUEUE_MAXSIZE)]
    [self._spents.put(fee) for fee in fees]
    [self._spents.put(token) for token in tokens]
    return fees + tokens
1,044,742
Start producing. Args: consumer: Consumer
def startProducing(self, consumer):
    self._consumer = consumer
    self._current_deferred = defer.Deferred()
    self._sent = 0
    self._paused = False
    if not hasattr(self, "_chunk_headers"):
        self._build_chunk_headers()
    if self._data:
        block = ""
        for field in self._data:
            block += self._chunk_headers[field]
            block += self._data[field]
            block += "\r\n"
        self._send_to_consumer(block)
    if self._files:
        self._files_iterator = self._files.iterkeys()
        self._files_sent = 0
        self._files_length = len(self._files)
        self._current_file_path = None
        self._current_file_handle = None
        self._current_file_length = None
        self._current_file_sent = 0
        result = self._produce()
        if result:
            return result
    else:
        return defer.succeed(None)
    return self._current_deferred
1,045,746
Send a block of bytes to the consumer. Args: block (str): Block of bytes
def _send_to_consumer(self, block):
    self._consumer.write(block)
    self._sent += len(block)
    if self._callback:
        self._callback(self._sent, self.length)
1,045,751
Returns the header of the encoding of this parameter. Args: name (str): Field name Kwargs: is_file (bool): If True, this is a file field Returns: str. The header block for the field
def _headers(self, name, is_file=False):
    value = self._files[name] if is_file else self._data[name]
    _boundary = (self.boundary.encode("utf-8")
                 if isinstance(self.boundary, unicode)
                 else urllib.quote_plus(self.boundary))
    headers = ["--%s" % _boundary]
    if is_file:
        disposition = 'form-data; name="%s"; filename="%s"' % (name, os.path.basename(value))
    else:
        disposition = 'form-data; name="%s"' % name
    headers.append("Content-Disposition: %s" % disposition)
    if is_file:
        file_type = self._file_type(name)
    else:
        file_type = "text/plain; charset=utf-8"
    headers.append("Content-Type: %s" % file_type)
    if is_file:
        headers.append("Content-Length: %i" % self._file_size(name))
    else:
        headers.append("Content-Length: %i" % len(value))
    headers.append("")
    headers.append("")
    return "\r\n".join(headers)
1,045,754
Returns file type for given file field. Args: field (str): File field Returns: string. File type
def _file_type(self, field):
    # renamed from `type` to avoid shadowing the builtin
    mime_type = mimetypes.guess_type(self._files[field])[0]
    return mime_type.encode("utf-8") if isinstance(mime_type, unicode) else str(mime_type)
1,045,756
Returns the file size for given file field. Args: field (str): File field Returns: int. File size
def _file_size(self, field):
    size = 0
    try:
        handle = open(self._files[field], "r")
        size = os.fstat(handle.fileno()).st_size
        handle.close()
    except (IOError, OSError):
        # narrowed from a bare except: treat unreadable files as zero-length
        size = 0
    self._file_lengths[field] = size
    return self._file_lengths[field]
1,045,757
Convert a timestamp to a date string. Args: ts: The timestamp to convert dt_format: Target date string format Returns: The date string
def ts_to_dt_str(ts, dt_format='%Y-%m-%d %H:%M:%S'):
    return datetime.datetime.fromtimestamp(int(ts)).strftime(dt_format)
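Usage sketch; note that fromtimestamp() uses the local timezone, so the actual output depends on the machine:
# ts_to_dt_str(0)            -> '1970-01-01 00:00:00'  (on a UTC machine)
# ts_to_dt_str(0, '%Y%m%d')  -> '19700101'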
1,045,856
Two percent high design temperature for a location, in degrees Celsius. Args: station_code (str): Weather Station Code Returns: float: degrees Celsius
def twopercent(station_code):
    # (DB=>MWB) 2%, MaxDB=
    temp = None
    try:
        fin = open('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'ddy')))
        for line in fin:
            # the original regex was lost in extraction; this pattern is an
            # assumption reconstructed from the comment above and the
            # analogous search in minimum() below
            value = re.search(r'2%, MaxDB=(-?\d+\.\d*)', line)
            if value:
                temp = float(value.groups()[0])
    except IOError:
        pass
    if not temp:
        # (DB=>MWB) 2%, MaxDB=
        try:
            fin = open('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'stat')))
            flag = 0
            tdata = []
            for line in fin:
                if line.find('2%') != -1:
                    flag = 3
                if flag > 0:
                    tdata.append(line.split('\t'))
                    flag -= 1
            temp = float(tdata[2][5].strip())
        except IOError:
            pass
    if temp:
        return temp
    else:
        raise Exception("Error: 2% High Temperature not found")
1,046,483
Extreme minimum design temperature for a location, in degrees Celsius. Args: station_code (str): Weather Station Code Returns: float: degrees Celsius
def minimum(station_code):
    temp = None
    fin = None
    try:
        fin = open('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'ddy')))
    except IOError:
        logger.info("File not found")
        download_extract(_eere_url(station_code))
        fin = open('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'ddy')))
    for line in fin:
        value = re.search('Max Drybulb=(-?\\d+\\.\\d*)', line)
        if value:
            temp = float(value.groups()[0])
    if not temp:
        try:
            fin = open('%s/%s' % (env.WEATHER_DATA_PATH, _basename(station_code, 'stat')))
            for line in fin:
                if line.find('Minimum Dry Bulb') != -1:
                    return float(line[37:-1].split('\xb0')[0])
        except IOError:
            pass
    if temp:
        return temp
    else:
        raise Exception("Error: Minimum Temperature not found")
1,046,484
Data for a weather station. Args: station_code (str): Station code of weather station DST (bool): Whether timestamps are in daylight saving time. Defaults to False
def __init__(self, station_code, DST=False):
    filename = env.WEATHER_DATA_PATH + '/' + _basename(station_code)
    self.csvfile = None
    try:
        self.csvfile = open(filename)
    except IOError:
        logger.info("File not found")
        download_extract(_eere_url(station_code))
        self.csvfile = open(filename)
    logging.debug('opened %s', self.csvfile.name)
    fieldnames = ["Year", "Month", "Day", "Hour", "Minute", "DS",
                  "Dry-bulb (C)", "Dewpoint (C)", "Relative Humidity",
                  "Pressure (Pa)", "ETR (W/m^2)", "ETRN (W/m^2)",
                  "HIR (W/m^2)", "GHI (W/m^2)", "DNI (W/m^2)", "DHI (W/m^2)",
                  "GHIL (lux)", "DNIL (lux)", "DFIL (lux)", "Zlum (Cd/m2)",
                  "Wdir (degrees)", "Wspd (m/s)", "Ts cover", "O sky cover",
                  "CeilHgt (m)", "Present Weather", "Pw codes", "Pwat (cm)",
                  "AOD (unitless)", "Snow Depth (cm)", "Days since snowfall"]
    station_meta = self.csvfile.readline().split(',')
    self.station_name = station_meta[1]
    self.CC = station_meta[3]
    self.station_fmt = station_meta[4]
    self.station_code = station_meta[5]
    self.lat = station_meta[6]
    self.lon = station_meta[7]
    self.TZ = float(station_meta[8])
    self.ELEV = station_meta[9]
    self.DST = DST
    if self.DST:
        geocoder = geocoders.GoogleV3()
        self.local_tz = pytz.timezone(geocoder.timezone((self.lat, self.lon)).zone)
    dummy = ""
    for _ in range(7):
        dummy += self.csvfile.readline()
    self.epw_data = csv.DictReader(self.csvfile, fieldnames=fieldnames)
1,046,485
Apply selector to transform each object in objs. This operates in-place on objs. Empty objects are removed from the list. Args: mode: either KEEP (to keep selected items & their ancestors) or DELETE (to delete selected items and their children).
def apply_filter(objs, selector, mode):
    indices_to_delete = []
    presumption = DELETE if mode == KEEP else KEEP
    for i, obj in enumerate(objs):
        timer.log('Applying selector: %s' % selector)
        marks = {k: mode for k in selector_to_ids(selector, obj, mode)}
        timer.log('done applying selector')
        timer.log('filtering object...')
        filter_object(obj, marks, presumption=presumption)
        timer.log('done filtering')
        if obj is None:
            indices_to_delete.append(i)
    for index in reversed(indices_to_delete):
        del objs[index]
1,046,514
Retrieve the ownership tree of all editions of a piece given the hash. Args: hash (str): Hash of the file to check. Can be created with the :class:`File` class Returns: dict: Ownership tree of all editions of a piece. .. note:: For now we only support searching the blockchain by the piece hash.
def history(self, hash):
    txs = self._t.get(hash, max_transactions=10000)['transactions']
    tree = defaultdict(list)
    number_editions = 0
    for tx in txs:
        _tx = self._t.get(tx['txid'])
        txid = _tx['txid']
        verb_str = BlockchainSpider.check_script(_tx['vouts'])
        verb = Spoolverb.from_verb(verb_str)
        from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)
        timestamp_utc = _tx['time']
        action = verb.action
        edition_number = 0
        if action != 'EDITIONS':
            edition_number = verb.edition_number
        else:
            number_editions = verb.num_editions
        tree[edition_number].append({'txid': txid,
                                     'verb': verb_str,
                                     'from_address': from_address,
                                     'to_address': to_address,
                                     'piece_address': piece_address,
                                     'timestamp_utc': timestamp_utc,
                                     'action': action,
                                     'number_editions': number_editions,
                                     'edition_number': edition_number})
    # lets update the records with the number of editions of the piece since we do not know
    # this information before the EDITIONS transaction
    for edition, chain in tree.items():
        [d.update({'number_editions': number_editions}) for d in chain]
    return dict(tree)
1,047,002
Checks for the from, to, and piece address of a SPOOL transaction. Args: tx (dict): Transaction payload, as returned by :meth:`transactions.Transactions.get()`. .. note:: Formats as returned by JSON-RPC API ``decoderawtransaction`` have yet to be supported. Returns: Tuple([str]): Sender, receiver, and piece addresses.
def _get_addresses(tx):
    from_address = set([vin['address'] for vin in tx['vins']])
    if len(from_address) != 1:
        raise InvalidTransactionError("Transaction should have inputs "
                                      "from only one address {}".format(from_address))
    # order vouts. discard the last vout since it's the op_return
    vouts = sorted(tx['vouts'], key=lambda d: d['n'])[:-1]
    piece_address = vouts[0]['address']
    to_address = vouts[-1]['address']
    from_address = from_address.pop()
    return from_address, to_address, piece_address
1,047,005
Convert a string representation of the time (as returned by blockr.io api) into unix timestamp. Args: time_utc_str (str): String representation of the time, with the format: `'%Y-%m-%dT%H:%M:%S %Z'`. Returns: int: Unix timestamp.
def _get_time_utc(time_utc_str):
    dt = datetime.strptime(time_utc_str, TIME_FORMAT)
    return int(calendar.timegm(dt.utctimetuple()))
1,047,006
Mangle names. Args: toplevel: defaults to False. Defines if global scope should be mangled or not.
def mangle(tree, toplevel=False):
    sym_table = SymbolTable()
    visitor = ScopeTreeVisitor(sym_table)
    visitor.visit(tree)
    fill_scope_references(tree)
    mangle_scope_tree(sym_table.globals, toplevel)
    mangler = NameManglerVisitor()
    mangler.visit(tree)
1,047,096
Initialize. Args: settings (dict): Settings used to create a :class:`Connection` instance room (int): Room queue (:class:`multiprocessing.Queue`): Queue to share data between processes files (dict): Dictionary, where key is the field name, and value is the path
def __init__(self, settings, room, queue, files):
    Process.__init__(self)
    self._room = room
    self._queue = queue
    self._files = files
    self._data = {}
    self._connection = Connection.create_from_settings(settings)
    self._reactor = None
    self._producer = None
    self._receiver = None
1,047,448
Add POST data. Args: data (dict): key => value dictionary
def add_data(self, data):
    if not self._data:
        self._data = {}
    self._data.update(data)
1,047,449
Save a GFS grib file to DATA_PATH. Args: timestamp (datetime): ??? dataset (function): naming convention function, e.g. pgrb2 path (str): if None, defaults to DATA_PATH products (list): TMP, etc.; if None, downloads all levels (list): surface, etc.; if None, downloads all offset (int): should be a multiple of 3
def download(timestamp, dataset, path=None, products=None, levels=None, offset=0):
    if path is None:
        path = DATA_PATH
    closest = timestamp.hour // 6 * 6
    filename = dataset(closest, offset)
    gfs_timestamp = '%s%02d' % (timestamp.strftime('%Y%m%d'), closest)
    url = baseurl(gfs_timestamp, filename)
    index = url + '.idx'
    messages = message_index(index)
    segments = _filter_messages(messages, products, levels)
    dl_path = path + '/%s/' % gfs_timestamp
    _verify_path(dl_path)
    _download_segments(path + filename, url, segments)
1,047,616
Get the message index of components via urllib2. Args: index_url (str): URL of the grib index file Returns: list: messages
def message_index(index_url):
    idx = csv.reader(urllib2.urlopen(index_url), delimiter=':')
    messages = []
    for line in idx:
        messages.append(line)
    return messages
1,047,617
Initialize. Args: campfire (:class:`Campfire`): Campfire Instance Kwargs: data (dict): Entity data
def __init__(self, campfire, data=None):
    super(CampfireEntity, self).__init__(data)
    self._campfire = campfire
    self._connection = None
    if self._campfire:
        self._connection = self._campfire.get_connection()
1,047,749
Set entity data Args: data (dict): Entity data datetime_fields (array): Fields that should be parsed as datetimes
def set_data(self, data={}, datetime_fields=[]):
    if datetime_fields:
        for field in datetime_fields:
            if field in data:
                data[field] = self._parse_datetime(data[field])
    super(CampfireEntity, self).set_data(data)
1,047,750
Parses a datetime string from "YYYY/MM/DD HH:MM:SS +HHMM" format Args: value (str): String Returns: datetime. Datetime
def _parse_datetime(self, value):
    # initialize to a zero timedelta; the original used the int 0, which
    # cannot be subtracted from a datetime when the pattern doesn't match
    offset = datetime.timedelta()
    pattern = r"\s+([+-]{1}\d+)\Z"
    matches = re.search(pattern, value)
    if matches:
        value = re.sub(pattern, '', value)
        offset = datetime.timedelta(hours=int(matches.group(1)) / 100)
    return datetime.datetime.strptime(value, "%Y/%m/%d %H:%M:%S") - offset
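A worked example of the offset handling (values illustrative):
# _parse_datetime('2012/12/12 02:19:23 +0100')
#   parses the naive part, then subtracts the +0100 offset:
#   -> datetime.datetime(2012, 12, 12, 1, 19, 23)  (i.e. the UTC time)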
1,047,751
Read the file content and load it as JSON. Arguments: file_name (:py:class:`str`): The filename. Returns: :py:class:`dict`: The loaded JSON data. Raises: :py:class:`FileNotFoundError`: If the file is not found.
def _read_file(file_name):
    with open(file_name) as config_file:
        data = json.load(config_file)
    return data
1,047,754
Format DSPAM headers with passed results, and add them to the message. Args: results -- A results dictionary from DspamClient.
def add_dspam_headers(self, results):
    for header in self.headers:
        hname = self.header_prefix + header
        if header.lower() in results:
            hvalue = results[header.lower()]
            logger.debug(
                '<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
            self.addheader(hname, hvalue)
        elif header == 'Processed':
            # X-DSPAM-Processed: Wed Dec 12 02:19:23 2012
            hvalue = datetime.datetime.now().strftime(
                '%a %b %d %H:%M:%S %Y')
            logger.debug(
                '<{}> Adding header {}: {}'.format(self.id, hname, hvalue))
            self.addheader(hname, hvalue)
        else:
            logger.warning(
                '<{}> Not adding header {}, no data available in '
                'DSPAM results'.format(self.id, hname))
1,047,967
Validate webfont settings and optionally fill in the missing ``csspart_path`` option. Args: webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``). Returns: dict: Webfont settings
def extend_webfont_settings(webfont_settings):
    if not webfont_settings.get('fontdir_path', False):
        raise IcomoonSettingsError(("Webfont settings miss the required key "
                                    "item 'fontdir_path'"))
    if not webfont_settings.get('csspart_path', False):
        webfont_settings['csspart_path'] = None
    return webfont_settings
1,048,170
Returns the current state fluent canonical name. Args: name (str): The next state fluent name (e.g. ``"rainfall'/1"``). Returns: str: The current state fluent name.
def rename_next_state_fluent(name: str) -> str:
    i = name.index('/')
    functor = name[:i - 1]
    arity = name[i + 1:]
    return "{}/{}".format(functor, arity)
1,048,272
Returns the next state fluent canonical name. Args: name (str): The current state fluent name. Returns: str: The next state fluent name.
def rename_state_fluent(name: str) -> str:
    i = name.index('/')
    functor = name[:i]
    arity = name[i + 1:]
    return "{}'/{}".format(functor, arity)
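The two renamers are inverses of each other; for instance (fluent names illustrative):
# rename_state_fluent("rainfall/1")        -> "rainfall'/1"
# rename_next_state_fluent("rainfall'/1")  -> "rainfall/1"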
1,048,273
Run the application using a simple WSGI server. Arguments: host (str, optional): Host on which to listen. port (int, optional): Port number on which to listen.
def run(self, host='127.0.0.1', port=8080):
    from wsgiref import simple_server
    self._server = simple_server.make_server(host, port, self)
    self._server.serve_forever()
1,048,516
Decorator to add a route for a request with the specified HTTP method. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. pattern (str): Routing pattern the path must match. Returns: function: Decorator function to add route.
def route(self, method, pattern):
    def decorator(callback):
        self._router.add(method, pattern, callback)
        return callback
    return decorator
1,048,518
Respond to an HTTP request. Arguments: environ (dict): Dictionary of environment variables start_response (callable): Callable to start HTTP response Returns: list: List containing a single sequence of bytes.
def __call__(self, environ, start_response):
    self.request = Request(environ)
    self.response = Response(start_response)
    route = self._router.resolve(self.request.method, self.request.path)
    if route is not None:
        callback, args, kwargs = route
        value = callback(*args, **kwargs)
    elif self._router.contains_method(self.request.method):
        value = 404  # Not found
    else:
        value = 501  # Not Implemented
    if isinstance(value, str) or isinstance(value, bytes):
        self.response.body = value
    elif isinstance(value, int) and value in Response._responses:
        self.response.status = value
        if self.response.body is None:
            self.response.body = self._get_error_page_callback()()
    elif (isinstance(value, tuple) and isinstance(value[0], int) and
          isinstance(value[1], str) and value[0] in Response._responses and
          300 <= value[0] <= 308):
        self.response.add_header('Location', value[1])
        self.response.status = value[0]
        if self.response.body is None:
            self.response.body = self._get_error_page_callback()()
    else:
        raise Error('Route callback for {} {} returned invalid '
                    'value: {}: {!r}'.format(self.request.method, self.request.path,
                                             type(value).__name__, value))
    return self.response.response()
1,048,522
Add a route. Arguments: method (str): HTTP method, e.g. GET, POST, etc. pattern (str): Pattern that request paths must match. callback (callable): Route handler that is invoked when a request path matches the *pattern*.
def add(self, method, pattern, callback):
    pat_type, pat = self._normalize_pattern(pattern)
    if pat_type == 'literal':
        self._literal[method][pat] = callback
    elif pat_type == 'wildcard':
        self._wildcard[method].append(WildcardRoute(pat, callback))
    else:
        self._regex[method].append(RegexRoute(pat, callback))
1,048,525
Check if there is at least one handler for *method*. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. Returns: ``True`` if there is at least one route defined for *method*, ``False`` otherwise
def contains_method(self, method):
    return method in itertools.chain(self._literal, self._wildcard, self._regex)
1,048,526
Resolve a request to a route handler. Arguments: method (str): HTTP method, e.g. GET, POST, etc. path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if no route matches the request.
def resolve(self, method, path):
    if method in self._literal and path in self._literal[method]:
        return self._literal[method][path], [], {}
    else:
        return self._resolve_non_literal_route(method, path)
1,048,527
Resolve a request to a wildcard or regex route handler. Arguments: method (str): HTTP method name, e.g. GET, POST, etc. path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if no route matches the request.
def _resolve_non_literal_route(self, method, path):
    for route_dict in (self._wildcard, self._regex):
        if method in route_dict:
            for route in reversed(route_dict[method]):
                callback_data = route.match(path)
                if callback_data is not None:
                    return callback_data
    return None
1,048,528
Return a normalized form of the pattern. Normalize the pattern by removing the pattern type prefix if it exists in the pattern. Then return the pattern type and the pattern as a tuple of two strings. Arguments: pattern (str): Route pattern to match request paths Returns: tuple: Tuple of pattern type (str) and pattern (str)
def _normalize_pattern(pattern):
    if pattern.startswith('regex:'):
        pattern_type = 'regex'
        pattern = pattern[len('regex:'):]
    elif pattern.startswith('wildcard:'):
        pattern_type = 'wildcard'
        pattern = pattern[len('wildcard:'):]
    elif pattern.startswith('literal:'):
        pattern_type = 'literal'
        pattern = pattern[len('literal:'):]
    elif RegexRoute.like(pattern):
        pattern_type = 'regex'
    elif WildcardRoute.like(pattern):
        pattern_type = 'wildcard'
    else:
        pattern_type = 'literal'
    return pattern_type, pattern
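Examples of the normalization (the prefixed forms are explicit; unprefixed patterns fall through to the like() heuristics, whose exact behavior is assumed here):
# _normalize_pattern('literal:/about')     -> ('literal', '/about')
# _normalize_pattern('wildcard:/blog/<>')  -> ('wildcard', '/blog/<>')
# _normalize_pattern('regex:/item/\\d+')   -> ('regex', '/item/\\d+')
# _normalize_pattern('/about')             -> ('literal', '/about')  (no regex/wildcard markers)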
1,048,529