Default deserializer factory. Arguments: init (callable): type constructor. exposes (iterable): attributes to be peeked and passed to `init`. Returns: callable: deserializer (`peek` routine).
def peek(init, exposes, debug=False):
    def _peek(store, container, _stack=None):
        args = [store.peek(objname, container, _stack=_stack)
                for objname in exposes]
        if debug:
            print(args)
        return init(*args)
    return _peek
1,102,921
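A minimal usage sketch of the factory above; `DictStore` and `Point` are hypothetical stand-ins for a real store back-end and target type:

# Hypothetical store: peeks attribute values straight out of a dict container.
class DictStore:
    def peek(self, objname, container, _stack=None):
        return container[objname]

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

deserialize = peek(Point, ('x', 'y'))
p = deserialize(DictStore(), {'x': 1.0, 'y': 2.0})
assert (p.x, p.y) == (1.0, 2.0)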
Core engine for the automatic generation of storable instances. Finds the attributes exposed by the objects of a given type. Mostly Python3-only. Does not handle types whose `__new__` method requires extra arguments. Arguments: python_type (type): object type. Returns: list: attributes exposed.
def most_exposes(python_type):
    _exposes = set()
    try:
        # list all standard class attributes and methods:
        do_not_expose = set(python_type.__dir__(object) +
                            ['__slots__', '__module__', '__weakref__'])  # may raise `AttributeError`
        empty = python_type.__new__(python_type)  # may raise `TypeError`
    except AttributeError:
        # Py2 does not have `__dir__`
        try:
            _exposes = python_type.__slots__
        except AttributeError:
            pass
    except TypeError:
        # `__new__` requires input arguments
        for _workaround in storable_workarounds:
            try:
                _exposes = _workaround(python_type)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass
            else:
                break
    else:
        # note that slots from parent classes are not in `__dict__` (like all slots)
        # and - in principle - not in `__slots__` either.
        all_members = empty.__dir__()  # all slots are supposed to appear in this list
        for attr in all_members:
            if attr in do_not_expose:
                # note that '__dict__' is in `do_not_expose` (comes from `object`)
                continue
            try:
                # identify the methods and properties
                getattr(empty, attr)
            except AttributeError as e:
                # then `attr` might be a slot;
                # properties can still throw an `AttributeError`,
                # so try to filter some more out
                if e.args:
                    msg = e.args[0]
                    if msg == attr or msg.endswith("' object has no attribute '{}'".format(attr)):
                        _exposes.add(attr)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass
        for attr in ('__dict__',):
            if attr in all_members:
                _exposes.add(attr)
    return list(_exposes)
1,102,923
Default mechanics for building the storable instance for a type. Arguments: python_type (type): type. exposes (iterable): attributes exposed by the type. version (tuple): version number. storable_type (str): universal string identifier for the type. peek (callable): peeking routine. Returns: Storable: storable instance.
def default_storable(python_type, exposes=None, version=None,
                     storable_type=None, peek=default_peek):
    if not exposes:
        for extension in expose_extensions:
            try:
                exposes = extension(python_type)
            except (SystemExit, KeyboardInterrupt):
                raise
            except:
                pass
            else:
                if exposes:
                    break
        if not exposes:
            raise AttributeError('`exposes` required for type: {!r}'.format(python_type))
    return Storable(python_type, key=storable_type,
                    handlers=StorableHandler(version=version, exposes=exposes,
                                             poke=poke(exposes),
                                             peek=peek(python_type, exposes)))
1,102,924
Helper for tagging unserializable types. Arguments: _type (type): type to be ignored. Returns: Storable: storable instance that does not poke.
def not_storable(_type):
    return Storable(_type, handlers=StorableHandler(poke=fake_poke, peek=fail_peek(_type)))
1,102,938
Helper for forcing autoserialization of a datatype with an already-registered explicit storable instance. Arguments: service (StorableService): active storable service. _type (type): type to be autoserialized. **Not tested**
def force_auto(service, _type):
    storable = service.byPythonType(_type, istype=True)
    version = max(handler.version[0] for handler in storable.handlers) + 1
    _storable = default_storable(_type, version=(version, ))
    storable.handlers.append(_storable.handlers[0])
1,102,939
Serializer factory for types whose state can be natively serialized. Arguments: getstate (callable): takes an object and returns the object's state to be passed to `pokeNative`. Returns: callable: serializer (`poke` routine).
def poke_native(getstate):
    def poke(service, objname, obj, container, visited=None, _stack=None):
        service.pokeNative(objname, getstate(obj), container)
    return poke
1,102,940
Deserializer factory for types whose state can be natively serialized. Arguments: make (callable): type constructor. Returns: callable: deserializer (`peek` routine).
def peek_native(make):
    def peek(service, container, _stack=None):
        return make(service.peekNative(container))
    return peek
1,102,941
Simple handler with default `peek` and `poke` procedures. Arguments: init (callable): type constructor. exposes (iterable): attributes to be (de-)serialized. version (tuple): version number. Returns: StorableHandler: storable handler.
def handler(init, exposes, version=None):
    return StorableHandler(poke=poke(exposes), peek=peek(init, exposes), version=version)
1,102,942
Generate a default storable instance. Arguments: python_type (type): Python type of the object. storable_type (str): storable type name. version (tuple): version number of the storable handler. Returns: StorableHandler: storable instance. Extra keyword arguments are passed to :meth:`registerStorable`.
def defaultStorable(self, python_type=None, storable_type=None, version=None, **kwargs):
    if python_type is None:
        python_type = lookup_type(storable_type)
    if self.verbose:
        print('generating storable instance for type: {}'.format(python_type))
    self.storables.registerStorable(
        default_storable(python_type, version=version, storable_type=storable_type),
        **kwargs)
    return self.byPythonType(python_type, True).asVersion(version)
1,102,951
Generates the Sphinx configuration and Makefile. Args: organization (str): the organization name. package (str): the package to be documented. destination (str): the destination directory.
def generate(organization, package, destination):
    gen = ResourceGenerator(organization, package)
    tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
    try:
        tmp.write(gen.conf())
    finally:
        tmp.close()
    shutil.copy(tmp.name, os.path.join(destination, 'conf.py'))
    os.remove(tmp.name)  # with delete=False, the temp file must be removed explicitly
    tmp = tempfile.NamedTemporaryFile(mode='w+t', delete=False)
    try:
        tmp.write(gen.makefile())
    finally:
        tmp.close()
    shutil.copy(tmp.name, os.path.join(destination, 'Makefile'))
    os.remove(tmp.name)
1,103,128
Display a graph of the price history for the list of ticker symbols provided. Arguments: symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc. start (datetime): The date at the start of the period being analyzed. end (datetime): The date at the end of the period being analyzed. normalize (bool): Whether to normalize prices to 1 at the start of the time series.
def chart(
        symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
        start=datetime.datetime(2008, 1, 1),
        end=datetime.datetime(2009, 12, 31),  # data stops at 2013/1/1
        normalize=True,
):
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = [s.upper() for s in symbols]
    timeofday = datetime.timedelta(hours=16)
    timestamps = du.getNYSEdays(start, end, timeofday)
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data['close'].values
    if normalize:
        na_price /= na_price[0, :]
    plt.clf()
    plt.plot(timestamps, na_price)
    plt.legend(symbols)
    plt.ylabel('Adjusted Close')
    plt.xlabel('Date')
    plt.grid(True)  # enable the grid before saving so it appears in the PDF too
    plt.savefig('chart.pdf', format='pdf')
    plt.show()
    return na_price
1,103,205
Retrieve the prices of a list of equities as a DataFrame (columns = symbols). Arguments: symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc. e.g. ["AAPL", " slv ", "GLD", "GOOG", "$SPX", "XOM", "msft"] start (datetime): The date at the start of the period being analyzed. end (datetime): The date at the end of the period being analyzed. Yahoo data stops at 2013/1/1.
def price_dataframe(symbols='sp5002012',
                    start=datetime.datetime(2008, 1, 1),
                    end=datetime.datetime(2009, 12, 31),
                    price_type='actual_close',
                    cleaner=clean_dataframe,
                    ):
    if isinstance(price_type, basestring):
        price_type = [price_type]
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = normalize_symbols(symbols)
    t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    df = clean_dataframes(dataobj.get_data(t, symbols, price_type))
    if not df or len(df) > 1:
        return cleaner(df)
    else:
        return cleaner(df[0])
1,103,207
Calculate the combined price time series for a portfolio of equities. Arguments: symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc. start (datetime): The date at the start of the period being analyzed. end (datetime): The date at the end of the period being analyzed. normalize (bool): Whether to normalize prices to 1 at the start of the time series. allocation (list of float): The portion of the portfolio allocated to each equity.
def portfolio_prices(
        symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
        start=datetime.datetime(2005, 1, 1),
        end=datetime.datetime(2011, 12, 31),  # data stops at 2013/1/1
        normalize=True,
        allocation=None,
        price_type='actual_close',
):
    symbols = normalize_symbols(symbols)
    start = util.normalize_date(start)
    end = util.normalize_date(end)
    if allocation is None:
        allocation = [1. / len(symbols)] * len(symbols)
    if len(allocation) < len(symbols):
        allocation = list(allocation) + [1. / len(symbols)] * (len(symbols) - len(allocation))
    total = np.sum(allocation)  # was `np.sum(allocation.sum)`: a list has no `.sum` attribute
    allocation = np.array([(float(a) / total) for a in allocation])
    timestamps = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    ls_keys = [price_type]
    ldf_data = da.get_data(timestamps, symbols, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    na_price = d_data[price_type].values
    if normalize:
        na_price /= na_price[0, :]
    na_price *= allocation
    return np.sum(na_price, axis=1)
1,103,208
Find the start and end timestamps of the period that, when clipped, lowers the maximum of a time series the most. Assumes that the integrated maximum includes the peak (instantaneous maximum). Arguments: ts (TimeSeries): Time series to clip to as low a maximum value as possible. capacity (float): Total "funds" or "energy" available for clipping (integrated area under the time series). Returns: 2-tuple: Timestamps of the start and end of the period of the maximum clipped integrated increase.
def clipping_params(ts, capacity=100):
    ts_sorted = ts.sort_values(ascending=False)  # `Series.order` was removed in modern pandas
    i, t0, t1, integral = 1, None, None, 0
    # initialize so the final check cannot raise NameError when the loop never runs
    t0_within_capacity = t1_within_capacity = None
    while integral <= capacity and i + 1 < len(ts):
        i += 1
        t0_within_capacity = t0
        t1_within_capacity = t1
        t0 = min(ts_sorted.index[:i])
        t1 = max(ts_sorted.index[:i])
        integral = integrated_change(ts[t0:t1])
        print(i, t0, ts[t0], t1, ts[t1], integral)
    if t0_within_capacity and t1_within_capacity:
        return t0_within_capacity, t1_within_capacity
1,103,223
Add any predefined or custom extension. Args: extension: Extension to add to the processor. Returns: The DictMentor itself for chaining.
def bind(self, extension: Extension) -> 'DictMentor':
    if not Extension.is_valid_extension(extension):
        raise ValueError("Cannot bind extension due to missing interface requirements")
    self._extensions.append(extension)
    return self
1,103,331
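Because `bind` returns `self`, extensions can be attached fluently; `ext_env` and `ext_import` below are hypothetical instances of valid Extension subclasses:

# Any objects passing Extension.is_valid_extension can be chained like this.
mentor = DictMentor().bind(ext_env).bind(ext_import)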
Augments the given dictionary by using all the bound extensions. Args: dct: Dictionary to augment. document: The document the dictionary was loaded from. Returns: The augmented dictionary.
def augment(self, dct: NonAugmentedDict, document: Optional[YamlDocument] = None) -> AugmentedDict:
    Validator.instance_of(dict, raise_ex=True, dct=dct)
    # Apply any configured loader
    for instance in self._extensions:
        nodes = list(dict_find_pattern(dct, **instance.config()))
        for parent, k, val in nodes:
            parent.pop(k)
            fragment = instance.apply(ExtensionContext(
                mentor=self,
                document=document or dct,
                dct=dct,
                parent_node=parent,
                node=(k, val)
            ))
            if fragment is not None:
                parent.update(fragment)
    return dct
1,103,332
Outputs the report in a zip container. Figures and tables are saved as PNGs and Excel files. Args: figtype (str): Figure type of images in the zip folder.
def outputZip(self, figtype='png'):
    from zipfile import ZipFile
    with ZipFile(self.outfile + '.zip', 'w') as zipcontainer:
        zipcontainer.writestr(
            'summary.txt',
            '# {}\n\n{}\n{}'.format(
                self.title,
                self.p,
                # guard against a missing conclusion instead of concatenating None
                ('\n## Conclusion\n' + self.conclusion) if self.conclusion else ''
            ).encode()
        )
        c = count(1)
        for section in self.sections:
            section.sectionOutZip(
                zipcontainer,
                's{}_{}/'.format(next(c), section.title.replace(' ', '_')),
                figtype=figtype)
1,103,461
Parse region of type chr1:10-20 or chr1:10-20:+ Parameters: ----------- region : str Region of type chr1:10-20 or chr1:10-20:+. Returns ------- groups : tuple Tuple of groups from regex e.g. (chr1, 10, 20) or (chr1, 10, 20, +).
def parse_region(region):
    m = R_REGEX_STRAND.search(region)
    if not m:
        m = R_REGEX.search(region)
    if m:
        groups = m.groups()
        return groups
    else:
        return None
1,103,494
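A sketch of the module-level patterns the function relies on (the real `R_REGEX`/`R_REGEX_STRAND` definitions may differ), together with example calls:

import re

# Assumed shapes of the two patterns:
R_REGEX = re.compile(r'(.+):(\d+)-(\d+)')
R_REGEX_STRAND = re.compile(r'(.+):(\d+)-(\d+):([+-])')

parse_region('chr1:10-20')    # -> ('chr1', '10', '20')
parse_region('chr1:10-20:+')  # -> ('chr1', '10', '20', '+')
parse_region('not a region')  # -> None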
Serve the app using wsgiref or a provided server. Args: - server (callable): A callable that takes the WSGI app and serves it.
def serve(self, server=None):
    if server is None:
        from wsgiref.simple_server import make_server
        httpd = make_server('', 8000, self)
        print('Listening on 0.0.0.0:8000')
        try:
            httpd.serve_forever()
        finally:
            # only the wsgiref server created here exposes a socket to close;
            # the original closed `server.socket`, which a plain callable lacks
            httpd.socket.close()
    else:
        server(self)
1,103,514
Configure the Timing context manager. Args: verbose: Print elapsed time.
def __init__(self, *, verbose: bool = False) -> None:
    self.verbose = verbose
    self._start = None
    self.elapsed = None
1,103,824
Configure ``gettext`` for given package. Args: __pkg: Package to use as location for :program:`gettext` files Returns: :program:`gettext` functions for singular and plural translations
def setup(__pkg: ModuleType) -> Tuple[Callable[[str], str], Callable[[str, str, int], str]]:
    package_locale = path.join(path.dirname(__pkg.__file__), 'locale')
    gettext.install(__pkg.__name__, package_locale)
    return gettext.gettext, gettext.ngettext
1,104,055
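A usage sketch; `mypkg` is a hypothetical package shipping a `locale/` directory beside its `__init__.py`:

import mypkg  # hypothetical package with locale/ next to __init__.py

_, N_ = setup(mypkg)
print(_('hello'))                    # singular lookup
print(N_('%d file', '%d files', 3))  # plural lookup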
Validates a data dict against this schema. Args: data (dict): The data to be validated. Raises: ValidationError: If the data is invalid.
def validate(self, data):
    try:
        self._validator.validate(data)
    except jsonschema.ValidationError as e:
        six.raise_from(ValidationError.create_from(e), e)
1,104,325
Gets netCDF file metadata attributes. Arguments: nc (netCDF4.Dataset): an open NetCDF4 Dataset to pull attributes from. Returns: dict: Metadata as extracted from the netCDF file.
def get_nc_attrs(nc):
    meta = {
        'experiment': nc.experiment_id,
        'frequency': nc.frequency,
        'institute': nc.institute_id,
        'model': nc.model_id,
        'modeling_realm': nc.modeling_realm,
        'ensemble_member': 'r{}i{}p{}'.format(nc.realization,
                                              nc.initialization_method,
                                              nc.physics_version),
    }
    variable_name = get_var_name(nc)
    if variable_name:
        meta.update({'variable_name': variable_name})
    return meta
1,104,405
Appends a column to the raw data without any integrity checks. Args: default_value: The value which will be assigned (not copied) to each row.
def append_column(table, col_name, default_value=None):
    table[0].append(col_name.strip())
    for row in table[1:]:
        row.append(default_value)
1,104,551
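Usage sketch, assuming the header-row table convention shown in the function:

table = [['name', 'age'], ['ada', 36], ['alan', 41]]
append_column(table, ' active ', default_value=True)
# table is now [['name', 'age', 'active'], ['ada', 36, True], ['alan', 41, True]]
# note: default_value is the *same* object in every row, not a copy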
Inserts a new column before another specified column (by name or index). Args: insert_column: The column index or first-row name where the insertion should occur. col_name: The name to insert into the first row of the column. Leaving this argument at its default of None will apply the default_value to that row's cell. default_value: Can be a value or a function which takes (column_labels, row, column_index) as arguments and returns a value.
def insert_column(table, insert_column, col_name=None, default_value=None):
    column_labels = table[0]
    following_index = 0

    def set_cell(row, column_index, value):
        # Allow function calls
        if hasattr(value, '__call__'):
            row[column_index] = value(column_labels, row, column_index)
        else:
            row[column_index] = value

    if isinstance(insert_column, basestring):
        insert_column = insert_column.strip()
        for column_index in range(len(column_labels)):
            if column_labels[column_index] == insert_column:
                following_index = column_index
                break
    else:
        following_index = insert_column
    col_data_start = 0
    if col_name is not None:  # was `!= None`
        table[0].insert(following_index, col_name.strip())
        col_data_start = 1
    for row in table[col_data_start:]:
        row.insert(following_index, None)
        if default_value:
            set_cell(row, min(following_index, len(row) - 1), default_value)
1,104,553
Initialize the object's private attributes. Parameters: send (Callable): - callable send function; its parameters must include an ID and a method name. name (str): - name of the function to execute remotely. ID (str): - ID of the task to execute remotely.
def __init__(self, send: Callable, name: str, ID: str):
    # private
    self.__send = send
    self.__name = name
    self.__ID = ID
1,105,023
Execute the send task. Parameters: args (Any): - positional arguments for the remote function named <name>. kwargs (Any): - keyword arguments for the remote function named <name>. Return: (Any): - return value of the send function.
def __call__(self, *args: Any, **kwargs: Any):
    sys_method = ("listMethods", "methodSignature", 'methodHelp',
                  'lenConnections', 'lenUndoneTasks', 'getresult')
    if self.__name.startswith("system."):
        if self.__name.split(".")[-1] not in sys_method:
            raise UnsupportSysMethodError(
                "UnsupportSysMethod:{}".format(self.__name), self.__ID)
    return self.__send(self.__ID, self.__name, *args, **kwargs)
1,105,025
Pass output through pager. See :manpage:`less(1)`, if you wish to configure the default pager. For example, you may wish to check ``FRSX`` options. Args: __text: Text to page pager: Pager to use
def pager(__text: str, *, pager: Optional[str] = 'less'):
    if pager:
        run([pager, ], input=__text.encode())
    else:
        print(__text)
1,105,855
Add a file descriptor to the processor and wait for READ. Args: fd (IFileLike): Any object that exposes a 'fileno' method that returns a valid file descriptor integer. callback (typing.Callable[[IFileLike], typing.Any]): A function that consumes the IFileLike object whenever the READ event is fired.
def add_reader(
    self,
    fd: IFileLike,
    callback: typing.Callable[[IFileLike], typing.Any],
) -> None:
    raise NotImplementedError()
1,106,120
Root Mean Square. Arguments: x (seq of float): A sequence of numerical values. Returns: The square root of the average of the squares of the values: math.sqrt(sum(x_i**2 for x_i in x) / len(x)) or return (np.array(x) ** 2).mean() ** 0.5 >>> rms([0, 2, 4, 4]) 3.0
def rms(x):
    try:
        return (np.array(x) ** 2).mean() ** 0.5
    except Exception:  # fall back for sequences numpy cannot average directly (e.g. with NaNs)
        x = np.array(dropna(x))
        invN = 1.0 / len(x)
        return (sum(invN * (x_i ** 2) for x_i in x)) ** .5
1,106,256
Fill NaNs with the previous value, the next value or if all are NaN then 1.0 TODO: Linear interpolation and extrapolation Arguments: dfs (list of dataframes): list of dataframes that contain NaNs to be removed Returns: list of dataframes: list of dataframes with NaNs replaced by interpolated values
def clean_dataframes(dfs):
    if isinstance(dfs, (list)):
        # rebuild the list so the cleaned frames are actually returned;
        # the original reassigned the loop variable, which left `dfs` unchanged
        return [clean_dataframe(df) for df in dfs]
    else:
        return [clean_dataframe(dfs)]
1,106,261
Insert/append threshold crossing points (time and value) into a timeseries (pd.Series). Arguments: ts (pandas.Series): Time series of values to be interpolated at `thresh` crossings. thresh (float or np.float64): threshold value whose crossings are interpolated and inserted.
def insert_crossings(ts, thresh):
    # int64 for fast processing; pandas.DatetimeIndex is 5-10x slower (0.3 ms)
    index = ts.index
    index_type = type(index)
    ts.index = ts.index.astype(np.int64)
    # value immediately before an upward thresh crossing (6 ms)
    preup = ts[(ts < thresh) & (ts.shift(-1) > thresh)]
    # value immediately after an upward thresh crossing (4 ms)
    postup = ts[(ts.shift(1) < thresh) & (ts > thresh)]
    # value immediately after a downward thresh crossing (1.8 ms)
    postdown = ts[(ts < thresh) & (ts.shift(1) > thresh)]
    # value immediately before a downward thresh crossing (1.9 ms)
    predown = ts[(ts.shift(-1) < thresh) & (ts > thresh)]
    # upward slope (always positive) between preup and postup in units of
    # "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.04 ms
    slopeup = (postup.values - preup.values) / \
        (postup.index.values - preup.index.values).astype(np.float64)
    # upward crossing point index/time (0.04 ms)
    tup = preup.index.values + ((thresh - preup.values) / slopeup).astype(np.int64)
    # downward slope (always negative) between predown and postdown in units of
    # "value" per nanosecond (timestamps convert to floats as nanoseconds), 0.03 ms
    slopedown = (postdown.values - predown.values) / \
        (postdown.index.values - predown.index.values).astype(np.float64)
    # downward crossing point index/time (0.02 ms)
    tdown = predown.index.values + ((thresh - predown.values) / slopedown).astype(np.int64)
    ts.index = index  # pd.DatetimeIndex(ts.index)
    # insert crossing points into the time series
    # (if it had a regular sample period before, it won't now!), ~2.0 ms each
    ts = ts.append(pd.Series(thresh * np.ones(len(tup)),
                             index=index_type(tup.astype(np.int64))))
    ts = ts.append(pd.Series(thresh * np.ones(len(tdown)),
                             index=index_type(tdown.astype(np.int64))))
    # if you don't `sort_index()`, numerical integrators in `scipy.integrate`
    # will give the wrong answer (0.1 ms)
    ts = ts.sort_index()
    return ts
1,106,267
Init method. Args: value (str): value to match. method (const): Method constant, matching method.
def __init__(self, value, method=Method.PREFIX):
    self.value = value
    self.method = method
1,106,466
Check if given name matches. Args: name (str): name to check. Returns: bool: matches name.
def match(self, name):
    if self.method == Ex.Method.PREFIX:
        return name.startswith(self.value)
    elif self.method == Ex.Method.SUFFIX:
        return name.endswith(self.value)
    elif self.method == Ex.Method.CONTAINS:
        return self.value in name
    elif self.method == Ex.Method.EXACT:
        return self.value == name
    elif self.method == Ex.Method.REGEX:
        return re.search(self.value, name)
    return False
1,106,467
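Usage sketch, assuming `Ex` is the enclosing class with the `Method` constants:

ex = Ex('test_', method=Ex.Method.PREFIX)
ex.match('test_utils')   # True
ex.match('utils_test')   # False

rx = Ex(r'v\d+', method=Ex.Method.REGEX)
bool(rx.match('api_v2'))  # True (REGEX returns a match object, not a bool)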
Init method. Args: exclude_module (list): list of Ex instances. exclude_class (list): list of Ex instances. exclude_function (list): list of Ex instances. nested_class (bool): whether to get nested classes in classes. missing_doc (bool): whether to get doc even when empty.
def __init__(self, exclude_module=None, exclude_class=None,
             exclude_function=None, nested_class=False, missing_doc=True):
    self.exclude_module = exclude_module
    self.exclude_class = exclude_class
    self.exclude_function = exclude_function
    self.nested_class = nested_class
    self.missing_doc = missing_doc
1,106,468
Hashes the first time step. Will only work as long as the hash can fit in a uint64. Parameters: ----------- frame : first frame. Keywords: --------- removedupes: mark duplicates for the given frame. Returns a dictionary of everything needed to generate hashes from the genhash function.
def firsthash(frame, removedupes=False):
    # hashes must have i8 available,
    # otherwise we'll have overflow
    def avgdiff(d):
        d = np.sort(d)
        d = d[1:] - d[:-1]
        ret = np.average(d[np.nonzero(d)])
        if np.isnan(ret):
            return 1.0
        return ret

    def hasextent(l, eps=1e-10):
        # will I one day make pic sims on the pm scale??
        dim = frame['data'][l]
        return np.abs(dim.max() - dim.min()) > eps

    fields = list(frame['data'].dtype.names)
    dims = [i for i in ['xi', 'yi', 'zi'] if i in fields and hasextent(i)]
    ip = np.array([frame['data'][l] for l in dims]).T
    avgdiffs = np.array([avgdiff(a) for a in ip.T])
    mins = ip.min(axis=0)
    ips = (((ip - mins) / avgdiffs).round().astype('uint64'))
    pws = np.floor(np.log10(ips.max(axis=0))).astype('uint64') + 1
    pws = list(pws)
    pw = [0] + [ipw + jpw for ipw, jpw in zip([0] + pws[:-1], pws[:-1])]
    pw = 10**np.array(pw)  # .astype('int64')
    # the dictionary used for hashing
    d = dict(dims=dims, mins=mins, avgdiffs=avgdiffs, pw=pw)
    hashes = genhash(frame, removedupes=False, **d)
    if removedupes:
        # consider if the negation of this is faster for genhash
        uni, counts = np.unique(hashes, return_counts=True)
        d['dupes'] = uni[counts > 1]
        dupei = np.in1d(hashes, d['dupes'])
        hashes[dupei] = -1
        d['removedupes'] = True
    return hashes, d
1,106,777
Helper function to add hashes to the given frame, using the dictionary d returned from firsthash. Parameters: ----------- frame : frame to hash. Keywords: --------- same as genhash. Returns the frame with added hashes (they are also added in place).
def addhash(frame, **kw):
    hashes = genhash(frame, **kw)
    frame['data'] = rfn.rec_append_fields(frame['data'], 'hash', hashes)
    return frame
1,106,780
Obtain good hashes from a .p4 file, given a function that returns good hashes. Any keywords (e.g., the hash dict from firsthash) will be sent to read_and_hash. Parameters: ----------- fname -- filename of the file. f -- function that returns a list of good hashes.
def filter_hashes_from_file(fname, f, **kw):
    return np.concatenate([
        frame['data']['hash'][f(frame)]
        for frame in read_and_hash(fname, **kw)
    ])
1,106,783
Callback fired when a connection is established. Sets a few parameters, schedules the listening task on the event loop and, if a timeout is set, also schedules timeout_callback on the event loop. Parameters: transport (asyncio.Transports): - the transport object of the connection.
def connection_made(self, transport: asyncio.transports.Transport):
    self._transport = transport
    self._remote_host = self._transport.get_extra_info('peername')
    self._extra = {"client": str(self._remote_host)}
    self.connections.add(self)
    self._stream_reader = asyncio.StreamReader(loop=self._loop)
    self._stream_writer = asyncio.StreamWriter(transport, self,
                                               self._stream_reader, self._loop)
    super().connection_made(transport)
    if self.timeout:
        self._timeout_handler = self._loop.call_soon(self.timeout_callback)
    self._handlertask = asyncio.ensure_future(self.query_handler())
    if self.debug:
        access_logger.info("connected", extra=self._extra)
1,107,249
Callback fired when the connection is lost. Cleans up tasks and closes the connection, including: + cancelling the listening task + cancelling the timeout-monitoring task + cancelling any tasks not yet finished + resetting the stream reader and writer + removing this connection from the current connection pool. Parameters: exc (Exception): - the exception; if None, the connection was not closed because of an error.
def connection_lost(self, exc: Exception = None):
    self._handlertask.cancel()
    super().connection_lost(exc)
    if self._timeout_handler:
        self._timeout_handler.cancel()
    self._transport = None
    for i, task in self.tasks.items():
        task.cancel()
    self.connections.discard(self)
    if self.debug:
        access_logger.info("lost connection", extra=self._extra)
1,107,250
Serialize the response's Python structure to bytes, write it to the stream, and refresh the last-response time to now. Parameters: response (Dict[str, Any]): - the Python structure of the response to write to the client.
def writer(self, response: Dict[str, Any]):
    responseb = self.encoder(response)
    self._stream_writer.write(responseb)
    if self.debug:
        access_logger.info("write {}".format(responseb), extra=self._extra)
    self._last_response_time = time()
1,107,253
Verify whether the client is authorized to call the service. If the server has credentials configured, the request is checked against them: + if valid, a message answering the auth request is returned + if invalid, an authentication error is raised. If the server has no credentials configured: + if the request's credentials are empty too, the response is returned directly + if they are not empty, an authentication error is raised. Parameters: request (Dict[str, Any]): - the request as a Python dict. Return: (bool): - whether the request passed verification; True on success. Raise: (LoginError): - raised when verification fails.
def _check_auth_handler(self, request: Dict[str, Any]):
    a_username = request.get("AUTH").get("USERNAME")
    a_password = request.get("AUTH").get("PASSWORD")
    auth_len = len(self.auth)
    if auth_len == 0:
        if any([a_username, a_password]):
            if self.debug:
                access_logger.info("login failed", extra=self._extra)
            raise LoginError("login error, unknown username/password")
        else:
            return True
    else:
        for username, password in self.auth:
            if all([a_username == username, a_password == password]):
                response = {
                    "MPRPC": self.VERSION,
                    "CODE": 100,
                    "VERSION": self.method_wrapper.version,
                    "DESC": self.method_wrapper.__doc__,
                    "DEBUG": self.debug,
                    "COMPRESER": self.compreser.__name__ if (
                        self.compreser) else None,
                    "TIMEOUT": self.timeout,
                }
                self.writer(response)
                if self.debug:
                    access_logger.info("login succeed", extra=self._extra)
                break
        else:
            if self.debug:
                access_logger.info("login failed", extra=self._extra)
            raise LoginError("login error, unknown username/password")
    return True
1,107,256
Call the requested function and execute it; if execution fails, convert the error into the corresponding call error and return it to the client. On success the result is handled according to its kind: if the registered object is a function, an instance method or a coroutine, the computed result is returned to the client; if it is an async generator function, the result is a corresponding async generator, which is wrapped and iterated to implement streaming. Parameters: request (Dict[str, Any]): - the request as a Python dict. Raise: (Exception): - re-raised when the call raises an exception outside the defined range. Return: (bool): - True on a normal call; False if an exception within the defined range was raised.
async def _RPC_handler(self, request: Dict[str, Any]):
    ID = request.get("ID")
    method = request.get("METHOD")
    with_return = request.get("RETURN")
    args = request.get("ARGS") or []
    kwargs = request.get("KWARGS") or {}
    try:
        if method is None:
            raise RequestError("request do not have method", request.get("ID"))
        if method == "system.getresult":
            await self._get_result(ID, *args, **kwargs)
        else:
            result = await self.method_wrapper.apply(ID, method, *args, **kwargs)
    except MethodError as se:
        exinfo = traceback.TracebackException.from_exception(se).format(chain=True)
        frames = "".join([i + "\n" for i in exinfo])  # was "/n", a typo for the newline escape
        response = {
            "MPRPC": self.VERSION,
            "CODE": se.status_code,
            "MESSAGE": {
                "ID": ID,
                'EXCEPTION': str(type(se)),
                'MESSAGE': str(se),
                "DATA": {
                    'METHOD': request.get("METHOD"),
                    "ARGS": request.get("ARGS"),
                    "KWARGS": request.get("KWARGS"),
                    'FRAME': frames}
            }
        }
        self.writer(response)
        return False
    except ServerException as me:
        response = {
            "MPRPC": self.VERSION,
            "CODE": me.status_code,
        }
        self.writer(response)
        return False
    except Exception as e:
        if self.debug is True:
            raise e
        else:
            logger.info(
                "Task[{}]: Unknown Error {}:\nmessage:{}".format(
                    ID, e.__class__.__name__, str(e))
            )
    else:
        if with_return:
            if inspect.isasyncgen(result):
                await self._asyncgen_wrap(result, ID)
            else:
                response = {
                    "MPRPC": self.VERSION,
                    "CODE": 200,
                    "MESSAGE": {
                        "ID": ID,
                        'RESULT': result
                    }
                }
                self.writer(response)
                if self.debug:
                    access_logger.info(
                        "Task[{}]: response answered".format(ID),
                        extra=self._extra)
        return result
1,107,257
Stream wrapper. Streams data by iterating over an async generator. Parameters: cor (AsyncIterator): - the async iterator. ID (str): - the task ID. Return: (bool): - True on a normal call.
async def _asyncgen_wrap(self, cor: AsyncIterator, ID: str):
    response = {
        "MPRPC": self.VERSION,
        "CODE": 201,
        "MESSAGE": {
            "ID": ID
        }
    }
    self.writer(response)
    if self.debug:
        access_logger.info(
            "Task[{}]: response stream start".format(ID), extra=self._extra)
    async for i in cor:
        response = {
            "MPRPC": self.VERSION,
            "CODE": 202,
            "MESSAGE": {
                "ID": ID,
                'RESULT': i
            }
        }
        self.writer(response)
        if self.debug:
            access_logger.info(
                "Task[{}]: response stream yield".format(ID), extra=self._extra)
    response = {
        "MPRPC": self.VERSION,
        "CODE": 206,
        "MESSAGE": {
            "ID": ID
        }
    }
    self.writer(response)
    if self.debug:
        access_logger.info(
            "Task[{}]: response stream end".format(ID), extra=self._extra)
    return True
1,107,258
Take lists of strings and/or strings and flatten them into a single list of strings. Arguments: - `*args`: "" or [""...] Return: [""...] Exceptions: None
def _stringlist(*args):
    return list(itertools.chain.from_iterable(
        itertools.repeat(x, 1) if stringy(x) else x for x in args if x))
1,107,449
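Example of the flattening behaviour (falsy arguments such as None or '' are dropped):

_stringlist('a', ['b', 'c'], None, 'd')  # -> ['a', 'b', 'c', 'd']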
Parse an outgoing mail and put it into the OUTBOX. Arguments: - `sender`: str - `to`: str - `msgstring`: str Return: None Exceptions: None
def _parse_outgoing_mail(sender, to, msgstring):
    global OUTBOX
    OUTBOX.append(email.message_from_string(msgstring))
    return
1,107,450
Make sure that our addressees are unicode, joined as a comma-separated string. Arguments: - `to`: str or list Return: unicode Exceptions: None
def tolist(self, to):
    return ', '.join(isinstance(to, list) and [u(x) for x in to] or [u(to)])
1,107,453
Sanity check the message. If we have neither a PLAIN nor an HTML version, raise NoContentError. Arguments: - `sender`: str - `to`: list - `subject`: str - `plain`: str - `html`: str Return: None Exceptions: NoContentError
def sanity_check(self, sender, to, subject, plain=None, html=None, cc=None, bcc=None):
    if not plain and not html:
        raise NoContentError()
1,107,454
Deliver our message Arguments: - `message`: MIMEMultipart Return: None Exceptions: None
def deliver(self, message, to):
    # Send the message via local SMTP server.
    s = smtplib.SMTP(self.host, self.port)
    # sendmail takes 3 arguments: sender's address, recipient's address
    # and message to send - here it is sent as one string.
    s.sendmail(message['From'], to, message.as_string())
    s.quit()
    return
1,107,456
Deliver our message Arguments: - `message`: MIMEMultipart Return: None Exceptions: None
def deliver(self, message, to):
    # Send the message via local SMTP server.
    s = smtplib.SMTP(self.host, self.port)
    s.ehlo()
    s.starttls()
    s.login(self.user, self.pw)
    # sendmail takes 3 arguments: sender's address, recipient's address
    # and message to send - here it is sent as one string.
    s.sendmail(message['From'], to, message.as_string())
    s.quit()
    return
1,107,458
Send the message. If we have PLAIN and HTML versions, send a multipart alternative MIME message, else send whichever we do have. If we have neither, raise NoContentError Arguments: - `sender`: str - `to`: list - `subject`: str - `plain`: str - `html`: str - `attach`: str or iterable of str - `replyto`: str Return: None Exceptions: NoContentError
def send(self, sender, to, subject, plain=None, html=None, cc=None, bcc=None,
         attach=None, replyto=None):
    headers = {}
    if attach:
        raise NotImplementedError('Attachments not implemented for Django yet!')
    if replyto:
        headers['Reply-To'] = replyto
    self.sanity_check(sender, to, subject, plain=plain, html=html, cc=cc, bcc=bcc)
    if not cc:
        cc = []
    if not bcc:
        bcc = []
    # This comes straight from the docs at
    # https://docs.djangoproject.com/en/dev/topics/email/
    from django.core.mail import EmailMultiAlternatives
    if not plain:
        plain = ''
    msg = EmailMultiAlternatives(u(subject), u(plain), u(sender), _stringlist(to),
                                 bcc=bcc, cc=cc, headers=headers)
    if html:
        msg.attach_alternative(ensure_unicode(html), "text/html")
    msg.send()
    return
1,107,459
Return a Path object representing the template we're after, searching SELF.tpls, or None if it cannot be found. Arguments: - `name`: str Return: Path or None Exceptions: None
def _find_tpl(self, name, extension='.jinja2'):
    found = None
    for loc in self.tpls:
        if not loc:
            continue
        contents = [f for f in loc.ls()
                    if f.find(name) != -1 and f.endswith(extension)]
        if contents:
            found = contents[0]
            break
        exact = loc + (name + extension)
        if exact.is_file:
            found = exact
    return found
1,107,461
Return plain, html templates for NAME Arguments: - `name`: str Return: tuple Exceptions: None
def _find_tpls(self, name):
    return self._find_tpl(name, extension='.txt'), self._find_tpl(name, extension='.html')
1,107,462
Send a Letter (MESSAGE) from SENDER to TO, with the subject SUBJECT. Arguments: - `sender`: unicode - `to`: unicode - `subject`: unicode - `message`: unicode - `cc`: str or [str] - `bcc`: str or [str] - `replyto`: str Return: None Exceptions: None
def _send(self, sender, to, subject, message, cc=None, bcc=None,
          attach=None, replyto=None):
    self.mailer.send(sender, to, subject, plain=message, cc=cc, bcc=bcc,
                     attach=attach, replyto=replyto)
    return
1,107,463
Send a Letter from SENDER to TO, with the subject SUBJECT. Use the current template, with KWARGS as the context. Arguments: - `sender`: unicode - `to`: unicode - `subject`: unicode - `cc`: str or [str] - `bcc`: str or [str] - `replyto`: str - `**kwargs`: objects Return: None Exceptions: None
def _sendtpl(self, sender, to, subject, cc=None, bcc=None, attach=None,
             replyto=None, **kwargs):
    plain, html = self.body(**kwargs)
    self.mailer.send(sender, to, subject, plain=plain, html=html, cc=cc,
                     bcc=bcc, replyto=replyto, attach=attach)
    return
1,107,464
Set an active template to use with our Postman. This changes the call signature of send. Arguments: - `name`: str Return: None Exceptions: None
def template(self, name):
    self.plain, self.html = self._find_tpls(name)
    if not self.plain:
        self.plain = self._find_tpl(name)
    try:
        self.send = self._sendtpl
        yield
    finally:
        self.plain, self.html = None, None
        self.send = self._send
1,107,466
Find the callable methods of an object. Parameters: obj (Any): - the object to inspect. Returns: (List[str]): - names of all public methods of the object.
def list_public_methods(obj: Any):
    return [member for member in dir(obj)
            if not member.startswith('_') and callable(getattr(obj, member))]
1,107,594
Decorator to enable DebugPrint for a given function. Args: __func: Function to wrap Returns: Wrapped function
def noisy_wrap(__func: Callable) -> Callable:
    # pylint: disable=missing-docstring
    def wrapper(*args, **kwargs):
        DebugPrint.enable()
        try:
            __func(*args, **kwargs)
        finally:
            DebugPrint.disable()
    return wrapper
1,107,676
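Usage sketch; `DebugPrint` is toggled around the wrapped call:

@noisy_wrap
def task():
    print('working')  # routed through DebugPrint while it is enabled

task()  # DebugPrint.enable() runs before the body, DebugPrint.disable() after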
Decorator to display a message when entering a function. Args: __msg: Message to display Returns: Wrapped function
def on_enter(__msg: Optional[Union[Callable, str]] = None) -> Callable:
    # pylint: disable=missing-docstring
    def decorator(__func):
        @wraps(__func)
        def wrapper(*args, **kwargs):
            if __msg:
                print(__msg)
            else:
                print('Entering {!r}({!r})'.format(__func.__name__, __func))
            return __func(*args, **kwargs)
        return wrapper
    if callable(__msg):
        return on_enter()(__msg)
    return decorator
1,107,677
Write text to the debug stream. Args: __text: Text to write
def write(self, __text: str) -> None:
    if __text == os.linesep:
        self.handle.write(__text)
    else:
        frame = inspect.currentframe()
        if frame is None:
            filename = 'unknown'
            lineno = 0
        else:
            outer = frame.f_back
            filename = outer.f_code.co_filename.split(os.sep)[-1]
            lineno = outer.f_lineno
        self.handle.write('[{:>15s}:{:03d}] {}'.format(filename[-15:], lineno, __text))
1,107,678
Save dictionary with user data to a passwd file (default :attr:`ftp.settings.LOGIN_FILE`). Args: users (dict): dictionary with user data. For details look at the dict returned from :func:`load_users`. path (str, default settings.LOGIN_FILE): path of the file where the data will be stored.
def save_users(users, path=settings.LOGIN_FILE):
    with open(path, "w") as fh:
        for username, data in users.items():
            pass_line = username + ":" + ":".join([
                data["pass_hash"],
                data["uid"],
                data["gid"],
                data["full_name"],
                data["home"],
                data["shell"]
            ])
            fh.write(pass_line + "\n")
1,107,712
Set permissions for given `filename`. Args: filename (str): name of the file/directory. uid (int, default proftpd): user ID - if not set, the user ID of `proftpd` is used. gid (int): group ID; if not set, it is not changed. mode (int, default 0775): unix access mode.
def set_permissions(filename, uid=None, gid=None, mode=0o775):  # 0o775 parses on Python 2.6+ and 3
    if uid is None:
        uid = get_ftp_uid()
    if gid is None:
        gid = -1
    os.chown(filename, uid, gid)
    os.chmod(filename, mode)
1,107,713
Restores the full text of the edited version from the compressed diff. Args: cdiff (dict): compressed diff returned by :func:`~acorn.logging.diff.compress`. a (str or list): *original* string or list of strings to use as a reference to restore the edited version.
def restore(cdiff, a):
    left = a.splitlines(1) if isinstance(a, string_types) else a
    lrest = []
    iline = 0
    for i, line in enumerate(left):
        if iline not in cdiff:
            lrest.append("  " + line)  # two-space prefix: ndiff's marker for unchanged lines
            iline += 1
        else:
            cs = [l[0] for l in cdiff[iline]]
            add = cs.count('+') - cs.count('-')
            lrest.extend(cdiff[iline])
            iline += add + 1
    for i in sorted(cdiff.keys()):
        if i >= len(left):
            lrest.extend(cdiff[i])
    from difflib import restore
    return list(restore(lrest, 2))
1,107,792
Performs the *compressed* diff of `a` and `b` such that the original contents of the :func:`difflib.ndiff` call can be reconstructed using :func:`~acorn.logging.diff.restore`. Args: a (str or list): *original* string or list of strings to diff. b (str or list): *edited* string or list of strings to diff.
def compress(a, b):
    from difflib import ndiff
    left = a.splitlines(1) if isinstance(a, string_types) else a
    right = b.splitlines(1) if isinstance(b, string_types) else b
    ldiff = list(ndiff(left, right))
    result = {}
    latest = None
    combo = None
    icombo = 0
    iorig = 0
    for i, line in enumerate(ldiff):
        cs = [l[0] for l in ldiff[i:min((i + 4, len(ldiff)))]]
        if cs[0] != ' ':
            # Initialize a new entry in the diff list.
            if latest is None:
                latest = iorig
                result[latest] = []
            # We have to be careful. At a minimum, there may be a '-' or a '+' when the lines are
            # completely added or deleted. When they are *altered*, then we also expect one or
            # more '?' lines showing the differences.
            if combo is None:
                if cs[0] == '-':
                    # Check whether the next lines have one of these combinations:
                    if (len(cs) >= 3 and cs[1] == '+' and cs[2] == '?'):
                        combo = 3
                    elif (len(cs) >= 4 and cs[1] == '?' and cs[2] == '+' and cs[3] == '?'):
                        combo = 4
                    else:
                        # This is a stand-alone deletion.
                        combo = 1
                elif cs[0] == '+':
                    # This is for the stand-alone addition.
                    combo = 1
            if icombo < combo:
                result[latest].append(line)
                icombo += 1
            if icombo == combo:
                if combo > 1:
                    latest = None
                combo = None
                icombo = 0
                if cs[0] != '+':
                    iorig += 1
        else:
            latest = None
            iorig += 1
    return result
1,107,793
Records the specified markdown text to the acorn database. Args: text (str): the *raw* markdown text entered into the cell in the ipython notebook. cellid: identifier of the notebook cell the text belongs to.
def record_markdown(text, cellid):
    from acorn.logging.database import record
    from time import time
    ekey = "nb-{}".format(cellid)
    global _cellid_map
    if cellid not in _cellid_map:
        from acorn.logging.database import active_db
        from difflib import SequenceMatcher
        from acorn.logging.diff import cascade
        taskdb = active_db()
        if ekey not in taskdb.entities:
            # Compute a new ekey if possible with the most similar markdown cell
            # in the database.
            possible = [k for k in taskdb.entities if k[0:3] == "nb-"]
            maxkey, maxvalue = None, 0.
            for pkey in possible:
                sequence = [e["c"] for e in taskdb.entities[pkey]]
                state = ''.join(cascade(sequence))
                matcher = SequenceMatcher(a=state, b=text)
                ratio = matcher.quick_ratio()
                if ratio > maxvalue and ratio > 0.5:
                    maxkey, maxvalue = pkey, ratio
            # We expect the similarity to be at least 0.5; otherwise we decide
            # that it is a new cell.
            if maxkey is not None:
                ekey = maxkey  # was `ekey = pkey`, which leaked the loop variable
        _cellid_map[cellid] = ekey
    ekey = _cellid_map[cellid]
    entry = {
        "m": "md",
        "a": None,
        "s": time(),
        "r": None,
        "c": text,
    }
    record(ekey, entry, diff=True)
1,107,870
Loads the interacting decorator that ships with `acorn` into the ipython interactive shell. Args: ip (IPython.core.interactiveshell.InteractiveShell): ipython shell instance for interacting with the shell variables.
def load_ipython_extension(ip):
    decor = InteractiveDecorator(ip)
    ip.events.register('post_run_cell', decor.post_run_cell)
    # Unfortunately, the built-in "pre-execute" and "pre-run" methods are
    # triggered *before* the input from the cell has been stored to
    # history. Thus, we don't have access to the actual code that is about to be
    # executed. Instead, we use our own :class:`HistoryManager` that overrides
    # the :meth:`store_inputs` so we can handle the loop detection.
    newhist = AcornHistoryManager(ip.history_manager, decor)
    ip.history_manager = newhist
1,107,871
Store source and raw input in history and create input cache variables ``_i*``. Args: line_num (int): The prompt number of this input. source (str): Python input. source_raw (str): If given, this is the raw input without any IPython transformations applied to it. If not given, ``source`` is used.
def store_inputs(self, line_num, source, source_raw=None):
    self.old.store_inputs(line_num, source, source_raw)
    # Now that the input has been stored correctly, intercept the
    # pre-execution and create logs accordingly.
    self.decorator.pre_run_cell(line_num, source)
1,107,874
Returns a list of the objects that need to be decorated in the current user namespace based on their type. Args: atype (str): one of the values in :attr:`atypes`. Specifies the type of object to search.
def _get_decoratables(self, atype):
    result = []
    defmsg = "Skipping {}; not decoratable or already decorated."
    for varname in self.shell.run_line_magic("who_ls", atype):
        varobj = self.shell.user_ns.get(varname, None)
        decorate = False
        if varobj is None:
            # Nothing useful can be done.
            continue
        if atype in ["classobj", "type"]:
            # Classes are only relevant if they have no __file__
            # attribute; all other classes should be decorated by the
            # full acorn machinery.
            if (not hasattr(varobj, "__acorn__") and
                    hasattr(varobj, "__module__") and
                    varobj.__module__ == "__main__" and
                    not hasattr(varobj, "__file__")):
                decorate = True
            else:
                msg.std(defmsg.format(varname), 3)
        elif atype in ["function", "staticmethod"]:
            # %who_ls will only return functions from the *user*
            # namespace, so we don't have a lot to worry about here.
            func = None
            if atype == "staticmethod" and hasattr(varobj, "__func__"):
                func = varobj.__func__
            elif atype == "function":
                func = varobj
            if (func is not None and
                    not hasattr(func, "__acorn__") and
                    hasattr(func, "__code__") and
                    "<ipython-input" in func.__code__.co_filename):
                decorate = True
            else:
                msg.std(defmsg.format(varname), 3)
        if decorate:
            self.entities[atype][varname] = varobj
            result.append((varname, varobj))
    return result
1,107,876
Decorates the specified object for automatic logging with acorn. Args: atype (str): one of the types specified in :attr:`atypes`. n (str): name of the object in the user namespace. o: object instance to decorate; no additional type checking is performed.
def _decorate(self, atype, n, o):
    typemap = {"function": "functions",
               "classobj": "classes",
               "staticmethod": "methods",
               "type": "classes"}
    from acorn.logging.decoration import decorate_obj
    try:
        otype = typemap[atype]
        decorate_obj(self.shell.user_ns, n, o, otype)
        # Also create a log in the database for this execution; this allows a
        # user to track the changes they make in prototyping function and
        # class definitions.
        self._logdef(n, o, otype)
        msg.okay("Auto-decorated {}: {}.".format(n, o))
    except:
        msg.err("Error auto-decorating {}: {}.".format(n, o))
        raise
1,107,878
Determines the most similar cell (if any) to the specified code. It must have at least 50% overlap ratio and have been a loop-intercepted cell previously. Args: code (str): contents of the code cell that were executed.
def _find_cellid(self, code):
    from difflib import SequenceMatcher
    maxvalue = 0.
    maxid = None
    for cellid, c in self.cellids.items():
        matcher = SequenceMatcher(a=c, b=code)
        ratio = matcher.quick_ratio()
        if ratio > maxvalue and ratio > 0.5:
            maxid, maxvalue = cellid, ratio
    return maxid
1,107,879
Executes before the user-entered code in `ipython` is run. This intercepts loops and other problematic code that would produce lots of database entries and streamlines it to produce only a single entry. Args: cellno (int): the cell number that is about to be executed. code (str): python source code that is about to be executed.
def pre_run_cell(self, cellno, code):
    # First, we look for loops and list/dict comprehensions in the code. Find
    # the id of the latest cell that was executed.
    self.cellid = cellno
    # If there is a loop somewhere in the code, it could generate millions of
    # database entries and make the notebook unusable.
    import ast
    if findloop(ast.parse(code)):
        # Disable the acorn logging systems so that we don't pollute the
        # database.
        from acorn.logging.decoration import set_streamlining
        set_streamlining(True)
        # Create the pre-execute entry for the database.
        from time import time
        self.pre = {
            "m": "loop",
            "a": None,
            "s": time(),
            "r": None,
            "c": code,
        }
1,107,883
Decorator for marking a function as a graft. Parameters: namespace (str): namespace of data, same format as targeting. Returns: Graft For example, these grafts:: @graft def foo_data(): return {'foo': True} @graft(namespace='bar') def bar_data(): return False will be rendered as:: { 'foo': True, 'bar': False }
def graft(func=None, *, namespace=None):
    if not func:
        return functools.partial(graft, namespace=namespace)
    if isinstance(func, Graft):
        return func
    return Graft(func, namespace=namespace)
1,108,080
Magical loading of all grafted functions. Parameters: force (bool): force reload
def load(force=False):
    if GRAFTS and not force:
        return GRAFTS
    # insert missing paths
    # this could be a configurable item
    userpath = settings.userpath
    if os.path.isdir(userpath) and userpath not in __path__:
        __path__.append(userpath)

    def notify_error(name):
        logging.error('unable to load %s package' % name)

    # autoload decorated functions
    walker = walk_packages(__path__, '%s.' % __name__, onerror=notify_error)
    for module_finder, name, ispkg in walker:
        loader = module_finder.find_module(name)
        mod = loader.load_module(name)
        for func in mod.__dict__.values():
            if is_graft(func):
                GRAFTS.append(func)
    # append setuptools modules
    for entry_point in iter_entry_points(group=settings.entry_point):
        try:
            func = entry_point.load()
            if is_graft(func):
                GRAFTS.append(func)
            else:
                notify_error(entry_point.name)
        except Exception as error:
            logging.exception(error)
            notify_error(entry_point.name)
    return GRAFTS
1,108,081
Print out the animation cycle to stdout. This function is for use with synchronous functions and must be run in a thread. Args: animation_ (generator): A generator that produces strings for the animation. Should be endless. step (float): Seconds between each animation frame. event (threading.Event): Set to stop the animation loop.
def animate_cli(animation_, step, event):
    while True:  # run at least once, important for tests!
        time.sleep(step)
        frame = next(animation_)
        sys.stdout.write(frame)
        sys.stdout.flush()
        if event.is_set():
            break
    sys.stdout.write(animation_.get_erase_frame())
    sys.stdout.flush()
    animation_.reset()
1,108,226
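A wiring sketch, assuming an animation generator object with the `get_erase_frame`/`reset` interface used above; `animation` and `do_long_running_work` are hypothetical placeholders:

import threading

stop = threading.Event()
worker = threading.Thread(target=animate_cli, args=(animation, 0.1, stop), daemon=True)
worker.start()
do_long_running_work()  # hypothetical synchronous call being animated
stop.set()              # signal the animation loop to finish and clean up
worker.join()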
Make sure that all items in `array` have the correct type and size. Args: array (list): Array of python types. Returns: True/False
def _all_correct_list(array):
    if type(array) not in _ITERABLE_TYPES:
        return False
    for item in array:
        if not type(item) in _ITERABLE_TYPES:
            return False
        if len(item) != 2:
            return False
    return True
1,108,325
Convert `data` to dictionary. Tries to get sense in multidimensional arrays. Args: data: List/dict/tuple of variable dimension. Returns: dict: If the data can be converted to dictionary. Raises: MetaParsingException: When the data are unconvertible to dict.
def _convert_to_dict(data):
    if isinstance(data, dict):
        return data
    if isinstance(data, list) or isinstance(data, tuple):
        if _all_correct_list(data):
            return dict(data)
        else:
            data = zip(data[::2], data[1::2])
            return dict(data)
    else:
        raise MetaParsingException(
            "Can't decode provided metadata - unknown structure."
        )
1,108,326
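Both accepted list shapes, for illustration:

_convert_to_dict([('a', 1), ('b', 2)])  # pairs            -> {'a': 1, 'b': 2}
_convert_to_dict(['a', 1, 'b', 2])      # flat key/values  -> {'a': 1, 'b': 2}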
Check whether the structure is a flat dictionary. If not, try to convert it to a dictionary. Args: data: Whatever data you have (dict/tuple/list). Returns: dict: When the conversion was successful or `data` was already `good`. Raises: MetaParsingException: When the data couldn't be converted or had a `bad` structure.
def check_structure(data):
    if not isinstance(data, dict):
        try:
            data = _convert_to_dict(data)
        except MetaParsingException:
            raise
        except:
            raise MetaParsingException(
                "Metadata format has invalid structure (dict is expected)."
            )
    for key, val in data.iteritems():
        if type(key) not in _ALLOWED_TYPES:
            raise MetaParsingException(
                "Can't decode the meta file - invalid type of keyword '" +
                str(key) + "'!"
            )
        if type(val) not in _ALLOWED_TYPES:
            raise MetaParsingException(
                "Can't decode the meta file - invalid type of value for keyword '" +
                str(key) + "'!"
            )
    return data
1,108,327
Check whether `key` matches the :attr:`keyword`. If so, set :attr:`value` to `value`. Args: key (str): Key which will be matched against :attr:`keyword`. value (str): Value which will be assigned to :attr:`value` if the keys match. Returns: True/False: Whether the key matched :attr:`keyword`.
def check(self, key, value):
    key = key.lower().strip()
    # try unicode conversion
    try:
        key = key.decode("utf-8")
    except UnicodeEncodeError:
        pass
    key = self._remove_accents(key)
    if self.keyword in key.split():
        self.value = value
        return True
    return False
1,108,329
Formats an image. Args: path (str): Path to the image file. options (dict): Options to apply to the image. Returns: (list) A list of PIL images. The list will always be of length 1 unless resolutions for resizing are provided in the options.
def format_image(path, options):
    image = Image.open(path)
    image_pipeline_results = __pipeline_image(image, options)
    return image_pipeline_results
1,108,378
Sends an image through a processing pipeline. Applies all (relevant) provided options to a given image. Args: image: An instance of a PIL Image. options: Options to apply to the image (i.e. resolutions). Returns: A list containing instances of PIL Images. This list will always be length 1 if no options exist that require multiple copies to be created for a single image (i.e resolutions).
def __pipeline_image(image, options):
    results = []
    # Begin pipeline
    # 1. Create image copies for each resolution
    if 'resolutions' in options:
        resolutions = options['resolutions']  # List of resolution tuples
        for res in resolutions:
            img_rs = resize(image, res)  # Resized image
            # Add image to result set. This result set will be pulled from
            # throughout the pipelining process to perform more processing (watermarking).
            results.append(img_rs)
    # 2. Apply watermark to each image copy
    if 'wmark-img' in options:
        wtrmk_path = options['wmark-img']
        if wtrmk_path:
            if len(results) == 0:
                image = watermark_image(image, wtrmk_path)  # watermark the original image
            else:
                for i in range(0, len(results)):
                    results[i] = watermark_image(results[i], wtrmk_path)
    if 'wmark-txt' in options:
        wtrmk_txt = options['wmark-txt']
        if wtrmk_txt:
            if len(results) == 0:
                image = watermark_text(image, wtrmk_txt)  # watermark the original image
            else:
                for i in range(0, len(results)):
                    results[i] = watermark_text(results[i], wtrmk_txt)
    # Fallback: Nothing was done to the image
    if len(results) == 0:
        results.append(image)
    return results
1,108,379
Returns a set of strings to be used as Slots with Pabianas default Clock. Args: layer: The layer in the hierarchy this Area is placed in. Technically, the number specifies how many of the Clocks signals are relevant to the Area. Between 1 and limit. limit: The number of layers of the hierarchy.
def multiple(layer: int, limit: int) -> Set[str]:
    return {str(x).zfill(2)
            for x in [2**x for x in range(limit)]
            if x % 2**(layer - 1) == 0}
1,108,477
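Worked example for a three-layer hierarchy:

multiple(1, 3)  # {'01', '02', '04'} - layer 1 reacts to every Clock signal
multiple(2, 3)  # {'02', '04'}       - layer 2 reacts to every second signal
multiple(3, 3)  # {'04'}             - layer 3 reacts to every fourth signal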
Search a collection for the query provided. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results.
def find(self, collection, query):
    obj = getattr(self.db, collection)
    result = obj.find(query)
    return result
1,108,619
Search a collection for all available items. Args: collection: The db collection. See main class documentation. Returns: List of all items in the collection.
def find_all(self, collection):
    obj = getattr(self.db, collection)
    result = obj.find()
    return result
1,108,620
Search a collection for the query provided and return one result. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: The single matching document, or None if there is no match.
def find_one(self, collection, query):
    obj = getattr(self.db, collection)
    result = obj.find_one(query)
    return result
1,108,621
Search a collection for the distinct key values provided. Args: collection: The db collection. See main class documentation. key: The name of the key to find distinct values. For example with the indicators collection, the key could be "type". Returns: List of distinct values.
def find_distinct(self, collection, key):
    obj = getattr(self.db, collection)
    result = obj.distinct(key)
    return result
1,108,622
Adds an embedded campaign to the TLO. Args: id: the CRITs object id of the TLO collection: The db collection. See main class documentation. campaign: The campaign to assign. confidence: The campaign confidence analyst: The analyst making the assignment date: The date of the assignment description: A description Returns: The resulting mongo object
def add_embedded_campaign(self, id, collection, campaign, confidence,
                          analyst, date, description):
    if type(id) is not ObjectId:
        id = ObjectId(id)
    # TODO: Make sure the object does not already have the campaign.
    # Return if it does. Add it if it doesn't.
    obj = getattr(self.db, collection)
    result = obj.find({'_id': id, 'campaign.name': campaign})
    if result.count() > 0:
        return
    else:
        log.debug('Adding campaign to set: {}'.format(campaign))
        campaign_obj = {
            'analyst': analyst,
            'confidence': confidence,
            'date': date,
            'description': description,
            'name': campaign
        }
        result = obj.update(
            {'_id': id},
            {'$push': {'campaign': campaign_obj}}
        )
        return result
1,108,623
Removes an item from the bucket list Args: id: the CRITs object id of the TLO collection: The db collection. See main class documentation. item: the bucket list item to remove Returns: The mongodb result
def remove_bucket_list_item(self, id, collection, item): if not isinstance(id, ObjectId): id = ObjectId(id) obj = getattr(self.db, collection) result = obj.update( {'_id': id}, {'$pull': {'bucket_list': item}} ) return result
1,108,624
Translates a unit position to a known parking spot. Args: unit_pos: unit position as Vec2. Returns: ParkingSpot object, or None if no known spot lies within range.
def unit_pos_to_spot(unit_pos) -> ParkingSpot: min_ = 50 # Maximum distance for a position to count as a match res = None for airport in parkings: for spot in parkings[airport]: # type: ignore spot_pos = parkings[airport][spot] # type: ignore dist = math.hypot(unit_pos[0] - spot_pos[0], unit_pos[1] - spot_pos[1]) if dist < min_: # Keep the closest spot seen so far min_ = dist # type: ignore res = ParkingSpot(airport=airport, spot=spot) return res
1,108,724
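A hedged usage sketch; the coordinates are placeholders and parkings is assumed to be a nested mapping of airport -> spot -> position populated elsewhere in the module:

spot = unit_pos_to_spot((-284519.0, 683512.0))  # hypothetical Vec2 position
if spot is not None:
    print(spot.airport, spot.spot)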
Parse `data` to EPublication. Args: filename (str): Used to choose right parser based at suffix. data (str): Content of the metadata file. Returns: EPublication: object.
def parse_meta(filename, data): if "." not in filename: raise MetaParsingException( "Can't recognize type of your metadata ('%s')!" % filename ) suffix = filename.rsplit(".", 1)[1].lower() if suffix not in SUPPORTED_FILES: raise MetaParsingException("Can't parse file of type '%s'!" % suffix) fp = validator.FieldParser() for key, val in SUPPORTED_FILES[suffix](data).items(): fp.process(key, val) return fp.get_epublication()
1,108,744
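A hedged usage sketch; the file name is a placeholder and the accepted suffixes depend on the SUPPORTED_FILES mapping defined elsewhere in the module:

# Hypothetical file; its suffix must appear in SUPPORTED_FILES.
with open('metadata.json') as f:
    epublication = parse_meta('metadata.json', f.read())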
Subscribes this Area to the given Areas and, optionally, the given Slots. Must be called before the Area is run. Args: clock_name: The name of the Area that is used as synchronizing Clock. clock_slots: The Slots of the Clock relevant to this Area. subscriptions: A dictionary with the names of the relevant Areas as keys and, optionally, the Slots as values.
def subscribe(self, clock_name: str = None, clock_slots: Iterable[str] = None, subscriptions: Dict[str, Any] = None): # Avoid a mutable default argument: the dictionary is mutated below. if subscriptions is None: subscriptions = {} for area in subscriptions: # type: str init_full(self, area, subscriptions[area]) subscriptions[area] = {'slots': subscriptions[area]} if clock_name is not None: self.clock_name = clock_name self.clock_slots = clock_slots subscriptions[clock_name] = {'slots': clock_slots, 'buffer-length': 1} self.setup(puller=True, subscriptions=subscriptions)
1,108,881
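A hedged wiring sketch; the Area and Slot names are hypothetical, and the multiple helper above supplies the Clock Slots:

area.subscribe(
    clock_name='clock',                      # hypothetical Clock Area name
    clock_slots=multiple(layer=2, limit=3),
    subscriptions={'sensor': None},          # hypothetical Area; None for all Slots (assumed)
)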
Applies this datetime to a Miz object (it will be mutated in place) Args: miz: MIZ object to mutate Returns: True
def apply_to_miz(self, miz): miz.mission.day = self.date.day miz.mission.month = self.date.month miz.mission.year = self.date.year miz.mission.mission_start_time = self.mission_start_time return True
1,109,112
Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance
def from_string(input_str) -> 'MissionTime': # noinspection SpellCheckingInspection match = RE_INPUT_STRING.match(input_str) if not match: raise ValueError(f'badly formatted date/time: {input_str}') return MissionTime( datetime.datetime( int(match.group('year')), int(match.group('month')), int(match.group('day')), int(match.group('hour')), int(match.group('minute')), int(match.group('second')), ) )
1,109,113
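A hedged sketch tying the two methods above together; the timestamp and the miz object are placeholders:

mission_time = MissionTime.from_string('20180401123000')  # April 1st, 2018, 12:30:00
mission_time.apply_to_miz(miz)  # "miz" is an already-loaded Miz object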
Queries AWC for the METAR of a given station. Args: icao: station ID as a four-character ICAO code (letters and digits). Returns: AWC result for the station.
def query_icao(icao: str): params = { 'dataSource': 'metars', 'requestType': 'retrieve', 'format': 'csv', 'hoursBeforeNow': 24, } AWC._validate_icao(icao) params['stationString'] = icao try: return AWC._query(params) except RequestsConnectionError: raise AWCRequestFailed('failed to obtain requested data from AWC')
1,109,151
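A hedged call sketch; the station code is a placeholder:

# Hypothetical station; raises AWCRequestFailed if the connection fails.
result = AWC.query_icao('UGTB')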
Retrieves a TAF string from an online database. Args: station_icao: ICAO of the station. Returns: tuple of (error, taf_str).
def retrieve_taf(station_icao) -> typing.Tuple[typing.Union[str, None], typing.Union[str, None]]: url = _BASE_TAF_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain TAF for station {station_icao}\n' \ f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
1,109,199
Retrieves a METAR string from an online database. Args: station_icao: ICAO of the station. Returns: tuple of (error, metar_str).
def retrieve_metar(station_icao) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]: url = _BASE_METAR_URL.format(station=station_icao) with requests.get(url) as resp: if not resp.ok: return f'unable to obtain METAR for station {station_icao}\n' \ f'Go to "http://tgftp.nws.noaa.gov/data/observations/metar/stations" ' \ f'for a list of valid stations', None return None, resp.content.decode().split('\n')[1]
1,109,200
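A hedged error-handling sketch for the two retrieval helpers above; the station code is a placeholder:

error, metar = retrieve_metar('UGTB')  # hypothetical station
if error:
    print(error)
else:
    print(metar)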
Maps an HTTP verb to the matching requests function and content headers. Args: op: a string specifying an HTTP verb. Returns: tuple of (requests function, content-type headers dict or None).
def _get_req_fp(self, op): if not op: # An empty op previously fell through and returned None; fail loudly instead raise NotImplementedError('No operation specified!') op = op.lower() if op == 'get': return requests.get, None if op == 'put': return requests.put, {'Content-Type': 'application/x-www-form-urlencoded'} if op == 'post': return requests.post, {'Content-Type': 'application/json'} if op == 'delete': return requests.delete, None raise NotImplementedError('Operation {} is not supported!'.format(op))
1,109,407
HTTP request wrapper with data packaging functionality. Args: op: HTTP verb as a str. uri: address of the request. payload: data to be sent in dict format (default: None); if not provided, no data is sent. Returns: tuple of (status code, response dict or list).
def _req(self, op, uri, payload=None): if DEBUG: print(('uri', uri)) req_fp, content_type = self._get_req_fp(op) if payload: if content_type: r = req_fp(uri, payload, auth=self.api_auth, headers=content_type) else: r = req_fp(uri, payload, auth=self.api_auth) else: r = req_fp(uri, auth=self.api_auth) if r.status_code == requests.codes.ok: data = r.json() else: data = None # Surfaces debugging information when DEBUG is enabled self._parse_req(r) return r.status_code, data
1,109,408
Initializes an instance of the class with an API key. Allows multiple instances with distinct keys. Args: my_api_key: API key for this instance.
def __init__(self, my_api_key): # super(self.__class__, self) recurses infinitely when subclassed; plain super() is safe super().__init__(my_api_key) self.sort_by_postfix = '?sortBy=' self.boxes_suffix = 'boxes' self.stages_suffix = 'stages' self.pipelines_suffix = 'pipelines' self.search_suffix = 'search?query=' self.snippets_suffix = 'snippets' self.fields_suffix = 'fields' self.newsfeed_suffix = 'newsfeed' self.threads_suffix = 'threads' self.comments_suffix = 'comments' self.files_suffix = 'files' self.file_contents_suffix = 'contents' self.file_link_suffix = 'link' self.reminders_suffix = 'reminders' self.detail_level_suffix = '?detailLevel=' if DEBUG: print(self.api_uri)
1,109,410
Parses a request's response object for relevant debugging information. Only works if DEBUG is enabled. Args: req: a requests Response object.
def _parse_req(self, req): if DEBUG: if req.status_code != requests.codes.ok: print("code: {}".format(req.status_code)) print("response {}".format(req.json())) print("req headers {}".format(req.request.headers)) print("req body {}".format(req.request.body))
1,109,411
Gets user information from the server. Args: key: user key (default: 'me'). Returns: tuple of (status code for the GET request, dict of user data).
def get_user(self, key=None): if key: uri = self.api_uri + "/users/" + key else: uri = self.api_uri + "/users/me" return self._req('get', uri)
1,109,412
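A hedged call sketch; client is assumed to be an instance of this class constructed with a valid API key:

status, user = client.get_user()  # with no key, defaults to the "me" endpoint
if status == 200:
    print(user)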