Watch for file changes and reload config when needed. Arguments: state (_WaffleState): Object that contains reference to app and its configstore.
def _file_watcher(state): conf = state.app.config file_path = conf.get('WAFFLE_WATCHER_FILE', '/tmp/waffleconf.txt') if not os.path.isfile(file_path): # Create watch file open(file_path, 'a').close() while True: tstamp = os.path.getmtime(file_path) # Compare timestamps and update config if needed if tstamp > state._tstamp: state.update_conf() state._tstamp = tstamp # Not too critical time.sleep(10)
1,096,891
Listen to a redis channel for configuration update notifications. Arguments: state (_WaffleState): Object that contains reference to app and its configstore.
def _redis_watcher(state): conf = state.app.config r = redis.client.StrictRedis( host=conf.get('WAFFLE_REDIS_HOST', 'localhost'), port=conf.get('WAFFLE_REDIS_PORT', 6379)) sub = r.pubsub(ignore_subscribe_messages=True) sub.subscribe(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf')) while True: for msg in sub.listen(): # Skip non-messages if not msg['type'] == 'message': continue tstamp = float(msg['data']) # Compare timestamps and update config if needed if tstamp > state._tstamp: state.update_conf() state._tstamp = tstamp
1,096,892
Notify of configuration update through file. Arguments: state (_WaffleState): Object that contains reference to app and its configstore.
def _file_notifier(state): tstamp = time.time() state._tstamp = tstamp conf = state.app.config file_path = conf.get('WAFFLE_WATCHER_FILE', '/tmp/waffleconf.txt') if not os.path.isfile(file_path): # Create watch file open(file_path, 'a').close() # Update timestamp os.utime(file_path, (tstamp, tstamp))
1,096,893
Notify of configuration update through redis. Arguments: state (_WaffleState): Object that contains reference to app and its configstore.
def _redis_notifier(state): tstamp = time.time() state._tstamp = tstamp conf = state.app.config # Notify timestamp r = redis.client.StrictRedis() r.publish(conf.get('WAFFLE_REDIS_CHANNEL', 'waffleconf'), tstamp)
1,096,894
Find the model class for a given model path like 'project.app.model' Args: path (str): dot-delimited model path, like 'project.app.model' Returns: Django Model-based class
def model_from_path(model_path, fuzziness=False): app_name = '.'.join(model_path.split('.')[:-1]) model_name = model_path.split('.')[-1] if not app_name: return None module = importlib.import_module(app_name) try: model = getattr(module, model_name) except AttributeError: try: model = getattr(getattr(module, 'models'), model_name) except AttributeError: model = get_model(model_name, app_name, fuzziness=fuzziness) return model
1,096,929
Write a QuerySet or ValuesListQuerySet to a CSV file based on djangosnippets by zbyte64 and http://palewi.re Arguments: qs (QuerySet or ValuesListQuerySet): The records you want to write to a text file (UTF-8) filename (str): full path and file name to write to
def write_queryset_to_csv(qs, filename): model = qs.model with open(filename, 'w') as fp: writer = csv.writer(fp) try: headers = list(qs._fields) except AttributeError: headers = [field.name for field in model._meta.fields] writer.writerow(headers) for obj in qs: row = [] for colnum, field in enumerate(headers): try: value = getattr(obj, field, obj[colnum]) except Exception: value = '' if callable(value): value = value() if isinstance(value, basestring): value = value.encode("utf-8") else: value = str(value).encode("utf-8") row += [value] writer.writerow(row)
1,096,944
Thin wrapper for pandas.io.excel.read_excel() that accepts a file path and sheet index/name Arguments: path (str): path to the Excel file sheetname (int or str): index or name of the sheet to load header (int): row to use for the column labels skiprows (int or list): rows to skip at the beginning of the sheet Returns: pandas.DataFrame: the requested sheet as a data frame
def dataframe_from_excel(path, sheetname=0, header=0, skiprows=None): sheetname = sheetname or 0 if isinstance(sheetname, (basestring, float)): try: sheetname = int(sheetname) except (TypeError, ValueError, OverflowError): sheetname = str(sheetname) wb = xlrd.open_workbook(path) return pd.io.excel.read_excel(wb, sheetname=sheetname, header=header, skiprows=skiprows, engine='xlrd')
1,096,946
Ignore date information in a datetime string or object Arguments: dt (str or datetime.datetime or datetime.time or numpy.Timestamp): time or date to be coerced into a `datetime.time` object Returns: datetime.time: Time of day portion of a `datetime` string or object >>> make_time(None) datetime.time(0, 0) >>> make_time("11:59 PM") datetime.time(23, 59) >>> make_time(datetime.datetime(1999, 12, 31, 23, 59, 59)) datetime.time(23, 59, 59)
def make_time(dt, date_parser=parse_date): if not dt: return datetime.time(0, 0) if isinstance(dt, basestring): try: dt = date_parser(dt) except Exception: print('Unable to parse {0}'.format(repr(dt))) print_exc() return datetime.time(0, 0) try: dt = dt.timetuple()[3:6] except Exception: dt = tuple(dt)[3:6] return datetime.time(*dt)
1,096,948
Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict Arguments: path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from ext (str): file name extension (to filter files by) date_parser (function): if the MultiIndex can be interpreted as a datetime, this parser will be used Returns: dict of DataFrame: { file_path: flattened_data_frame }
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None): date_parser = date_parser or (lambda x: x) dotted_ext, dotted_output_ext = None, None if ext is not None and output_ext is not None: dotted_ext = ('' if ext.startswith('.') else '.') + ext dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext table = {} for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity): file_path = file_properties['path'] if output_ext and (dotted_output_ext + '.') in file_path: continue df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows) df = flatten_dataframe(df, verbosity=verbosity) if dotted_ext is not None and dotted_output_ext is not None: df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext) table[file_path] = df # populate the returned mapping, as the docstring promises return table
1,096,950
Parse ISO-8601 duration string. Args: __string: Duration string to parse Returns: Parsed delta object
def parse_delta(__string: str) -> datetime.timedelta: if not __string: return datetime.timedelta(0) # minimal ISO-8601 duration pattern; the group names must match timedelta's keyword arguments match = re.fullmatch(r'P?(?:(?P<days>\d+)D)?(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+)S)?)?', __string) if not match: raise ValueError('Unable to parse delta {!r}'.format(__string)) match_dict = {k: int(v) if v else 0 for k, v in match.groupdict().items()} return datetime.timedelta(**match_dict)
1,097,114
Format ISO-8601 duration string. Args: __timedelta: Duration to process Returns: ISO-8601 representation of duration
def format_delta(__timedelta: datetime.timedelta) -> str: if __timedelta == datetime.timedelta(0): return '' days_s = '{}D'.format(__timedelta.days) if __timedelta.days else '' hours, minutes = divmod(__timedelta.seconds, 3600) minutes, seconds = divmod(minutes, 60) hours_s = '{:02d}H'.format(hours) if hours else '' minutes_s = '{:02d}M'.format(minutes) if minutes else '' seconds_s = '{:02d}S'.format(seconds) if seconds else '' return 'P{}{}{}{}{}'.format(days_s, 'T' if hours or minutes or seconds else '', hours_s, minutes_s, seconds_s)
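A quick doctest-style sanity check of the formatter above (examples mine, not from the original module):
>>> import datetime
>>> format_delta(datetime.timedelta(days=1, hours=2))
'P1DT02H'
>>> format_delta(datetime.timedelta(minutes=5, seconds=30))
'PT05M30S'
>>> format_delta(datetime.timedelta(0))
''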
1,097,115
Parse ISO-8601 datetime string. Args: __string: Datetime string to parse Returns: Parsed datetime object
def parse_datetime(__string: str) -> datetime.datetime: if not __string: datetime_ = datetime.datetime.now(datetime.timezone.utc) else: # pylint: disable=no-member datetime_ = ciso8601.parse_datetime(__string) if datetime_.tzinfo is None: datetime_ = datetime_.replace(tzinfo=datetime.timezone.utc) return datetime_
1,097,116
Context handler to temporarily switch directories. Args: __path: Directory to change to Yields: Execution context in ``path``
def chdir(__path: str) -> ContextManager: old = os.getcwd() try: os.chdir(__path) yield finally: os.chdir(old)
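A minimal usage sketch, assuming the generator above is wrapped with contextlib.contextmanager (the bare yield suggests it is):
import os
with chdir('/tmp'):
    print(os.getcwd())  # the temporary working directory (possibly a resolved path such as /private/tmp)
print(os.getcwd())      # back to the original working directory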
1,097,362
Context handler to temporarily alter environment. If you supply a value of ``None``, then the associated key will be deleted from the environment. Args: kwargs: Environment variables to override Yields: Execution context with modified environment
def env(**kwargs: Union[Dict[str, str], None]) -> ContextManager: old = os.environ.copy() try: os.environ.clear() # This apparent duplication is because ``putenv`` doesn’t update # ``os.environ``, and ``os.environ`` changes aren’t propagated to # subprocesses. for key, value in old.items(): os.environ[key] = value # NOQA: B003 os.putenv(key, value) for key, value in kwargs.items(): if value is None: del os.environ[key] else: os.environ[key] = value # NOQA: B003 os.putenv(key, value) yield finally: os.environ.clear() for key, value in old.items(): os.environ[key] = value # NOQA: B003 os.putenv(key, value)
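A usage sketch, again assuming a contextlib.contextmanager wrapper; note that deleting a variable by passing None requires it to be set beforehand, since del os.environ[key] raises KeyError otherwise:
import os
os.environ['EDITOR'] = 'vim'
with env(EDITOR=None, PAGER='less'):
    assert 'EDITOR' not in os.environ    # deleted inside the context
    assert os.environ['PAGER'] == 'less'
assert os.environ['EDITOR'] == 'vim'     # original environment restored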
1,097,363
Create a new Kippt List. Parameters: - title (Required) - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/list.md
def create(self, title, **args): # Merge our title as a parameter and JSONify it. data = json.dumps(dict({'title': title}, **args)) r = requests.post( "https://kippt.com/api/lists", headers=self.kippt.header, data=data ) return r.json()
1,097,410
Colourise text using click’s style function. Returns text untouched if colour output is not enabled, or ``stdout`` is not a tty. See :func:`click.style` for parameters Args: __text: Text to colourise Returns: Colourised text, when possible
def colourise(__text: str, *args, **kwargs) -> str: if sys.stdout.isatty(): __text = style(__text, *args, **kwargs) return __text
1,098,029
Highlight text using ``pygments``. Returns text untouched if colour output is not enabled. See also: :pypi:`Pygments` Args: __text: Text to highlight lexer: Pygments lexer to use formatter: Pygments formatter to use Returns: Syntax highlighted output, when possible
def highlight(__text: str, *, lexer: str = 'diff', formatter: str = 'terminal') -> str: if sys.stdout.isatty(): lexer = get_lexer_by_name(lexer) formatter = get_formatter_by_name(formatter) __text = pyg_highlight(__text, lexer, formatter) return __text
1,098,030
HTML to plain text renderer. See also: :pypi:`html2text` Args: __html: Text to process width: Paragraph width ascii_replacements: Use pseudo-ASCII replacements for Unicode Returns: Rendered text
def html2text(__html: str, *, width: int = 80, ascii_replacements: bool = False) -> str: html2.BODY_WIDTH = width html2.UNICODE_SNOB = ascii_replacements return html2.html2text(__html).strip()
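A rough usage sketch (html2 is assumed to be the html2text package imported under that alias; exact output may vary with the installed version):
>>> html2text('<h1>Title</h1><p>Some <em>text</em>.</p>')
'# Title\n\nSome _text_.'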
1,098,031
Configure a new Jinja environment with our filters. Args: __pkg: Package name to use as base for templates searches Returns: Configured Jinja environment
def setup(__pkg: str) -> jinja2.Environment: dirs = [path.join(d, 'templates') for d in xdg_basedir.get_data_dirs(__pkg)] env = jinja2.Environment( autoescape=jinja2.select_autoescape(['html', 'xml']), loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs])) env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates')) env.filters.update(FILTERS) return env
1,098,033
Encode a request as bytes. Checks whether debug mode is on and whether the data should be compressed, then encodes the request dict into a byte string accordingly. Parameters: query (Dict[str, Any]): - the request as a Python dict Return: (bytes): - the encoded request byte string
def encoder(self, query: Dict[str, Any]): if self.debug is True: queryb = json.dumps( query, ensure_ascii=False).encode("utf-8") else: queryb = msgpack.packb(query) if self.compreser: queryb = self.compreser.compress(queryb) return queryb + self.SEPARATOR
1,098,036
Decode a response from bytes. Checks whether debug mode is on and whether the data was compressed, then decodes the byte string into a Python dict accordingly. Parameters: response (bytes): - the encoded response byte string Return: (Dict[str, Any]): - the response as a Python dict
def decoder(self, response: bytes): response = response[:-(len(self.SEPARATOR))] if self.compreser is not None: response = self.compreser.decompress(response) if self.debug is True: response = json.loads(response.decode('utf-8')) else: response = msgpack.unpackb(response, encoding='utf-8') version = response.get("MPRPC") if version and version == self.VERSION: return response else: raise ProtocolException("Wrong Protocol")
1,098,037
Print get_version() return value in a readable format. Params: None Returns: None
def print_version(): v = get_version() try: s = _STR_WIN[v] except KeyError: s = "Unknown OS" print("-----------------------------------------------------------") print("###################### WinVer Report ######################") print("Python Version : {}.{}.{}".format(*sys.version_info[:3])) print("Windows Version String : {}".format(s)) print("Windows Major Version : {}".format(v[0])) print("Windows Minor Version : {}".format(v[1])) print("Windows Service Pack (or Build) Version : {}".format(v[2])) print("Is Windows Server : {}".format('Yes' if v[3] == 1 else 'No')) print("Is Windows 10 (or Windows Server 2016) : {}".format('Yes' if v >= WIN_10 else 'No')) print("-----------------------------------------------------------")
1,098,248
Convert VCF file into input for WASP. Only bi-allelic heterozygous sites are used. Parameters: ----------- vcf : str Path to VCF file. directory : str Output directory. This is the directory that will hold the files for WASP. sample_name : str If provided, use this sample name to get heterozygous SNPs from VCF file.
def wasp_snp_directory(vcf, directory, sample_name=None): chrom = [] pos = [] ref = [] alt = [] vcf_reader = pyvcf.Reader(open(vcf, 'r')) if sample_name: def condition(record, sample_name): return sample_name in [x.sample for x in record.get_hets()] else: def condition(record, sample_name): return len(record.get_hets()) > 0 for record in vcf_reader: if condition(record, sample_name): if len(record.ALT) == 1: chrom.append(record.CHROM) pos.append(record.POS) ref.append(record.REF) alt.append(record.ALT[0].sequence) df = pd.DataFrame([chrom, pos, ref, alt], index=['chrom', 'position', 'RefAllele', 'AltAllele']).T if not os.path.exists(directory): os.makedirs(directory) for c in set(df.chrom): tdf = df[df.chrom == c] if tdf.shape[0] > 0: f = gzip.open(os.path.join(directory, '{}.snps.txt.gz'.format(c)), 'wb') lines = (tdf.position.astype(str) + '\t' + tdf.RefAllele + '\t' + tdf.AltAllele) f.write('\n'.join(lines) + '\n') f.close()
1,098,316
Read VCF file into pandas DataFrame. Parameters: ----------- fn : str Path to VCF file. Returns ------- df : pandas.DataFrame The VCF file as a data frame. Note that all header information is thrown away.
def vcf_as_df(fn): header_lines = 0 with open(fn, 'r') as f: line = f.readline().strip() header_lines += 1 while line[0] == '#': line = f.readline().strip() header_lines += 1 header_lines -= 2 df = pd.read_table(fn, skiprows=header_lines, header=0) df.columns = ['CHROM'] + list(df.columns[1:]) return df
1,098,317
Make boolean matrix of samples by variants. One indicates that the sample is heterozygous for that variant. Parameters: ----------- vcf : str Path to VCF file.
def make_het_matrix(fn): # TODO: parallelize? vcf_df = vcf_as_df(fn) variant_ids = vcf_df.apply(lambda x: df_variant_id(x), axis=1) vcf_reader = pyvcf.Reader(open(fn, 'r')) record = vcf_reader.next() hets = pd.DataFrame(0, index=variant_ids, columns=[x.sample for x in record.samples]) vcf_reader = pyvcf.Reader(open(fn, 'r')) for record in vcf_reader: h = record.get_hets() i = record_variant_id(record) hets.ix[i, [x.sample for x in h]] = 1 return hets
1,098,318
Obtain the dimensionality of a .lsp file. This should work for all well formatted .lsp files. Parameters: ----------- lsp : .lsp string Returns a list of dimensions.
def getdim(lsp): dims = ['x', 'y', 'z'] rxs = ['{}-cells *([0-9]+)'.format(x) for x in dims] return [x for x, rx in zip(dims, rxs) if re.search(rx, lsp) and int(re.search(rx, lsp).group(1)) > 0]
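A toy example against a fabricated .lsp fragment (real files are far more verbose, but only the {x,y,z}-cells entries matter here):
>>> getdim('x-cells 100 y-cells 50 z-cells 0')
['x', 'y']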
1,098,474
Get information from pext planes. This might or might not work, use with caution! Parameters: ----------- lsp : .lsp string Returns a list of dicts with information for all pext planes
def getpexts(lsp): lines=lsp.split('\n'); #unfortunately regex doesn't work here lns,planens = zip( *[ (i,int(re.search('^ *extract *([0-9]+)',line).group(1))) for i,line in enumerate(lines) if re.search('^ *extract *[0-9]+', line)]); if len(lns) == 0: return []; end = lns[-1]; for i,line in enumerate(lines[end+1:]): if re.match(' *\[',line): break; end += i; lineranges = zip(lns,(lns+(end,))[1:]); planes=dict() for (i,end),plane in zip(lineranges,planens): d=dict(); labels = [ 'species', 'direction', 'position',]; datarx = [ '^ *species *([0-9]+)', '^ *direction *([xXyYzZ])', '^ *at *(.*)',]; convs = [ lambda s: int(s), lambda i: i, lambda s: np.array( map(float,s.split(' '))), ]; for line in lines[i:end]: for label,rx,conv in zip(labels,datarx,convs): if re.match(rx,line): d[label]=conv(re.match(rx,line).group(1)); pass pass planes[plane] = d; return planes;
1,098,475
Initialize the RPC client. Parameters: addr (str): - a string of the form `tcp://xxx:xxx@xxx:xxx` debug (bool): - whether to run in debug mode, defaults to False compreser (Optional[str]): - which compression tool, if any, to use for transmitted data, defaults to None (no compression).
def __init__(self, addr: str, debug: bool=False, compreser: Optional[str]=None): pas = urlparse(addr) if pas.scheme != "tcp": raise abort(505, "unsupported scheme for this protocol") # public self.username = pas.username self.password = pas.password self.hostname = pas.hostname self.port = pas.port self.debug = debug if compreser is not None: _compreser = self.COMPRESERS.get(compreser) if _compreser is not None: self.compreser = _compreser else: raise RuntimeError("compreser unsupport") else: self.compreser = None self.closed = True self.reader = None self.writer = None self.tasks = {} self.remote_info = None # protected self._client = None
1,098,963
Check the response code and dispatch the response accordingly. Specifically: + codes 500~599 are service exceptions; the corresponding exception is raised directly + codes 400~499 are call exceptions; the exception is set on the future for the matching ID + codes 300~399 are warnings; the corresponding warning is emitted + codes 200~399 are success responses; the result is set on the future for the matching ID + codes 100~199 are server responses, chiefly authentication and heartbeat replies Parameters: response (Dict[str, Any]): - the response as a Python dict Return: (bool): - True for any response that is not a service exception
def _status_code_check(self, response: Dict[str, Any]): code = response.get("CODE") if self.debug: print("resv:{}".format(response)) print(code) if code >= 500: if self.debug: print("server error") return self._server_error_handler(code) elif 500 > code >= 400: if self.debug: print("call method error") return self._method_error_handler(response) elif 400 > code >= 200: if code >= 300: self._warning_handler(code) if code in (200, 201, 202, 206, 300, 301): if self.debug is True: print("resv resp {}".format(response)) return self._method_response_handler(response) elif 200 > code >= 100: return self._server_response_handler(response) else: raise MprpcException("unknow status code {}".format(code))
1,098,969
Handle 400~499 status codes by setting an exception on the corresponding task. Parameters: (response): - the response as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
def _method_error_handler(self, response: Dict[str, Any]): exp = response.get('MESSAGE') code = response.get("CODE") ID = exp.get("ID") raise abort(code, ID=ID, message=exp.get('MESSAGE'))
1,098,970
Handle 300~399 status codes by emitting the corresponding warning. Parameters: (code): - the response status code Return: (bool): - True for known warning types, False otherwise
def _warning_handler(self, code: int): if code == 300: warnings.warn( "ExpireWarning", RuntimeWarning, stacklevel=3 ) elif code == 301: warnings.warn( "ExpireStreamWarning", RuntimeWarning, stacklevel=3 ) else: if self.debug: print("unknow code {}".format(code)) return False return True
1,098,971
Handle a result response. Parses out the result and sets it on the Future object for the task. Parameters: (response): - the response as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
def _result_handler(self, response: Dict[str, Any]): res = response.get("MESSAGE") result = res.get("RESULT") return result
1,098,972
Wrap the call's ID, method name, and arguments into request data. Parameters: ID (str): - the task ID methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (Dict[str, Any]): - the request as a Python dict
def _make_query(self, ID: str, methodname: str, *args: Any, **kwargs: Any): query = { "MPRPC": self.VERSION, "ID": ID, "METHOD": methodname, "RETURN": True, "ARGS": args, "KWARGS": kwargs } if self.debug: print(query) return query
1,098,974
Encode the request into a byte string and send it to the server. Parameters: (query): - the request as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
def _send_query(self, query: Dict[str, Any]): queryb = self.encoder(query) self._client.write(queryb) if self.debug is True: print("send query {}".format(queryb)) return True
1,098,975
Wrap the call's ID, method name, and arguments into request data, encode it into a byte string, and send it. Parameters: ID (str): - the task ID methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (bool): - strictly speaking, returns True as long as no error occurred
def send_query(self, ID, methodname, *args, **kwargs): query = self._make_query(ID, methodname, *args, **kwargs) self._send_query(query) return True
1,098,976
Wrap the call's ID, method name, and arguments into request data, encode it into a byte string, send it, and create a placeholder Future. Parameters: ID (str): - the task ID methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (asyncio.Future): - the Future object for the given ID
def _query(self, ID, methodname, *args, **kwargs): self.send_query(ID, methodname, *args, **kwargs) result = self._responsehandler() return result
1,098,977
Asynchronously call a remote function. Creates an ID for the call, wraps the method name and arguments into request data, encodes it into a byte string, sends it, and creates a placeholder Future. Parameters: methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (asyncio.Future): - the Future object for the given ID
def query(self, methodname, *args, **kwargs): ID = str(uuid.uuid4()) result = self._query(ID=ID, methodname=methodname, *args, **kwargs) return result
1,098,978
Fetch default value for a function argument Args: __func: Function to inspect __arg: Argument to extract default value for
def get_default(__func: Callable, __arg: str) -> str: return signature(__func).parameters[__arg].default
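A small doctest-style illustration (the example function is hypothetical):
>>> def greet(name: str = 'world') -> str:
...     return 'hello ' + name
>>> get_default(greet, 'name')
'world'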
1,099,033
Converts given argument to float. On fail logs warning and returns 0.0. Args: v (any): value to convert to float Returns: float: converted v or 0.0 if conversion failed.
def _force_float(v): try: return float(v) except Exception as exc: logger.warning('Failed to convert {} to float with {} error. Using 0 instead.'.format(v, exc)) return 0.0
1,099,343
Colour text, if possible. Args: text: Text to colourise colour: Colour to display text in Returns: Colourised text, if possible
def _colourise(text: str, colour: str) -> str: if COLOUR: text = style(text, fg=colour, bold=True) return text
1,099,433
Initializer. Args: base_path: Base path. Fast way to construct a `FileLocator` if no locator is passed. Will be ignored when a locator is given. locator:
def __init__(self, base_path: Optional[str] = None, locator: Optional[FileLocator] = None): self._base_path = str(base_path) if locator is None: self.locator = FileLocator(self._base_path) else: self.locator = locator
1,099,435
Performs the actual loading of an external resource into the current model. Args: ctx: The processing context. Returns: Returns a dictionary that gets incorporated into the actual model.
def _apply(self, ctx: ExtensionContext) -> AugmentedDict: def process(pattern: Pattern[str], _str: str) -> Any: _match = pattern.match(_str) if _match is None: return _str # pragma: no cover # We got a match # Group 0: Whole match; Group 1: Our placeholder; # Group 2: file path to external resource placeholder, external_path = _match.group(1), _match.group(2) with open(self.locator( external_path, cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None )) as fhandle: # Json does not support line breaks. We will have to mask them content = fhandle.read() return _str.replace(placeholder, content) node_key, node_value = ctx.node _pattern = re.compile(self.__pattern__) return {node_key: process(_pattern, node_value)}
1,099,436
Loads a yaml fragment from an external file. Args: ctx: The processing context. Returns: The external resource as a python dictionary. The fragment is already sent through the processor as well.
def _apply(self, ctx: ExtensionContext) -> Any: _, external_path = ctx.node return ctx.mentor.load_yaml(self.locator( external_path, cast(str, ctx.document) if Validator.is_file(document=ctx.document) else None ))
1,099,438
Initializer. Args: fail_on_unset (bool): If set to True an exception will be raised when the environment variable is unset; otherwise the default value (see next) will be used instead. default (str): If an environment variable is unset, it will get this value instead.
def __init__(self, fail_on_unset: bool = False, default: str = 'none'): self.fail_on_unset = bool(fail_on_unset) self.default = str(default)
1,099,439
Replaces any {{env::*}} directives with their actual environment variable value or a default. Args: ctx: The processing context. Returns: Returns the altered node key and value.
def _apply(self, ctx: ExtensionContext) -> AugmentedDict: node_key, node_value = ctx.node def process(pattern: Pattern[str], _str: str) -> str: _match = pattern.match(_str) if _match is None: return _str # We got a match # Group 0: Whole match; Group 1: Our placeholder; Group 2: The environment variable placeholder, envvar = _match.group(1), _match.group(2) envvalue = os.environ.get(envvar, None) if envvalue is None and self.fail_on_unset: raise ExtensionError("Environment variable '{}' is unset.".format(envvar)) return _str.replace(placeholder, envvalue or self.default) _pattern = re.compile(self.__pattern__) node_key = process(_pattern, node_key) node_value = process(_pattern, node_value) return {node_key: node_value}
1,099,440
Initializer. Args: fail_on_unset (bool): If set to True an exception will be raised when the environment variable is unset; otherwise the default value (see next) will be used instead. default (str): If an environment variable is unset, it will get this value instead.
def __init__(self, fail_on_unset: bool = False, default: str = 'none', **_vars: Any): self.fail_on_unset = bool(fail_on_unset) self.default = str(default) self.vars = _vars
1,099,441
Replaces any {{var::*}} directives with their actual variable value or a default. Args: ctx: The processing context. Returns: Returns the altered node key and value.
def _apply(self, ctx: ExtensionContext) -> AugmentedDict: node_key, node_value = ctx.node def process(pattern: Pattern[str], _str: str) -> Any: _match = pattern.match(_str) if _match is None: return _str # We got a match # Group 0: Whole match; Group 1: Our placeholder; Group 2: The environment variable placeholder, varname = _match.group(1), _match.group(2) varval = self.vars.get(varname, None) if varval is None and self.fail_on_unset: raise ExtensionError("Variable '{}' is unset.".format(varname)) return _str.replace(placeholder, varval or self.default) _pattern = re.compile(self.__pattern__) node_key = process(_pattern, node_key) node_value = process(_pattern, node_value) return {node_key: node_value}
1,099,442
Returns the Scrabble score of a letter. Args: letter: a single character string Raises: TypeError if a non-Scrabble character is supplied
def letter_score(letter): score_map = { 1: ["a", "e", "i", "o", "u", "l", "n", "r", "s", "t"], 2: ["d", "g"], 3: ["b", "c", "m", "p"], 4: ["f", "h", "v", "w", "y"], 5: ["k"], 8: ["j", "x"], 10: ["q", "z"], } for score, letters in score_map.items(): if letter.lower() in letters: return score else: raise TypeError("Invalid letter: %s" % letter)
1,099,758
Checks the Scrabble score of a single word. Args: word: a string to check the Scrabble score of input_letters: the letters in our rack questions: integer of the tiles already on the board to build on Returns: an integer Scrabble score amount for the word
def word_score(word, input_letters, questions=0): score = 0 bingo = 0 filled_by_blanks = [] rack = list(input_letters) # make a copy to speed up find_anagrams() for letter in word: if letter in rack: bingo += 1 score += letter_score(letter) rack.remove(letter) else: filled_by_blanks.append(letter_score(letter)) # we can have both ?'s and _'s in the word. this will apply the ?s to the # highest scrabble score value letters and leave the blanks for low points. for blank_score in sorted(filled_by_blanks, reverse=True): if questions > 0: score += blank_score questions -= 1 # 50 bonus points for using all the tiles in your rack if bingo > 6: score += 50 return score
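Two illustrative calls (values follow standard Scrabble tile scores; examples mine):
>>> word_score('quiz', 'quiz')
22
>>> word_score('cat', 'ct', questions=1)  # the missing 'a' is covered by a '?' tile
5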
1,099,759
Searches a string for blank tile characters ("?" and "_"). Args: input_word: the user supplied string to search through Returns: a tuple of: input_word without blanks integer number of blanks (no points) integer number of questions (points)
def blank_tiles(input_word): blanks = 0 questions = 0 input_letters = [] for letter in input_word: if letter == "_": blanks += 1 elif letter == "?": questions += 1 else: input_letters.append(letter) return input_letters, blanks, questions
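For example:
>>> blank_tiles('he_lo?')
(['h', 'e', 'l', 'o'], 1, 1)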
1,099,760
Opens the word list file. Args: sowpods: a boolean to declare using the sowpods list or TWL (default) start: a string of starting characters to find anagrams based on end: a string of ending characters to find anagrams based on Yields: a word at a time out of 178691 words for TWL, 267751 for sowpods. Much less if either start or end are used (filtering is applied here)
def word_list(sowpods=False, start="", end=""): location = os.path.join( os.path.dirname(os.path.realpath(__file__)), "wordlists", ) if sowpods: filename = "sowpods.txt" else: filename = "twl.txt" filepath = os.path.join(location, filename) with open(filepath) as wordfile: for word in wordfile.readlines(): word = word.strip() if start and end and word.startswith(start) and word.endswith(end): yield word elif start and word.startswith(start) and not end: yield word elif end and word.endswith(end) and not start: yield word elif not start and not end: yield word
1,099,761
Attempts to read the cache to fetch missing arguments. This method will attempt to find a '.license' file in the 'CACHE_DIRECTORY', to read any arguments that were not passed to the license utility. Arguments: author (str): The author passed, if any. kind (str): The kind of license passed, if any. Throws: LicenseError, if there was a cache miss or I/O error.
def read(author, kind): if not os.path.exists(CACHE_PATH): raise LicenseError('No cache found. You must ' 'supply at least -a and -k.') cache = read_cache() if author is None: author = read_author(cache) if kind is None: kind = read_kind(cache) return author, kind
1,100,070
Generate a <D x D> ill-conditioned correlation matrix with random coefficients Parameters: ----------- D : int Dimension of the matrix Return: ------- cmat : ndarray DxD matrix with +1 as diagonal elements, mirrored random numbers [-1,+1].
def illmat(D, random_state=None): if random_state is not None: np.random.seed(random_state) uni = np.random.uniform(size=(D, D)) tmp = np.minimum(1., np.maximum(-1., 2 * uni - 1.0)) tmp = np.triu(tmp, k=1) return np.eye(D) + tmp + tmp.T
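A brief sketch checking the promised properties (unit diagonal, mirrored off-diagonal entries):
import numpy as np
m = illmat(4, random_state=42)
assert m.shape == (4, 4)
assert np.allclose(np.diag(m), 1.0)  # +1 diagonal elements
assert np.allclose(m, m.T)           # symmetric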
1,100,912
For given `file_obj` return iterator, which will read the file in `self.read_bs` chunks. Args: file_obj (file): File-like object. Return: iterator: Iterator reading the file-like object in chunks.
def _get_file_iterator(self, file_obj): file_obj.seek(0) return iter(lambda: file_obj.read(self.read_bs), '')
1,101,063
Create proper filesystem paths for given `file_hash`. Args: file_hash (str): Hash of the file for which the path should be created. path (str, default None): Recursion argument, don't set this. hash_list (list, default None): Recursion argument, don't set this. Returns: str: Created path.
def _create_dir_path(self, file_hash, path=None, hash_list=None): # first, non-recursive call - parse `file_hash` if hash_list is None: hash_list = list(file_hash) if not hash_list: raise IOError("Directory structure is too full!") # first, non-recursive call - look for subpath of `self.path` if not path: path = os.path.join( self.path, hash_list.pop(0) ) # if the path not yet exists, create it and work on it if not os.path.exists(path): os.mkdir(path) return self._create_dir_path( file_hash=file_hash, path=path, hash_list=hash_list ) files = os.listdir(path) # file is already in storage if file_hash in files: return path # if the directory is not yet full, use it if len(files) < self.dir_limit: return path # in full directories create new sub-directories return self._create_dir_path( file_hash=file_hash, path=os.path.join(path, hash_list.pop(0)), hash_list=hash_list )
1,101,065
Add new file into the storage. Args: file_obj (file): Opened file-like object. Returns: obj: Path where the file-like object is stored, together with its hash, in a :class:`.PathAndHash` object. Raises: AssertionError: If the `file_obj` is not file-like object. IOError: If the file couldn't be added to storage.
def add_file(self, file_obj): BalancedDiscStorage._check_interface(file_obj) file_hash = self._get_hash(file_obj) dir_path = self._create_dir_path(file_hash) final_path = os.path.join(dir_path, file_hash) def copy_to_file(from_file, to_path): with open(to_path, "wb") as out_file: for part in self._get_file_iterator(from_file): out_file.write(part) try: copy_to_file(from_file=file_obj, to_path=final_path) except Exception: os.unlink(final_path) raise return PathAndHash(path=final_path, hash=file_hash)
1,101,067
Remove file from the storage. File is identified by opened `file_obj`, from which the hashes / path are computed. Args: file_obj (file): Opened file-like object, which is used to compute hashes. Raises: IOError: If the `file_obj` is not in storage.
def delete_by_file(self, file_obj): BalancedDiscStorage._check_interface(file_obj) file_hash = self._get_hash(file_obj) return self.delete_by_hash(file_hash)
1,101,068
Remove file/archive by its `file_hash`. Args: file_hash (str): Hash, which is used to find the file in storage. Raises: IOError: If the file for given `file_hash` was not found in storage.
def delete_by_hash(self, file_hash): full_path = self.file_path_from_hash(file_hash) return self.delete_by_path(full_path)
1,101,069
Make sure, that blank directories are removed from the storage. Args: path (str): Path which you suspect that is blank.
def _recursive_remove_blank_dirs(self, path): path = os.path.abspath(path) # never delete root of the storage or smaller paths if path == self.path or len(path) <= len(self.path): return # if the path doesn't exists, go one level upper if not os.path.exists(path): return self._recursive_remove_blank_dirs( os.path.dirname(path) ) # if the directory contains files, end yourself if os.listdir(path): return # blank directories can be removed shutil.rmtree(path) # go one level up, check whether the directory is blank too return self._recursive_remove_blank_dirs( os.path.dirname(path) )
1,101,070
Delete file/directory identified by `path` argument. Warning: `path` has to be in :attr:`path`. Args: path (str): Path of the file / directory you want to remove. Raises: IOError: If the file / directory doesn't exist, or is not in :attr:`path`.
def delete_by_path(self, path): if not os.path.exists(path): raise IOError("Unknown path '%s'!" % path) if not path.startswith(self.path): raise IOError( "Path '%s' is not in the root of the storage ('%s')!" % ( path, self.path ) ) if os.path.isfile(path): os.unlink(path) return self._recursive_remove_blank_dirs(path) shutil.rmtree(path) self._recursive_remove_blank_dirs(path)
1,101,071
Constructor throws an InvalidBlockError if the block is not valid or convertible to a valid configuration. Args: complete_block: Tells the validator to assume every cell is filled in the block, which speeds up checks. max_title_rows: Restricts the title detector to stop looking for titles after max_title_rows rows.
def __init__(self, table_conversion, used_cells, block_start, block_end, worksheet=None, flags=None, units=None, complete_block=False, max_title_rows=sys.maxint / 2): self.table = table_conversion self.used = used_cells self.start = block_start self.end = block_end self.complete_block = complete_block self.max_title_row = min(self.end[0], self.start[0] + int(max_title_rows)) self.flags = flags if flags is not None else {} self.units = units if units is not None else {} self.worksheet = worksheet validator = BlockValidator(self.table, self.worksheet, self.flags, self.used, self.start, self.end, complete_block=self.complete_block, max_title_rows=max_title_rows) if not validator.validate_block(): raise InvalidBlockError()
1,101,186
Converts the block into row titled elements. These elements are copied into the return table, which can be much longer than the original block. Args: add_units: Indicates if units should be appended to each row item. Returns: A row-titled table representing the data in the block.
def convert_to_row_table(self, add_units=True): rtable = [] if add_units: relavent_units = self.get_relavent_units() # Create a row for each data element for row_index in range(self.start[0], self.end[0]): for column_index in range(self.start[1], self.end[1]): cell = self.table[row_index][column_index] if cell != None and isinstance(cell, (int, float, long)): titles = self._find_titles(row_index, column_index) titles.append(cell) if add_units: titles.append(relavent_units.get((row_index, column_index))) rtable.append(titles) # If we had all 'titles', just return the original block if not rtable: for row_index in range(self.start[0], self.end[0]): row = [] rtable.append(row) for column_index in range(self.start[1], self.end[1]): row.append(self.table[row_index][column_index]) if add_units: row.append(relavent_units.get((row_index, column_index))) return rtable
1,101,190
Normalize unicode strings tensor. Args: source: `Tensor` or `SparseTensor` of any shape, strings to normalize. form: Scalar value, name of normalization algorithm. One of `"NFD"`, `"NFC"`, `"NFKD"`, `"NFKC"`. name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input.
def transform_normalize_unicode(source, form, name=None): with ops.name_scope(name, "TransformNormalizeUnicode", [source]): source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string) if isinstance(source, tf.SparseTensor): result = tf.SparseTensor( indices=source.indices, values=ops_module.transform_normalize_unicode(source.values, form), dense_shape=source.dense_shape ) else: result = ops_module.transform_normalize_unicode(source, form) return result
1,101,238
Replace all substrings matching the RE2 patterns in `pattern` with the corresponding strings in `rewrite`. Args: source: `Tensor` or `SparseTensor` of any shape, source strings for replacing. pattern: List of RE2 patterns to search in source rewrite: List of strings to replace with. Should have same length as `pattern`. name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input.
def transform_regex_replace(source, pattern, rewrite, name=None): with ops.name_scope(name, "TransformRegexReplace", [source]): source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string) if isinstance(source, tf.SparseTensor): result = tf.SparseTensor( indices=source.indices, values=ops_module.transform_regex_replace(source.values, pattern, rewrite), dense_shape=source.dense_shape ) else: result = ops_module.transform_regex_replace(source, pattern, rewrite) return result
1,101,239
Replace all occurrences of the substrings in `needle` with the corresponding strings in `haystack`. Args: source: `Tensor` or `SparseTensor` of any shape, source strings for replacing. needle: List of strings to search in source haystack: List of strings to replace with. Should have same length as `needle`. name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input.
def transform_string_replace(source, needle, haystack, name=None): with ops.name_scope(name, "TransformStringReplace", [source]): source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string) if isinstance(source, tf.SparseTensor): result = tf.SparseTensor( indices=source.indices, values=ops_module.transform_string_replace(source.values, needle, haystack), dense_shape=source.dense_shape ) else: result = ops_module.transform_string_replace(source, needle, haystack) return result
1,101,240
Wrap source strings with "left" and "right" strings Args: source: `Tensor` or `SparseTensor` of any shape, strings to wrap. left: Scalar string to add at the beginning right: Scalar string to add at the end name: A name for the operation (optional). Returns: `Tensor` or `SparseTensor` of same shape and size as input.
def transform_wrap_with(source, left, right, name=None): with ops.name_scope(name, "TransformWrapWith", [source]): source = convert_to_tensor_or_sparse_tensor(source, dtype=tf.string) if isinstance(source, tf.SparseTensor): result = tf.SparseTensor( indices=source.indices, values=ops_module.transform_wrap_with(source.values, left, right), dense_shape=source.dense_shape ) else: result = ops_module.transform_wrap_with(source, left, right) return result
1,101,241
Initialize the RPC client. Parameters: addr (str): - a string of the form `tcp://xxx:xxx@xxx:xxx` loop (Optional[asyncio.AbstractEventLoop]): - the event loop debug (bool): - whether to run in debug mode, defaults to False compreser (Optional[str]): - which compression tool, if any, to use for transmitted data, defaults to None (no compression) heart_beat (Optional[int]): - whether to use a heartbeat to keep the connection from expiring, defaults to None (no heartbeat).
def __init__(self, addr: str, loop: Optional[asyncio.AbstractEventLoop]=None, debug: bool=False, compreser: Optional[str]=None, heart_beat: Optional[int]=None): pas = urlparse(addr) if pas.scheme != "tcp": raise abort(505, "unsupported scheme for this protocol") # public self.username = pas.username self.password = pas.password self.hostname = pas.hostname self.port = pas.port self.loop = loop or asyncio.get_event_loop() self.debug = debug if compreser is not None: _compreser = self.COMPRESERS.get(compreser) if _compreser is not None: self.compreser = _compreser else: raise RuntimeError("compreser unsupport") else: self.compreser = None self.heart_beat = heart_beat self.closed = True self.reader = None self.writer = None self.tasks = {} self.remote_info = None # protected self._gens_queue = {} self._login_fut = None self._response_task = None self._heartbeat_task = None if self.debug is True: self.loop.set_debug(True)
1,101,373
Handle 500~599 status codes by raising the corresponding exception. Parameters: (code): - the response status code Return: (bool): - True for known types, False otherwise Raise: (ServerException): - raised when the response is a service exception
def _server_error_handler(self, code: int): if code == 501: self._login_fut.set_result(False) else: self.clean() raise abort(code) return True
1,101,380
Handle 200~399 status codes by setting the result for the corresponding response. Parameters: (response): - the response as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
def _method_response_handler(self, response: Dict[str, Any]): code = response.get("CODE") if code in (200, 300): self._result_handler(response) else: asyncio.ensure_future(self._gen_result_handler(response))
1,101,382
Handle 100~199 status codes, acting on the different server responses. Parameters: (response): - the response as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
def _server_response_handler(self, response: Dict[str, Any]): code = response.get("CODE") if code == 100: if self.debug: print("auth succeed") self._login_fut.set_result(response) if code == 101: if self.debug: print('pong') return True
1,101,383
Handle a result response. Parses out the result and sets it on the Future object for the task. Parameters: (response): - the response as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
def _result_handler(self, response: Dict[str, Any]): res = response.get("MESSAGE") ID = res.get("ID") result = res.get("RESULT") fut = self.tasks.get(ID) fut.set_result(result) return True
1,101,384
Handle streaming result responses. + On a response with status code 201 or 301, the result of the Future for the matching ID in tasks is set to a wrapping async generator, and an async queue is created for this ID, stored in `_gens_queue[ID]`, for passing results + On a response with status code 202, one result is put into the queue for the matching ID + On the terminating status code 206, a `StopAsyncIteration` object is put into the result queue for the matching ID to end the async iterator Parameters: (response): - the response as a Python dict Return: (bool): - strictly speaking, returns True as long as no error occurred
async def _gen_result_handler(self, response: Dict[str, Any]): code = response.get("CODE") res = response.get("MESSAGE") ID = res.get("ID") if code in (201, 301): ait = self._wrap_gen(ID) self.tasks.get(ID).set_result(ait) self._gens_queue[ID] = asyncio.Queue() if code == 202: result = res.get('RESULT') await self._gens_queue[ID].put(result) if code == 206: await self._gens_queue[ID].put(StopAsyncIteration()) return True
1,101,385
Async iterator wrapper. Parameters: ID (str): - the task ID Yield: (Any): - results fetched from the async iterator's result queue Raise: (StopAsyncIteration): - raised when the async iterator terminates
async def _wrap_gen(self, ID: str): while True: result = await self._gens_queue[ID].get() if isinstance(result, StopAsyncIteration): del self._gens_queue[ID] break else: yield result
1,101,386
Wrap the call's ID, method name, and arguments into request data. Parameters: ID (str): - the task ID methodname (str): - the name of the method to call returnable (bool): - whether a result is required args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (Dict[str, Any]): - the request as a Python dict
def _make_query(self, ID: str, methodname: str, returnable: bool, *args: Any, **kwargs: Any): query = { "MPRPC": self.VERSION, "ID": ID, "METHOD": methodname, "RETURN": returnable, "ARGS": args, "KWARGS": kwargs } if self.debug: print(query) return query
1,101,387
Wrap the call's ID, method name, and arguments into request data, encode it into a byte string, and send it. Parameters: ID (str): - the task ID methodname (str): - the name of the method to call returnable (bool): - whether a result is required args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (bool): - strictly speaking, returns True as long as no error occurred
def send_query(self, ID, methodname, returnable, *args, **kwargs): query = self._make_query(ID, methodname, returnable, *args, **kwargs) self._send_query(query) self.tasks[ID] = self.loop.create_future() return True
1,101,388
Call without requiring a result; the result is fetched later via the system method getresult. Parameters: methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method
def delay(self, methodname, *args, **kwargs): ID = str(uuid.uuid4()) self.send_query(ID, methodname, False, *args, **kwargs) return ID
1,101,389
Wrap the call's ID, method name, and arguments into request data, encode it into a byte string, send it, and create a placeholder Future. Parameters: ID (str): - the task ID methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (asyncio.Future): - the Future object for the given ID
def _async_query(self, ID, methodname, *args, **kwargs): self.send_query(ID, methodname, True, *args, **kwargs) task = self.tasks[ID] return task
1,101,390
Asynchronously call a remote function. Creates an ID for the call, wraps the method name and arguments into request data, encodes it into a byte string, sends it, and creates a placeholder Future. Parameters: methodname (str): - the name of the method to call args (Any): - positional arguments for the method kwargs (Any): - keyword arguments for the method Return: (asyncio.Future): - the Future object for the given ID
def async_query(self, methodname, *args, **kwargs): ID = str(uuid.uuid4()) task = self._async_query(ID=ID, methodname=methodname, *args, **kwargs) return task
1,101,391
Check whether the contents of `dirname` (optionally restricted by `glob`) have changed since the checksum stored in `filename`. Args: dirname: directory to measure filename: filename to store checksum args: optional namespace; if it has a truthy `verbose` attribute the digest is printed glob: optional glob pattern limiting which files are digested
def changed(dirname, filename='.md5', args=None, glob=None): root = Path(dirname) if not root.exists(): # if dirname doesn't exist it is changed (by definition) return True cachefile = root / filename current_digest = cachefile.open().read() if cachefile.exists() else "" _digest = digest(dirname, glob=glob) if args and args.verbose: # pragma: nocover print("md5:", _digest) has_changed = current_digest != _digest if has_changed: with open(os.path.join(dirname, filename), 'w') as fp: fp.write(_digest) return has_changed
1,101,921
Calculates the classification error rate for an N-class classification problem Parameters: prediction (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing your prediction true_labels (numpy.ndarray): A 1D :py:class:`numpy.ndarray` containing the ground truth labels for the input array, organized in the same order.
def CER(prediction, true_labels): errors = (prediction != true_labels).sum() return float(errors)/len(prediction)
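For instance, one wrong label out of four gives an error rate of 0.25:
>>> import numpy as np
>>> CER(np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0]))
0.25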
1,102,057
Initialise the store. Args: host (string): The host of your api, e.g. http://localhost:8000 namespace (string, optional): An additional name space to append to the host, e.g. api/v1 cache (:class:`cinder_data.cache.Cache`, optional): An instance of your chosen caching system that must adhere to the :class:`cinder_data.cache.Cache` interface.
def __init__(self, host, namespace='', cache=None): super(Store, self).__init__() self._host = host self._namespace = namespace self._cache = cache
1,102,264
Return an instance of model_class from the API or the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. reload (bool, optional): Don't return the cached version if reload==True. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
def find_record(self, model_class, record_id, reload=False): cached_model = self.peek_record(model_class, record_id) if cached_model is not None and reload is False: return cached_model else: return self._get_record(model_class, record_id)
1,102,265
Return an instance of the model_class from the cache if it is present. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
def peek_record(self, model_class, record_id): if self._cache: return self._cache.get_record(model_class.__name__, record_id) else: return None
1,102,266
Return a list of models from the API and cache the result. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. params (dict, optional): Query parameters for the API call. Returns: list: A list of instances of your model_class or an empty list.
def find_all(self, model_class, params={}): url = '{host}/{namespace}/{model}{params}'.format( host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), params=self._build_param_string(params) ) data = self._get_json(url)['data'] fresh_models = [] for item in data: fresh_model = model_class(item['attributes']) fresh_model.id = item['id'] fresh_model.validate() fresh_models.append(fresh_model) if self._cache is not None: self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model) return fresh_models
1,102,267
Return a list of models from the local cache. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. Returns: list: A list of instances of your model_class or an empty list.
def peek_all(self, model_class): if self._cache: return self._cache.get_records(model_class.__name__) else: return []
1,102,268
Get a single record from the API. Args: model_class (:class:`cinder_data.model.CinderModel`): A subclass of :class:`cinder_data.model.CinderModel` of your chosen model. record_id (int): The id of the record requested. Returns: :class:`cinder_data.model.CinderModel`: An instance of model_class or None.
def _get_record(self, model_class, record_id): url = '{host}/{namespace}/{model}/{id}'.format( host=self._host, namespace=self._namespace, model=self._translate_name(model_class.__name__), id=record_id ) data = self._get_json(url)['data'] fresh_model = model_class(data['attributes']) fresh_model.id = data['id'] fresh_model.validate() if self._cache is not None: self._cache.set_record(model_class.__name__, fresh_model.id, fresh_model) return fresh_model
1,102,269
Translate the class name to the API endpoint. For example, Car would become cars, FastCar would become fast-cars. Args: name (string): Camel case name (singular) Returns: string: A pluralized, dasherized string.
def _translate_name(name): underscored = inflection.underscore(name) dasherized = inflection.dasherize(underscored) words = dasherized.split('-') last_word = words.pop() words.append(inflection.pluralize(last_word)) return '-'.join(words)
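Doctest-style examples (assuming the method is exposed statically on Store, as the missing self suggests):
>>> Store._translate_name('FastCar')
'fast-cars'
>>> Store._translate_name('Person')
'people'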
1,102,270
Build query params string from a dictionary. Args: params (dict): A dictionary of params Returns: string: A valid url query params string.
def _build_param_string(params): pairs = [] for key, value in params.iteritems(): if value is None: value = '' pairs.append('{0}={1}'.format(key, value)) if len(pairs) > 0: return '?{0}'.format('&'.join(pairs)) return ''
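For example (single-key dicts shown, since Python 2 dict ordering makes multi-key output order unstable):
>>> Store._build_param_string({'page': 2})
'?page=2'
>>> Store._build_param_string({})
''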
1,102,271
Parse ISO-8601 values from JSON databases. See :class:`json.JSONDecoder` Args: __obj: Object to decode
def json_using_iso8601(__obj: Dict) -> Dict: for key, value in __obj.items(): with suppress(TypeError, ValueError): __obj[key] = parse_datetime(value) with suppress(TypeError, ValueError): __obj[key] = parse_delta(value) return __obj
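A usage sketch wiring the hook into json.loads; parse_datetime and parse_delta are the companion helpers defined earlier in this collection:
import json
doc = json.loads('{"start": "2018-02-03T10:00:00Z", "length": "PT4H30M"}',
                 object_hook=json_using_iso8601)
# doc['start'] is now a timezone-aware datetime; doc['length'] is a timedelta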
1,102,296
Execute function when module is run directly. Note: This allows fall through for importing modules that use it. Args: __func: Function to run
def entry_point(__func: Callable) -> Callable: if __func.__module__ == '__main__': import sys sys.exit(__func()) else: return __func
1,102,322
Find callable for the specified URL path and HTTP method. Args: path (:obj:`str`): URL path to match method (:obj:`str`): HTTP method Note: A trailing '/' is always assumed in the path.
def find_call(self, path, method): if not path.endswith('/'): path += '/' path = path.split('/')[1:] return self._recursive_route_match(self._routes, path, method, [])
1,102,499
Calculates the value of a percent of a number, e.g. 5% of 20 is 1. Args: percent (float): The percent of a number whole (float): The whole of the number Returns: float: The value of a percent Example: >>> percent_of(25, 100) 25.0 >>> percent_of(5, 20) 1.0
def percent_of(percent, whole): percent = float(percent) whole = float(whole) return (percent * whole) / 100
1,102,648
Name of the root directory is used as ``<packageid>`` in ``info.xml``. This function makes sure, that :func:`os.path.basename` doesn't return blank string in case that there is `/` at the end of the `path`. Args: path (str): Path to the root directory. Returns: str: Basename of the `path`.
def _path_to_id(path): if path.endswith("/"): path = path[:-1] return os.path.basename(path)
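For example:
>>> _path_to_id('/tmp/packages/foo_pkg/')
'foo_pkg'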
1,102,685
Calculate size of all files in `path`. Args: path (str): Path to the directory. Returns: int: Size of the directory in bytes.
def _calc_dir_size(path): dir_size = 0 for (root, dirs, files) in os.walk(path): for fn in files: full_fn = os.path.join(root, fn) dir_size += os.path.getsize(full_fn) return dir_size
1,102,686
Return absolute `path` relative to `root_dir`. When `path` == ``/home/xex/somefile.txt`` and `root_dir` == ``/home``, returned path will be ``/xex/somefile.txt``. Args: path (str): Absolute path beginning in `root_dir`. root_dir (str): Absolute path containing `path` argument. Returns: str: Local `path` when `root_dir` is considered as root of FS.
def _get_localized_fn(path, root_dir): local_fn = path if path.startswith(root_dir): local_fn = path.replace(root_dir, "", 1) if not local_fn.startswith("/"): return "/" + local_fn return local_fn
1,102,687
Copy a file to the repository Args: source: Absolute path to the source file, or a file-like object rel_path: path relative to the root of the repository
def put_key(self, source, rel_path): k = self._get_boto_key(rel_path) try: k.set_contents_from_file(source) except AttributeError: if os.path.getsize(source) > 4.8 * 1024 * 1024 * 1024: # Need to do multi-part uploads here k.set_contents_from_filename(source) else: k.set_contents_from_filename(source)
1,102,785
Default serializer factory. Arguments: exposes (iterable): attributes to be serialized. Returns: callable: serializer (`poke` routine).
def poke(exposes): def _poke(store, objname, obj, container, visited=None, _stack=None): try: sub_container = store.newContainer(objname, obj, container) except (SystemExit, KeyboardInterrupt): raise except: raise ValueError('generic poke not supported by store') #_stack = _add_to_stack(_stack, objname) for iobjname in exposes: try: iobj = getattr(obj, iobjname) except AttributeError: pass else: store.poke(iobjname, iobj, sub_container, visited=visited, \ _stack=_stack) return _poke
1,102,916
Autoserializer factory. Works best in Python 3. Arguments: python_type (type): type constructor. exposes (iterable): sequence of attributes. Returns: callable: deserializer (`peek` routine).
def default_peek(python_type, exposes): with_args = False make = python_type try: make() except (SystemExit, KeyboardInterrupt): raise except: make = lambda: python_type.__new__(python_type) try: make() except (SystemExit, KeyboardInterrupt): raise except: make = lambda args: python_type.__new__(python_type, *args) with_args = True def missing(attr): return AttributeError("can't set attribute '{}' ({})".format(attr, python_type)) if with_args: def peek(store, container, _stack=None): state = [] for attr in exposes: # force order instead of iterating over `container` #print((attr, attr in container)) # debugging if attr in container: state.append(store.peek(attr, container, _stack=_stack)) else: state.append(None) return make(state) elif '__dict__' in exposes: def peek(store, container, _stack=None): obj = make() for attr in container: val = store.peek(attr, container, _stack=_stack) try: setattr(obj, attr, val) except AttributeError: raise missing(attr) return obj else: def peek(store, container, _stack=None): obj = make() for attr in exposes: # force order instead of iterating over `container` #print((attr, attr in container)) # debugging if attr in container: val = store.peek(attr, container, _stack=_stack) else: val = None try: setattr(obj, attr, val) except AttributeError: raise missing(attr) return obj return peek
1,102,918
Deserialize all the attributes available in the container and pass them in the same order as they come in the container. This is a factory function; returns the actual `peek` routine. Arguments: init: type constructor. Returns: callable: deserializer (`peek` routine).
def unsafe_peek(init): def peek(store, container, _stack=None): return init(*[ store.peek(attr, container, _stack=_stack) for attr in container ]) return peek
1,102,919
Make datatypes passing keyworded arguments to the constructor. This is a factory function; returns the actual `peek` routine. Arguments: init (callable): type constructor. args (iterable): arguments NOT to be keyworded; order does matter. Returns: callable: deserializer (`peek` routine). All the peeked attributes that are not referenced in `args` are passed to `init` as keyworded arguments.
def peek_with_kwargs(init, args=[]): def peek(store, container, _stack=None): return init(\ *[ store.peek(attr, container, _stack=_stack) for attr in args ], \ **dict([ (attr, store.peek(attr, container, _stack=_stack)) \ for attr in container if attr not in args ])) return peek
1,102,920