Dataset schema: docstring (string, lengths 52–499), function (string, lengths 67–35.2k), __index_level_0__ (int64, range 52.6k–1.16M).
Initialize wildcard route. Arguments: pattern (str): Pattern associated with the route. callback (callable): Route handler.
def __init__(self, pattern, callback):
    self._re = []
    self._wildcards = []
    for token in WildcardRoute.tokens(pattern):
        if token and token.startswith('<') and token.endswith('>'):
            w = Wildcard(token)
            self._wildcards.append(w)
            self._re.append(w.regex())
        else:
            self._re.append(re.escape(token))
    self._re = re.compile('^' + ''.join(self._re) + '$')
    self._callback = callback
1,048,530
Return route handler with arguments if path matches this route. Arguments: path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if the route does not match the path.
def match(self, path):
    match = self._re.search(path)
    if match is None:
        return None
    args = []
    kwargs = {}
    for i, wildcard in enumerate(self._wildcards):
        if wildcard.name == '!':
            continue
        value = wildcard.value(match.groups()[i])
        if not wildcard.name:
            args.append(value)
        else:
            kwargs[wildcard.name] = value
    return self._callback, args, kwargs
1,048,531
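A minimal usage sketch for the two methods above. The '<name>' and '<id:int>' wildcard syntax follows the Wildcard spec format shown in the next record; the handler and pattern here are hypothetical:

# Hypothetical handler; WildcardRoute and Wildcard are the classes above.
def show_user(id, name):
    return 'user #%d: %s' % (id, name)

route = WildcardRoute('/user/<name>/<id:int>', show_user)
matched = route.match('/user/alice/42')
if matched is not None:
    callback, args, kwargs = matched
    print(callback(*args, **kwargs))  # kwargs == {'name': 'alice', 'id': 42}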
Initialize wildcard definition. Arguments: spec (str): An angle-bracket delimited wildcard specification.
def __init__(self, spec):
    # Split '<foo:int>' into ['foo', 'int']
    tokens = spec[1:-1].split(':', 1)
    if len(tokens) == 1:
        # Split '<foo>' into ['foo', '']
        tokens.append('')
    self.name, self._type = tokens
    if not self._type:
        self._type = 'str'
    if Wildcard._name_re.search(self.name) is None:
        raise RouteError('Invalid wildcard name {!r} in {!r}'
                         .format(self.name, spec))
    if self._type not in Wildcard._types_re:
        raise RouteError('Invalid wildcard type {!r} in {!r}'
                         .format(self._type, spec))
1,048,532
Initialize regular expression route. Arguments: pattern (str): Pattern associated with the route. callback (callable): Route handler.
def __init__(self, pattern, callback):
    self._re = re.compile(pattern)
    self._callback = callback
1,048,533
Return route handler with arguments if path matches this route. Arguments: path (str): Request path Returns: tuple or None: A tuple of three items: 1. Route handler (callable) 2. Positional arguments (list) 3. Keyword arguments (dict) ``None`` if the route does not match the path.
def match(self, path):
    match = self._re.search(path)
    if match is None:
        return None
    kwargs_indexes = match.re.groupindex.values()
    args_indexes = [i for i in range(1, match.re.groups + 1)
                    if i not in kwargs_indexes]
    args = [match.group(i) for i in args_indexes]
    kwargs = {}
    for name, index in match.re.groupindex.items():
        kwargs[name] = match.group(index)
    return self._callback, args, kwargs
1,048,534
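A short sketch of how the regex route behaves: named groups become keyword arguments, unnamed groups become positional ones. The pattern and handler are illustrative:

def show_post(post_id, year=None):
    return (year, post_id)

route = RegexRoute(r'^/post/(?P<year>\d{4})/(\d+)$', show_post)
callback, args, kwargs = route.match('/post/2024/7')
callback(*args, **kwargs)  # -> ('2024', '7'); args == ['7'], kwargs == {'year': '2024'}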
Initialize the current request object. Arguments: environ (dict): Dictionary of environment variables.
def __init__(self, environ):
    self.environ = environ
    self.method = environ.get('REQUEST_METHOD', 'GET')
    self.path = environ.get('PATH_INFO', '/')
    if not self.path:
        self.path = '/'
    self.query = MultiDict()
    self.form = MultiDict()
    self.cookies = MultiDict()
    if 'QUERY_STRING' in environ:
        for k, v in urllib.parse.parse_qsl(environ['QUERY_STRING']):
            self.query[k] = v
    if 'wsgi.input' in environ:
        fs = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
        for k in fs:
            for v in fs.getlist(k):
                self.form[k] = v
    if 'HTTP_COOKIE' in environ:
        cookies = http.cookies.SimpleCookie(environ['HTTP_COOKIE'])
        for c in cookies.values():
            self.cookies[c.key] = c.value
1,048,535
Initialize the current response object. Arguments: start_response_callable (callable): Callable that starts response.
def __init__(self, start_response_callable):
    self.start = start_response_callable
    self.status = 200
    self.media_type = 'text/html'
    self.charset = 'UTF-8'
    self._headers = []
    self.body = None
    self.state = {}
1,048,536
Add an HTTP header to response object. Arguments: name (str): HTTP header field name value (str): HTTP header field value
def add_header(self, name, value):
    if value is not None:
        self._headers.append((name, value))
1,048,538
Add a Set-Cookie header to response object. For a description of cookie attribute values, see https://docs.python.org/3/library/http.cookies.html#http.cookies.Morsel. Arguments: name (str): Name of the cookie value (str): Value of the cookie attrs (dict): Dictionary with cookie attribute keys and values.
def set_cookie(self, name, value, attrs=None):
    cookie = http.cookies.SimpleCookie()
    cookie[name] = value
    # Default attrs to None to avoid a shared mutable default argument,
    # and use a distinct loop variable so the `value` parameter is not shadowed
    for key, attr_value in (attrs or {}).items():
        cookie[name][key] = attr_value
    self.add_header('Set-Cookie', cookie[name].OutputString())
1,048,539
Adds value to the list of values for the specified key. Arguments: key (object): Key value (object): Value
def __setitem__(self, key, value):
    if key not in self.data:
        self.data[key] = [value]
    else:
        self.data[key].append(value)
1,048,542
Return the list of all values for the specified key. Arguments: key (object): Key default (list): Default value to return if the key does not exist, defaults to ``[]``, i.e. an empty list. Returns: list: List of all values for the specified key if the key exists, ``default`` otherwise.
def getall(self, key, default=[]):
    return self.data[key] if key in self.data else default
1,048,543
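A quick usage sketch of the two MultiDict methods above, assuming the class initializes self.data to an empty dict (not shown in this excerpt):

md = MultiDict()          # assumes __init__ creates an empty self.data dict
md['tag'] = 'python'
md['tag'] = 'wsgi'
md.getall('tag')          # -> ['python', 'wsgi']
md.getall('missing')      # -> []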
Split a line on the closest space, or break the last word with '-'. Args: what (str): text to split one line off of. indent (str): will prepend this indent to the split line, taking it into account in the column count. cols (int): maximum length of the split line. Returns: tuple(str, str): rest of the text and the split line, in that order. Raises: ValueError: when the indent is longer than cols, or the cols param is too small.
def split_line(what, indent='', cols=79):
    if len(indent) > cols:
        raise ValueError("The indent can't be longer than cols.")
    if cols < 2:
        raise ValueError(
            "The cols can't be smaller than 2 (a char plus a possible '-')"
        )
    what = indent + what.lstrip()
    if len(what) <= cols:
        what, new_line = '', what
    else:
        try:
            closest_space = what[:cols].rindex(' ')
        except ValueError:
            closest_space = -1
        if closest_space > len(indent):
            what, new_line = (
                what[closest_space:],
                what[:closest_space],
            )
        elif what[cols] == ' ':
            what, new_line = (
                what[cols:],
                what[:cols],
            )
        else:
            what, new_line = what[cols - 1:], what[:cols - 1] + '-'
    return what.lstrip(), new_line.rstrip()
1,048,708
Wrap the given text to the columns, prepending the indent to each line. Args: what (str): text to wrap. indent (str): indentation to use. cols (int): columns to wrap to. Returns: str: Wrapped text
def fit_to_cols(what, indent='', cols=79):
    lines = []
    while what:
        what, next_line = split_line(
            what=what,
            cols=cols,
            indent=indent,
        )
        lines.append(next_line)
    return '\n'.join(lines)
1,048,709
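A small usage sketch of the wrapper above; input text and indent are illustrative:

print(fit_to_cols('the quick brown fox jumps over the lazy dog',
                  indent='> ', cols=20))
# > the quick brown
# > fox jumps over
# > the lazy dog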
Create the Trackr API interface object. Args: email (str): Trackr account email address. password (str): Trackr account password.
def __init__(self, email, password):
    self.email = email
    self.password = password
    self.token = None
    self.last_api_call = None
    self.state = []
    # get a token
    self.authenticate()
    # get the latest state from the API
    self.update_state_from_api()
1,048,867
Generate a Band object given band metadata. Args: band (dict): dictionary containing metadata for a given band Returns: Band: the loaded Band object
def GenerateBand(self, band, meta_only=False, cast=False):
    # Read the band data and add it to dictionary
    if not meta_only:
        fname = band.get('file_name')
        data = self.ReadTif('%s/%s' % (os.path.dirname(self.filename), fname))
        # band['data'] = data
        # TODO: data is not a properties object so do not set yet

    def FixBitmap(d):
        p = d.get('bitmap_description')
        if p:
            lis = p.get('bit')
            bm = dict()
            # Fix bitmap_description from list of dicts to one dict
            for i in lis:
                key = i['num']
                value = i['text']
                bm[key] = value
            del d['bitmap_description']
            d['bitmap_description'] = bm
        return d

    band = SetProperties(Band, FixBitmap(self.CleanDict(band)))
    if not meta_only:
        if cast:
            # cast as floats and fill bad values with nans
            data = data.astype(np.float32)
            data[data == band.fill_value] = -9999
            if band.valid_range is not None:
                data[data < band.valid_range.min] = -9999
                data[data > band.valid_range.max] = -9999
            data[data == -9999] = np.nan
        else:
            data = np.ma.masked_where(data == band.fill_value, data)
            if band.valid_range is not None:
                data = np.ma.masked_where(data < band.valid_range.min, data)
                data = np.ma.masked_where(data > band.valid_range.max, data)
        # Flip y axis if requested
        if self.yflip:
            data = np.flip(data, 0)
        band.data = data
    if not meta_only:
        band.validate()
    return band
1,049,866
Turn the running process into a proper daemon according to PEP3143. Args: pidfile -- The pidfile to create.
def daemonize(pidfile=None):
    # Prevent core dumps
    resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
    # Change working directory
    os.chdir("/")
    # Change file creation mask
    os.umask(0)
    # Detach process context: do double fork
    pid = os.fork()
    if pid > 0:
        os._exit(0)
    os.setsid()
    pid = os.fork()
    if pid > 0:
        os._exit(0)

    # Create signal handler for SIGTERM
    def terminate(signal, stack_frame):
        msg = 'Terminating on signal {}'.format(signal)
        logger.info(msg)
        raise SystemExit(msg)
    signal.signal(signal.SIGTERM, terminate)
    # Redirect input/output streams
    streams = [sys.stdin, sys.stdout, sys.stderr]
    for stream in streams:
        devnull = os.open(os.devnull, os.O_RDWR)
        os.dup2(devnull, stream.fileno())
    # Close file descriptors
    for fd in [stream.fileno() for stream in streams]:
        try:
            os.close(fd)
        except OSError as err:
            if err.errno == errno.EBADF:
                # File descriptor was not open
                pass
    # Create pidfile
    if pidfile is None or pidfile.strip() == '':
        logger.debug('Empty pidfile set')
    else:
        pid = os.getpid()
        try:
            with open(pidfile, 'w') as f:
                f.write('{}\n'.format(pid))
        except EnvironmentError:
            logger.error('Failed to create pidfile at {}'.format(pidfile))

        def remove_pid_file():
            os.remove(pidfile)
        atexit.register(remove_pid_file)
    logger.debug('Process daemonized')
1,050,257
Parse the value of a config option and convert it to a dictionary. The configuration allows lines formatted like: foo = Bar:1,Baz,Flub:0.75 This gets converted to a dictionary: foo = { 'Bar': 1, 'Baz': 0, 'Flub': 0.75 } Args: option_value -- The config string to parse.
def config_str2dict(option_value):
    # Note: the loop variable must be split, not an undefined `pair`;
    # also avoid shadowing the builtin `dict`
    result = {}
    for pair in option_value.split(','):
        if ':' in pair:
            key, value = pair.split(':')
            value = float(value)
        else:
            key, value = pair, 0
        result[key] = value
    return result
1,050,258
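A quick check of the parser above against the docstring's example. Note that values given after ':' are parsed as floats, so 'Bar' comes back as 1.0 rather than the int 1 shown in the docstring:

config_str2dict('Bar:1,Baz,Flub:0.75')
# -> {'Bar': 1.0, 'Baz': 0, 'Flub': 0.75}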
Write a line of data to the server. Args: line -- A single line of data to write to the socket.
def _send(self, line):
    if not line.endswith('\r\n'):
        if line.endswith('\n'):
            logger.debug('Fixing bare LF before sending data to socket')
            line = line[0:-1] + '\r\n'
        else:
            logger.debug(
                'Fixing missing CRLF before sending data to socket')
            line = line + '\r\n'
    logger.debug('Client sent: ' + line.rstrip())
    self._socket.send(line)
1,050,294
Peek at the data in the server response. Peeking should only be done when the response can be predicted. Make sure that the socket will not block by requesting too much data from it while peeking. Args: chars -- the number of characters to peek.
def _peek(self, chars=1):
    line = self._socket.recv(chars, socket.MSG_PEEK)
    logger.debug('Server sent (peek): ' + line.rstrip())
    return line
1,050,296
Retrieves the release notes, from the RELEASE_NOTES file (if in a package) or generates it from the git history. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: str: release notes Raises: RuntimeError: If the release notes could not be retrieved
def get_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    releasenotes = ''
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    releasenotes_file = os.path.join(project_dir, 'RELEASE_NOTES')
    if os.path.exists(pkg_info_file) and os.path.exists(releasenotes_file):
        with open(releasenotes_file) as releasenotes_fd:
            releasenotes = releasenotes_fd.read()
    else:
        releasenotes = api.get_releasenotes(
            repo_path=project_dir,
            bugtracker_url=bugtracker_url,
        )
    return releasenotes
1,050,347
Creates the release notes file, if not in a package. Args: project_dir(str): Path to the git repo of the project. bugtracker_url(str): Url to the bug tracker for the issues. Returns: None Raises: RuntimeError: If the release notes could not be retrieved
def create_releasenotes(project_dir=os.curdir, bugtracker_url=''):
    pkg_info_file = os.path.join(project_dir, 'PKG-INFO')
    if os.path.exists(pkg_info_file):
        return
    with open('RELEASE_NOTES', 'wb') as releasenotes_fd:
        releasenotes_fd.write(
            get_releasenotes(
                project_dir=project_dir,
                bugtracker_url=bugtracker_url,
            ).encode('utf-8') + b'\n'
        )
1,050,350
Returns the set of fluents in the expression's scope. Args: expr: Expression object or nested tuple of Expressions. Returns: The set of fluents in the expression's scope.
def __get_scope(cls, expr: Union['Expression', Tuple]) -> Set[str]:
    scope = set()
    for i, atom in enumerate(expr):
        if isinstance(atom, Expression):
            scope.update(cls.__get_scope(atom._expr))
        elif type(atom) in [tuple, list]:
            scope.update(cls.__get_scope(atom))
        elif atom == 'pvar_expr':
            functor, params = expr[i + 1]
            arity = len(params) if params is not None else 0
            name = '{}/{}'.format(functor, arity)
            scope.add(name)
            break
    return scope
1,050,623
Copies a record and its fields, recurses for any field that is a Record. For records that have nested mutable fields, use copy.deepcopy. Args: record: A Record instance to be copied. **field_overrides: Fields and their values to override in the new copy. Returns: A copy of the given record with any fields overridden.
def CopyRecord(record, **field_overrides):
    fields = field_overrides
    for field in record.__slots__:
        if field in field_overrides:
            continue
        value = getattr(record, field)
        if isinstance(value, RecordClass):
            # Recurse for records.
            new_value = CopyRecord(value)
        else:
            new_value = copy.copy(value)
        fields[field] = new_value
    return type(record)(**fields)
1,050,942
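A minimal sketch of CopyRecord in use. The Point type is hypothetical; it assumes RecordClass subclasses declare their fields in __slots__ and accept them as keyword arguments, as CopyRecord itself requires:

class Point(RecordClass):   # hypothetical record type
    __slots__ = ('x', 'y')

p = Point(x=1, y=2)
q = CopyRecord(p, y=5)      # q.x == 1, q.y == 5; p is untouched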
Given a repo, add a tag for each major/feature version. Args: repo_path (str): path to the git repository to tag.
def tag_versions(repo_path):
    repo = dulwich.repo.Repo(repo_path)
    tags = get_tags(repo)
    maj_version = 0
    feat_version = 0
    fix_version = 0
    last_maj_version = 0
    last_feat_version = 0
    result = []
    for commit_sha, children in reversed(
        get_children_per_first_parent(repo_path).items()
    ):
        commit = get_repo_object(repo, commit_sha)
        maj_version, feat_version, fix_version = get_version(
            commit=commit,
            tags=tags,
            maj_version=maj_version,
            feat_version=feat_version,
            fix_version=fix_version,
            children=children,
        )
        if (
            last_maj_version != maj_version or
            last_feat_version != feat_version
        ):
            last_maj_version = maj_version
            last_feat_version = feat_version
            tag_name = 'refs/tags/v%d.%d' % (maj_version, feat_version)
            if ON_PYTHON3:
                repo[str.encode(tag_name)] = commit
            else:
                repo[tag_name] = commit
            result.append(
                'v%d.%d -> %s' % (maj_version, feat_version, commit_sha)
            )
    return '\n'.join(result)
1,051,588
Initialise data object Arguments: o multiplicon_file - location of iADHoRe multiplicon.txt o segment_file - location of iADHoRe segment.txt file o db_filename - location to write SQLite3 database (defaults to in-memory)
def __init__(self, multiplicon_file=None, segment_file=None,
             db_filename=":memory:"):
    # Attributes later populated in methods
    self._dbconn = None
    self._redundant_multiplicon_cache = None
    # Get arguments and initialise
    self._multiplicon_file = multiplicon_file
    self._segment_file = segment_file
    self._db_file = db_filename
    self._multiplicon_graph = nx.DiGraph()
    # Set up database
    self._dbsetup()
    # Load multiplicon and segment data into tree/SQL database
    self._parse_multiplicons()
    self._parse_segments()
1,051,645
Return a generator of the IDs of multiplicons found at leaves of the tree (i.e. from which no further multiplicons were derived). Arguments: o redundant - if true, report redundant multiplicons
def get_multiplicon_leaves(self, redundant=False):
    for node in self._multiplicon_graph.nodes():
        if not len(self._multiplicon_graph.out_edges(node)):
            if not self.is_redundant_multiplicon(node):
                yield node
            elif redundant:
                yield node
1,051,649
Return a generator of the IDs of multiplicons that are initial seeding 'pairs' in level 2 multiplicons. Arguments: o redundant - if true, report redundant multiplicons
def get_multiplicon_seeds(self, redundant=False):
    for node in self._multiplicon_graph.nodes():
        if not len(self._multiplicon_graph.in_edges(node)):
            if not self.is_redundant_multiplicon(node):
                yield node
            elif redundant:
                yield node
1,051,650
Return a generator of the IDs of multiplicons that are neither seeding 'pairs' in level 2 multiplicons, nor leaves. Arguments: o redundant - if true, report redundant multiplicons
def get_multiplicon_intermediates(self, redundant=False):
    for node in self._multiplicon_graph.nodes():
        if (len(self._multiplicon_graph.in_edges(node)) and
                len(self._multiplicon_graph.out_edges(node))):
            if not self.is_redundant_multiplicon(node):
                yield node
            elif redundant:
                yield node
1,051,651
Get a manifest file, parse and store it. Args: webfont_name (string): Webfont key name. Used to store manifest and potentially its parser error. webfont_settings (dict): Webfont settings (an item value from ``settings.ICOMOON_WEBFONTS``).
def get(self, webfont_name, webfont_settings):
    try:
        webfont_settings = extend_webfont_settings(webfont_settings)
    except IcomoonSettingsError as e:
        msg = "Invalid webfont settings for '{}': {}"
        self.errors[webfont_name] = msg.format(webfont_name, e.value)
        return

    filepath = os.path.join(webfont_settings['fontdir_path'],
                            self.manifest_filename)

    if os.path.exists(filepath):
        self.manifests[webfont_name] = self.parse_manifest(filepath)
    else:
        # hypothetical message text; the original string was elided
        msg = ("Manifest file for webfont '{name}' does not exist "
               "at: {filepath}")
        self.errors[webfont_name] = msg.format(name=webfont_name,
                                               filepath=filepath)
1,051,874
Store every defined webfont. Webfonts are stored sorted by name. Args: webfonts (dict): Dictionary of webfont settings from ``settings.ICOMOON_WEBFONTS``.
def fetch(self, webfonts):
    sorted_keys = sorted(webfonts.keys())
    for webfont_name in sorted_keys:
        self.get(webfont_name, webfonts[webfont_name])
1,051,875
Get an update from the specified service. Arguments: name (:py:class:`str`): The name of the service. service_map (:py:class:`dict`): A mapping of service names to :py:class:`flash.service.core.Service` instances. Returns: :py:class:`dict`: The updated data.
def update_service(name, service_map):
    if name in service_map:
        service = service_map[name]
        data = service.update()
        if not data:
            logger.warning('no data received for service: %s', name)
        else:
            data['service_name'] = service.service_name
            CACHE[name] = dict(data=data, updated=datetime.now())
    else:
        logger.warning('service not found: %s', name)
    if name in CACHE:
        return add_time(CACHE[name])
    return {}
1,051,917
Add a friendly update time to the supplied data. Arguments: data (:py:class:`dict`): The response data and its update time. Returns: :py:class:`dict`: The data with a friendly update time.
def add_time(data):
    payload = data['data']
    updated = data['updated'].date()
    if updated == date.today():
        payload['last_updated'] = data['updated'].strftime('today at %H:%M:%S')
    elif updated >= (date.today() - timedelta(days=1)):
        payload['last_updated'] = 'yesterday'
    elif updated >= (date.today() - timedelta(days=7)):
        payload['last_updated'] = updated.strftime('on %A')
    else:
        payload['last_updated'] = updated.strftime('%Y-%m-%d')
    return payload
1,051,918
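A quick sketch of the expected input/output shape for add_time; the payload content is illustrative:

from datetime import datetime
wrapped = {'data': {'value': 42}, 'updated': datetime.now()}
add_time(wrapped)
# -> {'value': 42, 'last_updated': 'today at 14:02:31'}  (time will vary)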
Initialize the BabelfyClient. Arguments: api_key -- key to connect the babelfy api Keyword arguments: params -- params for the api request
def __init__(self, api_key, params=None):
    self._api_key = api_key
    self._params = params or dict()
    self._data = list()
    self._entities = list()
    self._all_entities = list()
    self._merged_entities = list()
    self._all_merged_entities = list()
    self._text = None
1,051,961
Run PCA and calculate the cumulative fraction of variance Args: Y: phenotype values standardized: if True, phenotypes are standardized Returns: var: cumulative distribution of variance explained
def PC_varExplained(Y, standardized=True):
    # figuring out the number of latent factors
    if standardized:
        Y -= Y.mean(0)
        Y /= Y.std(0)
    covY = sp.cov(Y)
    S, U = linalg.eigh(covY + 1e-6 * sp.eye(covY.shape[0]))
    S = S[::-1]
    rv = np.array([S[0:i].sum() for i in range(1, S.shape[0])])
    rv /= S.sum()
    return rv
1,052,114
Split into windows using a sliding criterion. Args: size: window size step: moving step (default: 0.5*size) Returns: wnd_i: number of windows nSnps: vector of per-window number of SNPs
def splitGenoSlidingWindow(pos, out_file, size=5e4, step=None):
    if step is None:
        step = 0.5 * size
    chroms = SP.unique(pos[:, 0])
    wnd_i = 0
    wnd_file = csv.writer(open(out_file, 'w'), delimiter='\t')
    nSnps = []
    for chrom_i in chroms:
        Ichrom = pos[:, 0] == chrom_i
        idx_chrom_start = SP.where(Ichrom)[0][0]
        pos_chr = pos[Ichrom, 1]
        start = pos_chr.min()
        pos_chr_max = pos_chr.max()
        while 1:
            if start > pos_chr_max:
                break
            end = start + size
            Ir = (pos_chr >= start) * (pos_chr < end)
            _nSnps = Ir.sum()
            if _nSnps > 0:
                idx_wnd_start = idx_chrom_start + SP.where(Ir)[0][0]
                nSnps.append(_nSnps)
                line = SP.array(
                    [wnd_i, chrom_i, start, end, idx_wnd_start, _nSnps],
                    dtype=int)
                wnd_file.writerow(line)
                wnd_i += 1
            start += step
    nSnps = SP.array(nSnps)
    return wnd_i, nSnps
1,052,132
add fixed effect term to the model Args: F: sample design matrix for the fixed effect [N,K] A: trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P] Ftest: sample design matrix for test samples [Ntest,K]
def addFixedEffect(self, F=None, A=None, Ftest=None):
    if A is None:
        A = sp.eye(self.P)
    if F is None:
        F = sp.ones((self.N, 1))
        if self.Ntest is not None:
            Ftest = sp.ones((self.Ntest, 1))
    assert A.shape[1] == self.P, 'VarianceDecomposition:: A has incompatible shape'
    assert F.shape[0] == self.N, 'VarianceDecomposition:: F has incompatible shape'
    if Ftest is not None:
        assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'
        assert Ftest.shape[0] == self.Ntest, 'VarianceDecomposition:: Ftest has incompatible shape'
        assert Ftest.shape[1] == F.shape[1], 'VarianceDecomposition:: Ftest has incompatible shape'
    # add fixed effect
    self.sample_designs.append(F)
    self.sample_test_designs.append(Ftest)
    self.trait_designs.append(A)
    self._desync()
1,052,138
Return weights for fixed effect term term_i Args: term_i: fixed effect term index Returns: weights of the specified fixed effect term. The output will be a KxL matrix of weights, where K is F.shape[1] and L is A.shape[1] of the corresponding fixed effect term (L will always be 1 for single-trait analysis).
def getWeights(self, term_i=None):
    assert self.init, 'GP not initialised'
    if term_i is None:
        if self.gp.mean.n_terms == 1:
            term_i = 0
        else:
            print('VarianceDecomposition: Specify fixed effect term index')
    return self.gp.mean.B[term_i]
1,052,140
Return the estimated trait covariance matrix for term_i (or the total if term_i is None). To retrieve the matrix of correlation coefficients use \see getTraitCorrCoef Args: term_i: index of the random effect term for which to retrieve the covariance matrix Returns: estimated trait covariance
def getTraitCovar(self, term_i=None):
    if term_i is None:
        RV = sp.zeros((self.P, self.P))
        for term_i in range(self.n_randEffs):
            # pass term_i so each term's covariance is accumulated
            RV += self.getTraitCovarFun(term_i).K()
    else:
        assert term_i < self.n_randEffs, 'VarianceDecomposition:: specified term out of range'
        RV = self.getTraitCovarFun(term_i).K()
    return RV
1,052,141
Return the estimated trait correlation coefficient matrix for term_i (or the total if term_i is None). To retrieve the trait covariance matrix use \see getTraitCovar Args: term_i: index of the random effect term for which to retrieve the correlation coefficients Returns: estimated trait correlation coefficient matrix
def getTraitCorrCoef(self, term_i=None):
    cov = self.getTraitCovar(term_i)
    stds = sp.sqrt(cov.diagonal())[:, sp.newaxis]
    RV = cov / stds / stds.T
    return RV
1,052,142
Return the estimated variance components Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: variance components of all random effects on all phenotypes [P, n_randEffs matrix]
def getVarianceComps(self, univariance=False):
    RV = sp.zeros((self.P, self.n_randEffs))
    for term_i in range(self.n_randEffs):
        RV[:, term_i] = self.getTraitCovar(term_i).diagonal()
    if univariance:
        RV /= RV.sum(1)[:, sp.newaxis]
    return RV
1,052,143
Returns standard errors on trait covariances from term_i (for the covariance estimate \see getTraitCovar) Args: term_i: index of the term we are interested in
def getTraitCovarStdErrors(self, term_i):
    assert self.init, 'GP not initialised'
    assert self.fast == False, 'Not supported for fast implementation'
    if self.P == 1:
        out = (2 * self.getScales()[term_i])**2 * self._getLaplaceCovar()[term_i, term_i]
    else:
        C = self.vd.getTerm(term_i).getTraitCovar()
        n_params = C.getNumberParams()
        par_index = 0
        for term in range(term_i - 1):
            par_index += self.vd.getTerm(term_i).getNumberScales()
        Sigma1 = self._getLaplaceCovar()[par_index:(par_index + n_params), :][:, par_index:(par_index + n_params)]
        out = sp.zeros((self.P, self.P))
        for param_i in range(n_params):
            out += C.Kgrad_param(param_i)**2 * Sigma1[param_i, param_i]
            for param_j in range(param_i):
                out += 2 * abs(C.Kgrad_param(param_i) * C.Kgrad_param(param_j)) * Sigma1[param_i, param_j]
    out = sp.sqrt(out)
    return out
1,052,150
Return the standard errors on the estimated variance components (for variance component estimates \see getVarianceComps) Args: univariance: Boolean indicator, if True variance components are normalized to sum up to 1 for each trait Returns: standard errors on variance components [P, n_randEffs matrix]
def getVarianceCompStdErrors(self, univariance=False):
    RV = sp.zeros((self.P, self.n_randEffs))
    for term_i in range(self.n_randEffs):
        # RV[:,term_i] = self.getTraitCovarStdErrors(term_i).diagonal()
        RV[:, term_i] = self.getTraitCovarStdErrors(term_i)
    var = self.getVarianceComps()
    if univariance:
        RV /= var.sum(1)[:, sp.newaxis]
    return RV
1,052,151
predict the conditional mean (BLUP) Args: use_fixed: list of fixed effect indices to use for predictions use_random: list of random effect indices to use for predictions Returns: predictions (BLUP)
def predictPhenos(self, use_fixed=None, use_random=None):
    assert self.noisPos is not None, 'No noise element'
    assert self.init, 'GP not initialised'
    assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'
    # default: use all fixed/random effect terms, without clobbering
    # any term lists the caller passed in
    if use_fixed is None:
        use_fixed = list(range(self.n_fixedEffs))
    if use_random is None:
        use_random = list(range(self.n_randEffs))
    KiY = self.gp.agetKEffInvYCache()
    if self.fast == False:
        KiY = KiY.reshape(self.P, self.N).T
    Ypred = sp.zeros((self.Ntest, self.P))
    # predicting from random effects
    for term_i in use_random:
        if term_i != self.noisPos:
            Kstar = self.Kstar[term_i]
            if Kstar is None:
                warnings.warn('warning: random effect term %d not used for predictions as it has None cross covariance' % term_i)
                continue
            term = sp.dot(Kstar.T, KiY)
            if self.P > 1:
                C = self.getTraitCovar(term_i)
                term = sp.dot(term, C)
            else:
                term *= self.getVarianceComps()[0, term_i]
            Ypred += term
    # predicting from fixed effects
    weights = self.getWeights()
    w_i = 0
    for term_i in use_fixed:
        Fstar = self.Fstar[term_i]
        if Fstar is None:
            warnings.warn('warning: fixed effect term %d not used for predictions as it has None test sample design' % term_i)
            continue
        if self.P == 1:
            A = sp.eye(1)
        else:
            A = self.vd.getDesign(term_i)
        W = weights[w_i:w_i + A.shape[0], 0:1].T
        term = sp.dot(Fstar, sp.dot(W, A))
        w_i += A.shape[0]
        Ypred += term
    return Ypred
1,052,152
Internal function for parameter initialization estimate variance components and fixed effect using a linear mixed model with an intercept and 2 random effects (one is noise) Args: K: covariance matrix of the non-noise random effect term
def _getH2singleTrait(self, K, verbose=None):
    verbose = dlimix.getVerbose(verbose)
    # Fit single trait model
    varg = sp.zeros(self.P)
    varn = sp.zeros(self.P)
    fixed = sp.zeros((1, self.P))
    for p in range(self.P):
        y = self.Y[:, p:p + 1]
        # check for null (NaN) values
        I = sp.isnan(y[:, 0])
        if I.sum() > 0:
            y = y[~I, :]
            _K = K[~I, :][:, ~I]
        else:
            _K = copy.copy(K)
        lmm = dlimix.CLMM()
        lmm.setK(_K)
        lmm.setSNPs(sp.ones((y.shape[0], 1)))
        lmm.setPheno(y)
        lmm.setCovs(sp.zeros((y.shape[0], 1)))
        lmm.setVarcompApprox0(-20, 20, 1000)
        lmm.process()
        delta = sp.exp(lmm.getLdelta0()[0, 0])
        Vtot = sp.exp(lmm.getLSigma()[0, 0])
        varg[p] = Vtot
        varn[p] = delta * Vtot
        fixed[:, p] = lmm.getBetaSNP()
        if verbose:
            print(p)
    sth = {}
    sth['varg'] = varg
    sth['varn'] = varn
    sth['fixed'] = fixed
    return sth
1,052,154
Internal function for parameter initialization Uses 2 term single trait model to get covar params for initialization Args: termx: non-noise term terms that is used for initialization
def _getScalesDiag(self, termx=0):
    assert self.P > 1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'
    assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
    assert termx < self.n_randEffs - 1, 'VarianceDecomposition:: termx>=n_randEffs-1'
    assert self.trait_covar_type[self.noisPos] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
    assert self.trait_covar_type[termx] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
    scales = []
    res = self._getH2singleTrait(self.vd.getTerm(termx).getK())
    scaleg = sp.sqrt(res['varg'].mean())
    scalen = sp.sqrt(res['varn'].mean())
    for term_i in range(self.n_randEffs):
        if term_i == termx:
            _scales = scaleg * self.diag[term_i]
        elif term_i == self.noisPos:
            _scales = scalen * self.diag[term_i]
        else:
            _scales = 0. * self.diag[term_i]
        if self.jitter[term_i] > 0:
            _scales = sp.concatenate((_scales, sp.array([sp.sqrt(self.jitter[term_i])])))
        scales.append(_scales)
    return sp.concatenate(scales)
1,052,155
Correctly join tokens into sentences. Instead of always placing white-space between the tokens, it will distinguish the next symbol and *not* insert whitespace if it is a sentence symbol (e.g. '.' or '?'). Args: tokens: array of string tokens Returns: Joined sentences as one string
def join_tokens_to_sentences(tokens):
    text = ""
    for (entry, next_entry) in zip(tokens, tokens[1:]):
        text += entry
        if next_entry not in SENTENCE_STOPS:
            text += " "
    text += tokens[-1]
    return text
1,052,165
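A small check of the joiner above, assuming SENTENCE_STOPS contains at least '.' and '?':

join_tokens_to_sentences(['Hello', 'world', '.', 'How', 'are', 'you', '?'])
# -> 'Hello world. How are you?'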
Returns a sorted string of prefixes. Args: orig (str): Unsorted prefix characters. prefixes (str): Prefix characters, from highest-priv to lowest.
def sort_prefixes(orig, prefixes='@+'):
    new = ''
    for prefix in prefixes:
        if prefix in orig:
            new += prefix
    return new
1,052,216
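A one-line illustration of the priority ordering above:

sort_prefixes('+@')   # -> '@+'; op prefix sorts before voice
sort_prefixes('+')    # -> '+'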
Return a modelist. Args: params (list of str): Parameters from MODE event. mode_types (list): CHANMODES-like mode types. prefixes (str): PREFIX-like mode types.
def parse_modes(params, mode_types=None, prefixes=''):
    # we don't accept bare strings because we don't want to try to do
    # intelligent parameter splitting
    params = list(params)
    if params[0][0] not in '+-':
        raise Exception('first param must start with + or -')
    if mode_types is None:
        mode_types = ['', '', '', '']
    mode_string = params.pop(0)
    args = params
    assembled_modes = []
    direction = mode_string[0]
    for char in mode_string:
        if char in '+-':
            direction = char
            continue
        # a mode consumes a parameter only when one is actually available;
        # the len(args) guard is parenthesized to apply to every case
        if ((char in mode_types[0] or char in mode_types[1] or
                char in prefixes or
                (char in mode_types[2] and direction == '+')) and
                len(args)):
            value = args.pop(0)
        else:
            value = None
        assembled_modes.append([direction, char, value])
    return assembled_modes
1,052,217
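A sketch of the parser above on a typical channel MODE line; the mode_types and prefixes values are illustrative (CHANMODES-style: list modes, param-always modes, param-on-set modes, flag modes):

parse_modes(
    ['+ov-m', 'alice', 'bob'],
    mode_types=['b', 'k', 'l', 'imnpst'],
    prefixes='ov',
)
# -> [['+', 'o', 'alice'], ['+', 'v', 'bob'], ['-', 'm', None]]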
Initialize auth method with existing credentials. Args: credentials: OAuth2 credentials obtained via GAP OAuth2 library.
def __init__(self, credentials):
    if not has_httplib2:
        raise ImportError("No module named httplib2")
    super(GAPDecoratorAuthMethod, self).__init__()
    self._http = None
    self._credentials = credentials
    self._action_token = None
1,052,317
A Generic parser for arbitrary tags in a node. Parameters: - node: A node in the DOM. - pad: `int` (default: 0) If 0 the node data is not padded with newlines. If 1 it appends a newline after parsing the childNodes. If 2 it pads before and after the nodes are processed. Defaults to 0.
def generic_parse(self, node, pad=0):
    npiece = 0
    if pad:
        npiece = len(self.pieces)
        if pad == 2:
            self.add_text('\n')
    for n in node.childNodes:
        self.parse(n)
    if pad:
        if len(self.pieces) > npiece:
            self.add_text('\n')
1,052,326
Scp a remote file to local Args: remote_path (str) local_path (str)
def scp_file_remote_to_local(self, remote_path, local_path):
    scp_command = [
        'scp',
        '-o', 'StrictHostKeyChecking=no',
        '-i', self.browser_config.get('ssh_key_path'),
        '%s@%s:"%s"' % (
            self.browser_config.get('username'),
            self.get_ip(),
            remote_path
        ),
        local_path
    ]
    self.info_log(
        "executing command: %s" % ' '.join(scp_command)
    )
    p = Popen(scp_command)
    p.wait()
1,053,052
Execute a command on the node Args: command (str)
def execute_command(self, command):
    self.info_log("executing command: %s" % command)
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        k = paramiko.RSAKey.from_private_key_file(
            self.browser_config.get('ssh_key_path')
        )
        ssh.connect(
            self.private_ip,
            username=self.browser_config.get('username'),
            pkey=k
        )
        sleep_time = 0.1
        stdout = []
        stderr = []
        ssh_transport = ssh.get_transport()
        channel = ssh_transport.open_session()
        channel.setblocking(0)
        channel.exec_command(command)
        while True:
            while channel.recv_ready():
                stdout.append(channel.recv(1000))
            while channel.recv_stderr_ready():
                stderr.append(channel.recv_stderr(1000))
            if channel.exit_status_ready():
                break
            sleep(sleep_time)
        # ret = channel.recv_exit_status()
        ssh_transport.close()
        ssh.close()
        return b''.join(stdout), b''.join(stderr)
    except Exception as e:
        msg = "Execute_command exception: %s" % str(e)
        self.error_log(msg)
        raise Exception(msg)
1,053,053
Load json or yaml data from file handle. Args: fh (file): File handle to load from. Example: >>> with open('data.json', 'r') as json: >>> jsdata = composite.load(json) >>> >>> with open('data.yml', 'r') as yml: >>> ymldata = composite.load(yml)
def load(cls, fh):
    dat = fh.read()
    try:
        ret = cls.from_json(dat)
    except Exception:
        # fall back to YAML when the content is not valid JSON
        ret = cls.from_yaml(dat)
    return ret
1,053,059
Load json from file handle. Args: fh (file): File handle to load from. Example: >>> with open('data.json', 'r') as json: >>> data = composite.load(json)
def from_json(cls, fh):
    if isinstance(fh, str):
        return cls(json.loads(fh))
    else:
        return cls(json.load(fh))
1,053,060
Recursively compute intersection of data. For dictionaries, items for specific keys will be reduced to unique items. For lists, items will be reduced to unique items. This method is meant to be analogous to set.intersection for composite objects. Args: other (composite): Other composite object to intersect with. recursive (bool): Whether or not to perform the operation recursively, for all nested composite objects.
def intersection(self, other, recursive=True):
    if not isinstance(other, composite):
        raise AssertionError('Cannot intersect composite and {} types'.format(type(other)))
    if self.meta_type != other.meta_type:
        return composite({})
    if self.meta_type == 'list':
        keep = []
        for item in self._list:
            if item in other._list:
                if recursive and isinstance(item, composite):
                    keep.extend(item.intersection(other.index(item), recursive=True))
                else:
                    keep.append(item)
        return composite(keep)
    elif self.meta_type == 'dict':
        keep = {}
        for key in self._dict:
            item = self._dict[key]
            if key in other._dict:
                if recursive and \
                   isinstance(item, composite) and \
                   isinstance(other.get(key), composite):
                    keep[key] = item.intersection(other.get(key), recursive=True)
                elif item == other[key]:
                    keep[key] = item
        return composite(keep)
    return
1,053,070
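A small sketch of the set-like semantics above, assuming the composite constructor wraps nested lists and dicts as composite objects:

a = composite({'tags': ['x', 'y'], 'n': 1})
b = composite({'tags': ['y', 'z'], 'n': 1})
a.intersection(b)   # -> composite({'tags': ['y'], 'n': 1})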
Write composite object to file handle in JSON format. Args: fh (file): File handle to write to. pretty (bool): Sort keys and indent in output.
def write_json(self, fh, pretty=True):
    sjson = json.JSONEncoder().encode(self.json())
    if pretty:
        json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)
    else:
        json.dump(json.loads(sjson), fh)
    return
1,053,078
Prune leaves of filetree according to specified regular expression. Args: regex (str): Regular expression to use in pruning tree.
def prune(self, regex=r".*"): return filetree(self.root, ignore=self.ignore, regex=regex)
1,053,086
Initialize the Micropub extension if it was not given app in the constructor. Args: app (flask.Flask): the flask application to extend. client_id (string, optional): the IndieAuth client id, will be displayed when the user is asked to authorize this client. If not provided, the app name will be used.
def init_app(self, app, client_id=None):
    if not self.client_id:
        if client_id:
            self.client_id = client_id
        else:
            self.client_id = app.name
1,053,426
Add a field to the index of the model. Args: fieldname (Text): registers a new field of this name in the specified model. fieldspec (whoosh field type, optional): the whoosh field type to use, defaults to TEXT. Returns: The new schema after the field is added.
def add_field(self, fieldname, fieldspec=whoosh_module_fields.TEXT):
    self._whoosh.add_field(fieldname, fieldspec)
    return self._whoosh.schema
1,053,504
Delete a given field using the command MODEL.pw.delete_field(FIELD). Args: field_name (string): the field to delete from the model registered in the index. Returns: (WhooshSchema): The new schema after deletion.
def delete_field(self, field_name):
    self._whoosh.remove_field(field_name.strip())
    return self._whoosh.schema
1,053,505
Prepares search string as a proper whoosh search string. Args: search_string (str): the search string to prepare; its length is validated. Optional Args: add_wildcards (bool): runs a query for inexact matches. Raises: ValueError: When the search string does not have the appropriate length. This length may be changed in the config options.
def prep_search_string(self, search_string, add_wildcards=False):
    s = search_string.strip()
    try:
        s = str(s)
    except Exception:
        pass
    s = s.replace('*', '')
    if len(s) < self._pw.search_string_min_len:
        raise ValueError('Search string must have at least {} characters'
                         .format(self._pw.search_string_min_len))
    if add_wildcards:
        s = '*{0}*'.format(re.sub(r'[\s]+', '* *', s))
    return s
1,053,509
Convert MCS index to rate in Mbps. See http://mcsindex.com/ Args: mcs (int): MCS index bw (int): bandwidth, 20, 40, 80, ... long_gi(bool): True if long GI is used. Returns: rate (float): bitrate in Mbps >>> mcs_to_rate(5, bw=20, long_gi=False) 57.8 >>> mcs_to_rate(4, bw=40, long_gi=True) 81 >>> mcs_to_rate(3, bw=80, long_gi=False) 130 >>> mcs_to_rate(13, bw=160, long_gi=True) 936
def mcs_to_rate(mcs, bw=20, long_gi=True):
    if bw not in [20, 40, 80, 160]:
        raise Exception("Unknown bandwidth: %d MHz" % (bw))
    if mcs not in MCS_TABLE:
        raise Exception("Unknown MCS: %d" % (mcs))
    idx = int((math.log(bw / 10, 2) - 1) * 2)
    if not long_gi:
        idx += 1
    return MCS_TABLE[mcs][idx]
1,053,621
Convert bit rate to MCS index. Args: rate (float): bit rate in Mbps bw (int): bandwidth, 20, 40, 80, ... long_gi (bool): True if long GI is used. Returns: mcs (int): MCS index >>> rate_to_mcs(120, bw=40, long_gi=False) 5
def rate_to_mcs(rate, bw=20, long_gi=True):
    if bw not in [20, 40, 80, 160]:
        raise Exception("Unknown bandwidth: %d MHz" % (bw))
    idx = int((math.log(bw / 10, 2) - 1) * 2)
    if not long_gi:
        idx += 1
    for mcs, rates in MCS_TABLE.items():
        if abs(rates[idx] - rate) < 1e-3:
            return mcs
    # failed. Try dot11a rates
    for idx, r in enumerate(DOT11A_RATES):
        if abs(r - rate) < 1e-3:
            return idx
    raise Exception("MCS not found: rate=%f, bw=%d, long_gi=%s"
                    % (rate, bw, long_gi))
1,053,622
After each :py:meth:`predict`, this method may be called repeatedly to provide additional measurements for each time step. Args: measurement (MultivariateNormal): Measurement for this time step with specified mean and covariance. measurement_matrix (array): Measurement matrix for this measurement.
def update(self, measurement, measurement_matrix):
    # Sanitise input arguments
    measurement_matrix = np.atleast_2d(measurement_matrix)
    expected_meas_mat_shape = (measurement.mean.shape[0], self.state_length)
    if measurement_matrix.shape != expected_meas_mat_shape:
        raise ValueError("Measurement matrix is wrong shape ({}). "
                         "Expected: {}".format(
                             measurement_matrix.shape,
                             expected_meas_mat_shape))
    # Add measurement to list
    self.measurements[-1].append(measurement)
    self.measurement_matrices[-1].append(measurement_matrix)
    # "Prior" in this case means "before we've updated with this
    # measurement".
    prior = self.posterior_state_estimates[-1]
    # Compute Kalman gain
    innovation = measurement.mean - measurement_matrix.dot(prior.mean)
    innovation_cov = measurement_matrix.dot(prior.cov).dot(
        measurement_matrix.T)
    innovation_cov += measurement.cov
    kalman_gain = prior.cov.dot(measurement_matrix.T).dot(
        np.linalg.inv(innovation_cov))
    # Update estimates
    post = self.posterior_state_estimates[-1]
    self.posterior_state_estimates[-1] = MultivariateNormal(
        mean=post.mean + kalman_gain.dot(innovation),
        cov=post.cov - kalman_gain.dot(measurement_matrix).dot(prior.cov)
    )
1,053,711
Truncate the filter as if only *new_count* :py:meth:`.predict`, :py:meth:`.update` steps had been performed. If *new_count* is greater than :py:attr:`.state_count` then this function is a no-op. Measurements, state estimates, process matrices and process noises which are truncated are discarded. Args: new_count (int): Number of states to retain.
def truncate(self, new_count):
    self.posterior_state_estimates = self.posterior_state_estimates[:new_count]
    self.prior_state_estimates = self.prior_state_estimates[:new_count]
    self.measurements = self.measurements[:new_count]
    self.process_matrices = self.process_matrices[:new_count]
    self.process_covariances = self.process_covariances[:new_count]
1,053,712
Scrape a twitter archive csv, yielding tweets as dicts. Args: directory (str): CSV file, or directory containing tweets.csv. Returns: generator
def read_csv(directory):
    if path.isdir(directory):
        csvfile = path.join(directory, 'tweets.csv')
    else:
        csvfile = directory
    with open(csvfile, 'r') as f:
        for tweet in csv.DictReader(f):
            try:
                tweet['text'] = tweet['text'].decode('utf-8')
            except AttributeError:
                pass
            yield tweet
1,053,738
A quick regular expression check to see that the input is sane Args: equation_str (str): String of equation to be parsed by sympify function. Expected to be valid Python. Raises: BadInputError: If input does not look safe to parse as an equation.
def regex_check(equation_str):
    match1 = re.match(
        r'^(([xy+\-*/()0-9. ]+|sin\(|cos\(|exp\(|log\()?)+$',
        equation_str
    )
    match2 = re.match(r'^.*([xy]) *([xy]).*$', equation_str)
    if match1 and not match2:
        return True
    raise BadInputError('Cannot parse entered equation')
1,053,894
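A sketch of what the sanity check above accepts and rejects; the inputs are illustrative:

regex_check('sin(x) + y / 2')    # -> True
regex_check('x y')               # raises BadInputError (two juxtaposed variables)
regex_check('__import__("os")')  # raises BadInputError (disallowed characters)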
Write the data out as base64 binary Args: output (file-like object): Output to write figure to.
def write_data(self, output):
    if self.figure:
        canvas = FigureCanvas(self.figure)
        self.figure.savefig(output, format='png', bbox_inches='tight')
        output.seek(0)
        return output.getvalue()
    return None
1,053,899
Return section of the config for a specific context (sub-command). Parameters: ctx (Context): The Click context object. optional (bool): If ``True``, return an empty config object when section is missing. Returns: Section: The configuration section belonging to the active (sub-)command (based on ``ctx.info_name``).
def section(self, ctx, optional=False):
    values = self.load()
    try:
        return values[ctx.info_name]
    except KeyError:
        if optional:
            return configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)
        raise LoggedFailure("Configuration section '{}' not found!".format(ctx.info_name))
1,054,042
Return the specified name from the root section. Parameters: name (str): The name of the requested value. default (optional): If set, the default value to use instead of raising :class:`LoggedFailure` for unknown names. Returns: The value for `name`. Raises: LoggedFailure: The requested `name` was not found.
def get(self, name, default=NO_DEFAULT):
    values = self.load()
    try:
        return values[name]
    except KeyError:
        if default is self.NO_DEFAULT:
            raise LoggedFailure("Configuration value '{}' not found in root section!".format(name))
        return default
1,054,043
Import identifier ``name`` from module ``modulename``. If ``name`` is omitted, ``modulename`` must contain the name after the module path, delimited by a colon. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. name (str): Name to import from ``modulename``. Returns: object: Requested object.
def import_name(modulename, name=None):
    if name is None:
        modulename, name = modulename.rsplit(':', 1)
    module = __import__(modulename, globals(), {}, [name])
    return getattr(module, name)
1,054,150
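Both calling conventions of the helper above, using standard-library names:

join = import_name('os.path:join')                 # colon form
urljoin = import_name('urllib.parse', 'urljoin')   # explicit name argument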
Load a Python module from a path under a specified name. Parameters: modulename (str): Fully qualified module name, e.g. ``x.y.z``. modulepath (str): Filename of the module. Returns: Loaded module.
def load_module(modulename, modulepath):
    if '.' in modulename:
        modulepackage, modulebase = modulename.rsplit('.', 1)
    else:
        modulepackage = ''
    imp.acquire_lock()
    try:
        # Check if module is already loaded
        if modulename not in sys.modules:
            # Find module on disk and try to load it
            path, name = os.path.split(modulepath)
            name = os.path.splitext(name)[0]
            handle, path, info = imp.find_module(name, [path])
            try:
                # Load the module and put into sys.modules
                module = imp.load_module(modulename, handle, path, info)
                if modulepackage:
                    setattr(sys.modules[modulepackage], modulebase, module)
            finally:
                # Make sure handle is closed properly
                if handle:
                    handle.close()
    finally:
        imp.release_lock()
    return sys.modules[modulename]
1,054,151
Kill process by pid Args: pid (int)
def kill_pid(self, pid):
    try:
        p = psutil.Process(pid)
        p.terminate()
        self.info_log('Killed [pid:%s][name:%s]' % (p.pid, p.name()))
    except psutil.NoSuchProcess:
        self.error_log('No such process: [pid:%s]' % pid)
1,054,188
Kill by process name Args: procname (str)
def kill(self, procname):
    for proc in psutil.process_iter():
        if proc.name() == procname:
            self.info_log(
                '[pid:%s][name:%s] killed' % (proc.pid, proc.name())
            )
            proc.kill()
1,054,189
Print test summary. When the test batch is finished a test summary will be printed. Args: executed_tests (list)
def print_test_summary(self, executed_tests):
    separator = '---------------------'
    with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:  # noqa
        test_batch = session.query(Testbatch).filter(Testbatch.mongo_id == self.test_batch_id).one()  # noqa
        # TITLE
        self.info_log('******* TEST BATCH SUMMARY ********')
        # TOTAL NUMBER OF EXECUTED TESTS
        base_query = session.query(Testresult).filter(Testresult.test_batch_id == self.test_batch_id)  # noqa
        total_test = base_query.count()
        total_test_successful = base_query.filter(Testresult.result == True).count()  # noqa
        base_query = session.query(Testresult).filter(Testresult.test_batch_id == self.test_batch_id)  # noqa
        total_test_failed = base_query.filter(Testresult.result == False).count()  # noqa
        self.info_log(
            'Total_test: %s; Total_test_successful: %s; Total_test_failed: %s' %  # noqa
            (total_test, total_test_successful, total_test_failed)
        )
        # EXECUTION TIME
        self.info_log(
            "Total execution time: %s" %
            (test_batch.ending_timestamp - test_batch.starting_timestamp)
        )
        # SEPARATOR
        self.info_log(separator)
        self.info_log('Failed tests:')
        # FAILED TESTS
        failed_test_list = []
        test_results = session.query(Testresult)\
            .filter(Testresult.result == False)\
            .filter(Testresult.test_batch_id == self.test_batch_id).all()  # noqa
        for test_result in test_results:
            if test_result.title not in failed_test_list:
                failed_test_list.append(test_result.title)
                query = session.query(Test)\
                    .filter(Test.mongo_id == test_result.test_id)
                if query.count():
                    test = query.one()
                    self.info_log(
                        "[%s] %s" % (test.test_id, test.name)
                    )
                else:
                    self.info_log(
                        "[noid] %s" % (test_result.title)
                    )
        if not failed_test_list:
            self.info_log('No test failed!')
        # SEPARATOR
        self.info_log(separator)
        # TEST INSTANCE REPORT
        for test in executed_tests:
            # TITLE
            self.info_log(
                '%s %s' % (test._name, test.pdriver.get_id())
            )
            test_instance = session.query(Testinstance)\
                .filter(Testinstance.mongo_id == test._test_instance_id)\
                .one()
            # TEST EXECUTION TIME
            try:
                self.info_log(
                    "Test execution time: %s" %
                    (test_instance.ending_timestamp - test_instance.starting_timestamp)  # noqa
                )
            except TypeError:
                self.info_log("Test execution time exception")
            # TEST INSTANCE SUMMARY
            results = test.get_test_result_summary()
            for result in results:
                self.info_log(result)
            # CRASH REPORT
            if test._crash_error:
                self.info_log(test._crash_error)
            else:
                self.info_log('No crash!')
            # SEPARATOR
            self.info_log(separator)
    # END
    self.info_log('Finished')
1,054,193
Navigate to a specific url. This specific implementation injects a javascript script to intercept javascript errors. Configurable with the "proxy_driver:intercept_javascript_error" config. Args: url (str): the url to navigate to Returns: bool
def get(self, url):
    self._driver.get(url)
    if self.bot_diary:
        self.bot_diary.add_auto_entry(
            "I went on",
            target=url,
            take_screenshot=True
        )
    if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:
        self.init_javascript_error_interception()
    return True
1,054,268
Return the gathered javascript error Args: return_type: 'string' | 'list'; default: 'string'
def get_javascript_error(self, return_type='string'):
    if BROME_CONFIG['proxy_driver']['intercept_javascript_error']:
        js_errors = self._driver.execute_script(
            'return window.jsErrors; window.jsErrors = [];'
        )
        if not js_errors:
            js_errors = []
        if return_type == 'list':
            if len(js_errors):
                return js_errors
            else:
                return []
        else:
            if len(js_errors):
                return os.linesep.join(js_errors)
            else:
                return self.no_javascript_error_string
    else:
        if return_type == 'list':
            return []
        else:
            return self.no_javascript_error_string
1,054,270
Drag and drop Args: source_selector: (str) destination_selector: (str) Kwargs: use_javascript_dnd: bool; default: config proxy_driver:use_javascript_dnd
def drag_and_drop(self, source_selector, destination_selector, **kwargs):
    self.info_log(
        "Drag and drop: source (%s); destination (%s)" %
        (source_selector, destination_selector)
    )
    # default comes from the proxy_driver:use_javascript_dnd config value,
    # not from the literal config-key string (which is always truthy)
    use_javascript_dnd = kwargs.get(
        'use_javascript_dnd',
        BROME_CONFIG['proxy_driver']['use_javascript_dnd']
    )
    source_el = self.find(source_selector)
    destination_el = self.find(destination_selector)
    if use_javascript_dnd:
        try:
            dnd_script = [
                "function simulate(f,c,d,e){var b,a=null;for(b in eventMatchers)if(eventMatchers[b].test(c)){a=b;break}if(!a)return!1;document.createEvent?(b=document.createEvent(a),a=='HTMLEvents'?b.initEvent(c,!0,!0):b.initMouseEvent(c,!0,!0,document.defaultView,0,d,e,d,e,!1,!1,!1,!1,0,null),f.dispatchEvent(b)):(a=document.createEventObject(),a.detail=0,a.screenX=d,a.screenY=e,a.clientX=d,a.clientY=e,a.ctrlKey=!1,a.altKey=!1,a.shiftKey=!1,a.metaKey=!1,a.button=1,f.fireEvent('on'+c,a));return!0} var eventMatchers={HTMLEvents:/^(?:load|unload|abort|error|select|change|submit|reset|focus|blur|resize|scroll)$/,MouseEvents:/^(?:click|dblclick|mouse(?:down|up|over|move|out))$/};",  # noqa
                "var source = arguments[0],destination = arguments[1];",
                "simulate(source, 'mousedown', 0, 0);",
                "simulate(source, 'mousemove', destination.offsetLeft, destination.offsetTop);",  # noqa
                "simulate(source, 'mouseup', destination.offsetLeft, destination.offsetTop);"  # noqa
            ]
            self._driver.execute_script(
                '\n'.join(dnd_script),
                source_el._element,
                destination_el._element
            )
        except Exception as e:
            self.error_log(u'drag_and_drop exception: %s' % str(e))
            raise
    else:
        try:
            ActionChains(self._driver).drag_and_drop(
                source_el,
                destination_el
            ).perform()
        except Exception as e:
            self.error_log(u'drag_and_drop exception: %s' % str(e))
            raise
1,054,272
Take a screenshot of a node Args: element (object): the proxy_element screenshot_path (str): the path where the screenshot will be saved
def take_node_screenshot(self, element, screenshot_path):
    from PIL import Image
    temp_path = os.path.join(tempdir, screenshot_path)
    el_x = int(element.location['x'])
    el_y = int(element.location['y'])
    el_height = int(element.size['height'])
    el_width = int(element.size['width'])
    if el_height == 0 or el_width == 0:
        self.debug_log("take_node_screenshot cannot be taken because element width or height equal zero")  # noqa
        return False
    bounding_box = (
        el_x,
        el_y,
        (el_x + el_width),
        (el_y + el_height)
    )
    self._driver.save_screenshot(temp_path)
    base_image = Image.open(temp_path)
    cropped_image = base_image.crop(bounding_box)
    base_image = base_image.resize(cropped_image.size)
    base_image.paste(cropped_image, (0, 0))
    base_image.save(screenshot_path)
1,054,274
Take a quality screenshot Use the screenshot_name args when you want to take a screenshot for reference Args: screenshot_name (str) the name of the screenshot
def take_quality_screenshot(self, screenshot_name):
    self.info_log("Taking a quality screenshot...")
    if self.test_instance._runner_dir:
        _screenshot_name = '%s.png' % string_to_filename(screenshot_name)
        relative_path = os.path.join(
            self.test_instance._quality_screenshot_relative_dir,
            _screenshot_name
        )
        full_path = os.path.join(
            self.test_instance._quality_screenshot_dir,
            _screenshot_name
        )
        self._driver.save_screenshot(
            full_path
        )
        with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:  # noqa
            capabilities = {
                'browserName': self.capabilities['browserName'],
                'platform': self.capabilities['platform'],
                'version': self.capabilities['version']
            }
            quality_screenshot = Testqualityscreenshot()
            quality_screenshot.timestamp = utcnow()
            quality_screenshot.browser_capabilities = capabilities
            quality_screenshot.browser_id = self.get_id()
            quality_screenshot.file_path = relative_path
            # TODO support s3
            quality_screenshot.location = 'local_file_system'
            quality_screenshot.root_path = self.test_instance._runner.root_test_result_dir  # noqa
            quality_screenshot.extra_data = {}
            quality_screenshot.title = screenshot_name
            quality_screenshot.test_instance_id = self.test_instance._test_instance_id  # noqa
            quality_screenshot.test_batch_id = self.test_instance._test_batch_id  # noqa
            session.save(quality_screenshot, safe=True)
        self.debug_log("Quality screenshot taken (%s)" % full_path)
1,054,276
Assert that the element is present in the dom Args: selector (str): the selector used to find the element testid (str): the test_id or a str Kwargs: wait_until_present (bool) Returns: bool: True if the assertion succeeded; False otherwise.
def assert_present(self, selector, testid=None, **kwargs):
    self.info_log(
        "Assert present selector(%s) testid(%s)" % (selector, testid)
    )
    wait_until_present = kwargs.get(
        'wait_until_present',
        BROME_CONFIG['proxy_driver']['wait_until_present_before_assert_present']  # noqa
    )
    self.debug_log(
        "effective wait_until_present: %s" % wait_until_present
    )
    if wait_until_present:
        element = self.wait_until_present(selector, raise_exception=False)
    else:
        element = self.is_present(selector)
    if element:
        if testid is not None:
            self.create_test_result(testid, True)
        return True
    else:
        if testid is not None:
            self.create_test_result(testid, False)
        return False
1,054,277
Assert that the element is not present in the dom Args: selector (str): the selector used to find the element testid (str): the test_id or a str Kwargs: wait_until_not_present (bool) Returns: bool: True if the assertion succeeded; False otherwise.
def assert_not_present(self, selector, testid=None, **kwargs):
    self.info_log(
        "Assert not present selector(%s) testid(%s)" % (selector, testid)
    )
    wait_until_not_present = kwargs.get(
        'wait_until_not_present',
        BROME_CONFIG['proxy_driver']['wait_until_not_present_before_assert_not_present']  # noqa
    )
    self.debug_log(
        "effective wait_until_not_present: %s" % wait_until_not_present
    )
    if wait_until_not_present:
        ret = self.wait_until_not_present(selector, raise_exception=False)
    else:
        ret = not self.is_present(selector)
    if ret:
        if testid is not None:
            self.create_test_result(testid, True)
        return True
    else:
        if testid is not None:
            self.create_test_result(testid, False)
        return False
1,054,278
Assert that the element is visible in the dom Args: selector (str): the selector used to find the element testid (str): the test_id or a str Kwargs: wait_until_visible (bool) highlight (bool) Returns: bool: True if the assertion succeeded; False otherwise.
def assert_visible(self, selector, testid=None, **kwargs):
    self.info_log(
        "Assert visible selector(%s) testid(%s)" % (selector, testid)
    )
    highlight = kwargs.get(
        'highlight',
        BROME_CONFIG['highlight']['highlight_on_assertion_success']
    )
    self.debug_log("effective highlight: %s" % highlight)
    wait_until_visible = kwargs.get(
        'wait_until_visible',
        BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible']  # noqa
    )
    self.debug_log("effective wait_until_visible: %s" % wait_until_visible)
    if wait_until_visible:
        self.wait_until_visible(selector, raise_exception=False)
    element = self.find(
        selector,
        raise_exception=False,
        wait_until_visible=False,
        wait_until_present=False
    )
    if element and element.is_displayed(raise_exception=False):
        if highlight:
            element.highlight(
                style=BROME_CONFIG['highlight']['style_on_assertion_success']  # noqa
            )
        if testid is not None:
            self.create_test_result(testid, True)
        return True
    else:
        if testid is not None:
            self.create_test_result(testid, False)
        return False
1,054,279
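A sketch of the per-call highlight override (names illustrative):

# Suppress the success highlight for this assertion only;
# the configured default applies otherwise.
pdriver.assert_visible("xp://div[@class='banner']", testid='banner-visible', highlight=False)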
Assert that the element is not visible in the dom Args: selector (str): the selector used to find the element testid (str): the test_id or a str Kwargs: wait_until_not_visible (bool) highlight (bool) Returns: bool: True if the assertion succeeded; False otherwise.
def assert_not_visible(self, selector, testid=None, **kwargs): self.info_log( "Assert not visible selector(%s) testid(%s)" % (selector, testid) ) highlight = kwargs.get( 'highlight', BROME_CONFIG['highlight']['highlight_on_assertion_failure'] ) self.debug_log("effective highlight: %s" % highlight) wait_until_not_visible = kwargs.get( 'wait_until_not_visible', BROME_CONFIG['proxy_driver']['wait_until_not_visible_before_assert_not_visible'] # noqa ) self.debug_log( "effective wait_until_not_visible: %s" % wait_until_not_visible ) if wait_until_not_visible: self.wait_until_not_visible(selector, raise_exception=False) element = self.find( selector, raise_exception=False, wait_until_visible=False, wait_until_present=False ) if element and element.is_displayed(raise_exception=False): data = self.execute_script( "return arguments[0].getBoundingClientRect();", element._element ) if highlight: element.highlight( style=BROME_CONFIG['highlight']['style_on_assertion_failure'] # noqa ) if testid is not None: self.create_test_result(testid, False, extra_data={ 'bounding_client_rect': data, 'video_x_offset': self.browser_config.get('video_x_offset', 0), # noqa 'video_y_offset': self.browser_config.get('video_y_offset', 0) # noqa }) return False else: if testid is not None: self.create_test_result(testid, True) return True
1,054,280
Assert that the element's text is equal to the provided value Args: selector (str): the selector used to find the element value (str): the value that will be compared with the element.text value testid (str): the test_id or a str Kwargs: wait_until_visible (bool) highlight (bool) Returns: bool: True if the assertion succeeded; False otherwise.
def assert_text_equal(self, selector, value, testid=None, **kwargs):
    self.info_log(
        "Assert text equal selector(%s) testid(%s)" % (selector, testid)
    )

    highlight = kwargs.get(
        'highlight',
        BROME_CONFIG['highlight']['highlight_on_assertion_success']
    )
    self.debug_log("effective highlight: %s" % highlight)

    wait_until_visible = kwargs.get(
        'wait_until_visible',
        BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible']  # noqa
    )
    self.debug_log("effective wait_until_visible: %s" % wait_until_visible)

    element = self.find(
        selector,
        raise_exception=False,
        wait_until_visible=wait_until_visible
    )
    if element:
        if element.text == value:
            if highlight:
                # pass the success style via the 'style' keyword,
                # as the other assertions do
                element.highlight(
                    style=BROME_CONFIG['highlight']['style_on_assertion_success']  # noqa
                )
            if testid is not None:
                self.create_test_result(testid, True)

            return True
        else:
            if highlight:
                element.highlight(
                    style=BROME_CONFIG['highlight']['style_on_assertion_failure']  # noqa
                )
            if testid is not None:
                self.create_test_result(testid, False)

            return False
    else:
        if testid is not None:
            self.create_test_result(testid, False)

        return False
1,054,281
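A usage sketch (names illustrative):

# Passes only if the element is found and its text matches exactly.
pdriver.assert_text_equal("xp://h1", 'Welcome', testid='welcome-title')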
Create a test result entry in the persistence layer Args: testid (str) result (bool) Keyword Args: extra_data (dict): the extra data that will be saved with the test result Returns: None
def create_test_result(self, testid, result, **kwargs):
    embed = True
    videocapture_path = self.test_instance._video_capture_file_relative_path  # noqa
    screenshot_relative_path = ''
    # honor the documented extra_data keyword argument instead of
    # silently discarding it (assert_not_visible passes one in)
    extra_data = kwargs.get('extra_data', {})

    # JAVASCRIPT ERROR
    if not result:
        extra_data['javascript_error'] = self.get_javascript_error()

    with DbSessionContext(BROME_CONFIG['database']['mongo_database_name']) as session:  # noqa
        test = None
        if testid in BROME_CONFIG['test_dict']:
            query = session.query(Test).filter(Test.test_id == testid)
            if query.count():
                test = query.one()

            test_config = BROME_CONFIG['test_dict'][testid]
            if isinstance(test_config, dict):
                if 'embed' in test_config:
                    embed = test_config['embed']

                test_name = test_config['name']
            else:
                test_name = test_config

            embed_title = '[%s] %s' % (testid, test_name)
        if not test:
            test_name = testid
            embed_title = test_name

        if result:
            # SCREENSHOT
            if BROME_CONFIG['proxy_driver']['take_screenshot_on_assertion_success']:  # noqa
                if self.test_instance._runner_dir:
                    screenshot_name = 'succeed_%s_%s_%s.png' % (
                        string_to_filename(testid),
                        get_timestamp(),
                        self.get_id(join_char='_')
                    )
                    screenshot_path = os.path.join(
                        self.test_instance._assertion_screenshot_dir,
                        screenshot_name
                    )
                    screenshot_relative_path = os.path.join(
                        self.test_instance._assertion_screenshot_relative_dir,  # noqa
                        screenshot_name
                    )
                    self.take_screenshot(screenshot_path=screenshot_path)

            # SOUND NOTIFICATION
            if BROME_CONFIG['runner']['play_sound_on_assertion_success']:  # noqa
                say(
                    BROME_CONFIG['runner']['sound_on_assertion_success']
                    .format(testid=testid)
                )

            # EMBED
            if BROME_CONFIG['runner']['embed_on_assertion_success'] and embed:  # noqa
                self.embed(title=embed_title)
        else:
            # SCREENSHOT
            if BROME_CONFIG['proxy_driver']['take_screenshot_on_assertion_failure']:  # noqa
                if self.test_instance._runner_dir:
                    screenshot_name = 'failed_%s_%s_%s.png' % (
                        string_to_filename(testid),
                        get_timestamp(),
                        self.get_id(join_char='_')
                    )
                    screenshot_path = os.path.join(
                        self.test_instance._assertion_screenshot_dir,
                        screenshot_name
                    )
                    screenshot_relative_path = os.path.join(
                        self.test_instance._assertion_screenshot_relative_dir,  # noqa
                        screenshot_name
                    )

                    self.take_screenshot(screenshot_path=screenshot_path)

            # SOUND NOTIFICATION
            if BROME_CONFIG['runner']['play_sound_on_assertion_failure']:  # noqa
                say(
                    BROME_CONFIG['runner']['sound_on_assertion_failure']
                    .format(testid=testid)
                )

            # EMBED
            if BROME_CONFIG['runner']['embed_on_assertion_failure'] and embed:  # noqa
                self.embed(title=embed_title)

        capabilities = {
            'browserName': self.capabilities['browserName'],
            'platform': self.capabilities['platform'],
            'version': self.capabilities['version']
        }

        test_result = Testresult()
        test_result.result = result
        test_result.timestamp = utcnow()
        test_result.browser_capabilities = capabilities
        test_result.browser_id = self.get_id()
        test_result.root_path = self.test_instance._runner.root_test_result_dir  # noqa
        test_result.screenshot_path = screenshot_relative_path
        test_result.video_capture_path = videocapture_path
        test_result.extra_data = extra_data
        test_result.title = test_name
        if test:
            test_result.test_id = test.get_uid()
            test_result.testid = test.test_id
        else:
            test_result.testid = test_name
        test_result.test_instance_id = self.test_instance._test_instance_id
        test_result.test_batch_id = self.runner.test_batch_id
        session.save(test_result, safe=True)
1,054,282
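The assertion helpers above delegate to this method, but it can also be called directly for an ad-hoc check; with the extra_data fix above, the passed dict is stored on the Testresult document. A sketch with hypothetical names:

# response_time is assumed to be computed elsewhere in the test.
ok = response_time < 2.0
pdriver.create_test_result('fast-page-load', ok, extra_data={'response_time': response_time})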
Return an iterator of tweets from users in these locations. See https://dev.twitter.com/streaming/overview/request-parameters#locations Params: locations: list of bounding box coordinates of the form: southwest_longitude, southwest_latitude, northeast_longitude, northeast_latitude, ...
def track_locations(locations): if len(locations) % 4 != 0: raise Exception('length of bounding box list should be a multiple of four') results = twapi.request('statuses/filter', {'locations': ','.join('%f' % l for l in locations)}) return results.get_iterator()
1,054,305
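A worked example using the San Francisco bounding box from Twitter's streaming documentation (southwest corner first, then northeast); this assumes twapi is already an authenticated TwitterAPI instance at module level:

# SW lon/lat = (-122.75, 36.8), NE lon/lat = (-121.75, 37.8)
for tweet in track_locations([-122.75, 36.8, -121.75, 37.8]):
    print(tweet.get('text'))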
Constructor Args: pos: position chrom: chromosome separate_chroms: consider chromosomes separately (default: False)
def __init__(self, pos=None, chrom=None, separate_chroms=False):
    assert pos is not None, 'Slider:: set pos'
    assert chrom is not None, 'Slider:: set chrom'
    self.pos = pos
    self.chrom = chrom
    # consider chromosomes separately
    self.separate_chroms = separate_chroms
    # windows and additional per-window info
    self.windows = None
    self.info = {}
1,054,686
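A minimal construction sketch, assuming scipy is imported as SP as in the rest of the class:

import scipy as SP

pos = SP.array([100, 250, 400, 900])   # basepair positions
chrom = SP.array([1, 1, 1, 2])         # chromosome of each SNP
slider = Slider(pos=pos, chrom=chrom)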
split into windows using a slide criterion Args: size: window size step: moving step (default: 0.5*size) minSnps: only windows with nSnps>minSnps are considered maxSnps: only windows with nSnps<maxSnps are considered
def _splitGenoSlidingWindow(self, size=5e4, step=None, minSnps=1., maxSnps=SP.inf):
    if step is None:
        step = 0.5 * size
    chroms = SP.unique(self.chrom)
    wnd_pos = []
    idx_wnd_start = []
    nSnps = []
    for chrom_i in chroms:
        start = 0
        Ichrom = self.chrom == chrom_i
        idx_chrom_start = SP.where(Ichrom)[0][0]
        pos_chr = self.pos[Ichrom]
        pos_chr_max = pos_chr.max()
        while 1:
            if start > pos_chr_max:
                break
            end = start + size
            # restrict the window test to the current chromosome so that
            # idx_chrom_start + local index yields the correct global index
            Ir = (pos_chr >= start) * (pos_chr < end)
            _nSnps = Ir.sum()
            if _nSnps > minSnps and _nSnps < maxSnps:
                wnd_pos.append([chrom_i, start, end])
                nSnps.append(_nSnps)
                idx_wnd_start.append(idx_chrom_start + SP.where(Ir)[0][0])
            start += step
    self._wnd_pos = SP.array(wnd_pos)
    self._idx_wnd_start = SP.array(idx_wnd_start)
    self._nSnps = SP.array(nSnps)
1,054,688
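A sketch of the window arithmetic on the slider above: with size=200 the default step is 100, so consecutive windows overlap by half and a SNP at position 250 falls into both [100, 300) and [200, 400).

slider._splitGenoSlidingWindow(size=200, minSnps=0)
# slider._wnd_pos rows are [chrom, start, end]; with step=100 the
# windows overlap by half, e.g. [1, 0, 200], [1, 100, 300], ...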
Return the default args as a parent parser, optionally adding a version Args: version (str): version to return on <cli> --version include (Sequence): default arguments to add to cli. Default: (config, user, dry-run, verbose, quiet)
def parent(version=None, include=None): parser = argparse.ArgumentParser(add_help=False) add_default_args(parser, version=version, include=include) return parser
1,054,699
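A usage sketch; add_default_args is assumed to attach the shared flags to the parent parser, and the child argument here is hypothetical:

import argparse

parser = argparse.ArgumentParser(description='my tool',
                                 parents=[parent(version='1.0.0')])
parser.add_argument('path')
args = parser.parse_args(['input.txt'])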
Set up a stdout logger. Args: name (str): name of the logger level: defaults to logging.INFO format (str): format string for logging output. defaults to ``%(filename)-11s %(lineno)-3d: %(message)s``. Returns: The logger object.
def add_logger(name, level=None, format=None): format = format or '%(filename)-11s %(lineno)-3d: %(message)s' log = logging.getLogger(name) # Set logging level. log.setLevel(level or logging.INFO) ch = logging.StreamHandler(sys.stdout) ch.setFormatter(logging.Formatter(format)) log.addHandler(ch) return log
1,054,700
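A usage sketch; the emitted line follows the default format string:

import logging

log = add_logger('demo', level=logging.DEBUG)
log.info('starting up')
# emits something like: script.py   12 : starting up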
Navigate an open ftplib.FTP to appropriate directory for ClinVar VCF files. Args: ftp: (type: ftplib.FTP) an open connection to ftp.ncbi.nlm.nih.gov build: (type: string) genome build, either 'b37' or 'b38'
def nav_to_vcf_dir(ftp, build): if build == 'b37': ftp.cwd(DIR_CLINVAR_VCF_B37) elif build == 'b38': ftp.cwd(DIR_CLINVAR_VCF_B38) else: raise IOError("Genome build not recognized.")
1,054,729
Determine the filename for the most recent comprehensive ClinVar VCF. Args: build: (type: string) genome build, either 'b37' or 'b38' Returns: (type: string) Filename of the most recent comprehensive ClinVar VCF.
def latest_vcf_filename(build):
    ftp = FTP('ftp.ncbi.nlm.nih.gov')
    ftp.login()
    nav_to_vcf_dir(ftp, build=build)
    # Escape the dots so the pattern matches literal '.vcf.gz' filenames.
    clinvar_datestamped = [f for f in ftp.nlst() if
                           re.match(r'^clinvar_[0-9]{8}\.vcf\.gz$', f)]
    ftp.quit()
    if len(clinvar_datestamped) == 1:
        return clinvar_datestamped[0]
    raise IOError("Unable to determine the most recent ClinVar VCF file on " +
                  "NCBI's FTP site.")
1,054,730
Computes the transition probabilities of a corpus Args: corpus: the given corpus (a corpus_entry needs to be iterable) order: the maximal Markov chain order
def _compute_transitions(self, corpus, order=1): self.transitions = defaultdict(lambda: defaultdict(int)) for corpus_entry in corpus: tokens = self.tokenize(corpus_entry) last_tokens = utils.prefilled_buffer( self._start_symbol, length=self.order) # count the occurrences of "present | past" for token_value in chain(tokens, self._end_symbol): for suffix in utils.get_suffixes(last_tokens): self.transitions[suffix][token_value] += 1 last_tokens.append(token_value) self._compute_relative_probs(self.transitions)
1,054,766
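An illustrative sketch of the resulting counts for order=1, assuming tokenize splits entries into word tokens; the exact key shapes follow utils.get_suffixes, so treat them as schematic:

# corpus = ['a b', 'a c']
# after _compute_transitions(corpus):
#   transitions[suffix_of(start_symbol)]['a'] == 2
#   transitions[suffix_of('a')]['b'] == 1
#   transitions[suffix_of('a')]['c'] == 1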
Returns true if a Status object has entities. Args: status: either a tweepy.Status object or a dict returned from Twitter API
def has_entities(status): try: if sum(len(v) for v in status.entities.values()) > 0: return True except AttributeError: if sum(len(v) for v in status['entities'].values()) > 0: return True return False
1,054,781
Replace shorturls in a status with expanded urls. Args: status (tweepy.status): A tweepy status object Returns: str
def replace_urls(status): text = status.text if not has_url(status): return text urls = [(e['indices'], e['expanded_url']) for e in status.entities['urls']] urls.sort(key=lambda x: x[0][0], reverse=True) for (start, end), url in urls: text = text[:start] + url + text[end:] return text
1,054,783
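A schematic example of why the entities are sorted in reverse: substituting the later URL first keeps the earlier indices valid (the entity values here are hypothetical):

# status.text contains two t.co links with entity indices
# (i1, j1) and (i2, j2), i1 < i2.
# The (i2, j2) slice is replaced first, so (i1, j1) still points
# at the right span when its turn comes:
# text = text[:i2] + expanded_2 + text[j2:]
# text = text[:i1] + expanded_1 + text[j1:]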
Create query from list of terms, using OR but intelligently excluding terms beginning with '-' (Twitter's NOT operator). Optionally add -from:exclude_screen_name. >>> helpers.queryize(['apple', 'orange', '-peach']) u'apple OR orange -peach' Args: terms (list): Search terms. exclude_screen_name (str): A single screen name to exclude from the search. Returns: A string ready to be passed to tweepy.API.search
def queryize(terms, exclude_screen_name=None): ors = ' OR '.join('"{}"'.format(x) for x in terms if not x.startswith('-')) nots = ' '.join('-"{}"'.format(x[1:]) for x in terms if x.startswith('-')) sn = "-from:{}".format(exclude_screen_name) if exclude_screen_name else '' return ' '.join((ors, nots, sn))
1,054,785
Shorten a string so that it fits under max_len, splitting it at 'split'. Not guaranteed to return a string under max_len, as it may not be possible Args: text (str): String to shorten max_len (int): maximum length. default 280 split (str): characters to split on (default is common punctuation: "—;,.")
def chomp(text, max_len=280, split=None): split = split or '—;,.' while length(text) > max_len: try: text = re.split(r'[' + split + ']', text[::-1], 1)[1][::-1] except IndexError: return text return text
1,054,786
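A worked example: the text is repeatedly cut back at the last splitting character until it fits.

chomp('one, two, three', max_len=10)   # -> 'one, two'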
Count the length of a str the way Twitter does, double-counting "wide" characters (e.g. ideographs, emoji) Args: text (str): Text to count. Must be a unicode string in Python 2 maxval (int): The maximum encoding that will be counted as 1 character. Defaults to 4351 (ჿ GEORGIAN LETTER LABIAL SIGN, U+10FF) Returns: int
def length(text, maxval=None, encoding=None):
    maxval = maxval or 4351
    # Use an explicit check rather than an assert, which is stripped
    # when Python runs with optimizations enabled (-O).
    if isinstance(text, six.binary_type):
        raise TypeError('helpers.length requires a unicode argument')
    return sum(2 if ord(x) > maxval else 1
               for x in unicodedata.normalize('NFC', text))
1,054,787
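Worked examples of the wide-character counting (characters above maxval count double):

length(u'hello')    # -> 5
length(u'日本語')    # -> 6, each CJK character counts as 2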
Execute a command on the node Args: command (str) Kwargs: username (str) Returns: tuple: the command's stdout and stderr output
def execute_command(self, command, **kwargs):
    self.info_log("executing command: %s" % command)

    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        username = kwargs.get(
            'username',
            self.browser_config.get('username')
        )
        password = self.browser_config.get('password')

        ssh.connect(self.get_ip(), username=username, password=password)

        stdin, stdout, stderr = ssh.exec_command(command)

        # Read the channel output before closing the client; the
        # streams are no longer readable once the connection is closed.
        out = stdout.read()
        err = stderr.read()

        ssh.close()

        return (out, err)

    except Exception as e:
        msg = "Execute_command exception: %s" % str(e)
        self.error_log(msg)
        raise Exception(msg)
1,054,907
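A usage sketch; with the read-before-close fix above, the returned values are the raw bytes of the command's output (names illustrative):

out, err = browser.execute_command('uptime')
print(out.decode('utf-8'))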