Column        Type      Range
text          string    lengths 89 to 104k
code_tokens   list
avg_line_len  float64   7.91 to 980
score         float64   0 to 630
def update(): ''' Execute an svn update on all of the repos ''' # data for the fileserver event data = {'changed': False, 'backend': 'svnfs'} # _clear_old_remotes runs init(), so use the value from there to avoid a # second init() data['changed'], repos = _clear_old_remotes() for repo in repos: if os.path.exists(repo['lockfile']): log.warning( 'Update lockfile is present for svnfs remote %s, skipping. ' 'If this warning persists, it is possible that the update ' 'process was interrupted. Removing %s or running ' '\'salt-run fileserver.clear_lock svnfs\' will allow updates ' 'to continue for this remote.', repo['url'], repo['lockfile'] ) continue _, errors = lock(repo) if errors: log.error( 'Unable to set update lock for svnfs remote %s, skipping.', repo['url'] ) continue log.debug('svnfs is fetching from %s', repo['url']) old_rev = _rev(repo) try: CLIENT.update(repo['repo']) except pysvn._pysvn.ClientError as exc: log.error( 'Error updating svnfs remote %s (cachedir: %s): %s', repo['url'], repo['cachedir'], exc ) new_rev = _rev(repo) if any((x is None for x in (old_rev, new_rev))): # There were problems getting the revision ID continue if new_rev != old_rev: data['changed'] = True clear_lock(repo) env_cache = os.path.join(__opts__['cachedir'], 'svnfs/envs.p') if data.get('changed', False) is True or not os.path.isfile(env_cache): env_cachedir = os.path.dirname(env_cache) if not os.path.exists(env_cachedir): os.makedirs(env_cachedir) new_envs = envs(ignore_cache=True) serial = salt.payload.Serial(__opts__) with salt.utils.files.fopen(env_cache, 'wb+') as fp_: fp_.write(serial.dumps(new_envs)) log.trace('Wrote env cache data to %s', env_cache) # if there is a change, fire an event if __opts__.get('fileserver_events', False): event = salt.utils.event.get_event( 'master', __opts__['sock_dir'], __opts__['transport'], opts=__opts__, listen=False) event.fire_event(data, tagify(['svnfs', 'update'], prefix='fileserver')) try: salt.fileserver.reap_fileserver_cache_dir( os.path.join(__opts__['cachedir'], 'svnfs/hash'), find_file ) except (IOError, OSError): # Hash file won't exist if no files have yet been served up pass
[ "def", "update", "(", ")", ":", "# data for the fileserver event", "data", "=", "{", "'changed'", ":", "False", ",", "'backend'", ":", "'svnfs'", "}", "# _clear_old_remotes runs init(), so use the value from there to avoid a", "# second init()", "data", "[", "'changed'", "]", ",", "repos", "=", "_clear_old_remotes", "(", ")", "for", "repo", "in", "repos", ":", "if", "os", ".", "path", ".", "exists", "(", "repo", "[", "'lockfile'", "]", ")", ":", "log", ".", "warning", "(", "'Update lockfile is present for svnfs remote %s, skipping. '", "'If this warning persists, it is possible that the update '", "'process was interrupted. Removing %s or running '", "'\\'salt-run fileserver.clear_lock svnfs\\' will allow updates '", "'to continue for this remote.'", ",", "repo", "[", "'url'", "]", ",", "repo", "[", "'lockfile'", "]", ")", "continue", "_", ",", "errors", "=", "lock", "(", "repo", ")", "if", "errors", ":", "log", ".", "error", "(", "'Unable to set update lock for svnfs remote %s, skipping.'", ",", "repo", "[", "'url'", "]", ")", "continue", "log", ".", "debug", "(", "'svnfs is fetching from %s'", ",", "repo", "[", "'url'", "]", ")", "old_rev", "=", "_rev", "(", "repo", ")", "try", ":", "CLIENT", ".", "update", "(", "repo", "[", "'repo'", "]", ")", "except", "pysvn", ".", "_pysvn", ".", "ClientError", "as", "exc", ":", "log", ".", "error", "(", "'Error updating svnfs remote %s (cachedir: %s): %s'", ",", "repo", "[", "'url'", "]", ",", "repo", "[", "'cachedir'", "]", ",", "exc", ")", "new_rev", "=", "_rev", "(", "repo", ")", "if", "any", "(", "(", "x", "is", "None", "for", "x", "in", "(", "old_rev", ",", "new_rev", ")", ")", ")", ":", "# There were problems getting the revision ID", "continue", "if", "new_rev", "!=", "old_rev", ":", "data", "[", "'changed'", "]", "=", "True", "clear_lock", "(", "repo", ")", "env_cache", "=", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'svnfs/envs.p'", ")", "if", "data", ".", "get", "(", "'changed'", ",", "False", ")", "is", "True", "or", "not", "os", ".", "path", ".", "isfile", "(", "env_cache", ")", ":", "env_cachedir", "=", "os", ".", "path", ".", "dirname", "(", "env_cache", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "env_cachedir", ")", ":", "os", ".", "makedirs", "(", "env_cachedir", ")", "new_envs", "=", "envs", "(", "ignore_cache", "=", "True", ")", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "__opts__", ")", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "env_cache", ",", "'wb+'", ")", "as", "fp_", ":", "fp_", ".", "write", "(", "serial", ".", "dumps", "(", "new_envs", ")", ")", "log", ".", "trace", "(", "'Wrote env cache data to %s'", ",", "env_cache", ")", "# if there is a change, fire an event", "if", "__opts__", ".", "get", "(", "'fileserver_events'", ",", "False", ")", ":", "event", "=", "salt", ".", "utils", ".", "event", ".", "get_event", "(", "'master'", ",", "__opts__", "[", "'sock_dir'", "]", ",", "__opts__", "[", "'transport'", "]", ",", "opts", "=", "__opts__", ",", "listen", "=", "False", ")", "event", ".", "fire_event", "(", "data", ",", "tagify", "(", "[", "'svnfs'", ",", "'update'", "]", ",", "prefix", "=", "'fileserver'", ")", ")", "try", ":", "salt", ".", "fileserver", ".", "reap_fileserver_cache_dir", "(", "os", ".", "path", ".", "join", "(", "__opts__", "[", "'cachedir'", "]", ",", "'svnfs/hash'", ")", ",", "find_file", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "# Hash file won't exist if no files have yet been served up", "pass" ]
37.216216
18.945946
def get_stop_times(feed: "Feed", date: Optional[str] = None) -> DataFrame: """ Return a subset of ``feed.stop_times``. Parameters ---------- feed : Feed date : string YYYYMMDD date string restricting the output to trips active on the date Returns ------- DataFrame Subset of ``feed.stop_times`` Notes ----- Assume the following feed attributes are not ``None``: - ``feed.stop_times`` - Those used in :func:`.trips.get_trips` """ f = feed.stop_times.copy() if date is None: return f g = feed.get_trips(date) return f[f["trip_id"].isin(g["trip_id"])]
[ "def", "get_stop_times", "(", "feed", ":", "\"Feed\"", ",", "date", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "DataFrame", ":", "f", "=", "feed", ".", "stop_times", ".", "copy", "(", ")", "if", "date", "is", "None", ":", "return", "f", "g", "=", "feed", ".", "get_trips", "(", "date", ")", "return", "f", "[", "f", "[", "\"trip_id\"", "]", ".", "isin", "(", "g", "[", "\"trip_id\"", "]", ")", "]" ]
21.1
22.766667
def sys_pipes(encoding=_default_encoding): """Redirect C-level stdout/stderr to sys.stdout/stderr This is useful if sys.stdout/stderr are already being forwarded somewhere. DO NOT USE THIS if sys.stdout and sys.stderr are not already being forwarded. """ return pipes(sys.stdout, sys.stderr, encoding=encoding)
[ "def", "sys_pipes", "(", "encoding", "=", "_default_encoding", ")", ":", "return", "pipes", "(", "sys", ".", "stdout", ",", "sys", ".", "stderr", ",", "encoding", "=", "encoding", ")" ]
41.5
21.375
def sparkify(series): u"""Converts <series> to a sparkline string. Example: >>> sparkify([ 0.5, 1.2, 3.5, 7.3, 8.0, 12.5, 13.2, 15.0, 14.2, 11.8, 6.1, ... 1.9 ]) u'▁▁▂▄▅▇▇██▆▄▂' >>> sparkify([1, 1, -2, 3, -5, 8, -13]) u'▆▆▅▆▄█▁' Raises ValueError if input data cannot be converted to float. Raises TypeError if series is not an iterable. """ series = [ float(i) for i in series ] minimum = min(series) maximum = max(series) data_range = maximum - minimum if data_range == 0.0: # Graph a baseline if every input value is equal. return u''.join([ spark_chars[0] for i in series ]) coefficient = (len(spark_chars) - 1.0) / data_range return u''.join([ spark_chars[ int(round((x - minimum) * coefficient)) ] for x in series ])
[ "def", "sparkify", "(", "series", ")", ":", "series", "=", "[", "float", "(", "i", ")", "for", "i", "in", "series", "]", "minimum", "=", "min", "(", "series", ")", "maximum", "=", "max", "(", "series", ")", "data_range", "=", "maximum", "-", "minimum", "if", "data_range", "==", "0.0", ":", "# Graph a baseline if every input value is equal.", "return", "u''", ".", "join", "(", "[", "spark_chars", "[", "0", "]", "for", "i", "in", "series", "]", ")", "coefficient", "=", "(", "len", "(", "spark_chars", ")", "-", "1.0", ")", "/", "data_range", "return", "u''", ".", "join", "(", "[", "spark_chars", "[", "int", "(", "round", "(", "(", "x", "-", "minimum", ")", "*", "coefficient", ")", ")", "]", "for", "x", "in", "series", "]", ")" ]
30.333333
19.037037
def phantom_decorate(f, get_or_add): """ Decorator for version-dependent fields. If get_or_add is True (means get), we return s, self.phantom_value. If it is False (means add), we return s. """ def wrapper(*args): self, pkt, s = args[:3] if phantom_mode(pkt): if get_or_add: return s, self.phantom_value return s return f(*args) return wrapper
[ "def", "phantom_decorate", "(", "f", ",", "get_or_add", ")", ":", "def", "wrapper", "(", "*", "args", ")", ":", "self", ",", "pkt", ",", "s", "=", "args", "[", ":", "3", "]", "if", "phantom_mode", "(", "pkt", ")", ":", "if", "get_or_add", ":", "return", "s", ",", "self", ".", "phantom_value", "return", "s", "return", "f", "(", "*", "args", ")", "return", "wrapper" ]
30.142857
11.142857
def comment_request(self, request_id, body, commit=None, filename=None, row=None): """ Create a comment on the request. :param request_id: the id of the request :param body: the comment body :param commit: which commit to comment on :param filename: which file to comment on :param row: which line of code to comment on :return: """ request_url = ("{}pull-request/{}/comment" .format(self.create_basic_url(), request_id)) payload = {'comment': body} if commit is not None: payload['commit'] = commit if filename is not None: payload['filename'] = filename if row is not None: payload['row'] = row return_value = self._call_api(request_url, method='POST', data=payload) LOG.debug(return_value)
[ "def", "comment_request", "(", "self", ",", "request_id", ",", "body", ",", "commit", "=", "None", ",", "filename", "=", "None", ",", "row", "=", "None", ")", ":", "request_url", "=", "(", "\"{}pull-request/{}/comment\"", ".", "format", "(", "self", ".", "create_basic_url", "(", ")", ",", "request_id", ")", ")", "payload", "=", "{", "'comment'", ":", "body", "}", "if", "commit", "is", "not", "None", ":", "payload", "[", "'commit'", "]", "=", "commit", "if", "filename", "is", "not", "None", ":", "payload", "[", "'filename'", "]", "=", "filename", "if", "row", "is", "not", "None", ":", "payload", "[", "'row'", "]", "=", "row", "return_value", "=", "self", ".", "_call_api", "(", "request_url", ",", "method", "=", "'POST'", ",", "data", "=", "payload", ")", "LOG", ".", "debug", "(", "return_value", ")" ]
35.346154
13.115385
def create_db(self, models): """Creates the in-memory SQLite database from the model configuration.""" # first create the table definitions self.tables = dict( [ (model_name, self.create_model_table(model)) for model_name, model in iteritems(models) ] ) # now create the tables in memory logger.debug("Creating %d database table(s)...", len(self.tables)) try: self.Base.metadata.create_all(self.engine) except Exception as exc: raise StatikError( message="Failed to create in-memory data model.", orig_exc=exc ) self.load_all_model_data(models)
[ "def", "create_db", "(", "self", ",", "models", ")", ":", "# first create the table definitions", "self", ".", "tables", "=", "dict", "(", "[", "(", "model_name", ",", "self", ".", "create_model_table", "(", "model", ")", ")", "for", "model_name", ",", "model", "in", "iteritems", "(", "models", ")", "]", ")", "# now create the tables in memory", "logger", ".", "debug", "(", "\"Creating %d database table(s)...\"", ",", "len", "(", "self", ".", "tables", ")", ")", "try", ":", "self", ".", "Base", ".", "metadata", ".", "create_all", "(", "self", ".", "engine", ")", "except", "Exception", "as", "exc", ":", "raise", "StatikError", "(", "message", "=", "\"Failed to create in-memory data model.\"", ",", "orig_exc", "=", "exc", ")", "self", ".", "load_all_model_data", "(", "models", ")" ]
36.45
15.55
def from_pyfile(self, filename): """ Read configuration from a Python file. :param filename: name of the configuration file :return: ``True`` if the file was read successfully; raises an exception on failure """ d = types.ModuleType('config') d.__file__ = filename with open(filename) as config_file: exec(compile(config_file.read(), filename, 'exec'), d.__dict__) self.from_object(d) return True
[ "def", "from_pyfile", "(", "self", ",", "filename", ")", ":", "d", "=", "types", ".", "ModuleType", "(", "'config'", ")", "d", ".", "__file__", "=", "filename", "with", "open", "(", "filename", ")", "as", "config_file", ":", "exec", "(", "compile", "(", "config_file", ".", "read", "(", ")", ",", "filename", ",", "'exec'", ")", ",", "d", ".", "__dict__", ")", "self", ".", "from_object", "(", "d", ")", "return", "True" ]
30.230769
12.384615
def _get_diffs(cls, dict1, dict2, ignore_missing_keys): ''' Returns a dict with the differences between dict1 and dict2 Notes: Keys that only exist in dict2 are not included in the diff if ignore_missing_keys is True, otherwise they are Simple compares are done on lists ''' ret_dict = {} for p in dict1.keys(): if p not in dict2: ret_dict.update({p: {'new': dict1[p], 'old': cls.NONE_VALUE}}) elif dict1[p] != dict2[p]: if isinstance(dict1[p], dict) and isinstance(dict2[p], dict): sub_diff_dict = cls._get_diffs(dict1[p], dict2[p], ignore_missing_keys) if sub_diff_dict: ret_dict.update({p: sub_diff_dict}) else: ret_dict.update({p: {'new': dict1[p], 'old': dict2[p]}}) if not ignore_missing_keys: for p in dict2.keys(): if p not in dict1.keys(): ret_dict.update({p: {'new': cls.NONE_VALUE, 'old': dict2[p]}}) return ret_dict
[ "def", "_get_diffs", "(", "cls", ",", "dict1", ",", "dict2", ",", "ignore_missing_keys", ")", ":", "ret_dict", "=", "{", "}", "for", "p", "in", "dict1", ".", "keys", "(", ")", ":", "if", "p", "not", "in", "dict2", ":", "ret_dict", ".", "update", "(", "{", "p", ":", "{", "'new'", ":", "dict1", "[", "p", "]", ",", "'old'", ":", "cls", ".", "NONE_VALUE", "}", "}", ")", "elif", "dict1", "[", "p", "]", "!=", "dict2", "[", "p", "]", ":", "if", "isinstance", "(", "dict1", "[", "p", "]", ",", "dict", ")", "and", "isinstance", "(", "dict2", "[", "p", "]", ",", "dict", ")", ":", "sub_diff_dict", "=", "cls", ".", "_get_diffs", "(", "dict1", "[", "p", "]", ",", "dict2", "[", "p", "]", ",", "ignore_missing_keys", ")", "if", "sub_diff_dict", ":", "ret_dict", ".", "update", "(", "{", "p", ":", "sub_diff_dict", "}", ")", "else", ":", "ret_dict", ".", "update", "(", "{", "p", ":", "{", "'new'", ":", "dict1", "[", "p", "]", ",", "'old'", ":", "dict2", "[", "p", "]", "}", "}", ")", "if", "not", "ignore_missing_keys", ":", "for", "p", "in", "dict2", ".", "keys", "(", ")", ":", "if", "p", "not", "in", "dict1", ".", "keys", "(", ")", ":", "ret_dict", ".", "update", "(", "{", "p", ":", "{", "'new'", ":", "cls", ".", "NONE_VALUE", ",", "'old'", ":", "dict2", "[", "p", "]", "}", "}", ")", "return", "ret_dict" ]
44.37037
20.296296
def scrape(ctx, url): """ Rip the events from a given rss feed, normalize the data and store. """ data = load_feed(url) feed = data['feed'] entries = data['entries'] # THIS IS SPECIFIC TO # http://konfery.cz/rss/ _type = 'community' country = 'Czech Republic' # title, title_detail, links, link, published, summary, tags # unused: summary_detail, guidislink, published_parsed for entry in entries: _id = sluggify(entry['id']) city = entry['tags'][0]['term'] landing = entry['link'] start_time = dt_normalize(entry['published_parsed'], local_tz=True) title = entry['title'] summary = entry['summary'] link = entry['link'] ipdb.set_trace()
[ "def", "scrape", "(", "ctx", ",", "url", ")", ":", "data", "=", "load_feed", "(", "url", ")", "feed", "=", "data", "[", "'feed'", "]", "entries", "=", "data", "[", "'entries'", "]", "# THIS IS SPECIFIC TO # http://konfery.cz/rss/", "_type", "=", "'community'", "country", "=", "'Czech Republic'", "# title, title_detail, links, link, published, summary, tags", "# unused: summary_detail, guidislink, published_parsed", "for", "entry", "in", "entries", ":", "_id", "=", "sluggify", "(", "entry", "[", "'id'", "]", ")", "city", "=", "entry", "[", "'tags'", "]", "[", "0", "]", "[", "'term'", "]", "landing", "=", "entry", "[", "'link'", "]", "start_time", "=", "dt_normalize", "(", "entry", "[", "'published_parsed'", "]", ",", "local_tz", "=", "True", ")", "title", "=", "entry", "[", "'title'", "]", "summary", "=", "entry", "[", "'summary'", "]", "link", "=", "entry", "[", "'link'", "]", "ipdb", ".", "set_trace", "(", ")" ]
29.16
17.64
def update_gl_state(self, *args, **kwargs): """Modify the set of GL state parameters to use when drawing Parameters ---------- *args : tuple Arguments. **kwargs : dict Keyword arguments. """ for v in self._subvisuals: v.update_gl_state(*args, **kwargs)
[ "def", "update_gl_state", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "v", "in", "self", ".", "_subvisuals", ":", "v", ".", "update_gl_state", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
27.75
13.666667
def c_handle_array(objs): """Create ctypes const void ** from a list of MXNet objects with handles. Parameters ---------- objs : list of NDArray/Symbol. MXNet objects. Returns ------- (ctypes.c_void_p * len(objs)) A void ** pointer that can be passed to C API. """ arr = (ctypes.c_void_p * len(objs))() arr[:] = [o.handle for o in objs] return arr
[ "def", "c_handle_array", "(", "objs", ")", ":", "arr", "=", "(", "ctypes", ".", "c_void_p", "*", "len", "(", "objs", ")", ")", "(", ")", "arr", "[", ":", "]", "=", "[", "o", ".", "handle", "for", "o", "in", "objs", "]", "return", "arr" ]
24.625
17.5
def read_features(self, tol=1e-3): """Reads the features from a file and stores them in the current object. Parameters ---------- tol: float Tolerance level to detect duration of audio. """ try: # Read JSON file with open(self.file_struct.features_file) as f: feats = json.load(f) # Store duration if self.dur is None: self.dur = float(feats["globals"]["dur"]) # Check that we have the correct global parameters assert(np.isclose( self.dur, float(feats["globals"]["dur"]), rtol=tol)) assert(self.sr == int(feats["globals"]["sample_rate"])) assert(self.hop_length == int(feats["globals"]["hop_length"])) assert(os.path.basename(self.file_struct.audio_file) == os.path.basename(feats["globals"]["audio_file"])) # Check for specific features params feat_params_err = FeatureParamsError( "Couldn't find features for %s id in file %s" % (self.get_id(), self.file_struct.features_file)) if self.get_id() not in feats.keys(): raise feat_params_err for param_name in self.get_param_names(): value = getattr(self, param_name) if hasattr(value, '__call__'): # Special case of functions if value.__name__ != \ feats[self.get_id()]["params"][param_name]: raise feat_params_err else: if str(value) != \ feats[self.get_id()]["params"][param_name]: raise feat_params_err # Store actual features self._est_beats_times = np.array(feats["est_beats"]) self._est_beatsync_times = np.array(feats["est_beatsync_times"]) self._est_beats_frames = librosa.core.time_to_frames( self._est_beats_times, sr=self.sr, hop_length=self.hop_length) self._framesync_features = \ np.array(feats[self.get_id()]["framesync"]) self._est_beatsync_features = \ np.array(feats[self.get_id()]["est_beatsync"]) # Read annotated beats if available if "ann_beats" in feats.keys(): self._ann_beats_times = np.array(feats["ann_beats"]) self._ann_beatsync_times = np.array(feats["ann_beatsync_times"]) self._ann_beats_frames = librosa.core.time_to_frames( self._ann_beats_times, sr=self.sr, hop_length=self.hop_length) self._ann_beatsync_features = \ np.array(feats[self.get_id()]["ann_beatsync"]) except KeyError: raise WrongFeaturesFormatError( "The features file %s is not correctly formatted" % self.file_struct.features_file) except AssertionError: raise FeaturesNotFound( "The features for the given parameters were not found in " "features file %s" % self.file_struct.features_file) except IOError: raise NoFeaturesFileError("Could not find features file %s", self.file_struct.features_file)
[ "def", "read_features", "(", "self", ",", "tol", "=", "1e-3", ")", ":", "try", ":", "# Read JSON file", "with", "open", "(", "self", ".", "file_struct", ".", "features_file", ")", "as", "f", ":", "feats", "=", "json", ".", "load", "(", "f", ")", "# Store duration", "if", "self", ".", "dur", "is", "None", ":", "self", ".", "dur", "=", "float", "(", "feats", "[", "\"globals\"", "]", "[", "\"dur\"", "]", ")", "# Check that we have the correct global parameters", "assert", "(", "np", ".", "isclose", "(", "self", ".", "dur", ",", "float", "(", "feats", "[", "\"globals\"", "]", "[", "\"dur\"", "]", ")", ",", "rtol", "=", "tol", ")", ")", "assert", "(", "self", ".", "sr", "==", "int", "(", "feats", "[", "\"globals\"", "]", "[", "\"sample_rate\"", "]", ")", ")", "assert", "(", "self", ".", "hop_length", "==", "int", "(", "feats", "[", "\"globals\"", "]", "[", "\"hop_length\"", "]", ")", ")", "assert", "(", "os", ".", "path", ".", "basename", "(", "self", ".", "file_struct", ".", "audio_file", ")", "==", "os", ".", "path", ".", "basename", "(", "feats", "[", "\"globals\"", "]", "[", "\"audio_file\"", "]", ")", ")", "# Check for specific features params", "feat_params_err", "=", "FeatureParamsError", "(", "\"Couldn't find features for %s id in file %s\"", "%", "(", "self", ".", "get_id", "(", ")", ",", "self", ".", "file_struct", ".", "features_file", ")", ")", "if", "self", ".", "get_id", "(", ")", "not", "in", "feats", ".", "keys", "(", ")", ":", "raise", "feat_params_err", "for", "param_name", "in", "self", ".", "get_param_names", "(", ")", ":", "value", "=", "getattr", "(", "self", ",", "param_name", ")", "if", "hasattr", "(", "value", ",", "'__call__'", ")", ":", "# Special case of functions", "if", "value", ".", "__name__", "!=", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"params\"", "]", "[", "param_name", "]", ":", "raise", "feat_params_err", "else", ":", "if", "str", "(", "value", ")", "!=", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"params\"", "]", "[", "param_name", "]", ":", "raise", "feat_params_err", "# Store actual features", "self", ".", "_est_beats_times", "=", "np", ".", "array", "(", "feats", "[", "\"est_beats\"", "]", ")", "self", ".", "_est_beatsync_times", "=", "np", ".", "array", "(", "feats", "[", "\"est_beatsync_times\"", "]", ")", "self", ".", "_est_beats_frames", "=", "librosa", ".", "core", ".", "time_to_frames", "(", "self", ".", "_est_beats_times", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "self", ".", "_framesync_features", "=", "np", ".", "array", "(", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"framesync\"", "]", ")", "self", ".", "_est_beatsync_features", "=", "np", ".", "array", "(", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"est_beatsync\"", "]", ")", "# Read annotated beats if available", "if", "\"ann_beats\"", "in", "feats", ".", "keys", "(", ")", ":", "self", ".", "_ann_beats_times", "=", "np", ".", "array", "(", "feats", "[", "\"ann_beats\"", "]", ")", "self", ".", "_ann_beatsync_times", "=", "np", ".", "array", "(", "feats", "[", "\"ann_beatsync_times\"", "]", ")", "self", ".", "_ann_beats_frames", "=", "librosa", ".", "core", ".", "time_to_frames", "(", "self", ".", "_ann_beats_times", ",", "sr", "=", "self", ".", "sr", ",", "hop_length", "=", "self", ".", "hop_length", ")", "self", ".", "_ann_beatsync_features", "=", "np", ".", "array", "(", "feats", "[", "self", ".", "get_id", "(", ")", "]", "[", "\"ann_beatsync\"", "]", ")", "except", 
"KeyError", ":", "raise", "WrongFeaturesFormatError", "(", "\"The features file %s is not correctly formatted\"", "%", "self", ".", "file_struct", ".", "features_file", ")", "except", "AssertionError", ":", "raise", "FeaturesNotFound", "(", "\"The features for the given parameters were not found in \"", "\"features file %s\"", "%", "self", ".", "file_struct", ".", "features_file", ")", "except", "IOError", ":", "raise", "NoFeaturesFileError", "(", "\"Could not find features file %s\"", ",", "self", ".", "file_struct", ".", "features_file", ")" ]
45.445946
18.594595
def serialize_value(self, parent_elem, value): """ Serializes str, Attrib, or PathAttrib objects. Example:: <attribute>foobar</attribute> """ if isinstance(value, (str, int)) or type(value).__name__ == 'str': parent_elem.text = str(value) elif value is None: parent_elem.text = None else: parent_elem.append(value.serialize(self))
[ "def", "serialize_value", "(", "self", ",", "parent_elem", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "str", ",", "int", ")", ")", "or", "type", "(", "value", ")", ".", "__name__", "==", "'str'", ":", "parent_elem", ".", "text", "=", "str", "(", "value", ")", "elif", "value", "is", "None", ":", "parent_elem", ".", "text", "=", "None", "else", ":", "parent_elem", ".", "append", "(", "value", ".", "serialize", "(", "self", ")", ")" ]
30.214286
15.5
def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile): """ Convert a PPM and PGM file containing raw pixel data into a PNG outfile with the parameters set in the writer object. """ pixels = array('B') pixels.fromfile(ppmfile, (self.bitdepth/8) * self.color_planes * self.width * self.height) apixels = array('B') apixels.fromfile(pgmfile, (self.bitdepth/8) * self.width * self.height) pixels = interleave_planes(pixels, apixels, (self.bitdepth/8) * self.color_planes, (self.bitdepth/8)) if self.interlace: self.write_passes(outfile, self.array_scanlines_interlace(pixels)) else: self.write_passes(outfile, self.array_scanlines(pixels))
[ "def", "convert_ppm_and_pgm", "(", "self", ",", "ppmfile", ",", "pgmfile", ",", "outfile", ")", ":", "pixels", "=", "array", "(", "'B'", ")", "pixels", ".", "fromfile", "(", "ppmfile", ",", "(", "self", ".", "bitdepth", "/", "8", ")", "*", "self", ".", "color_planes", "*", "self", ".", "width", "*", "self", ".", "height", ")", "apixels", "=", "array", "(", "'B'", ")", "apixels", ".", "fromfile", "(", "pgmfile", ",", "(", "self", ".", "bitdepth", "/", "8", ")", "*", "self", ".", "width", "*", "self", ".", "height", ")", "pixels", "=", "interleave_planes", "(", "pixels", ",", "apixels", ",", "(", "self", ".", "bitdepth", "/", "8", ")", "*", "self", ".", "color_planes", ",", "(", "self", ".", "bitdepth", "/", "8", ")", ")", "if", "self", ".", "interlace", ":", "self", ".", "write_passes", "(", "outfile", ",", "self", ".", "array_scanlines_interlace", "(", "pixels", ")", ")", "else", ":", "self", ".", "write_passes", "(", "outfile", ",", "self", ".", "array_scanlines", "(", "pixels", ")", ")" ]
44.95
15.95
def connect(self, **settings): """ Connect to redis and cache the new connection """ # compute a unique key for this settings, for caching. Work on the whole # dict without directly using known keys to allow the use of unix socket # connection or any other (future ?) way to connect to redis if not settings: settings = self.connection_settings connection_key = ':'.join([str(settings[k]) for k in sorted(settings)]) if connection_key not in self._connections: self._connections[connection_key] = redis.StrictRedis( decode_responses=True, **settings) return self._connections[connection_key]
[ "def", "connect", "(", "self", ",", "*", "*", "settings", ")", ":", "# compute a unique key for this settings, for caching. Work on the whole", "# dict without directly using known keys to allow the use of unix socket", "# connection or any other (future ?) way to connect to redis", "if", "not", "settings", ":", "settings", "=", "self", ".", "connection_settings", "connection_key", "=", "':'", ".", "join", "(", "[", "str", "(", "settings", "[", "k", "]", ")", "for", "k", "in", "sorted", "(", "settings", ")", "]", ")", "if", "connection_key", "not", "in", "self", ".", "_connections", ":", "self", ".", "_connections", "[", "connection_key", "]", "=", "redis", ".", "StrictRedis", "(", "decode_responses", "=", "True", ",", "*", "*", "settings", ")", "return", "self", ".", "_connections", "[", "connection_key", "]" ]
51.857143
19.714286
def _get_required_fn(fn, root_path): """ Definition of the MD5 file requires, that all paths will be absolute for the package directory, not for the filesystem. This function converts filesystem-absolute paths to package-absolute paths. Args: fn (str): Local/absolute path to the file. root_path (str): Local/absolute path to the package directory. Returns: str: Package-absolute path to the file. Raises: ValueError: When `fn` is absolute and `root_path` relative or \ conversely. """ if not fn.startswith(root_path): raise ValueError("Both paths have to be absolute or local!") replacer = "/" if root_path.endswith("/") else "" return fn.replace(root_path, replacer, 1)
[ "def", "_get_required_fn", "(", "fn", ",", "root_path", ")", ":", "if", "not", "fn", ".", "startswith", "(", "root_path", ")", ":", "raise", "ValueError", "(", "\"Both paths have to be absolute or local!\"", ")", "replacer", "=", "\"/\"", "if", "root_path", ".", "endswith", "(", "\"/\"", ")", "else", "\"\"", "return", "fn", ".", "replace", "(", "root_path", ",", "replacer", ",", "1", ")" ]
31.583333
23.083333
def _parse_message_to_mqtt(self, data): """Parse a mysensors command string. Return a MQTT topic, payload and qos-level as a tuple. """ msg = Message(data, self) payload = str(msg.payload) msg.payload = '' # prefix/node/child/type/ack/subtype : payload return ('{}/{}'.format(self._out_prefix, msg.encode('/'))[:-2], payload, msg.ack)
[ "def", "_parse_message_to_mqtt", "(", "self", ",", "data", ")", ":", "msg", "=", "Message", "(", "data", ",", "self", ")", "payload", "=", "str", "(", "msg", ".", "payload", ")", "msg", ".", "payload", "=", "''", "# prefix/node/child/type/ack/subtype : payload", "return", "(", "'{}/{}'", ".", "format", "(", "self", ".", "_out_prefix", ",", "msg", ".", "encode", "(", "'/'", ")", ")", "[", ":", "-", "2", "]", ",", "payload", ",", "msg", ".", "ack", ")" ]
36.818182
13.090909
def _parse_metadata(self, meta): """Return the dict containing document metadata""" formatted_fields = self.settings['FORMATTED_FIELDS'] output = collections.OrderedDict() for name, value in meta.items(): name = name.lower() if name in formatted_fields: rendered = self._render(value).strip() output[name] = self.process_metadata(name, rendered) else: output[name] = self.process_metadata(name, value) return output
[ "def", "_parse_metadata", "(", "self", ",", "meta", ")", ":", "formatted_fields", "=", "self", ".", "settings", "[", "'FORMATTED_FIELDS'", "]", "output", "=", "collections", ".", "OrderedDict", "(", ")", "for", "name", ",", "value", "in", "meta", ".", "items", "(", ")", ":", "name", "=", "name", ".", "lower", "(", ")", "if", "name", "in", "formatted_fields", ":", "rendered", "=", "self", ".", "_render", "(", "value", ")", ".", "strip", "(", ")", "output", "[", "name", "]", "=", "self", ".", "process_metadata", "(", "name", ",", "rendered", ")", "else", ":", "output", "[", "name", "]", "=", "self", ".", "process_metadata", "(", "name", ",", "value", ")", "return", "output" ]
40.615385
14.461538
def vmstats(): ''' .. versionchanged:: 2016.3.2 Return the virtual memory stats for this minion .. versionchanged:: 2016.11.4 Added support for AIX CLI Example: .. code-block:: bash salt '*' status.vmstats ''' def linux_vmstats(): ''' linux specific implementation of vmstats ''' ret = {} try: with salt.utils.files.fopen('/proc/vmstat', 'r') as fp_: stats = salt.utils.stringutils.to_unicode(fp_.read()) except IOError: pass else: for line in stats.splitlines(): if not line: continue comps = line.split() ret[comps[0]] = _number(comps[1]) return ret def generic_vmstats(): ''' generic implementation of vmstats note: works on FreeBSD, SunOS and OpenBSD (possibly others) ''' ret = {} for line in __salt__['cmd.run']('vmstat -s').splitlines(): comps = line.split() if comps[0].isdigit(): ret[' '.join(comps[1:])] = _number(comps[0].strip()) return ret # dict that returns a function that does the right thing per platform get_version = { 'Linux': linux_vmstats, 'FreeBSD': generic_vmstats, 'OpenBSD': generic_vmstats, 'SunOS': generic_vmstats, 'AIX': generic_vmstats, } errmsg = 'This method is unsupported on the current operating system!' return get_version.get(__grains__['kernel'], lambda: errmsg)()
[ "def", "vmstats", "(", ")", ":", "def", "linux_vmstats", "(", ")", ":", "'''\n linux specific implementation of vmstats\n '''", "ret", "=", "{", "}", "try", ":", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "'/proc/vmstat'", ",", "'r'", ")", "as", "fp_", ":", "stats", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_unicode", "(", "fp_", ".", "read", "(", ")", ")", "except", "IOError", ":", "pass", "else", ":", "for", "line", "in", "stats", ".", "splitlines", "(", ")", ":", "if", "not", "line", ":", "continue", "comps", "=", "line", ".", "split", "(", ")", "ret", "[", "comps", "[", "0", "]", "]", "=", "_number", "(", "comps", "[", "1", "]", ")", "return", "ret", "def", "generic_vmstats", "(", ")", ":", "'''\n generic implementation of vmstats\n note: works on FreeBSD, SunOS and OpenBSD (possibly others)\n '''", "ret", "=", "{", "}", "for", "line", "in", "__salt__", "[", "'cmd.run'", "]", "(", "'vmstat -s'", ")", ".", "splitlines", "(", ")", ":", "comps", "=", "line", ".", "split", "(", ")", "if", "comps", "[", "0", "]", ".", "isdigit", "(", ")", ":", "ret", "[", "' '", ".", "join", "(", "comps", "[", "1", ":", "]", ")", "]", "=", "_number", "(", "comps", "[", "0", "]", ".", "strip", "(", ")", ")", "return", "ret", "# dict that returns a function that does the right thing per platform", "get_version", "=", "{", "'Linux'", ":", "linux_vmstats", ",", "'FreeBSD'", ":", "generic_vmstats", ",", "'OpenBSD'", ":", "generic_vmstats", ",", "'SunOS'", ":", "generic_vmstats", ",", "'AIX'", ":", "generic_vmstats", ",", "}", "errmsg", "=", "'This method is unsupported on the current operating system!'", "return", "get_version", ".", "get", "(", "__grains__", "[", "'kernel'", "]", ",", "lambda", ":", "errmsg", ")", "(", ")" ]
28.236364
21.472727
def _srvc_set_config(self, trajectory): """Sets a config value to the Trajectory or changes it if the trajectory was loaded a the settings no longer match""" def _set_config(name, value, comment): if not trajectory.f_contains('config.'+name, shortcuts=False): trajectory.f_add_config(Parameter, name, value, comment=comment) for attr_name in HDF5StorageService.NAME_TABLE_MAPPING: table_name = HDF5StorageService.NAME_TABLE_MAPPING[attr_name] value = getattr(self, attr_name) _set_config('hdf5.overview.' + table_name, value, comment='Whether or not to have an overview ' 'table with that name') _set_config('hdf5.purge_duplicate_comments', self._purge_duplicate_comments, comment='Whether comments of results and' ' derived parameters should only' ' be stored for the very first instance.' ' Works only if the summary tables are' ' active.') _set_config('hdf5.results_per_run', self._results_per_run, comment='Expected number of results per run,' ' a good guess can increase storage performance') _set_config('hdf5.derived_parameters_per_run', self._derived_parameters_per_run, comment='Expected number of derived parameters per run,' ' a good guess can increase storage performance') _set_config('hdf5.complevel', self._complevel, comment='Compression Level (0 no compression ' 'to 9 highest compression)') _set_config('hdf5.complib', self._complib, comment='Compression Algorithm') _set_config('hdf5.encoding', self._encoding, comment='Encoding for unicode characters') _set_config('hdf5.fletcher32', self._fletcher32, comment='Whether to use fletcher 32 checksum') _set_config('hdf5.shuffle', self._shuffle, comment='Whether to use shuffle filtering.') _set_config('hdf5.pandas_format', self._pandas_format, comment='''How to store pandas data frames, either''' ''' 'fixed' ('f') or 'table' ('t').''') if trajectory.f_contains('config.hdf5', shortcuts=False): if trajectory.config.hdf5.v_comment == '': # If this has not happened yet, add a description of the hdf5 config group trajectory.config.hdf5.v_comment = 'Settings for the standard HDF5 storage service' trajectory.v_storage_service = self
[ "def", "_srvc_set_config", "(", "self", ",", "trajectory", ")", ":", "def", "_set_config", "(", "name", ",", "value", ",", "comment", ")", ":", "if", "not", "trajectory", ".", "f_contains", "(", "'config.'", "+", "name", ",", "shortcuts", "=", "False", ")", ":", "trajectory", ".", "f_add_config", "(", "Parameter", ",", "name", ",", "value", ",", "comment", "=", "comment", ")", "for", "attr_name", "in", "HDF5StorageService", ".", "NAME_TABLE_MAPPING", ":", "table_name", "=", "HDF5StorageService", ".", "NAME_TABLE_MAPPING", "[", "attr_name", "]", "value", "=", "getattr", "(", "self", ",", "attr_name", ")", "_set_config", "(", "'hdf5.overview.'", "+", "table_name", ",", "value", ",", "comment", "=", "'Whether or not to have an overview '", "'table with that name'", ")", "_set_config", "(", "'hdf5.purge_duplicate_comments'", ",", "self", ".", "_purge_duplicate_comments", ",", "comment", "=", "'Whether comments of results and'", "' derived parameters should only'", "' be stored for the very first instance.'", "' Works only if the summary tables are'", "' active.'", ")", "_set_config", "(", "'hdf5.results_per_run'", ",", "self", ".", "_results_per_run", ",", "comment", "=", "'Expected number of results per run,'", "' a good guess can increase storage performance'", ")", "_set_config", "(", "'hdf5.derived_parameters_per_run'", ",", "self", ".", "_derived_parameters_per_run", ",", "comment", "=", "'Expected number of derived parameters per run,'", "' a good guess can increase storage performance'", ")", "_set_config", "(", "'hdf5.complevel'", ",", "self", ".", "_complevel", ",", "comment", "=", "'Compression Level (0 no compression '", "'to 9 highest compression)'", ")", "_set_config", "(", "'hdf5.complib'", ",", "self", ".", "_complib", ",", "comment", "=", "'Compression Algorithm'", ")", "_set_config", "(", "'hdf5.encoding'", ",", "self", ".", "_encoding", ",", "comment", "=", "'Encoding for unicode characters'", ")", "_set_config", "(", "'hdf5.fletcher32'", ",", "self", ".", "_fletcher32", ",", "comment", "=", "'Whether to use fletcher 32 checksum'", ")", "_set_config", "(", "'hdf5.shuffle'", ",", "self", ".", "_shuffle", ",", "comment", "=", "'Whether to use shuffle filtering.'", ")", "_set_config", "(", "'hdf5.pandas_format'", ",", "self", ".", "_pandas_format", ",", "comment", "=", "'''How to store pandas data frames, either'''", "''' 'fixed' ('f') or 'table' ('t').'''", ")", "if", "trajectory", ".", "f_contains", "(", "'config.hdf5'", ",", "shortcuts", "=", "False", ")", ":", "if", "trajectory", ".", "config", ".", "hdf5", ".", "v_comment", "==", "''", ":", "# If this has not happened yet, add a description of the hdf5 config group", "trajectory", ".", "config", ".", "hdf5", ".", "v_comment", "=", "'Settings for the standard HDF5 storage service'", "trajectory", ".", "v_storage_service", "=", "self" ]
48.465517
24.5
def fix_related_item_tag(dom): """ Remove <mods:relatedItem> tag in case that there is only <mods:location> subtag. """ location = dom.match( "mods:mods", "mods:relatedItem", "mods:location" ) if not location: return location = first(location) location.replaceWith( dhtmlparser.HTMLElement() ) # remove whole <mods:relatedItem> tag, if there is nothing else left in it related_item = dom.match( "mods:mods", "mods:relatedItem" ) related_item = first(related_item) if not related_item.getContent().strip(): related_item.replaceWith(dhtmlparser.HTMLElement())
[ "def", "fix_related_item_tag", "(", "dom", ")", ":", "location", "=", "dom", ".", "match", "(", "\"mods:mods\"", ",", "\"mods:relatedItem\"", ",", "\"mods:location\"", ")", "if", "not", "location", ":", "return", "location", "=", "first", "(", "location", ")", "location", ".", "replaceWith", "(", "dhtmlparser", ".", "HTMLElement", "(", ")", ")", "# remove whole <mods:relatedItem> tag, if there is nothing else left in it", "related_item", "=", "dom", ".", "match", "(", "\"mods:mods\"", ",", "\"mods:relatedItem\"", ")", "related_item", "=", "first", "(", "related_item", ")", "if", "not", "related_item", ".", "getContent", "(", ")", ".", "strip", "(", ")", ":", "related_item", ".", "replaceWith", "(", "dhtmlparser", ".", "HTMLElement", "(", ")", ")" ]
23.5
21.142857
def get_opt_val(obj_pyxb, attr_str, default_val=None): """Get an optional Simple Content value from a PyXB element. The attributes for elements that are optional according to the schema and not set in the PyXB object are present and set to None. PyXB validation will fail if required elements are missing. Args: obj_pyxb: PyXB object attr_str: str Name of an attribute that the PyXB object may contain. default_val: any object Value to return if the attribute is not present. Returns: str : Value of the attribute if present, else ``default_val``. """ try: return get_req_val(getattr(obj_pyxb, attr_str)) except (ValueError, AttributeError): return default_val
[ "def", "get_opt_val", "(", "obj_pyxb", ",", "attr_str", ",", "default_val", "=", "None", ")", ":", "try", ":", "return", "get_req_val", "(", "getattr", "(", "obj_pyxb", ",", "attr_str", ")", ")", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "return", "default_val" ]
29.4
24.16
def command_callback(result=None): """ :type result: opendnp3.ICommandTaskResult """ print("Received command result with summary: {}".format(opendnp3.TaskCompletionToString(result.summary))) result.ForeachItem(collection_callback)
[ "def", "command_callback", "(", "result", "=", "None", ")", ":", "print", "(", "\"Received command result with summary: {}\"", ".", "format", "(", "opendnp3", ".", "TaskCompletionToString", "(", "result", ".", "summary", ")", ")", ")", "result", ".", "ForeachItem", "(", "collection_callback", ")" ]
40.833333
13.833333
def make_light_tarfile(self, name=None): """Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file.""" name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])
[ "def", "make_light_tarfile", "(", "self", ",", "name", "=", "None", ")", ":", "name", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "workdir", ")", "+", "\"-light.tar.gz\"", "if", "name", "is", "None", "else", "name", "return", "self", ".", "make_tarfile", "(", "name", "=", "name", ",", "exclude_dirs", "=", "[", "\"outdata\"", ",", "\"indata\"", ",", "\"tmpdata\"", "]", ")" ]
80.5
24.75
def _result(self) -> ResultLazyType: """ ``self.config.replacer_function``(``Callable[[str], str]``) must exists. """ config = cast(IntervalsCollectionBasedReplacerConfig, self.config) diff_acc = 0 for interval, aggregated_mark in self.continuous_intervals(): start, end = interval processed_start = start + diff_acc processed_end = end + diff_acc segment = self.input_sequence[start:end] if aggregated_mark is not None: processed_segment = config.labeler2repl[cast(Type[workflow.IntervalLabeler], aggregated_mark)](segment) if not processed_segment: # segment is removed. processed_end = processed_start else: processed_end = processed_start + len(processed_segment) diff_acc += len(processed_segment) - len(segment) segment = processed_segment yield segment, (interval, (processed_start, processed_end), aggregated_mark is not None)
[ "def", "_result", "(", "self", ")", "->", "ResultLazyType", ":", "config", "=", "cast", "(", "IntervalsCollectionBasedReplacerConfig", ",", "self", ".", "config", ")", "diff_acc", "=", "0", "for", "interval", ",", "aggregated_mark", "in", "self", ".", "continuous_intervals", "(", ")", ":", "start", ",", "end", "=", "interval", "processed_start", "=", "start", "+", "diff_acc", "processed_end", "=", "end", "+", "diff_acc", "segment", "=", "self", ".", "input_sequence", "[", "start", ":", "end", "]", "if", "aggregated_mark", "is", "not", "None", ":", "processed_segment", "=", "config", ".", "labeler2repl", "[", "cast", "(", "Type", "[", "workflow", ".", "IntervalLabeler", "]", ",", "aggregated_mark", ")", "]", "(", "segment", ")", "if", "not", "processed_segment", ":", "# segment is removed.", "processed_end", "=", "processed_start", "else", ":", "processed_end", "=", "processed_start", "+", "len", "(", "processed_segment", ")", "diff_acc", "+=", "len", "(", "processed_segment", ")", "-", "len", "(", "segment", ")", "segment", "=", "processed_segment", "yield", "segment", ",", "(", "interval", ",", "(", "processed_start", ",", "processed_end", ")", ",", "aggregated_mark", "is", "not", "None", ")" ]
40.5
23.285714
def apnumber(value): """For numbers 1-9, returns the number spelled out. Otherwise, returns the number. This follows Associated Press style. This always returns a string unless the value was not int-able, unlike the Django filter.""" try: value = int(value) except (TypeError, ValueError): return value if not 0 < value < 10: return str(value) return (_('one'), _('two'), _('three'), _('four'), _('five'), _('six'), _('seven'), _('eight'), _('nine'))[value - 1]
[ "def", "apnumber", "(", "value", ")", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "return", "value", "if", "not", "0", "<", "value", "<", "10", ":", "return", "str", "(", "value", ")", "return", "(", "_", "(", "'one'", ")", ",", "_", "(", "'two'", ")", ",", "_", "(", "'three'", ")", ",", "_", "(", "'four'", ")", ",", "_", "(", "'five'", ")", ",", "_", "(", "'six'", ")", ",", "_", "(", "'seven'", ")", ",", "_", "(", "'eight'", ")", ",", "_", "(", "'nine'", ")", ")", "[", "value", "-", "1", "]" ]
42.916667
17.5
def cut_video_stream(stream, start, end, fmt): """ cut video stream from `start` to `end` time Parameters ---------- stream : bytes video file content start : float start time end : float end time Returns ------- result : bytes content of cut video """ with TemporaryDirectory() as tmp: in_file = Path(tmp) / f"in{fmt}" out_file = Path(tmp) / f"out{fmt}" in_file.write_bytes(stream) try: ret = subprocess.run( [ "ffmpeg", "-ss", f"{start}", "-i", f"{in_file}", "-to", f"{end}", "-c", "copy", f"{out_file}", ], capture_output=True, ) except FileNotFoundError: result = stream else: if ret.returncode: result = stream else: result = out_file.read_bytes() return result
[ "def", "cut_video_stream", "(", "stream", ",", "start", ",", "end", ",", "fmt", ")", ":", "with", "TemporaryDirectory", "(", ")", "as", "tmp", ":", "in_file", "=", "Path", "(", "tmp", ")", "/", "f\"in{fmt}\"", "out_file", "=", "Path", "(", "tmp", ")", "/", "f\"out{fmt}\"", "in_file", ".", "write_bytes", "(", "stream", ")", "try", ":", "ret", "=", "subprocess", ".", "run", "(", "[", "\"ffmpeg\"", ",", "\"-ss\"", ",", "f\"{start}\"", ",", "\"-i\"", ",", "f\"{in_file}\"", ",", "\"-to\"", ",", "f\"{end}\"", ",", "\"-c\"", ",", "\"copy\"", ",", "f\"{out_file}\"", ",", "]", ",", "capture_output", "=", "True", ",", ")", "except", "FileNotFoundError", ":", "result", "=", "stream", "else", ":", "if", "ret", ".", "returncode", ":", "result", "=", "stream", "else", ":", "result", "=", "out_file", ".", "read_bytes", "(", ")", "return", "result" ]
22.285714
17.836735
def _validate_xor_args(self, p): """ Raises ValueError if 2 arguments are not passed to an XOR """ if len(p[1]) != 2: raise ValueError('Invalid syntax: XOR only accepts 2 arguments, got {0}: {1}'.format(len(p[1]), p))
[ "def", "_validate_xor_args", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", "[", "1", "]", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Invalid syntax: XOR only accepts 2 arguments, got {0}: {1}'", ".", "format", "(", "len", "(", "p", "[", "1", "]", ")", ",", "p", ")", ")" ]
42.666667
19.666667
async def change_presence(self, *, activity=None, status=None, afk=False): """|coro| Changes the client's presence. The activity parameter is a :class:`.Activity` object (not a string) that represents the activity being done currently. This could also be the slimmed down versions, :class:`.Game` and :class:`.Streaming`. Example --------- .. code-block:: python3 game = discord.Game("with the API") await client.change_presence(status=discord.Status.idle, activity=game) Parameters ---------- activity: Optional[Union[:class:`.Game`, :class:`.Streaming`, :class:`.Activity`]] The activity being done. ``None`` if no currently active activity is done. status: Optional[:class:`.Status`] Indicates what status to change to. If None, then :attr:`.Status.online` is used. afk: :class:`bool` Indicates if you are going AFK. This allows the discord client to know how to handle push notifications better for you in case you are actually idle and not lying. Raises ------ InvalidArgument If the ``activity`` parameter is not the proper type. """ if status is None: status = 'online' status_enum = Status.online elif status is Status.offline: status = 'invisible' status_enum = Status.offline else: status_enum = status status = str(status) await self.ws.change_presence(activity=activity, status=status, afk=afk) for guild in self._connection.guilds: me = guild.me if me is None: continue me.activities = (activity,) me.status = status_enum
[ "async", "def", "change_presence", "(", "self", ",", "*", ",", "activity", "=", "None", ",", "status", "=", "None", ",", "afk", "=", "False", ")", ":", "if", "status", "is", "None", ":", "status", "=", "'online'", "status_enum", "=", "Status", ".", "online", "elif", "status", "is", "Status", ".", "offline", ":", "status", "=", "'invisible'", "status_enum", "=", "Status", ".", "offline", "else", ":", "status_enum", "=", "status", "status", "=", "str", "(", "status", ")", "await", "self", ".", "ws", ".", "change_presence", "(", "activity", "=", "activity", ",", "status", "=", "status", ",", "afk", "=", "afk", ")", "for", "guild", "in", "self", ".", "_connection", ".", "guilds", ":", "me", "=", "guild", ".", "me", "if", "me", "is", "None", ":", "continue", "me", ".", "activities", "=", "(", "activity", ",", ")", "me", ".", "status", "=", "status_enum" ]
33.555556
22.518519
def _send_notification(self, handle, payload): """Send a notification over BLE It is executed in the baBLE working thread: should not be blocking. Args: handle (int): The handle to notify on payload (bytearray): The value to notify """ self.bable.notify( connection_handle=self._connection_handle, attribute_handle=handle, value=payload )
[ "def", "_send_notification", "(", "self", ",", "handle", ",", "payload", ")", ":", "self", ".", "bable", ".", "notify", "(", "connection_handle", "=", "self", ".", "_connection_handle", ",", "attribute_handle", "=", "handle", ",", "value", "=", "payload", ")" ]
31.071429
17.642857
def set_change_request_state(change_id, state='approved'): ''' Set the approval state of a change request/record :param change_id: The ID of the change request, e.g. CHG123545 :type change_id: ``str`` :param state: The target state, e.g. approved :type state: ``str`` CLI Example: .. code-block:: bash salt myminion servicenow.set_change_request_state CHG000123 declined salt myminion servicenow.set_change_request_state CHG000123 approved ''' client = _get_client() client.table = 'change_request' # Get the change record first record = client.get({'number': change_id}) if not record: log.error('Failed to fetch change record, maybe it does not exist?') return False # Use the sys_id as the unique system record sys_id = record[0]['sys_id'] response = client.update({'approval': state}, sys_id) return response
[ "def", "set_change_request_state", "(", "change_id", ",", "state", "=", "'approved'", ")", ":", "client", "=", "_get_client", "(", ")", "client", ".", "table", "=", "'change_request'", "# Get the change record first", "record", "=", "client", ".", "get", "(", "{", "'number'", ":", "change_id", "}", ")", "if", "not", "record", ":", "log", ".", "error", "(", "'Failed to fetch change record, maybe it does not exist?'", ")", "return", "False", "# Use the sys_id as the unique system record", "sys_id", "=", "record", "[", "0", "]", "[", "'sys_id'", "]", "response", "=", "client", ".", "update", "(", "{", "'approval'", ":", "state", "}", ",", "sys_id", ")", "return", "response" ]
32
22.642857
def to_string(self, value): """ In python3, json.dumps() cannot sort keys of different types, so preconvert None to 'null'. """ self.enforce_type(value) if isinstance(value, dict) and None in value: value = value.copy() value['null'] = value[None] del value[None] return super(Dict, self).to_string(value)
[ "def", "to_string", "(", "self", ",", "value", ")", ":", "self", ".", "enforce_type", "(", "value", ")", "if", "isinstance", "(", "value", ",", "dict", ")", "and", "None", "in", "value", ":", "value", "=", "value", ".", "copy", "(", ")", "value", "[", "'null'", "]", "=", "value", "[", "None", "]", "del", "value", "[", "None", "]", "return", "super", "(", "Dict", ",", "self", ")", ".", "to_string", "(", "value", ")" ]
35.181818
8.818182
def set_bp(cls, ctx, register, address, trigger, watch): """ Sets a hardware breakpoint. @see: clear_bp, find_slot @type ctx: dict( str S{->} int ) @param ctx: Thread context dictionary. @type register: int @param register: Slot (debug register). @type address: int @param address: Memory address. @type trigger: int @param trigger: Trigger flag. See L{HardwareBreakpoint.validTriggers}. @type watch: int @param watch: Watch flag. See L{HardwareBreakpoint.validWatchSizes}. """ Dr7 = ctx['Dr7'] Dr7 |= cls.enableMask[register] orMask, andMask = cls.triggerMask[register][trigger] Dr7 &= andMask Dr7 |= orMask orMask, andMask = cls.watchMask[register][watch] Dr7 &= andMask Dr7 |= orMask ctx['Dr7'] = Dr7 ctx['Dr%d' % register] = address
[ "def", "set_bp", "(", "cls", ",", "ctx", ",", "register", ",", "address", ",", "trigger", ",", "watch", ")", ":", "Dr7", "=", "ctx", "[", "'Dr7'", "]", "Dr7", "|=", "cls", ".", "enableMask", "[", "register", "]", "orMask", ",", "andMask", "=", "cls", ".", "triggerMask", "[", "register", "]", "[", "trigger", "]", "Dr7", "&=", "andMask", "Dr7", "|=", "orMask", "orMask", ",", "andMask", "=", "cls", ".", "watchMask", "[", "register", "]", "[", "watch", "]", "Dr7", "&=", "andMask", "Dr7", "|=", "orMask", "ctx", "[", "'Dr7'", "]", "=", "Dr7", "ctx", "[", "'Dr%d'", "%", "register", "]", "=", "address" ]
29.322581
17.83871
def execute(commands, serial=None): """ Sends the command to the connected micro:bit via serial and returns the result. If no serial connection is provided, attempts to autodetect the device. For this to work correctly, a particular sequence of commands needs to be sent to put the device into a good state to process the incoming command. Returns the stdout and stderr output from the micro:bit. """ close_serial = False if serial is None: serial = get_serial() close_serial = True time.sleep(0.1) result = b'' raw_on(serial) time.sleep(0.1) # Write the actual command and send CTRL-D to evaluate. for command in commands: command_bytes = command.encode('utf-8') for i in range(0, len(command_bytes), 32): serial.write(command_bytes[i:min(i + 32, len(command_bytes))]) time.sleep(0.01) serial.write(b'\x04') response = serial.read_until(b'\x04>') # Read until prompt. out, err = response[2:-2].split(b'\x04', 1) # Split stdout, stderr result += out if err: return b'', err time.sleep(0.1) raw_off(serial) if close_serial: serial.close() time.sleep(0.1) return result, err
[ "def", "execute", "(", "commands", ",", "serial", "=", "None", ")", ":", "close_serial", "=", "False", "if", "serial", "is", "None", ":", "serial", "=", "get_serial", "(", ")", "close_serial", "=", "True", "time", ".", "sleep", "(", "0.1", ")", "result", "=", "b''", "raw_on", "(", "serial", ")", "time", ".", "sleep", "(", "0.1", ")", "# Write the actual command and send CTRL-D to evaluate.", "for", "command", "in", "commands", ":", "command_bytes", "=", "command", ".", "encode", "(", "'utf-8'", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "command_bytes", ")", ",", "32", ")", ":", "serial", ".", "write", "(", "command_bytes", "[", "i", ":", "min", "(", "i", "+", "32", ",", "len", "(", "command_bytes", ")", ")", "]", ")", "time", ".", "sleep", "(", "0.01", ")", "serial", ".", "write", "(", "b'\\x04'", ")", "response", "=", "serial", ".", "read_until", "(", "b'\\x04>'", ")", "# Read until prompt.", "out", ",", "err", "=", "response", "[", "2", ":", "-", "2", "]", ".", "split", "(", "b'\\x04'", ",", "1", ")", "# Split stdout, stderr", "result", "+=", "out", "if", "err", ":", "return", "b''", ",", "err", "time", ".", "sleep", "(", "0.1", ")", "raw_off", "(", "serial", ")", "if", "close_serial", ":", "serial", ".", "close", "(", ")", "time", ".", "sleep", "(", "0.1", ")", "return", "result", ",", "err" ]
33.864865
20.675676
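A hedged usage sketch for execute(): it assumes a micro:bit is attached, that the module's get_serial()/raw_on()/raw_off() helpers can reach it, and that the two MicroPython statements below are representative commands rather than anything required by the API.

# Hedged usage sketch: list the files stored on an attached micro:bit.
commands = [
    "import os",
    "print(os.listdir())",
]
out, err = execute(commands)
if err:
    raise IOError(err.decode('utf-8'))
print(out.decode('utf-8'))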
def to_kaf(self): """ Converts the object to KAF (if it is NAF) """ if self.type == 'NAF': self.type = 'KAF' for node in self.__get_node_terms(): node.set('cid', node.get('id')) del node.attrib['id']
[ "def", "to_kaf", "(", "self", ")", ":", "if", "self", ".", "type", "==", "'NAF'", ":", "self", ".", "type", "=", "'KAF'", "for", "node", "in", "self", ".", "__get_node_terms", "(", ")", ":", "node", ".", "set", "(", "'cid'", ",", "node", ".", "get", "(", "'id'", ")", ")", "del", "node", ".", "attrib", "[", "'id'", "]" ]
31
7.888889
def guess_timefmt(datestr): """ Try to guess the format a date is written in. The following formats are supported: ================= ============== =============== Format Example Python format ----------------- -------------- --------------- ``YYYY-MM-DD`` 2002-04-21 %Y-%m-%d ``YYYY.MM.DD`` 2002.04.21 %Y.%m.%d ``YYYY MM DD`` 2002 04 21 %Y %m %d ``DD-MM-YYYY`` 21-04-2002 %d-%m-%Y ``DD.MM.YYYY`` 21.04.2002 %d.%m.%Y ``DD MM YYYY`` 21 04 2002 %d %m %Y ``DD/MM/YYYY`` 21/04/2002 %d/%m/%Y ================= ============== =============== These formats can also be used for seasonal (yearly recurring) time series. The year needs to be replaced by ``9999`` or another configurable year representing the seasonal year.. The following formats are recognised depending on your locale setting. There is no guarantee that this will work. ================= ============== =============== Format Example Python format ----------------- -------------- --------------- ``DD-mmm-YYYY`` 21-Apr-2002 %d-%b-%Y ``DD.mmm.YYYY`` 21.Apr.2002 %d.%b.%Y ``DD mmm YYYY`` 21 Apr 2002 %d %b %Y ``mmm DD YYYY`` Apr 21 2002 %b %d %Y ``Mmmmm DD YYYY`` April 21 2002 %B %d %Y ================= ============== =============== .. note:: - The time needs to follow this definition without exception: `%H:%M:%S.%f`. A complete date and time should therefore look like this:: 2002-04-21 15:29:37.522 - Be aware that in a file with comma separated values you should not use a date format that contains commas. """ if isinstance(datestr, float) or isinstance(datestr, int): return None seasonal_key = str(config.get('DEFAULT', 'seasonal_key', '9999')) #replace 'T' with space to handle ISO times. if datestr.find('T') > 0: dt_delim = 'T' else: dt_delim = ' ' delimiters = ['-', '.', ' ', '/'] formatstrings = [['%Y', '%m', '%d'], ['%d', '%m', '%Y'], ['%d', '%b', '%Y'], ['XXXX', '%m', '%d'], ['%d', '%m', 'XXXX'], ['%d', '%b', 'XXXX'], [seasonal_key, '%m', '%d'], ['%d', '%m', seasonal_key], ['%d', '%b', seasonal_key]] timeformats = ['%H:%M:%S.%f', '%H:%M:%S', '%H:%M', '%H:%M:%S.%f000Z', '%H:%M:%S.%fZ'] # Check if a time is indicated or not for timefmt in timeformats: try: datetime.strptime(datestr.split(dt_delim)[-1].strip(), timefmt) usetime = True break except ValueError: usetime = False # Check the simple ones: for fmt in formatstrings: for delim in delimiters: datefmt = fmt[0] + delim + fmt[1] + delim + fmt[2] if usetime: for timefmt in timeformats: complfmt = datefmt + dt_delim + timefmt try: datetime.strptime(datestr, complfmt) return complfmt except ValueError: pass else: try: datetime.strptime(datestr, datefmt) return datefmt except ValueError: pass # Check for other formats: custom_formats = ['%d/%m/%Y', '%b %d %Y', '%B %d %Y','%d/%m/XXXX', '%d/%m/'+seasonal_key] for fmt in custom_formats: if usetime: for timefmt in timeformats: complfmt = fmt + dt_delim + timefmt try: datetime.strptime(datestr, complfmt) return complfmt except ValueError: pass else: try: datetime.strptime(datestr, fmt) return fmt except ValueError: pass return None
[ "def", "guess_timefmt", "(", "datestr", ")", ":", "if", "isinstance", "(", "datestr", ",", "float", ")", "or", "isinstance", "(", "datestr", ",", "int", ")", ":", "return", "None", "seasonal_key", "=", "str", "(", "config", ".", "get", "(", "'DEFAULT'", ",", "'seasonal_key'", ",", "'9999'", ")", ")", "#replace 'T' with space to handle ISO times.", "if", "datestr", ".", "find", "(", "'T'", ")", ">", "0", ":", "dt_delim", "=", "'T'", "else", ":", "dt_delim", "=", "' '", "delimiters", "=", "[", "'-'", ",", "'.'", ",", "' '", ",", "'/'", "]", "formatstrings", "=", "[", "[", "'%Y'", ",", "'%m'", ",", "'%d'", "]", ",", "[", "'%d'", ",", "'%m'", ",", "'%Y'", "]", ",", "[", "'%d'", ",", "'%b'", ",", "'%Y'", "]", ",", "[", "'XXXX'", ",", "'%m'", ",", "'%d'", "]", ",", "[", "'%d'", ",", "'%m'", ",", "'XXXX'", "]", ",", "[", "'%d'", ",", "'%b'", ",", "'XXXX'", "]", ",", "[", "seasonal_key", ",", "'%m'", ",", "'%d'", "]", ",", "[", "'%d'", ",", "'%m'", ",", "seasonal_key", "]", ",", "[", "'%d'", ",", "'%b'", ",", "seasonal_key", "]", "]", "timeformats", "=", "[", "'%H:%M:%S.%f'", ",", "'%H:%M:%S'", ",", "'%H:%M'", ",", "'%H:%M:%S.%f000Z'", ",", "'%H:%M:%S.%fZ'", "]", "# Check if a time is indicated or not", "for", "timefmt", "in", "timeformats", ":", "try", ":", "datetime", ".", "strptime", "(", "datestr", ".", "split", "(", "dt_delim", ")", "[", "-", "1", "]", ".", "strip", "(", ")", ",", "timefmt", ")", "usetime", "=", "True", "break", "except", "ValueError", ":", "usetime", "=", "False", "# Check the simple ones:", "for", "fmt", "in", "formatstrings", ":", "for", "delim", "in", "delimiters", ":", "datefmt", "=", "fmt", "[", "0", "]", "+", "delim", "+", "fmt", "[", "1", "]", "+", "delim", "+", "fmt", "[", "2", "]", "if", "usetime", ":", "for", "timefmt", "in", "timeformats", ":", "complfmt", "=", "datefmt", "+", "dt_delim", "+", "timefmt", "try", ":", "datetime", ".", "strptime", "(", "datestr", ",", "complfmt", ")", "return", "complfmt", "except", "ValueError", ":", "pass", "else", ":", "try", ":", "datetime", ".", "strptime", "(", "datestr", ",", "datefmt", ")", "return", "datefmt", "except", "ValueError", ":", "pass", "# Check for other formats:", "custom_formats", "=", "[", "'%d/%m/%Y'", ",", "'%b %d %Y'", ",", "'%B %d %Y'", ",", "'%d/%m/XXXX'", ",", "'%d/%m/'", "+", "seasonal_key", "]", "for", "fmt", "in", "custom_formats", ":", "if", "usetime", ":", "for", "timefmt", "in", "timeformats", ":", "complfmt", "=", "fmt", "+", "dt_delim", "+", "timefmt", "try", ":", "datetime", ".", "strptime", "(", "datestr", ",", "complfmt", ")", "return", "complfmt", "except", "ValueError", ":", "pass", "else", ":", "try", ":", "datetime", ".", "strptime", "(", "datestr", ",", "fmt", ")", "return", "fmt", "except", "ValueError", ":", "pass", "return", "None" ]
33.87395
17.773109
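A hedged usage sketch for guess_timefmt(); the expected return values follow from the format tables in the docstring, but locale-dependent month-name formats and the configured seasonal key may change behaviour on other systems.

# Expected results are derived from the docstring's format tables.
print(guess_timefmt('2002-04-21'))                  # '%Y-%m-%d'
print(guess_timefmt('21/04/2002 15:29:37.522'))     # '%d/%m/%Y %H:%M:%S.%f'
print(guess_timefmt('not a date'))                  # None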
def _encode_ndef_text_params(self, data):
        """
        Prepend language and encoding information to data, according to
        nfcforum-ts-rtd-text-1-0.pdf
        """
        status = len(self.ndef_text_lang)
        if self.ndef_text_enc == 'UTF16':
            # Bit 7 of the status byte flags UTF-16; OR it in so the language
            # code length stored in the low bits is preserved.
            status = status | 0b10000000
        return yubico_util.chr_byte(status) + self.ndef_text_lang + data
[ "def", "_encode_ndef_text_params", "(", "self", ",", "data", ")", ":", "status", "=", "len", "(", "self", ".", "ndef_text_lang", ")", "if", "self", ".", "ndef_text_enc", "==", "'UTF16'", ":", "status", "=", "status", "&", "0b10000000", "return", "yubico_util", ".", "chr_byte", "(", "status", ")", "+", "self", ".", "ndef_text_lang", "+", "data" ]
40.555556
7.888889
def modify_conf(): """ pip install redbaron """ import redbaron import ubelt as ub conf_path = 'docs/conf.py' source = ub.readfrom(conf_path) red = redbaron.RedBaron(source) # Insert custom extensions extra_extensions = [ '"sphinxcontrib.napoleon"' ] ext_node = red.find('name', value='extensions').parent ext_node.value.value.extend(extra_extensions) # Overwrite theme to read-the-docs theme_node = red.find('name', value='html_theme').parent theme_node.value.value = '"sphinx_rtd_theme"' ub.writeto(conf_path, red.dumps())
[ "def", "modify_conf", "(", ")", ":", "import", "redbaron", "import", "ubelt", "as", "ub", "conf_path", "=", "'docs/conf.py'", "source", "=", "ub", ".", "readfrom", "(", "conf_path", ")", "red", "=", "redbaron", ".", "RedBaron", "(", "source", ")", "# Insert custom extensions", "extra_extensions", "=", "[", "'\"sphinxcontrib.napoleon\"'", "]", "ext_node", "=", "red", ".", "find", "(", "'name'", ",", "value", "=", "'extensions'", ")", ".", "parent", "ext_node", ".", "value", ".", "value", ".", "extend", "(", "extra_extensions", ")", "# Overwrite theme to read-the-docs", "theme_node", "=", "red", ".", "find", "(", "'name'", ",", "value", "=", "'html_theme'", ")", ".", "parent", "theme_node", ".", "value", ".", "value", "=", "'\"sphinx_rtd_theme\"'", "ub", ".", "writeto", "(", "conf_path", ",", "red", ".", "dumps", "(", ")", ")" ]
24.25
17.666667
def get_acl(self, key_name='', headers=None, version_id=None):
        """Returns a bucket's ACL. We include a version_id argument to support a
           polymorphic interface for callers; however, version_id is not relevant
           for Google Cloud Storage buckets and is therefore ignored here."""
        return self.get_acl_helper(key_name, headers, STANDARD_ACL)
[ "def", "get_acl", "(", "self", ",", "key_name", "=", "''", ",", "headers", "=", "None", ",", "version_id", "=", "None", ")", ":", "return", "self", ".", "get_acl_helper", "(", "key_name", ",", "headers", ",", "STANDARD_ACL", ")" ]
63.333333
18
def _round_hex(q, r):
    ''' Round floating point axial hex coordinates to integer *(q,r)*
    coordinates.

    This code was adapted from:

        https://www.redblobgames.com/grids/hexagons/#rounding

    Args:
        q (array[float]) :
            NumPy array of floating point axial *q* coordinates to round

        r (array[float]) :
            NumPy array of floating point axial *r* coordinates to round

    Returns:
        (array[int], array[int])

    '''
    x = q
    z = r
    y = -x-z

    rx = np.round(x)
    ry = np.round(y)
    rz = np.round(z)

    dx = np.abs(rx - x)
    dy = np.abs(ry - y)
    dz = np.abs(rz - z)

    cond = (dx > dy) & (dx > dz)
    q = np.where(cond, -(ry + rz), rx)
    r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)

    return q.astype(int), r.astype(int)
[ "def", "_round_hex", "(", "q", ",", "r", ")", ":", "x", "=", "q", "z", "=", "r", "y", "=", "-", "x", "-", "z", "rx", "=", "np", ".", "round", "(", "x", ")", "ry", "=", "np", ".", "round", "(", "y", ")", "rz", "=", "np", ".", "round", "(", "z", ")", "dx", "=", "np", ".", "abs", "(", "rx", "-", "x", ")", "dy", "=", "np", ".", "abs", "(", "ry", "-", "y", ")", "dz", "=", "np", ".", "abs", "(", "rz", "-", "z", ")", "cond", "=", "(", "dx", ">", "dy", ")", "&", "(", "dx", ">", "dz", ")", "q", "=", "np", ".", "where", "(", "cond", ",", "-", "(", "ry", "+", "rz", ")", ",", "rx", ")", "r", "=", "np", ".", "where", "(", "~", "cond", "&", "~", "(", "dy", ">", "dz", ")", ",", "-", "(", "rx", "+", "ry", ")", ",", "rz", ")", "return", "q", ".", "astype", "(", "int", ")", ",", "r", ".", "astype", "(", "int", ")" ]
21.888889
25.777778
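A small hedged usage sketch for _round_hex(); the expected outputs were worked through the cube-rounding rules implemented above and are illustrative values, not part of the library's documentation.

import numpy as np

q = np.array([0.2, 2.4])
r = np.array([0.7, 0.7])
qi, ri = _round_hex(q, r)
# qi -> array([0, 2]), ri -> array([1, 1])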
def compute_start_timeperiod(self, process_name, timeperiod): """ computes lowest *inclusive* timeperiod boundary for job to process for process with time_grouping == 1, it returns given timeperiod with no change for process with time_grouping != 1, it computes first timeperiod, not processed by the previous job run For instance: with time_grouping = 3, QUALIFIER_HOURLY, and timeperiod = 2016042018, the start_timeperiod will be = 2016042016 (computed as 2016042018 - 3 + 1) """ time_grouping = context.process_context[process_name].time_grouping if time_grouping == 1: return timeperiod # step1: translate given timeperiod to the time grouped one process_hierarchy = self.timetable.get_tree(process_name).process_hierarchy timeperiod_dict = process_hierarchy[process_name].timeperiod_dict translated_timeperiod = timeperiod_dict._translate_timeperiod(timeperiod) # step 2: compute previous grouped period # NOTICE: simple `time_helper.increment_timeperiod(time_qualifier, timeperiod)` is insufficient # as it does not address edge cases, such as the last day of the month or the last hour of the day # For instance: with time_grouping=3, QUALIFIER_DAILY, and 2016123100 # the `increment_timeperiod` will yield 2016122800 instead of 2016123100 time_qualifier = context.process_context[process_name].time_qualifier for i in range(1, time_grouping + 1): prev_timeperiod = time_helper.increment_timeperiod(time_qualifier, translated_timeperiod, delta=-i) if prev_timeperiod == timeperiod_dict._translate_timeperiod(prev_timeperiod): # prev_timeperiod is currently at the last grouped timeperiod break # step 3: compute first exclusive timeperiod after the *prev_timeperiod*, # which becomes first inclusive timeperiod for this job run over_the_edge_timeperiod = time_helper.increment_timeperiod(time_qualifier, prev_timeperiod, delta=-1) if prev_timeperiod != timeperiod_dict._translate_timeperiod(over_the_edge_timeperiod): # over_the_edge_timeperiod fell into previous day or month or year # *prev_timeperiod* points to the first month, first day of the month or 00 hour start_timeperiod = prev_timeperiod else: start_timeperiod = self.compute_end_timeperiod(process_name, prev_timeperiod) return start_timeperiod
[ "def", "compute_start_timeperiod", "(", "self", ",", "process_name", ",", "timeperiod", ")", ":", "time_grouping", "=", "context", ".", "process_context", "[", "process_name", "]", ".", "time_grouping", "if", "time_grouping", "==", "1", ":", "return", "timeperiod", "# step1: translate given timeperiod to the time grouped one", "process_hierarchy", "=", "self", ".", "timetable", ".", "get_tree", "(", "process_name", ")", ".", "process_hierarchy", "timeperiod_dict", "=", "process_hierarchy", "[", "process_name", "]", ".", "timeperiod_dict", "translated_timeperiod", "=", "timeperiod_dict", ".", "_translate_timeperiod", "(", "timeperiod", ")", "# step 2: compute previous grouped period", "# NOTICE: simple `time_helper.increment_timeperiod(time_qualifier, timeperiod)` is insufficient", "# as it does not address edge cases, such as the last day of the month or the last hour of the day", "# For instance: with time_grouping=3, QUALIFIER_DAILY, and 2016123100", "# the `increment_timeperiod` will yield 2016122800 instead of 2016123100", "time_qualifier", "=", "context", ".", "process_context", "[", "process_name", "]", ".", "time_qualifier", "for", "i", "in", "range", "(", "1", ",", "time_grouping", "+", "1", ")", ":", "prev_timeperiod", "=", "time_helper", ".", "increment_timeperiod", "(", "time_qualifier", ",", "translated_timeperiod", ",", "delta", "=", "-", "i", ")", "if", "prev_timeperiod", "==", "timeperiod_dict", ".", "_translate_timeperiod", "(", "prev_timeperiod", ")", ":", "# prev_timeperiod is currently at the last grouped timeperiod", "break", "# step 3: compute first exclusive timeperiod after the *prev_timeperiod*,", "# which becomes first inclusive timeperiod for this job run", "over_the_edge_timeperiod", "=", "time_helper", ".", "increment_timeperiod", "(", "time_qualifier", ",", "prev_timeperiod", ",", "delta", "=", "-", "1", ")", "if", "prev_timeperiod", "!=", "timeperiod_dict", ".", "_translate_timeperiod", "(", "over_the_edge_timeperiod", ")", ":", "# over_the_edge_timeperiod fell into previous day or month or year", "# *prev_timeperiod* points to the first month, first day of the month or 00 hour", "start_timeperiod", "=", "prev_timeperiod", "else", ":", "start_timeperiod", "=", "self", ".", "compute_end_timeperiod", "(", "process_name", ",", "prev_timeperiod", ")", "return", "start_timeperiod" ]
64.641026
36.512821
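As a hedged illustration of the hourly example in the docstring only: when the given timeperiod is already aligned to a group boundary, the start boundary is simply the end hour minus (time_grouping - 1). The real method delegates alignment and edge cases (month/day boundaries) to the process hierarchy's timeperiod_dict, so the helper below is a simplification, not the scheduler's implementation.

def hourly_group_start(timeperiod, time_grouping):
    # e.g. '2016042018' with grouping 3 -> hours 16,17,18 form one group
    hour = int(timeperiod[-2:])
    start_hour = hour - time_grouping + 1
    return '{0}{1:02d}'.format(timeperiod[:-2], start_hour)

assert hourly_group_start('2016042018', 3) == '2016042016'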
def pretty_print_match(match, parameterized=True): """Return a human-readable representation of a parameterized MATCH query string.""" left_curly = '{{' if parameterized else '{' right_curly = '}}' if parameterized else '}' match = remove_custom_formatting(match) parts = re.split('({}|{})'.format(left_curly, right_curly), match) inside_braces = False indent_size = 4 indent = ' ' * indent_size output = [parts[0]] for current_index, current_part in enumerate(parts[1:]): if current_part == left_curly: if inside_braces: raise AssertionError(u'Found open-braces pair while already inside braces: ' u'{} {} {}'.format(current_index, parts, match)) inside_braces = True output.append(current_part + '\n') elif current_part == right_curly: if not inside_braces: raise AssertionError(u'Found close-braces pair while not inside braces: ' u'{} {} {}'.format(current_index, parts, match)) inside_braces = False output.append(current_part) else: if not inside_braces: stripped_part = current_part.lstrip() if stripped_part.startswith('.'): # Strip whitespace before traversal steps. output.append(stripped_part) else: # Do not strip whitespace before e.g. the RETURN keyword. output.append(current_part) else: # Split out the keywords, initially getting rid of commas. separate_keywords = re.split(', ([a-z]+:)', current_part) # The first item in the separated list is the full first "keyword: value" pair. # For every subsequent item, the keyword and value are separated; join them # back together, outputting the comma, newline and indentation before them. output.append(indent + separate_keywords[0].lstrip()) for i in six.moves.xrange(1, len(separate_keywords) - 1, 2): output.append(',\n{indent}{keyword} {value}'.format( keyword=separate_keywords[i].strip(), value=separate_keywords[i + 1].strip(), indent=indent)) output.append('\n') return ''.join(output).strip()
[ "def", "pretty_print_match", "(", "match", ",", "parameterized", "=", "True", ")", ":", "left_curly", "=", "'{{'", "if", "parameterized", "else", "'{'", "right_curly", "=", "'}}'", "if", "parameterized", "else", "'}'", "match", "=", "remove_custom_formatting", "(", "match", ")", "parts", "=", "re", ".", "split", "(", "'({}|{})'", ".", "format", "(", "left_curly", ",", "right_curly", ")", ",", "match", ")", "inside_braces", "=", "False", "indent_size", "=", "4", "indent", "=", "' '", "*", "indent_size", "output", "=", "[", "parts", "[", "0", "]", "]", "for", "current_index", ",", "current_part", "in", "enumerate", "(", "parts", "[", "1", ":", "]", ")", ":", "if", "current_part", "==", "left_curly", ":", "if", "inside_braces", ":", "raise", "AssertionError", "(", "u'Found open-braces pair while already inside braces: '", "u'{} {} {}'", ".", "format", "(", "current_index", ",", "parts", ",", "match", ")", ")", "inside_braces", "=", "True", "output", ".", "append", "(", "current_part", "+", "'\\n'", ")", "elif", "current_part", "==", "right_curly", ":", "if", "not", "inside_braces", ":", "raise", "AssertionError", "(", "u'Found close-braces pair while not inside braces: '", "u'{} {} {}'", ".", "format", "(", "current_index", ",", "parts", ",", "match", ")", ")", "inside_braces", "=", "False", "output", ".", "append", "(", "current_part", ")", "else", ":", "if", "not", "inside_braces", ":", "stripped_part", "=", "current_part", ".", "lstrip", "(", ")", "if", "stripped_part", ".", "startswith", "(", "'.'", ")", ":", "# Strip whitespace before traversal steps.", "output", ".", "append", "(", "stripped_part", ")", "else", ":", "# Do not strip whitespace before e.g. the RETURN keyword.", "output", ".", "append", "(", "current_part", ")", "else", ":", "# Split out the keywords, initially getting rid of commas.", "separate_keywords", "=", "re", ".", "split", "(", "', ([a-z]+:)'", ",", "current_part", ")", "# The first item in the separated list is the full first \"keyword: value\" pair.", "# For every subsequent item, the keyword and value are separated; join them", "# back together, outputting the comma, newline and indentation before them.", "output", ".", "append", "(", "indent", "+", "separate_keywords", "[", "0", "]", ".", "lstrip", "(", ")", ")", "for", "i", "in", "six", ".", "moves", ".", "xrange", "(", "1", ",", "len", "(", "separate_keywords", ")", "-", "1", ",", "2", ")", ":", "output", ".", "append", "(", "',\\n{indent}{keyword} {value}'", ".", "format", "(", "keyword", "=", "separate_keywords", "[", "i", "]", ".", "strip", "(", ")", ",", "value", "=", "separate_keywords", "[", "i", "+", "1", "]", ".", "strip", "(", ")", ",", "indent", "=", "indent", ")", ")", "output", ".", "append", "(", "'\\n'", ")", "return", "''", ".", "join", "(", "output", ")", ".", "strip", "(", ")" ]
48.74
21.68
def init(context, reset, force): """Setup the database.""" existing_tables = context.obj['store'].engine.table_names() if force or reset: if existing_tables and not force: message = f"Delete existing tables? [{', '.join(existing_tables)}]" click.confirm(click.style(message, fg='yellow'), abort=True) context.obj['store'].drop_all() elif existing_tables: click.echo(click.style("Database already exists, use '--reset'", fg='red')) context.abort() context.obj['store'].setup() message = f"Success! New tables: {', '.join(context.obj['store'].engine.table_names())}" click.echo(click.style(message, fg='green'))
[ "def", "init", "(", "context", ",", "reset", ",", "force", ")", ":", "existing_tables", "=", "context", ".", "obj", "[", "'store'", "]", ".", "engine", ".", "table_names", "(", ")", "if", "force", "or", "reset", ":", "if", "existing_tables", "and", "not", "force", ":", "message", "=", "f\"Delete existing tables? [{', '.join(existing_tables)}]\"", "click", ".", "confirm", "(", "click", ".", "style", "(", "message", ",", "fg", "=", "'yellow'", ")", ",", "abort", "=", "True", ")", "context", ".", "obj", "[", "'store'", "]", ".", "drop_all", "(", ")", "elif", "existing_tables", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "\"Database already exists, use '--reset'\"", ",", "fg", "=", "'red'", ")", ")", "context", ".", "abort", "(", ")", "context", ".", "obj", "[", "'store'", "]", ".", "setup", "(", ")", "message", "=", "f\"Success! New tables: {', '.join(context.obj['store'].engine.table_names())}\"", "click", ".", "echo", "(", "click", ".", "style", "(", "message", ",", "fg", "=", "'green'", ")", ")" ]
45.333333
20.333333
def regex(self, regex = None): """Regex Sets or gets the regular expression used to validate the Node Arguments: regex {str} -- A standard regular expression string Raises: ValueError Returns: None | str """ # If regex was not set, this is a getter if regex is None: return self._regex # If the type is not a string if self._type != 'string': sys.stderr.write('can not set __regex__ for %s' % self._type) return # If it's not a valid string or regex if not isinstance(regex, (basestring, _REGEX_TYPE)): raise ValueError('__regex__') # Store the regex self._regex = regex
[ "def", "regex", "(", "self", ",", "regex", "=", "None", ")", ":", "# If regex was not set, this is a getter", "if", "regex", "is", "None", ":", "return", "self", ".", "_regex", "# If the type is not a string", "if", "self", ".", "_type", "!=", "'string'", ":", "sys", ".", "stderr", ".", "write", "(", "'can not set __regex__ for %s'", "%", "self", ".", "_type", ")", "return", "# If it's not a valid string or regex", "if", "not", "isinstance", "(", "regex", ",", "(", "basestring", ",", "_REGEX_TYPE", ")", ")", ":", "raise", "ValueError", "(", "'__regex__'", ")", "# Store the regex", "self", ".", "_regex", "=", "regex" ]
19.933333
23.033333
def list_all_zones_by_id(region=None, key=None, keyid=None, profile=None): ''' List, by their IDs, all hosted zones in the bound account. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. CLI Example: .. code-block:: bash salt myminion boto_route53.list_all_zones_by_id ''' ret = describe_hosted_zones(region=region, key=key, keyid=keyid, profile=profile) return [r['Id'].replace('/hostedzone/', '') for r in ret]
[ "def", "list_all_zones_by_id", "(", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "describe_hosted_zones", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "return", "[", "r", "[", "'Id'", "]", ".", "replace", "(", "'/hostedzone/'", ",", "''", ")", "for", "r", "in", "ret", "]" ]
25.807692
27.346154
def force_overlap(move_this, from_positions, to_positions, add_bond=True): """Computes an affine transformation that maps the from_positions to the respective to_positions, and applies this transformation to the compound. Parameters ---------- move_this : mb.Compound The Compound to be moved. from_positions : np.ndarray, shape=(n, 3), dtype=float Original positions. to_positions : np.ndarray, shape=(n, 3), dtype=float New positions. add_bond : bool, optional, default=True If `from_positions` and `to_positions` are `Ports`, create a bond between the two anchor atoms. """ from mbuild.port import Port T = None if isinstance(from_positions, (list, tuple)) and isinstance(to_positions, (list, tuple)): equivalence_pairs = zip(from_positions, to_positions) elif isinstance(from_positions, Port) and isinstance(to_positions, Port): equivalence_pairs, T = _choose_correct_port(from_positions, to_positions) from_positions.used = True to_positions.used = True else: equivalence_pairs = [(from_positions, to_positions)] if not T: T = _create_equivalence_transform(equivalence_pairs) atom_positions = move_this.xyz_with_ports atom_positions = T.apply_to(atom_positions) move_this.xyz_with_ports = atom_positions if add_bond: if isinstance(from_positions, Port) and isinstance(to_positions, Port): if not from_positions.anchor or not to_positions.anchor: warn("Attempting to form bond from port that has no anchor") else: from_positions.anchor.parent.add_bond((from_positions.anchor, to_positions.anchor)) to_positions.anchor.parent.add_bond((from_positions.anchor, to_positions.anchor)) from_positions.anchor.parent.remove(from_positions) to_positions.anchor.parent.remove(to_positions)
[ "def", "force_overlap", "(", "move_this", ",", "from_positions", ",", "to_positions", ",", "add_bond", "=", "True", ")", ":", "from", "mbuild", ".", "port", "import", "Port", "T", "=", "None", "if", "isinstance", "(", "from_positions", ",", "(", "list", ",", "tuple", ")", ")", "and", "isinstance", "(", "to_positions", ",", "(", "list", ",", "tuple", ")", ")", ":", "equivalence_pairs", "=", "zip", "(", "from_positions", ",", "to_positions", ")", "elif", "isinstance", "(", "from_positions", ",", "Port", ")", "and", "isinstance", "(", "to_positions", ",", "Port", ")", ":", "equivalence_pairs", ",", "T", "=", "_choose_correct_port", "(", "from_positions", ",", "to_positions", ")", "from_positions", ".", "used", "=", "True", "to_positions", ".", "used", "=", "True", "else", ":", "equivalence_pairs", "=", "[", "(", "from_positions", ",", "to_positions", ")", "]", "if", "not", "T", ":", "T", "=", "_create_equivalence_transform", "(", "equivalence_pairs", ")", "atom_positions", "=", "move_this", ".", "xyz_with_ports", "atom_positions", "=", "T", ".", "apply_to", "(", "atom_positions", ")", "move_this", ".", "xyz_with_ports", "=", "atom_positions", "if", "add_bond", ":", "if", "isinstance", "(", "from_positions", ",", "Port", ")", "and", "isinstance", "(", "to_positions", ",", "Port", ")", ":", "if", "not", "from_positions", ".", "anchor", "or", "not", "to_positions", ".", "anchor", ":", "warn", "(", "\"Attempting to form bond from port that has no anchor\"", ")", "else", ":", "from_positions", ".", "anchor", ".", "parent", ".", "add_bond", "(", "(", "from_positions", ".", "anchor", ",", "to_positions", ".", "anchor", ")", ")", "to_positions", ".", "anchor", ".", "parent", ".", "add_bond", "(", "(", "from_positions", ".", "anchor", ",", "to_positions", ".", "anchor", ")", ")", "from_positions", ".", "anchor", ".", "parent", ".", "remove", "(", "from_positions", ")", "to_positions", ".", "anchor", ".", "parent", ".", "remove", "(", "to_positions", ")" ]
44.674419
24.186047
def insertBefore(self, child, beforeChild):
        '''
            insertBefore - Inserts a child before #beforeChild

                @param child <AdvancedTag/str> - Child block to insert

                @param beforeChild <AdvancedTag/str> - Child block to insert before. If None, will be appended

                @return - The added child. Note, if it is a text block (str), the return will NOT be linked by reference.

                @raises ValueError - If #beforeChild is defined and is not a child of this node
        '''

        # When the second arg is null/None, the node is appended. The argument is required per JS API, but null is acceptable.
        if beforeChild is None:
            return self.appendBlock(child)

        # If #child is an AdvancedTag, we need to add it to both blocks and children.
        isChildTag = isTagNode(child)

        myBlocks = self.blocks
        myChildren = self.children

        # Find the index #beforeChild falls under current element
        try:
            blocksIdx = myBlocks.index(beforeChild)
            if isChildTag:
                childrenIdx = myChildren.index(beforeChild)
        except ValueError:
            # #beforeChild is not a child of this element. Raise error.
            raise ValueError('Provided "beforeChild" is not a child of element, cannot insert.')

        # Add to blocks in the right spot
        self.blocks = myBlocks[:blocksIdx] + [child] + myBlocks[blocksIdx:]

        # Add to child in the right spot
        if isChildTag:
            self.children = myChildren[:childrenIdx] + [child] + myChildren[childrenIdx:]

        return child
[ "def", "insertBefore", "(", "self", ",", "child", ",", "beforeChild", ")", ":", "# When the second arg is null/None, the node is appended. The argument is required per JS API, but null is acceptable..", "if", "beforeChild", "is", "None", ":", "return", "self", ".", "appendBlock", "(", "child", ")", "# If #child is an AdvancedTag, we need to add it to both blocks and children.", "isChildTag", "=", "isTagNode", "(", "child", ")", "myBlocks", "=", "self", ".", "blocks", "myChildren", "=", "self", ".", "children", "# Find the index #beforeChild falls under current element", "try", ":", "blocksIdx", "=", "myBlocks", ".", "index", "(", "beforeChild", ")", "if", "isChildTag", ":", "childrenIdx", "=", "myChildren", ".", "index", "(", "beforeChild", ")", "except", "ValueError", ":", "# #beforeChild is not a child of this element. Raise error.", "raise", "ValueError", "(", "'Provided \"beforeChild\" is not a child of element, cannot insert.'", ")", "# Add to blocks in the right spot", "self", ".", "blocks", "=", "myBlocks", "[", ":", "blocksIdx", "]", "+", "[", "child", "]", "+", "myBlocks", "[", "blocksIdx", ":", "]", "# Add to child in the right spot", "if", "isChildTag", ":", "self", ".", "children", "=", "myChildren", "[", ":", "childrenIdx", "]", "+", "[", "child", "]", "+", "myChildren", "[", "childrenIdx", ":", "]", "return", "child" ]
40.3
30.45
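The parallel-list bookkeeping above (blocks holds both tags and text, children holds tags only) can be seen in isolation in the sketch below; the list contents are plain strings standing in for nodes and are purely illustrative, not the parser's API.

blocks = ['text-a', 'tag-1', 'text-b', 'tag-2']
children = ['tag-1', 'tag-2']

new_child, before = 'tag-new', 'tag-2'
blocks_idx = blocks.index(before)
children_idx = children.index(before)

# Splice the new node into both views at the position of `before`.
blocks = blocks[:blocks_idx] + [new_child] + blocks[blocks_idx:]
children = children[:children_idx] + [new_child] + children[children_idx:]
# blocks   -> ['text-a', 'tag-1', 'text-b', 'tag-new', 'tag-2']
# children -> ['tag-1', 'tag-new', 'tag-2']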
def grid_coords_from_corners(upper_left_corner, lower_right_corner, size): ''' Points are the outer edges of the UL and LR pixels. Size is rows, columns. GC projection type is taken from Points. ''' assert upper_left_corner.wkt == lower_right_corner.wkt geotransform = np.array([upper_left_corner.lon, -(upper_left_corner.lon - lower_right_corner.lon) / float(size[1]), 0, upper_left_corner.lat, 0, -(upper_left_corner.lat - lower_right_corner.lat) / float(size[0])]) return GridCoordinates(geotransform=geotransform, wkt=upper_left_corner.wkt, y_size=size[0], x_size=size[1])
[ "def", "grid_coords_from_corners", "(", "upper_left_corner", ",", "lower_right_corner", ",", "size", ")", ":", "assert", "upper_left_corner", ".", "wkt", "==", "lower_right_corner", ".", "wkt", "geotransform", "=", "np", ".", "array", "(", "[", "upper_left_corner", ".", "lon", ",", "-", "(", "upper_left_corner", ".", "lon", "-", "lower_right_corner", ".", "lon", ")", "/", "float", "(", "size", "[", "1", "]", ")", ",", "0", ",", "upper_left_corner", ".", "lat", ",", "0", ",", "-", "(", "upper_left_corner", ".", "lat", "-", "lower_right_corner", ".", "lat", ")", "/", "float", "(", "size", "[", "0", "]", ")", "]", ")", "return", "GridCoordinates", "(", "geotransform", "=", "geotransform", ",", "wkt", "=", "upper_left_corner", ".", "wkt", ",", "y_size", "=", "size", "[", "0", "]", ",", "x_size", "=", "size", "[", "1", "]", ")" ]
70.8
30.8
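The six geotransform values follow the familiar GDAL-style layout: x origin, pixel width, row rotation, y origin, column rotation, pixel height. A worked illustration with plain floats instead of the module's Point objects, using made-up corner coordinates:

ul_lon, ul_lat = -120.0, 45.0   # upper-left corner
lr_lon, lr_lat = -119.0, 44.0   # lower-right corner
rows, cols = 10, 20             # size = (rows, columns)

geotransform = [
    ul_lon,                            # x origin
    -(ul_lon - lr_lon) / float(cols),  # pixel width  -> 0.05
    0,                                 # row rotation
    ul_lat,                            # y origin
    0,                                 # column rotation
    -(ul_lat - lr_lat) / float(rows),  # pixel height -> -0.1
]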
def pull(iterable, n):
    """Return last n items of the iterable as a list.

    Example::

        >>> pull([0, 1, 2], 2)
        [1, 2]

    Equivalent to ``list(iterable)[-n:]``, but uses very little memory,
    because ``list(iterable)`` would hold every element in memory to build
    a new list. Commonly used for iterables whose index operations have
    been overridden.
    """
    fifo = collections.deque(maxlen=n)
    for i in iterable:
        fifo.append(i)
    return list(fifo)
[ "def", "pull", "(", "iterable", ",", "n", ")", ":", "fifo", "=", "collections", ".", "deque", "(", "maxlen", "=", "n", ")", "for", "i", "in", "iterable", ":", "fifo", ".", "append", "(", "i", ")", "return", "list", "(", "fifo", ")" ]
21.111111
20.111111
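The memory saving comes entirely from collections.deque(maxlen=n), which silently discards the oldest item as each new one arrives. The trick in isolation, with a long lazy iterable:

import collections

def squares():
    for i in range(1000000):
        yield i * i

# Only the last three values are ever held in memory at once.
last_three = collections.deque(squares(), maxlen=3)
print(list(last_three))  # squares of 999997, 999998, 999999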
def quantize(self): ''' Clone self and quantize it, at last return a new quantized model. :return: A new quantized model. >>> fc = Linear(4, 2) creating: createLinear >>> fc.set_weights([np.ones((2, 4)), np.ones((2,))]) >>> input = np.ones((2, 4)) >>> output = fc.forward(input) >>> expected_output = np.array([[5., 5.], [5., 5.]]) >>> np.testing.assert_allclose(output, expected_output) >>> quantized_fc = fc.quantize() >>> quantized_output = quantized_fc.forward(input) >>> expected_quantized_output = np.array([[5., 5.], [5., 5.]]) >>> np.testing.assert_allclose(quantized_output, expected_quantized_output) >>> assert("quantized.Linear" in quantized_fc.__str__()) >>> conv = SpatialConvolution(1, 2, 3, 3) creating: createSpatialConvolution >>> conv.set_weights([np.ones((2, 1, 3, 3)), np.zeros((2,))]) >>> input = np.ones((2, 1, 4, 4)) >>> output = conv.forward(input) >>> expected_output = np.array([[[[9., 9.], [9., 9.]], [[9., 9.], [9., 9.]]], [[[9., 9.], [9., 9.]], [[9., 9.], [9., 9.]]]]) >>> np.testing.assert_allclose(output, expected_output) >>> quantized_conv = conv.quantize() >>> quantized_output = quantized_conv.forward(input) >>> expected_quantized_output = np.array([[[[9., 9.], [9., 9.]], [[9., 9.], [9., 9.]]], [[[9., 9.], [9., 9.]], [[9., 9.], [9., 9.]]]]) >>> np.testing.assert_allclose(quantized_output, expected_quantized_output) >>> assert("quantized.SpatialConvolution" in quantized_conv.__str__()) >>> seq = Sequential() creating: createSequential >>> seq = seq.add(conv) >>> seq = seq.add(Reshape([8, 4], False)) creating: createReshape >>> seq = seq.add(fc) >>> input = np.ones([1, 1, 6, 6]) >>> output = seq.forward(input) >>> expected_output = np.array([[37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.]]) >>> np.testing.assert_allclose(output, expected_output) >>> quantized_seq = seq.quantize() >>> quantized_output = quantized_seq.forward(input) >>> expected_quantized_output = np.array([[37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.], [37., 37.]]) >>> np.testing.assert_allclose(quantized_output, expected_quantized_output) >>> assert("quantized.Linear" in quantized_seq.__str__()) >>> assert("quantized.SpatialConvolution" in quantized_seq.__str__()) ''' quantized_model = callBigDlFunc(self.bigdl_type, "quantize", self.value) return Layer.of(quantized_model)
[ "def", "quantize", "(", "self", ")", ":", "quantized_model", "=", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"quantize\"", ",", "self", ".", "value", ")", "return", "Layer", ".", "of", "(", "quantized_model", ")" ]
55.122449
24.673469
def decouple(fn): """ Inverse operation of couple. Create two functions of one argument and one return from a function that takes two arguments and has two returns Examples -------- >>> h = lambda x: (2*x**3, 6*x**2) >>> f, g = decouple(h) >>> f(5) 250 >>> g(5) 150 """ def fst(*args, **kwargs): return fn(*args, **kwargs)[0] def snd(*args, **kwargs): return fn(*args, **kwargs)[1] return fst, snd
[ "def", "decouple", "(", "fn", ")", ":", "def", "fst", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "[", "0", "]", "def", "snd", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "[", "1", "]", "return", "fst", ",", "snd" ]
18.36
22.12
def corrdfs(df1,df2,method):
    """
    Correlate every column of df1 with every column of df2.

    df1 columns become the columns of the output; df2 columns become the rows.

    :param method: 'spearman' or 'pearson'
    :returns: (dcorr, dpval) DataFrames of correlation coefficients and p-values
    """
    dcorr=pd.DataFrame(columns=df1.columns,index=df2.columns)
    dpval=pd.DataFrame(columns=df1.columns,index=df2.columns)
    for c1 in df1:
        for c2 in df2:
            if method=='spearman':
                dcorr.loc[c2,c1],dpval.loc[c2,c1]=spearmanr(df1[c1],df2[c2],
                                                            nan_policy='omit'
                                                           )
            elif method=='pearson':
                dcorr.loc[c2,c1],dpval.loc[c2,c1]=pearsonr(df1[c1],df2[c2],
#                                                             nan_policy='omit'
                                                           )
    if not df1.columns.name is None:
        dcorr.columns.name=df1.columns.name
        dpval.columns.name=df1.columns.name
    if not df2.columns.name is None:
        dcorr.index.name=df2.columns.name
        dpval.index.name=df2.columns.name
    return dcorr,dpval
[ "def", "corrdfs", "(", "df1", ",", "df2", ",", "method", ")", ":", "dcorr", "=", "pd", ".", "DataFrame", "(", "columns", "=", "df1", ".", "columns", ",", "index", "=", "df2", ".", "columns", ")", "dpval", "=", "pd", ".", "DataFrame", "(", "columns", "=", "df1", ".", "columns", ",", "index", "=", "df2", ".", "columns", ")", "for", "c1", "in", "df1", ":", "for", "c2", "in", "df2", ":", "if", "method", "==", "'spearman'", ":", "dcorr", ".", "loc", "[", "c2", ",", "c1", "]", ",", "dpval", ".", "loc", "[", "c2", ",", "c1", "]", "=", "spearmanr", "(", "df1", "[", "c1", "]", ",", "df2", "[", "c2", "]", ",", "nan_policy", "=", "'omit'", ")", "elif", "method", "==", "'pearson'", ":", "dcorr", ".", "loc", "[", "c2", ",", "c1", "]", ",", "dpval", ".", "loc", "[", "c2", ",", "c1", "]", "=", "pearsonr", "(", "df1", "[", "c1", "]", ",", "df2", "[", "c2", "]", ",", "# nan_policy='omit'", ")", "if", "not", "df1", ".", "columns", ".", "name", "is", "None", ":", "dcorr", ".", "columns", ".", "name", "=", "df1", ".", "columns", ".", "name", "dpval", ".", "columns", ".", "name", "=", "df1", ".", "columns", ".", "name", "if", "not", "df2", ".", "columns", ".", "name", "is", "None", ":", "dcorr", ".", "index", ".", "name", "=", "df2", ".", "columns", ".", "name", "dpval", ".", "index", ".", "name", "=", "df2", ".", "columns", ".", "name", "return", "dcorr", ",", "dpval" ]
39.96
15.08
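A hedged usage sketch for corrdfs(); it assumes pandas and scipy.stats' spearmanr/pearsonr are imported at module level, as the function body implies, and the data below is random filler.

import numpy as np
import pandas as pd

df1 = pd.DataFrame({'x1': np.random.rand(20), 'x2': np.random.rand(20)})
df2 = pd.DataFrame({'y1': np.random.rand(20)})

dcorr, dpval = corrdfs(df1, df2, method='spearman')
# dcorr and dpval are 1x2 DataFrames: row index from df2's columns,
# column index from df1's columns.
print(dcorr)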
def remove_chartjunk(ax, spines, grid=None, ticklabels=None, show_ticks=False, xkcd=False): ''' Removes "chartjunk", such as extra lines of axes and tick marks. If grid="y" or "x", will add a white grid at the "y" or "x" axes, respectively If ticklabels="y" or "x", or ['x', 'y'] will remove ticklabels from that axis ''' all_spines = ['top', 'bottom', 'right', 'left', 'polar'] for spine in spines: # The try/except is for polar coordinates, which only have a 'polar' # spine and none of the others try: ax.spines[spine].set_visible(False) except KeyError: pass # For the remaining spines, make their line thinner and a slightly # off-black dark grey if not xkcd: for spine in set(all_spines).difference(set(spines)): # The try/except is for polar coordinates, which only have a # 'polar' spine and none of the others try: ax.spines[spine].set_linewidth(0.5) except KeyError: pass # ax.spines[spine].set_color(almost_black) # ax.spines[spine].set_tick_params(color=almost_black) # Check that the axes are not log-scale. If they are, leave # the ticks because otherwise people assume a linear scale. x_pos = set(['top', 'bottom']) y_pos = set(['left', 'right']) xy_pos = [x_pos, y_pos] xy_ax_names = ['xaxis', 'yaxis'] for ax_name, pos in zip(xy_ax_names, xy_pos): axis = ax.__dict__[ax_name] # axis.set_tick_params(color=almost_black) #print 'axis.get_scale()', axis.get_scale() if show_ticks or axis.get_scale() == 'log': # if this spine is not in the list of spines to remove for p in pos.difference(spines): #print 'p', p axis.set_tick_params(direction='out') axis.set_ticks_position(p) # axis.set_tick_params(which='both', p) else: axis.set_ticks_position('none') if grid is not None: for g in grid: assert g in ('x', 'y') ax.grid(axis=grid, color='white', linestyle='-', linewidth=0.5) if ticklabels is not None: if type(ticklabels) is str: assert ticklabels in set(('x', 'y')) if ticklabels == 'x': ax.set_xticklabels([]) if ticklabels == 'y': ax.set_yticklabels([]) else: assert set(ticklabels) | set(('x', 'y')) > 0 if 'x' in ticklabels: ax.set_xticklabels([]) elif 'y' in ticklabels: ax.set_yticklabels([])
[ "def", "remove_chartjunk", "(", "ax", ",", "spines", ",", "grid", "=", "None", ",", "ticklabels", "=", "None", ",", "show_ticks", "=", "False", ",", "xkcd", "=", "False", ")", ":", "all_spines", "=", "[", "'top'", ",", "'bottom'", ",", "'right'", ",", "'left'", ",", "'polar'", "]", "for", "spine", "in", "spines", ":", "# The try/except is for polar coordinates, which only have a 'polar'", "# spine and none of the others", "try", ":", "ax", ".", "spines", "[", "spine", "]", ".", "set_visible", "(", "False", ")", "except", "KeyError", ":", "pass", "# For the remaining spines, make their line thinner and a slightly", "# off-black dark grey", "if", "not", "xkcd", ":", "for", "spine", "in", "set", "(", "all_spines", ")", ".", "difference", "(", "set", "(", "spines", ")", ")", ":", "# The try/except is for polar coordinates, which only have a", "# 'polar' spine and none of the others", "try", ":", "ax", ".", "spines", "[", "spine", "]", ".", "set_linewidth", "(", "0.5", ")", "except", "KeyError", ":", "pass", "# ax.spines[spine].set_color(almost_black)", "# ax.spines[spine].set_tick_params(color=almost_black)", "# Check that the axes are not log-scale. If they are, leave", "# the ticks because otherwise people assume a linear scale.", "x_pos", "=", "set", "(", "[", "'top'", ",", "'bottom'", "]", ")", "y_pos", "=", "set", "(", "[", "'left'", ",", "'right'", "]", ")", "xy_pos", "=", "[", "x_pos", ",", "y_pos", "]", "xy_ax_names", "=", "[", "'xaxis'", ",", "'yaxis'", "]", "for", "ax_name", ",", "pos", "in", "zip", "(", "xy_ax_names", ",", "xy_pos", ")", ":", "axis", "=", "ax", ".", "__dict__", "[", "ax_name", "]", "# axis.set_tick_params(color=almost_black)", "#print 'axis.get_scale()', axis.get_scale()", "if", "show_ticks", "or", "axis", ".", "get_scale", "(", ")", "==", "'log'", ":", "# if this spine is not in the list of spines to remove", "for", "p", "in", "pos", ".", "difference", "(", "spines", ")", ":", "#print 'p', p", "axis", ".", "set_tick_params", "(", "direction", "=", "'out'", ")", "axis", ".", "set_ticks_position", "(", "p", ")", "# axis.set_tick_params(which='both', p)", "else", ":", "axis", ".", "set_ticks_position", "(", "'none'", ")", "if", "grid", "is", "not", "None", ":", "for", "g", "in", "grid", ":", "assert", "g", "in", "(", "'x'", ",", "'y'", ")", "ax", ".", "grid", "(", "axis", "=", "grid", ",", "color", "=", "'white'", ",", "linestyle", "=", "'-'", ",", "linewidth", "=", "0.5", ")", "if", "ticklabels", "is", "not", "None", ":", "if", "type", "(", "ticklabels", ")", "is", "str", ":", "assert", "ticklabels", "in", "set", "(", "(", "'x'", ",", "'y'", ")", ")", "if", "ticklabels", "==", "'x'", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "if", "ticklabels", "==", "'y'", ":", "ax", ".", "set_yticklabels", "(", "[", "]", ")", "else", ":", "assert", "set", "(", "ticklabels", ")", "|", "set", "(", "(", "'x'", ",", "'y'", ")", ")", ">", "0", "if", "'x'", "in", "ticklabels", ":", "ax", ".", "set_xticklabels", "(", "[", "]", ")", "elif", "'y'", "in", "ticklabels", ":", "ax", ".", "set_yticklabels", "(", "[", "]", ")" ]
37.971831
18.760563
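A hedged usage sketch with matplotlib: strip the top and right spines from a bar chart, overlay a white grid on the y axis, and drop the x tick labels. The figure contents are illustrative only.

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.bar(range(5), [3, 7, 2, 5, 4], color='grey')
remove_chartjunk(ax, ['top', 'right'], grid='y', ticklabels='x')
plt.show()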
def generate_additional_context(self, matching_datasets): """Return top tags for a source.""" top_tags = Tag.objects.filter( dataset__in=matching_datasets ).annotate( tag_count=Count('word') ).order_by('-tag_count')[:3] return { 'top_tags': top_tags }
[ "def", "generate_additional_context", "(", "self", ",", "matching_datasets", ")", ":", "top_tags", "=", "Tag", ".", "objects", ".", "filter", "(", "dataset__in", "=", "matching_datasets", ")", ".", "annotate", "(", "tag_count", "=", "Count", "(", "'word'", ")", ")", ".", "order_by", "(", "'-tag_count'", ")", "[", ":", "3", "]", "return", "{", "'top_tags'", ":", "top_tags", "}" ]
29.636364
13.909091
def request_sync_events(blink, network): """ Request events from sync module. :param blink: Blink instance. :param network: Sync module network id. """ url = "{}/events/network/{}".format(blink.urls.base_url, network) return http_get(blink, url)
[ "def", "request_sync_events", "(", "blink", ",", "network", ")", ":", "url", "=", "\"{}/events/network/{}\"", ".", "format", "(", "blink", ".", "urls", ".", "base_url", ",", "network", ")", "return", "http_get", "(", "blink", ",", "url", ")" ]
29.555556
10.222222
def get_path(filename): """ Get absolute path for filename. :param filename: file :return: path """ path = abspath(filename) if os.path.isdir(filename) else dirname(abspath(filename)) return path
[ "def", "get_path", "(", "filename", ")", ":", "path", "=", "abspath", "(", "filename", ")", "if", "os", ".", "path", ".", "isdir", "(", "filename", ")", "else", "dirname", "(", "abspath", "(", "filename", ")", ")", "return", "path" ]
24
19.111111
def to_array(self): """ Serializes this ChatPhoto to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(ChatPhoto, self).to_array() array['small_file_id'] = u(self.small_file_id) # py2: type unicode, py3: type str array['big_file_id'] = u(self.big_file_id) # py2: type unicode, py3: type str return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "ChatPhoto", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'small_file_id'", "]", "=", "u", "(", "self", ".", "small_file_id", ")", "# py2: type unicode, py3: type str", "array", "[", "'big_file_id'", "]", "=", "u", "(", "self", ".", "big_file_id", ")", "# py2: type unicode, py3: type str", "return", "array" ]
31.846154
24.153846
def plot_gos(fout_img, goids, go2obj, **kws): """Given GO ids and the obo_dag, create a plot of paths from GO ids.""" gosubdag = GoSubDag(goids, go2obj, rcntobj=True) godagplot = GoSubDagPlot(gosubdag, **kws) godagplot.plt_dag(fout_img)
[ "def", "plot_gos", "(", "fout_img", ",", "goids", ",", "go2obj", ",", "*", "*", "kws", ")", ":", "gosubdag", "=", "GoSubDag", "(", "goids", ",", "go2obj", ",", "rcntobj", "=", "True", ")", "godagplot", "=", "GoSubDagPlot", "(", "gosubdag", ",", "*", "*", "kws", ")", "godagplot", ".", "plt_dag", "(", "fout_img", ")" ]
49.6
6.2
def import_bin(filename, **kwargs): """Read a .bin file generated by the IRIS Instruments Syscal Pro System and return a curated dataframe for further processing. This dataframe contains only information currently deemed important. Use the function reda.importers.iris_syscal_pro_binary._import_bin to extract ALL information from a given .bin file. Parameters ---------- filename : string path to input filename x0 : float, optional position of first electrode. If not given, then use the smallest x-position in the data as the first electrode. spacing : float electrode spacing. This is important if not all electrodes are used in a given measurement setup. If not given, then the smallest distance between electrodes is assumed to be the electrode spacing. Naturally, this requires measurements (or injections) with subsequent electrodes. reciprocals : int, optional if provided, then assume that this is a reciprocal measurements where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` check_meas_nums : bool if True, then check that the measurement numbers are consecutive. Don't return data after a jump to smaller measurement numbers (this usually indicates that more data points were downloaded than are part of a specific measurement. Default: True skip_rows : int Ignore this number of rows at the beginning, e.g., because they were inadvertently imported from an earlier measurement. Default: 0 Returns ------- data : :py:class:`pandas.DataFrame` Contains the measurement data electrodes : :py:class:`pandas.DataFrame` Contains electrode positions (None at the moment) topography : None No topography information is contained in the text files, so we always return None """ metadata, data_raw = _import_bin(filename) skip_rows = kwargs.get('skip_rows', 0) if skip_rows > 0: data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True) data_raw = data_raw.reset_index() if kwargs.get('check_meas_nums', True): # check that first number is 0 if data_raw['measurement_num'].iloc[0] != 0: print('WARNING: Measurement numbers do not start with 0 ' + '(did you download ALL data?)') # check that all measurement numbers increase by one if not np.all(np.diff(data_raw['measurement_num'])) == 1: print( 'WARNING ' 'Measurement numbers are not consecutive. ' 'Perhaps the first measurement belongs to another measurement?' 
' Use the skip_rows parameter to skip those measurements' ) # now check if there is a jump in measurement numbers somewhere # ignore first entry as this will always be nan diff = data_raw['measurement_num'].diff()[1:] jump = np.where(diff != 1)[0] if len(jump) > 0: print('WARNING: One or more jumps in measurement numbers detected') print('The jump indices are:') for jump_nr in jump: print(jump_nr) print('Removing data points subsequent to the first jump') data_raw = data_raw.iloc[0:jump[0] + 1, :] if data_raw.shape[0] == 0: # no data present, return a bare DataFrame return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None data = _convert_coords_to_abmn_X( data_raw[['x_a', 'x_b', 'x_m', 'x_n']], **kwargs ) # [mV] / [mA] data['r'] = data_raw['vp'] / data_raw['Iab'] data['Vmn'] = data_raw['vp'] data['vab'] = data_raw['vab'] data['Iab'] = data_raw['Iab'] data['mdelay'] = data_raw['mdelay'] data['Tm'] = data_raw['Tm'] data['Mx'] = data_raw['Mx'] data['chargeability'] = data_raw['m'] data['q'] = data_raw['q'] # rename electrode denotations rec_max = kwargs.get('reciprocals', None) if rec_max is not None: print('renumbering electrode numbers') data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] # print(data) return data, None, None
[ "def", "import_bin", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "metadata", ",", "data_raw", "=", "_import_bin", "(", "filename", ")", "skip_rows", "=", "kwargs", ".", "get", "(", "'skip_rows'", ",", "0", ")", "if", "skip_rows", ">", "0", ":", "data_raw", ".", "drop", "(", "data_raw", ".", "index", "[", "range", "(", "0", ",", "skip_rows", ")", "]", ",", "inplace", "=", "True", ")", "data_raw", "=", "data_raw", ".", "reset_index", "(", ")", "if", "kwargs", ".", "get", "(", "'check_meas_nums'", ",", "True", ")", ":", "# check that first number is 0", "if", "data_raw", "[", "'measurement_num'", "]", ".", "iloc", "[", "0", "]", "!=", "0", ":", "print", "(", "'WARNING: Measurement numbers do not start with 0 '", "+", "'(did you download ALL data?)'", ")", "# check that all measurement numbers increase by one", "if", "not", "np", ".", "all", "(", "np", ".", "diff", "(", "data_raw", "[", "'measurement_num'", "]", ")", ")", "==", "1", ":", "print", "(", "'WARNING '", "'Measurement numbers are not consecutive. '", "'Perhaps the first measurement belongs to another measurement?'", "' Use the skip_rows parameter to skip those measurements'", ")", "# now check if there is a jump in measurement numbers somewhere", "# ignore first entry as this will always be nan", "diff", "=", "data_raw", "[", "'measurement_num'", "]", ".", "diff", "(", ")", "[", "1", ":", "]", "jump", "=", "np", ".", "where", "(", "diff", "!=", "1", ")", "[", "0", "]", "if", "len", "(", "jump", ")", ">", "0", ":", "print", "(", "'WARNING: One or more jumps in measurement numbers detected'", ")", "print", "(", "'The jump indices are:'", ")", "for", "jump_nr", "in", "jump", ":", "print", "(", "jump_nr", ")", "print", "(", "'Removing data points subsequent to the first jump'", ")", "data_raw", "=", "data_raw", ".", "iloc", "[", "0", ":", "jump", "[", "0", "]", "+", "1", ",", ":", "]", "if", "data_raw", ".", "shape", "[", "0", "]", "==", "0", ":", "# no data present, return a bare DataFrame", "return", "pd", ".", "DataFrame", "(", "columns", "=", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", ",", "'r'", "]", ")", ",", "None", ",", "None", "data", "=", "_convert_coords_to_abmn_X", "(", "data_raw", "[", "[", "'x_a'", ",", "'x_b'", ",", "'x_m'", ",", "'x_n'", "]", "]", ",", "*", "*", "kwargs", ")", "# [mV] / [mA]", "data", "[", "'r'", "]", "=", "data_raw", "[", "'vp'", "]", "/", "data_raw", "[", "'Iab'", "]", "data", "[", "'Vmn'", "]", "=", "data_raw", "[", "'vp'", "]", "data", "[", "'vab'", "]", "=", "data_raw", "[", "'vab'", "]", "data", "[", "'Iab'", "]", "=", "data_raw", "[", "'Iab'", "]", "data", "[", "'mdelay'", "]", "=", "data_raw", "[", "'mdelay'", "]", "data", "[", "'Tm'", "]", "=", "data_raw", "[", "'Tm'", "]", "data", "[", "'Mx'", "]", "=", "data_raw", "[", "'Mx'", "]", "data", "[", "'chargeability'", "]", "=", "data_raw", "[", "'m'", "]", "data", "[", "'q'", "]", "=", "data_raw", "[", "'q'", "]", "# rename electrode denotations", "rec_max", "=", "kwargs", ".", "get", "(", "'reciprocals'", ",", "None", ")", "if", "rec_max", "is", "not", "None", ":", "print", "(", "'renumbering electrode numbers'", ")", "data", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", "=", "rec_max", "+", "1", "-", "data", "[", "[", "'a'", ",", "'b'", ",", "'m'", ",", "'n'", "]", "]", "# print(data)", "return", "data", ",", "None", ",", "None" ]
40.271028
21
def _recycle(self): """ Reclaim buffer space before the origin. Note: modifies buffer size """ origin = self._origin if origin == 0: return False available = self._extent - origin self._data[:available] = self._data[origin:self._extent] self._extent = available self._origin = 0 #log_debug("Recycled %d bytes" % origin) return True
[ "def", "_recycle", "(", "self", ")", ":", "origin", "=", "self", ".", "_origin", "if", "origin", "==", "0", ":", "return", "False", "available", "=", "self", ".", "_extent", "-", "origin", "self", ".", "_data", "[", ":", "available", "]", "=", "self", ".", "_data", "[", "origin", ":", "self", ".", "_extent", "]", "self", ".", "_extent", "=", "available", "self", ".", "_origin", "=", "0", "#log_debug(\"Recycled %d bytes\" % origin)", "return", "True" ]
29.928571
13.5
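The compaction idea in isolation, as a hedged sketch with a bytearray: copy the unread tail back to offset zero so the space before the read origin can be reused, without growing the buffer.

buf = bytearray(b'consumed|pending')
origin, extent = len(b'consumed|'), len(buf)

available = extent - origin
buf[:available] = buf[origin:extent]   # shift the unread bytes to the front
extent, origin = available, 0
print(bytes(buf[:extent]))             # b'pending'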
def QA_indicator_MIKE(DataFrame, N=12):
    """
    MIKE indicator

    Description:
        MIKE is another form of path (support/resistance band) indicator.

    Trading rules:
        1. The WEAK-S, MEDIUM-S and STRONG-S lines mark initial, intermediate and strong support.
        2. The WEAK-R, MEDIUM-R and STRONG-R lines mark initial, intermediate and strong resistance.
    """
    HIGH = DataFrame.high
    LOW = DataFrame.low
    CLOSE = DataFrame.close
    TYP = (HIGH+LOW+CLOSE)/3
    LL = LLV(LOW, N)
    HH = HHV(HIGH, N)

    WR = TYP+(TYP-LL)
    MR = TYP+(HH-LL)
    SR = 2*HH-LL
    WS = TYP-(HH-TYP)
    MS = TYP-(HH-LL)
    SS = 2*LL-HH
    return pd.DataFrame({
        'WR': WR,
        'MR': MR,
        'SR': SR,
        'WS': WS,
        'MS': MS,
        'SS': SS
    })
[ "def", "QA_indicator_MIKE", "(", "DataFrame", ",", "N", "=", "12", ")", ":", "HIGH", "=", "DataFrame", ".", "high", "LOW", "=", "DataFrame", ".", "low", "CLOSE", "=", "DataFrame", ".", "close", "TYP", "=", "(", "HIGH", "+", "LOW", "+", "CLOSE", ")", "/", "3", "LL", "=", "LLV", "(", "LOW", ",", "N", ")", "HH", "=", "HHV", "(", "HIGH", ",", "N", ")", "WR", "=", "TYP", "+", "(", "TYP", "-", "LL", ")", "MR", "=", "TYP", "+", "(", "HH", "-", "LL", ")", "SR", "=", "2", "*", "HH", "-", "LL", "WS", "=", "TYP", "-", "(", "HH", "-", "TYP", ")", "MS", "=", "TYP", "-", "(", "HH", "-", "LL", ")", "SS", "=", "2", "*", "LL", "-", "HH", "return", "pd", ".", "DataFrame", "(", "{", "'WR'", ":", "WR", ",", "'MR'", ":", "MR", ",", "'SR'", ":", "SR", ",", "'WS'", ":", "WS", ",", "'MS'", ":", "MS", ",", "'SS'", ":", "SS", "}", ")" ]
20.592593
18
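A worked numeric illustration of the six MIKE bands for a single bar; the inputs are made up, and the comments follow directly from the formulas above.

# Assume an N-period high HH = 12, low LL = 8 and typical price TYP = 10.
HH, LL, TYP = 12, 8, 10
WR = TYP + (TYP - LL)   # 12  weak resistance
MR = TYP + (HH - LL)    # 14  medium resistance
SR = 2 * HH - LL        # 16  strong resistance
WS = TYP - (HH - TYP)   # 8   weak support
MS = TYP - (HH - LL)    # 6   medium support
SS = 2 * LL - HH        # 4   strong support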
def _update_imageinfo(self): """ calls get_imageinfo() if data image missing info """ missing = self._missing_imageinfo() deferred = self.flags.get('defer_imageinfo') continuing = self.data.get('continue') if missing and not deferred and not continuing: self.get_imageinfo(show=False)
[ "def", "_update_imageinfo", "(", "self", ")", ":", "missing", "=", "self", ".", "_missing_imageinfo", "(", ")", "deferred", "=", "self", ".", "flags", ".", "get", "(", "'defer_imageinfo'", ")", "continuing", "=", "self", ".", "data", ".", "get", "(", "'continue'", ")", "if", "missing", "and", "not", "deferred", "and", "not", "continuing", ":", "self", ".", "get_imageinfo", "(", "show", "=", "False", ")" ]
34.4
10.6
def _read_cwl_record(rec): """Read CWL records, handling multiple nesting and batching cases. """ keys = set([]) out = [] if isinstance(rec, dict): is_batched = all([isinstance(v, (list, tuple)) for v in rec.values()]) cur = [{} for _ in range(len(rec.values()[0]) if is_batched else 1)] for k in rec.keys(): keys.add(k) val = rec[k] val = val if is_batched else [val] for i, v in enumerate(val): v = _cwlvar_to_wdl(v) cur[i] = _update_nested(k.split("__"), v, cur[i]) if is_batched: out.append(cur) else: assert len(cur) == 1 out.append(cur[0]) else: assert isinstance(rec, (list, tuple)) for sub_rec in rec: sub_keys, sub_out = _read_cwl_record(sub_rec) keys |= sub_keys out.append(sub_out) return keys, out
[ "def", "_read_cwl_record", "(", "rec", ")", ":", "keys", "=", "set", "(", "[", "]", ")", "out", "=", "[", "]", "if", "isinstance", "(", "rec", ",", "dict", ")", ":", "is_batched", "=", "all", "(", "[", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ")", ")", "for", "v", "in", "rec", ".", "values", "(", ")", "]", ")", "cur", "=", "[", "{", "}", "for", "_", "in", "range", "(", "len", "(", "rec", ".", "values", "(", ")", "[", "0", "]", ")", "if", "is_batched", "else", "1", ")", "]", "for", "k", "in", "rec", ".", "keys", "(", ")", ":", "keys", ".", "add", "(", "k", ")", "val", "=", "rec", "[", "k", "]", "val", "=", "val", "if", "is_batched", "else", "[", "val", "]", "for", "i", ",", "v", "in", "enumerate", "(", "val", ")", ":", "v", "=", "_cwlvar_to_wdl", "(", "v", ")", "cur", "[", "i", "]", "=", "_update_nested", "(", "k", ".", "split", "(", "\"__\"", ")", ",", "v", ",", "cur", "[", "i", "]", ")", "if", "is_batched", ":", "out", ".", "append", "(", "cur", ")", "else", ":", "assert", "len", "(", "cur", ")", "==", "1", "out", ".", "append", "(", "cur", "[", "0", "]", ")", "else", ":", "assert", "isinstance", "(", "rec", ",", "(", "list", ",", "tuple", ")", ")", "for", "sub_rec", "in", "rec", ":", "sub_keys", ",", "sub_out", "=", "_read_cwl_record", "(", "sub_rec", ")", "keys", "|=", "sub_keys", "out", ".", "append", "(", "sub_out", ")", "return", "keys", ",", "out" ]
34.037037
15.259259
def get_strategy(name_or_cls): """Return the strategy identified by its name. If ``name_or_cls`` is a class, it is simply instantiated and returned. """ if isinstance(name_or_cls, six.string_types): if name_or_cls not in STRATS: raise MutationError("strat is not defined") return STRATS[name_or_cls]() return name_or_cls()
[ "def", "get_strategy", "(", "name_or_cls", ")", ":", "if", "isinstance", "(", "name_or_cls", ",", "six", ".", "string_types", ")", ":", "if", "name_or_cls", "not", "in", "STRATS", ":", "raise", "MutationError", "(", "\"strat is not defined\"", ")", "return", "STRATS", "[", "name_or_cls", "]", "(", ")", "return", "name_or_cls", "(", ")" ]
35.2
10.6
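A hedged usage sketch for get_strategy: the STRATS registry entry and the RandomStrategy class below are made-up placeholders for whatever strategies the host project registers; in both branches the function returns an instance, not the class.

class RandomStrategy:                    # hypothetical strategy class
    pass

STRATS = {'random': RandomStrategy}      # hypothetical registry; the real one lives in the library

by_name = get_strategy('random')         # looked up in STRATS, then instantiated
by_class = get_strategy(RandomStrategy)  # a class argument is instantiated directly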
def _single_site(self): """ Make sure the queryset is filtered on a parent site, if that didn't happen already. """ if appsettings.FLUENT_CONTENTS_FILTER_SITE_ID and self._parent_site is None: return self.parent_site(settings.SITE_ID) else: return self
[ "def", "_single_site", "(", "self", ")", ":", "if", "appsettings", ".", "FLUENT_CONTENTS_FILTER_SITE_ID", "and", "self", ".", "_parent_site", "is", "None", ":", "return", "self", ".", "parent_site", "(", "settings", ".", "SITE_ID", ")", "else", ":", "return", "self" ]
38.625
21.125
def _path_with_dir_fd(self, path, fct, dir_fd): """Return the path considering dir_fd. Raise on invalid parameters.""" if dir_fd is not None: if sys.version_info < (3, 3): raise TypeError("%s() got an unexpected keyword " "argument 'dir_fd'" % fct.__name__) # check if fd is supported for the built-in real function real_fct = getattr(os, fct.__name__) if real_fct not in self.supports_dir_fd: raise NotImplementedError( 'dir_fd unavailable on this platform') if isinstance(path, int): raise ValueError("%s: Can't specify dir_fd without " "matching path" % fct.__name__) if not self.path.isabs(path): return self.path.join( self.filesystem.get_open_file( dir_fd).get_object().path, path) return path
[ "def", "_path_with_dir_fd", "(", "self", ",", "path", ",", "fct", ",", "dir_fd", ")", ":", "if", "dir_fd", "is", "not", "None", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "3", ")", ":", "raise", "TypeError", "(", "\"%s() got an unexpected keyword \"", "\"argument 'dir_fd'\"", "%", "fct", ".", "__name__", ")", "# check if fd is supported for the built-in real function", "real_fct", "=", "getattr", "(", "os", ",", "fct", ".", "__name__", ")", "if", "real_fct", "not", "in", "self", ".", "supports_dir_fd", ":", "raise", "NotImplementedError", "(", "'dir_fd unavailable on this platform'", ")", "if", "isinstance", "(", "path", ",", "int", ")", ":", "raise", "ValueError", "(", "\"%s: Can't specify dir_fd without \"", "\"matching path\"", "%", "fct", ".", "__name__", ")", "if", "not", "self", ".", "path", ".", "isabs", "(", "path", ")", ":", "return", "self", ".", "path", ".", "join", "(", "self", ".", "filesystem", ".", "get_open_file", "(", "dir_fd", ")", ".", "get_object", "(", ")", ".", "path", ",", "path", ")", "return", "path" ]
51.052632
12.842105
def _pre_md5_skip_on_check(self, lpath, rfile): # type: (Downloader, pathlib.Path, # blobxfer.models.azure.StorageEntity) -> None """Perform pre MD5 skip on check :param Downloader self: this :param pathlib.Path lpath: local path :param blobxfer.models.azure.StorageEntity rfile: remote file """ md5 = blobxfer.models.metadata.get_md5_from_metadata(rfile) key = blobxfer.operations.download.Downloader.\ create_unique_transfer_operation_id(rfile) with self._md5_meta_lock: self._md5_map[key] = rfile slpath = str(lpath) # temporarily create a download descriptor view for vectored io if rfile.vectored_io is not None: view, _ = blobxfer.models.download.Descriptor.generate_view(rfile) fpath = str( blobxfer.models.download.Descriptor. convert_vectored_io_slice_to_final_path_name(lpath, rfile) ) else: view = None fpath = slpath self._md5_offload.add_localfile_for_md5_check( key, slpath, fpath, md5, rfile.mode, view)
[ "def", "_pre_md5_skip_on_check", "(", "self", ",", "lpath", ",", "rfile", ")", ":", "# type: (Downloader, pathlib.Path,", "# blobxfer.models.azure.StorageEntity) -> None", "md5", "=", "blobxfer", ".", "models", ".", "metadata", ".", "get_md5_from_metadata", "(", "rfile", ")", "key", "=", "blobxfer", ".", "operations", ".", "download", ".", "Downloader", ".", "create_unique_transfer_operation_id", "(", "rfile", ")", "with", "self", ".", "_md5_meta_lock", ":", "self", ".", "_md5_map", "[", "key", "]", "=", "rfile", "slpath", "=", "str", "(", "lpath", ")", "# temporarily create a download descriptor view for vectored io", "if", "rfile", ".", "vectored_io", "is", "not", "None", ":", "view", ",", "_", "=", "blobxfer", ".", "models", ".", "download", ".", "Descriptor", ".", "generate_view", "(", "rfile", ")", "fpath", "=", "str", "(", "blobxfer", ".", "models", ".", "download", ".", "Descriptor", ".", "convert_vectored_io_slice_to_final_path_name", "(", "lpath", ",", "rfile", ")", ")", "else", ":", "view", "=", "None", "fpath", "=", "slpath", "self", ".", "_md5_offload", ".", "add_localfile_for_md5_check", "(", "key", ",", "slpath", ",", "fpath", ",", "md5", ",", "rfile", ".", "mode", ",", "view", ")" ]
44.153846
15.038462
def create_missing(self): """Conditionally mark ``docker_upstream_name`` as required. Mark ``docker_upstream_name`` as required if ``content_type`` is "docker". """ if getattr(self, 'content_type', '') == 'docker': self._fields['docker_upstream_name'].required = True super(Repository, self).create_missing()
[ "def", "create_missing", "(", "self", ")", ":", "if", "getattr", "(", "self", ",", "'content_type'", ",", "''", ")", "==", "'docker'", ":", "self", ".", "_fields", "[", "'docker_upstream_name'", "]", ".", "required", "=", "True", "super", "(", "Repository", ",", "self", ")", ".", "create_missing", "(", ")" ]
36.1
19.9
async def send_heartbeat(self, short_name): """Post a heartbeat for a service. Args: short_name (string): The short name of the service to query """ if short_name not in self.services: raise ArgumentError("Unknown service name", short_name=short_name) self.services[short_name]['state'].heartbeat() await self._notify_update(short_name, 'heartbeat')
[ "async", "def", "send_heartbeat", "(", "self", ",", "short_name", ")", ":", "if", "short_name", "not", "in", "self", ".", "services", ":", "raise", "ArgumentError", "(", "\"Unknown service name\"", ",", "short_name", "=", "short_name", ")", "self", ".", "services", "[", "short_name", "]", "[", "'state'", "]", ".", "heartbeat", "(", ")", "await", "self", ".", "_notify_update", "(", "short_name", ",", "'heartbeat'", ")" ]
34.416667
21.166667
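A small illustrative loop around send_heartbeat; the registry object and the service name are assumptions, the only requirement shown above being that the name was registered first (otherwise ArgumentError is raised).

import asyncio

async def keepalive(registry, short_name, interval=30):
    # periodically post a heartbeat for an already-registered service
    while True:
        await registry.send_heartbeat(short_name)
        await asyncio.sleep(interval)

# asyncio.run(keepalive(registry, 'report_worker'))   # registry and service name are placeholders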
def get_api_url(server): """Formats the server URL properly in order to query the API. :return: A valid MyGeotab API request URL. :rtype: str """ parsed = urlparse(server) base_url = parsed.netloc if parsed.netloc else parsed.path base_url.replace('/', '') return 'https://' + base_url + '/apiv1'
[ "def", "get_api_url", "(", "server", ")", ":", "parsed", "=", "urlparse", "(", "server", ")", "base_url", "=", "parsed", ".", "netloc", "if", "parsed", ".", "netloc", "else", "parsed", ".", "path", "base_url", ".", "replace", "(", "'/'", ",", "''", ")", "return", "'https://'", "+", "base_url", "+", "'/apiv1'" ]
32
13.4
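Tracing get_api_url by hand shows that a bare hostname and a full URL normalise to the same endpoint; note that base_url.replace('/', '') discards its return value (strings are immutable), so the function relies on netloc/path already being clean. Illustrative trace only:

from urllib.parse import urlparse

for server in ('my.geotab.com', 'https://my.geotab.com'):
    parsed = urlparse(server)
    base_url = parsed.netloc if parsed.netloc else parsed.path
    print('https://' + base_url + '/apiv1')   # both iterations print https://my.geotab.com/apiv1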
def feature_needs(*feas): """ Get info about the FUSE API version needed for the support of some features. This function takes a variable number of feature patterns. A feature pattern is either: - an integer (directly referring to a FUSE API version number) - a built-in feature specifier string (meaning defined by dictionary) - a string of the form ``has_foo``, where ``foo`` is a filesystem method (refers to the API version where the method has been introduced) - a list/tuple of other feature patterns (matches each of its members) - a regexp (meant to be matched against the builtins plus ``has_foo`` patterns; can also be given by a string of the form "re:*") - a negated regexp (can be given by a string of the form "!re:*") If called with no arguments, then the list of builtins is returned, mapped to their meaning. Otherwise the function returns the smallest FUSE API version number which has all the matching features. Builtin specifiers worth explicit mention: - ``stateful_files``: you want to use custom filehandles (e.g. a file class). - ``*``: you want all features. - while ``has_foo`` makes sense for all filesystem methods ``foo``, some of these can be found among the builtins, too (the ones which can be handled by the general rule). specifiers like ``has_foo`` refer to the requirement that the library knows of the fs method ``foo``. """ fmap = {'stateful_files': 22, 'stateful_dirs': 23, 'stateful_io': ('stateful_files', 'stateful_dirs'), 'stateful_files_keep_cache': 23, 'stateful_files_direct_io': 23, 'keep_cache': ('stateful_files_keep_cache',), 'direct_io': ('stateful_files_direct_io',), 'has_opendir': ('stateful_dirs',), 'has_releasedir': ('stateful_dirs',), 'has_fsyncdir': ('stateful_dirs',), 'has_create': 25, 'has_access': 25, 'has_fgetattr': 25, 'has_ftruncate': 25, 'has_fsinit': ('has_init'), 'has_fsdestroy': ('has_destroy'), 'has_lock': 26, 'has_utimens': 26, 'has_bmap': 26, 'has_init': 23, 'has_destroy': 23, '*': '!re:^\*$'} if not feas: return fmap def resolve(args, maxva): for fp in args: if isinstance(fp, int): maxva[0] = max(maxva[0], fp) continue if isinstance(fp, list) or isinstance(fp, tuple): for f in fp: yield f continue ma = isinstance(fp, str) and re.compile("(!\s*|)re:(.*)").match(fp) if isinstance(fp, type(re.compile(''))) or ma: neg = False if ma: mag = ma.groups() fp = re.compile(mag[1]) neg = bool(mag[0]) for f in list(fmap.keys()) + [ 'has_' + a for a in Fuse._attrs ]: if neg != bool(re.search(fp, f)): yield f continue ma = re.compile("has_(.*)").match(fp) if ma and ma.groups()[0] in Fuse._attrs and not fp in fmap: yield 21 continue yield fmap[fp] maxva = [0] while feas: feas = set(resolve(feas, maxva)) return maxva[0]
[ "def", "feature_needs", "(", "*", "feas", ")", ":", "fmap", "=", "{", "'stateful_files'", ":", "22", ",", "'stateful_dirs'", ":", "23", ",", "'stateful_io'", ":", "(", "'stateful_files'", ",", "'stateful_dirs'", ")", ",", "'stateful_files_keep_cache'", ":", "23", ",", "'stateful_files_direct_io'", ":", "23", ",", "'keep_cache'", ":", "(", "'stateful_files_keep_cache'", ",", ")", ",", "'direct_io'", ":", "(", "'stateful_files_direct_io'", ",", ")", ",", "'has_opendir'", ":", "(", "'stateful_dirs'", ",", ")", ",", "'has_releasedir'", ":", "(", "'stateful_dirs'", ",", ")", ",", "'has_fsyncdir'", ":", "(", "'stateful_dirs'", ",", ")", ",", "'has_create'", ":", "25", ",", "'has_access'", ":", "25", ",", "'has_fgetattr'", ":", "25", ",", "'has_ftruncate'", ":", "25", ",", "'has_fsinit'", ":", "(", "'has_init'", ")", ",", "'has_fsdestroy'", ":", "(", "'has_destroy'", ")", ",", "'has_lock'", ":", "26", ",", "'has_utimens'", ":", "26", ",", "'has_bmap'", ":", "26", ",", "'has_init'", ":", "23", ",", "'has_destroy'", ":", "23", ",", "'*'", ":", "'!re:^\\*$'", "}", "if", "not", "feas", ":", "return", "fmap", "def", "resolve", "(", "args", ",", "maxva", ")", ":", "for", "fp", "in", "args", ":", "if", "isinstance", "(", "fp", ",", "int", ")", ":", "maxva", "[", "0", "]", "=", "max", "(", "maxva", "[", "0", "]", ",", "fp", ")", "continue", "if", "isinstance", "(", "fp", ",", "list", ")", "or", "isinstance", "(", "fp", ",", "tuple", ")", ":", "for", "f", "in", "fp", ":", "yield", "f", "continue", "ma", "=", "isinstance", "(", "fp", ",", "str", ")", "and", "re", ".", "compile", "(", "\"(!\\s*|)re:(.*)\"", ")", ".", "match", "(", "fp", ")", "if", "isinstance", "(", "fp", ",", "type", "(", "re", ".", "compile", "(", "''", ")", ")", ")", "or", "ma", ":", "neg", "=", "False", "if", "ma", ":", "mag", "=", "ma", ".", "groups", "(", ")", "fp", "=", "re", ".", "compile", "(", "mag", "[", "1", "]", ")", "neg", "=", "bool", "(", "mag", "[", "0", "]", ")", "for", "f", "in", "list", "(", "fmap", ".", "keys", "(", ")", ")", "+", "[", "'has_'", "+", "a", "for", "a", "in", "Fuse", ".", "_attrs", "]", ":", "if", "neg", "!=", "bool", "(", "re", ".", "search", "(", "fp", ",", "f", ")", ")", ":", "yield", "f", "continue", "ma", "=", "re", ".", "compile", "(", "\"has_(.*)\"", ")", ".", "match", "(", "fp", ")", "if", "ma", "and", "ma", ".", "groups", "(", ")", "[", "0", "]", "in", "Fuse", ".", "_attrs", "and", "not", "fp", "in", "fmap", ":", "yield", "21", "continue", "yield", "fmap", "[", "fp", "]", "maxva", "=", "[", "0", "]", "while", "feas", ":", "feas", "=", "set", "(", "resolve", "(", "feas", ",", "maxva", ")", ")", "return", "maxva", "[", "0", "]" ]
37.728261
19.75
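Reading the fmap table in the function above (not an authoritative python-fuse reference), a few illustrative calls and the values they should resolve to:

feature_needs()                              # no arguments: returns the builtin-specifier map
feature_needs('stateful_files')              # -> 22
feature_needs('stateful_files', 'has_lock')  # -> 26, the smallest API version covering both
feature_needs(24, 'has_create')              # integers act as version floors -> 25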
def _killall(self, force=False): """Kill all remaining processes, forcefully if requested.""" for_termination = [] for n, p in iteritems(self._processes): if 'returncode' not in p: for_termination.append(n) for n in for_termination: p = self._processes[n] signame = 'SIGKILL' if force else 'SIGTERM' self._system_print("sending %s to %s (pid %s)\n" % (signame, n, p['pid'])) if force: self._env.kill(p['pid']) else: self._env.terminate(p['pid'])
[ "def", "_killall", "(", "self", ",", "force", "=", "False", ")", ":", "for_termination", "=", "[", "]", "for", "n", ",", "p", "in", "iteritems", "(", "self", ".", "_processes", ")", ":", "if", "'returncode'", "not", "in", "p", ":", "for_termination", ".", "append", "(", "n", ")", "for", "n", "in", "for_termination", ":", "p", "=", "self", ".", "_processes", "[", "n", "]", "signame", "=", "'SIGKILL'", "if", "force", "else", "'SIGTERM'", "self", ".", "_system_print", "(", "\"sending %s to %s (pid %s)\\n\"", "%", "(", "signame", ",", "n", ",", "p", "[", "'pid'", "]", ")", ")", "if", "force", ":", "self", ".", "_env", ".", "kill", "(", "p", "[", "'pid'", "]", ")", "else", ":", "self", ".", "_env", ".", "terminate", "(", "p", "[", "'pid'", "]", ")" ]
36.117647
13.058824
def instance(self, name=None, *args, **kwargs): """Create a new instance using key ``name``. :param name: the name of the class (by default) or the key name of the class used to find the class :param args: given to the ``__init__`` method :param kwargs: given to the ``__init__`` method """ logger.info(f'new instance of {name}') t0 = time() name = self.default_name if name is None else name logger.debug(f'creating instance of {name}') class_name, params = self._class_name_params(name) cls = self._find_class(class_name) params.update(kwargs) if self._has_init_config(cls): logger.debug(f'found config parameter') params['config'] = self.config if self._has_init_name(cls): logger.debug(f'found name parameter') params['name'] = name if logger.level >= logging.DEBUG: for k, v in params.items(): logger.debug(f'populating {k} -> {v} ({type(v)})') inst = self._instance(cls, *args, **params) logger.info(f'created {name} instance of {cls.__name__} ' + f'in {(time() - t0):.2f}s') return inst
[ "def", "instance", "(", "self", ",", "name", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "f'new instance of {name}'", ")", "t0", "=", "time", "(", ")", "name", "=", "self", ".", "default_name", "if", "name", "is", "None", "else", "name", "logger", ".", "debug", "(", "f'creating instance of {name}'", ")", "class_name", ",", "params", "=", "self", ".", "_class_name_params", "(", "name", ")", "cls", "=", "self", ".", "_find_class", "(", "class_name", ")", "params", ".", "update", "(", "kwargs", ")", "if", "self", ".", "_has_init_config", "(", "cls", ")", ":", "logger", ".", "debug", "(", "f'found config parameter'", ")", "params", "[", "'config'", "]", "=", "self", ".", "config", "if", "self", ".", "_has_init_name", "(", "cls", ")", ":", "logger", ".", "debug", "(", "f'found name parameter'", ")", "params", "[", "'name'", "]", "=", "name", "if", "logger", ".", "level", ">=", "logging", ".", "DEBUG", ":", "for", "k", ",", "v", "in", "params", ".", "items", "(", ")", ":", "logger", ".", "debug", "(", "f'populating {k} -> {v} ({type(v)})'", ")", "inst", "=", "self", ".", "_instance", "(", "cls", ",", "*", "args", ",", "*", "*", "params", ")", "logger", ".", "info", "(", "f'created {name} instance of {cls.__name__} '", "+", "f'in {(time() - t0):.2f}s'", ")", "return", "inst" ]
42.034483
12.758621
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwds): """ Generate a hexagonal binning plot. Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None` (the default), this is a histogram of the number of occurrences of the observations at ``(x[i], y[i])``. If `C` is specified, specifies values at given coordinates ``(x[i], y[i])``. These values are accumulated for each hexagonal bin and then reduced according to `reduce_C_function`, having as default the NumPy's mean function (:meth:`numpy.mean`). (If `C` is specified, it must also be a 1-D sequence of the same length as `x` and `y`, or a column label.) Parameters ---------- x : int or str The column label or position for x points. y : int or str The column label or position for y points. C : int or str, optional The column label or position for the value of `(x, y)` point. reduce_C_function : callable, default `np.mean` Function of one argument that reduces all the values in a bin to a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`). gridsize : int or tuple of (int, int), default 100 The number of hexagons in the x-direction. The corresponding number of hexagons in the y-direction is chosen in a way that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the x-direction and the y-direction. **kwds Additional keyword arguments are documented in :meth:`DataFrame.plot`. Returns ------- matplotlib.AxesSubplot The matplotlib ``Axes`` on which the hexbin is plotted. See Also -------- DataFrame.plot : Make plots of a DataFrame. matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib, the matplotlib function that is used under the hood. Examples -------- The following examples are generated with random data from a normal distribution. .. plot:: :context: close-figs >>> n = 10000 >>> df = pd.DataFrame({'x': np.random.randn(n), ... 'y': np.random.randn(n)}) >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20) The next example uses `C` and `np.sum` as `reduce_C_function`. Note that `'observations'` values ranges from 1 to 5 but the result plot shows values up to more than 25. This is because of the `reduce_C_function`. .. plot:: :context: close-figs >>> n = 500 >>> df = pd.DataFrame({ ... 'coord_x': np.random.uniform(-3, 3, size=n), ... 'coord_y': np.random.uniform(30, 50, size=n), ... 'observations': np.random.randint(1,5, size=n) ... }) >>> ax = df.plot.hexbin(x='coord_x', ... y='coord_y', ... C='observations', ... reduce_C_function=np.sum, ... gridsize=10, ... cmap="viridis") """ if reduce_C_function is not None: kwds['reduce_C_function'] = reduce_C_function if gridsize is not None: kwds['gridsize'] = gridsize return self(kind='hexbin', x=x, y=y, C=C, **kwds)
[ "def", "hexbin", "(", "self", ",", "x", ",", "y", ",", "C", "=", "None", ",", "reduce_C_function", "=", "None", ",", "gridsize", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "reduce_C_function", "is", "not", "None", ":", "kwds", "[", "'reduce_C_function'", "]", "=", "reduce_C_function", "if", "gridsize", "is", "not", "None", ":", "kwds", "[", "'gridsize'", "]", "=", "gridsize", "return", "self", "(", "kind", "=", "'hexbin'", ",", "x", "=", "x", ",", "y", "=", "y", ",", "C", "=", "C", ",", "*", "*", "kwds", ")" ]
40.954545
21.977273
def strip_comments(text): """Comment stripper for JSON. """ regex = r'\s*(#|\/{2}).*$' regex_inline = r'(:?(?:\s)*([A-Za-z\d\.{}]*)|((?<=\").*\"),?)(?:\s)*(((#|(\/{2})).*)|)$' # noqa lines = text.split('\n') for index, line in enumerate(lines): if re.search(regex, line): if re.search(r'^' + regex, line, re.IGNORECASE): lines[index] = "" elif re.search(regex_inline, line): lines[index] = re.sub(regex_inline, r'\1', line) return '\n'.join(lines)
[ "def", "strip_comments", "(", "text", ")", ":", "regex", "=", "r'\\s*(#|\\/{2}).*$'", "regex_inline", "=", "r'(:?(?:\\s)*([A-Za-z\\d\\.{}]*)|((?<=\\\").*\\\"),?)(?:\\s)*(((#|(\\/{2})).*)|)$'", "# noqa", "lines", "=", "text", ".", "split", "(", "'\\n'", ")", "for", "index", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "re", ".", "search", "(", "regex", ",", "line", ")", ":", "if", "re", ".", "search", "(", "r'^'", "+", "regex", ",", "line", ",", "re", ".", "IGNORECASE", ")", ":", "lines", "[", "index", "]", "=", "\"\"", "elif", "re", ".", "search", "(", "regex_inline", ",", "line", ")", ":", "lines", "[", "index", "]", "=", "re", ".", "sub", "(", "regex_inline", ",", "r'\\1'", ",", "line", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
33
18.375
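A quick check of strip_comments (assuming it is importable alongside the re module it uses): full-line and trailing //-style comments are removed, leaving parseable JSON.

import json

jsonc = '\n'.join([
    '{',
    '  "retries": 3,  // inline comment',
    '  # full-line comment',
    '  "name": "demo"',
    '}',
])
print(json.loads(strip_comments(jsonc)))   # {'retries': 3, 'name': 'demo'}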
def getMirrorTextureGL(self, eEye): """Access to mirror textures from OpenGL.""" fn = self.function_table.getMirrorTextureGL pglTextureId = glUInt_t() pglSharedTextureHandle = glSharedTextureHandle_t() result = fn(eEye, byref(pglTextureId), byref(pglSharedTextureHandle)) return result, pglTextureId, pglSharedTextureHandle
[ "def", "getMirrorTextureGL", "(", "self", ",", "eEye", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getMirrorTextureGL", "pglTextureId", "=", "glUInt_t", "(", ")", "pglSharedTextureHandle", "=", "glSharedTextureHandle_t", "(", ")", "result", "=", "fn", "(", "eEye", ",", "byref", "(", "pglTextureId", ")", ",", "byref", "(", "pglSharedTextureHandle", ")", ")", "return", "result", ",", "pglTextureId", ",", "pglSharedTextureHandle" ]
45.625
17.125
def min_abs(self): '''Returns minimum absolute value.''' if self.__len__() == 0: return ArgumentError('empty set has no minimum absolute value.') if self.contains(0): return 0 return numpy.min([numpy.abs(val) for val in [self.max_neg(), self.min_pos()] if val is not None])
[ "def", "min_abs", "(", "self", ")", ":", "if", "self", ".", "__len__", "(", ")", "==", "0", ":", "return", "ArgumentError", "(", "'empty set has no minimum absolute value.'", ")", "if", "self", ".", "contains", "(", "0", ")", ":", "return", "0", "return", "numpy", ".", "min", "(", "[", "numpy", ".", "abs", "(", "val", ")", "for", "val", "in", "[", "self", ".", "max_neg", "(", ")", ",", "self", ".", "min_pos", "(", ")", "]", "if", "val", "is", "not", "None", "]", ")" ]
41.444444
15.444444
def get_driver(self, namespace, parsed_args, **kwargs): """Get mutually-exclusive plugin for plugin namespace. """ option, dest = self._namespace_to_option(namespace) dest_prefix = '{0}_'.format(dest) driver_name = getattr(parsed_args, dest, 'default') driver_extension = self.driver_managers[namespace][driver_name] return driver_extension.plugin.from_args( parsed_args, dest_prefix, **kwargs)
[ "def", "get_driver", "(", "self", ",", "namespace", ",", "parsed_args", ",", "*", "*", "kwargs", ")", ":", "option", ",", "dest", "=", "self", ".", "_namespace_to_option", "(", "namespace", ")", "dest_prefix", "=", "'{0}_'", ".", "format", "(", "dest", ")", "driver_name", "=", "getattr", "(", "parsed_args", ",", "dest", ",", "'default'", ")", "driver_extension", "=", "self", ".", "driver_managers", "[", "namespace", "]", "[", "driver_name", "]", "return", "driver_extension", ".", "plugin", ".", "from_args", "(", "parsed_args", ",", "dest_prefix", ",", "*", "*", "kwargs", ")" ]
45.3
14.1
def img2img_transformer_tiny(): """Tiny params.""" hparams = img2img_transformer2d_base() hparams.num_hidden_layers = 2 hparams.hidden_size = 128 hparams.batch_size = 4 hparams.max_length = 128 hparams.attention_key_channels = hparams.attention_value_channels = 0 hparams.filter_size = 128 hparams.num_heads = 1 hparams.pos = "timing" return hparams
[ "def", "img2img_transformer_tiny", "(", ")", ":", "hparams", "=", "img2img_transformer2d_base", "(", ")", "hparams", ".", "num_hidden_layers", "=", "2", "hparams", ".", "hidden_size", "=", "128", "hparams", ".", "batch_size", "=", "4", "hparams", ".", "max_length", "=", "128", "hparams", ".", "attention_key_channels", "=", "hparams", ".", "attention_value_channels", "=", "0", "hparams", ".", "filter_size", "=", "128", "hparams", ".", "num_heads", "=", "1", "hparams", ".", "pos", "=", "\"timing\"", "return", "hparams" ]
30
13.5
def fold(self, **_3to2kwargs): policy = _3to2kwargs['policy']; del _3to2kwargs['policy'] """Fold header according to policy. The parsed representation of the header is folded according to RFC5322 rules, as modified by the policy. If the parse tree contains surrogateescaped bytes, the bytes are CTE encoded using the charset 'unknown-8bit'. Any non-ASCII characters in the parse tree are CTE encoded using charset utf-8. XXX: make this a policy setting. The returned value is an ASCII-only string possibly containing linesep characters, and ending with a linesep character. The string includes the header name and the ': ' separator. """ # At some point we need to only put fws here if it was in the source. header = parser.Header([ parser.HeaderLabel([ parser.ValueTerminal(self.name, 'header-name'), parser.ValueTerminal(':', 'header-sep')]), parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]), self._parse_tree]) return header.fold(policy=policy)
[ "def", "fold", "(", "self", ",", "*", "*", "_3to2kwargs", ")", ":", "policy", "=", "_3to2kwargs", "[", "'policy'", "]", "del", "_3to2kwargs", "[", "'policy'", "]", "# At some point we need to only put fws here if it was in the source.", "header", "=", "parser", ".", "Header", "(", "[", "parser", ".", "HeaderLabel", "(", "[", "parser", ".", "ValueTerminal", "(", "self", ".", "name", ",", "'header-name'", ")", ",", "parser", ".", "ValueTerminal", "(", "':'", ",", "'header-sep'", ")", "]", ")", ",", "parser", ".", "CFWSList", "(", "[", "parser", ".", "WhiteSpaceTerminal", "(", "' '", ",", "'fws'", ")", "]", ")", ",", "self", ".", "_parse_tree", "]", ")", "return", "header", ".", "fold", "(", "policy", "=", "policy", ")" ]
45.68
22
def namedb_genesis_txid(address, metadata): """ Make a "fake" txid for a genesis block entry. Returns a 32-byte hash (double-sha256), hex-encoded """ preimage = '{} genesis {}'.format(address, metadata) return virtualchain.lib.hashing.bin_double_sha256(preimage).encode('hex')
[ "def", "namedb_genesis_txid", "(", "address", ",", "metadata", ")", ":", "preimage", "=", "'{} genesis {}'", ".", "format", "(", "address", ",", "metadata", ")", "return", "virtualchain", ".", "lib", ".", "hashing", ".", "bin_double_sha256", "(", "preimage", ")", ".", "encode", "(", "'hex'", ")" ]
42
11.428571
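A rough standard-library equivalent of the helper above, assuming bin_double_sha256 is the conventional SHA-256 applied twice; the Python 2 style .encode('hex') becomes bytes.hex() here, and the sample address/metadata are placeholders.

import hashlib

def genesis_txid(address, metadata):
    preimage = '{} genesis {}'.format(address, metadata)
    digest = hashlib.sha256(hashlib.sha256(preimage.encode()).digest()).digest()
    return digest.hex()   # 32-byte double SHA-256, hex-encoded

print(genesis_txid('1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2', '{"type": "genesis"}'))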
def normalise_tensor(tensor): ''' Normalise the tensor by dividing it by its norm, defined as np.sqrt(X:X) ''' tensor_norm = np.linalg.norm(tensor) return tensor / tensor_norm, tensor_norm
[ "def", "normalise_tensor", "(", "tensor", ")", ":", "tensor_norm", "=", "np", ".", "linalg", ".", "norm", "(", "tensor", ")", "return", "tensor", "/", "tensor_norm", ",", "tensor_norm" ]
30.428571
19.285714
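A quick numpy check of normalise_tensor: the first return value has unit Frobenius norm, and multiplying it back by the returned norm recovers the input (assumes the function is importable and numpy is available as np).

import numpy as np

tensor = np.arange(1, 10, dtype=float).reshape(3, 3)
unit, norm = normalise_tensor(tensor)
print(np.isclose(np.linalg.norm(unit), 1.0))   # True: unit Frobenius norm
print(np.allclose(unit * norm, tensor))        # True: original tensor recovered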
async def list_instances(self, project, page_size=100, instance_filter=None): """Fetch all instances in a GCE project. You can find the endpoint documentation `here <https://cloud. google.com/compute/docs/reference/latest/instances/ aggregatedList>`__. Args: project (str): unique, user-provided project ID. page_size (int): hint for the client to only retrieve up to this number of results per API call. instance_filter (str): endpoint-specific filter string used to retrieve a subset of instances. This is passed directly to the endpoint's "filter" URL query parameter. Returns: list(dicts): data of all instances in the given :obj:`project` """ url = (f'{self.BASE_URL}{self.api_version}/projects/{project}' '/aggregated/instances') params = {'maxResults': page_size} if instance_filter: params['filter'] = instance_filter responses = await self.list_all(url, params) instances = self._parse_rsps_for_instances(responses) return instances
[ "async", "def", "list_instances", "(", "self", ",", "project", ",", "page_size", "=", "100", ",", "instance_filter", "=", "None", ")", ":", "url", "=", "(", "f'{self.BASE_URL}{self.api_version}/projects/{project}'", "'/aggregated/instances'", ")", "params", "=", "{", "'maxResults'", ":", "page_size", "}", "if", "instance_filter", ":", "params", "[", "'filter'", "]", "=", "instance_filter", "responses", "=", "await", "self", ".", "list_all", "(", "url", ",", "params", ")", "instances", "=", "self", ".", "_parse_rsps_for_instances", "(", "responses", ")", "return", "instances" ]
41.5
18
def _oauth_request_parameters(self, url, access_token, parameters={}, method="GET"): """Returns the OAuth parameters as a dict for the given request. parameters should include all POST arguments and query string arguments that will be sent with the request. """ consumer_token = self._oauth_consumer_token() base_args = dict( oauth_consumer_key=consumer_token["key"], oauth_token=access_token["key"], oauth_signature_method="HMAC-SHA1", oauth_timestamp=str(int(time.time())), oauth_nonce=binascii.b2a_hex(uuid.uuid4().bytes), oauth_version=getattr(self, "_OAUTH_VERSION", "1.0a"), ) args = {} args.update(base_args) args.update(parameters) if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": signature = _oauth10a_signature(consumer_token, method, url, args, access_token) else: signature = _oauth_signature(consumer_token, method, url, args, access_token) base_args["oauth_signature"] = signature return base_args
[ "def", "_oauth_request_parameters", "(", "self", ",", "url", ",", "access_token", ",", "parameters", "=", "{", "}", ",", "method", "=", "\"GET\"", ")", ":", "consumer_token", "=", "self", ".", "_oauth_consumer_token", "(", ")", "base_args", "=", "dict", "(", "oauth_consumer_key", "=", "consumer_token", "[", "\"key\"", "]", ",", "oauth_token", "=", "access_token", "[", "\"key\"", "]", ",", "oauth_signature_method", "=", "\"HMAC-SHA1\"", ",", "oauth_timestamp", "=", "str", "(", "int", "(", "time", ".", "time", "(", ")", ")", ")", ",", "oauth_nonce", "=", "binascii", ".", "b2a_hex", "(", "uuid", ".", "uuid4", "(", ")", ".", "bytes", ")", ",", "oauth_version", "=", "getattr", "(", "self", ",", "\"_OAUTH_VERSION\"", ",", "\"1.0a\"", ")", ",", ")", "args", "=", "{", "}", "args", ".", "update", "(", "base_args", ")", "args", ".", "update", "(", "parameters", ")", "if", "getattr", "(", "self", ",", "\"_OAUTH_VERSION\"", ",", "\"1.0a\"", ")", "==", "\"1.0a\"", ":", "signature", "=", "_oauth10a_signature", "(", "consumer_token", ",", "method", ",", "url", ",", "args", ",", "access_token", ")", "else", ":", "signature", "=", "_oauth_signature", "(", "consumer_token", ",", "method", ",", "url", ",", "args", ",", "access_token", ")", "base_args", "[", "\"oauth_signature\"", "]", "=", "signature", "return", "base_args" ]
45
17.555556
def PhyDMSComprehensiveParser(): """Returns *argparse.ArgumentParser* for ``phdyms_comprehensive`` script.""" parser = ArgumentParserNoArgHelp(description=("Comprehensive phylogenetic " "model comparison and detection of selection informed by deep " "mutational scanning data. This program runs 'phydms' repeatedly " "to compare substitution models and detect selection. " "{0} Version {1}. Full documentation at {2}").format( phydmslib.__acknowledgments__, phydmslib.__version__, phydmslib.__url__), formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('outprefix', help='Output file prefix.', type=str) parser.add_argument('alignment', help='Existing FASTA file with aligned ' 'codon sequences.', type=ExistingFile) parser.add_argument('prefsfiles', help='Existing files with site-specific ' 'amino-acid preferences.', type=ExistingFile, nargs='+') group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--raxml', help="Path to RAxML (e.g., 'raxml')") group.add_argument('--tree', type=ExistingFile, help="Existing Newick file giving input tree.") parser.add_argument('--ncpus', default=-1, help='Use this many CPUs; -1 ' 'means all available.', type=int) parser.add_argument('--brlen', choices=['scale', 'optimize'], default='optimize', help=("How to handle branch lengths: " "scale by single parameter or optimize each one")) parser.set_defaults(omegabysite=False) parser.add_argument('--omegabysite', dest='omegabysite', action='store_true', help="Fit omega (dN/dS) for each site.") parser.set_defaults(diffprefsbysite=False) parser.add_argument('--diffprefsbysite', dest='diffprefsbysite', action='store_true', help="Fit differential preferences for " "each site.") parser.set_defaults(gammaomega=False) parser.add_argument('--gammaomega', dest='gammaomega', action=\ 'store_true', help="Fit ExpCM with gamma distributed omega.") parser.set_defaults(gammabeta=False) parser.add_argument('--gammabeta', dest='gammabeta', action=\ 'store_true', help="Fit ExpCM with gamma distributed beta.") parser.set_defaults(noavgprefs=False) parser.add_argument('--no-avgprefs', dest='noavgprefs', action='store_true', help="No fitting of models with preferences averaged across sites " "for ExpCM.") parser.set_defaults(randprefs=False) parser.add_argument('--randprefs', dest='randprefs', action='store_true', help="Include ExpCM models with randomized preferences.") parser.add_argument('-v', '--version', action='version', version= '%(prog)s {version}'.format(version=phydmslib.__version__)) return parser
[ "def", "PhyDMSComprehensiveParser", "(", ")", ":", "parser", "=", "ArgumentParserNoArgHelp", "(", "description", "=", "(", "\"Comprehensive phylogenetic \"", "\"model comparison and detection of selection informed by deep \"", "\"mutational scanning data. This program runs 'phydms' repeatedly \"", "\"to compare substitution models and detect selection. \"", "\"{0} Version {1}. Full documentation at {2}\"", ")", ".", "format", "(", "phydmslib", ".", "__acknowledgments__", ",", "phydmslib", ".", "__version__", ",", "phydmslib", ".", "__url__", ")", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ")", "parser", ".", "add_argument", "(", "'outprefix'", ",", "help", "=", "'Output file prefix.'", ",", "type", "=", "str", ")", "parser", ".", "add_argument", "(", "'alignment'", ",", "help", "=", "'Existing FASTA file with aligned '", "'codon sequences.'", ",", "type", "=", "ExistingFile", ")", "parser", ".", "add_argument", "(", "'prefsfiles'", ",", "help", "=", "'Existing files with site-specific '", "'amino-acid preferences.'", ",", "type", "=", "ExistingFile", ",", "nargs", "=", "'+'", ")", "group", "=", "parser", ".", "add_mutually_exclusive_group", "(", "required", "=", "True", ")", "group", ".", "add_argument", "(", "'--raxml'", ",", "help", "=", "\"Path to RAxML (e.g., 'raxml')\"", ")", "group", ".", "add_argument", "(", "'--tree'", ",", "type", "=", "ExistingFile", ",", "help", "=", "\"Existing Newick file giving input tree.\"", ")", "parser", ".", "add_argument", "(", "'--ncpus'", ",", "default", "=", "-", "1", ",", "help", "=", "'Use this many CPUs; -1 '", "'means all available.'", ",", "type", "=", "int", ")", "parser", ".", "add_argument", "(", "'--brlen'", ",", "choices", "=", "[", "'scale'", ",", "'optimize'", "]", ",", "default", "=", "'optimize'", ",", "help", "=", "(", "\"How to handle branch lengths: \"", "\"scale by single parameter or optimize each one\"", ")", ")", "parser", ".", "set_defaults", "(", "omegabysite", "=", "False", ")", "parser", ".", "add_argument", "(", "'--omegabysite'", ",", "dest", "=", "'omegabysite'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Fit omega (dN/dS) for each site.\"", ")", "parser", ".", "set_defaults", "(", "diffprefsbysite", "=", "False", ")", "parser", ".", "add_argument", "(", "'--diffprefsbysite'", ",", "dest", "=", "'diffprefsbysite'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Fit differential preferences for \"", "\"each site.\"", ")", "parser", ".", "set_defaults", "(", "gammaomega", "=", "False", ")", "parser", ".", "add_argument", "(", "'--gammaomega'", ",", "dest", "=", "'gammaomega'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Fit ExpCM with gamma distributed omega.\"", ")", "parser", ".", "set_defaults", "(", "gammabeta", "=", "False", ")", "parser", ".", "add_argument", "(", "'--gammabeta'", ",", "dest", "=", "'gammabeta'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Fit ExpCM with gamma distributed beta.\"", ")", "parser", ".", "set_defaults", "(", "noavgprefs", "=", "False", ")", "parser", ".", "add_argument", "(", "'--no-avgprefs'", ",", "dest", "=", "'noavgprefs'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"No fitting of models with preferences averaged across sites \"", "\"for ExpCM.\"", ")", "parser", ".", "set_defaults", "(", "randprefs", "=", "False", ")", "parser", ".", "add_argument", "(", "'--randprefs'", ",", "dest", "=", "'randprefs'", ",", "action", "=", "'store_true'", ",", "help", "=", "\"Include ExpCM models with 
randomized preferences.\"", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--version'", ",", "action", "=", "'version'", ",", "version", "=", "'%(prog)s {version}'", ".", "format", "(", "version", "=", "phydmslib", ".", "__version__", ")", ")", "return", "parser" ]
60.914894
23.042553
def artifactCreated(self, *args, **kwargs): """ Artifact Creation Messages Whenever the `createArtifact` end-point is called, the queue will create a record of the artifact and post a message on this exchange. All of this happens before the queue returns a signed URL for the caller to upload the actual artifact with (pending on `storageType`). This means that the actual artifact is rarely available when this message is posted. But it is not unreasonable to assume that the artifact will become available at some point later. Most signatures will expire in 30 minutes or so, forcing the uploader to call `createArtifact` with the same payload again in order to continue uploading the artifact. However, in most cases (especially for small artifacts) it's very reasonable to assume the artifact will be available within a few minutes. This property means that this exchange is mostly useful for tools monitoring task evaluation. One could also use it to count the number of artifacts per task, or _index_ artifacts though in most cases it'll be smarter to index artifacts after the task in question has completed successfully. This exchange outputs: ``v1/artifact-created-message.json#`` This exchange takes the following keys: * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required) * taskId: `taskId` for the task this message concerns (required) * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required) * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required) * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required) * provisionerId: `provisionerId` this task is targeted at. (required) * workerType: `workerType` this task must run on. (required) * schedulerId: `schedulerId` this task was created by. (required) * taskGroupId: `taskGroupId` this task was created in. (required) * reserved: Space reserved for future routing-key entries, you should always match this entry with `#`. As automatically done by our tooling, if not specified. """ ref = { 'exchange': 'artifact-created', 'name': 'artifactCreated', 'routingKey': [ { 'constant': 'primary', 'multipleWords': False, 'name': 'routingKeyKind', }, { 'multipleWords': False, 'name': 'taskId', }, { 'multipleWords': False, 'name': 'runId', }, { 'multipleWords': False, 'name': 'workerGroup', }, { 'multipleWords': False, 'name': 'workerId', }, { 'multipleWords': False, 'name': 'provisionerId', }, { 'multipleWords': False, 'name': 'workerType', }, { 'multipleWords': False, 'name': 'schedulerId', }, { 'multipleWords': False, 'name': 'taskGroupId', }, { 'multipleWords': True, 'name': 'reserved', }, ], 'schema': 'v1/artifact-created-message.json#', } return self._makeTopicExchange(ref, *args, **kwargs)
[ "def", "artifactCreated", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ref", "=", "{", "'exchange'", ":", "'artifact-created'", ",", "'name'", ":", "'artifactCreated'", ",", "'routingKey'", ":", "[", "{", "'constant'", ":", "'primary'", ",", "'multipleWords'", ":", "False", ",", "'name'", ":", "'routingKeyKind'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'taskId'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'runId'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'workerGroup'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'workerId'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'provisionerId'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'workerType'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'schedulerId'", ",", "}", ",", "{", "'multipleWords'", ":", "False", ",", "'name'", ":", "'taskGroupId'", ",", "}", ",", "{", "'multipleWords'", ":", "True", ",", "'name'", ":", "'reserved'", ",", "}", ",", "]", ",", "'schema'", ":", "'v1/artifact-created-message.json#'", ",", "}", "return", "self", ".", "_makeTopicExchange", "(", "ref", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
40.557895
26.031579
def get_dimensions_units(names): """Create dictionary of unit dimensions.""" dimensions_uni = {} for name in names: key = get_key_from_dimensions(names[name].dimensions) dimensions_uni[key] = names[name] plain_dimensions = [{'base': name, 'power': 1}] key = get_key_from_dimensions(plain_dimensions) dimensions_uni[key] = names[name] if not names[name].dimensions: names[name].dimensions = plain_dimensions names[name].dimensions = [{'base': names[i['base']].name, 'power': i['power']} for i in names[name].dimensions] return dimensions_uni
[ "def", "get_dimensions_units", "(", "names", ")", ":", "dimensions_uni", "=", "{", "}", "for", "name", "in", "names", ":", "key", "=", "get_key_from_dimensions", "(", "names", "[", "name", "]", ".", "dimensions", ")", "dimensions_uni", "[", "key", "]", "=", "names", "[", "name", "]", "plain_dimensions", "=", "[", "{", "'base'", ":", "name", ",", "'power'", ":", "1", "}", "]", "key", "=", "get_key_from_dimensions", "(", "plain_dimensions", ")", "dimensions_uni", "[", "key", "]", "=", "names", "[", "name", "]", "if", "not", "names", "[", "name", "]", ".", "dimensions", ":", "names", "[", "name", "]", ".", "dimensions", "=", "plain_dimensions", "names", "[", "name", "]", ".", "dimensions", "=", "[", "{", "'base'", ":", "names", "[", "i", "[", "'base'", "]", "]", ".", "name", ",", "'power'", ":", "i", "[", "'power'", "]", "}", "for", "i", "in", "names", "[", "name", "]", ".", "dimensions", "]", "return", "dimensions_uni" ]
33.95
19.6
def _validate_field(self, validation_spec, dictionary_to_validate, parent=None, force_optional=False): """ Validates if field is OK. :param validation_spec: specification of the field :type validation_spec: dict :param dictionary_to_validate: dictionary where the field should be present :type dictionary_to_validate: dict :param parent: full path of parent field :type parent: str :param force_optional: forces the field to be optional (all union fields have force_optional set to True) :type force_optional: bool :return: True if the field is present """ field_name = validation_spec['name'] field_type = validation_spec.get('type') optional = validation_spec.get('optional') regexp = validation_spec.get('regexp') allow_empty = validation_spec.get('allow_empty') children_validation_specs = validation_spec.get('fields') required_api_version = validation_spec.get('api_version') custom_validation = validation_spec.get('custom_validation') full_field_path = self._get_field_name_with_parent(field_name=field_name, parent=parent) if required_api_version and required_api_version != self._api_version: self.log.debug( "Skipping validation of the field '%s' for API version '%s' " "as it is only valid for API version '%s'", field_name, self._api_version, required_api_version) return False value = dictionary_to_validate.get(field_name) if (optional or force_optional) and value is None: self.log.debug("The optional field '%s' is missing. That's perfectly OK.", full_field_path) return False # Certainly down from here the field is present (value is not None) # so we should only return True from now on self._sanity_checks(children_validation_specs=children_validation_specs, field_type=field_type, full_field_path=full_field_path, regexp=regexp, allow_empty=allow_empty, custom_validation=custom_validation, value=value) if allow_empty is False: self._validate_is_empty(full_field_path, value) if regexp: self._validate_regexp(full_field_path, regexp, value) elif field_type == 'dict': if not isinstance(value, dict): raise GcpFieldValidationException( "The field '{}' should be of dictionary type according to the " "specification '{}' but it is '{}'". format(full_field_path, validation_spec, value)) if children_validation_specs is None: self.log.debug( "The dict field '%s' has no nested fields defined in the " "specification '%s'. That's perfectly ok - it's content will " "not be validated.", full_field_path, validation_spec) else: self._validate_dict(children_validation_specs, full_field_path, value) elif field_type == 'union': if not children_validation_specs: raise GcpValidationSpecificationException( "The union field '%s' has no nested fields " "defined in specification '%s'. Unions should have at least one " "nested field defined.", full_field_path, validation_spec) self._validate_union(children_validation_specs, full_field_path, dictionary_to_validate) elif field_type == 'list': if not isinstance(value, list): raise GcpFieldValidationException( "The field '{}' should be of list type according to the " "specification '{}' but it is '{}'". format(full_field_path, validation_spec, value)) elif custom_validation: try: custom_validation(value) except Exception as e: raise GcpFieldValidationException( "Error while validating custom field '{}' specified by '{}': '{}'". format(full_field_path, validation_spec, e)) elif field_type is None: self.log.debug("The type of field '%s' is not specified in '%s'. " "Not validating its content.", full_field_path, validation_spec) else: raise GcpValidationSpecificationException( "The field '{}' is of type '{}' in specification '{}'." "This type is unknown to validation!".format( full_field_path, field_type, validation_spec)) return True
[ "def", "_validate_field", "(", "self", ",", "validation_spec", ",", "dictionary_to_validate", ",", "parent", "=", "None", ",", "force_optional", "=", "False", ")", ":", "field_name", "=", "validation_spec", "[", "'name'", "]", "field_type", "=", "validation_spec", ".", "get", "(", "'type'", ")", "optional", "=", "validation_spec", ".", "get", "(", "'optional'", ")", "regexp", "=", "validation_spec", ".", "get", "(", "'regexp'", ")", "allow_empty", "=", "validation_spec", ".", "get", "(", "'allow_empty'", ")", "children_validation_specs", "=", "validation_spec", ".", "get", "(", "'fields'", ")", "required_api_version", "=", "validation_spec", ".", "get", "(", "'api_version'", ")", "custom_validation", "=", "validation_spec", ".", "get", "(", "'custom_validation'", ")", "full_field_path", "=", "self", ".", "_get_field_name_with_parent", "(", "field_name", "=", "field_name", ",", "parent", "=", "parent", ")", "if", "required_api_version", "and", "required_api_version", "!=", "self", ".", "_api_version", ":", "self", ".", "log", ".", "debug", "(", "\"Skipping validation of the field '%s' for API version '%s' \"", "\"as it is only valid for API version '%s'\"", ",", "field_name", ",", "self", ".", "_api_version", ",", "required_api_version", ")", "return", "False", "value", "=", "dictionary_to_validate", ".", "get", "(", "field_name", ")", "if", "(", "optional", "or", "force_optional", ")", "and", "value", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"The optional field '%s' is missing. That's perfectly OK.\"", ",", "full_field_path", ")", "return", "False", "# Certainly down from here the field is present (value is not None)", "# so we should only return True from now on", "self", ".", "_sanity_checks", "(", "children_validation_specs", "=", "children_validation_specs", ",", "field_type", "=", "field_type", ",", "full_field_path", "=", "full_field_path", ",", "regexp", "=", "regexp", ",", "allow_empty", "=", "allow_empty", ",", "custom_validation", "=", "custom_validation", ",", "value", "=", "value", ")", "if", "allow_empty", "is", "False", ":", "self", ".", "_validate_is_empty", "(", "full_field_path", ",", "value", ")", "if", "regexp", ":", "self", ".", "_validate_regexp", "(", "full_field_path", ",", "regexp", ",", "value", ")", "elif", "field_type", "==", "'dict'", ":", "if", "not", "isinstance", "(", "value", ",", "dict", ")", ":", "raise", "GcpFieldValidationException", "(", "\"The field '{}' should be of dictionary type according to the \"", "\"specification '{}' but it is '{}'\"", ".", "format", "(", "full_field_path", ",", "validation_spec", ",", "value", ")", ")", "if", "children_validation_specs", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"The dict field '%s' has no nested fields defined in the \"", "\"specification '%s'. That's perfectly ok - it's content will \"", "\"not be validated.\"", ",", "full_field_path", ",", "validation_spec", ")", "else", ":", "self", ".", "_validate_dict", "(", "children_validation_specs", ",", "full_field_path", ",", "value", ")", "elif", "field_type", "==", "'union'", ":", "if", "not", "children_validation_specs", ":", "raise", "GcpValidationSpecificationException", "(", "\"The union field '%s' has no nested fields \"", "\"defined in specification '%s'. 
Unions should have at least one \"", "\"nested field defined.\"", ",", "full_field_path", ",", "validation_spec", ")", "self", ".", "_validate_union", "(", "children_validation_specs", ",", "full_field_path", ",", "dictionary_to_validate", ")", "elif", "field_type", "==", "'list'", ":", "if", "not", "isinstance", "(", "value", ",", "list", ")", ":", "raise", "GcpFieldValidationException", "(", "\"The field '{}' should be of list type according to the \"", "\"specification '{}' but it is '{}'\"", ".", "format", "(", "full_field_path", ",", "validation_spec", ",", "value", ")", ")", "elif", "custom_validation", ":", "try", ":", "custom_validation", "(", "value", ")", "except", "Exception", "as", "e", ":", "raise", "GcpFieldValidationException", "(", "\"Error while validating custom field '{}' specified by '{}': '{}'\"", ".", "format", "(", "full_field_path", ",", "validation_spec", ",", "e", ")", ")", "elif", "field_type", "is", "None", ":", "self", ".", "log", ".", "debug", "(", "\"The type of field '%s' is not specified in '%s'. \"", "\"Not validating its content.\"", ",", "full_field_path", ",", "validation_spec", ")", "else", ":", "raise", "GcpValidationSpecificationException", "(", "\"The field '{}' is of type '{}' in specification '{}'.\"", "\"This type is unknown to validation!\"", ".", "format", "(", "full_field_path", ",", "field_type", ",", "validation_spec", ")", ")", "return", "True" ]
50.649485
21.391753
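The keys read by _validate_field (name, type, optional, regexp, allow_empty, fields, api_version, custom_validation) imply a specification shape like the sketch below; the field names and the commented call are illustrative assumptions, not an authoritative example from the library.

cluster_name_spec = {
    'name': 'cluster_name',
    'regexp': r'^[a-z][a-z0-9-]{0,61}[a-z0-9]$',   # hypothetical pattern
}
settings_spec = {
    'name': 'settings',
    'type': 'dict',
    'fields': [
        {'name': 'tier', 'optional': True},
        {'name': 'dataDiskSizeGb', 'optional': True, 'allow_empty': False},
    ],
}
# validator._validate_field(settings_spec, request_body)   # hypothetical call on the enclosing validator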
def loadtoc(self): """Load the table of contents into memory.""" self.toc = self.TOCTMPLT() self.lib.seek(self.pkgstart+self.tocpos) tocstr = self.lib.read(self.toclen) self.toc.frombinary(tocstr)
[ "def", "loadtoc", "(", "self", ")", ":", "self", ".", "toc", "=", "self", ".", "TOCTMPLT", "(", ")", "self", ".", "lib", ".", "seek", "(", "self", ".", "pkgstart", "+", "self", ".", "tocpos", ")", "tocstr", "=", "self", ".", "lib", ".", "read", "(", "self", ".", "toclen", ")", "self", ".", "toc", ".", "frombinary", "(", "tocstr", ")" ]
38.5
7.333333
def _convert_to_dict(cls, term, replace_value_names=True): """Converts a record hierarchy to nested dicts. :param term: Root term at which to start conversion """ from collections import OrderedDict if not term: return None if term.children: d = OrderedDict() for c in term.children: if c.child_property_type == 'scalar': d[c.record_term_lc] = cls._convert_to_dict(c, replace_value_names) elif c.child_property_type == 'sequence': try: d[c.record_term_lc].append(cls._convert_to_dict(c, replace_value_names)) except (KeyError, AttributeError): # The c.term property doesn't exist, so add a list d[c.record_term_lc] = [cls._convert_to_dict(c, replace_value_names)] elif c.child_property_type == 'sconcat': # Concat with a space if c.record_term_lc in d: s = d[c.record_term_lc] + ' ' else: s = '' d[c.record_term_lc] =s + (cls._convert_to_dict(c, replace_value_names) or '') elif c.child_property_type == 'bconcat': # Concat with a blank d[c.record_term_lc] = d.get(c.record_term_lc, '') + (cls._convert_to_dict(c, replace_value_names) or '') else: try: d[c.record_term_lc].append(cls._convert_to_dict(c, replace_value_names)) except KeyError: # The c.term property doesn't exist, so add a scalar or a map d[c.record_term_lc] = cls._convert_to_dict(c, replace_value_names) except AttributeError as e: # d[c.term] exists, but is a scalar, so convert it to a list d[c.record_term_lc] = [d[c.record_term]] + [cls._convert_to_dict(c, replace_value_names)] if term.value: if replace_value_names: d[term.term_value_name.lower()] = term.value else: d['@value'] = term.value return d else: return term.value
[ "def", "_convert_to_dict", "(", "cls", ",", "term", ",", "replace_value_names", "=", "True", ")", ":", "from", "collections", "import", "OrderedDict", "if", "not", "term", ":", "return", "None", "if", "term", ".", "children", ":", "d", "=", "OrderedDict", "(", ")", "for", "c", "in", "term", ".", "children", ":", "if", "c", ".", "child_property_type", "==", "'scalar'", ":", "d", "[", "c", ".", "record_term_lc", "]", "=", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", "elif", "c", ".", "child_property_type", "==", "'sequence'", ":", "try", ":", "d", "[", "c", ".", "record_term_lc", "]", ".", "append", "(", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", ")", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "# The c.term property doesn't exist, so add a list", "d", "[", "c", ".", "record_term_lc", "]", "=", "[", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", "]", "elif", "c", ".", "child_property_type", "==", "'sconcat'", ":", "# Concat with a space", "if", "c", ".", "record_term_lc", "in", "d", ":", "s", "=", "d", "[", "c", ".", "record_term_lc", "]", "+", "' '", "else", ":", "s", "=", "''", "d", "[", "c", ".", "record_term_lc", "]", "=", "s", "+", "(", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", "or", "''", ")", "elif", "c", ".", "child_property_type", "==", "'bconcat'", ":", "# Concat with a blank", "d", "[", "c", ".", "record_term_lc", "]", "=", "d", ".", "get", "(", "c", ".", "record_term_lc", ",", "''", ")", "+", "(", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", "or", "''", ")", "else", ":", "try", ":", "d", "[", "c", ".", "record_term_lc", "]", ".", "append", "(", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", ")", "except", "KeyError", ":", "# The c.term property doesn't exist, so add a scalar or a map", "d", "[", "c", ".", "record_term_lc", "]", "=", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", "except", "AttributeError", "as", "e", ":", "# d[c.term] exists, but is a scalar, so convert it to a list", "d", "[", "c", ".", "record_term_lc", "]", "=", "[", "d", "[", "c", ".", "record_term", "]", "]", "+", "[", "cls", ".", "_convert_to_dict", "(", "c", ",", "replace_value_names", ")", "]", "if", "term", ".", "value", ":", "if", "replace_value_names", ":", "d", "[", "term", ".", "term_value_name", ".", "lower", "(", ")", "]", "=", "term", ".", "value", "else", ":", "d", "[", "'@value'", "]", "=", "term", ".", "value", "return", "d", "else", ":", "return", "term", ".", "value" ]
38.203125
29.671875
def construct_time_based_cache_middleware( cache_class, cache_expire_seconds=15, rpc_whitelist=TIME_BASED_CACHE_RPC_WHITELIST, should_cache_fn=_should_cache): """ Constructs a middleware which caches responses based on the request ``method`` and ``params`` for a maximum amount of time as specified :param cache: Any dictionary-like object :param cache_expire_seconds: The number of seconds an item may be cached before it should expire. :param rpc_whitelist: A set of RPC methods which may have their responses cached. :param should_cache_fn: A callable which accepts ``method`` ``params`` and ``response`` and returns a boolean as to whether the response should be cached. """ def time_based_cache_middleware(make_request, web3): cache = cache_class() lock = threading.Lock() def middleware(method, params): lock_acquired = lock.acquire(blocking=False) try: if lock_acquired and method in rpc_whitelist: cache_key = generate_cache_key((method, params)) if cache_key in cache: # check that the cached response is not expired. cached_at, cached_response = cache[cache_key] cached_for = time.time() - cached_at if cached_for <= cache_expire_seconds: return cached_response else: del cache[cache_key] # cache either missed or expired so make the request. response = make_request(method, params) if should_cache_fn(method, params, response): cache[cache_key] = (time.time(), response) return response else: return make_request(method, params) finally: if lock_acquired: lock.release() return middleware return time_based_cache_middleware
[ "def", "construct_time_based_cache_middleware", "(", "cache_class", ",", "cache_expire_seconds", "=", "15", ",", "rpc_whitelist", "=", "TIME_BASED_CACHE_RPC_WHITELIST", ",", "should_cache_fn", "=", "_should_cache", ")", ":", "def", "time_based_cache_middleware", "(", "make_request", ",", "web3", ")", ":", "cache", "=", "cache_class", "(", ")", "lock", "=", "threading", ".", "Lock", "(", ")", "def", "middleware", "(", "method", ",", "params", ")", ":", "lock_acquired", "=", "lock", ".", "acquire", "(", "blocking", "=", "False", ")", "try", ":", "if", "lock_acquired", "and", "method", "in", "rpc_whitelist", ":", "cache_key", "=", "generate_cache_key", "(", "(", "method", ",", "params", ")", ")", "if", "cache_key", "in", "cache", ":", "# check that the cached response is not expired.", "cached_at", ",", "cached_response", "=", "cache", "[", "cache_key", "]", "cached_for", "=", "time", ".", "time", "(", ")", "-", "cached_at", "if", "cached_for", "<=", "cache_expire_seconds", ":", "return", "cached_response", "else", ":", "del", "cache", "[", "cache_key", "]", "# cache either missed or expired so make the request.", "response", "=", "make_request", "(", "method", ",", "params", ")", "if", "should_cache_fn", "(", "method", ",", "params", ",", "response", ")", ":", "cache", "[", "cache_key", "]", "=", "(", "time", ".", "time", "(", ")", ",", "response", ")", "return", "response", "else", ":", "return", "make_request", "(", "method", ",", "params", ")", "finally", ":", "if", "lock_acquired", ":", "lock", ".", "release", "(", ")", "return", "middleware", "return", "time_based_cache_middleware" ]
40.45098
20.254902
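A minimal usage sketch for the factory above. The Web3 instance, HTTP endpoint, and the middleware_onion registry are assumptions taken from web3.py's documented API rather than from this record:

from web3 import Web3  # assumed dependency

# Hypothetical local node endpoint.
w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))

# Cache whitelisted RPC responses in a plain dict for up to 60 seconds.
cache_middleware = construct_time_based_cache_middleware(
    cache_class=dict,
    cache_expire_seconds=60,
)
w3.middleware_onion.add(cache_middleware)  # middleware_onion is the web3.py v5+ registry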
def sendMessage(self, MsgType, CorpNum, Sender, SenderName, Subject, Contents, Messages, reserveDT, adsYN=False,
                UserID=None, RequestNum=None):
    """ Send text messages.
        args
            MsgType : message type (short: SMS, long: LMS, short/long auto: XMS)
            CorpNum : Popbill member business registration number
            Sender : sender number (for bulk sending)
            SenderName : sender name (for bulk sending)
            Subject : subject for long messages (for bulk sending)
            Contents : body for long messages (for bulk sending)
            Messages : array of per-recipient send information
            reserveDT : reserved send time (format: yyyyMMddHHmmss)
            adsYN : True if the message is an advertisement
            UserID : Popbill member user ID
            RequestNum : client-assigned request number
        return
            receipt number (receiptNum)
        raise
            PopbillException
    """
    if MsgType is None or MsgType == '':
        raise PopbillException(-99999999, "The message type was not provided.")
    if Messages is None or len(Messages) < 1:
        raise PopbillException(-99999999, "No messages to send were provided.")

    req = {}

    if Sender is not None and Sender != '':
        req['snd'] = Sender
    if SenderName is not None and SenderName != '':
        req['sndnm'] = SenderName
    if Contents is not None and Contents != '':
        req['content'] = Contents
    if Subject is not None and Subject != '':
        req['subject'] = Subject
    if reserveDT is not None and reserveDT != '':
        req['sndDT'] = reserveDT
    if Messages is not None and Messages != '':
        req['msgs'] = Messages
    if RequestNum is not None and RequestNum != '':
        req['requestnum'] = RequestNum
    if adsYN:
        req['adsYN'] = True

    postData = self._stringtify(req)

    result = self._httppost('/' + MsgType, postData, CorpNum, UserID)
    return result.receiptNum
[ "def", "sendMessage", "(", "self", ",", "MsgType", ",", "CorpNum", ",", "Sender", ",", "SenderName", ",", "Subject", ",", "Contents", ",", "Messages", ",", "reserveDT", ",", "adsYN", "=", "False", ",", "UserID", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "if", "MsgType", "==", "None", "or", "MsgType", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"๋ฌธ์ž ์ „์†ก ์œ ํ˜•์ด ์ž…๋ ฅ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค.\")\r", "", "if", "Messages", "==", "None", "or", "len", "(", "Messages", ")", "<", "1", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"์ „์†กํ•  ๋ฉ”์‹œ์ง€๊ฐ€ ์ž…๋ ฅ๋˜์ง€ ์•Š์•˜์Šต๋‹ˆ๋‹ค.\")\r", "", "req", "=", "{", "}", "if", "Sender", "!=", "None", "or", "Sender", "!=", "''", ":", "req", "[", "'snd'", "]", "=", "Sender", "if", "SenderName", "!=", "None", "or", "SenderName", "!=", "''", ":", "req", "[", "'sndnm'", "]", "=", "SenderName", "if", "Contents", "!=", "None", "or", "Contents", "!=", "''", ":", "req", "[", "'content'", "]", "=", "Contents", "if", "Subject", "!=", "None", "or", "Subject", "!=", "''", ":", "req", "[", "'subject'", "]", "=", "Subject", "if", "reserveDT", "!=", "None", "or", "reserveDT", "!=", "''", ":", "req", "[", "'sndDT'", "]", "=", "reserveDT", "if", "Messages", "!=", "None", "or", "Messages", "!=", "''", ":", "req", "[", "'msgs'", "]", "=", "Messages", "if", "RequestNum", "!=", "None", "or", "RequestNum", "!=", "''", ":", "req", "[", "'requestnum'", "]", "=", "RequestNum", "if", "adsYN", ":", "req", "[", "'adsYN'", "]", "=", "True", "postData", "=", "self", ".", "_stringtify", "(", "req", ")", "result", "=", "self", ".", "_httppost", "(", "'/'", "+", "MsgType", ",", "postData", ",", "CorpNum", ",", "UserID", ")", "return", "result", ".", "receiptNum" ]
36.583333
14.791667
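A hedged sketch of how the guards above are exercised. The MessageService constructor, the credentials, and the per-recipient keys ('rcv', 'rcvnm', 'msg') are illustrative assumptions, not taken from this record:

# Hypothetical Popbill client and credentials.
messageService = MessageService("YourLinkID", "YourSecretKey")

# Per-recipient send information; the key names below are assumptions for illustration.
messages = [
    {"rcv": "01012345678", "rcvnm": "Receiver Name", "msg": "Hello from Popbill"},
]

receiptNum = messageService.sendMessage(
    MsgType="SMS",
    CorpNum="1234567890",
    Sender="07012345678",
    SenderName="My Company",
    Subject="",
    Contents="",
    Messages=messages,
    reserveDT="",
)
print(receiptNum)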
def GetFileEntryByPathSpec(self, path_spec): """Retrieves a file entry for a path specification. Args: path_spec (PathSpec): path specification. Returns: BDEFileEntry: file entry or None. """ return bde_file_entry.BDEFileEntry( self._resolver_context, self, path_spec, is_root=True, is_virtual=True)
[ "def", "GetFileEntryByPathSpec", "(", "self", ",", "path_spec", ")", ":", "return", "bde_file_entry", ".", "BDEFileEntry", "(", "self", ".", "_resolver_context", ",", "self", ",", "path_spec", ",", "is_root", "=", "True", ",", "is_virtual", "=", "True", ")" ]
30.090909
17.363636
def _cimdatetime_representer(dumper, cimdatetime): """ PyYAML representer function for CIMDateTime objects. This is needed for yaml.safe_dump() to support CIMDateTime. """ cimdatetime_str = str(cimdatetime) node = dumper.represent_scalar(CIMDATETIME_TAG, cimdatetime_str) return node
[ "def", "_cimdatetime_representer", "(", "dumper", ",", "cimdatetime", ")", ":", "cimdatetime_str", "=", "str", "(", "cimdatetime", ")", "node", "=", "dumper", ".", "represent_scalar", "(", "CIMDATETIME_TAG", ",", "cimdatetime_str", ")", "return", "node" ]
38
13
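The representer above only takes effect once it is registered with a dumper class. A minimal sketch, assuming CIMDateTime and CIMDATETIME_TAG come from the surrounding module and that the timestamp string is merely illustrative:

import yaml

# Register the representer so yaml.safe_dump() can serialize CIMDateTime values.
yaml.SafeDumper.add_representer(CIMDateTime, _cimdatetime_representer)

document = {"timestamp": CIMDateTime("20190131120000.000000+000")}  # hypothetical value
print(yaml.safe_dump(document))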
def phiME_dens(R,z,phi,dens,Sigma,dSigmadR,d2SigmadR2,hz,Hz,dHzdz,Sigma_amp): """The density corresponding to phi_ME""" r= numpy.sqrt(R**2.+z**2.) out= dens(R,z,phi) for a,s,ds,d2s,h,H,dH \ in zip(Sigma_amp,Sigma,dSigmadR,d2SigmadR2,hz,Hz,dHzdz): out-= a*(s(r)*h(z)+d2s(r)*H(z)+2./r*ds(r)*(H(z)+z*dH(z))) return out
[ "def", "phiME_dens", "(", "R", ",", "z", ",", "phi", ",", "dens", ",", "Sigma", ",", "dSigmadR", ",", "d2SigmadR2", ",", "hz", ",", "Hz", ",", "dHzdz", ",", "Sigma_amp", ")", ":", "r", "=", "numpy", ".", "sqrt", "(", "R", "**", "2.", "+", "z", "**", "2.", ")", "out", "=", "dens", "(", "R", ",", "z", ",", "phi", ")", "for", "a", ",", "s", ",", "ds", ",", "d2s", ",", "h", ",", "H", ",", "dH", "in", "zip", "(", "Sigma_amp", ",", "Sigma", ",", "dSigmadR", ",", "d2SigmadR2", ",", "hz", ",", "Hz", ",", "dHzdz", ")", ":", "out", "-=", "a", "*", "(", "s", "(", "r", ")", "*", "h", "(", "z", ")", "+", "d2s", "(", "r", ")", "*", "H", "(", "z", ")", "+", "2.", "/", "r", "*", "ds", "(", "r", ")", "*", "(", "H", "(", "z", ")", "+", "z", "*", "dH", "(", "z", ")", ")", ")", "return", "out" ]
43.5
19.625
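Transcribing the loop body into equation form may make the correction term easier to read; with primes denoting the radial and vertical derivatives passed in as dSigmadR, d2SigmadR2, and dHzdz, and a_i the entries of Sigma_amp:

\rho_{\mathrm{ME}}(R,z,\phi) = \rho(R,z,\phi) - \sum_i a_i \left[ \Sigma_i(r)\,h_i(z) + \Sigma_i''(r)\,H_i(z) + \frac{2}{r}\,\Sigma_i'(r)\bigl(H_i(z) + z\,H_i'(z)\bigr) \right], \qquad r = \sqrt{R^2 + z^2}.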
def get_channel_property(self, channel_id, property_name): '''This function returns the data stored under the property name from the given channel. Parameters ---------- channel_id: int The channel id for which the property will be returned property_name: str A property stored by the RecordingExtractor (location, etc.) Returns ---------- property_data The data associated with the given property name. Could be many formats as specified by the user. ''' if isinstance(channel_id, (int, np.integer)): if channel_id in self.get_channel_ids(): if channel_id not in self._channel_properties: self._channel_properties[channel_id] = {} if isinstance(property_name, str): if property_name in list(self._channel_properties[channel_id].keys()): return self._channel_properties[channel_id][property_name] else: raise ValueError(str(property_name) + " has not been added to channel " + str(channel_id)) else: raise ValueError(str(property_name) + " must be a string") else: raise ValueError(str(channel_id) + " is not a valid channel_id") else: raise ValueError(str(channel_id) + " must be an int")
[ "def", "get_channel_property", "(", "self", ",", "channel_id", ",", "property_name", ")", ":", "if", "isinstance", "(", "channel_id", ",", "(", "int", ",", "np", ".", "integer", ")", ")", ":", "if", "channel_id", "in", "self", ".", "get_channel_ids", "(", ")", ":", "if", "channel_id", "not", "in", "self", ".", "_channel_properties", ":", "self", ".", "_channel_properties", "[", "channel_id", "]", "=", "{", "}", "if", "isinstance", "(", "property_name", ",", "str", ")", ":", "if", "property_name", "in", "list", "(", "self", ".", "_channel_properties", "[", "channel_id", "]", ".", "keys", "(", ")", ")", ":", "return", "self", ".", "_channel_properties", "[", "channel_id", "]", "[", "property_name", "]", "else", ":", "raise", "ValueError", "(", "str", "(", "property_name", ")", "+", "\" has not been added to channel \"", "+", "str", "(", "channel_id", ")", ")", "else", ":", "raise", "ValueError", "(", "str", "(", "property_name", ")", "+", "\" must be a string\"", ")", "else", ":", "raise", "ValueError", "(", "str", "(", "channel_id", ")", "+", "\" is not a valid channel_id\"", ")", "else", ":", "raise", "ValueError", "(", "str", "(", "channel_id", ")", "+", "\" must be an int\"", ")" ]
44.75
26.5
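A short usage sketch for the accessor above; `recording` stands for any RecordingExtractor instance, and the 'location' property is only an example of something previously stored via set_channel_property:

# Store a property for channel 0, then read it back.
recording.set_channel_property(channel_id=0, property_name='location', value=[0.0, 50.0])
location = recording.get_channel_property(channel_id=0, property_name='location')

# Asking for an unknown property or an invalid channel raises ValueError, per the guards above.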
def triggers(self): """ Access the triggers :returns: twilio.rest.api.v2010.account.usage.trigger.TriggerList :rtype: twilio.rest.api.v2010.account.usage.trigger.TriggerList """ if self._triggers is None: self._triggers = TriggerList(self._version, account_sid=self._solution['account_sid'], ) return self._triggers
[ "def", "triggers", "(", "self", ")", ":", "if", "self", ".", "_triggers", "is", "None", ":", "self", ".", "_triggers", "=", "TriggerList", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'", "]", ",", ")", "return", "self", ".", "_triggers" ]
37.5
21.5
def add_route(route, endpoint=None, **kw):
    """Add a new JSON API route
    """
    def wrapper(f):
        try:
            router.DefaultRouter.add_url_rule(route,
                                              endpoint=endpoint,
                                              view_func=f,
                                              options=kw)
        except AssertionError as e:
            logger.warn("Failed to register route {}: {}".format(route, e))
        return f
    return wrapper
[ "def", "add_route", "(", "route", ",", "endpoint", "=", "None", ",", "*", "*", "kw", ")", ":", "def", "wrapper", "(", "f", ")", ":", "try", ":", "router", ".", "DefaultRouter", ".", "add_url_rule", "(", "route", ",", "endpoint", "=", "endpoint", ",", "view_func", "=", "f", ",", "options", "=", "kw", ")", "except", "AssertionError", ",", "e", ":", "logger", ".", "warn", "(", "\"Failed to register route {}: {}\"", ".", "format", "(", "route", ",", "e", ")", ")", "return", "f", "return", "wrapper" ]
37.230769
16.153846
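A sketch of the decorator above in use; the route path, endpoint name, handler signature, and the "methods" keyword are assumptions for illustration only:

@add_route("/myapp/hello", "myapp.hello", methods=["GET"])
def hello(context, request):
    """Hypothetical JSON API endpoint registered through add_route."""
    return {"message": "hello"}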
def status_load(): ''' Return load CLI Example: .. code-block:: bash salt '*' apcups.status_load ''' data = status() if 'LOADPCT' in data: load = data['LOADPCT'].split() if load[1].lower() == 'percent': return float(load[0]) return {'Error': 'Load not available.'}
[ "def", "status_load", "(", ")", ":", "data", "=", "status", "(", ")", "if", "'LOADPCT'", "in", "data", ":", "load", "=", "data", "[", "'LOADPCT'", "]", ".", "split", "(", ")", "if", "load", "[", "1", "]", ".", "lower", "(", ")", "==", "'percent'", ":", "return", "float", "(", "load", "[", "0", "]", ")", "return", "{", "'Error'", ":", "'Load not available.'", "}" ]
18.823529
21.529412