Columns: text (string, lengths 89 to 104k); code_tokens (list);
avg_line_len (float64, 7.91 to 980); score (float64, 0 to 630)
def get(self, name: str, sig: Tuple) -> Optional[object]:
    """
    Return the object representing name if it is cached

    :param name: name of object
    :param sig: unique signature of object
    :return: object if exists and signature matches
    """
    if name not in self._cache:
        return None
    if self._cache[name].sig != sig:
        del self._cache[name]
        self._update()
        return None
    with open(self._cache[name].loc, 'rb') as f:
        return pickle.load(f)
[ "def", "get", "(", "self", ",", "name", ":", "str", ",", "sig", ":", "Tuple", ")", "->", "Optional", "[", "object", "]", ":", "if", "name", "not", "in", "self", ".", "_cache", ":", "return", "None", "if", "self", ".", "_cache", "[", "name", "]", ".", "sig", "!=", "sig", ":", "del", "self", ".", "_cache", "[", "name", "]", "self", ".", "_update", "(", ")", "return", "None", "with", "open", "(", "self", ".", "_cache", "[", "name", "]", ".", "loc", ",", "'rb'", ")", "as", "f", ":", "return", "pickle", ".", "load", "(", "f", ")" ]
avg_line_len: 35.933333, score: 9.4
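A minimal usage sketch for get() above, assuming cache entries carry a sig and a loc (path to a pickle file); Entry and cache here are hypothetical stand-ins for the owning class:

    import pickle
    from collections import namedtuple

    Entry = namedtuple('Entry', 'sig loc')  # hypothetical cache record

    with open('/tmp/obj.pkl', 'wb') as f:
        pickle.dump({'answer': 42}, f)
    cache._cache['result'] = Entry(sig=('v1',), loc='/tmp/obj.pkl')
    obj = cache.get('result', ('v1',))  # -> {'answer': 42}; a stale sig returns None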
def compute_wcs(key, challenge):
    """
    Compute a WAMP-CRA authentication signature from an authentication
    challenge and a (derived) key.

    :param key: The key derived (via PBKDF2) from the secret.
    :type key: str/bytes
    :param challenge: The authentication challenge to sign.
    :type challenge: str/bytes

    :return: The authentication signature.
    :rtype: bytes
    """
    key = key.encode('utf8')
    challenge = challenge.encode('utf8')
    sig = hmac.new(key, challenge, hashlib.sha256).digest()
    return binascii.b2a_base64(sig).strip()
[ "def", "compute_wcs", "(", "key", ",", "challenge", ")", ":", "key", "=", "key", ".", "encode", "(", "'utf8'", ")", "challenge", "=", "challenge", ".", "encode", "(", "'utf8'", ")", "sig", "=", "hmac", ".", "new", "(", "key", ",", "challenge", ",", "hashlib", ".", "sha256", ")", ".", "digest", "(", ")", "return", "binascii", ".", "b2a_base64", "(", "sig", ")", ".", "strip", "(", ")" ]
avg_line_len: 32.588235, score: 14.705882
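For example, pairing compute_wcs with a PBKDF2-derived key (the PBKDF2 parameters here are illustrative, not the WAMP defaults):

    import binascii
    import hashlib

    raw = hashlib.pbkdf2_hmac('sha256', b'secret', b'salt', 1000)
    key = binascii.b2a_base64(raw).strip().decode()
    signature = compute_wcs(key, '{"nonce": "abc"}')  # bytes: base64-encoded HMAC-SHA256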
def set_dict_value(dictionary, keys, value):
    """
    Set a value in a (nested) dictionary by defining a list of keys.

    .. note:: Side-effects
              This function does not make a copy of dictionary, but directly
              edits it.

    Parameters
    ----------
    dictionary : dict
    keys : List[Any]
    value : object

    Returns
    -------
    dictionary : dict

    Examples
    --------
    >>> d = {'a': {'b': 'c', 'd': 'e'}}
    >>> expected = {'a': {'b': 'foobar', 'd': 'e'}}
    >>> set_dict_value(d, ['a', 'b'], 'foobar') == expected
    True
    """
    orig = dictionary
    for key in keys[:-1]:
        dictionary = dictionary.setdefault(key, {})
    dictionary[keys[-1]] = value
    return orig
[ "def", "set_dict_value", "(", "dictionary", ",", "keys", ",", "value", ")", ":", "orig", "=", "dictionary", "for", "key", "in", "keys", "[", ":", "-", "1", "]", ":", "dictionary", "=", "dictionary", ".", "setdefault", "(", "key", ",", "{", "}", ")", "dictionary", "[", "keys", "[", "-", "1", "]", "]", "=", "value", "return", "orig" ]
avg_line_len: 23.533333, score: 21.533333
def tag_structure(tag, site):
    """
    A tag structure.
    """
    return {'tag_id': tag.pk,
            'name': tag.name,
            'count': tag.count,
            'slug': tag.name,
            'html_url': '%s://%s%s' % (
                PROTOCOL, site.domain,
                reverse('zinnia:tag_detail', args=[tag.name])),
            'rss_url': '%s://%s%s' % (
                PROTOCOL, site.domain,
                reverse('zinnia:tag_feed', args=[tag.name]))}
[ "def", "tag_structure", "(", "tag", ",", "site", ")", ":", "return", "{", "'tag_id'", ":", "tag", ".", "pk", ",", "'name'", ":", "tag", ".", "name", ",", "'count'", ":", "tag", ".", "count", ",", "'slug'", ":", "tag", ".", "name", ",", "'html_url'", ":", "'%s://%s%s'", "%", "(", "PROTOCOL", ",", "site", ".", "domain", ",", "reverse", "(", "'zinnia:tag_detail'", ",", "args", "=", "[", "tag", ".", "name", "]", ")", ")", ",", "'rss_url'", ":", "'%s://%s%s'", "%", "(", "PROTOCOL", ",", "site", ".", "domain", ",", "reverse", "(", "'zinnia:tag_feed'", ",", "args", "=", "[", "tag", ".", "name", "]", ")", ")", "}" ]
avg_line_len: 31.333333, score: 10
def from_labeled_point(rdd, categorical=False, nb_classes=None):
    """Convert a LabeledPoint RDD back to a pair of numpy arrays

    :param rdd: LabeledPoint RDD
    :param categorical: boolean, if labels should be one-hot encoded when returned
    :param nb_classes: optional int, indicating the number of class labels
    :return: pair of numpy arrays, features and labels
    """
    features = np.asarray(
        rdd.map(lambda lp: from_vector(lp.features)).collect())
    labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')
    if categorical:
        if not nb_classes:
            nb_classes = np.max(labels) + 1
        temp = np.zeros((len(labels), nb_classes))
        for i, label in enumerate(labels):
            temp[i, label] = 1.
        labels = temp
    return features, labels
[ "def", "from_labeled_point", "(", "rdd", ",", "categorical", "=", "False", ",", "nb_classes", "=", "None", ")", ":", "features", "=", "np", ".", "asarray", "(", "rdd", ".", "map", "(", "lambda", "lp", ":", "from_vector", "(", "lp", ".", "features", ")", ")", ".", "collect", "(", ")", ")", "labels", "=", "np", ".", "asarray", "(", "rdd", ".", "map", "(", "lambda", "lp", ":", "lp", ".", "label", ")", ".", "collect", "(", ")", ",", "dtype", "=", "'int32'", ")", "if", "categorical", ":", "if", "not", "nb_classes", ":", "nb_classes", "=", "np", ".", "max", "(", "labels", ")", "+", "1", "temp", "=", "np", ".", "zeros", "(", "(", "len", "(", "labels", ")", ",", "nb_classes", ")", ")", "for", "i", ",", "label", "in", "enumerate", "(", "labels", ")", ":", "temp", "[", "i", ",", "label", "]", "=", "1.", "labels", "=", "temp", "return", "features", ",", "labels" ]
avg_line_len: 42.210526, score: 17.210526
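A hedged usage sketch for from_labeled_point, assuming a live SparkContext sc and pyspark's MLlib LabeledPoint:

    from pyspark.mllib.regression import LabeledPoint

    rdd = sc.parallelize([LabeledPoint(0, [1.0, 2.0]),
                          LabeledPoint(1, [3.0, 4.0])])
    X, y = from_labeled_point(rdd, categorical=True)
    # X.shape == (2, 2); y is one-hot, e.g. [[1., 0.], [0., 1.]]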
def get_config_h_filename():
    """Return the path of pyconfig.h."""
    if _PYTHON_BUILD:
        if os.name == "nt":
            inc_dir = os.path.join(_PROJECT_BASE, "PC")
        else:
            inc_dir = _PROJECT_BASE
    else:
        inc_dir = get_path('platinclude')
    return os.path.join(inc_dir, 'pyconfig.h')
[ "def", "get_config_h_filename", "(", ")", ":", "if", "_PYTHON_BUILD", ":", "if", "os", ".", "name", "==", "\"nt\"", ":", "inc_dir", "=", "os", ".", "path", ".", "join", "(", "_PROJECT_BASE", ",", "\"PC\"", ")", "else", ":", "inc_dir", "=", "_PROJECT_BASE", "else", ":", "inc_dir", "=", "get_path", "(", "'platinclude'", ")", "return", "os", ".", "path", ".", "join", "(", "inc_dir", ",", "'pyconfig.h'", ")" ]
avg_line_len: 31.5, score: 12.9
def save(self):
    """
    Apply a series of actions from a set of (action, arg) tuples, probably
    as parsed from a URL. Each action is a code into PROCESSORS. Then
    save the mogrified image.
    """
    from settings import PROCESSORS
    from .filesystem import makedirs

    if self.im is None:
        # If we got here something very strange is going on that I can't even
        # predict.
        return  # pragma: no cover

    makedirs(self.output_path)
    for action, arg in self.actions:
        action = PROCESSORS[action]
        if self.frames:
            new_frames = []
            for frame in self.frames:
                new_frames.append(action.process(frame, arg))
            self.frames = new_frames
        else:
            self.im = action.process(self.im, arg)

    self.im = optimize.optimize(self.im, fmt=self.format, quality=self.quality)

    kwargs = {
        'format': self.format,
        'optimize': True,
        'quality': self.quality,
    }
    if self.format == 'jpeg':
        kwargs['progressive'] = True

    if self.filename.startswith('s3://'):
        import cStringIO
        from filesystem import s3
        output = cStringIO.StringIO()
        if self.frames:
            images2gif.write_gif(output, self.frames)
        else:
            self.im.save(output, **kwargs)
        output.reset()
        s3.put_file(output, self.filename)
    else:
        if self.frames:
            images2gif.write_gif(self.filename, self.frames)
        else:
            self.im.save(self.filename, **kwargs)
[ "def", "save", "(", "self", ")", ":", "from", "settings", "import", "PROCESSORS", "from", ".", "filesystem", "import", "makedirs", "if", "self", ".", "im", "is", "None", ":", "# If we got here something very strange is going on that I can't even", "# predict.", "return", "# pragma: no cover", "makedirs", "(", "self", ".", "output_path", ")", "for", "action", ",", "arg", "in", "self", ".", "actions", ":", "action", "=", "PROCESSORS", "[", "action", "]", "if", "self", ".", "frames", ":", "new_frames", "=", "[", "]", "for", "frame", "in", "self", ".", "frames", ":", "new_frames", ".", "append", "(", "action", ".", "process", "(", "frame", ",", "arg", ")", ")", "self", ".", "frames", "=", "new_frames", "else", ":", "self", ".", "im", "=", "action", ".", "process", "(", "self", ".", "im", ",", "arg", ")", "self", ".", "im", "=", "optimize", ".", "optimize", "(", "self", ".", "im", ",", "fmt", "=", "self", ".", "format", ",", "quality", "=", "self", ".", "quality", ")", "kwargs", "=", "{", "'format'", ":", "self", ".", "format", ",", "'optimize'", ":", "True", ",", "'quality'", ":", "self", ".", "quality", ",", "}", "if", "self", ".", "format", "==", "'jpeg'", ":", "kwargs", "[", "'progressive'", "]", "=", "True", "if", "self", ".", "filename", ".", "startswith", "(", "'s3://'", ")", ":", "import", "cStringIO", "from", "filesystem", "import", "s3", "output", "=", "cStringIO", ".", "StringIO", "(", ")", "if", "self", ".", "frames", ":", "images2gif", ".", "write_gif", "(", "output", ",", "self", ".", "frames", ")", "else", ":", "self", ".", "im", ".", "save", "(", "output", ",", "*", "*", "kwargs", ")", "output", ".", "reset", "(", ")", "s3", ".", "put_file", "(", "output", ",", "self", ".", "filename", ")", "else", ":", "if", "self", ".", "frames", ":", "images2gif", ".", "write_gif", "(", "self", ".", "filename", ",", "self", ".", "frames", ")", "else", ":", "self", ".", "im", ".", "save", "(", "self", ".", "filename", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 33.64, score: 15.68
def plot_ebands(self, **kwargs):
    """
    Plot the band structure. kwargs are passed to the plot method of
    :class:`ElectronBands`.

    Returns:
        `matplotlib` figure
    """
    with self.nscf_task.open_gsr() as gsr:
        return gsr.ebands.plot(**kwargs)
[ "def", "plot_ebands", "(", "self", ",", "*", "*", "kwargs", ")", ":", "with", "self", ".", "nscf_task", ".", "open_gsr", "(", ")", "as", "gsr", ":", "return", "gsr", ".", "ebands", ".", "plot", "(", "*", "*", "kwargs", ")" ]
avg_line_len: 31.888889, score: 16.333333
def is_upstart(conn):
    """
    This helper should only be used as a fallback (last resort) as it is not
    guaranteed that it will be absolutely correct.
    """
    # it may be possible that we may be systemd and the caller never checked
    # before so lets do that
    if is_systemd(conn):
        return False

    # get the initctl executable, if it doesn't exist we can't proceed so we
    # are probably not upstart
    initctl = conn.remote_module.which('initctl')
    if not initctl:
        return False

    # finally, try and get output from initctl that might hint this is an upstart
    # system. On a Ubuntu 14.04.2 system this would look like:
    # $ initctl version
    # init (upstart 1.12.1)
    stdout, stderr, _ = remoto.process.check(
        conn,
        [initctl, 'version'],
    )
    result_string = b' '.join(stdout)
    if b'upstart' in result_string:
        return True
    return False
[ "def", "is_upstart", "(", "conn", ")", ":", "# it may be possible that we may be systemd and the caller never checked", "# before so lets do that", "if", "is_systemd", "(", "conn", ")", ":", "return", "False", "# get the initctl executable, if it doesn't exist we can't proceed so we", "# are probably not upstart", "initctl", "=", "conn", ".", "remote_module", ".", "which", "(", "'initctl'", ")", "if", "not", "initctl", ":", "return", "False", "# finally, try and get output from initctl that might hint this is an upstart", "# system. On a Ubuntu 14.04.2 system this would look like:", "# $ initctl version", "# init (upstart 1.12.1)", "stdout", ",", "stderr", ",", "_", "=", "remoto", ".", "process", ".", "check", "(", "conn", ",", "[", "initctl", ",", "'version'", "]", ",", ")", "result_string", "=", "b' '", ".", "join", "(", "stdout", ")", "if", "b'upstart'", "in", "result_string", ":", "return", "True", "return", "False" ]
avg_line_len: 31.857143, score: 19.5
def sigma(htilde, psd=None, low_frequency_cutoff=None,
          high_frequency_cutoff=None):
    """ Return the sigma of the waveform. See sigmasq for more details.

    Parameters
    ----------
    htilde : TimeSeries or FrequencySeries
        The input vector containing a waveform.
    psd : {None, FrequencySeries}, optional
        The psd used to weight the accumulated power.
    low_frequency_cutoff : {None, float}, optional
        The frequency to begin considering waveform power.
    high_frequency_cutoff : {None, float}, optional
        The frequency to stop considering waveform power.

    Returns
    -------
    sigma : float
    """
    return sqrt(sigmasq(htilde, psd, low_frequency_cutoff,
                        high_frequency_cutoff))
[ "def", "sigma", "(", "htilde", ",", "psd", "=", "None", ",", "low_frequency_cutoff", "=", "None", ",", "high_frequency_cutoff", "=", "None", ")", ":", "return", "sqrt", "(", "sigmasq", "(", "htilde", ",", "psd", ",", "low_frequency_cutoff", ",", "high_frequency_cutoff", ")", ")" ]
avg_line_len: 36.05, score: 17.75
def replace_wrep(t:str) -> str:
    "Replace word repetitions in `t`."
    def _replace_wrep(m:Collection[str]) -> str:
        c,cc = m.groups()
        return f' {TK_WREP} {len(cc.split())+1} {c} '
    re_wrep = re.compile(r'(\b\w+\W+)(\1{3,})')
    return re_wrep.sub(_replace_wrep, t)
[ "def", "replace_wrep", "(", "t", ":", "str", ")", "->", "str", ":", "def", "_replace_wrep", "(", "m", ":", "Collection", "[", "str", "]", ")", "->", "str", ":", "c", ",", "cc", "=", "m", ".", "groups", "(", ")", "return", "f' {TK_WREP} {len(cc.split())+1} {c} '", "re_wrep", "=", "re", ".", "compile", "(", "r'(\\b\\w+\\W+)(\\1{3,})'", ")", "return", "re_wrep", ".", "sub", "(", "_replace_wrep", ",", "t", ")" ]
avg_line_len: 40.285714, score: 7.714286
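For instance, assuming TK_WREP is the repetition-marker token (fastai's default is 'xxwrep'):

    TK_WREP = 'xxwrep'  # assumed marker token
    replace_wrep('word word word word word !')
    # -> ' xxwrep 5 word  !' (marker, repeat count, then one copy of the word)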
def upload(self, params={}):
    """Start uploading the file until the upload completes or errors.
       This is the main method to use if you do not care about the
       state of the process.

       Args:
           params: a dict object describing video info, e.g. title,
               tags, description, category.
               For all video params see the doc of prepare_video_params.

       Returns:
           the video_id if uploaded successfully
    """
    if self.upload_token is not None:
        # resume upload
        status = self.check()
        if status['status'] != 4:
            return self.commit()
        else:
            self.new_slice()
            while self.slice_task_id != 0:
                self.upload_slice()
            return self.commit()
    else:
        # new upload
        self.create(self.prepare_video_params(**params))
        self.create_file()
        self.new_slice()
        while self.slice_task_id != 0:
            self.upload_slice()
        return self.commit()
[ "def", "upload", "(", "self", ",", "params", "=", "{", "}", ")", ":", "if", "self", ".", "upload_token", "is", "not", "None", ":", "# resume upload", "status", "=", "self", ".", "check", "(", ")", "if", "status", "[", "'status'", "]", "!=", "4", ":", "return", "self", ".", "commit", "(", ")", "else", ":", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")", "else", ":", "# new upload", "self", ".", "create", "(", "self", ".", "prepare_video_params", "(", "*", "*", "params", ")", ")", "self", ".", "create_file", "(", ")", "self", ".", "new_slice", "(", ")", "while", "self", ".", "slice_task_id", "!=", "0", ":", "self", ".", "upload_slice", "(", ")", "return", "self", ".", "commit", "(", ")" ]
avg_line_len: 34.935484, score: 13.612903
def get_stp_mst_detail_output_msti_port_if_state(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    if_state = ET.SubElement(port, "if-state")
    if_state.text = kwargs.pop('if_state')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_stp_mst_detail_output_msti_port_if_state", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=", "get_stp_mst_detail", "output", "=", "ET", ".", "SubElement", "(", "get_stp_mst_detail", ",", "\"output\"", ")", "msti", "=", "ET", ".", "SubElement", "(", "output", ",", "\"msti\"", ")", "instance_id_key", "=", "ET", ".", "SubElement", "(", "msti", ",", "\"instance-id\"", ")", "instance_id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance_id'", ")", "port", "=", "ET", ".", "SubElement", "(", "msti", ",", "\"port\"", ")", "if_state", "=", "ET", ".", "SubElement", "(", "port", ",", "\"if-state\"", ")", "if_state", ".", "text", "=", "kwargs", ".", "pop", "(", "'if_state'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
avg_line_len: 42.8125, score: 12.375
def yml_fnc(fname, *args, **options):
    """
    :param fname: "load" or "dump", not checked but it should be OK.
        see also :func:`yml_load` and :func:`yml_dump`
    :param args: [stream] for load or [cnf, stream] for dump
    :param options: keyword args may contain "ac_safe" to load/dump safely
    """
    options = common.filter_from_options("ac_dict", options)

    if "ac_safe" in options:
        options["typ"] = "safe"  # Override it.

    iopts = anyconfig.utils.filter_options(_YAML_INIT_KWARGS, options)
    oopts = anyconfig.utils.filter_options(_YAML_INSTANCE_MEMBERS, options)

    yml = ryaml.YAML(**iopts)
    for attr, val in oopts.items():
        setattr(yml, attr, val)  # e.g. yml.preserve_quotes = True

    return getattr(yml, fname)(*args)
[ "def", "yml_fnc", "(", "fname", ",", "*", "args", ",", "*", "*", "options", ")", ":", "options", "=", "common", ".", "filter_from_options", "(", "\"ac_dict\"", ",", "options", ")", "if", "\"ac_safe\"", "in", "options", ":", "options", "[", "\"typ\"", "]", "=", "\"safe\"", "# Override it.", "iopts", "=", "anyconfig", ".", "utils", ".", "filter_options", "(", "_YAML_INIT_KWARGS", ",", "options", ")", "oopts", "=", "anyconfig", ".", "utils", ".", "filter_options", "(", "_YAML_INSTANCE_MEMBERS", ",", "options", ")", "yml", "=", "ryaml", ".", "YAML", "(", "*", "*", "iopts", ")", "for", "attr", ",", "val", "in", "oopts", ".", "items", "(", ")", ":", "setattr", "(", "yml", ",", "attr", ",", "val", ")", "# e.g. yml.preserve_quotes = True", "return", "getattr", "(", "yml", ",", "fname", ")", "(", "*", "args", ")" ]
avg_line_len: 36.238095, score: 20.047619
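A usage sketch for yml_fnc, assuming the surrounding module's ruamel.yaml setup is in place:

    import io

    cnf = yml_fnc("load", io.StringIO("a: 1\nb: [2, 3]\n"), ac_safe=True)
    # -> {'a': 1, 'b': [2, 3]}, parsed via a ruamel.yaml YAML(typ='safe') instance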
def find_release(package, releases, dependencies=None):
    """Return the best release."""
    dependencies = dependencies if dependencies is not None else {}
    for release in releases:
        url = release['url']
        old_priority = dependencies.get(package, {}).get('priority', 0)
        for suffix, priority in SUFFIXES.items():
            if url.endswith(suffix):
                if old_priority < priority:
                    sha256 = release['digests']['sha256']
                    dependencies[package] = {
                        'package': package,
                        'url': url,
                        'sha256': sha256,
                        'priority': priority,
                    }
    return dependencies[package]
[ "def", "find_release", "(", "package", ",", "releases", ",", "dependencies", "=", "None", ")", ":", "dependencies", "=", "dependencies", "if", "dependencies", "is", "not", "None", "else", "{", "}", "for", "release", "in", "releases", ":", "url", "=", "release", "[", "'url'", "]", "old_priority", "=", "dependencies", ".", "get", "(", "package", ",", "{", "}", ")", ".", "get", "(", "'priority'", ",", "0", ")", "for", "suffix", ",", "priority", "in", "SUFFIXES", ".", "items", "(", ")", ":", "if", "url", ".", "endswith", "(", "suffix", ")", ":", "if", "old_priority", "<", "priority", ":", "sha256", "=", "release", "[", "'digests'", "]", "[", "'sha256'", "]", "dependencies", "[", "package", "]", "=", "{", "'package'", ":", "package", ",", "'url'", ":", "url", ",", "'sha256'", ":", "sha256", ",", "'priority'", ":", "priority", ",", "}", "return", "dependencies", "[", "package", "]" ]
avg_line_len: 38.421053, score: 13.473684
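To illustrate the priority logic of find_release, with a hypothetical SUFFIXES table (the real one lives in the same module):

    SUFFIXES = {'.tar.gz': 1, '-py3-none-any.whl': 2}  # hypothetical priorities

    releases = [
        {'url': 'pkg-1.0.tar.gz', 'digests': {'sha256': 'aa'}},
        {'url': 'pkg-1.0-py3-none-any.whl', 'digests': {'sha256': 'bb'}},
    ]
    find_release('pkg', releases)  # keeps the .whl entry (higher priority)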
def _prepare_io_handler(self, handler):
    """Call the `interfaces.IOHandler.prepare` method and
    remove the handler from unprepared handler list when done.
    """
    logger.debug(" preparing handler: {0!r}".format(handler))
    self._unprepared_pending.discard(handler)
    ret = handler.prepare()
    logger.debug("   prepare result: {0!r}".format(ret))
    if isinstance(ret, HandlerReady):
        del self._unprepared_handlers[handler]
        prepared = True
    elif isinstance(ret, PrepareAgain):
        if ret.timeout == 0:
            tag = glib.idle_add(self._prepare_io_handler_cb, handler)
            self._prepare_sources[handler] = tag
        elif ret.timeout is not None:
            timeout = ret.timeout
            timeout = int(timeout * 1000)
            if not timeout:
                timeout = 1
            tag = glib.timeout_add(timeout, self._prepare_io_handler_cb,
                                   handler)
            self._prepare_sources[handler] = tag
        else:
            self._unprepared_pending.add(handler)
        prepared = False
    else:
        raise TypeError("Unexpected result type from prepare()")
    return prepared
[ "def", "_prepare_io_handler", "(", "self", ",", "handler", ")", ":", "logger", ".", "debug", "(", "\" preparing handler: {0!r}\"", ".", "format", "(", "handler", ")", ")", "self", ".", "_unprepared_pending", ".", "discard", "(", "handler", ")", "ret", "=", "handler", ".", "prepare", "(", ")", "logger", ".", "debug", "(", "\" prepare result: {0!r}\"", ".", "format", "(", "ret", ")", ")", "if", "isinstance", "(", "ret", ",", "HandlerReady", ")", ":", "del", "self", ".", "_unprepared_handlers", "[", "handler", "]", "prepared", "=", "True", "elif", "isinstance", "(", "ret", ",", "PrepareAgain", ")", ":", "if", "ret", ".", "timeout", "==", "0", ":", "tag", "=", "glib", ".", "idle_add", "(", "self", ".", "_prepare_io_handler_cb", ",", "handler", ")", "self", ".", "_prepare_sources", "[", "handler", "]", "=", "tag", "elif", "ret", ".", "timeout", "is", "not", "None", ":", "timeout", "=", "ret", ".", "timeout", "timeout", "=", "int", "(", "timeout", "*", "1000", ")", "if", "not", "timeout", ":", "timeout", "=", "1", "tag", "=", "glib", ".", "timeout_add", "(", "timeout", ",", "self", ".", "_prepare_io_handler_cb", ",", "handler", ")", "self", ".", "_prepare_sources", "[", "handler", "]", "=", "tag", "else", ":", "self", ".", "_unprepared_pending", ".", "add", "(", "handler", ")", "prepared", "=", "False", "else", ":", "raise", "TypeError", "(", "\"Unexpected result type from prepare()\"", ")", "return", "prepared" ]
avg_line_len: 44.517241, score: 13.827586
def videoWrite(path, imgs, levels=None, shape=None, frames=15,
               annotate_names=None, lut=None, updateFn=None):
    '''
    TODO
    '''
    frames = int(frames)
    if annotate_names is not None:
        assert len(annotate_names) == len(imgs)

    if levels is None:
        if imgs[0].dtype == np.uint8:
            levels = 0, 255
        elif imgs[0].dtype == np.uint16:
            levels = 0, 2**16 - 1
        else:
            levels = np.min(imgs), np.max(imgs)

    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    h, w = imgs.shape[1:3]
    if shape and shape != (h, w):
        h, w = shape
        imgs = [cv2.resize(i, (w, h)) for i in imgs]

    assert path[-3:] in ('avi', 'png'), 'video export only supports *.avi or *.png'
    isVideo = path[-3:] == 'avi'
    if isVideo:
        cap = cv2.VideoCapture(0)  # im.ndim==4)
        out = cv2.VideoWriter(path, fourcc, frames, (w, h), isColor=1)

    times = np.linspace(0, len(imgs) - 1, len(imgs) * frames)
    interpolator = LinearInterpolateImageStack(imgs)

    if lut is not None:
        lut = lut(imgs[0])

    for n, time in enumerate(times):
        if updateFn:
            # update progress:
            updateFn.emit(100 * n / len(times))
        image = interpolator(time)
        cimg = makeRGBA(image, lut=lut, levels=levels)[0]
        cimg = cv2.cvtColor(cimg, cv2.COLOR_RGBA2BGR)
        if annotate_names:
            text = annotate_names[n // frames]
            alpha = 0.5
            org = (0, cimg.shape[0])
            fontFace = cv2.FONT_HERSHEY_PLAIN
            fontScale = 2
            thickness = 3
            putTextAlpha(cimg, text, alpha, org, fontFace,
                         fontScale, (0, 255, 0), thickness)
        if isVideo:
            out.write(cimg)
        else:
            cv2.imwrite('%s_%i_%.3f.png' % (path[:-4], n, time), cimg)

    if isVideo:
        cap.release()
        out.release()
[ "def", "videoWrite", "(", "path", ",", "imgs", ",", "levels", "=", "None", ",", "shape", "=", "None", ",", "frames", "=", "15", ",", "annotate_names", "=", "None", ",", "lut", "=", "None", ",", "updateFn", "=", "None", ")", ":", "frames", "=", "int", "(", "frames", ")", "if", "annotate_names", "is", "not", "None", ":", "assert", "len", "(", "annotate_names", ")", "==", "len", "(", "imgs", ")", "if", "levels", "is", "None", ":", "if", "imgs", "[", "0", "]", ".", "dtype", "==", "np", ".", "uint8", ":", "levels", "=", "0", ",", "255", "elif", "imgs", "[", "0", "]", ".", "dtype", "==", "np", ".", "uint16", ":", "levels", "=", "0", ",", "2", "**", "16", "-", "1", "else", ":", "levels", "=", "np", ".", "min", "(", "imgs", ")", ",", "np", ".", "max", "(", "imgs", ")", "fourcc", "=", "cv2", ".", "VideoWriter_fourcc", "(", "*", "'XVID'", ")", "h", ",", "w", "=", "imgs", ".", "shape", "[", "1", ":", "3", "]", "if", "shape", "and", "shape", "!=", "(", "h", ",", "w", ")", ":", "h", ",", "w", "=", "shape", "imgs", "=", "[", "cv2", ".", "resize", "(", "i", ",", "(", "w", ",", "h", ")", ")", "for", "i", "in", "imgs", "]", "assert", "path", "[", "-", "3", ":", "]", "in", "(", "'avi'", ",", "'png'", ")", ",", "'video export only supports *.avi or *.png'", "isVideo", "=", "path", "[", "-", "3", ":", "]", "==", "'avi'", "if", "isVideo", ":", "cap", "=", "cv2", ".", "VideoCapture", "(", "0", ")", "# im.ndim==4)\r", "out", "=", "cv2", ".", "VideoWriter", "(", "path", ",", "fourcc", ",", "frames", ",", "(", "w", ",", "h", ")", ",", "isColor", "=", "1", ")", "times", "=", "np", ".", "linspace", "(", "0", ",", "len", "(", "imgs", ")", "-", "1", ",", "len", "(", "imgs", ")", "*", "frames", ")", "interpolator", "=", "LinearInterpolateImageStack", "(", "imgs", ")", "if", "lut", "is", "not", "None", ":", "lut", "=", "lut", "(", "imgs", "[", "0", "]", ")", "for", "n", ",", "time", "in", "enumerate", "(", "times", ")", ":", "if", "updateFn", ":", "# update progress:\r", "updateFn", ".", "emit", "(", "100", "*", "n", "/", "len", "(", "times", ")", ")", "image", "=", "interpolator", "(", "time", ")", "cimg", "=", "makeRGBA", "(", "image", ",", "lut", "=", "lut", ",", "levels", "=", "levels", ")", "[", "0", "]", "cimg", "=", "cv2", ".", "cvtColor", "(", "cimg", ",", "cv2", ".", "COLOR_RGBA2BGR", ")", "if", "annotate_names", ":", "text", "=", "annotate_names", "[", "n", "//", "frames", "]", "alpha", "=", "0.5", "org", "=", "(", "0", ",", "cimg", ".", "shape", "[", "0", "]", ")", "fontFace", "=", "cv2", ".", "FONT_HERSHEY_PLAIN", "fontScale", "=", "2", "thickness", "=", "3", "putTextAlpha", "(", "cimg", ",", "text", ",", "alpha", ",", "org", ",", "fontFace", ",", "fontScale", ",", "(", "0", ",", "255", ",", "0", ")", ",", "thickness", ")", "if", "isVideo", ":", "out", ".", "write", "(", "cimg", ")", "else", ":", "cv2", ".", "imwrite", "(", "'%s_%i_%.3f.png'", "%", "(", "path", "[", ":", "-", "4", "]", ",", "n", ",", "time", ")", ",", "cimg", ")", "if", "isVideo", ":", "cap", ".", "release", "(", ")", "out", ".", "release", "(", ")" ]
avg_line_len: 29.691176, score: 18.191176
def derive(self, peerkey, **kwargs):
    """
    Derives shared key (DH,ECDH,VKO 34.10). Requires
    private key available

    @param peerkey - other key (may be public only)

    Keyword parameters are algorithm-specific
    """
    if not self.cansign:
        raise ValueError("No private key available")
    ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
    if ctx is None:
        raise PKeyError("Initializing derive context")
    if libcrypto.EVP_PKEY_derive_init(ctx) < 1:
        raise PKeyError("derive_init")

    # This is workaround around missing functionality in GOST engine
    # it provides only numeric control command to set UKM, not
    # string one.
    self._configure_context(ctx, kwargs, ["ukm"])
    if libcrypto.EVP_PKEY_derive_set_peer(ctx, peerkey.key) <= 0:
        raise PKeyError("Cannot set peer key")
    if "ukm" in kwargs:
        # We just hardcode numeric command to set UKM here
        if libcrypto.EVP_PKEY_CTX_ctrl(ctx, -1, 1 << 10, 8, 8,
                                       kwargs["ukm"]) <= 0:
            raise PKeyError("Cannot set UKM")
    keylen = c_long(0)
    if libcrypto.EVP_PKEY_derive(ctx, None, byref(keylen)) <= 0:
        raise PKeyError("computing shared key length")
    buf = create_string_buffer(keylen.value)
    if libcrypto.EVP_PKEY_derive(ctx, buf, byref(keylen)) <= 0:
        raise PKeyError("computing actual shared key")
    libcrypto.EVP_PKEY_CTX_free(ctx)
    return buf.raw[:int(keylen.value)]
[ "def", "derive", "(", "self", ",", "peerkey", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "cansign", ":", "raise", "ValueError", "(", "\"No private key available\"", ")", "ctx", "=", "libcrypto", ".", "EVP_PKEY_CTX_new", "(", "self", ".", "key", ",", "None", ")", "if", "ctx", "is", "None", ":", "raise", "PKeyError", "(", "\"Initailizing derive context\"", ")", "if", "libcrypto", ".", "EVP_PKEY_derive_init", "(", "ctx", ")", "<", "1", ":", "raise", "PKeyError", "(", "\"derive_init\"", ")", "# This is workaround around missing functionality in GOST engine", "# it provides only numeric control command to set UKM, not", "# string one.", "self", ".", "_configure_context", "(", "ctx", ",", "kwargs", ",", "[", "\"ukm\"", "]", ")", "if", "libcrypto", ".", "EVP_PKEY_derive_set_peer", "(", "ctx", ",", "peerkey", ".", "key", ")", "<=", "0", ":", "raise", "PKeyError", "(", "\"Cannot set peer key\"", ")", "if", "\"ukm\"", "in", "kwargs", ":", "# We just hardcode numeric command to set UKM here", "if", "libcrypto", ".", "EVP_PKEY_CTX_ctrl", "(", "ctx", ",", "-", "1", ",", "1", "<<", "10", ",", "8", ",", "8", ",", "kwargs", "[", "\"ukm\"", "]", ")", "<=", "0", ":", "raise", "PKeyError", "(", "\"Cannot set UKM\"", ")", "keylen", "=", "c_long", "(", "0", ")", "if", "libcrypto", ".", "EVP_PKEY_derive", "(", "ctx", ",", "None", ",", "byref", "(", "keylen", ")", ")", "<=", "0", ":", "raise", "PKeyError", "(", "\"computing shared key length\"", ")", "buf", "=", "create_string_buffer", "(", "keylen", ".", "value", ")", "if", "libcrypto", ".", "EVP_PKEY_derive", "(", "ctx", ",", "buf", ",", "byref", "(", "keylen", ")", ")", "<=", "0", ":", "raise", "PKeyError", "(", "\"computing actual shared key\"", ")", "libcrypto", ".", "EVP_PKEY_CTX_free", "(", "ctx", ")", "return", "buf", ".", "raw", "[", ":", "int", "(", "keylen", ".", "value", ")", "]" ]
avg_line_len: 43.5, score: 16.777778
def recv_loop(stream):
    """Yield Erlang terms from an input stream."""
    message = recv(stream)
    while message:
        yield message
        message = recv(stream)
[ "def", "recv_loop", "(", "stream", ")", ":", "message", "=", "recv", "(", "stream", ")", "while", "message", ":", "yield", "message", "message", "=", "recv", "(", "stream", ")" ]
avg_line_len: 27.833333, score: 13.833333
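Typical use of recv_loop, assuming recv decodes one term from a binary stream (as in Erlang port protocols):

    import sys

    for term in recv_loop(sys.stdin.buffer):
        print(term)  # handle each decoded term as it arrives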
def log_all(self, file):
    """Log all data received from RFLink to file."""
    global rflink_log
    if file is None:
        rflink_log = None
    else:
        log.debug('logging to: %s', file)
        rflink_log = open(file, 'a')
[ "def", "log_all", "(", "self", ",", "file", ")", ":", "global", "rflink_log", "if", "file", "==", "None", ":", "rflink_log", "=", "None", "else", ":", "log", ".", "debug", "(", "'logging to: %s'", ",", "file", ")", "rflink_log", "=", "open", "(", "file", ",", "'a'", ")" ]
avg_line_len: 32, score: 11.25
def parse(self, date, **kwargs):
    '''
    :param **kwargs: any kwargs accepted by dateutil.parse function.
    '''
    qualifiers = []
    if dateutil_parser is None:
        return None
    date = orig_date = date.strip()

    # various normalizations
    # TODO: call .lower() first
    date = date.replace('B.C.E.', 'BC')
    date = date.replace('BCE', 'BC')
    date = date.replace('B.C.', 'BC')
    date = date.replace('A.D.', 'AD')
    date = date.replace('C.E.', 'AD')
    date = date.replace('CE', 'AD')

    # deal with pre 0AD dates
    if date.startswith('-') or 'BC' in date or 'B.C.' in date:
        pre0AD = True
    else:
        pre0AD = False
    # BC seems to mess up parser
    date = date.replace('BC', '')

    # deal with circa: 'c.1950' or 'c1950'
    circa_match = re.match('([^a-zA-Z]*)c\.?\s*(\d+.*)', date)
    if circa_match:
        # remove circa bit
        qualifiers.append("Note 'circa'")
        date = ''.join(circa_match.groups())

    # deal with p1980 (what does this mean? it can appear in
    # field 008 of MARC records
    p_match = re.match("^p(\d+)", date)
    if p_match:
        date = date[1:]

    # Deal with uncertainty: '1985?'
    uncertainty_match = re.match('([0-9xX]{4})\?', date)
    if uncertainty_match:
        # remove the ?
        date = date[:-1]
        qualifiers.append('Uncertainty')

    # Parse the numbers intelligently
    # do not use std parser function as creates lots of default data
    res = dateutil_parser._parse(date, **kwargs)
    try:
        res = res[0]
    except:
        res = res

    if res is None:
        # Couldn't parse it
        return None

    # Note: Years of less than 3 digits not interpreted by
    # dateutil correctly
    # e.g. 87 -> 1987
    #      4  -> day 4 (no year)
    # Both cases are handled in this routine
    if res.year is None and res.day:
        year = res.day
    # If the whole date is simply two digits then dateutil_parser makes
    # it '86' -> '1986'. So strip off the '19'. (If the date specified
    # day/month then a two digit year is more likely to be this century
    # and so allow the '19' prefix to it.)
    elif self._numeric.match(date) and (len(date) == 2 or
                                        date.startswith('00')):
        year = res.year % 100
    else:
        year = res.year

    # finally add back in BC stuff
    if pre0AD:
        year = -year

    if not qualifiers:
        qualifier = ''
    else:
        qualifier = ', '.join(qualifiers) + (' : %s' % orig_date)
    return FlexiDate(year, res.month, res.day, res.hour, res.minute,
                     res.second, res.microsecond, qualifier=qualifier)
[ "def", "parse", "(", "self", ",", "date", ",", "*", "*", "kwargs", ")", ":", "qualifiers", "=", "[", "]", "if", "dateutil_parser", "is", "None", ":", "return", "None", "date", "=", "orig_date", "=", "date", ".", "strip", "(", ")", "# various normalizations", "# TODO: call .lower() first", "date", "=", "date", ".", "replace", "(", "'B.C.E.'", ",", "'BC'", ")", "date", "=", "date", ".", "replace", "(", "'BCE'", ",", "'BC'", ")", "date", "=", "date", ".", "replace", "(", "'B.C.'", ",", "'BC'", ")", "date", "=", "date", ".", "replace", "(", "'A.D.'", ",", "'AD'", ")", "date", "=", "date", ".", "replace", "(", "'C.E.'", ",", "'AD'", ")", "date", "=", "date", ".", "replace", "(", "'CE'", ",", "'AD'", ")", "# deal with pre 0AD dates", "if", "date", ".", "startswith", "(", "'-'", ")", "or", "'BC'", "in", "date", "or", "'B.C.'", "in", "date", ":", "pre0AD", "=", "True", "else", ":", "pre0AD", "=", "False", "# BC seems to mess up parser", "date", "=", "date", ".", "replace", "(", "'BC'", ",", "''", ")", "# deal with circa: 'c.1950' or 'c1950'", "circa_match", "=", "re", ".", "match", "(", "'([^a-zA-Z]*)c\\.?\\s*(\\d+.*)'", ",", "date", ")", "if", "circa_match", ":", "# remove circa bit", "qualifiers", ".", "append", "(", "\"Note 'circa'\"", ")", "date", "=", "''", ".", "join", "(", "circa_match", ".", "groups", "(", ")", ")", "# deal with p1980 (what does this mean? it can appear in", "# field 008 of MARC records", "p_match", "=", "re", ".", "match", "(", "\"^p(\\d+)\"", ",", "date", ")", "if", "p_match", ":", "date", "=", "date", "[", "1", ":", "]", "# Deal with uncertainty: '1985?'", "uncertainty_match", "=", "re", ".", "match", "(", "'([0-9xX]{4})\\?'", ",", "date", ")", "if", "uncertainty_match", ":", "# remove the ?", "date", "=", "date", "[", ":", "-", "1", "]", "qualifiers", ".", "append", "(", "'Uncertainty'", ")", "# Parse the numbers intelligently", "# do not use std parser function as creates lots of default data", "res", "=", "dateutil_parser", ".", "_parse", "(", "date", ",", "*", "*", "kwargs", ")", "try", ":", "res", "=", "res", "[", "0", "]", "except", ":", "res", "=", "res", "if", "res", "is", "None", ":", "# Couldn't parse it", "return", "None", "# Note: Years of less than 3 digits not interpreted by", "# dateutil correctly", "# e.g. 87 -> 1987", "# 4 -> day 4 (no year)", "# Both cases are handled in this routine", "if", "res", ".", "year", "is", "None", "and", "res", ".", "day", ":", "year", "=", "res", ".", "day", "# If the whole date is simply two digits then dateutil_parser makes", "# it '86' -> '1986'. So strip off the '19'. (If the date specified", "# day/month then a two digit year is more likely to be this century", "# and so allow the '19' prefix to it.)", "elif", "self", ".", "_numeric", ".", "match", "(", "date", ")", "and", "(", "len", "(", "date", ")", "==", "2", "or", "date", ".", "startswith", "(", "'00'", ")", ")", ":", "year", "=", "res", ".", "year", "%", "100", "else", ":", "year", "=", "res", ".", "year", "# finally add back in BC stuff", "if", "pre0AD", ":", "year", "=", "-", "year", "if", "not", "qualifiers", ":", "qualifier", "=", "''", "else", ":", "qualifier", "=", "', '", ".", "join", "(", "qualifiers", ")", "+", "(", "' : %s'", "%", "orig_date", ")", "return", "FlexiDate", "(", "year", ",", "res", ".", "month", ",", "res", ".", "day", ",", "res", ".", "hour", ",", "res", ".", "minute", ",", "res", ".", "second", ",", "res", ".", "microsecond", ",", "qualifier", "=", "qualifier", ")" ]
avg_line_len: 34.876543, score: 17.518519
def _add_to_conf(self, new_conf):
    """Add new configuration to self.conf.

    Adds configuration parameters in new_conf to self.conf.
    If they already existed in conf, overwrite them.

    :param new_conf: new configuration, to add
    """
    for section in new_conf:
        if section not in self.conf:
            self.conf[section] = new_conf[section]
        else:
            for param in new_conf[section]:
                self.conf[section][param] = new_conf[section][param]
[ "def", "_add_to_conf", "(", "self", ",", "new_conf", ")", ":", "for", "section", "in", "new_conf", ":", "if", "section", "not", "in", "self", ".", "conf", ":", "self", ".", "conf", "[", "section", "]", "=", "new_conf", "[", "section", "]", "else", ":", "for", "param", "in", "new_conf", "[", "section", "]", ":", "self", ".", "conf", "[", "section", "]", "[", "param", "]", "=", "new_conf", "[", "section", "]", "[", "param", "]" ]
avg_line_len: 34.666667, score: 17.266667
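The merge semantics of _add_to_conf, restated as a standalone sketch (sections are merged; parameter values from the new config win):

    conf = {'db': {'host': 'localhost', 'port': 5432}}
    new_conf = {'db': {'port': 5433}, 'log': {'level': 'debug'}}

    for section in new_conf:
        if section not in conf:
            conf[section] = new_conf[section]
        else:
            for param in new_conf[section]:
                conf[section][param] = new_conf[section][param]

    assert conf == {'db': {'host': 'localhost', 'port': 5433},
                    'log': {'level': 'debug'}}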
def allow_port(port, proto='tcp', direction='both'):
    '''
    Like allow_ports, but it will append to the existing entry instead of
    replacing it.

    Takes a single port instead of a list of ports.

    CLI Example:

    .. code-block:: bash

        salt '*' csf.allow_port 22 proto='tcp' direction='in'
    '''
    ports = get_ports(proto=proto, direction=direction)
    direction = direction.upper()

    _validate_direction_and_proto(direction, proto)
    directions = build_directions(direction)

    results = []
    for direction in directions:
        _ports = ports[direction]
        _ports.append(port)
        results += allow_ports(_ports, proto=proto, direction=direction)

    return results
[ "def", "allow_port", "(", "port", ",", "proto", "=", "'tcp'", ",", "direction", "=", "'both'", ")", ":", "ports", "=", "get_ports", "(", "proto", "=", "proto", ",", "direction", "=", "direction", ")", "direction", "=", "direction", ".", "upper", "(", ")", "_validate_direction_and_proto", "(", "direction", ",", "proto", ")", "directions", "=", "build_directions", "(", "direction", ")", "results", "=", "[", "]", "for", "direction", "in", "directions", ":", "_ports", "=", "ports", "[", "direction", "]", "_ports", ".", "append", "(", "port", ")", "results", "+=", "allow_ports", "(", "_ports", ",", "proto", "=", "proto", ",", "direction", "=", "direction", ")", "return", "results" ]
avg_line_len: 29.956522, score: 20.130435
def plot_roc(y_true, y_probas, title='ROC Curves',
             plot_micro=True, plot_macro=True, classes_to_plot=None,
             ax=None, figsize=None, cmap='nipy_spectral',
             title_fontsize="large", text_fontsize="medium"):
    """Generates the ROC curves from labels and predicted scores/probabilities

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.

        y_probas (array-like, shape (n_samples, n_classes)):
            Prediction probabilities for each class returned by a classifier.

        title (string, optional): Title of the generated plot. Defaults to
            "ROC Curves".

        plot_micro (boolean, optional): Plot the micro average ROC curve.
            Defaults to ``True``.

        plot_macro (boolean, optional): Plot the macro average ROC curve.
            Defaults to ``True``.

        classes_to_plot (list-like, optional): Classes for which the ROC
            curve should be plotted. e.g. [0, 'cold']. If given class does
            not exist, it will be ignored. If ``None``, all classes will be
            plotted. Defaults to ``None``

        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.

        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.

        cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
            Colormap used for plotting the projection. View Matplotlib Colormap
            documentation for available options.
            https://matplotlib.org/users/colormaps.html

        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "large".

        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults to
            "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot as skplt
        >>> nb = GaussianNB()
        >>> nb = nb.fit(X_train, y_train)
        >>> y_probas = nb.predict_proba(X_test)
        >>> skplt.metrics.plot_roc(y_test, y_probas)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()

        .. image:: _static/examples/plot_roc_curve.png
           :align: center
           :alt: ROC Curves
    """
    y_true = np.array(y_true)
    y_probas = np.array(y_probas)

    classes = np.unique(y_true)
    probas = y_probas

    if classes_to_plot is None:
        classes_to_plot = classes

    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)

    ax.set_title(title, fontsize=title_fontsize)

    fpr_dict = dict()
    tpr_dict = dict()

    indices_to_plot = np.in1d(classes, classes_to_plot)
    for i, to_plot in enumerate(indices_to_plot):
        fpr_dict[i], tpr_dict[i], _ = roc_curve(y_true, probas[:, i],
                                                pos_label=classes[i])
        if to_plot:
            roc_auc = auc(fpr_dict[i], tpr_dict[i])
            color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
            ax.plot(fpr_dict[i], tpr_dict[i], lw=2, color=color,
                    label='ROC curve of class {0} (area = {1:0.2f})'
                          ''.format(classes[i], roc_auc))

    if plot_micro:
        binarized_y_true = label_binarize(y_true, classes=classes)
        if len(classes) == 2:
            binarized_y_true = np.hstack(
                (1 - binarized_y_true, binarized_y_true))
        fpr, tpr, _ = roc_curve(binarized_y_true.ravel(), probas.ravel())
        roc_auc = auc(fpr, tpr)
        ax.plot(fpr, tpr,
                label='micro-average ROC curve '
                      '(area = {0:0.2f})'.format(roc_auc),
                color='deeppink', linestyle=':', linewidth=4)

    if plot_macro:
        # Compute macro-average ROC curve and ROC area
        # First aggregate all false positive rates
        all_fpr = np.unique(np.concatenate(
            [fpr_dict[x] for x in range(len(classes))]))

        # Then interpolate all ROC curves at this points
        mean_tpr = np.zeros_like(all_fpr)
        for i in range(len(classes)):
            mean_tpr += interp(all_fpr, fpr_dict[i], tpr_dict[i])

        # Finally average it and compute AUC
        mean_tpr /= len(classes)

        roc_auc = auc(all_fpr, mean_tpr)

        ax.plot(all_fpr, mean_tpr,
                label='macro-average ROC curve '
                      '(area = {0:0.2f})'.format(roc_auc),
                color='navy', linestyle=':', linewidth=4)

    ax.plot([0, 1], [0, 1], 'k--', lw=2)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
    ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='lower right', fontsize=text_fontsize)
    return ax
[ "def", "plot_roc", "(", "y_true", ",", "y_probas", ",", "title", "=", "'ROC Curves'", ",", "plot_micro", "=", "True", ",", "plot_macro", "=", "True", ",", "classes_to_plot", "=", "None", ",", "ax", "=", "None", ",", "figsize", "=", "None", ",", "cmap", "=", "'nipy_spectral'", ",", "title_fontsize", "=", "\"large\"", ",", "text_fontsize", "=", "\"medium\"", ")", ":", "y_true", "=", "np", ".", "array", "(", "y_true", ")", "y_probas", "=", "np", ".", "array", "(", "y_probas", ")", "classes", "=", "np", ".", "unique", "(", "y_true", ")", "probas", "=", "y_probas", "if", "classes_to_plot", "is", "None", ":", "classes_to_plot", "=", "classes", "if", "ax", "is", "None", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "1", ",", "1", ",", "figsize", "=", "figsize", ")", "ax", ".", "set_title", "(", "title", ",", "fontsize", "=", "title_fontsize", ")", "fpr_dict", "=", "dict", "(", ")", "tpr_dict", "=", "dict", "(", ")", "indices_to_plot", "=", "np", ".", "in1d", "(", "classes", ",", "classes_to_plot", ")", "for", "i", ",", "to_plot", "in", "enumerate", "(", "indices_to_plot", ")", ":", "fpr_dict", "[", "i", "]", ",", "tpr_dict", "[", "i", "]", ",", "_", "=", "roc_curve", "(", "y_true", ",", "probas", "[", ":", ",", "i", "]", ",", "pos_label", "=", "classes", "[", "i", "]", ")", "if", "to_plot", ":", "roc_auc", "=", "auc", "(", "fpr_dict", "[", "i", "]", ",", "tpr_dict", "[", "i", "]", ")", "color", "=", "plt", ".", "cm", ".", "get_cmap", "(", "cmap", ")", "(", "float", "(", "i", ")", "/", "len", "(", "classes", ")", ")", "ax", ".", "plot", "(", "fpr_dict", "[", "i", "]", ",", "tpr_dict", "[", "i", "]", ",", "lw", "=", "2", ",", "color", "=", "color", ",", "label", "=", "'ROC curve of class {0} (area = {1:0.2f})'", "''", ".", "format", "(", "classes", "[", "i", "]", ",", "roc_auc", ")", ")", "if", "plot_micro", ":", "binarized_y_true", "=", "label_binarize", "(", "y_true", ",", "classes", "=", "classes", ")", "if", "len", "(", "classes", ")", "==", "2", ":", "binarized_y_true", "=", "np", ".", "hstack", "(", "(", "1", "-", "binarized_y_true", ",", "binarized_y_true", ")", ")", "fpr", ",", "tpr", ",", "_", "=", "roc_curve", "(", "binarized_y_true", ".", "ravel", "(", ")", ",", "probas", ".", "ravel", "(", ")", ")", "roc_auc", "=", "auc", "(", "fpr", ",", "tpr", ")", "ax", ".", "plot", "(", "fpr", ",", "tpr", ",", "label", "=", "'micro-average ROC curve '", "'(area = {0:0.2f})'", ".", "format", "(", "roc_auc", ")", ",", "color", "=", "'deeppink'", ",", "linestyle", "=", "':'", ",", "linewidth", "=", "4", ")", "if", "plot_macro", ":", "# Compute macro-average ROC curve and ROC area", "# First aggregate all false positive rates", "all_fpr", "=", "np", ".", "unique", "(", "np", ".", "concatenate", "(", "[", "fpr_dict", "[", "x", "]", "for", "x", "in", "range", "(", "len", "(", "classes", ")", ")", "]", ")", ")", "# Then interpolate all ROC curves at this points", "mean_tpr", "=", "np", ".", "zeros_like", "(", "all_fpr", ")", "for", "i", "in", "range", "(", "len", "(", "classes", ")", ")", ":", "mean_tpr", "+=", "interp", "(", "all_fpr", ",", "fpr_dict", "[", "i", "]", ",", "tpr_dict", "[", "i", "]", ")", "# Finally average it and compute AUC", "mean_tpr", "/=", "len", "(", "classes", ")", "roc_auc", "=", "auc", "(", "all_fpr", ",", "mean_tpr", ")", "ax", ".", "plot", "(", "all_fpr", ",", "mean_tpr", ",", "label", "=", "'macro-average ROC curve '", "'(area = {0:0.2f})'", ".", "format", "(", "roc_auc", ")", ",", "color", "=", "'navy'", ",", "linestyle", "=", "':'", ",", "linewidth", "=", "4", 
")", "ax", ".", "plot", "(", "[", "0", ",", "1", "]", ",", "[", "0", ",", "1", "]", ",", "'k--'", ",", "lw", "=", "2", ")", "ax", ".", "set_xlim", "(", "[", "0.0", ",", "1.0", "]", ")", "ax", ".", "set_ylim", "(", "[", "0.0", ",", "1.05", "]", ")", "ax", ".", "set_xlabel", "(", "'False Positive Rate'", ",", "fontsize", "=", "text_fontsize", ")", "ax", ".", "set_ylabel", "(", "'True Positive Rate'", ",", "fontsize", "=", "text_fontsize", ")", "ax", ".", "tick_params", "(", "labelsize", "=", "text_fontsize", ")", "ax", ".", "legend", "(", "loc", "=", "'lower right'", ",", "fontsize", "=", "text_fontsize", ")", "return", "ax" ]
avg_line_len: 38.692308, score: 23.069231
def __compose(self):
    """ Compose the message, pulling together body, attachments etc """
    msg = MIMEMultipart()
    msg['Subject'] = self.config['shutit.core.alerting.emailer.subject']
    msg['To'] = self.config['shutit.core.alerting.emailer.mailto']
    msg['From'] = self.config['shutit.core.alerting.emailer.mailfrom']
    # add the module's maintainer as a CC if configured
    if self.config['shutit.core.alerting.emailer.mailto_maintainer']:
        msg['Cc'] = self.config['shutit.core.alerting.emailer.maintainer']
    if self.config['shutit.core.alerting.emailer.signature'] != '':
        signature = '\n\n' + self.config['shutit.core.alerting.emailer.signature']
    else:
        signature = self.config['shutit.core.alerting.emailer.signature']
    body = MIMEText('\n'.join(self.lines) + signature)
    msg.attach(body)
    for attach in self.attaches:
        msg.attach(attach)
    return msg
[ "def", "__compose", "(", "self", ")", ":", "msg", "=", "MIMEMultipart", "(", ")", "msg", "[", "'Subject'", "]", "=", "self", ".", "config", "[", "'shutit.core.alerting.emailer.subject'", "]", "msg", "[", "'To'", "]", "=", "self", ".", "config", "[", "'shutit.core.alerting.emailer.mailto'", "]", "msg", "[", "'From'", "]", "=", "self", ".", "config", "[", "'shutit.core.alerting.emailer.mailfrom'", "]", "# add the module's maintainer as a CC if configured", "if", "self", ".", "config", "[", "'shutit.core.alerting.emailer.mailto_maintainer'", "]", ":", "msg", "[", "'Cc'", "]", "=", "self", ".", "config", "[", "'shutit.core.alerting.emailer.maintainer'", "]", "if", "self", ".", "config", "[", "'shutit.core.alerting.emailer.signature'", "]", "!=", "''", ":", "signature", "=", "'\\n\\n'", "+", "self", ".", "config", "[", "'shutit.core.alerting.emailer.signature'", "]", "else", ":", "signature", "=", "self", ".", "config", "[", "'shutit.core.alerting.emailer.signature'", "]", "body", "=", "MIMEText", "(", "'\\n'", ".", "join", "(", "self", ".", "lines", ")", "+", "signature", ")", "msg", ".", "attach", "(", "body", ")", "for", "attach", "in", "self", ".", "attaches", ":", "msg", ".", "attach", "(", "attach", ")", "return", "msg" ]
avg_line_len: 45.421053, score: 21.526316
def to_dataframe(self, dtypes=None):
    """Create a :class:`pandas.DataFrame` of all rows in the stream.

    This method requires the pandas library to create a data frame and the
    fastavro library to parse row blocks.

    .. warning::
        DATETIME columns are not supported. They are currently parsed as
        strings in the fastavro library.

    Args:
        dtypes ( \
            Map[str, Union[str, pandas.Series.dtype]] \
        ):
            Optional. A dictionary of column names pandas ``dtype``s. The
            provided ``dtype`` is used when constructing the series for
            the column specified. Otherwise, the default pandas behavior
            is used.

    Returns:
        pandas.DataFrame:
            A data frame of all rows in the stream.
    """
    if pandas is None:
        raise ImportError(_PANDAS_REQUIRED)

    frames = []
    for page in self.pages:
        frames.append(page.to_dataframe(dtypes=dtypes))
    return pandas.concat(frames)
[ "def", "to_dataframe", "(", "self", ",", "dtypes", "=", "None", ")", ":", "if", "pandas", "is", "None", ":", "raise", "ImportError", "(", "_PANDAS_REQUIRED", ")", "frames", "=", "[", "]", "for", "page", "in", "self", ".", "pages", ":", "frames", ".", "append", "(", "page", ".", "to_dataframe", "(", "dtypes", "=", "dtypes", ")", ")", "return", "pandas", ".", "concat", "(", "frames", ")" ]
avg_line_len: 35.3, score: 21.466667
def create_articles(self, project, articleset, json_data=None, **options):
    """
    Create one or more articles in the set. Provide the needed arguments
    using the json_data or with key-value pairs

    @param json_data: A dictionary or list of dictionaries. Each dict can
                      contain a 'children' attribute which is another list
                      of dictionaries.
    """
    url = URL.article.format(**locals())
    # TODO duplicated from create_set, move into requests
    # (or separate post method?)
    if json_data is None:
        # form encoded request
        return self.request(url, method="post", data=options)
    else:
        if not isinstance(json_data, string_types):
            json_data = json.dumps(json_data, default=serialize)
        headers = {'content-type': 'application/json'}
        return self.request(url, method='post', data=json_data,
                            headers=headers)
[ "def", "create_articles", "(", "self", ",", "project", ",", "articleset", ",", "json_data", "=", "None", ",", "*", "*", "options", ")", ":", "url", "=", "URL", ".", "article", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "# TODO duplicated from create_set, move into requests", "# (or separate post method?)", "if", "json_data", "is", "None", ":", "# form encoded request", "return", "self", ".", "request", "(", "url", ",", "method", "=", "\"post\"", ",", "data", "=", "options", ")", "else", ":", "if", "not", "isinstance", "(", "json_data", ",", "string_types", ")", ":", "json_data", "=", "json", ".", "dumps", "(", "json_data", ",", "default", "=", "serialize", ")", "headers", "=", "{", "'content-type'", ":", "'application/json'", "}", "return", "self", ".", "request", "(", "url", ",", "method", "=", "'post'", ",", "data", "=", "json_data", ",", "headers", "=", "headers", ")" ]
avg_line_len: 50.894737, score: 19
def t_istringapostrophe_css_string(self, t):
    r'[^\'@]+'
    t.lexer.lineno += t.value.count('\n')
    return t
[ "def", "t_istringapostrophe_css_string", "(", "self", ",", "t", ")", ":", "t", ".", "lexer", ".", "lineno", "+=", "t", ".", "value", ".", "count", "(", "'\\n'", ")", "return", "t" ]
avg_line_len: 30.75, score: 13.75
def find(self, name, required):
    """
    Finds all matching dependencies by their name.

    :param name: the dependency name to locate.
    :param required: true to raise an exception when no dependencies are found.

    :return: a list of found dependencies
    """
    if name is None:
        raise Exception("Name cannot be null")

    locator = self._locate(name)
    if locator is None:
        if required:
            raise ReferenceException(None, name)
        return None

    return self._references.find(locator, required)
[ "def", "find", "(", "self", ",", "name", ",", "required", ")", ":", "if", "name", "==", "None", ":", "raise", "Exception", "(", "\"Name cannot be null\"", ")", "locator", "=", "self", ".", "_locate", "(", "name", ")", "if", "locator", "==", "None", ":", "if", "required", ":", "raise", "ReferenceException", "(", "None", ",", "name", ")", "return", "None", "return", "self", ".", "_references", ".", "find", "(", "locator", ",", "required", ")" ]
avg_line_len: 29.65, score: 18.45
def download_extract(url):
    """download and extract file."""
    logger.info("Downloading %s", url)
    request = urllib2.Request(url)
    request.add_header('User-Agent',
                       'caelum/0.1 +https://github.com/nrcharles/caelum')
    opener = urllib2.build_opener()
    with tempfile.TemporaryFile(suffix='.zip', dir=env.WEATHER_DATA_PATH) \
            as local_file:
        logger.debug('Saving to temporary file %s', local_file.name)
        local_file.write(opener.open(request).read())
        compressed_file = zipfile.ZipFile(local_file, 'r')
        logger.debug('Extracting %s', compressed_file)
        compressed_file.extractall(env.WEATHER_DATA_PATH)
        local_file.close()
[ "def", "download_extract", "(", "url", ")", ":", "logger", ".", "info", "(", "\"Downloading %s\"", ",", "url", ")", "request", "=", "urllib2", ".", "Request", "(", "url", ")", "request", ".", "add_header", "(", "'User-Agent'", ",", "'caelum/0.1 +https://github.com/nrcharles/caelum'", ")", "opener", "=", "urllib2", ".", "build_opener", "(", ")", "with", "tempfile", ".", "TemporaryFile", "(", "suffix", "=", "'.zip'", ",", "dir", "=", "env", ".", "WEATHER_DATA_PATH", ")", "as", "local_file", ":", "logger", ".", "debug", "(", "'Saving to temporary file %s'", ",", "local_file", ".", "name", ")", "local_file", ".", "write", "(", "opener", ".", "open", "(", "request", ")", ".", "read", "(", ")", ")", "compressed_file", "=", "zipfile", ".", "ZipFile", "(", "local_file", ",", "'r'", ")", "logger", ".", "debug", "(", "'Extracting %s'", ",", "compressed_file", ")", "compressed_file", ".", "extractall", "(", "env", ".", "WEATHER_DATA_PATH", ")", "local_file", ".", "close", "(", ")" ]
avg_line_len: 46.333333, score: 14.466667
def _reldiff(a, b):
    """
    Computes the relative difference of two floating-point numbers

    rel = abs(a-b)/min(abs(a), abs(b))

    If a == 0 and b == 0, then 0.0 is returned.
    Otherwise, if a or b is 0.0, inf is returned.
    """
    a = float(a)
    b = float(b)
    aa = abs(a)
    ba = abs(b)
    if a == 0.0 and b == 0.0:
        return 0.0
    elif a == 0 or b == 0.0:
        return float('inf')
    return abs(a - b) / min(aa, ba)
[ "def", "_reldiff", "(", "a", ",", "b", ")", ":", "a", "=", "float", "(", "a", ")", "b", "=", "float", "(", "b", ")", "aa", "=", "abs", "(", "a", ")", "ba", "=", "abs", "(", "b", ")", "if", "a", "==", "0.0", "and", "b", "==", "0.0", ":", "return", "0.0", "elif", "a", "==", "0", "or", "b", "==", "0.0", ":", "return", "float", "(", "'inf'", ")", "return", "abs", "(", "a", "-", "b", ")", "/", "min", "(", "aa", ",", "ba", ")" ]
avg_line_len: 20.47619, score: 20.190476
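Worked values for _reldiff, following directly from the definition above:

    _reldiff(10.0, 11.0)  # -> 0.1   (|10 - 11| / min(10, 11))
    _reldiff(0.0, 0.0)    # -> 0.0   (both zero by convention)
    _reldiff(0.0, 5.0)    # -> inf   (one operand is zero)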
def turn_right(self, angle_degrees, rate=RATE):
    """
    Turn to the right, staying on the spot

    :param angle_degrees: How far to turn (degrees)
    :param rate: The turning speed (degrees/second)
    :return:
    """
    flight_time = angle_degrees / rate

    self.start_turn_right(rate)
    time.sleep(flight_time)
    self.stop()
[ "def", "turn_right", "(", "self", ",", "angle_degrees", ",", "rate", "=", "RATE", ")", ":", "flight_time", "=", "angle_degrees", "/", "rate", "self", ".", "start_turn_right", "(", "rate", ")", "time", ".", "sleep", "(", "flight_time", ")", "self", ".", "stop", "(", ")" ]
avg_line_len: 28.384615, score: 14.230769
def api_call(self, opts, args=None, body=None, **kwargs):
    """Setup the request"""
    if args:
        path = opts['name'] % args
    else:
        path = opts['name']

    path = '/api/v1%s' % path
    return self._request(
        opts['method'], path=path, payload=body, **kwargs)
[ "def", "api_call", "(", "self", ",", "opts", ",", "args", "=", "None", ",", "body", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "args", ":", "path", "=", "opts", "[", "'name'", "]", "%", "args", "else", ":", "path", "=", "opts", "[", "'name'", "]", "path", "=", "'/api/v1%s'", "%", "path", "return", "self", ".", "_request", "(", "opts", "[", "'method'", "]", ",", "path", "=", "path", ",", "payload", "=", "body", ",", "*", "*", "kwargs", ")" ]
avg_line_len: 34.444444, score: 13.222222
def stop_receive(self, fd):
    """
    Stop yielding readability events for `fd`.

    Redundant calls to :meth:`stop_receive` are silently ignored, this may
    change in future.
    """
    self._rfds.pop(fd, None)
    self._update(fd)
[ "def", "stop_receive", "(", "self", ",", "fd", ")", ":", "self", ".", "_rfds", ".", "pop", "(", "fd", ",", "None", ")", "self", ".", "_update", "(", "fd", ")" ]
28.666667
15.555556
def cythonize(*args, **kwargs):
    '''
    dirty hack, only import cythonize at the time you use it.

    if you don't write a Cython extension, this won't fail even if
    Cython is not installed.
    '''
    global cythonize
    from Cython.Build import cythonize
    return cythonize(*args, **kwargs)
[ "def", "cythonize", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "global", "cythonize", "from", "Cython", ".", "Build", "import", "cythonize", "return", "cythonize", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
29.3
17.3
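The trick is that the `import ... as` binding, combined with `global`, rebinds the module-level name on the first call, so every later call goes straight to the real function. The same pattern with a stand-in import target:

    def expensive(*args, **kwargs):
        global expensive
        from math import factorial as expensive  # stand-in for the costly import
        return expensive(*args, **kwargs)

    print(expensive(5))  # first call: imports, rebinds the global, returns 120
    print(expensive(6))  # later calls hit math.factorial directly: 720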
def arrows(self):
    """Iterate over all my arrows."""
    for o in self.arrow.values():
        for arro in o.values():
            yield arro
[ "def", "arrows", "(", "self", ")", ":", "for", "o", "in", "self", ".", "arrow", ".", "values", "(", ")", ":", "for", "arro", "in", "o", ".", "values", "(", ")", ":", "yield", "arro" ]
31.2
9
def is_dir(self, follow_symlinks=True):
    """
    Return True if this entry is a directory or a symbolic link pointing to
    a directory; return False if the entry is or points to any other kind
    of file, or if it doesn’t exist anymore.

    The result is cached on the os.DirEntry object.

    Args:
        follow_symlinks (bool): Follow symlinks.
            Not supported on cloud storage objects.

    Returns:
        bool: True if directory exists.
    """
    try:
        return (self._system.isdir(
            path=self._path, client_kwargs=self._client_kwargs,
            virtual_dir=False) or
            # Some directories only exists virtually in object path and
            # don't have headers.
            bool(S_ISDIR(self.stat().st_mode)))
    except ObjectPermissionError:
        # The directory was listed, but unable to head it or access to its
        # content
        return True
[ "def", "is_dir", "(", "self", ",", "follow_symlinks", "=", "True", ")", ":", "try", ":", "return", "(", "self", ".", "_system", ".", "isdir", "(", "path", "=", "self", ".", "_path", ",", "client_kwargs", "=", "self", ".", "_client_kwargs", ",", "virtual_dir", "=", "False", ")", "or", "# Some directories only exists virtually in object path and", "# don't have headers.", "bool", "(", "S_ISDIR", "(", "self", ".", "stat", "(", ")", ".", "st_mode", ")", ")", ")", "except", "ObjectPermissionError", ":", "# The directory was listed, but unable to head it or access to its", "# content", "return", "True" ]
34.857143
20.214286
def unprotect(self, **kwargs):
    """Unprotect the branch.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabProtectError: If the branch could not be unprotected
    """
    id = self.get_id().replace('/', '%2F')
    path = '%s/%s/unprotect' % (self.manager.path, id)
    self.manager.gitlab.http_put(path, **kwargs)
    self._attrs['protected'] = False
[ "def", "unprotect", "(", "self", ",", "*", "*", "kwargs", ")", ":", "id", "=", "self", ".", "get_id", "(", ")", ".", "replace", "(", "'/'", ",", "'%2F'", ")", "path", "=", "'%s/%s/unprotect'", "%", "(", "self", ".", "manager", ".", "path", ",", "id", ")", "self", ".", "manager", ".", "gitlab", ".", "http_put", "(", "path", ",", "*", "*", "kwargs", ")", "self", ".", "_attrs", "[", "'protected'", "]", "=", "False" ]
36.214286
19.142857
def sexagesimal(sexathang, latlon, form='DDD'):
    """
    Arguments:
        sexathang: (float), -15.560615 (negative = South), -146.241122 (negative = West)  # Apataki Carenage
        latlon: (str) 'lat' | 'lon'
        form: (str), 'DDD'|'DMM'|'DMS', decimal Degrees, decimal Minutes, decimal Seconds

    Returns:
        latitude: e.g., '15°33'38.214"S'
        longitude: e.g., '146°14'28.039"W'
    """
    cardinal = 'O'
    if not isinstance(sexathang, float):
        sexathang = 'n/a'
        return sexathang

    if latlon == 'lon':
        if sexathang > 0.0:
            cardinal = 'E'
        if sexathang < 0.0:
            cardinal = 'W'

    if latlon == 'lat':
        if sexathang > 0.0:
            cardinal = 'N'
        if sexathang < 0.0:
            cardinal = 'S'

    if 'RAW' in form:
        sexathang = '{0:4.9f}°'.format(sexathang)  # 4 to allow -100° through -179.999999° to -180°
        return sexathang

    if 'DDD' in form:
        sexathang = '{0:3.6f}°'.format(abs(sexathang))

    if 'DMM' in form:
        _latlon = abs(sexathang)
        minute_latlon, degree_latlon = modf(_latlon)
        minute_latlon *= 60
        sexathang = '{0}°{1:2.5f}\''.format(int(degree_latlon), minute_latlon)

    if 'DMS' in form:
        _latlon = abs(sexathang)
        minute_latlon, degree_latlon = modf(_latlon)
        second_latlon, minute_latlon = modf(minute_latlon * 60)
        second_latlon *= 60.0
        sexathang = '{0}°{1}\'{2:2.3f}\"'.format(int(degree_latlon), int(minute_latlon), second_latlon)

    return sexathang + cardinal
[ "def", "sexagesimal", "(", "sexathang", ",", "latlon", ",", "form", "=", "'DDD'", ")", ":", "cardinal", "=", "'O'", "if", "not", "isinstance", "(", "sexathang", ",", "float", ")", ":", "sexathang", "=", "'n/a'", "return", "sexathang", "if", "latlon", "==", "'lon'", ":", "if", "sexathang", ">", "0.0", ":", "cardinal", "=", "'E'", "if", "sexathang", "<", "0.0", ":", "cardinal", "=", "'W'", "if", "latlon", "==", "'lat'", ":", "if", "sexathang", ">", "0.0", ":", "cardinal", "=", "'N'", "if", "sexathang", "<", "0.0", ":", "cardinal", "=", "'S'", "if", "'RAW'", "in", "form", ":", "sexathang", "=", "'{0:4.9f}°'.", "f", "ormat(", "s", "exathang)", " ", " 4 to allow -100° through -179.999999° to -180°", "return", "sexathang", "if", "'DDD'", "in", "form", ":", "sexathang", "=", "'{0:3.6f}°'.", "f", "ormat(", "a", "bs(", "s", "exathang)", ")", "", "if", "'DMM'", "in", "form", ":", "_latlon", "=", "abs", "(", "sexathang", ")", "minute_latlon", ",", "degree_latlon", "=", "modf", "(", "_latlon", ")", "minute_latlon", "*=", "60", "sexathang", "=", "'{0}°{1:2.5f}\\''.", "f", "ormat(", "i", "nt(", "d", "egree_latlon)", ",", " ", "inute_latlon)", "", "if", "'DMS'", "in", "form", ":", "_latlon", "=", "abs", "(", "sexathang", ")", "minute_latlon", ",", "degree_latlon", "=", "modf", "(", "_latlon", ")", "second_latlon", ",", "minute_latlon", "=", "modf", "(", "minute_latlon", "*", "60", ")", "second_latlon", "*=", "60.0", "sexathang", "=", "'{0}°{1}\\'{2:2.3f}\\\"'.", "f", "ormat(", "i", "nt(", "d", "egree_latlon)", ",", " ", "nt(", "m", "inute_latlon)", ",", " ", "econd_latlon)", "", "return", "sexathang", "+", "cardinal" ]
31.791667
21.291667
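The DMM/DMS branches are plain base-60 arithmetic via `math.modf`; working the docstring's latitude -15.560615 through the DMS branch by hand:

    from math import modf
    minute, degree = modf(abs(-15.560615))  # -> (0.560615, 15.0)
    second, minute = modf(minute * 60)      # 33.6369 -> (0.6369, 33.0)
    second *= 60.0                          # -> 38.214
    # formatted: 15°33'38.214", plus cardinal 'S' -> matches the docstring example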
def avail_images(call=None):
    '''
    Return available Packet os images.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images packet-provider
        salt-cloud -f avail_images packet-provider
    '''

    if call == 'action':
        raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
        )

    vm_ = get_configured_provider()
    manager = packet.Manager(auth_token=vm_['token'])

    ret = {}

    for os_system in manager.list_operating_systems():
        ret[os_system.name] = os_system.__dict__

    return ret
[ "def", "avail_images", "(", "call", "=", "None", ")", ":", "if", "call", "==", "'action'", ":", "raise", "SaltCloudException", "(", "'The avail_images function must be called with -f or --function.'", ")", "ret", "=", "{", "}", "vm_", "=", "get_configured_provider", "(", ")", "manager", "=", "packet", ".", "Manager", "(", "auth_token", "=", "vm_", "[", "'token'", "]", ")", "ret", "=", "{", "}", "for", "os_system", "in", "manager", ".", "list_operating_systems", "(", ")", ":", "ret", "[", "os_system", ".", "name", "]", "=", "os_system", ".", "__dict__", "return", "ret" ]
21.814815
24.851852
def read_from_file(filename):
    """
    Arguments:
     | ``filename``  --  the filename of the input file

    Use as follows::

        >>> inp = CP2KInputFile.read_from_file("somefile.inp")
        >>> for section in inp:
        ...     print section.name
    """
    with open(filename) as f:
        result = CP2KInputFile()
        try:
            while True:
                result.load_children(f)
        except EOFError:
            pass
    return result
[ "def", "read_from_file", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "result", "=", "CP2KInputFile", "(", ")", "try", ":", "while", "True", ":", "result", ".", "load_children", "(", "f", ")", "except", "EOFError", ":", "pass", "return", "result" ]
27.631579
14.684211
def next_line(self):
    """Read the next line from the line generator and split it"""
    self.line = next(self.lines)  # Will raise StopIteration when there are no more lines
    self.values = self.line.split()
[ "def", "next_line", "(", "self", ")", ":", "self", ".", "line", "=", "next", "(", "self", ".", "lines", ")", "# Will raise StopIteration when there are no more lines", "self", ".", "values", "=", "self", ".", "line", ".", "split", "(", ")" ]
55.25
18.5
def _set_labels(self, axes, dimensions, xlabel=None, ylabel=None, zlabel=None):
    """
    Sets the labels of the axes using the supplied list of dimensions.
    Optionally explicit labels may be supplied to override the dimension
    label.
    """
    xlabel, ylabel, zlabel = self._get_axis_labels(dimensions, xlabel, ylabel, zlabel)
    if self.invert_axes:
        xlabel, ylabel = ylabel, xlabel

    if xlabel and self.xaxis and 'x' in self.labelled:
        axes.set_xlabel(xlabel, **self._fontsize('xlabel'))
    if ylabel and self.yaxis and 'y' in self.labelled:
        axes.set_ylabel(ylabel, **self._fontsize('ylabel'))
    if zlabel and self.zaxis and 'z' in self.labelled:
        axes.set_zlabel(zlabel, **self._fontsize('zlabel'))
[ "def", "_set_labels", "(", "self", ",", "axes", ",", "dimensions", ",", "xlabel", "=", "None", ",", "ylabel", "=", "None", ",", "zlabel", "=", "None", ")", ":", "xlabel", ",", "ylabel", ",", "zlabel", "=", "self", ".", "_get_axis_labels", "(", "dimensions", ",", "xlabel", ",", "ylabel", ",", "zlabel", ")", "if", "self", ".", "invert_axes", ":", "xlabel", ",", "ylabel", "=", "ylabel", ",", "xlabel", "if", "xlabel", "and", "self", ".", "xaxis", "and", "'x'", "in", "self", ".", "labelled", ":", "axes", ".", "set_xlabel", "(", "xlabel", ",", "*", "*", "self", ".", "_fontsize", "(", "'xlabel'", ")", ")", "if", "ylabel", "and", "self", ".", "yaxis", "and", "'y'", "in", "self", ".", "labelled", ":", "axes", ".", "set_ylabel", "(", "ylabel", ",", "*", "*", "self", ".", "_fontsize", "(", "'ylabel'", ")", ")", "if", "zlabel", "and", "self", ".", "zaxis", "and", "'z'", "in", "self", ".", "labelled", ":", "axes", ".", "set_zlabel", "(", "zlabel", ",", "*", "*", "self", ".", "_fontsize", "(", "'zlabel'", ")", ")" ]
52.6
21.533333
def to_number(obj):
    '''
    Cast an arbitrary object or sequence to a number type
    '''
    if isinstance(obj, LiteralWrapper):
        val = obj.obj
    elif isinstance(obj, Iterable) and not isinstance(obj, str):
        val = next(obj, None)
    else:
        val = obj
    if val is None:
        #FIXME: Should be NaN, not 0
        yield 0
    elif isinstance(val, str):
        yield float(val)
    elif isinstance(val, node):
        yield float(strval(val))
    elif isinstance(val, int) or isinstance(val, float):
        yield val
    else:
        raise RuntimeError('Unknown type for number conversion: {}'.format(val))
[ "def", "to_number", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "LiteralWrapper", ")", ":", "val", "=", "obj", ".", "obj", "elif", "isinstance", "(", "obj", ",", "Iterable", ")", "and", "not", "isinstance", "(", "obj", ",", "str", ")", ":", "val", "=", "next", "(", "obj", ",", "None", ")", "else", ":", "val", "=", "obj", "if", "val", "is", "None", ":", "#FIXME: Should be NaN, not 0", "yield", "0", "elif", "isinstance", "(", "val", ",", "str", ")", ":", "yield", "float", "(", "val", ")", "elif", "isinstance", "(", "val", ",", "node", ")", ":", "yield", "float", "(", "strval", "(", "val", ")", ")", "elif", "isinstance", "(", "val", ",", "int", ")", "or", "isinstance", "(", "val", ",", "float", ")", ":", "yield", "val", "else", ":", "raise", "RuntimeError", "(", "'Unknown type for number conversion: {}'", ".", "format", "(", "val", ")", ")" ]
29.428571
19.809524
def vm_get_network_by_name(vm, network_name):
    """
    Try to find a network by scanning all networks attached to the VM
    :param vm: <vim.vm>
    :param network_name: <str> name of network
    :return: <vim.vm.Network or None>
    """
    for network in vm.network:
        if hasattr(network, "name") and network_name == network.name:
            return network
    return None
[ "def", "vm_get_network_by_name", "(", "vm", ",", "network_name", ")", ":", "# return None", "for", "network", "in", "vm", ".", "network", ":", "if", "hasattr", "(", "network", ",", "\"name\"", ")", "and", "network_name", "==", "network", ".", "name", ":", "return", "network", "return", "None" ]
35.5
11.833333
def union(self, a, b):
    """Merges the set that contains ``a`` with the set that contains ``b``.

    Parameters
    ----------
    a, b : objects
        Two objects whose sets are to be merged.
    """
    s1, s2 = self.find(a), self.find(b)
    if s1 != s2:
        r1, r2 = self._rank[s1], self._rank[s2]
        if r2 > r1:
            r1, r2 = r2, r1
            s1, s2 = s2, s1
        if r1 == r2:
            self._rank[s1] += 1
        self._leader[s2] = s1
        self._size[s1] += self._size[s2]
        self.nClusters -= 1
[ "def", "union", "(", "self", ",", "a", ",", "b", ")", ":", "s1", ",", "s2", "=", "self", ".", "find", "(", "a", ")", ",", "self", ".", "find", "(", "b", ")", "if", "s1", "!=", "s2", ":", "r1", ",", "r2", "=", "self", ".", "_rank", "[", "s1", "]", ",", "self", ".", "_rank", "[", "s2", "]", "if", "r2", ">", "r1", ":", "r1", ",", "r2", "=", "r2", ",", "r1", "s1", ",", "s2", "=", "s2", ",", "s1", "if", "r1", "==", "r2", ":", "self", ".", "_rank", "[", "s1", "]", "+=", "1", "self", ".", "_leader", "[", "s2", "]", "=", "s1", "self", ".", "_size", "[", "s1", "]", "+=", "self", ".", "_size", "[", "s2", "]", "self", ".", "nClusters", "-=", "1" ]
29.55
14.15
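A usage sketch of the union-by-rank logic, assuming a hypothetical `DisjointSet` wrapper that starts every element as its own leader with rank 0 and size 1 (only `union`/`find`/`nClusters` appear in this record):

    ds = DisjointSet(['a', 'b', 'c', 'd'])  # four singleton sets, nClusters == 4
    ds.union('a', 'b')   # equal ranks: 'a' becomes leader, its rank bumps to 1
    ds.union('c', 'a')   # 'a' outranks 'c', so 'a' absorbs it; sizes add up
    assert ds.find('b') == ds.find('c')
    assert ds.nClusters == 2                # {'a', 'b', 'c'} and {'d'}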
def interrupt_then_kill(self, delay=2.0):
    """Send INT, wait a delay and then send KILL."""
    try:
        self.signal(SIGINT)
    except Exception:
        self.log.debug("interrupt failed")
    self.killer = ioloop.DelayedCallback(lambda: self.signal(SIGKILL), delay*1000, self.loop)
    self.killer.start()
[ "def", "interrupt_then_kill", "(", "self", ",", "delay", "=", "2.0", ")", ":", "try", ":", "self", ".", "signal", "(", "SIGINT", ")", "except", "Exception", ":", "self", ".", "log", ".", "debug", "(", "\"interrupt failed\"", ")", "pass", "self", ".", "killer", "=", "ioloop", ".", "DelayedCallback", "(", "lambda", ":", "self", ".", "signal", "(", "SIGKILL", ")", ",", "delay", "*", "1000", ",", "self", ".", "loop", ")", "self", ".", "killer", ".", "start", "(", ")" ]
39.222222
17.222222
def resolve_response_data(head_key, data_key, data):
    """
    Resolves the responses you get from billomat

    If you have done a get_one_element request then you will get a dictionary
    If you have done a get_all_elements request then you will get a list with all elements in it

    :param head_key: the head key e.g.: CLIENTS
    :param data_key: the data key e.g.: CLIENT
    :param data: the responses you got
    :return: dict or list
    """
    new_data = []
    if isinstance(data, list):
        for data_row in data:
            if head_key in data_row and data_key in data_row[head_key]:
                if isinstance(data_row[head_key][data_key], list):
                    new_data += data_row[head_key][data_key]
                else:
                    new_data.append(data_row[head_key][data_key])
            elif data_key in data_row:
                return data_row[data_key]
    else:
        if head_key in data and data_key in data[head_key]:
            new_data += data[head_key][data_key]
        elif data_key in data:
            return data[data_key]
    return new_data
[ "def", "resolve_response_data", "(", "head_key", ",", "data_key", ",", "data", ")", ":", "new_data", "=", "[", "]", "if", "isinstance", "(", "data", ",", "list", ")", ":", "for", "data_row", "in", "data", ":", "if", "head_key", "in", "data_row", "and", "data_key", "in", "data_row", "[", "head_key", "]", ":", "if", "isinstance", "(", "data_row", "[", "head_key", "]", "[", "data_key", "]", ",", "list", ")", ":", "new_data", "+=", "data_row", "[", "head_key", "]", "[", "data_key", "]", "else", ":", "new_data", ".", "append", "(", "data_row", "[", "head_key", "]", "[", "data_key", "]", ")", "elif", "data_key", "in", "data_row", ":", "return", "data_row", "[", "data_key", "]", "else", ":", "if", "head_key", "in", "data", "and", "data_key", "in", "data", "[", "head_key", "]", ":", "new_data", "+=", "data", "[", "head_key", "]", "[", "data_key", "]", "elif", "data_key", "in", "data", ":", "return", "data", "[", "data_key", "]", "return", "new_data" ]
42.178571
17.678571
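Worked examples of the two response shapes the helper handles, reusing the docstring's CLIENTS/CLIENT keys with made-up payloads:

    page1 = {'CLIENTS': {'CLIENT': [{'id': 1}, {'id': 2}]}}
    page2 = {'CLIENTS': {'CLIENT': {'id': 3}}}   # single element, not a list
    resolve_response_data('CLIENTS', 'CLIENT', [page1, page2])
    # -> [{'id': 1}, {'id': 2}, {'id': 3}]   (list branch: pages are concatenated)
    resolve_response_data('CLIENTS', 'CLIENT', page1)
    # -> [{'id': 1}, {'id': 2}]              (dict branch: single response)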
def get_anniversary_periods(start, finish, anniversary=1):
    """ Return a list of anniversary periods between start and finish. """
    current = start
    periods = []
    while current <= finish:
        (period_start, period_finish) = date_period(DATE_FREQUENCY_MONTHLY, anniversary, current)
        current = period_start + relativedelta(months=+1)
        period_start = period_start if period_start > start else start
        period_finish = period_finish if period_finish < finish else finish
        periods.append((period_start, period_finish))
    return periods
[ "def", "get_anniversary_periods", "(", "start", ",", "finish", ",", "anniversary", "=", "1", ")", ":", "import", "sys", "current", "=", "start", "periods", "=", "[", "]", "while", "current", "<=", "finish", ":", "(", "period_start", ",", "period_finish", ")", "=", "date_period", "(", "DATE_FREQUENCY_MONTHLY", ",", "anniversary", ",", "current", ")", "current", "=", "period_start", "+", "relativedelta", "(", "months", "=", "+", "1", ")", "period_start", "=", "period_start", "if", "period_start", ">", "start", "else", "start", "period_finish", "=", "period_finish", "if", "period_finish", "<", "finish", "else", "finish", "periods", ".", "append", "(", "(", "period_start", ",", "period_finish", ")", ")", "return", "periods" ]
42
21.714286
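A usage sketch, assuming `date_period(DATE_FREQUENCY_MONTHLY, 1, current)` returns the calendar-month bounds containing `current` (that helper is not part of this record); the first and last periods get clipped to the requested range:

    from datetime import date
    get_anniversary_periods(date(2019, 1, 15), date(2019, 3, 10))
    # -> [(date(2019, 1, 15), date(2019, 1, 31)),
    #     (date(2019, 2, 1),  date(2019, 2, 28)),
    #     (date(2019, 3, 1),  date(2019, 3, 10))]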
def site_content(self, election_day):
    """
    Site content represents content for the entire site on a given
    election day.
    """
    from electionnight.models import PageType

    page_type = PageType.objects.get(
        model_type=ContentType.objects.get(
            app_label="election", model="electionday"
        ),
        election_day=election_day,
    )
    site_content = self.get(
        content_type=ContentType.objects.get_for_model(page_type),
        object_id=page_type.pk,
        election_day=election_day,
    )
    return {"site": self.serialize_content_blocks(site_content)}
[ "def", "site_content", "(", "self", ",", "election_day", ")", ":", "from", "electionnight", ".", "models", "import", "PageType", "page_type", "=", "PageType", ".", "objects", ".", "get", "(", "model_type", "=", "ContentType", ".", "objects", ".", "get", "(", "app_label", "=", "\"election\"", ",", "model", "=", "\"electionday\"", ")", ",", "election_day", "=", "election_day", ",", ")", "site_content", "=", "self", ".", "get", "(", "content_type", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "page_type", ")", ",", "object_id", "=", "page_type", ".", "pk", ",", "election_day", "=", "election_day", ",", ")", "return", "{", "\"site\"", ":", "self", ".", "serialize_content_blocks", "(", "site_content", ")", "}" ]
34.578947
14.578947
def make_cutter(self):
    """
    Makes a shape to be used as a negative; it can be cut away from other
    shapes to make a perfectly shaped pocket for this part.

    For example, for a countersunk screw with a neck, the following cutter
    would be generated.

    .. image:: /_static/img/fastenerpart/male.cutter.png

    If the head were an externally driven shape (like a hex bolt), then the
    cutter's head would be wide enough to accommodate a tool to fasten it.
    """
    # head
    obj = self.head.make_cutter()

    # neck
    if self.neck_length:
        # neck cut diameter (if thread is larger than the neck, thread must fit through)
        (inner_radius, outer_radius) = self.thread.get_radii()
        neck_cut_radius = max(outer_radius, self.neck_diam / 2)
        neck = cadquery.Workplane(
            'XY', origin=(0, 0, -self.neck_length)
        ).circle(neck_cut_radius).extrude(self.neck_length)
        obj = obj.union(neck)

    # thread (pilot hole)
    pilot_hole = self.thread.make_pilothole_cutter() \
        .translate((0, 0, -self.length))
    obj = obj.union(pilot_hole)

    return obj
[ "def", "make_cutter", "(", "self", ")", ":", "# head", "obj", "=", "self", ".", "head", ".", "make_cutter", "(", ")", "# neck", "if", "self", ".", "neck_length", ":", "# neck cut diameter (if thread is larger than the neck, thread must fit through)", "(", "inner_radius", ",", "outer_radius", ")", "=", "self", ".", "thread", ".", "get_radii", "(", ")", "neck_cut_radius", "=", "max", "(", "outer_radius", ",", "self", ".", "neck_diam", "/", "2", ")", "neck", "=", "cadquery", ".", "Workplane", "(", "'XY'", ",", "origin", "=", "(", "0", ",", "0", ",", "-", "self", ".", "neck_length", ")", ")", ".", "circle", "(", "neck_cut_radius", ")", ".", "extrude", "(", "self", ".", "neck_length", ")", "obj", "=", "obj", ".", "union", "(", "neck", ")", "# thread (pilot hole)", "pilot_hole", "=", "self", ".", "thread", ".", "make_pilothole_cutter", "(", ")", ".", "translate", "(", "(", "0", ",", "0", ",", "-", "self", ".", "length", ")", ")", "obj", "=", "obj", ".", "union", "(", "pilot_hole", ")", "return", "obj" ]
36.242424
23.333333
def lotus_root_data():
    """Tomographic X-ray data of a lotus root.

    Notes
    -----
    See the article `Tomographic X-ray data of a lotus root filled with
    attenuating objects`_ for further information.

    See Also
    --------
    lotus_root_geometry

    References
    ----------
    .. _Tomographic X-ray data of a lotus root filled with attenuating objects:
       https://arxiv.org/abs/1609.07299
    """
    # TODO: Store data in some ODL controlled url
    url = 'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat'
    dct = get_data('lotus_root.mat', subset=DATA_SUBSET, url=url)
    # Change axes to match ODL definitions
    data = np.swapaxes(dct['sinogram'], 0, 1)[:, :]
    data = data.astype('float')
    return data
[ "def", "lotus_root_data", "(", ")", ":", "# TODO: Store data in some ODL controlled url", "url", "=", "'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat'", "dct", "=", "get_data", "(", "'lotus_root.mat'", ",", "subset", "=", "DATA_SUBSET", ",", "url", "=", "url", ")", "# Change axes to match ODL definitions", "data", "=", "np", ".", "swapaxes", "(", "dct", "[", "'sinogram'", "]", ",", "0", ",", "1", ")", "[", ":", ",", ":", "]", "data", "=", "data", ".", "astype", "(", "'float'", ")", "return", "data" ]
27.807692
22.692308
def _normalizePoint(self, x, y):
    """Check if a point is in bounds and make minor adjustments.

    Respects Python's negative indexes.  -1 starts at the bottom right.
    Replaces the _drawable function
    """
    # cast to int, always faster than type checking
    x = int(x)
    y = int(y)

    assert (-self.width <= x < self.width) and \
           (-self.height <= y < self.height), \
           ('(%i, %i) is an invalid position on %s' % (x, y, self))

    # handle negative indexes
    return (x % self.width, y % self.height)
[ "def", "_normalizePoint", "(", "self", ",", "x", ",", "y", ")", ":", "# cast to int, always faster than type checking", "x", "=", "int", "(", "x", ")", "y", "=", "int", "(", "y", ")", "assert", "(", "-", "self", ".", "width", "<=", "x", "<", "self", ".", "width", ")", "and", "(", "-", "self", ".", "height", "<=", "y", "<", "self", ".", "height", ")", ",", "(", "'(%i, %i) is an invalid postition on %s'", "%", "(", "x", ",", "y", ",", "self", ")", ")", "# handle negative indexes", "return", "(", "x", "%", "self", ".", "width", ",", "y", "%", "self", ".", "height", ")" ]
35.6875
18.25
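The negative-index wrap is just Python's modulo, which always returns a non-negative result for a positive divisor; for a hypothetical 80×25 console:

    # width = 80, height = 25
    # (-1, -1) -> (-1 % 80, -1 % 25) == (79, 24)   the bottom-right cell
    # (5, -2)  -> (5 % 80, -2 % 25)  == (5, 23)
    # (80, 0) would fail the assert first: 80 is outside the [-80, 80) bound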
def parse_v3_unit_placement(placement_str):
    """Return a UnitPlacement for bundles version 3, given a placement string.

    See https://github.com/juju/charmstore/blob/v4/docs/bundles.md
    Raise a ValueError if the placement is not valid.
    """
    placement = placement_str
    container = machine = service = unit = ''
    if ':' in placement:
        try:
            container, placement = placement_str.split(':')
        except ValueError:
            msg = 'placement {} is malformed, too many parts'.format(
                placement_str)
            raise ValueError(msg.encode('utf-8'))
    if '=' in placement:
        try:
            placement, unit = placement.split('=')
        except ValueError:
            msg = 'placement {} is malformed, too many parts'.format(
                placement_str)
            raise ValueError(msg.encode('utf-8'))
    if placement.isdigit():
        machine = placement
    else:
        service = placement
    if (container and container not in VALID_CONTAINERS):
        msg = 'invalid container {} for placement {}'.format(
            container, placement_str)
        raise ValueError(msg.encode('utf-8'))
    unit = _parse_unit(unit, placement_str)
    if machine and machine != '0':
        raise ValueError(b'legacy bundles may not place units on machines '
                         b'other than 0')
    return UnitPlacement(container, machine, service, unit)
[ "def", "parse_v3_unit_placement", "(", "placement_str", ")", ":", "placement", "=", "placement_str", "container", "=", "machine", "=", "service", "=", "unit", "=", "''", "if", "':'", "in", "placement", ":", "try", ":", "container", ",", "placement", "=", "placement_str", ".", "split", "(", "':'", ")", "except", "ValueError", ":", "msg", "=", "'placement {} is malformed, too many parts'", ".", "format", "(", "placement_str", ")", "raise", "ValueError", "(", "msg", ".", "encode", "(", "'utf-8'", ")", ")", "if", "'='", "in", "placement", ":", "try", ":", "placement", ",", "unit", "=", "placement", ".", "split", "(", "'='", ")", "except", "ValueError", ":", "msg", "=", "'placement {} is malformed, too many parts'", ".", "format", "(", "placement_str", ")", "raise", "ValueError", "(", "msg", ".", "encode", "(", "'utf-8'", ")", ")", "if", "placement", ".", "isdigit", "(", ")", ":", "machine", "=", "placement", "else", ":", "service", "=", "placement", "if", "(", "container", "and", "container", "not", "in", "VALID_CONTAINERS", ")", ":", "msg", "=", "'invalid container {} for placement {}'", ".", "format", "(", "container", ",", "placement_str", ")", "raise", "ValueError", "(", "msg", ".", "encode", "(", "'utf-8'", ")", ")", "unit", "=", "_parse_unit", "(", "unit", ",", "placement_str", ")", "if", "machine", "and", "machine", "!=", "'0'", ":", "raise", "ValueError", "(", "b'legacy bundles may not place units on machines '", "b'other than 0'", ")", "return", "UnitPlacement", "(", "container", ",", "machine", ",", "service", ",", "unit", ")" ]
39.771429
14.828571
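Worked examples of the v3 `container:service=unit` grammar as handled above, assuming 'lxc' is listed in VALID_CONTAINERS (that constant and `_parse_unit` are defined elsewhere):

    parse_v3_unit_placement('mysql')        # service='mysql', everything else empty
    parse_v3_unit_placement('lxc:mysql=1')  # container='lxc', service='mysql', unit via _parse_unit('1', ...)
    parse_v3_unit_placement('0')            # bare digit -> machine='0'
    parse_v3_unit_placement('1')            # ValueError: legacy bundles may only target machine 0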
def update_contributions(sender, instance, action, model, pk_set, **kwargs):
    """Creates a contribution for each author added to an article.
    """
    if action != 'pre_add':
        return
    else:
        for author in model.objects.filter(pk__in=pk_set):
            update_content_contributions(instance, author)
[ "def", "update_contributions", "(", "sender", ",", "instance", ",", "action", ",", "model", ",", "pk_set", ",", "*", "*", "kwargs", ")", ":", "if", "action", "!=", "'pre_add'", ":", "return", "else", ":", "for", "author", "in", "model", ".", "objects", ".", "filter", "(", "pk__in", "=", "pk_set", ")", ":", "update_content_contributions", "(", "instance", ",", "author", ")" ]
39.375
17.75
def create_tarfile(self):
    """
    Create a tar file with the contents of the current directory
    """
    floyd_logger.info("Compressing data...")
    # Show progress bar (file_compressed/file_to_compress)
    self.__compression_bar = ProgressBar(expected_size=self.__files_to_compress, filled_char='=')

    # Auxiliary functions
    def dfilter_file_counter(tarinfo):
        """
        Dummy filter function used to track the progression at file levels.
        """
        self.__compression_bar.show(self.__files_compressed)
        self.__files_compressed += 1
        return tarinfo

    def warn_purge_exit(info_msg, filename, progress_bar, exit_msg):
        """
        Warn the user that's something went wrong,
        remove the tarball and provide an exit message.
        """
        progress_bar.done()
        floyd_logger.info(info_msg)
        rmtree(os.path.dirname(filename))
        sys.exit(exit_msg)

    try:
        # Define the default signal handler for catching: Ctrl-C
        signal.signal(signal.SIGINT, signal.default_int_handler)
        with tarfile.open(self.filename, "w:gz") as tar:
            tar.add(self.source_dir, arcname=os.path.basename(self.source_dir),
                    filter=dfilter_file_counter)
        self.__compression_bar.done()

    except (OSError, IOError) as e:
        # OSError: [Errno 13] Permission denied
        if e.errno == errno.EACCES:
            self.source_dir = os.getcwd() if self.source_dir == '.' else self.source_dir  # Expand cwd
            warn_purge_exit(info_msg="Permission denied. Removing compressed data...",
                            filename=self.filename,
                            progress_bar=self.__compression_bar,
                            exit_msg=("Permission denied. Make sure to have read permission "
                                      "for all the files and directories in the path: %s")
                            % (self.source_dir))
        # OSError: [Errno 28] No Space Left on Device (IOError on python2.7)
        elif e.errno == errno.ENOSPC:
            dir_path = os.path.dirname(self.filename)
            warn_purge_exit(info_msg="No space left. Removing compressed data...",
                            filename=self.filename,
                            progress_bar=self.__compression_bar,
                            exit_msg=("No space left when compressing your data in: %s.\n"
                                      "Make sure to have enough space before uploading your data.")
                            % (os.path.abspath(dir_path)))

    except KeyboardInterrupt:  # Purge tarball on Ctrl-C
        warn_purge_exit(info_msg="Ctrl-C signal detected: Removing compressed data...",
                        filename=self.filename,
                        progress_bar=self.__compression_bar,
                        exit_msg="Stopped the data upload gracefully.")
[ "def", "create_tarfile", "(", "self", ")", ":", "floyd_logger", ".", "info", "(", "\"Compressing data...\"", ")", "# Show progress bar (file_compressed/file_to_compress)", "self", ".", "__compression_bar", "=", "ProgressBar", "(", "expected_size", "=", "self", ".", "__files_to_compress", ",", "filled_char", "=", "'='", ")", "# Auxiliary functions", "def", "dfilter_file_counter", "(", "tarinfo", ")", ":", "\"\"\"\n Dummy filter function used to track the progression at file levels.\n \"\"\"", "self", ".", "__compression_bar", ".", "show", "(", "self", ".", "__files_compressed", ")", "self", ".", "__files_compressed", "+=", "1", "return", "tarinfo", "def", "warn_purge_exit", "(", "info_msg", ",", "filename", ",", "progress_bar", ",", "exit_msg", ")", ":", "\"\"\"\n Warn the user that's something went wrong,\n remove the tarball and provide an exit message.\n \"\"\"", "progress_bar", ".", "done", "(", ")", "floyd_logger", ".", "info", "(", "info_msg", ")", "rmtree", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "sys", ".", "exit", "(", "exit_msg", ")", "try", ":", "# Define the default signal handler for catching: Ctrl-C", "signal", ".", "signal", "(", "signal", ".", "SIGINT", ",", "signal", ".", "default_int_handler", ")", "with", "tarfile", ".", "open", "(", "self", ".", "filename", ",", "\"w:gz\"", ")", "as", "tar", ":", "tar", ".", "add", "(", "self", ".", "source_dir", ",", "arcname", "=", "os", ".", "path", ".", "basename", "(", "self", ".", "source_dir", ")", ",", "filter", "=", "dfilter_file_counter", ")", "self", ".", "__compression_bar", ".", "done", "(", ")", "except", "(", "OSError", ",", "IOError", ")", "as", "e", ":", "# OSError: [Errno 13] Permission denied", "if", "e", ".", "errno", "==", "errno", ".", "EACCES", ":", "self", ".", "source_dir", "=", "os", ".", "getcwd", "(", ")", "if", "self", ".", "source_dir", "==", "'.'", "else", "self", ".", "source_dir", "# Expand cwd", "warn_purge_exit", "(", "info_msg", "=", "\"Permission denied. Removing compressed data...\"", ",", "filename", "=", "self", ".", "filename", ",", "progress_bar", "=", "self", ".", "__compression_bar", ",", "exit_msg", "=", "(", "\"Permission denied. Make sure to have read permission \"", "\"for all the files and directories in the path: %s\"", ")", "%", "(", "self", ".", "source_dir", ")", ")", "# OSError: [Errno 28] No Space Left on Device (IOError on python2.7)", "elif", "e", ".", "errno", "==", "errno", ".", "ENOSPC", ":", "dir_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", "warn_purge_exit", "(", "info_msg", "=", "\"No space left. Removing compressed data...\"", ",", "filename", "=", "self", ".", "filename", ",", "progress_bar", "=", "self", ".", "__compression_bar", ",", "exit_msg", "=", "(", "\"No space left when compressing your data in: %s.\\n\"", "\"Make sure to have enough space before uploading your data.\"", ")", "%", "(", "os", ".", "path", ".", "abspath", "(", "dir_path", ")", ")", ")", "except", "KeyboardInterrupt", ":", "# Purge tarball on Ctrl-C", "warn_purge_exit", "(", "info_msg", "=", "\"Ctrl-C signal detected: Removing compressed data...\"", ",", "filename", "=", "self", ".", "filename", ",", "progress_bar", "=", "self", ".", "__compression_bar", ",", "exit_msg", "=", "\"Stopped the data upload gracefully.\"", ")" ]
52.327586
23.672414
def example_exc_handler(tries_remaining, exception, delay):
    """Example exception handler; prints a warning to stderr.

    tries_remaining: The number of tries remaining.
    exception: The exception instance which was raised.
    """
    print >> sys.stderr, "Caught '%s', %d tries remaining, sleeping for %s seconds" % (
        exception, tries_remaining, delay)
[ "def", "example_exc_handler", "(", "tries_remaining", ",", "exception", ",", "delay", ")", ":", "print", ">>", "sys", ".", "stderr", ",", "\"Caught '%s', %d tries remaining, sleeping for %s seconds\"", "%", "(", "exception", ",", "tries_remaining", ",", "delay", ")" ]
45.25
16.75
def writeTicks(self, ticks):
    ''' write ticks '''
    tName = self.tableName(HBaseDAM.TICK)
    if tName not in self.__hbase.getTableNames():
        self.__hbase.createTable(tName, [ColumnDescriptor(name=HBaseDAM.TICK, maxVersions=5)])

    for tick in ticks:
        self.__hbase.updateRow(self.tableName(HBaseDAM.TICK),
                               tick.time,
                               [Mutation(column="%s:%s" % (HBaseDAM.TICK, field),
                                         value=getattr(tick, field)) for field in TICK_FIELDS])
[ "def", "writeTicks", "(", "self", ",", "ticks", ")", ":", "tName", "=", "self", ".", "tableName", "(", "HBaseDAM", ".", "TICK", ")", "if", "tName", "not", "in", "self", ".", "__hbase", ".", "getTableNames", "(", ")", ":", "self", ".", "__hbase", ".", "createTable", "(", "tName", ",", "[", "ColumnDescriptor", "(", "name", "=", "HBaseDAM", ".", "TICK", ",", "maxVersions", "=", "5", ")", "]", ")", "for", "tick", "in", "ticks", ":", "self", ".", "__hbase", ".", "updateRow", "(", "self", ".", "tableName", "(", "HBaseDAM", ".", "TICK", ")", ",", "tick", ".", "time", ",", "[", "Mutation", "(", "column", "=", "\"%s:%s\"", "%", "(", "HBaseDAM", ".", "TICK", ",", "field", ")", ",", "value", "=", "getattr", "(", "tick", ",", "field", ")", ")", "for", "field", "in", "TICK_FIELDS", "]", ")" ]
53.272727
26.909091
def process_star(filename, output, *, extension, star_name,
                 period, shift, parameters, period_label, shift_label,
                 **kwargs):
    """Processes a star's lightcurve, prints its coefficients, and saves
    its plotted lightcurve to a file. Returns the result of get_lightcurve.
    """
    if star_name is None:
        basename = path.basename(filename)
        if basename.endswith(extension):
            star_name = basename[:-len(extension)]
        else:
            # file has wrong extension
            return
    if parameters is not None:
        if period is None:
            try:
                period = parameters[period_label][star_name]
            except KeyError:
                pass
        if shift is None:
            try:
                shift = parameters.loc[shift_label][star_name]
            except KeyError:
                pass
    result = get_lightcurve_from_file(filename, name=star_name,
                                      period=period, shift=shift,
                                      **kwargs)
    if result is None:
        return
    if output is not None:
        plot_lightcurve(star_name, result['lightcurve'], result['period'],
                        result['phased_data'], output=output, **kwargs)
    return result
[ "def", "process_star", "(", "filename", ",", "output", ",", "*", ",", "extension", ",", "star_name", ",", "period", ",", "shift", ",", "parameters", ",", "period_label", ",", "shift_label", ",", "*", "*", "kwargs", ")", ":", "if", "star_name", "is", "None", ":", "basename", "=", "path", ".", "basename", "(", "filename", ")", "if", "basename", ".", "endswith", "(", "extension", ")", ":", "star_name", "=", "basename", "[", ":", "-", "len", "(", "extension", ")", "]", "else", ":", "# file has wrong extension", "return", "if", "parameters", "is", "not", "None", ":", "if", "period", "is", "None", ":", "try", ":", "period", "=", "parameters", "[", "period_label", "]", "[", "star_name", "]", "except", "KeyError", ":", "pass", "if", "shift", "is", "None", ":", "try", ":", "shift", "=", "parameters", ".", "loc", "[", "shift_label", "]", "[", "star_name", "]", "except", "KeyError", ":", "pass", "result", "=", "get_lightcurve_from_file", "(", "filename", ",", "name", "=", "star_name", ",", "period", "=", "period", ",", "shift", "=", "shift", ",", "*", "*", "kwargs", ")", "if", "result", "is", "None", ":", "return", "if", "output", "is", "not", "None", ":", "plot_lightcurve", "(", "star_name", ",", "result", "[", "'lightcurve'", "]", ",", "result", "[", "'period'", "]", ",", "result", "[", "'phased_data'", "]", ",", "output", "=", "output", ",", "*", "*", "kwargs", ")", "return", "result" ]
37.352941
18.676471
def _progress(bytes_received, bytes_total, worker):
    """Emit download progress."""
    worker.sig_download_progress.emit(
        worker.url, worker.path, bytes_received, bytes_total)
[ "def", "_progress", "(", "bytes_received", ",", "bytes_total", ",", "worker", ")", ":", "worker", ".", "sig_download_progress", ".", "emit", "(", "worker", ".", "url", ",", "worker", ".", "path", ",", "bytes_received", ",", "bytes_total", ")" ]
49.25
9.5
def mro(*bases):
    """Calculate the Method Resolution Order of bases using the C3 algorithm.

    Suppose you intended creating a class K with the given base classes. This
    function returns the MRO which K would have, *excluding* K itself (since
    it doesn't yet exist), as if you had actually created the class.

    Another way of looking at this, if you pass a single class K, this will
    return the linearization of K (the MRO of K, *including* itself).

    Found at:
    http://code.activestate.com/recipes/577748-calculate-the-mro-of-a-class/
    """
    seqs = [list(C.__mro__) for C in bases] + [list(bases)]
    res = []
    while True:
        non_empty = list(filter(None, seqs))
        if not non_empty:
            # Nothing left to process, we're done.
            return tuple(res)
        for seq in non_empty:  # Find merge candidates among seq heads.
            candidate = seq[0]
            not_head = [s for s in non_empty if candidate in s[1:]]
            if not_head:
                # Reject the candidate.
                candidate = None
            else:
                break
        if not candidate:
            raise TypeError("inconsistent hierarchy, no C3 MRO is possible")
        res.append(candidate)
        for seq in non_empty:
            # Remove candidate.
            if seq[0] == candidate:
                del seq[0]
[ "def", "mro", "(", "*", "bases", ")", ":", "seqs", "=", "[", "list", "(", "C", ".", "__mro__", ")", "for", "C", "in", "bases", "]", "+", "[", "list", "(", "bases", ")", "]", "res", "=", "[", "]", "while", "True", ":", "non_empty", "=", "list", "(", "filter", "(", "None", ",", "seqs", ")", ")", "if", "not", "non_empty", ":", "# Nothing left to process, we're done.", "return", "tuple", "(", "res", ")", "for", "seq", "in", "non_empty", ":", "# Find merge candidates among seq heads.", "candidate", "=", "seq", "[", "0", "]", "not_head", "=", "[", "s", "for", "s", "in", "non_empty", "if", "candidate", "in", "s", "[", "1", ":", "]", "]", "if", "not_head", ":", "# Reject the candidate.", "candidate", "=", "None", "else", ":", "break", "if", "not", "candidate", ":", "raise", "TypeError", "(", "\"inconsistent hierarchy, no C3 MRO is possible\"", ")", "res", ".", "append", "(", "candidate", ")", "for", "seq", "in", "non_empty", ":", "# Remove candidate.", "if", "seq", "[", "0", "]", "==", "candidate", ":", "del", "seq", "[", "0", "]" ]
38.285714
20.571429
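A quick cross-check of the C3 result against Python's own MRO machinery, using a classic diamond; `mro(B, C)` is the order a new `class K(B, C)` would see, excluding K itself:

    class A(object): pass
    class B(A): pass
    class C(A): pass

    print(mro(B, C))                    # (B, C, A, object)

    class K(B, C): pass
    assert mro(B, C) == K.__mro__[1:]   # identical linearization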
def createFromURL(urlOfXMLDefinition):
    """Factory method to create a DeviceTR64 from an URL to the XML device definitions.

    :param str urlOfXMLDefinition: the URL to the XML device definition
    :return: the new object
    :rtype: Wifi
    """
    url = urlparse(urlOfXMLDefinition)

    if not url.port:
        if url.scheme.lower() == "https":
            port = 443
        else:
            port = 80
    else:
        port = url.port

    return Wifi(url.hostname, port, url.scheme)
[ "def", "createFromURL", "(", "urlOfXMLDefinition", ")", ":", "url", "=", "urlparse", "(", "urlOfXMLDefinition", ")", "if", "not", "url", ".", "port", ":", "if", "url", ".", "scheme", ".", "lower", "(", ")", "==", "\"https\"", ":", "port", "=", "443", "else", ":", "port", "=", "80", "else", ":", "port", "=", "url", ".", "port", "return", "Wifi", "(", "url", ".", "hostname", ",", "port", ",", "url", ".", "scheme", ")" ]
27.722222
15.5
def parse(grid_str, mode=MODE_ZINC, charset='utf-8'):
    '''
    Parse the given Zinc text and return the equivalent data.
    '''
    # Decode incoming text (or python3 will whine!)
    if isinstance(grid_str, six.binary_type):
        grid_str = grid_str.decode(encoding=charset)

    # Split the separate grids up, the grammar definition has trouble splitting
    # them up normally.  This will truncate the newline off the end of the last
    # row.
    _parse = functools.partial(parse_grid, mode=mode, charset=charset)
    if mode == MODE_JSON:
        if isinstance(grid_str, six.string_types):
            grid_data = json.loads(grid_str)
        else:
            grid_data = grid_str
        if isinstance(grid_data, dict):
            return _parse(grid_data)
        else:
            return list(map(_parse, grid_data))
    else:
        return list(map(_parse, GRID_SEP.split(grid_str.rstrip())))
[ "def", "parse", "(", "grid_str", ",", "mode", "=", "MODE_ZINC", ",", "charset", "=", "'utf-8'", ")", ":", "# Decode incoming text (or python3 will whine!)", "if", "isinstance", "(", "grid_str", ",", "six", ".", "binary_type", ")", ":", "grid_str", "=", "grid_str", ".", "decode", "(", "encoding", "=", "charset", ")", "# Split the separate grids up, the grammar definition has trouble splitting", "# them up normally. This will truncate the newline off the end of the last", "# row.", "_parse", "=", "functools", ".", "partial", "(", "parse_grid", ",", "mode", "=", "mode", ",", "charset", "=", "charset", ")", "if", "mode", "==", "MODE_JSON", ":", "if", "isinstance", "(", "grid_str", ",", "six", ".", "string_types", ")", ":", "grid_data", "=", "json", ".", "loads", "(", "grid_str", ")", "else", ":", "grid_data", "=", "grid_str", "if", "isinstance", "(", "grid_data", ",", "dict", ")", ":", "return", "_parse", "(", "grid_data", ")", "else", ":", "return", "list", "(", "map", "(", "_parse", ",", "grid_data", ")", ")", "else", ":", "return", "list", "(", "map", "(", "_parse", ",", "GRID_SEP", ".", "split", "(", "grid_str", ".", "rstrip", "(", ")", ")", ")", ")" ]
37.5
19.25
def Enable(self, value):
    "enable or disable all top menus"
    for i in range(self.GetMenuCount()):
        self.EnableTop(i, value)
[ "def", "Enable", "(", "self", ",", "value", ")", ":", "for", "i", "in", "range", "(", "self", ".", "GetMenuCount", "(", ")", ")", ":", "self", ".", "EnableTop", "(", "i", ",", "value", ")" ]
37
6.5
def get_lines_data(self):
    """
    Returns string:line_numbers list.
    Since all strings are unique it is OK to get line numbers this way.
    Since the same string can occur several times inside a single .json file,
    the values should be popped (FIFO) from the list.
    :rtype: list
    """
    encoding = 'utf-8'
    for token in tokenize(self.data.decode(encoding)):
        if token.type == 'operator':
            if token.value == '{':
                self.start_object()
            elif token.value == ':':
                self.with_separator(token)
            elif token.value == '}':
                self.end_object()
            elif token.value == ',':
                self.end_pair()
        elif token.type == 'string':
            if self.state == 'key':
                self.current_key = unquote_string(token.value)
                if self.current_key == JSON_GETTEXT_KEYWORD:
                    self.gettext_mode = True

            #==value not actually used, but if only key was met (like in list) it still will be used. The important part, that key wont be parsed as value, not reversal
            if self.gettext_mode:
                if self.current_key == JSON_GETTEXT_KEY_CONTENT:
                    self.token_to_add = token
                elif self.current_key == JSON_GETTEXT_KEY_ALT_CONTENT:
                    self.token_params['alt_token'] = token
                elif self.current_key == JSON_GETTEXT_KEY_FUNCNAME:
                    self.token_params['funcname'] = token.value
                else:
                    self.token_to_add = token

    return self.results
[ "def", "get_lines_data", "(", "self", ")", ":", "encoding", "=", "'utf-8'", "for", "token", "in", "tokenize", "(", "self", ".", "data", ".", "decode", "(", "encoding", ")", ")", ":", "if", "token", ".", "type", "==", "'operator'", ":", "if", "token", ".", "value", "==", "'{'", ":", "self", ".", "start_object", "(", ")", "elif", "token", ".", "value", "==", "':'", ":", "self", ".", "with_separator", "(", "token", ")", "elif", "token", ".", "value", "==", "'}'", ":", "self", ".", "end_object", "(", ")", "elif", "token", ".", "value", "==", "','", ":", "self", ".", "end_pair", "(", ")", "elif", "token", ".", "type", "==", "'string'", ":", "if", "self", ".", "state", "==", "'key'", ":", "self", ".", "current_key", "=", "unquote_string", "(", "token", ".", "value", ")", "if", "self", ".", "current_key", "==", "JSON_GETTEXT_KEYWORD", ":", "self", ".", "gettext_mode", "=", "True", "#==value not actually used, but if only key was met (like in list) it still will be used. The important part, that key wont be parsed as value, not reversal", "if", "self", ".", "gettext_mode", ":", "if", "self", ".", "current_key", "==", "JSON_GETTEXT_KEY_CONTENT", ":", "self", ".", "token_to_add", "=", "token", "elif", "self", ".", "current_key", "==", "JSON_GETTEXT_KEY_ALT_CONTENT", ":", "self", ".", "token_params", "[", "'alt_token'", "]", "=", "token", "elif", "self", ".", "current_key", "==", "JSON_GETTEXT_KEY_FUNCNAME", ":", "self", ".", "token_params", "[", "'funcname'", "]", "=", "token", ".", "value", "else", ":", "self", ".", "token_to_add", "=", "token", "return", "self", ".", "results" ]
41.7
20.2
def subsample(self, down_to=1, new_path=None):
    """Pick a number of sequences from the file pseudo-randomly."""
    # Auto path #
    if new_path is None:
        subsampled = self.__class__(new_temp_path())
    elif isinstance(new_path, FASTA):
        subsampled = new_path
    else:
        subsampled = self.__class__(new_path)
    # Check size #
    if down_to > len(self):
        message = "Can't subsample %s down to %i. Only down to %i."
        print Color.ylw + message % (self, down_to, len(self)) + Color.end
        self.copy(new_path)
        return
    # Do it #
    subsampled.create()
    for seq in isubsample(self, down_to):
        subsampled.add_seq(seq)
    subsampled.close()
    # Did it work #
    assert len(subsampled) == down_to
    return subsampled
[ "def", "subsample", "(", "self", ",", "down_to", "=", "1", ",", "new_path", "=", "None", ")", ":", "# Auto path #", "if", "new_path", "is", "None", ":", "subsampled", "=", "self", ".", "__class__", "(", "new_temp_path", "(", ")", ")", "elif", "isinstance", "(", "new_path", ",", "FASTA", ")", ":", "subsampled", "=", "new_path", "else", ":", "subsampled", "=", "self", ".", "__class__", "(", "new_path", ")", "# Check size #", "if", "down_to", ">", "len", "(", "self", ")", ":", "message", "=", "\"Can't subsample %s down to %i. Only down to %i.\"", "print", "Color", ".", "ylw", "+", "message", "%", "(", "self", ",", "down_to", ",", "len", "(", "self", ")", ")", "+", "Color", ".", "end", "self", ".", "copy", "(", "new_path", ")", "return", "# Do it #", "subsampled", ".", "create", "(", ")", "for", "seq", "in", "isubsample", "(", "self", ",", "down_to", ")", ":", "subsampled", ".", "add_seq", "(", "seq", ")", "subsampled", ".", "close", "(", ")", "# Did it work #", "assert", "len", "(", "subsampled", ")", "==", "down_to", "return", "subsampled" ]
43.105263
18.210526
def get_interpolated_fd_waveform(dtype=numpy.complex64, return_hc=True,
                                 **params):
    """ Return a fourier domain waveform approximant, using interpolation
    """

    def rulog2(val):
        return 2.0 ** numpy.ceil(numpy.log2(float(val)))

    orig_approx = params['approximant']
    params['approximant'] = params['approximant'].replace('_INTERP', '')
    df = params['delta_f']

    if 'duration' not in params:
        duration = get_waveform_filter_length_in_time(**params)
    elif params['duration'] > 0:
        duration = params['duration']
    else:
        err_msg = "Waveform duration must be greater than 0."
        raise ValueError(err_msg)

    #FIXME We should try to get this length directly somehow
    # I think this number should be conservative
    ringdown_padding = 0.5

    df_min = 1.0 / rulog2(duration + ringdown_padding)
    # FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop
    # off the inspiral when using ringdown_padding - 0.5.
    # Also, if ringdown_padding is set to a very small
    # value we can see cases where the ringdown is chopped.
    if df_min > 0.5:
        df_min = 0.5
    params['delta_f'] = df_min
    hp, hc = get_fd_waveform(**params)
    hp = hp.astype(dtype)
    if return_hc:
        hc = hc.astype(dtype)
    else:
        hc = None

    f_end = get_waveform_end_frequency(**params)
    if f_end is None:
        f_end = (len(hp) - 1) * hp.delta_f
    if 'f_final' in params and params['f_final'] > 0:
        f_end_params = params['f_final']
        if f_end is not None:
            f_end = min(f_end_params, f_end)

    n_min = int(rulog2(f_end / df_min)) + 1
    if n_min < len(hp):
        hp = hp[:n_min]
        if hc is not None:
            hc = hc[:n_min]

    offset = int(ringdown_padding * (len(hp)-1)*2 * hp.delta_f)

    hp = interpolate_complex_frequency(hp, df, zeros_offset=offset, side='left')
    if hc is not None:
        hc = interpolate_complex_frequency(hc, df, zeros_offset=offset,
                                           side='left')
    params['approximant'] = orig_approx
    return hp, hc
[ "def", "get_interpolated_fd_waveform", "(", "dtype", "=", "numpy", ".", "complex64", ",", "return_hc", "=", "True", ",", "*", "*", "params", ")", ":", "def", "rulog2", "(", "val", ")", ":", "return", "2.0", "**", "numpy", ".", "ceil", "(", "numpy", ".", "log2", "(", "float", "(", "val", ")", ")", ")", "orig_approx", "=", "params", "[", "'approximant'", "]", "params", "[", "'approximant'", "]", "=", "params", "[", "'approximant'", "]", ".", "replace", "(", "'_INTERP'", ",", "''", ")", "df", "=", "params", "[", "'delta_f'", "]", "if", "'duration'", "not", "in", "params", ":", "duration", "=", "get_waveform_filter_length_in_time", "(", "*", "*", "params", ")", "elif", "params", "[", "'duration'", "]", ">", "0", ":", "duration", "=", "params", "[", "'duration'", "]", "else", ":", "err_msg", "=", "\"Waveform duration must be greater than 0.\"", "raise", "ValueError", "(", "err_msg", ")", "#FIXME We should try to get this length directly somehow", "# I think this number should be conservative", "ringdown_padding", "=", "0.5", "df_min", "=", "1.0", "/", "rulog2", "(", "duration", "+", "ringdown_padding", ")", "# FIXME: I don't understand this, but waveforms with df_min < 0.5 will chop", "# off the inspiral when using ringdown_padding - 0.5.", "# Also, if ringdown_padding is set to a very small", "# value we can see cases where the ringdown is chopped.", "if", "df_min", ">", "0.5", ":", "df_min", "=", "0.5", "params", "[", "'delta_f'", "]", "=", "df_min", "hp", ",", "hc", "=", "get_fd_waveform", "(", "*", "*", "params", ")", "hp", "=", "hp", ".", "astype", "(", "dtype", ")", "if", "return_hc", ":", "hc", "=", "hc", ".", "astype", "(", "dtype", ")", "else", ":", "hc", "=", "None", "f_end", "=", "get_waveform_end_frequency", "(", "*", "*", "params", ")", "if", "f_end", "is", "None", ":", "f_end", "=", "(", "len", "(", "hp", ")", "-", "1", ")", "*", "hp", ".", "delta_f", "if", "'f_final'", "in", "params", "and", "params", "[", "'f_final'", "]", ">", "0", ":", "f_end_params", "=", "params", "[", "'f_final'", "]", "if", "f_end", "is", "not", "None", ":", "f_end", "=", "min", "(", "f_end_params", ",", "f_end", ")", "n_min", "=", "int", "(", "rulog2", "(", "f_end", "/", "df_min", ")", ")", "+", "1", "if", "n_min", "<", "len", "(", "hp", ")", ":", "hp", "=", "hp", "[", ":", "n_min", "]", "if", "hc", "is", "not", "None", ":", "hc", "=", "hc", "[", ":", "n_min", "]", "offset", "=", "int", "(", "ringdown_padding", "*", "(", "len", "(", "hp", ")", "-", "1", ")", "*", "2", "*", "hp", ".", "delta_f", ")", "hp", "=", "interpolate_complex_frequency", "(", "hp", ",", "df", ",", "zeros_offset", "=", "offset", ",", "side", "=", "'left'", ")", "if", "hc", "is", "not", "None", ":", "hc", "=", "interpolate_complex_frequency", "(", "hc", ",", "df", ",", "zeros_offset", "=", "offset", ",", "side", "=", "'left'", ")", "params", "[", "'approximant'", "]", "=", "orig_approx", "return", "hp", ",", "hc" ]
34.47541
19.196721
def get_command_class(self, cmd):
    """
    Returns command class from the registry for a given ``cmd``.

    :param cmd: command to run (key at the registry)
    """
    try:
        cmdpath = self.registry[cmd]
    except KeyError:
        raise CommandError("No such command %r" % cmd)
    if isinstance(cmdpath, basestring):
        Command = import_class(cmdpath)
    else:
        Command = cmdpath
    return Command
[ "def", "get_command_class", "(", "self", ",", "cmd", ")", ":", "try", ":", "cmdpath", "=", "self", ".", "registry", "[", "cmd", "]", "except", "KeyError", ":", "raise", "CommandError", "(", "\"No such command %r\"", "%", "cmd", ")", "if", "isinstance", "(", "cmdpath", ",", "basestring", ")", ":", "Command", "=", "import_class", "(", "cmdpath", ")", "else", ":", "Command", "=", "cmdpath", "return", "Command" ]
30.866667
14.333333
def addresses_for_key(gpg, key):
    """
    Takes a key and extracts the email addresses for it.
    """
    fingerprint = key["fingerprint"]
    addresses = []
    for key in gpg.list_keys():
        if key["fingerprint"] == fingerprint:
            addresses.extend([address.split("<")[-1].strip(">")
                              for address in key["uids"] if address])
    return addresses
[ "def", "addresses_for_key", "(", "gpg", ",", "key", ")", ":", "fingerprint", "=", "key", "[", "\"fingerprint\"", "]", "addresses", "=", "[", "]", "for", "key", "in", "gpg", ".", "list_keys", "(", ")", ":", "if", "key", "[", "\"fingerprint\"", "]", "==", "fingerprint", ":", "addresses", ".", "extend", "(", "[", "address", ".", "split", "(", "\"<\"", ")", "[", "-", "1", "]", ".", "strip", "(", "\">\"", ")", "for", "address", "in", "key", "[", "\"uids\"", "]", "if", "address", "]", ")", "return", "addresses" ]
35.818182
12.363636
def cast_bytes(s, encoding=None):
    """Source: https://github.com/ipython/ipython_genutils"""
    if not isinstance(s, bytes):
        return encode(s, encoding)
    return s
[ "def", "cast_bytes", "(", "s", ",", "encoding", "=", "None", ")", ":", "if", "not", "isinstance", "(", "s", ",", "bytes", ")", ":", "return", "encode", "(", "s", ",", "encoding", ")", "return", "s" ]
34.4
9.8
def _dataset_merge_filestore_resource(self, resource, updated_resource,
                                      filestore_resources, ignore_fields):
    # type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None
    """Helper method to merge updated resource from dataset into HDX resource read from HDX including filestore.

    Args:
        resource (hdx.data.Resource): Resource read from HDX
        updated_resource (hdx.data.Resource): Updated resource from dataset
        filestore_resources (List[hdx.data.Resource]): List of resources that use filestore (to be appended to)
        ignore_fields (List[str]): List of fields to ignore when checking resource

    Returns:
        None
    """
    if updated_resource.get_file_to_upload():
        resource.set_file_to_upload(updated_resource.get_file_to_upload())
        filestore_resources.append(resource)
    merge_two_dictionaries(resource, updated_resource)
    resource.check_required_fields(ignore_fields=ignore_fields)
    if resource.get_file_to_upload():
        resource['url'] = Dataset.temporary_url
[ "def", "_dataset_merge_filestore_resource", "(", "self", ",", "resource", ",", "updated_resource", ",", "filestore_resources", ",", "ignore_fields", ")", ":", "# type: (hdx.data.Resource, hdx.data.Resource, List[hdx.data.Resource], List[str]) -> None", "if", "updated_resource", ".", "get_file_to_upload", "(", ")", ":", "resource", ".", "set_file_to_upload", "(", "updated_resource", ".", "get_file_to_upload", "(", ")", ")", "filestore_resources", ".", "append", "(", "resource", ")", "merge_two_dictionaries", "(", "resource", ",", "updated_resource", ")", "resource", ".", "check_required_fields", "(", "ignore_fields", "=", "ignore_fields", ")", "if", "resource", ".", "get_file_to_upload", "(", ")", ":", "resource", "[", "'url'", "]", "=", "Dataset", ".", "temporary_url" ]
55.7
28.85
def _get_cores_memory(data, downscale=2):
    """Retrieve cores and memory, using samtools as baseline.

    For memory, scaling down because we share with alignment and de-duplication.
    """
    resources = config_utils.get_resources("samtools", data["config"])
    num_cores = data["config"]["algorithm"].get("num_cores", 1)
    max_mem = config_utils.adjust_memory(resources.get("memory", "2G"),
                                         downscale, "decrease").upper()
    return num_cores, max_mem
[ "def", "_get_cores_memory", "(", "data", ",", "downscale", "=", "2", ")", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "\"samtools\"", ",", "data", "[", "\"config\"", "]", ")", "num_cores", "=", "data", "[", "\"config\"", "]", "[", "\"algorithm\"", "]", ".", "get", "(", "\"num_cores\"", ",", "1", ")", "max_mem", "=", "config_utils", ".", "adjust_memory", "(", "resources", ".", "get", "(", "\"memory\"", ",", "\"2G\"", ")", ",", "downscale", ",", "\"decrease\"", ")", ".", "upper", "(", ")", "return", "num_cores", ",", "max_mem" ]
49.3
20.7
def _parse(batch_cmd):
    """
    :rtype: (sh_cmd, batch_to_file_s, batch_from_file)
    :returns: parsed result like below:

    .. code-block:: python

        # when parsing 'diff IN_BATCH0 IN_BATCH1 > OUT_BATCH'
        (
            'diff /tmp/relshell-AbCDeF /tmp/relshell-uVwXyz',
            ( <instance of BatchToFile>, <instance of BatchToFile> )  # (IN_BATCH0, IN_BATCH1)
            'STDOUT',
        )
    """
    cmd_array = shlex.split(batch_cmd)
    (cmd_array, batch_to_file_s) = BatchCommand._parse_in_batches(cmd_array)
    (cmd_array, batch_from_file) = BatchCommand._parse_out_batch(cmd_array)
    return (list2cmdline(cmd_array), batch_to_file_s, batch_from_file)
[ "def", "_parse", "(", "batch_cmd", ")", ":", "cmd_array", "=", "shlex", ".", "split", "(", "batch_cmd", ")", "(", "cmd_array", ",", "batch_to_file_s", ")", "=", "BatchCommand", ".", "_parse_in_batches", "(", "cmd_array", ")", "(", "cmd_array", ",", "batch_from_file", ")", "=", "BatchCommand", ".", "_parse_out_batch", "(", "cmd_array", ")", "return", "(", "list2cmdline", "(", "cmd_array", ")", ",", "batch_to_file_s", ",", "batch_from_file", ")" ]
41.777778
24.666667
def parse_content(self, content):
    """
    Use all the defined scanners to search the log file, setting the
    properties defined in the scanner.
    """
    self.lines = content
    for scanner in self.scanners:
        scanner(self)
[ "def", "parse_content", "(", "self", ",", "content", ")", ":", "self", ".", "lines", "=", "content", "for", "scanner", "in", "self", ".", "scanners", ":", "scanner", "(", "self", ")" ]
32.375
8.875
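parse_content delegates all real work to registered scanner callables; a self-contained toy (not the real parser framework's API) showing the shape of such a scanner:

class LogParser:
    scanners = []

    @classmethod
    def add_scanner(cls, fn):
        # Register a callable that inspects self.lines and sets attributes.
        cls.scanners.append(fn)
        return fn

    def parse_content(self, content):
        self.lines = content
        for scanner in self.scanners:
            scanner(self)

@LogParser.add_scanner
def count_errors(parser):
    parser.error_count = sum(1 for line in parser.lines if 'ERROR' in line)

p = LogParser()
p.parse_content(['ok', 'ERROR: disk full'])
print(p.error_count)  # 1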
def scatter(self, x, y, **kwargs): """Plot a scatter chart using metadata columns see pyam.plotting.scatter() for all available options """ variables = self.data['variable'].unique() xisvar = x in variables yisvar = y in variables if not xisvar and not yisvar: cols = [x, y] + self._discover_meta_cols(**kwargs) df = self.meta[cols].reset_index() elif xisvar and yisvar: # filter pivot both and rename dfx = ( self .filter(variable=x) .as_pandas(with_metadata=kwargs) .rename(columns={'value': x, 'unit': 'xunit'}) .set_index(YEAR_IDX) .drop('variable', axis=1) ) dfy = ( self .filter(variable=y) .as_pandas(with_metadata=kwargs) .rename(columns={'value': y, 'unit': 'yunit'}) .set_index(YEAR_IDX) .drop('variable', axis=1) ) df = dfx.join(dfy, lsuffix='_left', rsuffix='').reset_index() else: # filter, merge with meta, and rename value column to match var var = x if xisvar else y df = ( self .filter(variable=var) .as_pandas(with_metadata=kwargs) .rename(columns={'value': var}) ) ax = plotting.scatter(df.dropna(), x, y, **kwargs) return ax
[ "def", "scatter", "(", "self", ",", "x", ",", "y", ",", "*", "*", "kwargs", ")", ":", "variables", "=", "self", ".", "data", "[", "'variable'", "]", ".", "unique", "(", ")", "xisvar", "=", "x", "in", "variables", "yisvar", "=", "y", "in", "variables", "if", "not", "xisvar", "and", "not", "yisvar", ":", "cols", "=", "[", "x", ",", "y", "]", "+", "self", ".", "_discover_meta_cols", "(", "*", "*", "kwargs", ")", "df", "=", "self", ".", "meta", "[", "cols", "]", ".", "reset_index", "(", ")", "elif", "xisvar", "and", "yisvar", ":", "# filter pivot both and rename", "dfx", "=", "(", "self", ".", "filter", "(", "variable", "=", "x", ")", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "x", ",", "'unit'", ":", "'xunit'", "}", ")", ".", "set_index", "(", "YEAR_IDX", ")", ".", "drop", "(", "'variable'", ",", "axis", "=", "1", ")", ")", "dfy", "=", "(", "self", ".", "filter", "(", "variable", "=", "y", ")", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "y", ",", "'unit'", ":", "'yunit'", "}", ")", ".", "set_index", "(", "YEAR_IDX", ")", ".", "drop", "(", "'variable'", ",", "axis", "=", "1", ")", ")", "df", "=", "dfx", ".", "join", "(", "dfy", ",", "lsuffix", "=", "'_left'", ",", "rsuffix", "=", "''", ")", ".", "reset_index", "(", ")", "else", ":", "# filter, merge with meta, and rename value column to match var", "var", "=", "x", "if", "xisvar", "else", "y", "df", "=", "(", "self", ".", "filter", "(", "variable", "=", "var", ")", ".", "as_pandas", "(", "with_metadata", "=", "kwargs", ")", ".", "rename", "(", "columns", "=", "{", "'value'", ":", "var", "}", ")", ")", "ax", "=", "plotting", ".", "scatter", "(", "df", ".", "dropna", "(", ")", ",", "x", ",", "y", ",", "*", "*", "kwargs", ")", "return", "ax" ]
36.414634
14.146341
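The two-variable branch above is an index join on (model, scenario, region, year); the same move on toy data, with the column names assumed to mirror pyam's YEAR_IDX:

import pandas as pd

idx = ['model', 'scenario', 'region', 'year']
dfx = pd.DataFrame([['m1', 's1', 'World', 2030, 1.5]], columns=idx + ['Temperature'])
dfy = pd.DataFrame([['m1', 's1', 'World', 2030, 42.0]], columns=idx + ['Emissions'])
# Pivot each variable into its own column, then align on the shared index.
df = dfx.set_index(idx).join(dfy.set_index(idx)).reset_index()
print(df)  # one row carrying both Temperature and Emissions columns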
def write(self) -> None:
    "Writes model gradient statistics to TensorBoard."
    if len(self.gradients) == 0: return
    norms = [x.data.norm() for x in self.gradients]
    self._write_avg_norm(norms=norms)
    self._write_median_norm(norms=norms)
    self._write_max_norm(norms=norms)
    self._write_min_norm(norms=norms)
    self._write_num_zeros()
    self._write_avg_gradient()
    self._write_median_gradient()
    self._write_max_gradient()
    self._write_min_gradient()
[ "def", "write", "(", "self", ")", "->", "None", ":", "if", "len", "(", "self", ".", "gradients", ")", "==", "0", ":", "return", "norms", "=", "[", "x", ".", "data", ".", "norm", "(", ")", "for", "x", "in", "self", ".", "gradients", "]", "self", ".", "_write_avg_norm", "(", "norms", "=", "norms", ")", "self", ".", "_write_median_norm", "(", "norms", "=", "norms", ")", "self", ".", "_write_max_norm", "(", "norms", "=", "norms", ")", "self", ".", "_write_min_norm", "(", "norms", "=", "norms", ")", "self", ".", "_write_num_zeros", "(", ")", "self", ".", "_write_avg_gradient", "(", ")", "self", ".", "_write_median_gradient", "(", ")", "self", ".", "_write_max_gradient", "(", ")", "self", ".", "_write_min_gradient", "(", ")" ]
39.615385
7
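Where such gradient statistics come from in PyTorch, assuming self.gradients was filled from model parameters after a backward pass (the small linear model here is just a stand-in):

import torch
import torch.nn as nn

model = nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).sum()
loss.backward()

# Collect the gradients, then reduce them to scalar summary statistics.
gradients = [p.grad for p in model.parameters() if p.grad is not None]
norms = [g.data.norm().item() for g in gradients]
print("avg=%.4f max=%.4f min=%.4f" % (sum(norms) / len(norms), max(norms), min(norms)))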
def get_all_supported_exts_for_type(self, type_to_match: Type[Any], strict: bool) -> Set[str]:
    """
    Utility method to return the set of all supported file extensions that may be converted to objects of the given
    type. Passing type_to_match=JOKER acts as a wildcard meaning all types.

    :param type_to_match:
    :param strict:
    :return:
    """
    matching = self.find_all_matching_parsers(desired_type=type_to_match, strict=strict)[0]
    return {ext for exts in [p.supported_exts for p in (matching[0] + matching[1] + matching[2])] for ext in exts}
[ "def", "get_all_supported_exts_for_type", "(", "self", ",", "type_to_match", ":", "Type", "[", "Any", "]", ",", "strict", ":", "bool", ")", "->", "Set", "[", "str", "]", ":", "matching", "=", "self", ".", "find_all_matching_parsers", "(", "desired_type", "=", "type_to_match", ",", "strict", "=", "strict", ")", "[", "0", "]", "return", "{", "ext", "for", "exts", "in", "[", "p", ".", "supported_exts", "for", "p", "in", "(", "matching", "[", "0", "]", "+", "matching", "[", "1", "]", "+", "matching", "[", "2", "]", ")", "]", "for", "ext", "in", "exts", "}" ]
48.833333
30.5
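The return statement above flattens a list of per-parser extension tuples into one set; the same comprehension on plain data makes the shape explicit:

# Each parser contributes a tuple of extensions; the nested comprehension
# iterates tuples first, then their members, deduplicating via the set.
supported_exts = [('.csv', '.txt'), ('.csv', '.json'), ('.yaml',)]
all_exts = {ext for exts in supported_exts for ext in exts}
print(all_exts)  # {'.csv', '.txt', '.json', '.yaml'} (set order may vary)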
def lowpass_fir(self, frequency, order, beta=5.0, remove_corrupted=True):
    """ Lowpass filter the time series using an FIR filter generated from
    the ideal response passed through a Kaiser window (beta = 5.0).

    Parameters
    ----------
    frequency: float
        The frequency above which the signal is suppressed.
    order: int
        Number of corrupted samples on each side of the time series
    beta: float
        Beta parameter of the Kaiser window that sets the side lobe
        attenuation.
    remove_corrupted : {True, boolean}
        If True, the region of the time series corrupted by the filtering
        is excised before returning. If False, the corrupted regions are
        not excised and the full time series is returned.

    Returns
    -------
    TimeSeries
        The low-passed time series.
    """
    from pycbc.filter import lowpass_fir
    ts = lowpass_fir(self, frequency, order, beta=beta)
    if remove_corrupted:
        ts = ts[order:len(ts)-order]
    return ts
[ "def", "lowpass_fir", "(", "self", ",", "frequency", ",", "order", ",", "beta", "=", "5.0", ",", "remove_corrupted", "=", "True", ")", ":", "from", "pycbc", ".", "filter", "import", "lowpass_fir", "ts", "=", "lowpass_fir", "(", "self", ",", "frequency", ",", "order", ",", "beta", "=", "beta", ")", "if", "remove_corrupted", ":", "ts", "=", "ts", "[", "order", ":", "len", "(", "ts", ")", "-", "order", "]", "return", "ts" ]
44.041667
19.208333
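A generic illustration of the edge-trimming idea using scipy rather than pycbc (whose lowpass_fir the method delegates to): an order-N FIR filter corrupts roughly N samples at each end, so they are sliced away exactly as in the method body. The sample rate, cutoff, and order here are arbitrary:

import numpy as np
from scipy.signal import firwin, lfilter

fs, cutoff, order = 1024.0, 50.0, 64
x = np.random.randn(4096)
# Kaiser-windowed FIR lowpass, mirroring the beta=5.0 default above.
taps = firwin(order * 2 + 1, cutoff, fs=fs, window=('kaiser', 5.0))
y = lfilter(taps, 1.0, x)
y_clean = y[order:len(y) - order]  # drop the corrupted samples on each side
print(len(x), len(y_clean))  # 4096 3968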
def _set_level_1(self, v, load=False): """ Setter method for level_1, mapped from YANG variable /routing_system/router/isis/router_isis_cmds_holder/address_family/ipv6/af_ipv6_unicast/af_ipv6_attributes/af_common_attributes/redistribute/isis/level_1 (container) If this variable is read-only (config: false) in the source YANG file, then _set_level_1 is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_level_1() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=level_1.level_1, is_container='container', presence=False, yang_name="level-1", rest_name="level-1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Level-1 routes'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """level_1 must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=level_1.level_1, is_container='container', presence=False, yang_name="level-1", rest_name="level-1", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Level-1 routes'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)""", }) self.__level_1 = t if hasattr(self, '_set'): self._set()
[ "def", "_set_level_1", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "level_1", ".", "level_1", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"level-1\"", ",", "rest_name", "=", "\"level-1\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Level-1 routes'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-isis'", ",", "defining_module", "=", "'brocade-isis'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"level_1 must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=level_1.level_1, is_container='container', presence=False, yang_name=\"level-1\", rest_name=\"level-1\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Level-1 routes'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__level_1", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
75.181818
37.772727
def listsdm(sdm, file=None):
    """Generate a standard "listsdm" listing of (A)SDM dataset contents.

    sdm (str)
      The path to the (A)SDM dataset to parse
    file (stream-like object, such as an opened file)
      Where to print the human-readable listing. If unspecified, results
      go to :data:`sys.stdout`.

    Returns
      A dictionary of information about the dataset. Contents not yet
      documented.

    Example::

      from pwkit.environments.casa import tasks
      tasks.listsdm('myalmaa.asdm')

    This code is based on CASA's `task_listsdm.py`, with this version info::

      # v1.0: 2010.12.07, M. Krauss
      # v1.1: 2011.02.23, M. Krauss: added functionality for ALMA data
      #
      # Original code based on readscans.py, courtesy S. Meyers

    """
    from xml.dom import minidom

    def printf(fmt, *args):
        if len(args):
            s = fmt % args
        else:
            s = str(fmt)
        print(s, file=file)

    qa = util.tools.quanta()
    me = util.tools.measures()

    list_scans = True
    list_antennas = False
    list_fields = True
    list_spws = False

    # read Scan.xml
    xmlscans = minidom.parse(sdm + '/Scan.xml')
    scandict = {}
    startTimeShort = []
    endTimeShort = []
    rowlist = xmlscans.getElementsByTagName('row')
    for rownode in rowlist:
        rowfid = rownode.getElementsByTagName('scanNumber')
        fid = int(rowfid[0].childNodes[0].nodeValue)
        scandict[fid] = {}

        # number of subscans
        rowsubs = rownode.getElementsByTagName('numSubscan')
        if len(rowsubs) == 0:
            # EVLA and old ALMA data
            rowsubs = rownode.getElementsByTagName('numSubScan')
        nsubs = int(rowsubs[0].childNodes[0].nodeValue)

        # intents
        rownint = rownode.getElementsByTagName('numIntent')
        nint = int(rownint[0].childNodes[0].nodeValue)

        rowintents = rownode.getElementsByTagName('scanIntent')
        sint = str(rowintents[0].childNodes[0].nodeValue)
        sints = sint.split()
        rint = ''
        for r in range(nint):
            intent = sints[2 + r]
            if rint == '':
                rint = intent
            else:
                rint += ' ' + intent

        # start and end times in mjd ns
        rowstart = rownode.getElementsByTagName('startTime')
        start = int(rowstart[0].childNodes[0].nodeValue)
        startmjd = float(start) * 1.0E-9 / 86400.0
        t = b(qa.quantity(startmjd, b'd'))
        starttime = qa.time(t, form=b'ymd', prec=8)[0]
        startTimeShort.append(qa.time(t, prec=8)[0])
        rowend = rownode.getElementsByTagName('endTime')
        end = int(rowend[0].childNodes[0].nodeValue)
        endmjd = float(end) * 1.0E-9 / 86400.0
        t = b(qa.quantity(endmjd, b'd'))
        endtime = qa.time(t, form=b'ymd', prec=8)[0]
        endTimeShort.append(qa.time(t, prec=8)[0])

        # source name
        rowsrc = rownode.getElementsByTagName('sourceName')
        try:
            src = str(rowsrc[0].childNodes[0].nodeValue)
        except:
            src = '???' # PKGW

        scandict[fid]['start'] = starttime
        scandict[fid]['end'] = endtime
        timestr = starttime + '~' + endtime
        scandict[fid]['timerange'] = timestr
        scandict[fid]['source'] = src
        scandict[fid]['intent'] = rint
        scandict[fid]['nsubs'] = nsubs

    # read Main.xml
    xmlmain = minidom.parse(sdm + '/Main.xml')
    rowlist = xmlmain.getElementsByTagName('row')
    mainScanList = []
    mainConfigList = []
    fieldIdList = []
    for rownode in rowlist:
        # get the scan numbers
        rowfid = rownode.getElementsByTagName('scanNumber')
        fid = int(rowfid[0].childNodes[0].nodeValue)
        mainScanList.append(fid)
        # get the configuration description
        rowconfig = rownode.getElementsByTagName('configDescriptionId')
        config = str(rowconfig[0].childNodes[0].nodeValue)
        mainConfigList.append(config)
        # get the field ID
        rowfieldid = rownode.getElementsByTagName('fieldId')
        fieldid = str(rowfieldid[0].childNodes[0].nodeValue).split('_')[1]
        fieldIdList.append(fieldid)

    # read ConfigDescription.xml to relate the configuration
    # description to a (set) of data description IDs
    xmlconfig = minidom.parse(sdm + '/ConfigDescription.xml')
    rowlist = xmlconfig.getElementsByTagName('row')
    configDescList = []
    dataDescList = []
    for rownode in rowlist:
        # get the configuration description
        rowConfigDesc = rownode.getElementsByTagName('configDescriptionId')
        configDesc = str(rowConfigDesc[0].childNodes[0].nodeValue)
        configDescList.append(configDesc)
        # make a list of the data description IDs:
        rowNumDataDesc = rownode.getElementsByTagName('numDataDescription')
        numDataDesc = int(rowNumDataDesc[0].childNodes[0].nodeValue)
        rowDataDesc = rownode.getElementsByTagName('dataDescriptionId')
        dataDescStr = str(rowDataDesc[0].childNodes[0].nodeValue)
        dataDescSplit = dataDescStr.split()
        dataDesc = []
        for i in range(numDataDesc):
            dataDesc.append(dataDescSplit[i + 2])
        dataDescList.append(dataDesc)

    # read DataDescription.xml to relate the data description IDs to
    # spectral window IDs
    xmlDataDesc = minidom.parse(sdm + '/DataDescription.xml')
    rowlist = xmlDataDesc.getElementsByTagName('row')
    dataDescElList = []
    spwIdDataDescList = []
    for rownode in rowlist:
        # get the data description ID, make another list:
        rowDataDescEl = rownode.getElementsByTagName('dataDescriptionId')
        dataDescEl = str(rowDataDescEl[0].childNodes[0].nodeValue)
        dataDescElList.append(dataDescEl)
        # get the related spectral window ID:
        rowSpwIdDataDesc = rownode.getElementsByTagName('spectralWindowId')
        spwIdDataDesc = str(rowSpwIdDataDesc[0].childNodes[0].nodeValue)
        spwIdDataDescList.append(spwIdDataDesc)

    # read SpectralWindow.xml, get information about number of
    # channels, reference frequency, baseband name, channel width.
    # Interesting that there seem to be multiple fields that give the
    # same information: chanFreqStart=refFreq,
    # chanFreqStep=chanWidth=resolution. Why? (Note: all units are Hz)
    # Note: this is where the script breaks for ALMA data, since there
    # are different tags in SpectralWindow.xml (for varying channel widths).
    xmlSpecWin = minidom.parse(sdm + '/SpectralWindow.xml')
    rowlist = xmlSpecWin.getElementsByTagName('row')
    spwIdList = []
    nChanList = []
    refFreqList = []
    chanWidthList = []
    basebandList = []
    for rownode in rowlist:
        # get the various row values:
        rowSpwId = rownode.getElementsByTagName('spectralWindowId')
        rowNChan = rownode.getElementsByTagName('numChan')
        rowRefFreq = rownode.getElementsByTagName('refFreq')
        # For EVLA
        rowChanWidth = rownode.getElementsByTagName('chanWidth')
        # For ALMA
        rowChanWidthArr = rownode.getElementsByTagName('chanWidthArray')
        rowBaseband = rownode.getElementsByTagName('basebandName')

        # convert to values or strings and append to the relevant lists:
        spwId = str(rowSpwId[0].childNodes[0].nodeValue)
        spwIdList.append(spwId)

        nChan = int(rowNChan[0].childNodes[0].nodeValue)
        nChanList.append(nChan)

        refFreq = float(rowRefFreq[0].childNodes[0].nodeValue)
        refFreqList.append(refFreq)

        if rowChanWidth:
            chanWidth = float(rowChanWidth[0].childNodes[0].nodeValue)
            chanWidthList.append(chanWidth)

        if rowChanWidthArr:
            tmpArr = str(rowChanWidthArr[0].childNodes[0].nodeValue).split(' ')
            tmpWidth = []
            for cw in range(2, len(tmpArr)):
                thisWidth = float(tmpArr[cw])
                tmpWidth.append(thisWidth)
            chanWidthList.append(tmpWidth)

        baseband = str(rowBaseband[0].childNodes[0].nodeValue)
        basebandList.append(baseband)

    # read Field.xml
    xmlField = minidom.parse(sdm + '/Field.xml')
    rowlist = xmlField.getElementsByTagName('row')
    fieldList = []
    fieldNameList = []
    fieldCodeList = []
    fieldRAList = []
    fieldDecList = []
    fieldSrcIDList = []
    for rownode in rowlist:
        rowField = rownode.getElementsByTagName('fieldId')
        rowName = rownode.getElementsByTagName('fieldName')
        rowCode = rownode.getElementsByTagName('code')
        rowCoords = rownode.getElementsByTagName('referenceDir')
        rowSrcId = rownode.getElementsByTagName('sourceId')

        # convert to values or strings and append to relevant lists:
        fieldList.append(int(str(rowField[0].childNodes[0].nodeValue).split('_')[1]))
        fieldNameList.append(str(rowName[0].childNodes[0].nodeValue))
        fieldCodeList.append(str(rowCode[0].childNodes[0].nodeValue))
        coordInfo = rowCoords[0].childNodes[0].nodeValue.split()
        RADeg = float(coordInfo[3]) * (180.0 / np.pi)
        DecDeg = float(coordInfo[4]) * (180.0 / np.pi)
        RAInp = {'unit': 'deg', 'value': RADeg}
        DecInp = {'unit': 'deg', 'value': DecDeg}
        RAHMS = b(qa.formxxx(b(RAInp), format=b'hms'))
        DecDMS = b(qa.formxxx(b(DecInp), format=b'dms'))
        fieldRAList.append(RAHMS)
        fieldDecList.append(DecDMS)
        fieldSrcIDList.append(int(rowSrcId[0].childNodes[0].nodeValue))

    # read Antenna.xml
    xmlAnt = minidom.parse(sdm + '/Antenna.xml')
    rowlist = xmlAnt.getElementsByTagName('row')
    antList = []
    antNameList = []
    dishDiamList = []
    stationList = []
    for rownode in rowlist:
        rowAnt = rownode.getElementsByTagName('antennaId')
        rowAntName = rownode.getElementsByTagName('name')
        rowDishDiam = rownode.getElementsByTagName('dishDiameter')
        rowStation = rownode.getElementsByTagName('stationId')

        # convert and append
        antList.append(int(str(rowAnt[0].childNodes[0].nodeValue).split('_')[1]))
        antNameList.append(str(rowAntName[0].childNodes[0].nodeValue))
        dishDiamList.append(float(rowDishDiam[0].childNodes[0].nodeValue))
        stationList.append(str(rowStation[0].childNodes[0].nodeValue))

    # read Station.xml
    xmlStation = minidom.parse(sdm + '/Station.xml')
    rowlist = xmlStation.getElementsByTagName('row')
    statIdList = []
    statNameList = []
    statLatList = []
    statLonList = []
    for rownode in rowlist:
        rowStatId = rownode.getElementsByTagName('stationId')
        rowStatName = rownode.getElementsByTagName('name')
        rowStatPos = rownode.getElementsByTagName('position')

        # convert and append
        statIdList.append(str(rowStatId[0].childNodes[0].nodeValue))
        statNameList.append(str(rowStatName[0].childNodes[0].nodeValue))
        posInfo = str(rowStatPos[0].childNodes[0].nodeValue).split()
        x = b(qa.quantity([float(posInfo[2])], b'm'))
        y = b(qa.quantity([float(posInfo[3])], b'm'))
        z = b(qa.quantity([float(posInfo[4])], b'm'))
        pos = b(me.position(b'ITRF', x, y, z))
        qLon = pos['m0']
        qLat = pos['m1']
        statLatList.append(qa.formxxx(qLat, b'dms', prec=0))
        statLonList.append(qa.formxxx(qLon, b'dms', prec=0))

    # associate antennas with stations:
    assocStatList = []
    for station in stationList:
        i = np.where(np.array(statIdList) == station)[0][0]
        assocStatList.append(statNameList[i])

    # read ExecBlock.xml
    xmlExecBlock = minidom.parse(sdm + '/ExecBlock.xml')
    rowlist = xmlExecBlock.getElementsByTagName('row')
    sTime = float(rowlist[0].getElementsByTagName('startTime')[0].childNodes[0].nodeValue) * 1.0E-9
    eTime = float(rowlist[0].getElementsByTagName('endTime')[0].childNodes[0].nodeValue) * 1.0E-9

    # integration time in seconds, start and end times:
    intTime = eTime - sTime
    t = b(qa.quantity(sTime / 86400.0, b'd'))
    obsStart = qa.time(t, form=b'ymd', prec=8)[0]
    t = b(qa.quantity(eTime / 86400.0, b'd'))
    obsEnd = qa.time(t, form=b'ymd', prec=8)[0]

    # observer name and obs. info:
    observerName = str(rowlist[0].getElementsByTagName('observerName')[0].childNodes[0].nodeValue)
    configName = str(rowlist[0].getElementsByTagName('configName')[0].childNodes[0].nodeValue)
    telescopeName = str(rowlist[0].getElementsByTagName('telescopeName')[0].childNodes[0].nodeValue)
    numAntenna = int(rowlist[0].getElementsByTagName('numAntenna')[0].childNodes[0].nodeValue)

    # make lists like the dataDescList for spectral windows & related info:
    spwOrd = []
    nChanOrd = []
    rFreqOrd = []
    cWidthOrd = []
    bbandOrd = []
    for i in range(0, len(configDescList)):
        spwTempList = []
        nChanTempList = []
        rFreqTempList = []
        cWidthTempList = []
        bbandTempList = []
        for dDesc in dataDescList[i]:
            el = np.where(np.array(dataDescElList) == dDesc)[0][0]
            spwIdN = spwIdDataDescList[el]
            spwEl = np.where(np.array(spwIdList) == spwIdN)[0][0]
            spwTempList.append(int(spwIdList[spwEl].split('_')[1]))
            nChanTempList.append(nChanList[spwEl])
            rFreqTempList.append(refFreqList[spwEl])
            cWidthTempList.append(chanWidthList[spwEl])
            bbandTempList.append(basebandList[spwEl])
        spwOrd.append(spwTempList)
        nChanOrd.append(nChanTempList)
        rFreqOrd.append(rFreqTempList)
        cWidthOrd.append(cWidthTempList)
        bbandOrd.append(bbandTempList)

    # add this info to the scan dictionary:
    for scanNum in scandict:
        spwOrdList = []
        nChanOrdList = []
        rFreqOrdList = []
        cWidthOrdList = []
        bbandOrdList = []

        # scanEl could have multiple elements if subscans are present,
        # or for ALMA data:
        scanEl = np.where(np.array(mainScanList) == scanNum)[0]
        for thisEl in scanEl:
            configEl = mainConfigList[thisEl]
            listEl = np.where(np.array(configDescList) == configEl)[0][0]
            spwOrdList.append(spwOrd[listEl])
            nChanOrdList.append(nChanOrd[listEl])
            rFreqOrdList.append(rFreqOrd[listEl])
            cWidthOrdList.append(cWidthOrd[listEl])
            bbandOrdList.append(bbandOrd[listEl])

        try:
            scandict[scanNum]['field'] = int(fieldIdList[scanEl[0]])
        except:
            scandict[scanNum]['field'] = -1 # PKGW

        scandict[scanNum]['spws'] = spwOrdList
        scandict[scanNum]['nchan'] = nChanOrdList
        scandict[scanNum]['reffreq'] = rFreqOrdList
        scandict[scanNum]['chanwidth'] = cWidthOrdList
        scandict[scanNum]['baseband'] = bbandOrdList

    # report information to the logger
    printf('================================================================================')
    printf(' SDM File: %s', sdm)
    printf('================================================================================')
    printf(' Observer: %s', observerName)
    printf(' Facility: %s, %s-configuration', telescopeName, configName)
    printf(' Observed from %s to %s (UTC)', obsStart, obsEnd)
    printf(' Total integration time = %.2f seconds (%.2f hours)', intTime, intTime / 3600)

    if list_scans:
        printf(' ')
        printf('Scan listing:')

        maxspwlen = 0

        for scaninfo in scandict.values():
            SPWs = []
            for spw in scaninfo['spws']:
                SPWs += spw

            scaninfo['spwstr'] = str(list(set(SPWs)))
            maxspwlen = max(maxspwlen, len(scaninfo['spwstr']))

        fmt = ' %-25s %-4s %-5s %-15s %-*s %s'
        printf(fmt, 'Timerange (UTC)', 'Scan', 'FldID', 'FieldName', maxspwlen, 'SpwIDs', 'Intent(s)')

        for i, (scanid, scaninfo) in enumerate(scandict.items()):
            printf(fmt, startTimeShort[i] + ' - ' + endTimeShort[i], scanid,
                   scaninfo['field'], scaninfo['source'], maxspwlen,
                   scaninfo['spwstr'], scaninfo['intent'])

    if list_spws:
        printf(' ')
        printf('Spectral window information:')
        printf(' SpwID #Chans Ch0 (MHz) ChWidth (kHz) TotBW (MHz) Baseband')

        for i in range(0, len(spwIdList)):
            printf(' %s %s %s %s %s %s', spwIdList[i].split('_')[1].ljust(4),
                   str(nChanList[i]).ljust(4), str(refFreqList[i] / 1e6).ljust(8),
                   str(np.array(chanWidthList[i]) / 1e3).ljust(8),
                   str(np.array(chanWidthList[i]) * nChanList[i] / 1e6).ljust(8),
                   basebandList[i].ljust(8))

    if list_fields:
        printf(' ')
        printf('Field information:')
        printf(' FldID Code Name RA Dec SrcID')

        for i in range(0, len(fieldList)):
            printf(' %-6d %-6s %-15s %-13s %-15s %-5d', fieldList[i], fieldCodeList[i],
                   fieldNameList[i], fieldRAList[i], fieldDecList[i], fieldSrcIDList[i])

    if list_antennas:
        printf(' ')
        printf('Antennas (%i):' % len(antList))
        printf(' ID Name Station Diam. (m) Lat. Long.')

        for i in range(0, len(antList)):
            printf(' %s %s %s %s %s %s ', str(antList[i]).ljust(5),
                   antNameList[i].ljust(6), assocStatList[i].ljust(5),
                   str(dishDiamList[i]).ljust(5), statLatList[i].ljust(12),
                   statLonList[i].ljust(12))

    # return the scan dictionary
    return scandict
[ "def", "listsdm", "(", "sdm", ",", "file", "=", "None", ")", ":", "from", "xml", ".", "dom", "import", "minidom", "import", "string", "def", "printf", "(", "fmt", ",", "*", "args", ")", ":", "if", "len", "(", "args", ")", ":", "s", "=", "fmt", "%", "args", "else", ":", "s", "=", "str", "(", "fmt", ")", "print", "(", "s", ",", "file", "=", "file", ")", "qa", "=", "util", ".", "tools", ".", "quanta", "(", ")", "me", "=", "util", ".", "tools", ".", "measures", "(", ")", "list_scans", "=", "True", "list_antennas", "=", "False", "list_fields", "=", "True", "list_spws", "=", "False", "# read Scan.xml", "xmlscans", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/Scan.xml'", ")", "scandict", "=", "{", "}", "startTimeShort", "=", "[", "]", "endTimeShort", "=", "[", "]", "rowlist", "=", "xmlscans", ".", "getElementsByTagName", "(", "'row'", ")", "for", "rownode", "in", "rowlist", ":", "rowfid", "=", "rownode", ".", "getElementsByTagName", "(", "'scanNumber'", ")", "fid", "=", "int", "(", "rowfid", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "scandict", "[", "fid", "]", "=", "{", "}", "# number of subscans", "rowsubs", "=", "rownode", ".", "getElementsByTagName", "(", "'numSubscan'", ")", "if", "len", "(", "rowsubs", ")", "==", "0", ":", "# EVLA and old ALMA data", "rowsubs", "=", "rownode", ".", "getElementsByTagName", "(", "'numSubScan'", ")", "nsubs", "=", "int", "(", "rowsubs", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "# intents", "rownint", "=", "rownode", ".", "getElementsByTagName", "(", "'numIntent'", ")", "nint", "=", "int", "(", "rownint", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "rowintents", "=", "rownode", ".", "getElementsByTagName", "(", "'scanIntent'", ")", "sint", "=", "str", "(", "rowintents", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "sints", "=", "sint", ".", "split", "(", ")", "rint", "=", "''", "for", "r", "in", "range", "(", "nint", ")", ":", "intent", "=", "sints", "[", "2", "+", "r", "]", "if", "rint", "==", "''", ":", "rint", "=", "intent", "else", ":", "rint", "+=", "' '", "+", "intent", "# start and end times in mjd ns", "rowstart", "=", "rownode", ".", "getElementsByTagName", "(", "'startTime'", ")", "start", "=", "int", "(", "rowstart", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "startmjd", "=", "float", "(", "start", ")", "*", "1.0E-9", "/", "86400.0", "t", "=", "b", "(", "qa", ".", "quantity", "(", "startmjd", ",", "b'd'", ")", ")", "starttime", "=", "qa", ".", "time", "(", "t", ",", "form", "=", "b'ymd'", ",", "prec", "=", "8", ")", "[", "0", "]", "startTimeShort", ".", "append", "(", "qa", ".", "time", "(", "t", ",", "prec", "=", "8", ")", "[", "0", "]", ")", "rowend", "=", "rownode", ".", "getElementsByTagName", "(", "'endTime'", ")", "end", "=", "int", "(", "rowend", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "endmjd", "=", "float", "(", "end", ")", "*", "1.0E-9", "/", "86400.0", "t", "=", "b", "(", "qa", ".", "quantity", "(", "endmjd", ",", "b'd'", ")", ")", "endtime", "=", "qa", ".", "time", "(", "t", ",", "form", "=", "b'ymd'", ",", "prec", "=", "8", ")", "[", "0", "]", "endTimeShort", ".", "append", "(", "qa", ".", "time", "(", "t", ",", "prec", "=", "8", ")", "[", "0", "]", ")", "# source name", "rowsrc", "=", "rownode", ".", "getElementsByTagName", "(", "'sourceName'", ")", "try", ":", "src", "=", "str", "(", "rowsrc", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", 
"nodeValue", ")", "except", ":", "src", "=", "'???'", "# PKGW", "scandict", "[", "fid", "]", "[", "'start'", "]", "=", "starttime", "scandict", "[", "fid", "]", "[", "'end'", "]", "=", "endtime", "timestr", "=", "starttime", "+", "'~'", "+", "endtime", "scandict", "[", "fid", "]", "[", "'timerange'", "]", "=", "timestr", "scandict", "[", "fid", "]", "[", "'source'", "]", "=", "src", "scandict", "[", "fid", "]", "[", "'intent'", "]", "=", "rint", "scandict", "[", "fid", "]", "[", "'nsubs'", "]", "=", "nsubs", "# read Main.xml", "xmlmain", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/Main.xml'", ")", "rowlist", "=", "xmlmain", ".", "getElementsByTagName", "(", "'row'", ")", "mainScanList", "=", "[", "]", "mainConfigList", "=", "[", "]", "fieldIdList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "# get the scan numbers", "rowfid", "=", "rownode", ".", "getElementsByTagName", "(", "'scanNumber'", ")", "fid", "=", "int", "(", "rowfid", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "mainScanList", ".", "append", "(", "fid", ")", "# get the configuration description", "rowconfig", "=", "rownode", ".", "getElementsByTagName", "(", "'configDescriptionId'", ")", "config", "=", "str", "(", "rowconfig", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "mainConfigList", ".", "append", "(", "config", ")", "# get the field ID", "rowfieldid", "=", "rownode", ".", "getElementsByTagName", "(", "'fieldId'", ")", "fieldid", "=", "string", ".", "split", "(", "str", "(", "rowfieldid", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ",", "'_'", ")", "[", "1", "]", "fieldIdList", ".", "append", "(", "fieldid", ")", "# read ConfigDescription.xml to relate the configuration", "# description to a(set) of data description IDs", "xmlconfig", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/ConfigDescription.xml'", ")", "rowlist", "=", "xmlconfig", ".", "getElementsByTagName", "(", "'row'", ")", "configDescList", "=", "[", "]", "dataDescList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "# get the configuration description", "rowConfigDesc", "=", "rownode", ".", "getElementsByTagName", "(", "'configDescriptionId'", ")", "configDesc", "=", "str", "(", "rowConfigDesc", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "configDescList", ".", "append", "(", "configDesc", ")", "# make a list of the data description IDs:", "rowNumDataDesc", "=", "rownode", ".", "getElementsByTagName", "(", "'numDataDescription'", ")", "numDataDesc", "=", "int", "(", "rowNumDataDesc", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "rowDataDesc", "=", "rownode", ".", "getElementsByTagName", "(", "'dataDescriptionId'", ")", "dataDescStr", "=", "str", "(", "rowDataDesc", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "dataDescSplit", "=", "dataDescStr", ".", "split", "(", ")", "dataDesc", "=", "[", "]", "for", "i", "in", "range", "(", "numDataDesc", ")", ":", "dataDesc", ".", "append", "(", "dataDescSplit", "[", "i", "+", "2", "]", ")", "dataDescList", ".", "append", "(", "dataDesc", ")", "# read DataDescription.xml to relate the data description IDs to", "# spectral window IDs", "xmlDataDesc", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/DataDescription.xml'", ")", "rowlist", "=", "xmlDataDesc", ".", "getElementsByTagName", "(", "'row'", ")", "dataDescElList", "=", "[", "]", "spwIdDataDescList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "# get the data 
description ID, make another list:", "rowDataDescEl", "=", "rownode", ".", "getElementsByTagName", "(", "'dataDescriptionId'", ")", "dataDescEl", "=", "str", "(", "rowDataDescEl", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "dataDescElList", ".", "append", "(", "dataDescEl", ")", "# get the related spectral window ID:", "rowSpwIdDataDesc", "=", "rownode", ".", "getElementsByTagName", "(", "'spectralWindowId'", ")", "spwIdDataDesc", "=", "str", "(", "rowSpwIdDataDesc", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "spwIdDataDescList", ".", "append", "(", "spwIdDataDesc", ")", "# read SpectralWindow.xml, get information about number of", "# channels, reference frequency, baseband name, channel width.", "# Interesting that there seem to be multiple fields that give the", "# same information: chanFreqStart=reFreq,", "# chanFreqStep=chanWidth=resolution. Why?(Note: all units are Hz)", "# Note: this is where the script breaks for ALMA data, since there", "# are different tags in SpectraWindow.xml(for varying channel widths).", "xmlSpecWin", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/SpectralWindow.xml'", ")", "rowlist", "=", "xmlSpecWin", ".", "getElementsByTagName", "(", "'row'", ")", "spwIdList", "=", "[", "]", "nChanList", "=", "[", "]", "refFreqList", "=", "[", "]", "chanWidthList", "=", "[", "]", "basebandList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "# get the various row values:", "rowSpwId", "=", "rownode", ".", "getElementsByTagName", "(", "'spectralWindowId'", ")", "rowNChan", "=", "rownode", ".", "getElementsByTagName", "(", "'numChan'", ")", "rowRefFreq", "=", "rownode", ".", "getElementsByTagName", "(", "'refFreq'", ")", "# For EVLA", "rowChanWidth", "=", "rownode", ".", "getElementsByTagName", "(", "'chanWidth'", ")", "# For ALMA", "rowChanWidthArr", "=", "rownode", ".", "getElementsByTagName", "(", "'chanWidthArray'", ")", "rowBaseband", "=", "rownode", ".", "getElementsByTagName", "(", "'basebandName'", ")", "# convert to values or strings and append to the relevant lists:", "spwId", "=", "str", "(", "rowSpwId", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "spwIdList", ".", "append", "(", "spwId", ")", "nChan", "=", "int", "(", "rowNChan", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "nChanList", ".", "append", "(", "nChan", ")", "refFreq", "=", "float", "(", "rowRefFreq", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "refFreqList", ".", "append", "(", "refFreq", ")", "if", "rowChanWidth", ":", "chanWidth", "=", "float", "(", "rowChanWidth", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "chanWidthList", ".", "append", "(", "chanWidth", ")", "if", "rowChanWidthArr", ":", "tmpArr", "=", "str", "(", "rowChanWidthArr", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ".", "split", "(", "' '", ")", "tmpWidth", "=", "[", "]", "for", "cw", "in", "range", "(", "2", ",", "len", "(", "tmpArr", ")", ")", ":", "thisWidth", "=", "float", "(", "tmpArr", "[", "cw", "]", ")", "tmpWidth", ".", "append", "(", "thisWidth", ")", "chanWidthList", ".", "append", "(", "tmpWidth", ")", "baseband", "=", "str", "(", "rowBaseband", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "basebandList", ".", "append", "(", "baseband", ")", "# read Field.xml", "xmlField", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/Field.xml'", ")", "rowlist", "=", "xmlField", ".", 
"getElementsByTagName", "(", "'row'", ")", "fieldList", "=", "[", "]", "fieldNameList", "=", "[", "]", "fieldCodeList", "=", "[", "]", "fieldRAList", "=", "[", "]", "fieldDecList", "=", "[", "]", "fieldSrcIDList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "rowField", "=", "rownode", ".", "getElementsByTagName", "(", "'fieldId'", ")", "rowName", "=", "rownode", ".", "getElementsByTagName", "(", "'fieldName'", ")", "rowCode", "=", "rownode", ".", "getElementsByTagName", "(", "'code'", ")", "rowCoords", "=", "rownode", ".", "getElementsByTagName", "(", "'referenceDir'", ")", "rowSrcId", "=", "rownode", ".", "getElementsByTagName", "(", "'sourceId'", ")", "# convert to values or strings and append to relevent lists:", "fieldList", ".", "append", "(", "int", "(", "string", ".", "split", "(", "str", "(", "rowField", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ",", "'_'", ")", "[", "1", "]", ")", ")", "fieldNameList", ".", "append", "(", "str", "(", "rowName", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "fieldCodeList", ".", "append", "(", "str", "(", "rowCode", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "coordInfo", "=", "rowCoords", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ".", "split", "(", ")", "RADeg", "=", "float", "(", "coordInfo", "[", "3", "]", ")", "*", "(", "180.0", "/", "np", ".", "pi", ")", "DecDeg", "=", "float", "(", "coordInfo", "[", "4", "]", ")", "*", "(", "180.0", "/", "np", ".", "pi", ")", "RAInp", "=", "{", "'unit'", ":", "'deg'", ",", "'value'", ":", "RADeg", "}", "DecInp", "=", "{", "'unit'", ":", "'deg'", ",", "'value'", ":", "DecDeg", "}", "RAHMS", "=", "b", "(", "qa", ".", "formxxx", "(", "b", "(", "RAInp", ")", ",", "format", "=", "b'hms'", ")", ")", "DecDMS", "=", "b", "(", "qa", ".", "formxxx", "(", "b", "(", "DecInp", ")", ",", "format", "=", "b'dms'", ")", ")", "fieldRAList", ".", "append", "(", "RAHMS", ")", "fieldDecList", ".", "append", "(", "DecDMS", ")", "fieldSrcIDList", ".", "append", "(", "int", "(", "rowSrcId", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "# read Antenna.xml", "xmlAnt", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/Antenna.xml'", ")", "rowlist", "=", "xmlAnt", ".", "getElementsByTagName", "(", "'row'", ")", "antList", "=", "[", "]", "antNameList", "=", "[", "]", "dishDiamList", "=", "[", "]", "stationList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "rowAnt", "=", "rownode", ".", "getElementsByTagName", "(", "'antennaId'", ")", "rowAntName", "=", "rownode", ".", "getElementsByTagName", "(", "'name'", ")", "rowDishDiam", "=", "rownode", ".", "getElementsByTagName", "(", "'dishDiameter'", ")", "rowStation", "=", "rownode", ".", "getElementsByTagName", "(", "'stationId'", ")", "# convert and append", "antList", ".", "append", "(", "int", "(", "string", ".", "split", "(", "str", "(", "rowAnt", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ",", "'_'", ")", "[", "1", "]", ")", ")", "antNameList", ".", "append", "(", "str", "(", "rowAntName", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "dishDiamList", ".", "append", "(", "float", "(", "rowDishDiam", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "stationList", ".", "append", "(", "str", "(", "rowStation", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "# read Station.xml", "xmlStation", "=", "minidom", 
".", "parse", "(", "sdm", "+", "'/Station.xml'", ")", "rowlist", "=", "xmlStation", ".", "getElementsByTagName", "(", "'row'", ")", "statIdList", "=", "[", "]", "statNameList", "=", "[", "]", "statLatList", "=", "[", "]", "statLonList", "=", "[", "]", "for", "rownode", "in", "rowlist", ":", "rowStatId", "=", "rownode", ".", "getElementsByTagName", "(", "'stationId'", ")", "rowStatName", "=", "rownode", ".", "getElementsByTagName", "(", "'name'", ")", "rowStatPos", "=", "rownode", ".", "getElementsByTagName", "(", "'position'", ")", "# convert and append", "statIdList", ".", "append", "(", "str", "(", "rowStatId", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "statNameList", ".", "append", "(", "str", "(", "rowStatName", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "posInfo", "=", "string", ".", "split", "(", "str", "(", "rowStatPos", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", ")", "x", "=", "b", "(", "qa", ".", "quantity", "(", "[", "float", "(", "posInfo", "[", "2", "]", ")", "]", ",", "b'm'", ")", ")", "y", "=", "b", "(", "qa", ".", "quantity", "(", "[", "float", "(", "posInfo", "[", "3", "]", ")", "]", ",", "b'm'", ")", ")", "z", "=", "b", "(", "qa", ".", "quantity", "(", "[", "float", "(", "posInfo", "[", "4", "]", ")", "]", ",", "b'm'", ")", ")", "pos", "=", "b", "(", "me", ".", "position", "(", "b'ITRF'", ",", "x", ",", "y", ",", "z", ")", ")", "qLon", "=", "pos", "[", "'m0'", "]", "qLat", "=", "pos", "[", "'m1'", "]", "statLatList", ".", "append", "(", "qa", ".", "formxxx", "(", "qLat", ",", "b'dms'", ",", "prec", "=", "0", ")", ")", "statLonList", ".", "append", "(", "qa", ".", "formxxx", "(", "qLon", ",", "b'dms'", ",", "prec", "=", "0", ")", ")", "# associate antennas with stations:", "assocStatList", "=", "[", "]", "for", "station", "in", "stationList", ":", "i", "=", "np", ".", "where", "(", "np", ".", "array", "(", "statIdList", ")", "==", "station", ")", "[", "0", "]", "[", "0", "]", "assocStatList", ".", "append", "(", "statNameList", "[", "i", "]", ")", "# read ExecBlock.xml", "xmlExecBlock", "=", "minidom", ".", "parse", "(", "sdm", "+", "'/ExecBlock.xml'", ")", "rowlist", "=", "xmlExecBlock", ".", "getElementsByTagName", "(", "'row'", ")", "sTime", "=", "float", "(", "rowlist", "[", "0", "]", ".", "getElementsByTagName", "(", "'startTime'", ")", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "*", "1.0E-9", "eTime", "=", "float", "(", "rowlist", "[", "0", "]", ".", "getElementsByTagName", "(", "'endTime'", ")", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "*", "1.0E-9", "# integration time in seconds, start and end times:", "intTime", "=", "eTime", "-", "sTime", "t", "=", "b", "(", "qa", ".", "quantity", "(", "sTime", "/", "86400.0", ",", "b'd'", ")", ")", "obsStart", "=", "qa", ".", "time", "(", "t", ",", "form", "=", "b'ymd'", ",", "prec", "=", "8", ")", "[", "0", "]", "t", "=", "b", "(", "qa", ".", "quantity", "(", "eTime", "/", "86400.0", ",", "b'd'", ")", ")", "obsEnd", "=", "qa", ".", "time", "(", "t", ",", "form", "=", "b'ymd'", ",", "prec", "=", "8", ")", "[", "0", "]", "# observer name and obs. 
info:", "observerName", "=", "str", "(", "rowlist", "[", "0", "]", ".", "getElementsByTagName", "(", "'observerName'", ")", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "configName", "=", "str", "(", "rowlist", "[", "0", "]", ".", "getElementsByTagName", "(", "'configName'", ")", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "telescopeName", "=", "str", "(", "rowlist", "[", "0", "]", ".", "getElementsByTagName", "(", "'telescopeName'", ")", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "numAntenna", "=", "int", "(", "rowlist", "[", "0", "]", ".", "getElementsByTagName", "(", "'numAntenna'", ")", "[", "0", "]", ".", "childNodes", "[", "0", "]", ".", "nodeValue", ")", "# make lists like the dataDescList for spectral windows & related info:", "spwOrd", "=", "[", "]", "nChanOrd", "=", "[", "]", "rFreqOrd", "=", "[", "]", "cWidthOrd", "=", "[", "]", "bbandOrd", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "configDescList", ")", ")", ":", "spwTempList", "=", "[", "]", "nChanTempList", "=", "[", "]", "rFreqTempList", "=", "[", "]", "cWidthTempList", "=", "[", "]", "bbandTempList", "=", "[", "]", "for", "dDesc", "in", "dataDescList", "[", "i", "]", ":", "el", "=", "np", ".", "where", "(", "np", ".", "array", "(", "dataDescElList", ")", "==", "dDesc", ")", "[", "0", "]", "[", "0", "]", "spwIdN", "=", "spwIdDataDescList", "[", "el", "]", "spwEl", "=", "np", ".", "where", "(", "np", ".", "array", "(", "spwIdList", ")", "==", "spwIdN", ")", "[", "0", "]", "[", "0", "]", "spwTempList", ".", "append", "(", "int", "(", "string", ".", "split", "(", "spwIdList", "[", "spwEl", "]", ",", "'_'", ")", "[", "1", "]", ")", ")", "nChanTempList", ".", "append", "(", "nChanList", "[", "spwEl", "]", ")", "rFreqTempList", ".", "append", "(", "refFreqList", "[", "spwEl", "]", ")", "cWidthTempList", ".", "append", "(", "chanWidthList", "[", "spwEl", "]", ")", "bbandTempList", ".", "append", "(", "basebandList", "[", "spwEl", "]", ")", "spwOrd", ".", "append", "(", "spwTempList", ")", "nChanOrd", ".", "append", "(", "nChanTempList", ")", "rFreqOrd", ".", "append", "(", "rFreqTempList", ")", "cWidthOrd", ".", "append", "(", "cWidthTempList", ")", "bbandOrd", ".", "append", "(", "bbandTempList", ")", "# add this info to the scan dictionary:", "for", "scanNum", "in", "scandict", ":", "spwOrdList", "=", "[", "]", "nChanOrdList", "=", "[", "]", "rFreqOrdList", "=", "[", "]", "cWidthOrdList", "=", "[", "]", "bbandOrdList", "=", "[", "]", "# scanEl could have multiple elements if subscans are present,", "# or for ALMA data:", "scanEl", "=", "np", ".", "where", "(", "np", ".", "array", "(", "mainScanList", ")", "==", "scanNum", ")", "[", "0", "]", "for", "thisEl", "in", "scanEl", ":", "configEl", "=", "mainConfigList", "[", "thisEl", "]", "listEl", "=", "np", ".", "where", "(", "np", ".", "array", "(", "configDescList", ")", "==", "configEl", ")", "[", "0", "]", "[", "0", "]", "spwOrdList", ".", "append", "(", "spwOrd", "[", "listEl", "]", ")", "nChanOrdList", ".", "append", "(", "nChanOrd", "[", "listEl", "]", ")", "rFreqOrdList", ".", "append", "(", "rFreqOrd", "[", "listEl", "]", ")", "cWidthOrdList", ".", "append", "(", "cWidthOrd", "[", "listEl", "]", ")", "bbandOrdList", ".", "append", "(", "bbandOrd", "[", "listEl", "]", ")", "try", ":", "scandict", "[", "scanNum", "]", "[", "'field'", "]", "=", "int", "(", "fieldIdList", "[", "scanEl", "[", "0", "]", "]", ")", "except", ":", "scandict", "[", "scanNum", "]", "[", 
"'field'", "]", "=", "-", "1", "# PKGW", "scandict", "[", "scanNum", "]", "[", "'spws'", "]", "=", "spwOrdList", "scandict", "[", "scanNum", "]", "[", "'nchan'", "]", "=", "nChanOrdList", "scandict", "[", "scanNum", "]", "[", "'reffreq'", "]", "=", "rFreqOrdList", "scandict", "[", "scanNum", "]", "[", "'chanwidth'", "]", "=", "cWidthOrdList", "scandict", "[", "scanNum", "]", "[", "'baseband'", "]", "=", "bbandOrdList", "# report information to the logger", "printf", "(", "'================================================================================'", ")", "printf", "(", "' SDM File: %s'", ",", "sdm", ")", "printf", "(", "'================================================================================'", ")", "printf", "(", "' Observer: %s'", ",", "observerName", ")", "printf", "(", "' Facility: %s, %s-configuration'", ",", "telescopeName", ",", "configName", ")", "printf", "(", "' Observed from %s to %s(UTC)'", ",", "obsStart", ",", "obsEnd", ")", "printf", "(", "' Total integration time = %.2f seconds(%.2f hours)'", ",", "intTime", ",", "intTime", "/", "3600", ")", "if", "list_scans", ":", "printf", "(", "' '", ")", "printf", "(", "'Scan listing:'", ")", "maxspwlen", "=", "0", "for", "scaninfo", "in", "scandict", ".", "values", "(", ")", ":", "SPWs", "=", "[", "]", "for", "spw", "in", "scaninfo", "[", "'spws'", "]", ":", "SPWs", "+=", "spw", "scaninfo", "[", "'spwstr'", "]", "=", "str", "(", "list", "(", "set", "(", "SPWs", ")", ")", ")", "maxspwlen", "=", "max", "(", "maxspwlen", ",", "len", "(", "scaninfo", "[", "'spwstr'", "]", ")", ")", "fmt", "=", "' %-25s %-4s %-5s %-15s %-*s %s'", "printf", "(", "fmt", ",", "'Timerange(UTC)'", ",", "'Scan'", ",", "'FldID'", ",", "'FieldName'", ",", "maxspwlen", ",", "'SpwIDs'", ",", "'Intent(s)'", ")", "for", "i", ",", "(", "scanid", ",", "scaninfo", ")", "in", "enumerate", "(", "scandict", ".", "items", "(", ")", ")", ":", "printf", "(", "fmt", ",", "startTimeShort", "[", "i", "]", "+", "' - '", "+", "endTimeShort", "[", "i", "]", ",", "scanid", ",", "scaninfo", "[", "'field'", "]", ",", "scaninfo", "[", "'source'", "]", ",", "maxspwlen", ",", "scaninfo", "[", "'spwstr'", "]", ",", "scaninfo", "[", "'intent'", "]", ")", "if", "list_spws", ":", "printf", "(", "' '", ")", "printf", "(", "'Spectral window information:'", ")", "printf", "(", "' SpwID #Chans Ch0(MHz) ChWidth(kHz) TotBW(MHz) Baseband'", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "spwIdList", ")", ")", ":", "printf", "(", "' %s %s %s %s %s %s'", ",", "string", ".", "split", "(", "spwIdList", "[", "i", "]", ",", "'_'", ")", "[", "1", "]", ".", "ljust", "(", "4", ")", ",", "str", "(", "nChanList", "[", "i", "]", ")", ".", "ljust", "(", "4", ")", ",", "str", "(", "refFreqList", "[", "i", "]", "/", "1e6", ")", ".", "ljust", "(", "8", ")", ",", "str", "(", "np", ".", "array", "(", "chanWidthList", "[", "i", "]", ")", "/", "1e3", ")", ".", "ljust", "(", "8", ")", ",", "str", "(", "np", ".", "array", "(", "chanWidthList", "[", "i", "]", ")", "*", "nChanList", "[", "i", "]", "/", "1e6", ")", ".", "ljust", "(", "8", ")", ",", "basebandList", "[", "i", "]", ".", "ljust", "(", "8", ")", ")", "if", "list_fields", ":", "printf", "(", "' '", ")", "printf", "(", "'Field information:'", ")", "printf", "(", "' FldID Code Name RA Dec SrcID'", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "fieldList", ")", ")", ":", "printf", "(", "' %-6d %-6s %-15s %-13s %-15s %-5d'", ",", "fieldList", "[", "i", "]", ",", "fieldCodeList", "[", "i", "]", ",", "fieldNameList", "[", "i", 
"]", ",", "fieldRAList", "[", "i", "]", ",", "fieldDecList", "[", "i", "]", ",", "fieldSrcIDList", "[", "i", "]", ")", "if", "list_antennas", ":", "printf", "(", "' '", ")", "printf", "(", "'Antennas(%i):'", "%", "len", "(", "antList", ")", ")", "printf", "(", "' ID Name Station Diam.(m) Lat. Long.'", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "antList", ")", ")", ":", "printf", "(", "' %s %s %s %s %s %s '", ",", "str", "(", "antList", "[", "i", "]", ")", ".", "ljust", "(", "5", ")", ",", "antNameList", "[", "i", "]", ".", "ljust", "(", "6", ")", ",", "assocStatList", "[", "i", "]", ".", "ljust", "(", "5", ")", ",", "str", "(", "dishDiamList", "[", "i", "]", ")", ".", "ljust", "(", "5", ")", ",", "statLatList", "[", "i", "]", ".", "ljust", "(", "12", ")", ",", "statLonList", "[", "i", "]", ".", "ljust", "(", "12", ")", ")", "# return the scan dictionary", "return", "scandict" ]
39.333333
20.324201
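listsdm above is essentially one minidom recipe applied to each (A)SDM table; a self-contained miniature with inline XML shows the row/tag/nodeValue pattern it repeats:

from xml.dom import minidom

doc = minidom.parseString(
    "<Table>"
    "<row><scanNumber>1</scanNumber><sourceName>3C286</sourceName></row>"
    "<row><scanNumber>2</scanNumber><sourceName>J1331+3030</sourceName></row>"
    "</Table>")

for rownode in doc.getElementsByTagName('row'):
    # Each cell is the text node of the first matching child element.
    fid = int(rownode.getElementsByTagName('scanNumber')[0].childNodes[0].nodeValue)
    src = str(rownode.getElementsByTagName('sourceName')[0].childNodes[0].nodeValue)
    print(fid, src)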
def get_choices(cli, prog_name, args, incomplete): """ :param cli: command definition :param prog_name: the program that is running :param args: full list of args :param incomplete: the incomplete text to autocomplete :return: all the possible completions for the incomplete """ all_args = copy.deepcopy(args) ctx = resolve_ctx(cli, prog_name, args) if ctx is None: return [] # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse # without the '=' if start_of_option(incomplete) and WORDBREAK in incomplete: partition_incomplete = incomplete.partition(WORDBREAK) all_args.append(partition_incomplete[0]) incomplete = partition_incomplete[2] elif incomplete == WORDBREAK: incomplete = '' completions = [] if start_of_option(incomplete): # completions for partial options for param in ctx.command.params: if isinstance(param, Option) and not param.hidden: param_opts = [param_opt for param_opt in param.opts + param.secondary_opts if param_opt not in all_args or param.multiple] completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)]) return completions # completion for option values from user supplied values for param in ctx.command.params: if is_incomplete_option(all_args, param): return get_user_autocompletions(ctx, all_args, incomplete, param) # completion for argument values from user supplied values for param in ctx.command.params: if is_incomplete_argument(ctx.params, param): return get_user_autocompletions(ctx, all_args, incomplete, param) add_subcommand_completions(ctx, incomplete, completions) # Sort before returning so that proper ordering can be enforced in custom types. return sorted(completions)
[ "def", "get_choices", "(", "cli", ",", "prog_name", ",", "args", ",", "incomplete", ")", ":", "all_args", "=", "copy", ".", "deepcopy", "(", "args", ")", "ctx", "=", "resolve_ctx", "(", "cli", ",", "prog_name", ",", "args", ")", "if", "ctx", "is", "None", ":", "return", "[", "]", "# In newer versions of bash long opts with '='s are partitioned, but it's easier to parse", "# without the '='", "if", "start_of_option", "(", "incomplete", ")", "and", "WORDBREAK", "in", "incomplete", ":", "partition_incomplete", "=", "incomplete", ".", "partition", "(", "WORDBREAK", ")", "all_args", ".", "append", "(", "partition_incomplete", "[", "0", "]", ")", "incomplete", "=", "partition_incomplete", "[", "2", "]", "elif", "incomplete", "==", "WORDBREAK", ":", "incomplete", "=", "''", "completions", "=", "[", "]", "if", "start_of_option", "(", "incomplete", ")", ":", "# completions for partial options", "for", "param", "in", "ctx", ".", "command", ".", "params", ":", "if", "isinstance", "(", "param", ",", "Option", ")", "and", "not", "param", ".", "hidden", ":", "param_opts", "=", "[", "param_opt", "for", "param_opt", "in", "param", ".", "opts", "+", "param", ".", "secondary_opts", "if", "param_opt", "not", "in", "all_args", "or", "param", ".", "multiple", "]", "completions", ".", "extend", "(", "[", "(", "o", ",", "param", ".", "help", ")", "for", "o", "in", "param_opts", "if", "o", ".", "startswith", "(", "incomplete", ")", "]", ")", "return", "completions", "# completion for option values from user supplied values", "for", "param", "in", "ctx", ".", "command", ".", "params", ":", "if", "is_incomplete_option", "(", "all_args", ",", "param", ")", ":", "return", "get_user_autocompletions", "(", "ctx", ",", "all_args", ",", "incomplete", ",", "param", ")", "# completion for argument values from user supplied values", "for", "param", "in", "ctx", ".", "command", ".", "params", ":", "if", "is_incomplete_argument", "(", "ctx", ".", "params", ",", "param", ")", ":", "return", "get_user_autocompletions", "(", "ctx", ",", "all_args", ",", "incomplete", ",", "param", ")", "add_subcommand_completions", "(", "ctx", ",", "incomplete", ",", "completions", ")", "# Sort before returning so that proper ordering can be enforced in custom types.", "return", "sorted", "(", "completions", ")" ]
43.522727
19.659091
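How the '='-handling above behaves, shown on plain strings; WORDBREAK being '=' is an assumption about the constant in click's bash-completion module:

def start_of_option_sketch(param_str):
    # A token starts an option if it begins with '-'.
    return bool(param_str) and param_str[:1] == '-'

WORDBREAK = '='
incomplete = '--color=au'
if start_of_option_sketch(incomplete) and WORDBREAK in incomplete:
    head, _, tail = incomplete.partition(WORDBREAK)
    print(head, tail)  # '--color' joins all_args; completion then runs on 'au'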
def query_by_attribute(self, key, value, **kwargs): """ Get Build Records by attribute. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.query_by_attribute(key, value, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param str key: Attribute key (required) :param str value: Attribute value (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildRecordPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.query_by_attribute_with_http_info(key, value, **kwargs) else: (data) = self.query_by_attribute_with_http_info(key, value, **kwargs) return data
[ "def", "query_by_attribute", "(", "self", ",", "key", ",", "value", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "query_by_attribute_with_http_info", "(", "key", ",", "value", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "query_by_attribute_with_http_info", "(", "key", ",", "value", ",", "*", "*", "kwargs", ")", "return", "data" ]
41.733333
15.6
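The callback switch above is a common swagger-generated-client pattern: the same _with_http_info call either runs inline or on a worker thread. A stripped-down equivalent, with the HTTP call replaced by a stub:

import threading

def query(url, callback=None):
    def do_request():
        return 'GET %s -> 200' % url  # stand-in for the real HTTP request
    if callback:
        thread = threading.Thread(target=lambda: callback(do_request()))
        thread.start()
        return thread  # asynchronous: the caller gets the request thread
    return do_request()  # synchronous: the caller gets the data

print(query('/build-records'))
query('/build-records', callback=print).join()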
def verify_password(password, password_hash): """Returns ``True`` if the password matches the supplied hash. :param password: A plaintext password to verify :param password_hash: The expected hash value of the password (usually from your database) """ if use_double_hash(password_hash): password = get_hmac(password) return _pwd_context.verify(password, password_hash)
[ "def", "verify_password", "(", "password", ",", "password_hash", ")", ":", "if", "use_double_hash", "(", "password_hash", ")", ":", "password", "=", "get_hmac", "(", "password", ")", "return", "_pwd_context", ".", "verify", "(", "password", ",", "password_hash", ")" ]
38
14.090909
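A sketch of the double-hash step: HMAC the plaintext with a server-side secret before handing it to the password context. The secret value and the sha512/base64 choices are assumptions about what get_hmac does, not confirmed by the snippet:

import base64
import hashlib
import hmac

def get_hmac_sketch(password, secret=b'app-secret'):
    # The HMAC output, not the raw password, is what the context verifies.
    digest = hmac.new(secret, password.encode('utf-8'), hashlib.sha512).digest()
    return base64.b64encode(digest)

print(get_hmac_sketch('hunter2')[:16])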
def work_in(dirname=None): """Context manager version of os.chdir. When exited, returns to the working directory prior to entering. """ curdir = os.getcwd() try: if dirname is not None: os.chdir(dirname) yield finally: os.chdir(curdir)
[ "def", "work_in", "(", "dirname", "=", "None", ")", ":", "curdir", "=", "os", ".", "getcwd", "(", ")", "try", ":", "if", "dirname", "is", "not", "None", ":", "os", ".", "chdir", "(", "dirname", ")", "yield", "finally", ":", "os", ".", "chdir", "(", "curdir", ")" ]
23.75
18.416667
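Typical usage of such a chdir context manager, restated standalone with the @contextmanager decorator it implies:

import os
from contextlib import contextmanager

@contextmanager
def work_in(dirname=None):
    # Remember where we were, move if asked, and always move back.
    curdir = os.getcwd()
    try:
        if dirname is not None:
            os.chdir(dirname)
        yield
    finally:
        os.chdir(curdir)

with work_in('/tmp'):
    print(os.getcwd())  # /tmp (or its symlink target on some platforms)
print(os.getcwd())      # back to the original directory, even on error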
def save_named_query(self, alias, querystring, afterwards=None): """ add an alias for a query string. These are stored in the notmuch database and can be used as part of more complex queries using the syntax "query:alias". See :manpage:`notmuch-search-terms(7)` for more info. :param alias: name of shortcut :type alias: str :param querystring: value, i.e., the full query string :type querystring: str :param afterwards: callback to trigger after adding the alias :type afterwards: callable or None """ if self.ro: raise DatabaseROError() self.writequeue.append(('setconfig', afterwards, 'query.' + alias, querystring))
[ "def", "save_named_query", "(", "self", ",", "alias", ",", "querystring", ",", "afterwards", "=", "None", ")", ":", "if", "self", ".", "ro", ":", "raise", "DatabaseROError", "(", ")", "self", ".", "writequeue", ".", "append", "(", "(", "'setconfig'", ",", "afterwards", ",", "'query.'", "+", "alias", ",", "querystring", ")", ")" ]
40
17.157895
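The method does not touch the database directly; it queues a ('setconfig', callback, key, value) tuple for a later flush. A self-contained sketch of that write-queue pattern, with a plain dict standing in for the notmuch config store:

from collections import deque

writequeue = deque()

def save_named_query(alias, querystring, afterwards=None):
    writequeue.append(('setconfig', afterwards, 'query.' + alias, querystring))

def flush(config):
    # Drain queued mutations in order, firing each callback as we go.
    while writequeue:
        op, afterwards, key, value = writequeue.popleft()
        if op == 'setconfig':
            config[key] = value
        if afterwards is not None:
            afterwards()

cfg = {}
save_named_query('inbox-todo', 'tag:inbox and tag:todo',
                 afterwards=lambda: print('alias stored'))
flush(cfg)
print(cfg)  # {'query.inbox-todo': 'tag:inbox and tag:todo'}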
def make_random_models_table(n_sources, param_ranges, random_state=None):
    """
    Make a `~astropy.table.Table` containing randomly generated
    parameters for an Astropy model to simulate a set of sources.

    Each row of the table corresponds to a source whose parameters are
    defined by the column names.  The parameters are drawn from a
    uniform distribution over the specified input ranges.

    The output table can be input into :func:`make_model_sources_image`
    to create an image containing the model sources.

    Parameters
    ----------
    n_sources : int
        The number of random model sources to generate.

    param_ranges : dict
        The lower and upper boundaries for each of the model parameters
        as a `dict` mapping the parameter name to its ``(lower, upper)``
        bounds.

    random_state : int or `~numpy.random.RandomState`, optional
        Pseudo-random number generator state used for random sampling.

    Returns
    -------
    table : `~astropy.table.Table`
        A table of parameters for the randomly generated sources.  Each
        row of the table corresponds to a source whose model parameters
        are defined by the column names.  The column names will be the
        keys of the dictionary ``param_ranges``.

    See Also
    --------
    make_random_gaussians_table, make_model_sources_image

    Notes
    -----
    To generate identical parameter values from separate function calls,
    ``param_ranges`` must be input as an `~collections.OrderedDict` with
    the same parameter ranges and ``random_state`` must be the same.

    Examples
    --------
    >>> from collections import OrderedDict
    >>> from photutils.datasets import make_random_models_table
    >>> n_sources = 5
    >>> param_ranges = [('amplitude', [500, 1000]),
    ...                 ('x_mean', [0, 500]),
    ...                 ('y_mean', [0, 300]),
    ...                 ('x_stddev', [1, 5]),
    ...                 ('y_stddev', [1, 5]),
    ...                 ('theta', [0, np.pi])]
    >>> param_ranges = OrderedDict(param_ranges)
    >>> sources = make_random_models_table(n_sources, param_ranges,
    ...                                    random_state=12345)
    >>> for col in sources.colnames:
    ...     sources[col].info.format = '%.8g'  # for consistent table output
    >>> print(sources)
    amplitude   x_mean    y_mean   x_stddev  y_stddev   theta
    --------- --------- --------- --------- --------- ----------
    964.80805 297.77235 224.31444 3.6256447 3.5699013  2.2923859
    658.18778 482.25726 288.39202 4.2392502 3.8698145  3.1227889
    591.95941 326.58855 2.5164894 4.4887037  2.870396  2.1264615
    602.28014 374.45332 31.933313 4.8585904 2.3023387  2.4844422
    783.86251 326.78494 89.611114 3.8947414 2.7585784 0.53694298
    """

    prng = check_random_state(random_state)

    sources = Table()
    for param_name, (lower, upper) in param_ranges.items():
        # Generate a column for every item in param_ranges, even if it
        # is not in the model (e.g. flux).  However, such columns will
        # be ignored when rendering the image.
        sources[param_name] = prng.uniform(lower, upper, n_sources)

    return sources
[ "def", "make_random_models_table", "(", "n_sources", ",", "param_ranges", ",", "random_state", "=", "None", ")", ":", "prng", "=", "check_random_state", "(", "random_state", ")", "sources", "=", "Table", "(", ")", "for", "param_name", ",", "(", "lower", ",", "upper", ")", "in", "param_ranges", ".", "items", "(", ")", ":", "# Generate a column for every item in param_ranges, even if it", "# is not in the model (e.g. flux). However, such columns will", "# be ignored when rendering the image.", "sources", "[", "param_name", "]", "=", "prng", ".", "uniform", "(", "lower", ",", "upper", ",", "n_sources", ")", "return", "sources" ]
39.911392
23.860759
def run(self): """ Called by the threading system """ try: self._connect() self._register() while True: try: body = self.command_queue.get(block=True, timeout=1 * SECOND) except queue.Empty: body = None if body is not None: result = self._send(body) if result: self.command_queue.task_done() else: # Something was wrong with the socket. self._disconnect() self._connect() self._register() # Check for stop event after a read from the queue. This is to # allow you to open a socket, immediately send to it, and then # stop it. We do this in the Metadata send at application start # time if self._stop_event.is_set(): logger.debug("CoreAgentSocket thread stopping.") break except Exception: logger.debug("CoreAgentSocket thread exception.") finally: self._started_event.clear() self._stop_event.clear() self._stopped_event.set() logger.debug("CoreAgentSocket thread stopped.")
[ "def", "run", "(", "self", ")", ":", "try", ":", "self", ".", "_connect", "(", ")", "self", ".", "_register", "(", ")", "while", "True", ":", "try", ":", "body", "=", "self", ".", "command_queue", ".", "get", "(", "block", "=", "True", ",", "timeout", "=", "1", "*", "SECOND", ")", "except", "queue", ".", "Empty", ":", "body", "=", "None", "if", "body", "is", "not", "None", ":", "result", "=", "self", ".", "_send", "(", "body", ")", "if", "result", ":", "self", ".", "command_queue", ".", "task_done", "(", ")", "else", ":", "# Something was wrong with the socket.", "self", ".", "_disconnect", "(", ")", "self", ".", "_connect", "(", ")", "self", ".", "_register", "(", ")", "# Check for stop event after a read from the queue. This is to", "# allow you to open a socket, immediately send to it, and then", "# stop it. We do this in the Metadata send at application start", "# time", "if", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "logger", ".", "debug", "(", "\"CoreAgentSocket thread stopping.\"", ")", "break", "except", "Exception", ":", "logger", ".", "debug", "(", "\"CoreAgentSocket thread exception.\"", ")", "finally", ":", "self", ".", "_started_event", ".", "clear", "(", ")", "self", ".", "_stop_event", ".", "clear", "(", ")", "self", ".", "_stopped_event", ".", "set", "(", ")", "logger", ".", "debug", "(", "\"CoreAgentSocket thread stopped.\"", ")" ]
36.105263
16.684211
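The 1-second timeout on the blocking get() is what lets the loop notice the stop event even while the queue is idle. The same pattern in a minimal, runnable form; names are illustrative, not the agent's real API:

import logging
import queue
import threading

logger = logging.getLogger(__name__)

class Worker(threading.Thread):
    def __init__(self):
        super().__init__(daemon=True)
        self.command_queue = queue.Queue()
        self._stop_event = threading.Event()

    def run(self):
        # Poll the stop event between reads; the short timeout keeps the
        # thread responsive even when no commands arrive.
        while not self._stop_event.is_set():
            try:
                body = self.command_queue.get(block=True, timeout=1)
            except queue.Empty:
                continue
            logger.debug("processing %r", body)
            self.command_queue.task_done()

w = Worker()
w.start()
w.command_queue.put("hello")
w.command_queue.join()   # wait until the item has been processed
w._stop_event.set()
w.join()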
def get(self, name): """Returns a Notification by name. """ if not self.loaded: raise RegistryNotLoaded(self) if not self._registry.get(name): raise NotificationNotRegistered( f"Notification not registered. Got '{name}'." ) return self._registry.get(name)
[ "def", "get", "(", "self", ",", "name", ")", ":", "if", "not", "self", ".", "loaded", ":", "raise", "RegistryNotLoaded", "(", "self", ")", "if", "not", "self", ".", "_registry", ".", "get", "(", "name", ")", ":", "raise", "NotificationNotRegistered", "(", "f\"Notification not registered. Got '{name}'.\"", ")", "return", "self", ".", "_registry", ".", "get", "(", "name", ")" ]
33.8
8.7
def get_inactive() -> List[str]: """Return the list of inactive subarrays.""" inactive = [] for i in range(__num_subarrays__): key = Subarray.get_key(i) if DB.get_hash_value(key, 'active').upper() == 'FALSE': inactive.append(Subarray.get_id(i)) return inactive
[ "def", "get_inactive", "(", ")", "->", "List", "[", "str", "]", ":", "inactive", "=", "[", "]", "for", "i", "in", "range", "(", "__num_subarrays__", ")", ":", "key", "=", "Subarray", ".", "get_key", "(", "i", ")", "if", "DB", ".", "get_hash_value", "(", "key", ",", "'active'", ")", ".", "upper", "(", ")", "==", "'FALSE'", ":", "inactive", ".", "append", "(", "Subarray", ".", "get_id", "(", "i", ")", ")", "return", "inactive" ]
40.625
10.875
def _zeo_key(self, key, new_type=OOBTree): """ Get key from the :attr:`zeo` database root. If the key doesn't exist, create it by calling `new_type` argument. Args: key (str): Key in the root dict. new_type (func/obj): Object/function returning the new instance. Returns: obj: Stored object, or `new_type`. """ zeo_key = self.zeo.get(key, None) if zeo_key is None: zeo_key = new_type() self.zeo[key] = zeo_key return zeo_key
[ "def", "_zeo_key", "(", "self", ",", "key", ",", "new_type", "=", "OOBTree", ")", ":", "zeo_key", "=", "self", ".", "zeo", ".", "get", "(", "key", ",", "None", ")", "if", "zeo_key", "is", "None", ":", "zeo_key", "=", "new_type", "(", ")", "self", ".", "zeo", "[", "key", "]", "=", "zeo_key", "return", "zeo_key" ]
28.526316
18.421053
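This is the classic lazy-initialisation idiom against a persistent root. The same behaviour with a plain dict standing in for the ZEO root; in the original, new_type defaults to OOBTree from the BTrees package, so the created container is itself persistent:

def zeo_key(root, key, new_type=dict):
    # Return the stored container, creating and registering it on first use.
    value = root.get(key)
    if value is None:
        value = new_type()
        root[key] = value
    return value

root = {}
bucket = zeo_key(root, 'books')
bucket['isbn'] = '123'
assert zeo_key(root, 'books') is bucket  # same object on the second call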
def parse(cls, fptr, offset, length):
    """Parse JPX free box.

    Parameters
    ----------
    fptr : file
        Open file object.
    offset : int
        Start position of box in bytes.
    length : int
        Length of the box in bytes.

    Returns
    -------
    FreeBox
        Instance of the current free box.
    """
    # Must seek to end of box.
    nbytes = offset + length - fptr.tell()
    fptr.read(nbytes)
    return cls(length=length, offset=offset)
[ "def", "parse", "(", "cls", ",", "fptr", ",", "offset", ",", "length", ")", ":", "# Must seek to end of box.", "nbytes", "=", "offset", "+", "length", "-", "fptr", ".", "tell", "(", ")", "fptr", ".", "read", "(", "nbytes", ")", "return", "cls", "(", "length", "=", "length", ",", "offset", "=", "offset", ")" ]
24.952381
15.285714
def set_tmp_folder():
    """ Generate a path for a temporary folder, named after the current
    time, in which the zip can be extracted and which should be destroyed
    afterward.
    """
    output = "%s" % datetime.datetime.now()
    for char in [' ', ':', '.', '-']:
        output = output.replace(char, '')
    output = output.strip()
    tmp_folder = os.path.join(tempfile.gettempdir(), output)
    return tmp_folder
[ "def", "set_tmp_folder", "(", ")", ":", "output", "=", "\"%s\"", "%", "datetime", ".", "datetime", ".", "now", "(", ")", "for", "char", "in", "[", "' '", ",", "':'", ",", "'.'", ",", "'-'", "]", ":", "output", "=", "output", ".", "replace", "(", "char", ",", "''", ")", "output", ".", "strip", "(", ")", "tmp_folder", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "gettempdir", "(", ")", ",", "output", ")", "return", "tmp_folder" ]
38.2
11.6
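For what it's worth, the stdlib can do the unique-folder step directly: tempfile.mkdtemp() actually creates the directory, with 0o700 permissions and a guaranteed-unique name, rather than deriving one from the clock. A possible alternative:

import tempfile

# Creates the directory immediately and returns its path.
tmp_folder = tempfile.mkdtemp(prefix='zipextract-')
print(tmp_folder)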
def from_where(cls, where): """ Factory method for creating the top-level expression """ if where.conjunction: return Conjunction.from_clause(where) else: return cls.from_clause(where[0])
[ "def", "from_where", "(", "cls", ",", "where", ")", ":", "if", "where", ".", "conjunction", ":", "return", "Conjunction", ".", "from_clause", "(", "where", ")", "else", ":", "return", "cls", ".", "from_clause", "(", "where", "[", "0", "]", ")" ]
38.333333
10.666667
def distribution_present(name, region=None, key=None, keyid=None, profile=None, **kwargs):
    '''
    Ensure the given CloudFront distribution exists in the described state.

    The implementation of this function, and all those following, is orthogonal
    to that of :py:mod:`boto_cloudfront.present <salt.states.boto_cloudfront.present>`.
    Resources created with :py:mod:`boto_cloudfront.present
    <salt.states.boto_cloudfront.present>` will not be correctly managed by this
    function, as a different method is used to store Salt's state signifier.
    This function and those following are a suite, designed to work together.
    As an extra bonus, they correctly process updates of the managed resources,
    so it is recommended to use them in preference to
    :py:mod:`boto_cloudfront.present <salt.states.boto_cloudfront.present>` above.

    Note that the semantics of DistributionConfig (below) are rather arcane, and
    vary wildly depending on whether the distribution already exists or not
    (e.g. is being initially created, or being updated in place).  Many more
    details can be found here__.

    .. __: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/distribution-overview-required-fields.html

    name (string)
        Name of the state definition.

    Name (string)
        Name of the resource (for purposes of Salt's idempotency).  If not
        provided, the value of ``name`` will be used.

    DistributionConfig (dict)
        Configuration for the distribution.

        Notes:

        - The CallerReference field should NOT be provided - it will be
          auto-populated by Salt.

        - A large number of sub- (and sub-sub-) fields require a ``Quantity``
          element, which simply COUNTS the number of items in the ``Items``
          element.  This is bluntly stupid, so as a convenience, Salt will
          traverse the provided configuration, and add (or fix) a ``Quantity``
          element for any ``Items`` elements of list-type it encounters.  This
          adds a bit of sanity to an otherwise error-prone situation.  Note
          that for this to work, zero-length lists must be inlined as ``[]``.

        - Due to the unavailability of a better way to store stateful
          idempotency information about Distributions, the Comment sub-element
          (as the only user-settable attribute without weird self-blocking
          semantics, and which is available from the core
          ``get_distribution()`` API call) is utilized to store the Salt state
          signifier, which is used to determine resource existence and state.
          That said, to enable **some** usability of this field, only the value
          up to the first colon character is taken as the signifier, with
          everything afterward free-form, and ignored (but preserved) by Salt.

    Tags (dict)
        Tags to associate with the distribution.

    region (string)
        Region to connect to.

    key (string)
        Secret key to use.

    keyid (string)
        Access key to use.

    profile (dict or string)
        Dict, or pillar key pointing to a dict, containing AWS
        region/key/keyid.

    Example:

    .. code-block:: yaml

        plt-dev-spaapi-cf-dist-cf_dist-present:
          boto_cloudfront.distribution_present:
            - Name: plt-dev-spaapi-cf-dist
            - DistributionConfig:
                Comment: SPA
                Logging:
                  Enabled: false
                  Prefix: ''
                  Bucket: ''
                  IncludeCookies: false
                WebACLId: ''
                Origins:
                  Items:
                  - S3OriginConfig:
                      OriginAccessIdentity: the-SPA-OAI
                    OriginPath: ''
                    CustomHeaders:
                      Items: []
                    Id: S3-hs-backend-srpms
                    DomainName: hs-backend-srpms.s3.amazonaws.com
                PriceClass: PriceClass_All
                DefaultRootObject: ''
                Enabled: true
                DefaultCacheBehavior:
                  ViewerProtocolPolicy: allow-all
                  TrustedSigners:
                    Items: []
                    Enabled: false
                  SmoothStreaming: false
                  TargetOriginId: S3-hs-backend-srpms
                  FieldLevelEncryptionId: ''
                  ForwardedValues:
                    Headers:
                      Items: []
                    Cookies:
                      Forward: none
                    QueryStringCacheKeys:
                      Items: []
                    QueryString: false
                  MaxTTL: 31536000
                  LambdaFunctionAssociations:
                    Items: []
                  DefaultTTL: 86400
                  AllowedMethods:
                    CachedMethods:
                      Items:
                      - HEAD
                      - GET
                    Items:
                    - HEAD
                    - GET
                  MinTTL: 0
                  Compress: false
                IsIPV6Enabled: true
                ViewerCertificate:
                  CloudFrontDefaultCertificate: true
                  MinimumProtocolVersion: TLSv1
                  CertificateSource: cloudfront
                Aliases:
                  Items:
                  - bubba-hotep.bodhi-dev.io
                HttpVersion: http2
            - Tags:
                Owner: dev_engrs
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    kwargs = {k: v for k, v in kwargs.items() if not k.startswith('_')}
    authargs = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    Name = kwargs.pop('Name', name)
    Tags = kwargs.pop('Tags', None)
    DistributionConfig = kwargs.get('DistributionConfig', {})
    ## Sub-element munging on config data should go in here, before we proceed:
    # For instance, origin access identities must be of the form
    # `origin-access-identity/cloudfront/ID-of-origin-access-identity`, but we can't really
    # know that ID a priori, so any OAI state names inside the config data must be resolved
    # and converted into that format before submission.  Be aware that the `state names` of
    # salt-managed OAIs are stored in their Comment fields for lack of any better place...
    for item in range(len(DistributionConfig.get('Origins', {}).get('Items', []))):
        oai = DistributionConfig['Origins']['Items'][item].get('S3OriginConfig',
                {}).get('OriginAccessIdentity', '')
        if oai and not oai.startswith('origin-access-identity/cloudfront/'):
            res = __salt__['boto_cloudfront.get_cloud_front_origin_access_identities_by_comment'](
                    Comment=oai, region=region, key=key, keyid=keyid, profile=profile)
            if res is None:
                # An error occurred, bubble it up...
                log.warning('Error encountered while trying to determine the Resource ID of'
                            ' CloudFront origin access identity `%s`.  Passing as-is.', oai)
            elif not res:
                log.warning('Failed to determine the Resource ID of CloudFront origin access'
                            ' identity `%s`.  Passing as-is.', oai)
            elif len(res) > 1:
                log.warning('Failed to find unique Resource ID for CloudFront origin access'
                            ' identity `%s`.  Passing as-is.', oai)
            else:
                # One unique OAI resource found -- deref and replace it...
                new = 'origin-access-identity/cloudfront/{}'.format(res[0]['Id'])
                DistributionConfig['Origins']['Items'][item]['S3OriginConfig']['OriginAccessIdentity'] = new
    # Munge Name into the Comment field...
    DistributionConfig['Comment'] = '{}:{}'.format(Name, DistributionConfig['Comment']) \
            if DistributionConfig.get('Comment') else Name
    # Fix up any missing (or wrong) Quantity sub-elements...
    DistributionConfig = _fix_quantities(DistributionConfig)
    kwargs['DistributionConfig'] = DistributionConfig
    # Current state of the thing?
    res = __salt__['boto_cloudfront.get_distributions_by_comment'](Comment=Name, region=region,
            key=key, keyid=keyid, profile=profile)
    if res is None:
        msg = 'Error determining current state of distribution `{}`.'.format(Name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret
    if len(res) > 1:
        msg = 'Multiple CloudFront distributions matched `{}`.'.format(Name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret
    # Luckily, the `DistributionConfig` structure returned by `get_distribution()` (as a sub-
    # element of `Distribution`) is identical to that returned by `get_distribution_config()`,
    # and as a bonus, the ETags are ALSO compatible...
    # Since "updates" are actually "replace everything from scratch" events, this implies that
    # it's enough to simply determine SOME update is necessary to trigger one, rather than
    # exhaustively calculating all changes needed - this makes life MUCH EASIER :)
    # Thus our workflow here is:
    # - check if the distribution exists
    # - if it doesn't, create it fresh with the requested DistributionConfig, and Tag it if needed
    # - if it does, grab its ETag, and TWO copies of the current DistributionConfig
    # - merge the requested DistributionConfig on top of one of them
    # - compare the copy we just merged against the one we didn't
    # - if they differ, send the merged copy, along with the ETag we got, back as an update
    # - lastly, verify and set/unset any Tags which may need changing...
    exists = bool(res)
    if not exists:
        if 'CallerReference' not in kwargs['DistributionConfig']:
            kwargs['DistributionConfig']['CallerReference'] = str(uuid.uuid4())
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'CloudFront distribution `{}` would be created.'.format(Name)
            new = {'DistributionConfig': kwargs['DistributionConfig']}
            new.update({'Tags': Tags}) if Tags else None
            ret['pchanges'] = {'old': None, 'new': new}
            return ret
        kwargs.update(authargs)
        comments = []
        res = __salt__['boto_cloudfront.create_distribution_v2'](**kwargs)
        if res is None:
            ret['result'] = False
            msg = 'Error occurred while creating distribution `{}`.'.format(Name)
            log.error(msg)
            ret['comment'] = msg
            return ret
        new = {'DistributionConfig': res['Distribution']['DistributionConfig']}
        comments += ['Created distribution `{}`.'.format(Name)]
        newARN = res.get('Distribution', {}).get('ARN')
        tagged = __salt__['boto_cloudfront.tag_resource'](Resource=newARN, Tags=Tags, **authargs)
        if tagged is False:
            ret['result'] = False
            msg = 'Error occurred while tagging distribution `{}`.'.format(Name)
            log.error(msg)
            comments += [msg]
            ret['comment'] = ' '.join(comments)
            return ret
        comments += ['Tagged distribution `{}`.'.format(Name)]
        new['Tags'] = Tags
        ret['comment'] = ' '.join(comments)
        ret['changes'] = {'old': None, 'new': new}
        return ret
    else:
        currentId = res[0]['Id']
        current = __salt__['boto_cloudfront.get_distribution_v2'](Id=currentId, **authargs)
        # Insanely unlikely given that we JUST got back this Id from the previous search, but....
if not current: msg = 'Failed to lookup CloudFront distribution with Id `{}`.'.format(currentId) log.error(msg) ret['comment'] = msg ret['result'] = False return ret currentDC = current['Distribution']['DistributionConfig'] currentARN = current['Distribution']['ARN'] currentETag = current['ETag'] currentTags = __salt__['boto_cloudfront.list_tags_for_resource'](Resource=currentARN, **authargs) copyOne = copy.deepcopy(currentDC) copyTwo = copy.deepcopy(currentDC) copyTwo.update(kwargs['DistributionConfig']) correct = __utils__['boto3.json_objs_equal'](copyOne, copyTwo) tags_correct = (currentTags == Tags) comments = [] old = {} new = {} if correct and tags_correct: ret['comment'] = 'CloudFront distribution `{}` is in the correct state.'.format(Name) return ret if __opts__['test']: ret['result'] = None if not correct: comments += ['CloudFront distribution `{}` config would be updated.'.format(Name)] old['DistributionConfig'] = copyOne new['DistributionConfig'] = copyTwo if not tags_correct: comments += ['CloudFront distribution `{}` Tags would be updated.'.format(Name)] old['Tags'] = currentTags new['Tags'] = Tags ret['comment'] = ' '.join(comments) ret['pchanges'] = {'old': old, 'new': new} return ret if not correct: kwargs = {'DistributionConfig': copyTwo, 'Id': currentId, 'IfMatch': currentETag} kwargs.update(authargs) log.debug('Calling `boto_cloudfront.update_distribution_v2()` with **kwargs ==' ' %s', kwargs) res = __salt__['boto_cloudfront.update_distribution_v2'](**kwargs) if res is None: ret['result'] = False msg = 'Error occurred while updating distribution `{}`.'.format(Name) log.error(msg) ret['comment'] = msg return ret old['DistributionConfig'] = copyOne new['DistributionConfig'] = res['Distribution']['DistributionConfig'] comments += ['CloudFront distribution `{}` config updated.'.format(Name)] if not tags_correct: tagged = __salt__['boto_cloudfront.enforce_tags'](Resource=currentARN, Tags=Tags, **authargs) if tagged is False: ret['result'] = False msg = 'Error occurred while updating Tags on distribution `{}`.'.format(Name) log.error(msg) comments += [msg] ret['comment'] = ' '.join(comments) return ret comments += ['CloudFront distribution `{}` Tags updated.'.format(Name)] old['Tags'] = currentTags new['Tags'] = Tags ret['comment'] = ' '.join(comments) ret['changes'] = {'old': old, 'new': new} return ret
[ "def", "distribution_present", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "kwargs", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "if", "not", "k", ".", "startswith", "(", "'_'", ")", "}", "authargs", "=", "{", "'region'", ":", "region", ",", "'key'", ":", "key", ",", "'keyid'", ":", "keyid", ",", "'profile'", ":", "profile", "}", "Name", "=", "kwargs", ".", "pop", "(", "'Name'", ",", "name", ")", "Tags", "=", "kwargs", ".", "pop", "(", "'Tags'", ",", "None", ")", "DistributionConfig", "=", "kwargs", ".", "get", "(", "'DistributionConfig'", ",", "{", "}", ")", "## Sub-element munging on config data should go in here, before we proceed:", "# For instance, origin access identities must be of the form", "# `origin-access-identity/cloudfront/ID-of-origin-access-identity`, but we can't really", "# know that ID apriori, so any OAI state names inside the config data must be resolved", "# and converted into that format before submission. Be aware that the `state names` of", "# salt managed OAIs are stored in their Comment fields for lack of any better place...", "for", "item", "in", "range", "(", "len", "(", "DistributionConfig", ".", "get", "(", "'Origins'", ",", "{", "}", ")", ".", "get", "(", "'Items'", ",", "[", "]", ")", ")", ")", ":", "oai", "=", "DistributionConfig", "[", "'Origins'", "]", "[", "'Items'", "]", "[", "item", "]", ".", "get", "(", "'S3OriginConfig'", ",", "{", "}", ")", ".", "get", "(", "'OriginAccessIdentity'", ",", "''", ")", "if", "oai", "and", "not", "oai", ".", "startswith", "(", "'origin-access-identity/cloudfront/'", ")", ":", "res", "=", "__salt__", "[", "'boto_cloudfront.get_cloud_front_origin_access_identities_by_comment'", "]", "(", "Comment", "=", "oai", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "res", "is", "None", ":", "# An error occurred, bubble it up...", "log", ".", "warning", "(", "'Error encountered while trying to determine the Resource ID of'", "' CloudFront origin access identity `%s`. Passing as-is.'", ",", "oai", ")", "elif", "not", "res", ":", "log", ".", "warning", "(", "'Failed to determine the Resource ID of CloudFront origin access'", "' identity `%s`. Passing as-is.'", ",", "oai", ")", "elif", "len", "(", "res", ")", ">", "1", ":", "log", ".", "warning", "(", "'Failed to find unique Resource ID for CloudFront origin access'", "' identity `%s`. 
Passing as-is.'", ",", "oai", ")", "else", ":", "# One unique OAI resource found -- deref and replace it...", "new", "=", "'origin-access-identity/cloudfront/{}'", ".", "format", "(", "res", "[", "0", "]", "[", "'Id'", "]", ")", "DistributionConfig", "[", "'Origins'", "]", "[", "'Items'", "]", "[", "item", "]", "[", "'S3OriginConfig'", "]", "[", "'OriginAccessIdentity'", "]", "=", "new", "# Munge Name into the Comment field...", "DistributionConfig", "[", "'Comment'", "]", "=", "'{}:{}'", ".", "format", "(", "Name", ",", "DistributionConfig", "[", "'Comment'", "]", ")", "if", "DistributionConfig", ".", "get", "(", "'Comment'", ")", "else", "Name", "# Fix up any missing (or wrong) Quantity sub-elements...", "DistributionConfig", "=", "_fix_quantities", "(", "DistributionConfig", ")", "kwargs", "[", "'DistributionConfig'", "]", "=", "DistributionConfig", "# Current state of the thing?", "res", "=", "__salt__", "[", "'boto_cloudfront.get_distributions_by_comment'", "]", "(", "Comment", "=", "Name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "res", "is", "None", ":", "msg", "=", "'Error determining current state of distribution `{}`.'", ".", "format", "(", "Name", ")", "log", ".", "error", "(", "msg", ")", "ret", "[", "'comment'", "]", "=", "msg", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "if", "len", "(", "res", ")", ">", "1", ":", "msg", "=", "'Multiple CloudFront distibutions matched `{}`.'", ".", "format", "(", "Name", ")", "log", ".", "error", "(", "msg", ")", "ret", "[", "'comment'", "]", "=", "msg", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "# Luckily, the `DistributionConfig` structure returned by `get_distribution()` (as a sub-", "# element of `Distribution`) is identical to that returned by `get_distribution_config(),", "# and as a bonus, the ETag's are ALSO compatible...", "# Since \"updates\" are actually \"replace everything from scratch\" events, this implies that", "# it's enough to simply determine SOME update is necessary to trigger one, rather than", "# exhaustively calculating all changes needed - this makes life MUCH EASIER :)", "# Thus our workflow here is:", "# - check if the distribution exists", "# - if it doesn't, create it fresh with the requested DistributionConfig, and Tag it if needed", "# - if it does, grab its ETag, and TWO copies of the current DistributionConfig", "# - merge the requested DistributionConfig on top of one of them", "# - compare the copy we just merged against the one we didn't", "# - if they differ, send the merged copy, along with the ETag we got, back as an update", "# - lastly, verify and set/unset any Tags which may need changing...", "exists", "=", "bool", "(", "res", ")", "if", "not", "exists", ":", "if", "'CallerReference'", "not", "in", "kwargs", "[", "'DistributionConfig'", "]", ":", "kwargs", "[", "'DistributionConfig'", "]", "[", "'CallerReference'", "]", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'CloudFront distribution `{}` would be created.'", ".", "format", "(", "Name", ")", "new", "=", "{", "'DistributionConfig'", ":", "kwargs", "[", "'DistributionConfig'", "]", "}", "new", ".", "update", "(", "{", "'Tags'", ":", "Tags", "}", ")", "if", "Tags", "else", "None", "ret", "[", "'pchanges'", "]", "=", "{", "'old'", ":", "None", ",", "'new'", ":", "new", "}", "return", "ret", "kwargs", ".", 
"update", "(", "authargs", ")", "comments", "=", "[", "]", "res", "=", "__salt__", "[", "'boto_cloudfront.create_distribution_v2'", "]", "(", "*", "*", "kwargs", ")", "if", "res", "is", "None", ":", "ret", "[", "'result'", "]", "=", "False", "msg", "=", "'Error occurred while creating distribution `{}`.'", ".", "format", "(", "Name", ")", "log", ".", "error", "(", "msg", ")", "ret", "[", "'comment'", "]", "=", "msg", "return", "ret", "new", "=", "{", "'DistributionConfig'", ":", "res", "[", "'Distribution'", "]", "[", "'DistributionConfig'", "]", "}", "comments", "+=", "[", "'Created distribution `{}`.'", ".", "format", "(", "Name", ")", "]", "newARN", "=", "res", ".", "get", "(", "'Distribution'", ",", "{", "}", ")", ".", "get", "(", "'ARN'", ")", "tagged", "=", "__salt__", "[", "'boto_cloudfront.tag_resource'", "]", "(", "Tags", "=", "Tags", ",", "*", "*", "authargs", ")", "if", "tagged", "is", "False", ":", "ret", "[", "'result'", "]", "=", "False", "msg", "=", "'Error occurred while tagging distribution `{}`.'", ".", "format", "(", "Name", ")", "log", ".", "error", "(", "msg", ")", "comments", "+=", "[", "msg", "]", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "comments", ")", "return", "ret", "comments", "+=", "[", "'Tagged distribution `{}`.'", ".", "format", "(", "Name", ")", "]", "new", "[", "'Tags'", "]", "=", "Tags", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "comments", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "None", ",", "'new'", ":", "new", "}", "return", "ret", "else", ":", "currentId", "=", "res", "[", "0", "]", "[", "'Id'", "]", "current", "=", "__salt__", "[", "'boto_cloudfront.get_distribution_v2'", "]", "(", "Id", "=", "currentId", ",", "*", "*", "authargs", ")", "# Insanely unlikely given that we JUST got back this Id from the previous search, but....", "if", "not", "current", ":", "msg", "=", "'Failed to lookup CloudFront distribution with Id `{}`.'", ".", "format", "(", "currentId", ")", "log", ".", "error", "(", "msg", ")", "ret", "[", "'comment'", "]", "=", "msg", "ret", "[", "'result'", "]", "=", "False", "return", "ret", "currentDC", "=", "current", "[", "'Distribution'", "]", "[", "'DistributionConfig'", "]", "currentARN", "=", "current", "[", "'Distribution'", "]", "[", "'ARN'", "]", "currentETag", "=", "current", "[", "'ETag'", "]", "currentTags", "=", "__salt__", "[", "'boto_cloudfront.list_tags_for_resource'", "]", "(", "Resource", "=", "currentARN", ",", "*", "*", "authargs", ")", "copyOne", "=", "copy", ".", "deepcopy", "(", "currentDC", ")", "copyTwo", "=", "copy", ".", "deepcopy", "(", "currentDC", ")", "copyTwo", ".", "update", "(", "kwargs", "[", "'DistributionConfig'", "]", ")", "correct", "=", "__utils__", "[", "'boto3.json_objs_equal'", "]", "(", "copyOne", ",", "copyTwo", ")", "tags_correct", "=", "(", "currentTags", "==", "Tags", ")", "comments", "=", "[", "]", "old", "=", "{", "}", "new", "=", "{", "}", "if", "correct", "and", "tags_correct", ":", "ret", "[", "'comment'", "]", "=", "'CloudFront distribution `{}` is in the correct state.'", ".", "format", "(", "Name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "if", "not", "correct", ":", "comments", "+=", "[", "'CloudFront distribution `{}` config would be updated.'", ".", "format", "(", "Name", ")", "]", "old", "[", "'DistributionConfig'", "]", "=", "copyOne", "new", "[", "'DistributionConfig'", "]", "=", "copyTwo", "if", "not", "tags_correct", ":", "comments", "+=", "[", 
"'CloudFront distribution `{}` Tags would be updated.'", ".", "format", "(", "Name", ")", "]", "old", "[", "'Tags'", "]", "=", "currentTags", "new", "[", "'Tags'", "]", "=", "Tags", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "comments", ")", "ret", "[", "'pchanges'", "]", "=", "{", "'old'", ":", "old", ",", "'new'", ":", "new", "}", "return", "ret", "if", "not", "correct", ":", "kwargs", "=", "{", "'DistributionConfig'", ":", "copyTwo", ",", "'Id'", ":", "currentId", ",", "'IfMatch'", ":", "currentETag", "}", "kwargs", ".", "update", "(", "authargs", ")", "log", ".", "debug", "(", "'Calling `boto_cloudfront.update_distribution_v2()` with **kwargs =='", "' %s'", ",", "kwargs", ")", "res", "=", "__salt__", "[", "'boto_cloudfront.update_distribution_v2'", "]", "(", "*", "*", "kwargs", ")", "if", "res", "is", "None", ":", "ret", "[", "'result'", "]", "=", "False", "msg", "=", "'Error occurred while updating distribution `{}`.'", ".", "format", "(", "Name", ")", "log", ".", "error", "(", "msg", ")", "ret", "[", "'comment'", "]", "=", "msg", "return", "ret", "old", "[", "'DistributionConfig'", "]", "=", "copyOne", "new", "[", "'DistributionConfig'", "]", "=", "res", "[", "'Distribution'", "]", "[", "'DistributionConfig'", "]", "comments", "+=", "[", "'CloudFront distribution `{}` config updated.'", ".", "format", "(", "Name", ")", "]", "if", "not", "tags_correct", ":", "tagged", "=", "__salt__", "[", "'boto_cloudfront.enforce_tags'", "]", "(", "Resource", "=", "currentARN", ",", "Tags", "=", "Tags", ",", "*", "*", "authargs", ")", "if", "tagged", "is", "False", ":", "ret", "[", "'result'", "]", "=", "False", "msg", "=", "'Error occurred while updating Tags on distribution `{}`.'", ".", "format", "(", "Name", ")", "log", ".", "error", "(", "msg", ")", "comments", "+=", "[", "msg", "]", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "comments", ")", "return", "ret", "comments", "+=", "[", "'CloudFront distribution `{}` Tags updated.'", ".", "format", "(", "Name", ")", "]", "old", "[", "'Tags'", "]", "=", "currentTags", "new", "[", "'Tags'", "]", "=", "Tags", "ret", "[", "'comment'", "]", "=", "' '", ".", "join", "(", "comments", ")", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "old", ",", "'new'", ":", "new", "}", "return", "ret" ]
46.249211
23.283912
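The _fix_quantities helper invoked above is defined elsewhere in the module; this is a plausible sketch of the traversal the docstring promises (recursively count each list-typed Items into a sibling Quantity), which may differ from the real implementation in detail:

def _fix_quantities(tree):
    # Walk dicts and lists; wherever a dict carries a list under 'Items',
    # set 'Quantity' to its length, dropping empty Items lists outright.
    if isinstance(tree, dict):
        tree = {k: _fix_quantities(v) for k, v in tree.items()}
        items = tree.get('Items')
        if isinstance(items, list):
            tree['Quantity'] = len(items)
            if not items:
                del tree['Items']
        return tree
    if isinstance(tree, list):
        return [_fix_quantities(t) for t in tree]
    return tree

print(_fix_quantities({'Aliases': {'Items': ['example.com']}}))
# {'Aliases': {'Items': ['example.com'], 'Quantity': 1}}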
def fulfill(self, method, *args, **kwargs): """ Fulfill an HTTP request to Keen's API. """ return getattr(self.session, method)(*args, **kwargs)
[ "def", "fulfill", "(", "self", ",", "method", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "getattr", "(", "self", ".", "session", ",", "method", ")", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
31.6
20.8
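The getattr dispatch means one fulfill() covers every verb the underlying session exposes. An illustrative, self-contained version; the class name is hypothetical:

import requests

class SessionClient:
    def __init__(self):
        self.session = requests.Session()

    def fulfill(self, method, *args, **kwargs):
        # Look the verb up by name on the shared session and call it.
        return getattr(self.session, method)(*args, **kwargs)

# SessionClient().fulfill('get', 'https://api.keen.io/')  # == session.get(...)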
def generate_options():
    '''Helper coroutine to identify short options that haven't been used yet.

    Yields lists of short option (if available) and long option for the given
    name, keeping track of which short options have been previously used.

    If you aren't familiar with coroutines, use it like a generator:

        x = generate_options()
        next(x)  # advance coroutine past its initialization code
        params = x.send(param_name)
    '''
    used_short_options = set()
    param_name = yield
    while True:
        names = ['--' + param_name]
        for letter in param_name:
            if letter not in used_short_options:
                used_short_options.add(letter)
                names.insert(0, '-' + letter)
                break
        param_name = yield names
[ "def", "generate_options", "(", ")", ":", "used_short_options", "=", "set", "(", ")", "param_name", "=", "yield", "while", "True", ":", "names", "=", "[", "'--'", "+", "param_name", "]", "for", "letter", "in", "param_name", ":", "if", "letter", "not", "in", "used_short_options", ":", "used_short_options", ".", "add", "(", "letter", ")", "names", ".", "insert", "(", "0", ",", "'-'", "+", "letter", ")", "break", "param_name", "=", "yield", "names" ]
36.952381
19.428571
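Driving the coroutine exactly as the docstring describes; 'v' is claimed by the first name, so the second name falls through to its first unused letter:

x = generate_options()
next(x)                   # advance past initialization
print(x.send('verbose'))  # ['-v', '--verbose']
print(x.send('version'))  # ['-e', '--version'], since 'v' is already taken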
def routedResource(f, routerAttribute='router'):
    """
    Decorate a router-producing callable to instead produce a resource.

    This simply produces a new callable that invokes the original callable, and
    calls ``resource`` on the ``routerAttribute``.

    If the router producer has multiple routers, the attribute can be altered
    to choose the appropriate one, for example:

    .. code-block:: python

        class _ComplexRouter(object):
            router = Router()
            privateRouter = Router()

            @router.route('/')
            def publicRoot(self, request, params):
                return SomethingPublic(...)

            @privateRouter.route('/')
            def privateRoot(self, request, params):
                return SomethingPrivate(...)

        PublicResource = routedResource(_ComplexRouter)
        PrivateResource = routedResource(_ComplexRouter, 'privateRouter')

    :type f: ``callable``
    :param f: Callable producing an object with a `Router` attribute, for
        example, a type.

    :type routerAttribute: `str`
    :param routerAttribute: Name of the `Router` attribute on the result of
        calling ``f``.

    :rtype: `callable`
    :return: Callable producing an `IResource`.
    """
    return wraps(f)(
        lambda *a, **kw: getattr(f(*a, **kw), routerAttribute).resource())
[ "def", "routedResource", "(", "f", ",", "routerAttribute", "=", "'router'", ")", ":", "return", "wraps", "(", "f", ")", "(", "lambda", "*", "a", ",", "*", "*", "kw", ":", "getattr", "(", "f", "(", "*", "a", ",", "*", "*", "kw", ")", ",", "routerAttribute", ")", ".", "resource", "(", ")", ")" ]
32.8
21.35
def smartypants(text):
    """
    Transforms sequences of characters into HTML entities.

    =================================== ===================== =========
    Markdown                            HTML                  Result
    =================================== ===================== =========
    ``'s`` (s, t, m, d, re, ll, ve)     &rsquo;s              ’s
    ``"Quotes"``                        &ldquo;Quotes&rdquo;  “Quotes”
    ``---``                             &mdash;               —
    ``--``                              &ndash;               –
    ``...``                             &hellip;              …
    ``. . .``                           &hellip;              …
    ``(c)``                             &copy;                ©
    ``(r)``                             &reg;                 ®
    ``(tm)``                            &trade;               ™
    ``3/4``                             &frac34;              ¾
    ``1/2``                             &frac12;              ½
    ``1/4``                             &frac14;              ¼
    =================================== ===================== =========
    """
    byte_str = text.encode('utf-8')
    ob = lib.hoedown_buffer_new(OUNIT)
    lib.hoedown_html_smartypants(ob, byte_str, len(byte_str))
    try:
        return to_string(ob)
    finally:
        lib.hoedown_buffer_free(ob)
[ "def", "smartypants", "(", "text", ")", ":", "byte_str", "=", "text", ".", "encode", "(", "'utf-8'", ")", "ob", "=", "lib", ".", "hoedown_buffer_new", "(", "OUNIT", ")", "lib", ".", "hoedown_html_smartypants", "(", "ob", ",", "byte_str", ",", "len", "(", "byte_str", ")", ")", "try", ":", "return", "to_string", "(", "ob", ")", "finally", ":", "lib", ".", "hoedown_buffer_free", "(", "ob", ")" ]
47.896552
22.655172
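Expected behaviour per the table above, assuming the hoedown FFI bindings (lib, OUNIT, to_string) this module wraps are available:

html = smartypants('"Quotes" -- dashes...')
# -> '&ldquo;Quotes&rdquo; &ndash; dashes&hellip;'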
def load_tool(result): """ Load the module with the tool-specific code. """ def load_tool_module(tool_module): if not tool_module: logging.warning('Cannot extract values from log files for benchmark results %s ' '(missing attribute "toolmodule" on tag "result").', Util.prettylist(result.attributes['name'])) return None try: logging.debug('Loading %s', tool_module) return __import__(tool_module, fromlist=['Tool']).Tool() except ImportError as ie: logging.warning( 'Missing module "%s", cannot extract values from log files (ImportError: %s).', tool_module, ie) except AttributeError: logging.warning( 'The module "%s" does not define the necessary class Tool, ' 'cannot extract values from log files.', tool_module) return None tool_module = result.attributes['toolmodule'][0] if 'toolmodule' in result.attributes else None if tool_module in loaded_tools: return loaded_tools[tool_module] else: result = load_tool_module(tool_module) loaded_tools[tool_module] = result return result
[ "def", "load_tool", "(", "result", ")", ":", "def", "load_tool_module", "(", "tool_module", ")", ":", "if", "not", "tool_module", ":", "logging", ".", "warning", "(", "'Cannot extract values from log files for benchmark results %s '", "'(missing attribute \"toolmodule\" on tag \"result\").'", ",", "Util", ".", "prettylist", "(", "result", ".", "attributes", "[", "'name'", "]", ")", ")", "return", "None", "try", ":", "logging", ".", "debug", "(", "'Loading %s'", ",", "tool_module", ")", "return", "__import__", "(", "tool_module", ",", "fromlist", "=", "[", "'Tool'", "]", ")", ".", "Tool", "(", ")", "except", "ImportError", "as", "ie", ":", "logging", ".", "warning", "(", "'Missing module \"%s\", cannot extract values from log files (ImportError: %s).'", ",", "tool_module", ",", "ie", ")", "except", "AttributeError", ":", "logging", ".", "warning", "(", "'The module \"%s\" does not define the necessary class Tool, '", "'cannot extract values from log files.'", ",", "tool_module", ")", "return", "None", "tool_module", "=", "result", ".", "attributes", "[", "'toolmodule'", "]", "[", "0", "]", "if", "'toolmodule'", "in", "result", ".", "attributes", "else", "None", "if", "tool_module", "in", "loaded_tools", ":", "return", "loaded_tools", "[", "tool_module", "]", "else", ":", "result", "=", "load_tool_module", "(", "tool_module", ")", "loaded_tools", "[", "tool_module", "]", "=", "result", "return", "result" ]
40.774194
19.354839
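The hand-rolled loaded_tools dict is a memo cache keyed on the module name. The same idea can be expressed with functools.lru_cache; a simplified sketch that drops the error handling, not the module's actual implementation:

import functools
import importlib

@functools.lru_cache(maxsize=None)
def load_tool_class(tool_module):
    # importlib equivalent of __import__(tool_module, fromlist=['Tool']);
    # each module name is imported and resolved at most once.
    return importlib.import_module(tool_module).Tool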
def _hash(expr, func=None):
    """
    Calculate the hash value.

    :param expr: expression whose values are to be hashed
    :param func: hash function; defaults to the built-in ``hash``
    :return: mapped expression of int64 hash values
    """
    if func is None:
        func = lambda x: hash(x)
    return _map(expr, func=func, rtype=types.int64)
[ "def", "_hash", "(", "expr", ",", "func", "=", "None", ")", ":", "if", "func", "is", "None", ":", "func", "=", "lambda", "x", ":", "hash", "(", "x", ")", "return", "_map", "(", "expr", ",", "func", "=", "func", ",", "rtype", "=", "types", ".", "int64", ")" ]
19.25
17.083333