Columns: text (string, lengths 89 to 104k), code_tokens (list), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630)
def cancel(self):
    """Cancel a running :meth:`iterconsume` session."""
    for consumer_tag in self._open_consumers.values():
        try:
            self.backend.cancel(consumer_tag)
        except KeyError:
            pass
    self._open_consumers.clear()
[ "def", "cancel", "(", "self", ")", ":", "for", "consumer_tag", "in", "self", ".", "_open_consumers", ".", "values", "(", ")", ":", "try", ":", "self", ".", "backend", ".", "cancel", "(", "consumer_tag", ")", "except", "KeyError", ":", "pass", "self", ".", "_open_consumers", ".", "clear", "(", ")" ]
35.375
13.75
def run_tfba(self, reaction):
    """Run FBA and tFBA on model."""
    solver = self._get_solver(integer=True)
    p = fluxanalysis.FluxBalanceProblem(self._mm, solver)

    start_time = time.time()

    p.add_thermodynamic()

    try:
        p.maximize(reaction)
    except fluxanalysis.FluxBalanceError as e:
        self.report_flux_balance_error(e)

    logger.info('Solving took {:.2f} seconds'.format(
        time.time() - start_time))

    for reaction_id in self._mm.reactions:
        yield reaction_id, p.get_flux(reaction_id)
[ "def", "run_tfba", "(", "self", ",", "reaction", ")", ":", "solver", "=", "self", ".", "_get_solver", "(", "integer", "=", "True", ")", "p", "=", "fluxanalysis", ".", "FluxBalanceProblem", "(", "self", ".", "_mm", ",", "solver", ")", "start_time", "=", "time", ".", "time", "(", ")", "p", ".", "add_thermodynamic", "(", ")", "try", ":", "p", ".", "maximize", "(", "reaction", ")", "except", "fluxanalysis", ".", "FluxBalanceError", "as", "e", ":", "self", ".", "report_flux_balance_error", "(", "e", ")", "logger", ".", "info", "(", "'Solving took {:.2f} seconds'", ".", "format", "(", "time", ".", "time", "(", ")", "-", "start_time", ")", ")", "for", "reaction_id", "in", "self", ".", "_mm", ".", "reactions", ":", "yield", "reaction_id", ",", "p", ".", "get_flux", "(", "reaction_id", ")" ]
28.6
19.4
def _wrap_ele(self, block, view, frag, extra_data=None):
    """
    Does the guts of the wrapping the same way for both xblocks and asides.
    Their wrappers provide other info in extra_data which gets put into the
    dom data- attrs.
    """
    wrapped = Fragment()
    data = {
        'usage': block.scope_ids.usage_id,
        'block-type': block.scope_ids.block_type,
    }
    data.update(extra_data or {})  # guard against the None default

    if frag.js_init_fn:
        data['init'] = frag.js_init_fn
        data['runtime-version'] = frag.js_init_version

    json_init = ""
    # TODO/Note: We eventually want to remove: hasattr(frag, 'json_init_args')
    # However, I'd like to maintain backwards-compatibility with older XBlock
    # for at least a little while so as not to adversely effect developers.
    # pmitros/Jun 28, 2014.
    if hasattr(frag, 'json_init_args') and frag.json_init_args is not None:
        json_init = (
            '<script type="json/xblock-args" class="xblock_json_init_args">'
            '{data}</script>'
        ).format(data=json.dumps(frag.json_init_args))

    block_css_entrypoint = block.entry_point.replace('.', '-')
    css_classes = [
        block_css_entrypoint,
        '{}-{}'.format(block_css_entrypoint, view),
    ]

    html = "<div class='{}'{properties}>{body}{js}</div>".format(
        markupsafe.escape(' '.join(css_classes)),
        properties="".join(" data-%s='%s'" % item for item in list(data.items())),
        body=frag.body_html(),
        js=json_init)

    wrapped.add_content(html)
    wrapped.add_fragment_resources(frag)
    return wrapped
[ "def", "_wrap_ele", "(", "self", ",", "block", ",", "view", ",", "frag", ",", "extra_data", "=", "None", ")", ":", "wrapped", "=", "Fragment", "(", ")", "data", "=", "{", "'usage'", ":", "block", ".", "scope_ids", ".", "usage_id", ",", "'block-type'", ":", "block", ".", "scope_ids", ".", "block_type", ",", "}", "data", ".", "update", "(", "extra_data", ")", "if", "frag", ".", "js_init_fn", ":", "data", "[", "'init'", "]", "=", "frag", ".", "js_init_fn", "data", "[", "'runtime-version'", "]", "=", "frag", ".", "js_init_version", "json_init", "=", "\"\"", "# TODO/Note: We eventually want to remove: hasattr(frag, 'json_init_args')", "# However, I'd like to maintain backwards-compatibility with older XBlock", "# for at least a little while so as not to adversely effect developers.", "# pmitros/Jun 28, 2014.", "if", "hasattr", "(", "frag", ",", "'json_init_args'", ")", "and", "frag", ".", "json_init_args", "is", "not", "None", ":", "json_init", "=", "(", "'<script type=\"json/xblock-args\" class=\"xblock_json_init_args\">'", "'{data}</script>'", ")", ".", "format", "(", "data", "=", "json", ".", "dumps", "(", "frag", ".", "json_init_args", ")", ")", "block_css_entrypoint", "=", "block", ".", "entry_point", ".", "replace", "(", "'.'", ",", "'-'", ")", "css_classes", "=", "[", "block_css_entrypoint", ",", "'{}-{}'", ".", "format", "(", "block_css_entrypoint", ",", "view", ")", ",", "]", "html", "=", "\"<div class='{}'{properties}>{body}{js}</div>\"", ".", "format", "(", "markupsafe", ".", "escape", "(", "' '", ".", "join", "(", "css_classes", ")", ")", ",", "properties", "=", "\"\"", ".", "join", "(", "\" data-%s='%s'\"", "%", "item", "for", "item", "in", "list", "(", "data", ".", "items", "(", ")", ")", ")", ",", "body", "=", "frag", ".", "body_html", "(", ")", ",", "js", "=", "json_init", ")", "wrapped", ".", "add_content", "(", "html", ")", "wrapped", ".", "add_fragment_resources", "(", "frag", ")", "return", "wrapped" ]
40.119048
22.404762
def twenty_five_neurons_mix_stimulated():
    "Object allocation"
    "If M = 0 then only object will be allocated"
    params = pcnn_parameters()

    params.AF = 0.1
    params.AL = 0.0
    params.AT = 0.7
    params.VF = 1.0
    params.VL = 1.0
    params.VT = 10.0
    params.M = 0.0

    template_dynamic_pcnn(25, 100, [0, 0, 0, 0, 0,
                                    0, 0, 0, 0, 0,
                                    0, 1, 1, 0, 0,
                                    0, 1, 1, 0, 0,
                                    0, 0, 0, 0, 0],
                          params, conn_type.GRID_FOUR, False)
[ "def", "twenty_five_neurons_mix_stimulated", "(", ")", ":", "\"If M = 0 then only object will be allocated\"", "params", "=", "pcnn_parameters", "(", ")", "params", ".", "AF", "=", "0.1", "params", ".", "AL", "=", "0.0", "params", ".", "AT", "=", "0.7", "params", ".", "VF", "=", "1.0", "params", ".", "VL", "=", "1.0", "params", ".", "VT", "=", "10.0", "params", ".", "M", "=", "0.0", "template_dynamic_pcnn", "(", "25", ",", "100", ",", "[", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "1", ",", "1", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ",", "params", ",", "conn_type", ".", "GRID_FOUR", ",", "False", ")" ]
33.222222
18.444444
def dtrajs(self):
    """ get discrete trajectories """
    if not self._estimated:
        self.logger.info("not yet parametrized, running now.")
        self.parametrize()
    return self._chain[-1].dtrajs
[ "def", "dtrajs", "(", "self", ")", ":", "if", "not", "self", ".", "_estimated", ":", "self", ".", "logger", ".", "info", "(", "\"not yet parametrized, running now.\"", ")", "self", ".", "parametrize", "(", ")", "return", "self", ".", "_chain", "[", "-", "1", "]", ".", "dtrajs" ]
37
11.833333
def tlv_parse(data):
    """ Parses a bytestring of TLV values into a dict with the tags as keys."""
    parsed = {}
    while data:
        t, l, data = ord_byte(data[0]), ord_byte(data[1]), data[2:]
        parsed[t], data = data[:l], data[l:]
    return parsed
[ "def", "tlv_parse", "(", "data", ")", ":", "parsed", "=", "{", "}", "while", "data", ":", "t", ",", "l", ",", "data", "=", "ord_byte", "(", "data", "[", "0", "]", ")", ",", "ord_byte", "(", "data", "[", "1", "]", ")", ",", "data", "[", "2", ":", "]", "parsed", "[", "t", "]", ",", "data", "=", "data", "[", ":", "l", "]", ",", "data", "[", "l", ":", "]", "return", "parsed" ]
36.714286
17.714286
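The tlv_parse row above is self-contained except for an ord_byte helper that the row does not include. A minimal sketch, assuming ord_byte is a Python 2/3 compatibility shim that maps a single byte to its integer value:

# Hedged sketch: ord_byte is an assumed helper, not shown in the dataset row.
def ord_byte(b):
    return b if isinstance(b, int) else ord(b)

def tlv_parse(data):
    parsed = {}
    while data:
        # tag byte, length byte, then `length` value bytes
        t, l, data = ord_byte(data[0]), ord_byte(data[1]), data[2:]
        parsed[t], data = data[:l], data[l:]
    return parsed

# Tag 0x01 carries 3 value bytes, tag 0x02 carries 1.
print(tlv_parse(b'\x01\x03abc\x02\x01z'))  # {1: b'abc', 2: b'z'}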
def reset_spyder(self):
    """
    Quit and reset Spyder and then Restart application.
    """
    answer = QMessageBox.warning(self, _("Warning"),
        _("Spyder will restart and reset to default settings: <br><br>"
          "Do you want to continue?"),
        QMessageBox.Yes | QMessageBox.No)
    if answer == QMessageBox.Yes:
        self.restart(reset=True)
[ "def", "reset_spyder", "(", "self", ")", ":", "answer", "=", "QMessageBox", ".", "warning", "(", "self", ",", "_", "(", "\"Warning\"", ")", ",", "_", "(", "\"Spyder will restart and reset to default settings: <br><br>\"", "\"Do you want to continue?\"", ")", ",", "QMessageBox", ".", "Yes", "|", "QMessageBox", ".", "No", ")", "if", "answer", "==", "QMessageBox", ".", "Yes", ":", "self", ".", "restart", "(", "reset", "=", "True", ")" ]
40.7
10.7
def find_first_wt_parent(self, with_ip=False):
    """
    Recursively looks at the part_of parent ancestry line (ignoring pooled_from
    parents) and returns a parent Biosample ID if its wild_type attribute is True.

    Args:
        with_ip: `bool`. True means to restrict the search to the first parental
            Wild Type that also has an Immunoblot linked to it, which may serve
            as a control between another immunoblot. For example, it could be
            useful to compare the target protein bands in Immunoblots between a
            Wild Type sample and a CRISPR eGFP-tagged gene in a descendent sample.

    Returns:
        `False`: There isn't a WT parent, or there is but not one with an
            Immunoblot linked to it (if the `with_ip` parameter is set to True).
        `int`: The ID of the WT parent.
    """
    parent_id = self.part_of_id
    if not parent_id:
        return False
    parent = Biosample(parent_id)
    if parent.wild_type:
        if with_ip and parent.immunoblot_ids:
            return parent.id
        elif not with_ip:
            return parent.id
    return parent.find_first_wt_parent(with_ip=with_ip)
[ "def", "find_first_wt_parent", "(", "self", ",", "with_ip", "=", "False", ")", ":", "parent_id", "=", "self", ".", "part_of_id", "if", "not", "parent_id", ":", "return", "False", "parent", "=", "Biosample", "(", "parent_id", ")", "if", "parent", ".", "wild_type", ":", "if", "with_ip", "and", "parent", ".", "immunoblot_ids", ":", "return", "parent", ".", "id", "elif", "not", "with_ip", ":", "return", "parent", ".", "id", "return", "parent", ".", "find_first_wt_parent", "(", "with_ip", "=", "with_ip", ")" ]
46.074074
24.074074
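A toy model of the recursive parent walk in the row above. The in-memory registry is an assumption standing in for whatever lookup the real Biosample constructor performs, and only the attributes the walk touches are modeled:

# Minimal stand-in for the real Biosample class (registry dict is assumed).
class Biosample:
    registry = {}

    def __init__(self, id, wild_type=False, part_of_id=None):
        self.id, self.wild_type, self.part_of_id = id, wild_type, part_of_id
        Biosample.registry[id] = self

    def find_first_wt_parent(self):
        if not self.part_of_id:
            return False  # reached the top of the ancestry line
        parent = Biosample.registry[self.part_of_id]
        if parent.wild_type:
            return parent.id
        return parent.find_first_wt_parent()

Biosample(1, wild_type=True)
Biosample(2, part_of_id=1)
Biosample(3, part_of_id=2)
print(Biosample.registry[3].find_first_wt_parent())  # 1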
async def result(self) -> T:
    """\
    Wait for the task's termination; either the result is returned or a raised
    exception is reraised. If an event is sent before the task terminates,
    an `EventException` is raised with the event as argument.
    """
    try:
        event = await self.recv_event()
    except Component.Success as succ:
        # success was thrown; return the result
        result, = succ.args
        return cast(T, result)
    except Component.Failure as fail:
        # here we don't expect a wrapped result, so we unwrap the failure
        cause, = fail.args
        raise cause
    else:
        # there was a regular event; shouldn't happen/is exceptional
        raise Component.EventException(event)
[ "async", "def", "result", "(", "self", ")", "->", "T", ":", "try", ":", "event", "=", "await", "self", ".", "recv_event", "(", ")", "except", "Component", ".", "Success", "as", "succ", ":", "# success was thrown; return the result", "result", ",", "=", "succ", ".", "args", "return", "cast", "(", "T", ",", "result", ")", "except", "Component", ".", "Failure", "as", "fail", ":", "# here we don't expect a wrapped result, so we unwrap the failure", "cause", ",", "=", "fail", ".", "args", "raise", "cause", "else", ":", "# there was a regular event; shouldn't happen/is exceptional", "raise", "Component", ".", "EventException", "(", "event", ")" ]
43.666667
18.944444
def _from_record(data):
    """
    Infer a BigQuery table schema from a list of fields or a dictionary.
    The type of the elements is used. For a list, the field names are
    simply 'Column1', 'Column2', etc.

    Args:
        data: The list of fields or dictionary.
    Returns:
        A list of dictionaries containing field 'name' and 'type' entries,
        suitable for use in a BigQuery Tables resource schema.
    """
    if isinstance(data, dict):
        return Schema._from_dict_record(data)
    elif isinstance(data, list):
        return Schema._from_list_record(data)
    else:
        raise Exception('Cannot create a schema from record %s' % str(data))
[ "def", "_from_record", "(", "data", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "return", "Schema", ".", "_from_dict_record", "(", "data", ")", "elif", "isinstance", "(", "data", ",", "list", ")", ":", "return", "Schema", ".", "_from_list_record", "(", "data", ")", "else", ":", "raise", "Exception", "(", "'Cannot create a schema from record %s'", "%", "str", "(", "data", ")", ")" ]
37.882353
21.176471
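The dispatch in _from_record delegates to two helpers the row does not include. A self-contained sketch of the same idea; the value-to-type mapping here is illustrative, not the actual BigQuery type table:

# Sketch of schema inference (bq_type mapping is an assumption).
def infer_schema(data):
    def bq_type(value):
        return {bool: 'BOOLEAN', int: 'INTEGER', float: 'FLOAT'}.get(type(value), 'STRING')

    if isinstance(data, dict):
        return [{'name': k, 'type': bq_type(v)} for k, v in data.items()]
    elif isinstance(data, list):
        # list records get positional names: Column1, Column2, ...
        return [{'name': 'Column%d' % (i + 1), 'type': bq_type(v)}
                for i, v in enumerate(data)]
    else:
        raise Exception('Cannot create a schema from record %s' % str(data))

print(infer_schema({'id': 1, 'name': 'a'}))
# [{'name': 'id', 'type': 'INTEGER'}, {'name': 'name', 'type': 'STRING'}]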
def copy_path(self):
    """Return a copy of the current path.

    :returns:
        A list of ``(path_operation, coordinates)`` tuples of a
        :ref:`PATH_OPERATION` string and a tuple of floats coordinates
        whose content depends on the operation type:

        * :obj:`MOVE_TO <PATH_MOVE_TO>`: 1 point ``(x, y)``
        * :obj:`LINE_TO <PATH_LINE_TO>`: 1 point ``(x, y)``
        * :obj:`CURVE_TO <PATH_CURVE_TO>`: 3 points ``(x1, y1, x2, y2, x3, y3)``
        * :obj:`CLOSE_PATH <PATH_CLOSE_PATH>` 0 points ``()`` (empty tuple)
    """
    path = cairo.cairo_copy_path(self._pointer)
    result = list(_iter_path(path))
    cairo.cairo_path_destroy(path)
    return result
[ "def", "copy_path", "(", "self", ")", ":", "path", "=", "cairo", ".", "cairo_copy_path", "(", "self", ".", "_pointer", ")", "result", "=", "list", "(", "_iter_path", "(", "path", ")", ")", "cairo", ".", "cairo_path_destroy", "(", "path", ")", "return", "result" ]
37.6
17.3
def get_photos_info(photoset_id):
    """Request the photos information with the photoset id

    :param photoset_id: The photoset id of flickr
    :type photoset_id: str
    :return: photos information
    :rtype: list
    """
    args = _get_request_args(
        'flickr.photosets.getPhotos',
        photoset_id=photoset_id
    )
    resp = requests.post(API_URL, data=args)
    resp_json = json.loads(resp.text.encode('utf-8'))
    logger.debug(resp_json)
    photos = resp_json['photoset']['photo']
    return photos
[ "def", "get_photos_info", "(", "photoset_id", ")", ":", "args", "=", "_get_request_args", "(", "'flickr.photosets.getPhotos'", ",", "photoset_id", "=", "photoset_id", ")", "resp", "=", "requests", ".", "post", "(", "API_URL", ",", "data", "=", "args", ")", "resp_json", "=", "json", ".", "loads", "(", "resp", ".", "text", ".", "encode", "(", "'utf-8'", ")", ")", "logger", ".", "debug", "(", "resp_json", ")", "photos", "=", "resp_json", "[", "'photoset'", "]", "[", "'photo'", "]", "return", "photos" ]
29.764706
12.764706
def _spawn_thread(self, target):
    """ Create a thread """
    t = Thread(target=target)
    t.daemon = True
    t.start()
    return t
[ "def", "_spawn_thread", "(", "self", ",", "target", ")", ":", "t", "=", "Thread", "(", "target", "=", "target", ")", "t", ".", "daemon", "=", "True", "t", ".", "start", "(", ")", "return", "t" ]
25.333333
13.166667
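Usage sketch for the daemon-thread helper above: daemon threads do not block interpreter exit, so a join() is needed if the caller wants to wait for the work.

from threading import Thread

def _spawn_thread(target):
    # module-level copy of the method above, for a runnable demo
    t = Thread(target=target)
    t.daemon = True
    t.start()
    return t

t = _spawn_thread(lambda: print("worker running"))
t.join()  # without this, a daemon thread may be killed at interpreter exit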
def _is_url_like_archive(url):
    # type: (str) -> bool
    """Return whether the URL looks like an archive.
    """
    filename = Link(url).filename
    for bad_ext in ARCHIVE_EXTENSIONS:
        if filename.endswith(bad_ext):
            return True
    return False
[ "def", "_is_url_like_archive", "(", "url", ")", ":", "# type: (str) -> bool", "filename", "=", "Link", "(", "url", ")", ".", "filename", "for", "bad_ext", "in", "ARCHIVE_EXTENSIONS", ":", "if", "filename", ".", "endswith", "(", "bad_ext", ")", ":", "return", "True", "return", "False" ]
29.111111
8.555556
def geo_haystack(self, name, bucket_size):
    """
    Create a Haystack index.
    See: http://www.mongodb.org/display/DOCS/Geospatial+Haystack+Indexing

    :param name: Name of the indexed column
    :param bucket_size: Size of the haystack buckets (see mongo docs)
    """
    self.components.append((name, 'geoHaystack'))
    self.__bucket_size = bucket_size
    return self
[ "def", "geo_haystack", "(", "self", ",", "name", ",", "bucket_size", ")", ":", "self", ".", "components", ".", "append", "(", "(", "name", ",", "'geoHaystack'", ")", ")", "self", ".", "__bucket_size", "=", "bucket_size", "return", "self" ]
41.1
16
def GetDateRange(self):
    """Return the range over which this ServicePeriod is valid.

    The range includes exception dates that add service outside of
    (start_date, end_date), but doesn't shrink the range if exception
    dates take away service at the edges of the range.

    Returns:
      A tuple of "YYYYMMDD" strings, (start date, end date) or (None, None)
      if no dates have been given.
    """
    start = self.start_date
    end = self.end_date

    for date, (exception_type, _) in self.date_exceptions.items():
        if exception_type == self._EXCEPTION_TYPE_REMOVE:
            continue
        if not start or (date < start):
            start = date
        if not end or (date > end):
            end = date
    if start is None:
        start = end
    elif end is None:
        end = start
    # If start and end are None we did a little harmless shuffling
    return (start, end)
[ "def", "GetDateRange", "(", "self", ")", ":", "start", "=", "self", ".", "start_date", "end", "=", "self", ".", "end_date", "for", "date", ",", "(", "exception_type", ",", "_", ")", "in", "self", ".", "date_exceptions", ".", "items", "(", ")", ":", "if", "exception_type", "==", "self", ".", "_EXCEPTION_TYPE_REMOVE", ":", "continue", "if", "not", "start", "or", "(", "date", "<", "start", ")", ":", "start", "=", "date", "if", "not", "end", "or", "(", "date", ">", "end", ")", ":", "end", "=", "date", "if", "start", "is", "None", ":", "start", "=", "end", "elif", "end", "is", "None", ":", "end", "=", "start", "# If start and end are None we did a little harmless shuffling", "return", "(", "start", ",", "end", ")" ]
31.962963
20.555556
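The exception-date handling in GetDateRange relies on "YYYYMMDD" strings comparing correctly as text. A trace of the loop outside the class; the numeric value of the remove constant is an assumption for illustration only:

start, end = "20240110", "20240120"
EXCEPTION_TYPE_REMOVE = 2  # assumed constant value, for illustration
date_exceptions = {"20240101": (1, None),   # added service: widens the range
                   "20240201": (2, None)}   # removed service: ignored
for date, (exception_type, _) in date_exceptions.items():
    if exception_type == EXCEPTION_TYPE_REMOVE:
        continue
    if not start or date < start:
        start = date
    if not end or date > end:
        end = date
print((start, end))  # ('20240101', '20240120')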
def getVersionString():
    """
    Return a string with version information, using one of three procedures:
    git describe, the file in the .git dir, or the __VERSION__ file.
    """
    version_string = None
    try:
        version_string = subprocess.check_output(['git', 'describe'])
    except:
        logger.warning('Command "git describe" is not working')
    if version_string == None:  # noqa
        try:
            path_to_version = os.path.join(path_to_script,
                                           '../.git/refs/heads/master')
            with open(path_to_version) as f:
                version_string = f.read()
        except:
            logger.warning('Problem with reading file ".git/refs/heads/master"')
    if version_string == None:  # noqa
        try:
            path_to_version = os.path.join(path_to_script, '../__VERSION__')
            with open(path_to_version) as f:
                version_string = f.read()
            path_to_version = path_to_version + \
                ' version number is created manually'
        except:
            logger.warning('Problem with reading file "__VERSION__"')
    return version_string
[ "def", "getVersionString", "(", ")", ":", "version_string", "=", "None", "try", ":", "version_string", "=", "subprocess", ".", "check_output", "(", "[", "'git'", ",", "'describe'", "]", ")", "except", ":", "logger", ".", "warning", "(", "'Command \"git describe\" is not working'", ")", "if", "version_string", "==", "None", ":", "# noqa", "try", ":", "path_to_version", "=", "os", ".", "path", ".", "join", "(", "path_to_script", ",", "'../.git/refs/heads/master'", ")", "with", "file", "(", "path_to_version", ")", "as", "f", ":", "version_string", "=", "f", ".", "read", "(", ")", "except", ":", "logger", ".", "warning", "(", "'Problem with reading file \".git/refs/heads/master\"'", ")", "if", "version_string", "==", "None", ":", "# noqa", "try", ":", "path_to_version", "=", "os", ".", "path", ".", "join", "(", "path_to_script", ",", "'../__VERSION__'", ")", "with", "file", "(", "path_to_version", ")", "as", "f", ":", "version_string", "=", "f", ".", "read", "(", ")", "path_to_version", "=", "path_to_version", "+", "' version number is created manually'", "except", ":", "logger", ".", "warning", "(", "'Problem with reading file \"__VERSION__\"'", ")", "return", "version_string" ]
35.393939
20.30303
def unified_load(namespace, subclasses=None, recurse=False):
    """Provides a unified interface to both the module and class loaders,
    finding modules by default or classes if given a ``subclasses`` parameter.
    """
    if subclasses is not None:
        return ClassLoader(recurse=recurse).load(namespace, subclasses=subclasses)
    else:
        return ModuleLoader(recurse=recurse).load(namespace)
[ "def", "unified_load", "(", "namespace", ",", "subclasses", "=", "None", ",", "recurse", "=", "False", ")", ":", "if", "subclasses", "is", "not", "None", ":", "return", "ClassLoader", "(", "recurse", "=", "recurse", ")", ".", "load", "(", "namespace", ",", "subclasses", "=", "subclasses", ")", "else", ":", "return", "ModuleLoader", "(", "recurse", "=", "recurse", ")", ".", "load", "(", "namespace", ")" ]
44.333333
22.333333
def _lmder1_linear_full_rank(n, m, factor, target_fnorm1, target_fnorm2):
    """A full-rank linear function (lmder test #1)"""

    def func(params, vec):
        s = params.sum()
        temp = 2. * s / m + 1
        vec[:] = -temp
        vec[:params.size] += params

    def jac(params, jac):
        # jac.shape = (n, m) by LMDER standards
        jac.fill(-2. / m)
        for i in range(n):
            jac[i,i] += 1

    guess = np.ones(n) * factor
    #_lmder1_test(m, func, jac, guess)
    _lmder1_driver(m, func, jac, guess,
                   target_fnorm1, target_fnorm2,
                   [-1] * n)
[ "def", "_lmder1_linear_full_rank", "(", "n", ",", "m", ",", "factor", ",", "target_fnorm1", ",", "target_fnorm2", ")", ":", "def", "func", "(", "params", ",", "vec", ")", ":", "s", "=", "params", ".", "sum", "(", ")", "temp", "=", "2.", "*", "s", "/", "m", "+", "1", "vec", "[", ":", "]", "=", "-", "temp", "vec", "[", ":", "params", ".", "size", "]", "+=", "params", "def", "jac", "(", "params", ",", "jac", ")", ":", "# jac.shape = (n, m) by LMDER standards", "jac", ".", "fill", "(", "-", "2.", "/", "m", ")", "for", "i", "in", "range", "(", "n", ")", ":", "jac", "[", "i", ",", "i", "]", "+=", "1", "guess", "=", "np", ".", "ones", "(", "n", ")", "*", "factor", "#_lmder1_test(m, func, jac, guess)", "_lmder1_driver", "(", "m", ",", "func", ",", "jac", ",", "guess", ",", "target_fnorm1", ",", "target_fnorm2", ",", "[", "-", "1", "]", "*", "n", ")" ]
28.285714
16.904762
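A quick numeric check of the residual function defined inside the row above: with params = ones(n) * factor, every entry of vec becomes -(2*sum/m + 1), and the first n entries then get params added back.

import numpy as np

n, m, factor = 3, 5, 1.0
params = np.ones(n) * factor
vec = np.empty(m)

s = params.sum()       # s = 3
temp = 2. * s / m + 1  # temp = 2.2
vec[:] = -temp
vec[:params.size] += params

print(vec)  # [-1.2 -1.2 -1.2 -2.2 -2.2]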
def returnOrderBook(self, currencyPair='all', depth='50'):
    """Returns the order book for a given market, as well as a sequence
    number for use with the Push API and an indicator specifying whether
    the market is frozen. You may set currencyPair to "all" to get the
    order books of all markets."""
    return self._public('returnOrderBook', currencyPair=currencyPair,
                        depth=depth)
[ "def", "returnOrderBook", "(", "self", ",", "currencyPair", "=", "'all'", ",", "depth", "=", "'50'", ")", ":", "return", "self", ".", "_public", "(", "'returnOrderBook'", ",", "currencyPair", "=", "currencyPair", ",", "depth", "=", "depth", ")" ]
62
17.285714
def group_primers(self, my_list):
    """Group elements in list by certain number 'n'"""
    new_list = []
    n = 2
    for i in range(0, len(my_list), n):
        grouped_primers = my_list[i:i + n]
        forward_primer = grouped_primers[0].split(" ")
        reverse_primer = grouped_primers[1].split(" ")
        formatted_primers = ">F_{0}\n{1}".format(forward_primer[1], forward_primer[0])
        formatted_primers += "\n>R_{0}\n{1}".format(reverse_primer[1], reverse_primer[0])
        new_list.append(formatted_primers)
    return new_list
[ "def", "group_primers", "(", "self", ",", "my_list", ")", ":", "new_list", "=", "[", "]", "n", "=", "2", "for", "i", "in", "range", "(", "0", ",", "len", "(", "my_list", ")", ",", "n", ")", ":", "grouped_primers", "=", "my_list", "[", "i", ":", "i", "+", "n", "]", "forward_primer", "=", "grouped_primers", "[", "0", "]", ".", "split", "(", "\" \"", ")", "reverse_primer", "=", "grouped_primers", "[", "1", "]", ".", "split", "(", "\" \"", ")", "formatted_primers", "=", "\">F_{0}\\n{1}\"", ".", "format", "(", "forward_primer", "[", "1", "]", ",", "forward_primer", "[", "0", "]", ")", "formatted_primers", "+=", "\"\\n>R_{0}\\n{1}\"", ".", "format", "(", "reverse_primer", "[", "1", "]", ",", "reverse_primer", "[", "0", "]", ")", "new_list", ".", "append", "(", "formatted_primers", ")", "return", "new_list" ]
48.5
18.666667
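Illustration of the pairing logic in group_primers, with made-up "<sequence> <name>" primer strings; that input format is an assumption inferred from the split(" ") calls in the row above.

my_list = ["ACGT primer1", "TGCA primer1", "GGCC primer2", "CCGG primer2"]
n = 2  # primers come in forward/reverse pairs
for i in range(0, len(my_list), n):
    fwd, rev = (p.split(" ") for p in my_list[i:i + n])
    print(">F_{0}\n{1}\n>R_{2}\n{3}".format(fwd[1], fwd[0], rev[1], rev[0]))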
def info(self, params=None):
    ''' /v1/account/info
    GET - account
    Retrieve information about the current account

    Link: https://www.vultr.com/api/#account_info
    '''
    params = params if params else dict()
    return self.request('/v1/account/info', params, 'GET')
[ "def", "info", "(", "self", ",", "params", "=", "None", ")", ":", "params", "=", "params", "if", "params", "else", "dict", "(", ")", "return", "self", ".", "request", "(", "'/v1/account/info'", ",", "params", ",", "'GET'", ")" ]
33.555556
18.444444
def get_request_message(cls, remote_info):  # pylint: disable=g-bad-name
    """Gets request message or container from remote info.

    Args:
      remote_info: Instance of protorpc.remote._RemoteMethodInfo corresponding
        to a method.

    Returns:
      Either an instance of the request type from the remote or the
      ResourceContainer that was cached with the remote method.
    """
    if remote_info in cls.__remote_info_cache:
        return cls.__remote_info_cache[remote_info]
    else:
        return remote_info.request_type()
[ "def", "get_request_message", "(", "cls", ",", "remote_info", ")", ":", "# pylint: disable=g-bad-name", "if", "remote_info", "in", "cls", ".", "__remote_info_cache", ":", "return", "cls", ".", "__remote_info_cache", "[", "remote_info", "]", "else", ":", "return", "remote_info", ".", "request_type", "(", ")" ]
35.666667
21.866667
def perform_edit_extension_draft_operation(self, draft_patch, publisher_name, extension_name, draft_id):
    """PerformEditExtensionDraftOperation.
    [Preview API]
    :param :class:`<ExtensionDraftPatch> <azure.devops.v5_1.gallery.models.ExtensionDraftPatch>` draft_patch:
    :param str publisher_name:
    :param str extension_name:
    :param str draft_id:
    :rtype: :class:`<ExtensionDraft> <azure.devops.v5_1.gallery.models.ExtensionDraft>`
    """
    route_values = {}
    if publisher_name is not None:
        route_values['publisherName'] = self._serialize.url('publisher_name', publisher_name, 'str')
    if extension_name is not None:
        route_values['extensionName'] = self._serialize.url('extension_name', extension_name, 'str')
    if draft_id is not None:
        route_values['draftId'] = self._serialize.url('draft_id', draft_id, 'str')
    content = self._serialize.body(draft_patch, 'ExtensionDraftPatch')
    response = self._send(http_method='PATCH',
                          location_id='02b33873-4e61-496e-83a2-59d1df46b7d8',
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('ExtensionDraft', response)
[ "def", "perform_edit_extension_draft_operation", "(", "self", ",", "draft_patch", ",", "publisher_name", ",", "extension_name", ",", "draft_id", ")", ":", "route_values", "=", "{", "}", "if", "publisher_name", "is", "not", "None", ":", "route_values", "[", "'publisherName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'publisher_name'", ",", "publisher_name", ",", "'str'", ")", "if", "extension_name", "is", "not", "None", ":", "route_values", "[", "'extensionName'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'extension_name'", ",", "extension_name", ",", "'str'", ")", "if", "draft_id", "is", "not", "None", ":", "route_values", "[", "'draftId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'draft_id'", ",", "draft_id", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "draft_patch", ",", "'ExtensionDraftPatch'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'PATCH'", ",", "location_id", "=", "'02b33873-4e61-496e-83a2-59d1df46b7d8'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'ExtensionDraft'", ",", "response", ")" ]
57.826087
24.913043
def cpu_and_memory(programs, items):
    """Retrieve CPU and memory/core specified in configuration input.
    """
    assert len(items) > 0, "Finding job resources but no items to process"
    config = items[0]["config"]
    all_cores = []
    all_memory = []
    algs = [config_utils.get_algorithm_config(x) for x in items]
    progs = _get_resource_programs(programs, algs)
    # Calculate cores
    for prog in progs:
        resources = config_utils.get_resources(prog, config)
        all_cores.append(resources.get("cores", 1))
    if len(all_cores) == 0:
        all_cores.append(1)
    cores_per_job = max(all_cores)
    # Calculate memory. Use 1Gb memory usage per core as min baseline if not specified
    for prog in progs:
        resources = config_utils.get_resources(prog, config)
        memory = _get_prog_memory(resources, cores_per_job)
        if memory:
            all_memory.append(memory)
    if len(all_memory) == 0:
        all_memory.append(1)
    memory_per_core = max(all_memory)
    return cores_per_job, memory_per_core
[ "def", "cpu_and_memory", "(", "programs", ",", "items", ")", ":", "assert", "len", "(", "items", ")", ">", "0", ",", "\"Finding job resources but no items to process\"", "config", "=", "items", "[", "0", "]", "[", "\"config\"", "]", "all_cores", "=", "[", "]", "all_memory", "=", "[", "]", "algs", "=", "[", "config_utils", ".", "get_algorithm_config", "(", "x", ")", "for", "x", "in", "items", "]", "progs", "=", "_get_resource_programs", "(", "programs", ",", "algs", ")", "# Calculate cores", "for", "prog", "in", "progs", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "prog", ",", "config", ")", "all_cores", ".", "append", "(", "resources", ".", "get", "(", "\"cores\"", ",", "1", ")", ")", "if", "len", "(", "all_cores", ")", "==", "0", ":", "all_cores", ".", "append", "(", "1", ")", "cores_per_job", "=", "max", "(", "all_cores", ")", "# Calculate memory. Use 1Gb memory usage per core as min baseline if not specified", "for", "prog", "in", "progs", ":", "resources", "=", "config_utils", ".", "get_resources", "(", "prog", ",", "config", ")", "memory", "=", "_get_prog_memory", "(", "resources", ",", "cores_per_job", ")", "if", "memory", ":", "all_memory", ".", "append", "(", "memory", ")", "if", "len", "(", "all_memory", ")", "==", "0", ":", "all_memory", ".", "append", "(", "1", ")", "memory_per_core", "=", "max", "(", "all_memory", ")", "return", "cores_per_job", ",", "memory_per_core" ]
39.461538
14.615385
def is_transition_matrix(T, tol=1e-12):
    r"""Check if the given matrix is a transition matrix.

    Parameters
    ----------
    T : (M, M) ndarray or scipy.sparse matrix
        Matrix to check
    tol : float (optional)
        Floating point tolerance to check with

    Returns
    -------
    is_transition_matrix : bool
        True, if T is a valid transition matrix, False otherwise

    Notes
    -----
    A valid transition matrix :math:`P=(p_{ij})` has non-negative
    elements, :math:`p_{ij} \geq 0`, and elements of each row sum up
    to one, :math:`\sum_j p_{ij} = 1`. Matrices with this property are
    also called stochastic matrices.

    Examples
    --------
    >>> import numpy as np
    >>> from msmtools.analysis import is_transition_matrix

    >>> A = np.array([[0.4, 0.5, 0.3], [0.2, 0.4, 0.4], [-1, 1, 1]])
    >>> is_transition_matrix(A)
    False

    >>> T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
    >>> is_transition_matrix(T)
    True

    """
    T = _types.ensure_ndarray_or_sparse(T, ndim=2, uniform=True, kind='numeric')
    if _issparse(T):
        return sparse.assessment.is_transition_matrix(T, tol)
    else:
        return dense.assessment.is_transition_matrix(T, tol)
[ "def", "is_transition_matrix", "(", "T", ",", "tol", "=", "1e-12", ")", ":", "T", "=", "_types", ".", "ensure_ndarray_or_sparse", "(", "T", ",", "ndim", "=", "2", ",", "uniform", "=", "True", ",", "kind", "=", "'numeric'", ")", "if", "_issparse", "(", "T", ")", ":", "return", "sparse", ".", "assessment", ".", "is_transition_matrix", "(", "T", ",", "tol", ")", "else", ":", "return", "dense", ".", "assessment", ".", "is_transition_matrix", "(", "T", ",", "tol", ")" ]
29.317073
23.804878
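A dense-only equivalent of the check described in the docstring above (non-negative entries, rows summing to one); the real function also dispatches to a sparse implementation. This is a sketch, not the msmtools code path:

import numpy as np

def is_transition_matrix_dense(T, tol=1e-12):
    T = np.asarray(T, dtype=float)
    nonneg = (T >= -tol).all()                            # p_ij >= 0
    row_sums = np.allclose(T.sum(axis=1), 1.0, atol=tol)  # sum_j p_ij = 1
    return bool(nonneg and row_sums)

T = np.array([[0.9, 0.1, 0.0], [0.5, 0.0, 0.5], [0.0, 0.1, 0.9]])
print(is_transition_matrix_dense(T))  # True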
def _handle_request(self, request: dict) -> dict:
    """Processes Alexa requests from skill server and returns responses to Alexa.

    Args:
        request: Dict with Alexa request payload and metadata.

    Returns:
        result: Alexa formatted or error response.
    """
    request_body: bytes = request['request_body']
    signature_chain_url: str = request['signature_chain_url']
    signature: str = request['signature']
    alexa_request: dict = request['alexa_request']

    if not self._verify_request(signature_chain_url, signature, request_body):
        return {'error': 'failed certificate/signature check'}

    timestamp_str = alexa_request['request']['timestamp']
    timestamp_datetime = datetime.strptime(timestamp_str, '%Y-%m-%dT%H:%M:%SZ')
    now = datetime.utcnow()

    delta = now - timestamp_datetime if now >= timestamp_datetime else timestamp_datetime - now

    if abs(delta.seconds) > REQUEST_TIMESTAMP_TOLERANCE_SECS:
        log.error(f'Failed timestamp check for request: {request_body.decode("utf-8", "replace")}')
        return {'error': 'failed request timestamp check'}

    conversation_key = alexa_request['session']['user']['userId']

    if conversation_key not in self.conversations.keys():
        if self.config['multi_instance']:
            conv_agent = self._init_agent()
            log.info('New conversation instance level agent initiated')
        else:
            conv_agent = self.agent

        self.conversations[conversation_key] = \
            Conversation(config=self.config,
                         agent=conv_agent,
                         conversation_key=conversation_key,
                         self_destruct_callback=lambda: self._del_conversation(conversation_key))

        log.info(f'Created new conversation, key: {conversation_key}')

    conversation = self.conversations[conversation_key]
    response = conversation.handle_request(alexa_request)

    return response
[ "def", "_handle_request", "(", "self", ",", "request", ":", "dict", ")", "->", "dict", ":", "request_body", ":", "bytes", "=", "request", "[", "'request_body'", "]", "signature_chain_url", ":", "str", "=", "request", "[", "'signature_chain_url'", "]", "signature", ":", "str", "=", "request", "[", "'signature'", "]", "alexa_request", ":", "dict", "=", "request", "[", "'alexa_request'", "]", "if", "not", "self", ".", "_verify_request", "(", "signature_chain_url", ",", "signature", ",", "request_body", ")", ":", "return", "{", "'error'", ":", "'failed certificate/signature check'", "}", "timestamp_str", "=", "alexa_request", "[", "'request'", "]", "[", "'timestamp'", "]", "timestamp_datetime", "=", "datetime", ".", "strptime", "(", "timestamp_str", ",", "'%Y-%m-%dT%H:%M:%SZ'", ")", "now", "=", "datetime", ".", "utcnow", "(", ")", "delta", "=", "now", "-", "timestamp_datetime", "if", "now", ">=", "timestamp_datetime", "else", "timestamp_datetime", "-", "now", "if", "abs", "(", "delta", ".", "seconds", ")", ">", "REQUEST_TIMESTAMP_TOLERANCE_SECS", ":", "log", ".", "error", "(", "f'Failed timestamp check for request: {request_body.decode(\"utf-8\", \"replace\")}'", ")", "return", "{", "'error'", ":", "'failed request timestamp check'", "}", "conversation_key", "=", "alexa_request", "[", "'session'", "]", "[", "'user'", "]", "[", "'userId'", "]", "if", "conversation_key", "not", "in", "self", ".", "conversations", ".", "keys", "(", ")", ":", "if", "self", ".", "config", "[", "'multi_instance'", "]", ":", "conv_agent", "=", "self", ".", "_init_agent", "(", ")", "log", ".", "info", "(", "'New conversation instance level agent initiated'", ")", "else", ":", "conv_agent", "=", "self", ".", "agent", "self", ".", "conversations", "[", "conversation_key", "]", "=", "Conversation", "(", "config", "=", "self", ".", "config", ",", "agent", "=", "conv_agent", ",", "conversation_key", "=", "conversation_key", ",", "self_destruct_callback", "=", "lambda", ":", "self", ".", "_del_conversation", "(", "conversation_key", ")", ")", "log", ".", "info", "(", "f'Created new conversation, key: {conversation_key}'", ")", "conversation", "=", "self", ".", "conversations", "[", "conversation_key", "]", "response", "=", "conversation", ".", "handle_request", "(", "alexa_request", ")", "return", "response" ]
43.468085
26.148936
def tensor(self, field_name, tensor_ind):
    """ Returns the tensor for a given field and tensor index.

    Parameters
    ----------
    field_name : str
        the name of the field to load
    tensor_ind : int
        the index of the tensor

    Returns
    -------
    :obj:`Tensor`
        the desired tensor
    """
    if tensor_ind == self._tensor_cache_file_num[field_name]:
        return self._tensors[field_name]
    filename = self.generate_tensor_filename(field_name, tensor_ind, compressed=True)
    Tensor.load(filename, compressed=True, prealloc=self._tensors[field_name])
    self._tensor_cache_file_num[field_name] = tensor_ind
    return self._tensors[field_name]
[ "def", "tensor", "(", "self", ",", "field_name", ",", "tensor_ind", ")", ":", "if", "tensor_ind", "==", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", ":", "return", "self", ".", "_tensors", "[", "field_name", "]", "filename", "=", "self", ".", "generate_tensor_filename", "(", "field_name", ",", "tensor_ind", ",", "compressed", "=", "True", ")", "Tensor", ".", "load", "(", "filename", ",", "compressed", "=", "True", ",", "prealloc", "=", "self", ".", "_tensors", "[", "field_name", "]", ")", "self", ".", "_tensor_cache_file_num", "[", "field_name", "]", "=", "tensor_ind", "return", "self", ".", "_tensors", "[", "field_name", "]" ]
34.545455
16.318182
def first(seq, key=lambda x: bool(x), default=None, apply=lambda x: x):
    """Give the first value that satisfies the key test.

    Args:
        seq (iterable):
        key (callable): test for each element of iterable
        default: returned when all elements fail test
        apply (callable): applied to element before return, but not to default value

    Returns: first element in seq that passes key, mutated with optional apply

    Examples:
        >>> first([0, False, None, [], (), 42])
        42
        >>> first([0, False, None, [], ()]) is None
        True
        >>> first([0, False, None, [], ()], default='ohai')
        'ohai'
        >>> import re
        >>> m = first(re.match(regex, 'abc') for regex in ['b.*', 'a(.*)'])
        >>> m.group(1)
        'bc'

    The optional `key` argument specifies a one-argument predicate function
    like that used for `filter()`. The `key` argument, if supplied, must be
    in keyword form. For example:
        >>> first([1, 1, 3, 4, 5], key=lambda x: x % 2 == 0)
        4
    """
    return next((apply(x) for x in seq if key(x)),
                default() if callable(default) else default)
[ "def", "first", "(", "seq", ",", "key", "=", "lambda", "x", ":", "bool", "(", "x", ")", ",", "default", "=", "None", ",", "apply", "=", "lambda", "x", ":", "x", ")", ":", "return", "next", "(", "(", "apply", "(", "x", ")", "for", "x", "in", "seq", "if", "key", "(", "x", ")", ")", ",", "default", "(", ")", "if", "callable", "(", "default", ")", "else", "default", ")" ]
36.612903
26.645161
def _init_metadata(self):
    """stub"""
    self._learning_objective_id_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'learning_objective_id'),
        'element_label': 'Learning Objective Id',
        'instructions': 'accepts a valid OSID Id string',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_id_values': [''],
        'syntax': 'ID',
        'id_set': []
    }
    self._minimum_proficiency_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'minimum_proficiency'),
        'element_label': 'Minimum Proficiency in the given Objective to "pass"',
        'instructions': 'accepts a valid OSID Id string',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_id_values': [''],
        'syntax': 'ID',
        'id_set': []
    }
[ "def", "_init_metadata", "(", "self", ")", ":", "self", ".", "_learning_objective_id_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'learning_objective_id'", ")", ",", "'element_label'", ":", "'Learning Objective Id'", ",", "'instructions'", ":", "'accepts a valid OSID Id string'", ",", "'required'", ":", "False", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_id_values'", ":", "[", "''", "]", ",", "'syntax'", ":", "'ID'", ",", "'id_set'", ":", "[", "]", "}", "self", ".", "_minimum_proficiency_metadata", "=", "{", "'element_id'", ":", "Id", "(", "self", ".", "my_osid_object_form", ".", "_authority", ",", "self", ".", "my_osid_object_form", ".", "_namespace", ",", "'minimum_proficiency'", ")", ",", "'element_label'", ":", "'Minimum Proficiency in the given Objective to \"pass\"'", ",", "'instructions'", ":", "'accepts a valid OSID Id string'", ",", "'required'", ":", "False", ",", "'read_only'", ":", "False", ",", "'linked'", ":", "False", ",", "'array'", ":", "False", ",", "'default_id_values'", ":", "[", "''", "]", ",", "'syntax'", ":", "'ID'", ",", "'id_set'", ":", "[", "]", "}" ]
39.666667
15.533333
def _call_func_bc(nargs, idx, ops, keys):
    """
    Implements transformation of CALL_FUNCTION bc inst to Rapids expression.

    The implementation follows definition of behavior defined in
    https://docs.python.org/3/library/dis.html

    :param nargs: number of arguments including keyword and positional arguments
    :param idx: index of current instruction on the stack
    :param ops: stack of instructions
    :param keys: names of instructions
    :return: ExprNode representing method call
    """
    named_args = {}
    unnamed_args = []
    args = []
    # Extract arguments based on calling convention for CALL_FUNCTION_KW
    while nargs > 0:
        if nargs >= 256:  # named args ( foo(50,True,x=10) ) read first ( right -> left )
            arg, idx = _opcode_read_arg(idx, ops, keys)
            named_args[ops[idx][1][0]] = arg
            idx -= 1  # skip the LOAD_CONST for the named args
            nargs -= 256  # drop 256
        else:
            arg, idx = _opcode_read_arg(idx, ops, keys)
            unnamed_args.insert(0, arg)
            nargs -= 1
    # LOAD_ATTR <method_name>: Map call arguments to a call of method on H2OFrame class
    op = ops[idx][1][0]
    args = _get_h2o_frame_method_args(op, named_args, unnamed_args) if is_attr(ops[idx][0]) else []
    # Map function name to proper rapids name
    op = _get_func_name(op, args)
    # Go to next instruction
    idx -= 1
    if is_bytecode_instruction(ops[idx][0]):
        arg, idx = _opcode_read_arg(idx, ops, keys)
        args.insert(0, arg)
    elif is_load_fast(ops[idx][0]):
        args.insert(0, _load_fast(ops[idx][1][0]))
        idx -= 1
    return [ExprNode(op, *args), idx]
[ "def", "_call_func_bc", "(", "nargs", ",", "idx", ",", "ops", ",", "keys", ")", ":", "named_args", "=", "{", "}", "unnamed_args", "=", "[", "]", "args", "=", "[", "]", "# Extract arguments based on calling convention for CALL_FUNCTION_KW", "while", "nargs", ">", "0", ":", "if", "nargs", ">=", "256", ":", "# named args ( foo(50,True,x=10) ) read first ( right -> left )", "arg", ",", "idx", "=", "_opcode_read_arg", "(", "idx", ",", "ops", ",", "keys", ")", "named_args", "[", "ops", "[", "idx", "]", "[", "1", "]", "[", "0", "]", "]", "=", "arg", "idx", "-=", "1", "# skip the LOAD_CONST for the named args", "nargs", "-=", "256", "# drop 256", "else", ":", "arg", ",", "idx", "=", "_opcode_read_arg", "(", "idx", ",", "ops", ",", "keys", ")", "unnamed_args", ".", "insert", "(", "0", ",", "arg", ")", "nargs", "-=", "1", "# LOAD_ATTR <method_name>: Map call arguments to a call of method on H2OFrame class", "op", "=", "ops", "[", "idx", "]", "[", "1", "]", "[", "0", "]", "args", "=", "_get_h2o_frame_method_args", "(", "op", ",", "named_args", ",", "unnamed_args", ")", "if", "is_attr", "(", "ops", "[", "idx", "]", "[", "0", "]", ")", "else", "[", "]", "# Map function name to proper rapids name", "op", "=", "_get_func_name", "(", "op", ",", "args", ")", "# Go to next instruction", "idx", "-=", "1", "if", "is_bytecode_instruction", "(", "ops", "[", "idx", "]", "[", "0", "]", ")", ":", "arg", ",", "idx", "=", "_opcode_read_arg", "(", "idx", ",", "ops", ",", "keys", ")", "args", ".", "insert", "(", "0", ",", "arg", ")", "elif", "is_load_fast", "(", "ops", "[", "idx", "]", "[", "0", "]", ")", ":", "args", ".", "insert", "(", "0", ",", "_load_fast", "(", "ops", "[", "idx", "]", "[", "1", "]", "[", "0", "]", ")", ")", "idx", "-=", "1", "return", "[", "ExprNode", "(", "op", ",", "*", "args", ")", ",", "idx", "]" ]
41.3
17.25
def gatk_filter_rnaseq(vrn_file, data):
    """
    this incorporates filters listed here, dropping clusters of variants
    within a 35 nucleotide window, high fischer strand values and low
    quality by depth
    https://software.broadinstitute.org/gatk/guide/article?id=3891
    java -jar GenomeAnalysisTK.jar -T VariantFiltration -R hg_19.fasta -V
    input.vcf -window 35 -cluster 3 -filterName FS -filter "FS > 30.0"
    -filterName QD -filter "QD < 2.0" -o output.vcf
    """
    out_file = "%s-filter%s" % utils.splitext_plus(vrn_file)
    if not file_exists(out_file):
        ref_file = dd.get_ref_file(data)
        with file_transaction(data, out_file) as tx_out_file:
            params = ["VariantFiltration",
                      "-R", ref_file,
                      "-V", vrn_file,
                      "--cluster-window-size", "35",
                      "--cluster-size", "3",
                      "--filter-expression", "'FS > 30.0'",
                      "--filter-name", "FS",
                      "--filter-expression", "'QD < 2.0'",
                      "--filter-name", "QD",
                      "--output", tx_out_file]
            # Use GATK4 for filtering, tools_off is for variant calling
            config = utils.deepish_copy(dd.get_config(data))
            if "gatk4" in dd.get_tools_off({"config": config}):
                config["algorithm"]["tools_off"].remove("gatk4")
            jvm_opts = broad.get_gatk_opts(config, os.path.dirname(tx_out_file))
            do.run(broad.gatk_cmd("gatk", jvm_opts, params, config),
                   "Filter RNA-seq variants.")
    return out_file
[ "def", "gatk_filter_rnaseq", "(", "vrn_file", ",", "data", ")", ":", "out_file", "=", "\"%s-filter%s\"", "%", "utils", ".", "splitext_plus", "(", "vrn_file", ")", "if", "not", "file_exists", "(", "out_file", ")", ":", "ref_file", "=", "dd", ".", "get_ref_file", "(", "data", ")", "with", "file_transaction", "(", "data", ",", "out_file", ")", "as", "tx_out_file", ":", "params", "=", "[", "\"VariantFiltration\"", ",", "\"-R\"", ",", "ref_file", ",", "\"-V\"", ",", "vrn_file", ",", "\"--cluster-window-size\"", ",", "\"35\"", ",", "\"--cluster-size\"", ",", "\"3\"", ",", "\"--filter-expression\"", ",", "\"'FS > 30.0'\"", ",", "\"--filter-name\"", ",", "\"FS\"", ",", "\"--filter-expression\"", ",", "\"'QD < 2.0'\"", ",", "\"--filter-name\"", ",", "\"QD\"", ",", "\"--output\"", ",", "tx_out_file", "]", "# Use GATK4 for filtering, tools_off is for variant calling", "config", "=", "utils", ".", "deepish_copy", "(", "dd", ".", "get_config", "(", "data", ")", ")", "if", "\"gatk4\"", "in", "dd", ".", "get_tools_off", "(", "{", "\"config\"", ":", "config", "}", ")", ":", "config", "[", "\"algorithm\"", "]", "[", "\"tools_off\"", "]", ".", "remove", "(", "\"gatk4\"", ")", "jvm_opts", "=", "broad", ".", "get_gatk_opts", "(", "config", ",", "os", ".", "path", ".", "dirname", "(", "tx_out_file", ")", ")", "do", ".", "run", "(", "broad", ".", "gatk_cmd", "(", "\"gatk\"", ",", "jvm_opts", ",", "params", ",", "config", ")", ",", "\"Filter RNA-seq variants.\"", ")", "return", "out_file" ]
51.096774
16.774194
def uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry=False):
    """Return a partition of an interval product into equally sized cells.

    Parameters
    ----------
    intv_prod : `IntervalProd`
        Interval product to be partitioned
    shape : int or sequence of ints
        Number of nodes per axis. For 1d intervals, a single integer
        can be specified.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift
        it by half a cell size into the interior (``False``). In each
        axis, an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``intv_prod.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    See Also
    --------
    uniform_partition_fromgrid

    Examples
    --------
    By default, no grid points are placed on the boundary:

    >>> interval = odl.IntervalProd(0, 1)
    >>> part = odl.uniform_partition_fromintv(interval, 4)
    >>> part.cell_boundary_vecs
    (array([ 0. , 0.25, 0.5 , 0.75, 1. ]),)
    >>> part.grid.coord_vectors
    (array([ 0.125, 0.375, 0.625, 0.875]),)

    This can be changed with the nodes_on_bdry parameter:

    >>> part = odl.uniform_partition_fromintv(interval, 3,
    ...                                       nodes_on_bdry=True)
    >>> part.cell_boundary_vecs
    (array([ 0. , 0.25, 0.75, 1. ]),)
    >>> part.grid.coord_vectors
    (array([ 0. , 0.5, 1. ]),)

    We can specify this per axis, too. In this case we choose both
    in the first axis and only the rightmost in the second:

    >>> rect = odl.IntervalProd([0, 0], [1, 1])
    >>> part = odl.uniform_partition_fromintv(
    ...     rect, (3, 3), nodes_on_bdry=(True, (False, True)))
    ...
    >>> part.cell_boundary_vecs[0]  # first axis, as above
    array([ 0. , 0.25, 0.75, 1. ])
    >>> part.grid.coord_vectors[0]
    array([ 0. , 0.5, 1. ])
    >>> part.cell_boundary_vecs[1]  # second, asymmetric axis
    array([ 0. , 0.4, 0.8, 1. ])
    >>> part.grid.coord_vectors[1]
    array([ 0.2, 0.6, 1. ])
    """
    grid = uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=nodes_on_bdry)
    return RectPartition(intv_prod, grid)
[ "def", "uniform_partition_fromintv", "(", "intv_prod", ",", "shape", ",", "nodes_on_bdry", "=", "False", ")", ":", "grid", "=", "uniform_grid_fromintv", "(", "intv_prod", ",", "shape", ",", "nodes_on_bdry", "=", "nodes_on_bdry", ")", "return", "RectPartition", "(", "intv_prod", ",", "grid", ")" ]
37.46875
19.390625
def similar(names=None, ids=None, start=0, results=15, buckets=None,
            limit=False, max_familiarity=None, min_familiarity=None,
            max_hotttnesss=None, min_hotttnesss=None, seed_catalog=None,
            artist_start_year_before=None, artist_start_year_after=None,
            artist_end_year_before=None, artist_end_year_after=None):
    """Return similar artists to this one

    Args:

    Kwargs:
        ids (str/list): An artist id or list of ids
        names (str/list): An artist name or list of names
        results (int): An integer number of results to return
        buckets (list): A list of strings specifying which buckets to retrieve
        limit (bool): A boolean indicating whether or not to limit the results
            to one of the id spaces specified in buckets
        start (int): An integer starting value for the result set
        max_familiarity (float): A float specifying the max familiarity of artists to search for
        min_familiarity (float): A float specifying the min familiarity of artists to search for
        max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
        min_hotttnesss (float): A float specifying the min hotttnesss of artists to search for
        seed_catalog (str): A string specifying the catalog similar artists are restricted to

    Returns:
        A list of similar Artist objects

    Example:

    >>> some_dudes = [artist.Artist('weezer'), artist.Artist('radiohead')]
    >>> some_dudes
    [<artist - Weezer>, <artist - Radiohead>]
    >>> sims = artist.similar(ids=[art.id for art in some_dudes], results=5)
    >>> sims
    [<artist - The Smashing Pumpkins>, <artist - Biffy Clyro>, <artist - Death Cab for Cutie>, <artist - Jimmy Eat World>, <artist - Nerf Herder>]
    >>>
    """
    buckets = buckets or []
    kwargs = {}
    if ids:
        if not isinstance(ids, list):
            ids = [ids]
        kwargs['id'] = ids
    if names:
        if not isinstance(names, list):
            names = [names]
        kwargs['name'] = names
    if max_familiarity is not None:
        kwargs['max_familiarity'] = max_familiarity
    if min_familiarity is not None:
        kwargs['min_familiarity'] = min_familiarity
    if max_hotttnesss is not None:
        kwargs['max_hotttnesss'] = max_hotttnesss
    if min_hotttnesss is not None:
        kwargs['min_hotttnesss'] = min_hotttnesss
    if seed_catalog is not None:
        kwargs['seed_catalog'] = seed_catalog
    if start:
        kwargs['start'] = start
    if results:
        kwargs['results'] = results
    if buckets:
        kwargs['bucket'] = buckets
    if limit:
        kwargs['limit'] = 'true'
    if artist_start_year_before:
        kwargs['artist_start_year_before'] = artist_start_year_before
    if artist_start_year_after:
        kwargs['artist_start_year_after'] = artist_start_year_after
    if artist_end_year_before:
        kwargs['artist_end_year_before'] = artist_end_year_before
    if artist_end_year_after:
        kwargs['artist_end_year_after'] = artist_end_year_after

    result = util.callm("%s/%s" % ('artist', 'similar'), kwargs)
    return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']]
[ "def", "similar", "(", "names", "=", "None", ",", "ids", "=", "None", ",", "start", "=", "0", ",", "results", "=", "15", ",", "buckets", "=", "None", ",", "limit", "=", "False", ",", "max_familiarity", "=", "None", ",", "min_familiarity", "=", "None", ",", "max_hotttnesss", "=", "None", ",", "min_hotttnesss", "=", "None", ",", "seed_catalog", "=", "None", ",", "artist_start_year_before", "=", "None", ",", "artist_start_year_after", "=", "None", ",", "artist_end_year_before", "=", "None", ",", "artist_end_year_after", "=", "None", ")", ":", "buckets", "=", "buckets", "or", "[", "]", "kwargs", "=", "{", "}", "if", "ids", ":", "if", "not", "isinstance", "(", "ids", ",", "list", ")", ":", "ids", "=", "[", "ids", "]", "kwargs", "[", "'id'", "]", "=", "ids", "if", "names", ":", "if", "not", "isinstance", "(", "names", ",", "list", ")", ":", "names", "=", "[", "names", "]", "kwargs", "[", "'name'", "]", "=", "names", "if", "max_familiarity", "is", "not", "None", ":", "kwargs", "[", "'max_familiarity'", "]", "=", "max_familiarity", "if", "min_familiarity", "is", "not", "None", ":", "kwargs", "[", "'min_familiarity'", "]", "=", "min_familiarity", "if", "max_hotttnesss", "is", "not", "None", ":", "kwargs", "[", "'max_hotttnesss'", "]", "=", "max_hotttnesss", "if", "min_hotttnesss", "is", "not", "None", ":", "kwargs", "[", "'min_hotttnesss'", "]", "=", "min_hotttnesss", "if", "seed_catalog", "is", "not", "None", ":", "kwargs", "[", "'seed_catalog'", "]", "=", "seed_catalog", "if", "start", ":", "kwargs", "[", "'start'", "]", "=", "start", "if", "results", ":", "kwargs", "[", "'results'", "]", "=", "results", "if", "buckets", ":", "kwargs", "[", "'bucket'", "]", "=", "buckets", "if", "limit", ":", "kwargs", "[", "'limit'", "]", "=", "'true'", "if", "artist_start_year_before", ":", "kwargs", "[", "'artist_start_year_before'", "]", "=", "artist_start_year_before", "if", "artist_start_year_after", ":", "kwargs", "[", "'artist_start_year_after'", "]", "=", "artist_start_year_after", "if", "artist_end_year_before", ":", "kwargs", "[", "'artist_end_year_before'", "]", "=", "artist_end_year_before", "if", "artist_end_year_after", ":", "kwargs", "[", "'artist_end_year_after'", "]", "=", "artist_end_year_after", "result", "=", "util", ".", "callm", "(", "\"%s/%s\"", "%", "(", "'artist'", ",", "'similar'", ")", ",", "kwargs", ")", "return", "[", "Artist", "(", "*", "*", "util", ".", "fix", "(", "a_dict", ")", ")", "for", "a_dict", "in", "result", "[", "'response'", "]", "[", "'artists'", "]", "]" ]
37.546512
26.988372
def generic_http_header_parser_for(header_name):
    """
    A parser factory to extract the request id from an HTTP header

    :return: A parser that can be used to extract the request id from the current request context
    :rtype: ()->str|None
    """
    def parser():
        request_id = request.headers.get(header_name, '').strip()
        if not request_id:
            # If the request id is empty return None
            return None
        return request_id
    return parser
[ "def", "generic_http_header_parser_for", "(", "header_name", ")", ":", "def", "parser", "(", ")", ":", "request_id", "=", "request", ".", "headers", ".", "get", "(", "header_name", ",", "''", ")", ".", "strip", "(", ")", "if", "not", "request_id", ":", "# If the request id is empty return None", "return", "None", "return", "request_id", "return", "parser" ]
31.6
21.066667
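The factory above closes over a Flask request object; a sketch of the same closure pattern with a plain dict standing in for request.headers so it can run standalone:

def generic_header_parser_for(header_name, headers):
    # `headers` stands in for flask.request.headers (assumption for the demo)
    def parser():
        request_id = headers.get(header_name, '').strip()
        return request_id or None  # empty header -> None
    return parser

parser = generic_header_parser_for('X-Request-ID', {'X-Request-ID': ' abc-123 '})
print(parser())  # abc-123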
def register(type, suffixes=[], base_type=None):
    """ Registers a target type, possibly derived from a 'base-type'.
        If 'suffixes' are provided, they list all the suffixes that mean a
        file is of 'type'. Also, the first element gives the suffix to be
        used when constructing an object of 'type'.
        type: a string
        suffixes: None or a sequence of strings
        base_type: None or a string
    """
    # Type names cannot contain hyphens, because when used as
    # feature-values they will be interpreted as composite features
    # which need to be decomposed.
    if __re_hyphen.search(type):
        raise BaseException('type name "%s" contains a hyphen' % type)

    # it's possible for a type to be registered with a
    # base type that hasn't been registered yet. in the
    # check for base_type below and the following calls to setdefault()
    # the key `type` will be added to __types. When the base type
    # actually gets registered, it would fail after the simple check
    # of "type in __types"; thus the check for "'base' in __types[type]"
    if type in __types and 'base' in __types[type]:
        raise BaseException('Type "%s" is already registered.' % type)

    entry = __types.setdefault(type, {})
    entry['base'] = base_type
    entry.setdefault('derived', [])
    entry.setdefault('scanner', None)

    if base_type:
        __types.setdefault(base_type, {}).setdefault('derived', []).append(type)

    if len(suffixes) > 0:
        # Generated targets of 'type' will use the first of 'suffixes'
        # (this may be overridden)
        set_generated_target_suffix(type, [], suffixes[0])

        # Specify mapping from suffixes to type
        register_suffixes(suffixes, type)

    feature.extend('target-type', [type])
    feature.extend('main-target-type', [type])
    feature.extend('base-target-type', [type])

    if base_type:
        feature.compose('<target-type>' + type,
                        [replace_grist(base_type, '<base-target-type>')])
        feature.compose('<base-target-type>' + type,
                        ['<base-target-type>' + base_type])

    import b2.build.generators as generators

    # Adding a new derived type affects generator selection so we need to
    # make the generator selection module update any of its cached
    # information related to a new derived type being defined.
    generators.update_cached_information_with_a_new_type(type)

    # FIXME: resolving recursive dependency.
    from b2.manager import get_manager
    get_manager().projects().project_rules().add_rule_for_type(type)
[ "def", "register", "(", "type", ",", "suffixes", "=", "[", "]", ",", "base_type", "=", "None", ")", ":", "# Type names cannot contain hyphens, because when used as", "# feature-values they will be interpreted as composite features", "# which need to be decomposed.", "if", "__re_hyphen", ".", "search", "(", "type", ")", ":", "raise", "BaseException", "(", "'type name \"%s\" contains a hyphen'", "%", "type", ")", "# it's possible for a type to be registered with a", "# base type that hasn't been registered yet. in the", "# check for base_type below and the following calls to setdefault()", "# the key `type` will be added to __types. When the base type", "# actually gets registered, it would fail after the simple check", "# of \"type in __types\"; thus the check for \"'base' in __types[type]\"", "if", "type", "in", "__types", "and", "'base'", "in", "__types", "[", "type", "]", ":", "raise", "BaseException", "(", "'Type \"%s\" is already registered.'", "%", "type", ")", "entry", "=", "__types", ".", "setdefault", "(", "type", ",", "{", "}", ")", "entry", "[", "'base'", "]", "=", "base_type", "entry", ".", "setdefault", "(", "'derived'", ",", "[", "]", ")", "entry", ".", "setdefault", "(", "'scanner'", ",", "None", ")", "if", "base_type", ":", "__types", ".", "setdefault", "(", "base_type", ",", "{", "}", ")", ".", "setdefault", "(", "'derived'", ",", "[", "]", ")", ".", "append", "(", "type", ")", "if", "len", "(", "suffixes", ")", ">", "0", ":", "# Generated targets of 'type' will use the first of 'suffixes'", "# (this may be overriden)", "set_generated_target_suffix", "(", "type", ",", "[", "]", ",", "suffixes", "[", "0", "]", ")", "# Specify mapping from suffixes to type", "register_suffixes", "(", "suffixes", ",", "type", ")", "feature", ".", "extend", "(", "'target-type'", ",", "[", "type", "]", ")", "feature", ".", "extend", "(", "'main-target-type'", ",", "[", "type", "]", ")", "feature", ".", "extend", "(", "'base-target-type'", ",", "[", "type", "]", ")", "if", "base_type", ":", "feature", ".", "compose", "(", "'<target-type>'", "+", "type", ",", "[", "replace_grist", "(", "base_type", ",", "'<base-target-type>'", ")", "]", ")", "feature", ".", "compose", "(", "'<base-target-type>'", "+", "type", ",", "[", "'<base-target-type>'", "+", "base_type", "]", ")", "import", "b2", ".", "build", ".", "generators", "as", "generators", "# Adding a new derived type affects generator selection so we need to", "# make the generator selection module update any of its cached", "# information related to a new derived type being defined.", "generators", ".", "update_cached_information_with_a_new_type", "(", "type", ")", "# FIXME: resolving recursive dependency.", "from", "b2", ".", "manager", "import", "get_manager", "get_manager", "(", ")", ".", "projects", "(", ")", ".", "project_rules", "(", ")", ".", "add_rule_for_type", "(", "type", ")" ]
44.140351
22.070175
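A minimal, self-contained sketch of the setdefault() registration pattern used above, which lets a derived type name its base before the base itself is registered; the registry dict and register_type helper here are illustrative stand-ins, not part of the b2 module:

# `registry` is a hypothetical stand-in for the module-level __types dict.
registry = {}

def register_type(name, base=None):
    if name in registry and 'base' in registry[name]:
        raise ValueError('Type "%s" is already registered.' % name)
    entry = registry.setdefault(name, {})
    entry['base'] = base
    entry.setdefault('derived', [])
    if base:
        # The base may not be registered yet; setdefault creates a stub
        # entry that the real registration fills in later.
        registry.setdefault(base, {}).setdefault('derived', []).append(name)

register_type('CPP', base='C')   # 'C' gets a stub entry
register_type('C')               # fills in the stub without raising
assert registry['C']['derived'] == ['CPP']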
def ffconvert(fname, limit_states, ff, min_iml=1E-10):
    """
    Convert a fragility function into a numpy array plus a bunch of
    attributes.

    :param fname: path to the fragility model file
    :param limit_states: expected limit states
    :param ff: fragility function node
    :returns: a pair (array, dictionary)
    """
    with context(fname, ff):
        ffs = ff[1:]
        imls = ff.imls
    nodamage = imls.attrib.get('noDamageLimit')
    if nodamage == 0:
        # use a cutoff to avoid log(0) in GMPE.to_distribution_values
        logging.warning('Found a noDamageLimit=0 in %s, line %s, '
                        'using %g instead', fname, ff.lineno, min_iml)
        nodamage = min_iml
    with context(fname, imls):
        attrs = dict(format=ff['format'],
                     imt=imls['imt'],
                     id=ff['id'],
                     nodamage=nodamage)

    LS = len(limit_states)
    if LS != len(ffs):
        with context(fname, ff):
            raise InvalidFile('expected %d limit states, found %d' %
                              (LS, len(ffs)))
    if ff['format'] == 'continuous':
        minIML = float(imls['minIML'])
        if minIML == 0:
            # use a cutoff to avoid log(0) in GMPE.to_distribution_values
            logging.warning('Found minIML=0 in %s, line %s, using %g instead',
                            fname, imls.lineno, min_iml)
            minIML = min_iml
        attrs['minIML'] = minIML
        attrs['maxIML'] = float(imls['maxIML'])
        array = numpy.zeros(LS, [('mean', F64), ('stddev', F64)])
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            if ls != node['ls']:
                with context(fname, node):
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
            array['mean'][i] = node['mean']
            array['stddev'][i] = node['stddev']
    elif ff['format'] == 'discrete':
        attrs['imls'] = ~imls
        valid.check_levels(attrs['imls'], attrs['imt'], min_iml)
        num_poes = len(attrs['imls'])
        array = numpy.zeros((LS, num_poes))
        for i, ls, node in zip(range(LS), limit_states, ff[1:]):
            with context(fname, node):
                if ls != node['ls']:
                    raise InvalidFile('expected %s, found %s' %
                                      (ls, node['ls']))
                poes = (~node if isinstance(~node, list)
                        else valid.probabilities(~node))
                if len(poes) != num_poes:
                    raise InvalidFile('expected %s, found %s' %
                                      (num_poes, len(poes)))
                array[i, :] = poes
    # NB: the format is constrained in nrml.FragilityNode to be either
    # discrete or continuous, there is no third option
    return array, attrs
[ "def", "ffconvert", "(", "fname", ",", "limit_states", ",", "ff", ",", "min_iml", "=", "1E-10", ")", ":", "with", "context", "(", "fname", ",", "ff", ")", ":", "ffs", "=", "ff", "[", "1", ":", "]", "imls", "=", "ff", ".", "imls", "nodamage", "=", "imls", ".", "attrib", ".", "get", "(", "'noDamageLimit'", ")", "if", "nodamage", "==", "0", ":", "# use a cutoff to avoid log(0) in GMPE.to_distribution_values", "logging", ".", "warning", "(", "'Found a noDamageLimit=0 in %s, line %s, '", "'using %g instead'", ",", "fname", ",", "ff", ".", "lineno", ",", "min_iml", ")", "nodamage", "=", "min_iml", "with", "context", "(", "fname", ",", "imls", ")", ":", "attrs", "=", "dict", "(", "format", "=", "ff", "[", "'format'", "]", ",", "imt", "=", "imls", "[", "'imt'", "]", ",", "id", "=", "ff", "[", "'id'", "]", ",", "nodamage", "=", "nodamage", ")", "LS", "=", "len", "(", "limit_states", ")", "if", "LS", "!=", "len", "(", "ffs", ")", ":", "with", "context", "(", "fname", ",", "ff", ")", ":", "raise", "InvalidFile", "(", "'expected %d limit states, found %d'", "%", "(", "LS", ",", "len", "(", "ffs", ")", ")", ")", "if", "ff", "[", "'format'", "]", "==", "'continuous'", ":", "minIML", "=", "float", "(", "imls", "[", "'minIML'", "]", ")", "if", "minIML", "==", "0", ":", "# use a cutoff to avoid log(0) in GMPE.to_distribution_values", "logging", ".", "warning", "(", "'Found minIML=0 in %s, line %s, using %g instead'", ",", "fname", ",", "imls", ".", "lineno", ",", "min_iml", ")", "minIML", "=", "min_iml", "attrs", "[", "'minIML'", "]", "=", "minIML", "attrs", "[", "'maxIML'", "]", "=", "float", "(", "imls", "[", "'maxIML'", "]", ")", "array", "=", "numpy", ".", "zeros", "(", "LS", ",", "[", "(", "'mean'", ",", "F64", ")", ",", "(", "'stddev'", ",", "F64", ")", "]", ")", "for", "i", ",", "ls", ",", "node", "in", "zip", "(", "range", "(", "LS", ")", ",", "limit_states", ",", "ff", "[", "1", ":", "]", ")", ":", "if", "ls", "!=", "node", "[", "'ls'", "]", ":", "with", "context", "(", "fname", ",", "node", ")", ":", "raise", "InvalidFile", "(", "'expected %s, found'", "%", "(", "ls", ",", "node", "[", "'ls'", "]", ")", ")", "array", "[", "'mean'", "]", "[", "i", "]", "=", "node", "[", "'mean'", "]", "array", "[", "'stddev'", "]", "[", "i", "]", "=", "node", "[", "'stddev'", "]", "elif", "ff", "[", "'format'", "]", "==", "'discrete'", ":", "attrs", "[", "'imls'", "]", "=", "~", "imls", "valid", ".", "check_levels", "(", "attrs", "[", "'imls'", "]", ",", "attrs", "[", "'imt'", "]", ",", "min_iml", ")", "num_poes", "=", "len", "(", "attrs", "[", "'imls'", "]", ")", "array", "=", "numpy", ".", "zeros", "(", "(", "LS", ",", "num_poes", ")", ")", "for", "i", ",", "ls", ",", "node", "in", "zip", "(", "range", "(", "LS", ")", ",", "limit_states", ",", "ff", "[", "1", ":", "]", ")", ":", "with", "context", "(", "fname", ",", "node", ")", ":", "if", "ls", "!=", "node", "[", "'ls'", "]", ":", "raise", "InvalidFile", "(", "'expected %s, found'", "%", "(", "ls", ",", "node", "[", "'ls'", "]", ")", ")", "poes", "=", "(", "~", "node", "if", "isinstance", "(", "~", "node", ",", "list", ")", "else", "valid", ".", "probabilities", "(", "~", "node", ")", ")", "if", "len", "(", "poes", ")", "!=", "num_poes", ":", "raise", "InvalidFile", "(", "'expected %s, found'", "%", "(", "num_poes", ",", "len", "(", "poes", ")", ")", ")", "array", "[", "i", ",", ":", "]", "=", "poes", "# NB: the format is constrained in nrml.FragilityNode to be either", "# discrete or continuous, there is no third option", "return", "array", ",", "attrs" ]
42.439394
13.924242
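For the continuous format above, the per-limit-state parameters land in a structured numpy array; a runnable sketch with made-up values (F64 and the limit-state names are assumptions for illustration):

import numpy

# One (mean, stddev) pair per limit state, stored as a structured array.
F64 = numpy.float64
limit_states = ['slight', 'moderate', 'complete']
array = numpy.zeros(len(limit_states), [('mean', F64), ('stddev', F64)])
for i, (mean, stddev) in enumerate([(0.1, 0.05), (0.3, 0.10), (0.7, 0.20)]):
    array['mean'][i] = mean
    array['stddev'][i] = stddev
print(array['mean'])    # -> [0.1 0.3 0.7]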
def _load_pretrained_tok2vec(nlp, loc):
    """Load pre-trained weights for the 'token-to-vector' part of the
    component models, which is typically a CNN. See 'spacy pretrain'.
    Experimental.
    """
    with loc.open("rb") as file_:
        weights_data = file_.read()
    loaded = []
    for name, component in nlp.pipeline:
        if hasattr(component, "model") and hasattr(component.model, "tok2vec"):
            component.tok2vec.from_bytes(weights_data)
            loaded.append(name)
    return loaded
[ "def", "_load_pretrained_tok2vec", "(", "nlp", ",", "loc", ")", ":", "with", "loc", ".", "open", "(", "\"rb\"", ")", "as", "file_", ":", "weights_data", "=", "file_", ".", "read", "(", ")", "loaded", "=", "[", "]", "for", "name", ",", "component", "in", "nlp", ".", "pipeline", ":", "if", "hasattr", "(", "component", ",", "\"model\"", ")", "and", "hasattr", "(", "component", ".", "model", ",", "\"tok2vec\"", ")", ":", "component", ".", "tok2vec", ".", "from_bytes", "(", "weights_data", ")", "loaded", ".", "append", "(", "name", ")", "return", "loaded" ]
41.833333
13
def gen_goal(self, y_des):
    """Generate the goal for path imitation.
    For rhythmic DMPs the goal is the average of the
    desired trajectory.

    y_des np.array: the desired trajectory to follow
    """
    goal = np.zeros(self.dmps)
    for n in range(self.dmps):
        num_idx = ~np.isnan(y_des[n])  # ignore nan's when calculating goal
        goal[n] = .5 * (y_des[n, num_idx].min() +
                        y_des[n, num_idx].max())
    return goal
[ "def", "gen_goal", "(", "self", ",", "y_des", ")", ":", "goal", "=", "np", ".", "zeros", "(", "self", ".", "dmps", ")", "for", "n", "in", "range", "(", "self", ".", "dmps", ")", ":", "num_idx", "=", "~", "np", ".", "isnan", "(", "y_des", "[", "n", "]", ")", "# ignore nan's when calculating goal", "goal", "[", "n", "]", "=", ".5", "*", "(", "y_des", "[", "n", ",", "num_idx", "]", ".", "min", "(", ")", "+", "y_des", "[", "n", ",", "num_idx", "]", ".", "max", "(", ")", ")", "return", "goal" ]
33.4
18.066667
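A standalone check of the NaN-aware goal computation above (the sample trajectory values are made up):

import numpy as np

# Goal per DOF is the midpoint of min and max over the non-NaN samples.
y_des = np.array([[0.0, 1.0, np.nan, 2.0],
                  [np.nan, -1.0, 3.0, 1.0]])
goal = np.zeros(len(y_des))
for n in range(len(y_des)):
    num_idx = ~np.isnan(y_des[n])
    goal[n] = .5 * (y_des[n, num_idx].min() + y_des[n, num_idx].max())
print(goal)  # -> [1. 1.]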
def full_name(self):
    """
    You can get the full name of the user.

    :return: str
    """
    full_name = self.first_name
    if self.last_name:
        full_name += ' ' + self.last_name
    return full_name
[ "def", "full_name", "(", "self", ")", ":", "full_name", "=", "self", ".", "first_name", "if", "self", ".", "last_name", ":", "full_name", "+=", "' '", "+", "self", ".", "last_name", "return", "full_name" ]
23
12.2
def init_app(self, app):
    """Init app with Flask instance.

    You can also pass the instance of Flask later::

        oauth = OAuth()
        oauth.init_app(app)
    """
    self.app = app
    app.extensions = getattr(app, 'extensions', {})
    app.extensions[self.state_key] = self
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "app", "=", "app", "app", ".", "extensions", "=", "getattr", "(", "app", ",", "'extensions'", ",", "{", "}", ")", "app", ".", "extensions", "[", "self", ".", "state_key", "]", "=", "self" ]
28.181818
15.545455
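The init_app pattern above lets an extension bind to an app created after the extension itself; a dependency-free sketch with a stand-in App class (all names here are illustrative, not Flask's API):

class App:
    pass

class OAuth:
    state_key = 'oauth'

    def __init__(self, app=None):
        # Support both immediate and deferred binding.
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        self.app = app
        app.extensions = getattr(app, 'extensions', {})
        app.extensions[self.state_key] = self

app = App()
oauth = OAuth()      # created first, bound later
oauth.init_app(app)
assert app.extensions['oauth'] is oauth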
def get_cycles(graph_dict, vertices=None):
    """given a dictionary representing an ordered graph (i.e. keys are
    vertices and values are lists of destination vertices representing edges),
    return a list of detected cycles
    """
    if not graph_dict:
        return ()
    result = []
    if vertices is None:
        vertices = graph_dict.keys()
    for vertice in vertices:
        _get_cycles(graph_dict, [], set(), result, vertice)
    return result
[ "def", "get_cycles", "(", "graph_dict", ",", "vertices", "=", "None", ")", ":", "if", "not", "graph_dict", ":", "return", "(", ")", "result", "=", "[", "]", "if", "vertices", "is", "None", ":", "vertices", "=", "graph_dict", ".", "keys", "(", ")", "for", "vertice", "in", "vertices", ":", "_get_cycles", "(", "graph_dict", ",", "[", "]", ",", "set", "(", ")", ",", "result", ",", "vertice", ")", "return", "result" ]
34.615385
14.846154
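The _get_cycles helper is not shown in this record; the following is a hedged, self-contained DFS sketch of the same idea, not the library's implementation:

def find_cycles(graph):
    cycles = []

    def dfs(node, path):
        if node in path:
            # The path revisits `node`: the slice from its first
            # occurrence onward is a cycle.
            cycle = path[path.index(node):]
            if sorted(cycle) not in [sorted(c) for c in cycles]:
                cycles.append(cycle)
            return
        for nxt in graph.get(node, []):
            dfs(nxt, path + [node])

    for vertex in graph:
        dfs(vertex, [])
    return cycles

print(find_cycles({1: [2], 2: [3], 3: [1], 4: []}))  # -> [[1, 2, 3]]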
def json(cls, message):
    """
    Print a nice JSON output

    Args:
        message: the message to print
    """
    if type(message) is OrderedDict:
        pprint(dict(message))
    else:
        pprint(message)
[ "def", "json", "(", "cls", ",", "message", ")", ":", "if", "type", "(", "message", ")", "is", "OrderedDict", ":", "pprint", "(", "dict", "(", "message", ")", ")", "else", ":", "pprint", "(", "message", ")" ]
22.272727
15.636364
def dinfdistdown(np, ang, fel, slp, src, statsm, distm, edgecontamination, wg, dist,
                 workingdir=None, mpiexedir=None, exedir=None,
                 log_file=None, runtime_file=None, hostfile=None):
    """Run D-inf distance down to stream"""
    in_params = {'-m': '%s %s' % (TauDEM.convertstatsmethod(statsm),
                                  TauDEM.convertdistmethod(distm))}
    if StringClass.string_match(edgecontamination, 'false') or edgecontamination is False:
        in_params['-nc'] = None
    fname = TauDEM.func_name('dinfdistdown')
    return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                      {'-fel': fel, '-slp': slp, '-ang': ang,
                       '-src': src, '-wg': wg},
                      workingdir,
                      in_params,
                      {'-dd': dist},
                      {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file})
[ "def", "dinfdistdown", "(", "np", ",", "ang", ",", "fel", ",", "slp", ",", "src", ",", "statsm", ",", "distm", ",", "edgecontamination", ",", "wg", ",", "dist", ",", "workingdir", "=", "None", ",", "mpiexedir", "=", "None", ",", "exedir", "=", "None", ",", "log_file", "=", "None", ",", "runtime_file", "=", "None", ",", "hostfile", "=", "None", ")", ":", "in_params", "=", "{", "'-m'", ":", "'%s %s'", "%", "(", "TauDEM", ".", "convertstatsmethod", "(", "statsm", ")", ",", "TauDEM", ".", "convertdistmethod", "(", "distm", ")", ")", "}", "if", "StringClass", ".", "string_match", "(", "edgecontamination", ",", "'false'", ")", "or", "edgecontamination", "is", "False", ":", "in_params", "[", "'-nc'", "]", "=", "None", "fname", "=", "TauDEM", ".", "func_name", "(", "'dinfdistdown'", ")", "return", "TauDEM", ".", "run", "(", "FileClass", ".", "get_executable_fullpath", "(", "fname", ",", "exedir", ")", ",", "{", "'-fel'", ":", "fel", ",", "'-slp'", ":", "slp", ",", "'-ang'", ":", "ang", ",", "'-src'", ":", "src", ",", "'-wg'", ":", "wg", "}", ",", "workingdir", ",", "in_params", ",", "{", "'-dd'", ":", "dist", "}", ",", "{", "'mpipath'", ":", "mpiexedir", ",", "'hostfile'", ":", "hostfile", ",", "'n'", ":", "np", "}", ",", "{", "'logfile'", ":", "log_file", ",", "'runtimefile'", ":", "runtime_file", "}", ")" ]
63.875
24.9375
def to_datetime(timestamp):
    """Return datetime object from timestamp."""
    return dt.fromtimestamp(time.mktime(
        time.localtime(int(str(timestamp)[:10]))))
[ "def", "to_datetime", "(", "timestamp", ")", ":", "return", "dt", ".", "fromtimestamp", "(", "time", ".", "mktime", "(", "time", ".", "localtime", "(", "int", "(", "str", "(", "timestamp", ")", "[", ":", "10", "]", ")", ")", ")", ")" ]
41.25
5.75
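Assuming the input is a unix timestamp that may carry extra precision digits (e.g. milliseconds), the mktime/localtime round-trip above is equivalent to a direct fromtimestamp call on the first ten digits; a runnable sketch:

from datetime import datetime

def to_datetime_direct(timestamp):
    # Keep only the seconds part of an over-precise timestamp.
    return datetime.fromtimestamp(int(str(timestamp)[:10]))

print(to_datetime_direct(1500000000000))  # milliseconds input, seconds kept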
def new_calendar(self, calendar_name):
    """ Creates a new calendar

    :param str calendar_name: name of the new calendar
    :return: a new Calendar instance
    :rtype: Calendar
    """
    if not calendar_name:
        return None

    url = self.build_url(self._endpoints.get('root_calendars'))

    response = self.con.post(url, data={self._cc('name'): calendar_name})
    if not response:
        return None

    data = response.json()

    # Everything received from cloud must be passed as self._cloud_data_key
    return self.calendar_constructor(parent=self,
                                     **{self._cloud_data_key: data})
[ "def", "new_calendar", "(", "self", ",", "calendar_name", ")", ":", "if", "not", "calendar_name", ":", "return", "None", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'root_calendars'", ")", ")", "response", "=", "self", ".", "con", ".", "post", "(", "url", ",", "data", "=", "{", "self", ".", "_cc", "(", "'name'", ")", ":", "calendar_name", "}", ")", "if", "not", "response", ":", "return", "None", "data", "=", "response", ".", "json", "(", ")", "# Everything received from cloud must be passed as self._cloud_data_key", "return", "self", ".", "calendar_constructor", "(", "parent", "=", "self", ",", "*", "*", "{", "self", ".", "_cloud_data_key", ":", "data", "}", ")" ]
32.47619
21.666667
def _sizer_add(self, child):
    "called when adding a control to the window"
    if self.sizer:
        if DEBUG: print "adding to sizer:", child.name
        border = None
        if not border:
            border = child.sizer_border
        flags = child._sizer_flags
        if child.sizer_align:
            flags |= child._sizer_align
        if child.sizer_expand:
            flags |= wx.EXPAND
        if 'grid' in self.sizer:
            self._sizer.Add(child.wx_obj, flag=flags, border=border,
                            pos=(child.sizer_row, child.sizer_col),
                            span=(child.sizer_rowspan, child.sizer_colspan))
        else:
            self._sizer.Add(child.wx_obj, 0, flags, border)
[ "def", "_sizer_add", "(", "self", ",", "child", ")", ":", "if", "self", ".", "sizer", ":", "if", "DEBUG", ":", "print", "\"adding to sizer:\"", ",", "child", ".", "name", "border", "=", "None", "if", "not", "border", ":", "border", "=", "child", ".", "sizer_border", "flags", "=", "child", ".", "_sizer_flags", "if", "child", ".", "sizer_align", ":", "flags", "|=", "child", ".", "_sizer_align", "if", "child", ".", "sizer_expand", ":", "flags", "|=", "wx", ".", "EXPAND", "if", "'grid'", "in", "self", ".", "sizer", ":", "self", ".", "_sizer", ".", "Add", "(", "child", ".", "wx_obj", ",", "flag", "=", "flags", ",", "border", "=", "border", ",", "pos", "=", "(", "child", ".", "sizer_row", ",", "child", ".", "sizer_col", ")", ",", "span", "=", "(", "child", ".", "sizer_rowspan", ",", "child", ".", "sizer_colspan", ")", ")", "else", ":", "self", ".", "_sizer", ".", "Add", "(", "child", ".", "wx_obj", ",", "0", ",", "flags", ",", "border", ")" ]
44.111111
14.888889
def _drawContents(self, currentRti=None):
    """ Draws the attributes of the currentRTI
    """
    table = self.table
    table.setUpdatesEnabled(False)
    try:
        table.clearContents()
        verticalHeader = table.verticalHeader()
        verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)

        if currentRti is None:
            return

        # Each column in the repo tree corresponds to a row in this detail pane.
        repoModel = self._repoTreeView.model()
        propNames = RepoTreeModel.HEADERS
        table.setRowCount(len(propNames))

        for row, propName in enumerate(propNames):
            nameItem = QtWidgets.QTableWidgetItem(propName)
            nameItem.setToolTip(propName)
            table.setItem(row, self.COL_PROP_NAME, nameItem)
            propValue = repoModel.itemData(currentRti, row)
            propItem = QtWidgets.QTableWidgetItem(propValue)
            propItem.setToolTip(propValue)
            table.setItem(row, self.COL_VALUE, propItem)
            table.resizeRowToContents(row)

        verticalHeader.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
    finally:
        table.setUpdatesEnabled(True)
[ "def", "_drawContents", "(", "self", ",", "currentRti", "=", "None", ")", ":", "table", "=", "self", ".", "table", "table", ".", "setUpdatesEnabled", "(", "False", ")", "try", ":", "table", ".", "clearContents", "(", ")", "verticalHeader", "=", "table", ".", "verticalHeader", "(", ")", "verticalHeader", ".", "setSectionResizeMode", "(", "QtWidgets", ".", "QHeaderView", ".", "Fixed", ")", "if", "currentRti", "is", "None", ":", "return", "# Each column in the repo tree corresponds to a row in this detail pane.", "repoModel", "=", "self", ".", "_repoTreeView", ".", "model", "(", ")", "propNames", "=", "RepoTreeModel", ".", "HEADERS", "table", ".", "setRowCount", "(", "len", "(", "propNames", ")", ")", "for", "row", ",", "propName", "in", "enumerate", "(", "propNames", ")", ":", "nameItem", "=", "QtWidgets", ".", "QTableWidgetItem", "(", "propName", ")", "nameItem", ".", "setToolTip", "(", "propName", ")", "table", ".", "setItem", "(", "row", ",", "self", ".", "COL_PROP_NAME", ",", "nameItem", ")", "propValue", "=", "repoModel", ".", "itemData", "(", "currentRti", ",", "row", ")", "propItem", "=", "QtWidgets", ".", "QTableWidgetItem", "(", "propValue", ")", "propItem", ".", "setToolTip", "(", "propValue", ")", "table", ".", "setItem", "(", "row", ",", "self", ".", "COL_VALUE", ",", "propItem", ")", "table", ".", "resizeRowToContents", "(", "row", ")", "verticalHeader", ".", "setSectionResizeMode", "(", "QtWidgets", ".", "QHeaderView", ".", "ResizeToContents", ")", "finally", ":", "table", ".", "setUpdatesEnabled", "(", "True", ")" ]
39.59375
18.875
def disk_check_size(ctx, param, value):
    """ Validation callback for disk size parameter."""
    if value:
        # if we've got a prefix
        if isinstance(value, tuple):
            val = value[1]
        else:
            val = value
        if val % 1024:
            raise click.ClickException('Size must be a multiple of 1024.')
    return value
[ "def", "disk_check_size", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "value", ":", "# if we've got a prefix", "if", "isinstance", "(", "value", ",", "tuple", ")", ":", "val", "=", "value", "[", "1", "]", "else", ":", "val", "=", "value", "if", "val", "%", "1024", ":", "raise", "click", ".", "ClickException", "(", "'Size must be a multiple of 1024.'", ")", "return", "value" ]
31.636364
15.909091
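A standalone restatement of the multiple-of-1024 rule above, with the optional (prefix, value) tuple unpacked the same way (a plain ValueError stands in for click.ClickException):

def check_size(value):
    # Accept either a bare size or a (prefix, size) tuple.
    val = value[1] if isinstance(value, tuple) else value
    if val % 1024:
        raise ValueError('Size must be a multiple of 1024.')
    return value

print(check_size(2048))            # 2048
print(check_size(('disk', 4096)))  # ('disk', 4096)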
def relative_abundance(coverage):
    """
    cov = number of bases / length of genome
    relative abundance = [(cov) / sum(cov for all genomes)] * 100
    """
    relative = {}
    sums = []
    for genome in coverage:
        for cov in coverage[genome]:
            sums.append(0)
        break
    for genome in coverage:
        index = 0
        for cov in coverage[genome]:
            sums[index] += cov
            index += 1
    for genome in coverage:
        index = 0
        relative[genome] = []
        for cov in coverage[genome]:
            if sums[index] == 0:
                relative[genome].append(0)
            else:
                relative[genome].append((cov / sums[index]) * float(100))
            index += 1
    return relative
[ "def", "relative_abundance", "(", "coverage", ")", ":", "relative", "=", "{", "}", "sums", "=", "[", "]", "for", "genome", "in", "coverage", ":", "for", "cov", "in", "coverage", "[", "genome", "]", ":", "sums", ".", "append", "(", "0", ")", "break", "for", "genome", "in", "coverage", ":", "index", "=", "0", "for", "cov", "in", "coverage", "[", "genome", "]", ":", "sums", "[", "index", "]", "+=", "cov", "index", "+=", "1", "for", "genome", "in", "coverage", ":", "index", "=", "0", "relative", "[", "genome", "]", "=", "[", "]", "for", "cov", "in", "coverage", "[", "genome", "]", ":", "if", "sums", "[", "index", "]", "==", "0", ":", "relative", "[", "genome", "]", ".", "append", "(", "0", ")", "else", ":", "relative", "[", "genome", "]", ".", "append", "(", "(", "cov", "/", "sums", "[", "index", "]", ")", "*", "float", "(", "100", ")", ")", "index", "+=", "1", "return", "relative" ]
22.461538
18.153846
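A compact numpy restatement of the normalization above, useful for cross-checking: each position's coverage is divided by the column sum and scaled to 100, with zero-sum columns mapping to 0 (the sample data is made up):

import numpy as np

coverage = {'genomeA': [2.0, 0.0, 6.0], 'genomeB': [2.0, 0.0, 2.0]}
mat = np.array(list(coverage.values()))
sums = mat.sum(axis=0)
# where= guards the division so zero-sum columns stay 0 instead of NaN.
rel = np.divide(mat, sums, out=np.zeros_like(mat), where=sums != 0) * 100
relative = dict(zip(coverage.keys(), rel.tolist()))
print(relative)  # {'genomeA': [50.0, 0.0, 75.0], 'genomeB': [50.0, 0.0, 25.0]}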
def delete(self):
    """
    Wraps the standard delete() method to catch expected exceptions and
    raise the appropriate pyrax exceptions.
    """
    try:
        return super(CloudNetwork, self).delete()
    except exc.Forbidden as e:
        # Network is in use
        raise exc.NetworkInUse("Cannot delete a network in use by a server.")
[ "def", "delete", "(", "self", ")", ":", "try", ":", "return", "super", "(", "CloudNetwork", ",", "self", ")", ".", "delete", "(", ")", "except", "exc", ".", "Forbidden", "as", "e", ":", "# Network is in use", "raise", "exc", ".", "NetworkInUse", "(", "\"Cannot delete a network in use by a server.\"", ")" ]
37.2
16.2
def _amIdoneIterating(self, f_k_new, relative_tolerance, iteration,
                      maximum_iterations, print_warning, verbose):
    """
    Convenience function to test whether we are done iterating, same for
    all iteration types

    REQUIRED ARGUMENTS
        f_k_new (array): new free energies
        f_k (array): older free energies
        relative_tolerance (float): the relative tolerance for terminating
        verbose (bool): verbose response
        iterations (int): current number of iterations
        print_warning (bool): sometimes, we want to suppress the warning.

    RETURN VALUES
        yesIam (bool): indicates that the iteration has converged.
    """
    yesIam = False
    # Compute change from old to new estimate.
    Delta_f_k = f_k_new - self.f_k[self.states_with_samples]

    # Check convergence criteria.
    # Terminate when max((f - fold) / f) < relative_tolerance for all
    # nonzero f.
    max_delta = np.max(np.abs(Delta_f_k) / np.max(np.abs(f_k_new)))

    # Update stored free energies.
    f_k = f_k_new.copy()
    self.f_k[self.states_with_samples] = f_k

    # write out current estimate
    if verbose:
        print("current f_k for states with samples =")
        print(f_k)
        print("relative max_delta = %e" % max_delta)

    # Check convergence criteria.
    # Terminate when max((f - fold) / f) < relative_tolerance for all
    # nonzero f.
    if np.isnan(max_delta) or (max_delta < relative_tolerance):
        yesIam = True

    if (yesIam):
        # Report convergence, or warn user if convergence was not achieved.
        if np.all(self.f_k == 0.0):
            # all f_k appear to be zero
            print('WARNING: All f_k appear to be zero.')
        elif (max_delta < relative_tolerance):
            # Convergence achieved.
            if verbose:
                print('Converged to tolerance of %e in %d iterations.' %
                      (max_delta, iteration + 1))
        elif (print_warning):
            # Warn that convergence was not achieved.
            # many times, self-consistent iteration is used in conjunction with another program. In that case,
            # we don't really need to warn about anything, since we are not
            # running it to convergence.
            print('WARNING: Did not converge to within specified tolerance.')
            print('max_delta = %e, TOLERANCE = %e, MAX_ITS = %d, iterations completed = %d' %
                  (max_delta, relative_tolerance, maximum_iterations, iteration))
    return yesIam
[ "def", "_amIdoneIterating", "(", "self", ",", "f_k_new", ",", "relative_tolerance", ",", "iteration", ",", "maximum_iterations", ",", "print_warning", ",", "verbose", ")", ":", "yesIam", "=", "False", "# Compute change from old to new estimate.", "Delta_f_k", "=", "f_k_new", "-", "self", ".", "f_k", "[", "self", ".", "states_with_samples", "]", "# Check convergence criteria.", "# Terminate when max((f - fold) / f) < relative_tolerance for all", "# nonzero f.", "max_delta", "=", "np", ".", "max", "(", "np", ".", "abs", "(", "Delta_f_k", ")", "/", "np", ".", "max", "(", "np", ".", "abs", "(", "f_k_new", ")", ")", ")", "# Update stored free energies.", "f_k", "=", "f_k_new", ".", "copy", "(", ")", "self", ".", "f_k", "[", "self", ".", "states_with_samples", "]", "=", "f_k", "# write out current estimate", "if", "verbose", ":", "print", "(", "\"current f_k for states with samples =\"", ")", "print", "(", "f_k", ")", "print", "(", "\"relative max_delta = %e\"", "%", "max_delta", ")", "# Check convergence criteria.", "# Terminate when max((f - fold) / f) < relative_tolerance for all", "# nonzero f.", "if", "np", ".", "isnan", "(", "max_delta", ")", "or", "(", "max_delta", "<", "relative_tolerance", ")", ":", "yesIam", "=", "True", "if", "(", "yesIam", ")", ":", "# Report convergence, or warn user if convergence was not achieved.", "if", "np", ".", "all", "(", "self", ".", "f_k", "==", "0.0", ")", ":", "# all f_k appear to be zero", "print", "(", "'WARNING: All f_k appear to be zero.'", ")", "elif", "(", "max_delta", "<", "relative_tolerance", ")", ":", "# Convergence achieved.", "if", "verbose", ":", "print", "(", "'Converged to tolerance of %e in %d iterations.'", "%", "(", "max_delta", ",", "iteration", "+", "1", ")", ")", "elif", "(", "print_warning", ")", ":", "# Warn that convergence was not achieved.", "# many times, self-consistent iteration is used in conjunction with another program. In that case,", "# we don't really need to warn about anything, since we are not", "# running it to convergence.", "print", "(", "'WARNING: Did not converge to within specified tolerance.'", ")", "print", "(", "'max_delta = %e, TOLERANCE = %e, MAX_ITS = %d, iterations completed = %d'", "%", "(", "max_delta", ",", "relative_tolerance", ",", "maximum_iterations", ",", "iteration", ")", ")", "return", "yesIam" ]
42.786885
24.819672
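A standalone illustration of the convergence test above: the relative change of the free-energy vector is measured against its largest magnitude, and a NaN result is treated as converged:

import numpy as np

def converged(f_old, f_new, relative_tolerance=1e-7):
    # Largest elementwise change, normalized by the largest |f_new|.
    max_delta = np.max(np.abs(f_new - f_old) / np.max(np.abs(f_new)))
    return bool(np.isnan(max_delta) or max_delta < relative_tolerance)

f_old = np.array([0.0, 1.0, 2.0])
print(converged(f_old, f_old + 1e-9))  # True
print(converged(f_old, f_old + 0.5))   # False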
def get_parent_dept_path(self):
    """Method to get the department list"""
    self.logger.info("%s\t%s" % (self.request_method, self.request_url))
    return self.json_response.get("parentIds", None)
[ "def", "get_parent_dept_path", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"%s\\t%s\"", "%", "(", "self", ".", "request_method", ",", "self", ".", "request_url", ")", ")", "return", "self", ".", "json_response", ".", "get", "(", "\"parentIds\"", ",", "None", ")" ]
53.25
15.25
def visit_Import(self, node):
    """ Check if imported module exists in MODULES. """
    for alias in node.names:
        current_module = MODULES
        # Recursive check for submodules
        for path in alias.name.split('.'):
            if path not in current_module:
                raise PythranSyntaxError(
                    "Module '{0}' unknown.".format(alias.name),
                    node)
            else:
                current_module = current_module[path]
[ "def", "visit_Import", "(", "self", ",", "node", ")", ":", "for", "alias", "in", "node", ".", "names", ":", "current_module", "=", "MODULES", "# Recursive check for submodules", "for", "path", "in", "alias", ".", "name", ".", "split", "(", "'.'", ")", ":", "if", "path", "not", "in", "current_module", ":", "raise", "PythranSyntaxError", "(", "\"Module '{0}' unknown.\"", ".", "format", "(", "alias", ".", "name", ")", ",", "node", ")", "else", ":", "current_module", "=", "current_module", "[", "path", "]" ]
42.583333
9.833333
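The MODULES table walked above is a nested dict of dicts; a self-contained sketch of the dotted-path lookup (the sample table is an assumption for illustration):

MODULES = {'numpy': {'linalg': {}, 'random': {}}}

def module_known(dotted_name):
    # Descend one dict level per dotted component.
    current = MODULES
    for path in dotted_name.split('.'):
        if path not in current:
            return False
        current = current[path]
    return True

print(module_known('numpy.linalg'))  # True
print(module_known('numpy.fft'))     # False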
def get_environment(self):
    """
    Get environment facts.

    power and fan are currently not implemented
    cpu is using 1-minute average
    cpu hard-coded to cpu0 (i.e. only a single CPU)
    """
    environment = {}
    cpu_cmd = "show proc cpu"
    mem_cmd = "show memory statistics"
    temp_cmd = "show env temperature status"

    output = self._send_command(cpu_cmd)
    environment.setdefault("cpu", {})
    environment["cpu"][0] = {}
    environment["cpu"][0]["%usage"] = 0.0
    for line in output.splitlines():
        if "CPU utilization" in line:
            # CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%
            cpu_regex = r"^.*one minute: (\d+)%; five.*$"
            match = re.search(cpu_regex, line)
            environment["cpu"][0]["%usage"] = float(match.group(1))
            break

    output = self._send_command(mem_cmd)
    for line in output.splitlines():
        if "Processor" in line:
            _, _, proc_total_mem, proc_used_mem, _ = line.split()[:5]
        elif "I/O" in line or "io" in line:
            _, _, io_total_mem, io_used_mem, _ = line.split()[:5]
    total_mem = int(proc_total_mem) + int(io_total_mem)
    used_mem = int(proc_used_mem) + int(io_used_mem)
    environment.setdefault("memory", {})
    environment["memory"]["used_ram"] = used_mem
    environment["memory"]["available_ram"] = total_mem

    environment.setdefault("temperature", {})
    re_temp_value = re.compile("(.*) Temperature Value")
    # The 'show env temperature status' is not ubiquitous in Cisco IOS
    output = self._send_command(temp_cmd)
    if "% Invalid" not in output:
        for line in output.splitlines():
            m = re_temp_value.match(line)
            if m is not None:
                temp_name = m.group(1).lower()
                temp_value = float(line.split(":")[1].split()[0])
                env_value = {
                    "is_alert": False,
                    "is_critical": False,
                    "temperature": temp_value,
                }
                environment["temperature"][temp_name] = env_value
            elif "Yellow Threshold" in line:
                system_temp_alert = float(line.split(":")[1].split()[0])
                if temp_value > system_temp_alert:
                    env_value["is_alert"] = True
            elif "Red Threshold" in line:
                system_temp_crit = float(line.split(":")[1].split()[0])
                if temp_value > system_temp_crit:
                    env_value["is_critical"] = True
    else:
        env_value = {"is_alert": False, "is_critical": False, "temperature": -1.0}
        environment["temperature"]["invalid"] = env_value

    # Initialize 'power' and 'fan' to default values (not implemented)
    environment.setdefault("power", {})
    environment["power"]["invalid"] = {
        "status": True,
        "output": -1.0,
        "capacity": -1.0,
    }
    environment.setdefault("fans", {})
    environment["fans"]["invalid"] = {"status": True}

    return environment
[ "def", "get_environment", "(", "self", ")", ":", "environment", "=", "{", "}", "cpu_cmd", "=", "\"show proc cpu\"", "mem_cmd", "=", "\"show memory statistics\"", "temp_cmd", "=", "\"show env temperature status\"", "output", "=", "self", ".", "_send_command", "(", "cpu_cmd", ")", "environment", ".", "setdefault", "(", "\"cpu\"", ",", "{", "}", ")", "environment", "[", "\"cpu\"", "]", "[", "0", "]", "=", "{", "}", "environment", "[", "\"cpu\"", "]", "[", "0", "]", "[", "\"%usage\"", "]", "=", "0.0", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "if", "\"CPU utilization\"", "in", "line", ":", "# CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%", "cpu_regex", "=", "r\"^.*one minute: (\\d+)%; five.*$\"", "match", "=", "re", ".", "search", "(", "cpu_regex", ",", "line", ")", "environment", "[", "\"cpu\"", "]", "[", "0", "]", "[", "\"%usage\"", "]", "=", "float", "(", "match", ".", "group", "(", "1", ")", ")", "break", "output", "=", "self", ".", "_send_command", "(", "mem_cmd", ")", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "if", "\"Processor\"", "in", "line", ":", "_", ",", "_", ",", "proc_total_mem", ",", "proc_used_mem", ",", "_", "=", "line", ".", "split", "(", ")", "[", ":", "5", "]", "elif", "\"I/O\"", "in", "line", "or", "\"io\"", "in", "line", ":", "_", ",", "_", ",", "io_total_mem", ",", "io_used_mem", ",", "_", "=", "line", ".", "split", "(", ")", "[", ":", "5", "]", "total_mem", "=", "int", "(", "proc_total_mem", ")", "+", "int", "(", "io_total_mem", ")", "used_mem", "=", "int", "(", "proc_used_mem", ")", "+", "int", "(", "io_used_mem", ")", "environment", ".", "setdefault", "(", "\"memory\"", ",", "{", "}", ")", "environment", "[", "\"memory\"", "]", "[", "\"used_ram\"", "]", "=", "used_mem", "environment", "[", "\"memory\"", "]", "[", "\"available_ram\"", "]", "=", "total_mem", "environment", ".", "setdefault", "(", "\"temperature\"", ",", "{", "}", ")", "re_temp_value", "=", "re", ".", "compile", "(", "\"(.*) Temperature Value\"", ")", "# The 'show env temperature status' is not ubiquitous in Cisco IOS", "output", "=", "self", ".", "_send_command", "(", "temp_cmd", ")", "if", "\"% Invalid\"", "not", "in", "output", ":", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "m", "=", "re_temp_value", ".", "match", "(", "line", ")", "if", "m", "is", "not", "None", ":", "temp_name", "=", "m", ".", "group", "(", "1", ")", ".", "lower", "(", ")", "temp_value", "=", "float", "(", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "env_value", "=", "{", "\"is_alert\"", ":", "False", ",", "\"is_critical\"", ":", "False", ",", "\"temperature\"", ":", "temp_value", ",", "}", "environment", "[", "\"temperature\"", "]", "[", "temp_name", "]", "=", "env_value", "elif", "\"Yellow Threshold\"", "in", "line", ":", "system_temp_alert", "=", "float", "(", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "if", "temp_value", ">", "system_temp_alert", ":", "env_value", "[", "\"is_alert\"", "]", "=", "True", "elif", "\"Red Threshold\"", "in", "line", ":", "system_temp_crit", "=", "float", "(", "line", ".", "split", "(", "\":\"", ")", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ")", "if", "temp_value", ">", "system_temp_crit", ":", "env_value", "[", "\"is_critical\"", "]", "=", "True", "else", ":", "env_value", "=", "{", "\"is_alert\"", ":", "False", ",", "\"is_critical\"", ":", "False", ",", "\"temperature\"", ":", 
"-", "1.0", "}", "environment", "[", "\"temperature\"", "]", "[", "\"invalid\"", "]", "=", "env_value", "# Initialize 'power' and 'fan' to default values (not implemented)", "environment", ".", "setdefault", "(", "\"power\"", ",", "{", "}", ")", "environment", "[", "\"power\"", "]", "[", "\"invalid\"", "]", "=", "{", "\"status\"", ":", "True", ",", "\"output\"", ":", "-", "1.0", ",", "\"capacity\"", ":", "-", "1.0", ",", "}", "environment", ".", "setdefault", "(", "\"fans\"", ",", "{", "}", ")", "environment", "[", "\"fans\"", "]", "[", "\"invalid\"", "]", "=", "{", "\"status\"", ":", "True", "}", "return", "environment" ]
42.697368
15.697368
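The one-minute CPU figure above comes from a regex over 'show proc cpu' output; a runnable check against a sample IOS line:

import re

sample = "CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%"
# Same pattern as in get_environment: capture the one-minute percentage.
match = re.search(r"^.*one minute: (\d+)%; five.*$", sample)
print(float(match.group(1)))  # -> 2.0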
def main(custom_commandline=None):
    """
    Main function for esptool

    custom_commandline - Optional override for default arguments parsing
    (that uses sys.argv), can be a list of custom arguments as strings.
    Arguments and their values need to be added as individual items to the
    list e.g. "-b 115200" thus becomes ['-b', '115200'].
    """
    parser = argparse.ArgumentParser(description='esptool.py v%s - ESP8266 ROM Bootloader Utility' % __version__, prog='esptool')

    parser.add_argument('--chip', '-c',
                        help='Target chip type',
                        choices=['auto', 'esp8266', 'esp32'],
                        default=os.environ.get('ESPTOOL_CHIP', 'auto'))

    parser.add_argument(
        '--port', '-p',
        help='Serial port device',
        default=os.environ.get('ESPTOOL_PORT', None))

    parser.add_argument(
        '--baud', '-b',
        help='Serial port baud rate used when flashing/reading',
        type=arg_auto_int,
        default=os.environ.get('ESPTOOL_BAUD', ESPLoader.ESP_ROM_BAUD))

    parser.add_argument(
        '--before',
        help='What to do before connecting to the chip',
        choices=['default_reset', 'no_reset', 'no_reset_no_sync'],
        default=os.environ.get('ESPTOOL_BEFORE', 'default_reset'))

    parser.add_argument(
        '--after', '-a',
        help='What to do after esptool.py is finished',
        choices=['hard_reset', 'soft_reset', 'no_reset'],
        default=os.environ.get('ESPTOOL_AFTER', 'hard_reset'))

    parser.add_argument(
        '--no-stub',
        help="Disable launching the flasher stub, only talk to ROM bootloader. Some features will not be available.",
        action='store_true')

    parser.add_argument(
        '--trace', '-t',
        help="Enable trace-level output of esptool.py interactions.",
        action='store_true')

    parser.add_argument(
        '--override-vddsdio',
        help="Override ESP32 VDDSDIO internal voltage regulator (use with care)",
        choices=ESP32ROM.OVERRIDE_VDDSDIO_CHOICES,
        nargs='?')

    subparsers = parser.add_subparsers(
        dest='operation',
        help='Run esptool {command} -h for additional help')

    def add_spi_connection_arg(parent):
        parent.add_argument('--spi-connection', '-sc', help='ESP32-only argument. Override default SPI Flash connection. ' +
                            'Value can be SPI, HSPI or a comma-separated list of 5 I/O numbers to use for SPI flash (CLK,Q,D,HD,CS).',
                            action=SpiConnectionAction)

    parser_load_ram = subparsers.add_parser(
        'load_ram',
        help='Download an image to RAM and execute')
    parser_load_ram.add_argument('filename', help='Firmware image')

    parser_dump_mem = subparsers.add_parser(
        'dump_mem',
        help='Dump arbitrary memory to disk')
    parser_dump_mem.add_argument('address', help='Base address', type=arg_auto_int)
    parser_dump_mem.add_argument('size', help='Size of region to dump', type=arg_auto_int)
    parser_dump_mem.add_argument('filename', help='Name of binary dump')

    parser_read_mem = subparsers.add_parser(
        'read_mem',
        help='Read arbitrary memory location')
    parser_read_mem.add_argument('address', help='Address to read', type=arg_auto_int)

    parser_write_mem = subparsers.add_parser(
        'write_mem',
        help='Read-modify-write to arbitrary memory location')
    parser_write_mem.add_argument('address', help='Address to write', type=arg_auto_int)
    parser_write_mem.add_argument('value', help='Value', type=arg_auto_int)
    parser_write_mem.add_argument('mask', help='Mask of bits to write', type=arg_auto_int)

    def add_spi_flash_subparsers(parent, is_elf2image):
        """ Add common parser arguments for SPI flash properties """
        extra_keep_args = [] if is_elf2image else ['keep']
        auto_detect = not is_elf2image

        if auto_detect:
            extra_fs_message = ", detect, or keep"
        else:
            extra_fs_message = ""

        parent.add_argument('--flash_freq', '-ff', help='SPI Flash frequency',
                            choices=extra_keep_args + ['40m', '26m', '20m', '80m'],
                            default=os.environ.get('ESPTOOL_FF', '40m' if is_elf2image else 'keep'))
        parent.add_argument('--flash_mode', '-fm', help='SPI Flash mode',
                            choices=extra_keep_args + ['qio', 'qout', 'dio', 'dout'],
                            default=os.environ.get('ESPTOOL_FM', 'qio' if is_elf2image else 'keep'))
        parent.add_argument('--flash_size', '-fs', help='SPI Flash size in MegaBytes (1MB, 2MB, 4MB, 8MB, 16M)'
                            ' plus ESP8266-only (256KB, 512KB, 2MB-c1, 4MB-c1)' + extra_fs_message,
                            action=FlashSizeAction, auto_detect=auto_detect,
                            default=os.environ.get('ESPTOOL_FS', 'detect' if auto_detect else '1MB'))
        add_spi_connection_arg(parent)

    parser_write_flash = subparsers.add_parser(
        'write_flash',
        help='Write a binary blob to flash')
    parser_write_flash.add_argument('addr_filename', metavar='<address> <filename>',
                                    help='Address followed by binary filename, separated by space',
                                    action=AddrFilenamePairAction)
    parser_write_flash.add_argument('--erase-all', '-e',
                                    help='Erase all regions of flash (not just write areas) before programming',
                                    action="store_true")
    add_spi_flash_subparsers(parser_write_flash, is_elf2image=False)
    parser_write_flash.add_argument('--no-progress', '-p', help='Suppress progress output', action="store_true")
    parser_write_flash.add_argument('--verify', help='Verify just-written data on flash ' +
                                    '(mostly superfluous, data is read back during flashing)', action='store_true')
    parser_write_flash.add_argument('--encrypt', help='Encrypt before write ',
                                    action='store_true')
    parser_write_flash.add_argument('--ignore-flash-encryption-efuse-setting',
                                    help='Ignore flash encryption efuse settings ',
                                    action='store_true')

    compress_args = parser_write_flash.add_mutually_exclusive_group(required=False)
    compress_args.add_argument('--compress', '-z', help='Compress data in transfer (default unless --no-stub is specified)',
                               action="store_true", default=None)
    compress_args.add_argument('--no-compress', '-u', help='Disable data compression during transfer (default if --no-stub is specified)',
                               action="store_true")

    subparsers.add_parser(
        'run',
        help='Run application code in flash')

    parser_image_info = subparsers.add_parser(
        'image_info',
        help='Dump headers from an application image')
    parser_image_info.add_argument('filename', help='Image file to parse')

    parser_make_image = subparsers.add_parser(
        'make_image',
        help='Create an application image from binary files')
    parser_make_image.add_argument('output', help='Output image file')
    parser_make_image.add_argument('--segfile', '-f', action='append', help='Segment input file')
    parser_make_image.add_argument('--segaddr', '-a', action='append', help='Segment base address', type=arg_auto_int)
    parser_make_image.add_argument('--entrypoint', '-e', help='Address of entry point', type=arg_auto_int, default=0)

    parser_elf2image = subparsers.add_parser(
        'elf2image',
        help='Create an application image from ELF file')
    parser_elf2image.add_argument('input', help='Input ELF file')
    parser_elf2image.add_argument('--output', '-o', help='Output filename prefix (for version 1 image), or filename (for version 2 single image)', type=str)
    parser_elf2image.add_argument('--version', '-e', help='Output image version', choices=['1', '2'], default='1')
    parser_elf2image.add_argument('--secure-pad', action='store_true',
                                  help='Pad image so once signed it will end on a 64KB boundary. For ESP32 images only.')
    parser_elf2image.add_argument('--elf-sha256-offset', help='If set, insert SHA256 hash (32 bytes) of the input ELF file at specified offset in the binary.',
                                  type=arg_auto_int, default=None)

    add_spi_flash_subparsers(parser_elf2image, is_elf2image=True)

    subparsers.add_parser(
        'read_mac',
        help='Read MAC address from OTP ROM')

    subparsers.add_parser(
        'chip_id',
        help='Read Chip ID from OTP ROM')

    parser_flash_id = subparsers.add_parser(
        'flash_id',
        help='Read SPI flash manufacturer and device ID')
    add_spi_connection_arg(parser_flash_id)

    parser_read_status = subparsers.add_parser(
        'read_flash_status',
        help='Read SPI flash status register')
    add_spi_connection_arg(parser_read_status)
    parser_read_status.add_argument('--bytes', help='Number of bytes to read (1-3)', type=int, choices=[1, 2, 3], default=2)

    parser_write_status = subparsers.add_parser(
        'write_flash_status',
        help='Write SPI flash status register')
    add_spi_connection_arg(parser_write_status)
    parser_write_status.add_argument('--non-volatile', help='Write non-volatile bits (use with caution)', action='store_true')
    parser_write_status.add_argument('--bytes', help='Number of status bytes to write (1-3)', type=int, choices=[1, 2, 3], default=2)
    parser_write_status.add_argument('value', help='New value', type=arg_auto_int)

    parser_read_flash = subparsers.add_parser(
        'read_flash',
        help='Read SPI flash content')
    add_spi_connection_arg(parser_read_flash)
    parser_read_flash.add_argument('address', help='Start address', type=arg_auto_int)
    parser_read_flash.add_argument('size', help='Size of region to dump', type=arg_auto_int)
    parser_read_flash.add_argument('filename', help='Name of binary dump')
    parser_read_flash.add_argument('--no-progress', '-p', help='Suppress progress output', action="store_true")

    parser_verify_flash = subparsers.add_parser(
        'verify_flash',
        help='Verify a binary blob against flash')
    parser_verify_flash.add_argument('addr_filename', help='Address and binary file to verify there, separated by space',
                                     action=AddrFilenamePairAction)
    parser_verify_flash.add_argument('--diff', '-d', help='Show differences',
                                     choices=['no', 'yes'], default='no')
    add_spi_flash_subparsers(parser_verify_flash, is_elf2image=False)

    parser_erase_flash = subparsers.add_parser(
        'erase_flash',
        help='Perform Chip Erase on SPI flash')
    add_spi_connection_arg(parser_erase_flash)

    parser_erase_region = subparsers.add_parser(
        'erase_region',
        help='Erase a region of the flash')
    add_spi_connection_arg(parser_erase_region)
    parser_erase_region.add_argument('address', help='Start address (must be multiple of 4096)', type=arg_auto_int)
    parser_erase_region.add_argument('size', help='Size of region to erase (must be multiple of 4096)', type=arg_auto_int)

    subparsers.add_parser(
        'version', help='Print esptool version')

    # internal sanity check - every operation matches a module function of the same name
    for operation in subparsers.choices.keys():
        assert operation in globals(), "%s should be a module function" % operation

    expand_file_arguments()

    args = parser.parse_args(custom_commandline)
    print('esptool.py v%s' % __version__)

    # operation function can take 1 arg (args), 2 args (esp, arg)
    # or be a member function of the ESPLoader class.

    if args.operation is None:
        parser.print_help()
        sys.exit(1)

    operation_func = globals()[args.operation]

    if PYTHON2:
        # This function is deprecated in Python3
        operation_args = inspect.getargspec(operation_func).args
    else:
        operation_args = inspect.getfullargspec(operation_func).args

    if operation_args[0] == 'esp':  # operation function takes an ESPLoader connection object
        if args.before != "no_reset_no_sync":
            initial_baud = min(ESPLoader.ESP_ROM_BAUD, args.baud)  # don't sync faster than the default baud rate
        else:
            initial_baud = args.baud

        if args.port is None:
            ser_list = sorted(ports.device for ports in list_ports.comports())
            print("Found %d serial ports" % len(ser_list))
        else:
            ser_list = [args.port]
        esp = None
        for each_port in reversed(ser_list):
            print("Serial port %s" % each_port)
            try:
                if args.chip == 'auto':
                    esp = ESPLoader.detect_chip(each_port, initial_baud, args.before, args.trace)
                else:
                    chip_class = {
                        'esp8266': ESP8266ROM,
                        'esp32': ESP32ROM,
                    }[args.chip]
                    esp = chip_class(each_port, initial_baud, args.trace)
                    esp.connect(args.before)
                break
            except (FatalError, OSError) as err:
                if args.port is not None:
                    raise
                print("%s failed to connect: %s" % (each_port, err))
                esp = None
        if esp is None:
            raise FatalError("Could not connect to an Espressif device on any of the %d available serial ports." % len(ser_list))

        print("Chip is %s" % (esp.get_chip_description()))
        print("Features: %s" % ", ".join(esp.get_chip_features()))
        print("Crystal is %dMHz" % esp.get_crystal_freq())
        read_mac(esp, args)

        if not args.no_stub:
            esp = esp.run_stub()

        if args.override_vddsdio:
            esp.override_vddsdio(args.override_vddsdio)

        if args.baud > initial_baud:
            try:
                esp.change_baud(args.baud)
            except NotImplementedInROMError:
                print("WARNING: ROM doesn't support changing baud rate. Keeping initial baud rate %d" % initial_baud)

        # override common SPI flash parameter stuff if configured to do so
        if hasattr(args, "spi_connection") and args.spi_connection is not None:
            if esp.CHIP_NAME != "ESP32":
                raise FatalError("Chip %s does not support --spi-connection option."
                                 % esp.CHIP_NAME)
            print("Configuring SPI flash mode...")
            esp.flash_spi_attach(args.spi_connection)
        elif args.no_stub:
            print("Enabling default SPI flash mode...")
            # ROM loader doesn't enable flash unless we explicitly do it
            esp.flash_spi_attach(0)
        if hasattr(args, "flash_size"):
            print("Configuring flash size...")
            detect_flash_size(esp, args)
            if args.flash_size != 'keep':  # TODO: should set this even with 'keep'
                esp.flash_set_parameters(flash_size_bytes(args.flash_size))

        try:
            operation_func(esp, args)
        finally:
            try:  # Clean up AddrFilenamePairAction files
                for address, argfile in args.addr_filename:
                    argfile.close()
            except AttributeError:
                pass

        # Handle post-operation behaviour (reset or other)
        if operation_func == load_ram:
            # the ESP is now running the loaded image, so let it run
            print('Exiting immediately.')
        elif args.after == 'hard_reset':
            print('Hard resetting via RTS pin...')
            esp.hard_reset()
        elif args.after == 'soft_reset':
            print('Soft resetting...')
            # flash_finish will trigger a soft reset
            esp.soft_reset(False)
        else:
            print('Staying in bootloader.')
            if esp.IS_STUB:
                esp.soft_reset(True)  # exit stub back to ROM loader

        esp._port.close()

    else:
        operation_func(args)
[ "def", "main", "(", "custom_commandline", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'esptool.py v%s - ESP8266 ROM Bootloader Utility'", "%", "__version__", ",", "prog", "=", "'esptool'", ")", "parser", ".", "add_argument", "(", "'--chip'", ",", "'-c'", ",", "help", "=", "'Target chip type'", ",", "choices", "=", "[", "'auto'", ",", "'esp8266'", ",", "'esp32'", "]", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_CHIP'", ",", "'auto'", ")", ")", "parser", ".", "add_argument", "(", "'--port'", ",", "'-p'", ",", "help", "=", "'Serial port device'", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_PORT'", ",", "None", ")", ")", "parser", ".", "add_argument", "(", "'--baud'", ",", "'-b'", ",", "help", "=", "'Serial port baud rate used when flashing/reading'", ",", "type", "=", "arg_auto_int", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_BAUD'", ",", "ESPLoader", ".", "ESP_ROM_BAUD", ")", ")", "parser", ".", "add_argument", "(", "'--before'", ",", "help", "=", "'What to do before connecting to the chip'", ",", "choices", "=", "[", "'default_reset'", ",", "'no_reset'", ",", "'no_reset_no_sync'", "]", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_BEFORE'", ",", "'default_reset'", ")", ")", "parser", ".", "add_argument", "(", "'--after'", ",", "'-a'", ",", "help", "=", "'What to do after esptool.py is finished'", ",", "choices", "=", "[", "'hard_reset'", ",", "'soft_reset'", ",", "'no_reset'", "]", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_AFTER'", ",", "'hard_reset'", ")", ")", "parser", ".", "add_argument", "(", "'--no-stub'", ",", "help", "=", "\"Disable launching the flasher stub, only talk to ROM bootloader. Some features will not be available.\"", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--trace'", ",", "'-t'", ",", "help", "=", "\"Enable trace-level output of esptool.py interactions.\"", ",", "action", "=", "'store_true'", ")", "parser", ".", "add_argument", "(", "'--override-vddsdio'", ",", "help", "=", "\"Override ESP32 VDDSDIO internal voltage regulator (use with care)\"", ",", "choices", "=", "ESP32ROM", ".", "OVERRIDE_VDDSDIO_CHOICES", ",", "nargs", "=", "'?'", ")", "subparsers", "=", "parser", ".", "add_subparsers", "(", "dest", "=", "'operation'", ",", "help", "=", "'Run esptool {command} -h for additional help'", ")", "def", "add_spi_connection_arg", "(", "parent", ")", ":", "parent", ".", "add_argument", "(", "'--spi-connection'", ",", "'-sc'", ",", "help", "=", "'ESP32-only argument. Override default SPI Flash connection. 
'", "+", "'Value can be SPI, HSPI or a comma-separated list of 5 I/O numbers to use for SPI flash (CLK,Q,D,HD,CS).'", ",", "action", "=", "SpiConnectionAction", ")", "parser_load_ram", "=", "subparsers", ".", "add_parser", "(", "'load_ram'", ",", "help", "=", "'Download an image to RAM and execute'", ")", "parser_load_ram", ".", "add_argument", "(", "'filename'", ",", "help", "=", "'Firmware image'", ")", "parser_dump_mem", "=", "subparsers", ".", "add_parser", "(", "'dump_mem'", ",", "help", "=", "'Dump arbitrary memory to disk'", ")", "parser_dump_mem", ".", "add_argument", "(", "'address'", ",", "help", "=", "'Base address'", ",", "type", "=", "arg_auto_int", ")", "parser_dump_mem", ".", "add_argument", "(", "'size'", ",", "help", "=", "'Size of region to dump'", ",", "type", "=", "arg_auto_int", ")", "parser_dump_mem", ".", "add_argument", "(", "'filename'", ",", "help", "=", "'Name of binary dump'", ")", "parser_read_mem", "=", "subparsers", ".", "add_parser", "(", "'read_mem'", ",", "help", "=", "'Read arbitrary memory location'", ")", "parser_read_mem", ".", "add_argument", "(", "'address'", ",", "help", "=", "'Address to read'", ",", "type", "=", "arg_auto_int", ")", "parser_write_mem", "=", "subparsers", ".", "add_parser", "(", "'write_mem'", ",", "help", "=", "'Read-modify-write to arbitrary memory location'", ")", "parser_write_mem", ".", "add_argument", "(", "'address'", ",", "help", "=", "'Address to write'", ",", "type", "=", "arg_auto_int", ")", "parser_write_mem", ".", "add_argument", "(", "'value'", ",", "help", "=", "'Value'", ",", "type", "=", "arg_auto_int", ")", "parser_write_mem", ".", "add_argument", "(", "'mask'", ",", "help", "=", "'Mask of bits to write'", ",", "type", "=", "arg_auto_int", ")", "def", "add_spi_flash_subparsers", "(", "parent", ",", "is_elf2image", ")", ":", "\"\"\" Add common parser arguments for SPI flash properties \"\"\"", "extra_keep_args", "=", "[", "]", "if", "is_elf2image", "else", "[", "'keep'", "]", "auto_detect", "=", "not", "is_elf2image", "if", "auto_detect", ":", "extra_fs_message", "=", "\", detect, or keep\"", "else", ":", "extra_fs_message", "=", "\"\"", "parent", ".", "add_argument", "(", "'--flash_freq'", ",", "'-ff'", ",", "help", "=", "'SPI Flash frequency'", ",", "choices", "=", "extra_keep_args", "+", "[", "'40m'", ",", "'26m'", ",", "'20m'", ",", "'80m'", "]", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_FF'", ",", "'40m'", "if", "is_elf2image", "else", "'keep'", ")", ")", "parent", ".", "add_argument", "(", "'--flash_mode'", ",", "'-fm'", ",", "help", "=", "'SPI Flash mode'", ",", "choices", "=", "extra_keep_args", "+", "[", "'qio'", ",", "'qout'", ",", "'dio'", ",", "'dout'", "]", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_FM'", ",", "'qio'", "if", "is_elf2image", "else", "'keep'", ")", ")", "parent", ".", "add_argument", "(", "'--flash_size'", ",", "'-fs'", ",", "help", "=", "'SPI Flash size in MegaBytes (1MB, 2MB, 4MB, 8MB, 16M)'", "' plus ESP8266-only (256KB, 512KB, 2MB-c1, 4MB-c1)'", "+", "extra_fs_message", ",", "action", "=", "FlashSizeAction", ",", "auto_detect", "=", "auto_detect", ",", "default", "=", "os", ".", "environ", ".", "get", "(", "'ESPTOOL_FS'", ",", "'detect'", "if", "auto_detect", "else", "'1MB'", ")", ")", "add_spi_connection_arg", "(", "parent", ")", "parser_write_flash", "=", "subparsers", ".", "add_parser", "(", "'write_flash'", ",", "help", "=", "'Write a binary blob to flash'", ")", "parser_write_flash", ".", "add_argument", "(", 
"'addr_filename'", ",", "metavar", "=", "'<address> <filename>'", ",", "help", "=", "'Address followed by binary filename, separated by space'", ",", "action", "=", "AddrFilenamePairAction", ")", "parser_write_flash", ".", "add_argument", "(", "'--erase-all'", ",", "'-e'", ",", "help", "=", "'Erase all regions of flash (not just write areas) before programming'", ",", "action", "=", "\"store_true\"", ")", "add_spi_flash_subparsers", "(", "parser_write_flash", ",", "is_elf2image", "=", "False", ")", "parser_write_flash", ".", "add_argument", "(", "'--no-progress'", ",", "'-p'", ",", "help", "=", "'Suppress progress output'", ",", "action", "=", "\"store_true\"", ")", "parser_write_flash", ".", "add_argument", "(", "'--verify'", ",", "help", "=", "'Verify just-written data on flash '", "+", "'(mostly superfluous, data is read back during flashing)'", ",", "action", "=", "'store_true'", ")", "parser_write_flash", ".", "add_argument", "(", "'--encrypt'", ",", "help", "=", "'Encrypt before write '", ",", "action", "=", "'store_true'", ")", "parser_write_flash", ".", "add_argument", "(", "'--ignore-flash-encryption-efuse-setting'", ",", "help", "=", "'Ignore flash encryption efuse settings '", ",", "action", "=", "'store_true'", ")", "compress_args", "=", "parser_write_flash", ".", "add_mutually_exclusive_group", "(", "required", "=", "False", ")", "compress_args", ".", "add_argument", "(", "'--compress'", ",", "'-z'", ",", "help", "=", "'Compress data in transfer (default unless --no-stub is specified)'", ",", "action", "=", "\"store_true\"", ",", "default", "=", "None", ")", "compress_args", ".", "add_argument", "(", "'--no-compress'", ",", "'-u'", ",", "help", "=", "'Disable data compression during transfer (default if --no-stub is specified)'", ",", "action", "=", "\"store_true\"", ")", "subparsers", ".", "add_parser", "(", "'run'", ",", "help", "=", "'Run application code in flash'", ")", "parser_image_info", "=", "subparsers", ".", "add_parser", "(", "'image_info'", ",", "help", "=", "'Dump headers from an application image'", ")", "parser_image_info", ".", "add_argument", "(", "'filename'", ",", "help", "=", "'Image file to parse'", ")", "parser_make_image", "=", "subparsers", ".", "add_parser", "(", "'make_image'", ",", "help", "=", "'Create an application image from binary files'", ")", "parser_make_image", ".", "add_argument", "(", "'output'", ",", "help", "=", "'Output image file'", ")", "parser_make_image", ".", "add_argument", "(", "'--segfile'", ",", "'-f'", ",", "action", "=", "'append'", ",", "help", "=", "'Segment input file'", ")", "parser_make_image", ".", "add_argument", "(", "'--segaddr'", ",", "'-a'", ",", "action", "=", "'append'", ",", "help", "=", "'Segment base address'", ",", "type", "=", "arg_auto_int", ")", "parser_make_image", ".", "add_argument", "(", "'--entrypoint'", ",", "'-e'", ",", "help", "=", "'Address of entry point'", ",", "type", "=", "arg_auto_int", ",", "default", "=", "0", ")", "parser_elf2image", "=", "subparsers", ".", "add_parser", "(", "'elf2image'", ",", "help", "=", "'Create an application image from ELF file'", ")", "parser_elf2image", ".", "add_argument", "(", "'input'", ",", "help", "=", "'Input ELF file'", ")", "parser_elf2image", ".", "add_argument", "(", "'--output'", ",", "'-o'", ",", "help", "=", "'Output filename prefix (for version 1 image), or filename (for version 2 single image)'", ",", "type", "=", "str", ")", "parser_elf2image", ".", "add_argument", "(", "'--version'", ",", "'-e'", ",", "help", "=", "'Output image version'", ",", 
"choices", "=", "[", "'1'", ",", "'2'", "]", ",", "default", "=", "'1'", ")", "parser_elf2image", ".", "add_argument", "(", "'--secure-pad'", ",", "action", "=", "'store_true'", ",", "help", "=", "'Pad image so once signed it will end on a 64KB boundary. For ESP32 images only.'", ")", "parser_elf2image", ".", "add_argument", "(", "'--elf-sha256-offset'", ",", "help", "=", "'If set, insert SHA256 hash (32 bytes) of the input ELF file at specified offset in the binary.'", ",", "type", "=", "arg_auto_int", ",", "default", "=", "None", ")", "add_spi_flash_subparsers", "(", "parser_elf2image", ",", "is_elf2image", "=", "True", ")", "subparsers", ".", "add_parser", "(", "'read_mac'", ",", "help", "=", "'Read MAC address from OTP ROM'", ")", "subparsers", ".", "add_parser", "(", "'chip_id'", ",", "help", "=", "'Read Chip ID from OTP ROM'", ")", "parser_flash_id", "=", "subparsers", ".", "add_parser", "(", "'flash_id'", ",", "help", "=", "'Read SPI flash manufacturer and device ID'", ")", "add_spi_connection_arg", "(", "parser_flash_id", ")", "parser_read_status", "=", "subparsers", ".", "add_parser", "(", "'read_flash_status'", ",", "help", "=", "'Read SPI flash status register'", ")", "add_spi_connection_arg", "(", "parser_read_status", ")", "parser_read_status", ".", "add_argument", "(", "'--bytes'", ",", "help", "=", "'Number of bytes to read (1-3)'", ",", "type", "=", "int", ",", "choices", "=", "[", "1", ",", "2", ",", "3", "]", ",", "default", "=", "2", ")", "parser_write_status", "=", "subparsers", ".", "add_parser", "(", "'write_flash_status'", ",", "help", "=", "'Write SPI flash status register'", ")", "add_spi_connection_arg", "(", "parser_write_status", ")", "parser_write_status", ".", "add_argument", "(", "'--non-volatile'", ",", "help", "=", "'Write non-volatile bits (use with caution)'", ",", "action", "=", "'store_true'", ")", "parser_write_status", ".", "add_argument", "(", "'--bytes'", ",", "help", "=", "'Number of status bytes to write (1-3)'", ",", "type", "=", "int", ",", "choices", "=", "[", "1", ",", "2", ",", "3", "]", ",", "default", "=", "2", ")", "parser_write_status", ".", "add_argument", "(", "'value'", ",", "help", "=", "'New value'", ",", "type", "=", "arg_auto_int", ")", "parser_read_flash", "=", "subparsers", ".", "add_parser", "(", "'read_flash'", ",", "help", "=", "'Read SPI flash content'", ")", "add_spi_connection_arg", "(", "parser_read_flash", ")", "parser_read_flash", ".", "add_argument", "(", "'address'", ",", "help", "=", "'Start address'", ",", "type", "=", "arg_auto_int", ")", "parser_read_flash", ".", "add_argument", "(", "'size'", ",", "help", "=", "'Size of region to dump'", ",", "type", "=", "arg_auto_int", ")", "parser_read_flash", ".", "add_argument", "(", "'filename'", ",", "help", "=", "'Name of binary dump'", ")", "parser_read_flash", ".", "add_argument", "(", "'--no-progress'", ",", "'-p'", ",", "help", "=", "'Suppress progress output'", ",", "action", "=", "\"store_true\"", ")", "parser_verify_flash", "=", "subparsers", ".", "add_parser", "(", "'verify_flash'", ",", "help", "=", "'Verify a binary blob against flash'", ")", "parser_verify_flash", ".", "add_argument", "(", "'addr_filename'", ",", "help", "=", "'Address and binary file to verify there, separated by space'", ",", "action", "=", "AddrFilenamePairAction", ")", "parser_verify_flash", ".", "add_argument", "(", "'--diff'", ",", "'-d'", ",", "help", "=", "'Show differences'", ",", "choices", "=", "[", "'no'", ",", "'yes'", "]", ",", "default", "=", "'no'", ")", 
"add_spi_flash_subparsers", "(", "parser_verify_flash", ",", "is_elf2image", "=", "False", ")", "parser_erase_flash", "=", "subparsers", ".", "add_parser", "(", "'erase_flash'", ",", "help", "=", "'Perform Chip Erase on SPI flash'", ")", "add_spi_connection_arg", "(", "parser_erase_flash", ")", "parser_erase_region", "=", "subparsers", ".", "add_parser", "(", "'erase_region'", ",", "help", "=", "'Erase a region of the flash'", ")", "add_spi_connection_arg", "(", "parser_erase_region", ")", "parser_erase_region", ".", "add_argument", "(", "'address'", ",", "help", "=", "'Start address (must be multiple of 4096)'", ",", "type", "=", "arg_auto_int", ")", "parser_erase_region", ".", "add_argument", "(", "'size'", ",", "help", "=", "'Size of region to erase (must be multiple of 4096)'", ",", "type", "=", "arg_auto_int", ")", "subparsers", ".", "add_parser", "(", "'version'", ",", "help", "=", "'Print esptool version'", ")", "# internal sanity check - every operation matches a module function of the same name", "for", "operation", "in", "subparsers", ".", "choices", ".", "keys", "(", ")", ":", "assert", "operation", "in", "globals", "(", ")", ",", "\"%s should be a module function\"", "%", "operation", "expand_file_arguments", "(", ")", "args", "=", "parser", ".", "parse_args", "(", "custom_commandline", ")", "print", "(", "'esptool.py v%s'", "%", "__version__", ")", "# operation function can take 1 arg (args), 2 args (esp, arg)", "# or be a member function of the ESPLoader class.", "if", "args", ".", "operation", "is", "None", ":", "parser", ".", "print_help", "(", ")", "sys", ".", "exit", "(", "1", ")", "operation_func", "=", "globals", "(", ")", "[", "args", ".", "operation", "]", "if", "PYTHON2", ":", "# This function is depreciated in Python3", "operation_args", "=", "inspect", ".", "getargspec", "(", "operation_func", ")", ".", "args", "else", ":", "operation_args", "=", "inspect", ".", "getfullargspec", "(", "operation_func", ")", ".", "args", "if", "operation_args", "[", "0", "]", "==", "'esp'", ":", "# operation function takes an ESPLoader connection object", "if", "args", ".", "before", "!=", "\"no_reset_no_sync\"", ":", "initial_baud", "=", "min", "(", "ESPLoader", ".", "ESP_ROM_BAUD", ",", "args", ".", "baud", ")", "# don't sync faster than the default baud rate", "else", ":", "initial_baud", "=", "args", ".", "baud", "if", "args", ".", "port", "is", "None", ":", "ser_list", "=", "sorted", "(", "ports", ".", "device", "for", "ports", "in", "list_ports", ".", "comports", "(", ")", ")", "print", "(", "\"Found %d serial ports\"", "%", "len", "(", "ser_list", ")", ")", "else", ":", "ser_list", "=", "[", "args", ".", "port", "]", "esp", "=", "None", "for", "each_port", "in", "reversed", "(", "ser_list", ")", ":", "print", "(", "\"Serial port %s\"", "%", "each_port", ")", "try", ":", "if", "args", ".", "chip", "==", "'auto'", ":", "esp", "=", "ESPLoader", ".", "detect_chip", "(", "each_port", ",", "initial_baud", ",", "args", ".", "before", ",", "args", ".", "trace", ")", "else", ":", "chip_class", "=", "{", "'esp8266'", ":", "ESP8266ROM", ",", "'esp32'", ":", "ESP32ROM", ",", "}", "[", "args", ".", "chip", "]", "esp", "=", "chip_class", "(", "each_port", ",", "initial_baud", ",", "args", ".", "trace", ")", "esp", ".", "connect", "(", "args", ".", "before", ")", "break", "except", "(", "FatalError", ",", "OSError", ")", "as", "err", ":", "if", "args", ".", "port", "is", "not", "None", ":", "raise", "print", "(", "\"%s failed to connect: %s\"", "%", "(", "each_port", ",", "err", ")", ")", 
"esp", "=", "None", "if", "esp", "is", "None", ":", "raise", "FatalError", "(", "\"Could not connect to an Espressif device on any of the %d available serial ports.\"", "%", "len", "(", "ser_list", ")", ")", "print", "(", "\"Chip is %s\"", "%", "(", "esp", ".", "get_chip_description", "(", ")", ")", ")", "print", "(", "\"Features: %s\"", "%", "\", \"", ".", "join", "(", "esp", ".", "get_chip_features", "(", ")", ")", ")", "print", "(", "\"Crystal is %dMHz\"", "%", "esp", ".", "get_crystal_freq", "(", ")", ")", "read_mac", "(", "esp", ",", "args", ")", "if", "not", "args", ".", "no_stub", ":", "esp", "=", "esp", ".", "run_stub", "(", ")", "if", "args", ".", "override_vddsdio", ":", "esp", ".", "override_vddsdio", "(", "args", ".", "override_vddsdio", ")", "if", "args", ".", "baud", ">", "initial_baud", ":", "try", ":", "esp", ".", "change_baud", "(", "args", ".", "baud", ")", "except", "NotImplementedInROMError", ":", "print", "(", "\"WARNING: ROM doesn't support changing baud rate. Keeping initial baud rate %d\"", "%", "initial_baud", ")", "# override common SPI flash parameter stuff if configured to do so", "if", "hasattr", "(", "args", ",", "\"spi_connection\"", ")", "and", "args", ".", "spi_connection", "is", "not", "None", ":", "if", "esp", ".", "CHIP_NAME", "!=", "\"ESP32\"", ":", "raise", "FatalError", "(", "\"Chip %s does not support --spi-connection option.\"", "%", "esp", ".", "CHIP_NAME", ")", "print", "(", "\"Configuring SPI flash mode...\"", ")", "esp", ".", "flash_spi_attach", "(", "args", ".", "spi_connection", ")", "elif", "args", ".", "no_stub", ":", "print", "(", "\"Enabling default SPI flash mode...\"", ")", "# ROM loader doesn't enable flash unless we explicitly do it", "esp", ".", "flash_spi_attach", "(", "0", ")", "if", "hasattr", "(", "args", ",", "\"flash_size\"", ")", ":", "print", "(", "\"Configuring flash size...\"", ")", "detect_flash_size", "(", "esp", ",", "args", ")", "if", "args", ".", "flash_size", "!=", "'keep'", ":", "# TODO: should set this even with 'keep'", "esp", ".", "flash_set_parameters", "(", "flash_size_bytes", "(", "args", ".", "flash_size", ")", ")", "try", ":", "operation_func", "(", "esp", ",", "args", ")", "finally", ":", "try", ":", "# Clean up AddrFilenamePairAction files", "for", "address", ",", "argfile", "in", "args", ".", "addr_filename", ":", "argfile", ".", "close", "(", ")", "except", "AttributeError", ":", "pass", "# Handle post-operation behaviour (reset or other)", "if", "operation_func", "==", "load_ram", ":", "# the ESP is now running the loaded image, so let it run", "print", "(", "'Exiting immediately.'", ")", "elif", "args", ".", "after", "==", "'hard_reset'", ":", "print", "(", "'Hard resetting via RTS pin...'", ")", "esp", ".", "hard_reset", "(", ")", "elif", "args", ".", "after", "==", "'soft_reset'", ":", "print", "(", "'Soft resetting...'", ")", "# flash_finish will trigger a soft reset", "esp", ".", "soft_reset", "(", "False", ")", "else", ":", "print", "(", "'Staying in bootloader.'", ")", "if", "esp", ".", "IS_STUB", ":", "esp", ".", "soft_reset", "(", "True", ")", "# exit stub back to ROM loader", "esp", ".", "_port", ".", "close", "(", ")", "else", ":", "operation_func", "(", "args", ")" ]
45.377522
28
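A minimal sketch of the subparser pattern encoded in the token list above: argparse subcommands whose names must match module-level functions, mirrored by the sanity check near the end of the tokens. The two commands and their arguments are taken from the tokens; everything else is illustrative, not esptool's full interface.

import argparse

def read_mac(args):
    print("read_mac called")

def erase_region(args):
    print("erase 0x%x..0x%x" % (args.address, args.address + args.size))

def arg_auto_int(x):
    return int(x, 0)  # accepts decimal or 0x-prefixed hex

parser = argparse.ArgumentParser(prog="esptool.py")
subparsers = parser.add_subparsers(dest="operation")
subparsers.add_parser("read_mac", help="Read MAC address from OTP ROM")
p = subparsers.add_parser("erase_region", help="Erase a region of the flash")
p.add_argument("address", type=arg_auto_int)
p.add_argument("size", type=arg_auto_int)

# internal sanity check, as in the tokens: every operation name must be
# a module-level function of the same name
for operation in subparsers.choices.keys():
    assert operation in globals(), "%s should be a module function" % operation

args = parser.parse_args(["erase_region", "0x1000", "0x4000"])
globals()[args.operation](args)  # dispatch, as main() does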
def start_transports(self): """start thread transports.""" self.transport = Transport( self.queue, self.batch_size, self.batch_interval, self.session_factory) thread = threading.Thread(target=self.transport.loop) self.threads.append(thread) thread.daemon = True thread.start()
[ "def", "start_transports", "(", "self", ")", ":", "self", ".", "transport", "=", "Transport", "(", "self", ".", "queue", ",", "self", ".", "batch_size", ",", "self", ".", "batch_interval", ",", "self", ".", "session_factory", ")", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "transport", ".", "loop", ")", "self", ".", "threads", ".", "append", "(", "thread", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "start", "(", ")" ]
37.777778
11.333333
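A self-contained sketch of the daemon-transport pattern used above, with a stand-in Transport that drains a shared queue (batching omitted for brevity; the real class takes a batch size, interval and session factory):

import queue
import threading

class Transport:
    def __init__(self, q):
        self.q = q

    def loop(self):
        while True:
            item = self.q.get()
            if item is None:  # sentinel to stop the worker
                return
            # ... send item (batched in the real implementation) ...

q = queue.Queue()
transport = Transport(q)
thread = threading.Thread(target=transport.loop)
thread.daemon = True  # don't block interpreter exit
thread.start()
q.put(None)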
def _combine_arglist(self, args, kwargs): """Combine the default values and the supplied values.""" gmxargs = self.gmxargs.copy() gmxargs.update(self._combineargs(*args, **kwargs)) return (), gmxargs
[ "def", "_combine_arglist", "(", "self", ",", "args", ",", "kwargs", ")", ":", "gmxargs", "=", "self", ".", "gmxargs", ".", "copy", "(", ")", "gmxargs", ".", "update", "(", "self", ".", "_combineargs", "(", "*", "args", ",", "*", "*", "kwargs", ")", ")", "return", "(", ")", ",", "gmxargs" ]
45.4
7.2
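A worked instance of the defaults-then-overrides merge performed above (keys are hypothetical GROMACS-style flags):

gmxargs = {"nsteps": 1000, "v": True}        # stored defaults
supplied = {"nsteps": 5000, "o": "out.trr"}  # per-call values
merged = gmxargs.copy()
merged.update(supplied)                      # supplied values win
assert merged == {"nsteps": 5000, "v": True, "o": "out.trr"}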
def UpdateUser(username, password=None, is_admin=False): """Updates the password or privilege-level for a user.""" user_type, password = _GetUserTypeAndPassword( username, password=password, is_admin=is_admin) grr_api = maintenance_utils.InitGRRRootAPI() grr_user = grr_api.GrrUser(username).Get() grr_user.Modify(user_type=user_type, password=password)
[ "def", "UpdateUser", "(", "username", ",", "password", "=", "None", ",", "is_admin", "=", "False", ")", ":", "user_type", ",", "password", "=", "_GetUserTypeAndPassword", "(", "username", ",", "password", "=", "password", ",", "is_admin", "=", "is_admin", ")", "grr_api", "=", "maintenance_utils", ".", "InitGRRRootAPI", "(", ")", "grr_user", "=", "grr_api", ".", "GrrUser", "(", "username", ")", ".", "Get", "(", ")", "grr_user", ".", "Modify", "(", "user_type", "=", "user_type", ",", "password", "=", "password", ")" ]
51.857143
9.142857
def values(self, corr_plus, corr_cross, snrv, psd, indices, template_plus, template_cross, u_vals, hplus_cross_corr, hpnorm, hcnorm): """ Calculate the chisq at points given by indices. Returns ------- chisq: Array Chisq values, one for each sample index chisq_dof: Array Number of statistical degrees of freedom for the chisq test in the given template """ if self.do: num_above = len(indices) if self.snr_threshold: above = abs(snrv) > self.snr_threshold num_above = above.sum() logging.info('%s above chisq activation threshold' % num_above) above_indices = indices[above] above_snrv = snrv[above] rchisq = numpy.zeros(len(indices), dtype=numpy.float32) dof = -100 else: above_indices = indices above_snrv = snrv if num_above > 0: chisq = [] curr_tmplt_mult_fac = 0. curr_corr_mult_fac = 0. if self.template_mem is None or \ (not len(self.template_mem) == len(template_plus)): self.template_mem = zeros(len(template_plus), dtype=complex_same_precision_as(corr_plus)) if self.corr_mem is None or \ (not len(self.corr_mem) == len(corr_plus)): self.corr_mem = zeros(len(corr_plus), dtype=complex_same_precision_as(corr_plus)) tmplt_data = template_cross.data corr_data = corr_cross.data numpy.copyto(self.template_mem.data, template_cross.data) numpy.copyto(self.corr_mem.data, corr_cross.data) template_cross._data = self.template_mem.data corr_cross._data = self.corr_mem.data for lidx, index in enumerate(above_indices): above_local_indices = numpy.array([index]) above_local_snr = numpy.array([above_snrv[lidx]]) local_u_val = u_vals[lidx] # Construct template from _plus and _cross # Note that this modifies in place, so we store that and # revert on the next pass. template = template_cross.multiply_and_add(template_plus, local_u_val-curr_tmplt_mult_fac) curr_tmplt_mult_fac = local_u_val template.f_lower = template_plus.f_lower template.params = template_plus.params # Construct the corr vector norm_fac = local_u_val*local_u_val + 1 norm_fac += 2 * local_u_val * hplus_cross_corr norm_fac = hcnorm / (norm_fac**0.5) hp_fac = local_u_val * hpnorm / hcnorm corr = corr_cross.multiply_and_add(corr_plus, hp_fac - curr_corr_mult_fac) curr_corr_mult_fac = hp_fac bins = self.calculate_chisq_bins(template, psd) dof = (len(bins) - 1) * 2 - 2 curr_chisq = power_chisq_at_points_from_precomputed(corr, above_local_snr/ norm_fac, norm_fac, bins, above_local_indices) chisq.append(curr_chisq[0]) chisq = numpy.array(chisq) # Must reset corr and template to original values! template_cross._data = tmplt_data corr_cross._data = corr_data if self.snr_threshold: if num_above > 0: rchisq[above] = chisq else: rchisq = chisq return rchisq, numpy.repeat(dof, len(indices))# dof * numpy.ones_like(indices) else: return None, None
[ "def", "values", "(", "self", ",", "corr_plus", ",", "corr_cross", ",", "snrv", ",", "psd", ",", "indices", ",", "template_plus", ",", "template_cross", ",", "u_vals", ",", "hplus_cross_corr", ",", "hpnorm", ",", "hcnorm", ")", ":", "if", "self", ".", "do", ":", "num_above", "=", "len", "(", "indices", ")", "if", "self", ".", "snr_threshold", ":", "above", "=", "abs", "(", "snrv", ")", ">", "self", ".", "snr_threshold", "num_above", "=", "above", ".", "sum", "(", ")", "logging", ".", "info", "(", "'%s above chisq activation threshold'", "%", "num_above", ")", "above_indices", "=", "indices", "[", "above", "]", "above_snrv", "=", "snrv", "[", "above", "]", "rchisq", "=", "numpy", ".", "zeros", "(", "len", "(", "indices", ")", ",", "dtype", "=", "numpy", ".", "float32", ")", "dof", "=", "-", "100", "else", ":", "above_indices", "=", "indices", "above_snrv", "=", "snrv", "if", "num_above", ">", "0", ":", "chisq", "=", "[", "]", "curr_tmplt_mult_fac", "=", "0.", "curr_corr_mult_fac", "=", "0.", "if", "self", ".", "template_mem", "is", "None", "or", "(", "not", "len", "(", "self", ".", "template_mem", ")", "==", "len", "(", "template_plus", ")", ")", ":", "self", ".", "template_mem", "=", "zeros", "(", "len", "(", "template_plus", ")", ",", "dtype", "=", "complex_same_precision_as", "(", "corr_plus", ")", ")", "if", "self", ".", "corr_mem", "is", "None", "or", "(", "not", "len", "(", "self", ".", "corr_mem", ")", "==", "len", "(", "corr_plus", ")", ")", ":", "self", ".", "corr_mem", "=", "zeros", "(", "len", "(", "corr_plus", ")", ",", "dtype", "=", "complex_same_precision_as", "(", "corr_plus", ")", ")", "tmplt_data", "=", "template_cross", ".", "data", "corr_data", "=", "corr_cross", ".", "data", "numpy", ".", "copyto", "(", "self", ".", "template_mem", ".", "data", ",", "template_cross", ".", "data", ")", "numpy", ".", "copyto", "(", "self", ".", "corr_mem", ".", "data", ",", "corr_cross", ".", "data", ")", "template_cross", ".", "_data", "=", "self", ".", "template_mem", ".", "data", "corr_cross", ".", "_data", "=", "self", ".", "corr_mem", ".", "data", "for", "lidx", ",", "index", "in", "enumerate", "(", "above_indices", ")", ":", "above_local_indices", "=", "numpy", ".", "array", "(", "[", "index", "]", ")", "above_local_snr", "=", "numpy", ".", "array", "(", "[", "above_snrv", "[", "lidx", "]", "]", ")", "local_u_val", "=", "u_vals", "[", "lidx", "]", "# Construct template from _plus and _cross", "# Note that this modifies in place, so we store that and", "# revert on the next pass.", "template", "=", "template_cross", ".", "multiply_and_add", "(", "template_plus", ",", "local_u_val", "-", "curr_tmplt_mult_fac", ")", "curr_tmplt_mult_fac", "=", "local_u_val", "template", ".", "f_lower", "=", "template_plus", ".", "f_lower", "template", ".", "params", "=", "template_plus", ".", "params", "# Construct the corr vector", "norm_fac", "=", "local_u_val", "*", "local_u_val", "+", "1", "norm_fac", "+=", "2", "*", "local_u_val", "*", "hplus_cross_corr", "norm_fac", "=", "hcnorm", "/", "(", "norm_fac", "**", "0.5", ")", "hp_fac", "=", "local_u_val", "*", "hpnorm", "/", "hcnorm", "corr", "=", "corr_cross", ".", "multiply_and_add", "(", "corr_plus", ",", "hp_fac", "-", "curr_corr_mult_fac", ")", "curr_corr_mult_fac", "=", "hp_fac", "bins", "=", "self", ".", "calculate_chisq_bins", "(", "template", ",", "psd", ")", "dof", "=", "(", "len", "(", "bins", ")", "-", "1", ")", "*", "2", "-", "2", "curr_chisq", "=", "power_chisq_at_points_from_precomputed", "(", "corr", ",", "above_local_snr", "/", 
"norm_fac", ",", "norm_fac", ",", "bins", ",", "above_local_indices", ")", "chisq", ".", "append", "(", "curr_chisq", "[", "0", "]", ")", "chisq", "=", "numpy", ".", "array", "(", "chisq", ")", "# Must reset corr and template to original values!", "template_cross", ".", "_data", "=", "tmplt_data", "corr_cross", ".", "_data", "=", "corr_data", "if", "self", ".", "snr_threshold", ":", "if", "num_above", ">", "0", ":", "rchisq", "[", "above", "]", "=", "chisq", "else", ":", "rchisq", "=", "chisq", "return", "rchisq", ",", "numpy", ".", "repeat", "(", "dof", ",", "len", "(", "indices", ")", ")", "# dof * numpy.ones_like(indices)", "else", ":", "return", "None", ",", "None" ]
45.366667
19.7
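One reading of the per-point normalization in the loop above, written out (u is local_u_val and rho is hplus_cross_corr; this is inferred from the code, not from separate documentation):

$$ \mathrm{norm\_fac} = \frac{h_{c,\mathrm{norm}}}{\sqrt{u^{2} + 2u\rho + 1}}, \qquad \mathrm{hp\_fac} = \frac{u\, h_{p,\mathrm{norm}}}{h_{c,\mathrm{norm}}}, \qquad \mathrm{dof} = 2\,(N_{\mathrm{bins}} - 1) - 2 $$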
def __draw_constant_line(self, value_label_style):
    "Draw a constant horizontal line at the given y-value, with its label"
    value, label, style = value_label_style
    start = self.transform_output_coordinates((0, value))[1]
    stop = self.graph_width
    path = etree.SubElement(self.graph, 'path', {
        'd': 'M 0 %(start)s h%(stop)s' % locals(),
        'class': 'constantLine'})
    if style:
        path.set('style', style)
    text = etree.SubElement(self.graph, 'text', {
        'x': str(2),
        'y': str(start - 2),
        'class': 'constantLine'})
    text.text = label
[ "def", "__draw_constant_line", "(", "self", ",", "value_label_style", ")", ":", "value", ",", "label", ",", "style", "=", "value_label_style", "start", "=", "self", ".", "transform_output_coordinates", "(", "(", "0", ",", "value", ")", ")", "[", "1", "]", "stop", "=", "self", ".", "graph_width", "path", "=", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'path'", ",", "{", "'d'", ":", "'M 0 %(start)s h%(stop)s'", "%", "locals", "(", ")", ",", "'class'", ":", "'constantLine'", "}", ")", "if", "style", ":", "path", ".", "set", "(", "'style'", ",", "style", ")", "text", "=", "etree", ".", "SubElement", "(", "self", ".", "graph", ",", "'text'", ",", "{", "'x'", ":", "str", "(", "2", ")", ",", "'y'", ":", "str", "(", "start", "-", "2", ")", ",", "'class'", ":", "'constantLine'", "}", ")", "text", ".", "text", "=", "label" ]
34.466667
13.666667
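A worked example of the SVG path string the method builds (coordinates illustrative):

start, stop = 120, 400  # y-coordinate of the line and graph width
d = 'M 0 %(start)s h%(stop)s' % locals()
assert d == 'M 0 120 h400'  # SVG: move to (0, 120), draw a 400-unit horizontal line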
def __complete_info_dict(self, node_info_dict, is_open): # Make pika credentials creds = pika.PlainCredentials( node_info_dict['username'], node_info_dict['password'] ) node_info_dict['credentials'] = creds if 'priority' in node_info_dict and node_info_dict['priority'] is not None: node_info_dict['priority'] = str(node_info_dict['priority']) else: node_info_dict['priority'] = DEFAULT_PRIO # Mandatories: host = node_info_dict['host'] credentials = node_info_dict['credentials'] # Optional ones # If not specified, fill in defaults. vhost = "" if 'vhost' in node_info_dict and node_info_dict['vhost'] is not None: vhost = node_info_dict['vhost'] port = 15672 if 'port' in node_info_dict and node_info_dict['port'] is not None: port = node_info_dict['port'] ssl_enabled = False if 'ssl_enabled' in node_info_dict and node_info_dict['ssl_enabled'] is not None: ssl_enabled = node_info_dict['ssl_enabled'] # Get some defaults: socket_timeout = esgfpid.defaults.RABBIT_PIKA_SOCKET_TIMEOUT connection_attempts = esgfpid.defaults.RABBIT_PIKA_CONNECTION_ATTEMPTS retry_delay = esgfpid.defaults.RABBIT_PIKA_CONNECTION_RETRY_DELAY_SECONDS # Make pika connection params # https://pika.readthedocs.org/en/0.9.6/connecting.html params = pika.ConnectionParameters( host=host, ssl=ssl_enabled, port=port, virtual_host=vhost, credentials=credentials, socket_timeout=socket_timeout, connection_attempts=connection_attempts, retry_delay=retry_delay ) node_info_dict['params'] = params # Add some stuff node_info_dict['is_open'] = is_open ''' https://pika.readthedocs.org/en/0.9.6/connecting.html class pika.connection.ConnectionParameters( host=None, port=None, virtual_host=None, credentials=None, channel_max=None, frame_max=None, heartbeat_interval=None, ssl=None, ssl_options=None, connection_attempts=None, retry_delay=None, socket_timeout=None, locale=None, backpressure_detection=None) ''' return node_info_dict
[ "def", "__complete_info_dict", "(", "self", ",", "node_info_dict", ",", "is_open", ")", ":", "# Make pika credentials", "creds", "=", "pika", ".", "PlainCredentials", "(", "node_info_dict", "[", "'username'", "]", ",", "node_info_dict", "[", "'password'", "]", ")", "node_info_dict", "[", "'credentials'", "]", "=", "creds", "if", "'priority'", "in", "node_info_dict", "and", "node_info_dict", "[", "'priority'", "]", "is", "not", "None", ":", "node_info_dict", "[", "'priority'", "]", "=", "str", "(", "node_info_dict", "[", "'priority'", "]", ")", "else", ":", "node_info_dict", "[", "'priority'", "]", "=", "DEFAULT_PRIO", "# Mandatories:", "host", "=", "node_info_dict", "[", "'host'", "]", "credentials", "=", "node_info_dict", "[", "'credentials'", "]", "# Optional ones", "# If not specified, fill in defaults.", "vhost", "=", "\"\"", "if", "'vhost'", "in", "node_info_dict", "and", "node_info_dict", "[", "'vhost'", "]", "is", "not", "None", ":", "vhost", "=", "node_info_dict", "[", "'vhost'", "]", "port", "=", "15672", "if", "'port'", "in", "node_info_dict", "and", "node_info_dict", "[", "'port'", "]", "is", "not", "None", ":", "port", "=", "node_info_dict", "[", "'port'", "]", "ssl_enabled", "=", "False", "if", "'ssl_enabled'", "in", "node_info_dict", "and", "node_info_dict", "[", "'ssl_enabled'", "]", "is", "not", "None", ":", "ssl_enabled", "=", "node_info_dict", "[", "'ssl_enabled'", "]", "# Get some defaults:", "socket_timeout", "=", "esgfpid", ".", "defaults", ".", "RABBIT_PIKA_SOCKET_TIMEOUT", "connection_attempts", "=", "esgfpid", ".", "defaults", ".", "RABBIT_PIKA_CONNECTION_ATTEMPTS", "retry_delay", "=", "esgfpid", ".", "defaults", ".", "RABBIT_PIKA_CONNECTION_RETRY_DELAY_SECONDS", "# Make pika connection params", "# https://pika.readthedocs.org/en/0.9.6/connecting.html", "params", "=", "pika", ".", "ConnectionParameters", "(", "host", "=", "host", ",", "ssl", "=", "ssl_enabled", ",", "port", "=", "port", ",", "virtual_host", "=", "vhost", ",", "credentials", "=", "credentials", ",", "socket_timeout", "=", "socket_timeout", ",", "connection_attempts", "=", "connection_attempts", ",", "retry_delay", "=", "retry_delay", ")", "node_info_dict", "[", "'params'", "]", "=", "params", "# Add some stuff", "node_info_dict", "[", "'is_open'", "]", "=", "is_open", "return", "node_info_dict" ]
38.540984
20.639344
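A minimal input accepted by the completion step above; only the username, password and host are mandatory, while vhost, port, ssl_enabled and priority fall back to defaults (values here are invented):

node_info = {
    "username": "esgf-publisher",
    "password": "secret",
    "host": "rabbit.example.org",
}
# completed = self.__complete_info_dict(node_info, is_open=True)
# completed["params"] is then a pika.ConnectionParameters ready to connect with.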
def write_gphocs(data, sidx):
    """
    write the g-phocs output. This code is quite ugly because it's copied
    directly from the old loci2gphocs script from pyrad. I figure having it
    get done the stupid way is better than not having it done at all, at
    least for the time being. This could probably be sped up significantly.
    """

    outfile = data.outfiles.gphocs
    infile = data.outfiles.loci

    infile = open(infile)
    outfile = open(outfile, 'w')

    ## parse the loci
    ## Each set of reads at a locus is appended with a line
    ## beginning with // and ending with |x, where x is the locus id.
    ## so after this call 'loci' will contain an array
    ## of sets of each read per locus.
    loci = re.compile("\|[0-9]+\|").split(infile.read())[:-1]

    # Print the header, the number of loci in this file
    outfile.write(str(len(loci)) + "\n\n")

    # iterate through each locus, print out the header for each locus:
    # <locus_name> <n_samples> <locus_length>
    # Then print the data for each sample in this format:
    # <individual_name> <sequence>
    for i, loc in enumerate(loci):
        ## Get rid of the line that contains the snp info
        loc = loc.rsplit("\n", 1)[0]

        # Separate out each sequence within the loc block. 'sequences'
        # will now be a list of strings containing name/sequence pairs.
        # We split each line of the locus block into name and sequence fields.
        names = [line.split()[0] for line in loc.strip().split("\n")]
        try:
            sequences = [line.split()[1] for line in loc.strip().split("\n")]
        except IndexError:
            # Skip malformed loci that lack a sequence field; 'pass' here
            # would silently reuse the previous locus' sequences.
            continue

        # Strips off 'nnnn' separator for paired data
        # replaces '-' with 'N'
        editsequences = [seq.replace("n","").replace('-','N') for seq in sequences]
        sequence_length = len(editsequences[0])

        # get length of longest name and add 4 spaces
        longname = max(map(len,names))+4

        # Print out the header for this locus
        outfile.write('locus{} {} {}\n'.format(str(i), len(sequences), sequence_length))

        # Iterate through each sequence read at this locus and write it to the file.
        for name,sequence in zip(names, editsequences):
            # Clean up the sequence data to make gphocs happy. Only accepts UPPER
            # case chars for bases, and only accepts 'N' for missing data.
            outfile.write(name+" "*(longname-len(name))+sequence + "\n")

        ## Separate loci so it's prettier
        outfile.write("\n")
[ "def", "write_gphocs", "(", "data", ",", "sidx", ")", ":", "outfile", "=", "data", ".", "outfiles", ".", "gphocs", "infile", "=", "data", ".", "outfiles", ".", "loci", "infile", "=", "open", "(", "infile", ")", "outfile", "=", "open", "(", "outfile", ",", "'w'", ")", "## parse the loci", "## Each set of reads at a locus is appended with a line", "## beginning with // and ending with |x, where x in the locus id.", "## so after this call 'loci' will contain an array", "## of sets of each read per locus.", "loci", "=", "re", ".", "compile", "(", "\"\\|[0-9]+\\|\"", ")", ".", "split", "(", "infile", ".", "read", "(", ")", ")", "[", ":", "-", "1", "]", "# Print the header, the number of loci in this file", "outfile", ".", "write", "(", "str", "(", "len", "(", "loci", ")", ")", "+", "\"\\n\\n\"", ")", "# iterate through each locus, print out the header for each locus:", "# <locus_name> <n_samples> <locus_length>", "# Then print the data for each sample in this format:", "# <individual_name> <sequence>", "for", "i", ",", "loc", "in", "enumerate", "(", "loci", ")", ":", "## Get rid of the line that contains the snp info", "loc", "=", "loc", ".", "rsplit", "(", "\"\\n\"", ",", "1", ")", "[", "0", "]", "# Separate out each sequence within the loc block. 'sequences'", "# will now be a list strings containing name/sequence pairs.", "# We select each line in the locus string that starts with \">\"", "names", "=", "[", "line", ".", "split", "(", ")", "[", "0", "]", "for", "line", "in", "loc", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "]", "try", ":", "sequences", "=", "[", "line", ".", "split", "(", ")", "[", "1", "]", "for", "line", "in", "loc", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "]", "except", ":", "pass", "# Strips off 'nnnn' separator for paired data", "# replaces '-' with 'N'", "editsequences", "=", "[", "seq", ".", "replace", "(", "\"n\"", ",", "\"\"", ")", ".", "replace", "(", "'-'", ",", "'N'", ")", "for", "seq", "in", "sequences", "]", "sequence_length", "=", "len", "(", "editsequences", "[", "0", "]", ")", "# get length of longest name and add 4 spaces", "longname", "=", "max", "(", "map", "(", "len", ",", "names", ")", ")", "+", "4", "# Print out the header for this locus", "outfile", ".", "write", "(", "'locus{} {} {}\\n'", ".", "format", "(", "str", "(", "i", ")", ",", "len", "(", "sequences", ")", ",", "sequence_length", ")", ")", "# Iterate through each sequence read at this locus and write it to the file.", "for", "name", ",", "sequence", "in", "zip", "(", "names", ",", "editsequences", ")", ":", "# Clean up the sequence data to make gphocs happy. Only accepts UPPER", "# case chars for bases, and only accepts 'N' for missing data.", "outfile", ".", "write", "(", "name", "+", "\" \"", "*", "(", "longname", "-", "len", "(", "name", ")", ")", "+", "sequence", "+", "\"\\n\"", ")", "## Separate loci with so it's prettier", "outfile", ".", "write", "(", "\"\\n\"", ")" ]
42.568966
22.5
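From the write calls above, a two-locus input produces a file shaped like this (names and bases invented; each name is padded to the longest name plus four spaces):

2

locus0 2 6
sample_A    ACGTNA
sample_B    ACGTAC

locus1 2 4
sample_A    GGNN
sample_B    GGTA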
def _get_adjtime_timezone(): ''' Return the timezone in /etc/adjtime of the system clock ''' adjtime_file = '/etc/adjtime' if os.path.exists(adjtime_file): cmd = ['tail', '-n', '1', adjtime_file] return __salt__['cmd.run'](cmd, python_shell=False) elif os.path.exists('/dev/rtc'): raise CommandExecutionError( 'Unable to get hwclock timezone from ' + adjtime_file ) else: # There is no RTC. return None
[ "def", "_get_adjtime_timezone", "(", ")", ":", "adjtime_file", "=", "'/etc/adjtime'", "if", "os", ".", "path", ".", "exists", "(", "adjtime_file", ")", ":", "cmd", "=", "[", "'tail'", ",", "'-n'", ",", "'1'", ",", "adjtime_file", "]", "return", "__salt__", "[", "'cmd.run'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "elif", "os", ".", "path", ".", "exists", "(", "'/dev/rtc'", ")", ":", "raise", "CommandExecutionError", "(", "'Unable to get hwclock timezone from '", "+", "adjtime_file", ")", "else", ":", "# There is no RTC.", "return", "None" ]
31.733333
17.6
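For context, a typical /etc/adjtime has three lines, and the `tail -n 1` above returns the last one (`UTC` or `LOCAL`):

0.000000 1540220710 0.000000
1540220710
UTC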
def convert_exchange_to_compounds(model):
    """Convert exchange reactions in model to exchange compounds.

    Only exchange reactions in the extracellular compartment are converted.
    The extracellular compartment must be defined for the model.

    Args:
        model: :class:`NativeModel`.
    """
    # Build set of exchange reactions
    exchanges = set()
    for reaction in model.reactions:
        equation = reaction.properties.get('equation')
        if equation is None:
            continue

        if len(equation.compounds) != 1:
            # Provide warning for exchange reactions with more than
            # one compound, they won't be put into the exchange definition
            if (len(equation.left) == 0) != (len(equation.right) == 0):
                logger.warning('Exchange reaction {} has more than one'
                               ' compound, it was not converted to'
                               ' exchange compound'.format(reaction.id))
            continue

        exchanges.add(reaction.id)

    # Convert exchange reactions into exchange compounds
    for reaction_id in exchanges:
        equation = model.reactions[reaction_id].equation
        compound, value = equation.compounds[0]
        if compound.compartment != model.extracellular_compartment:
            continue

        if compound in model.exchange:
            logger.warning(
                'Compound {} is already defined in the exchange'
                ' definition'.format(compound))
            continue

        # We multiply the flux bounds by value in order to create equivalent
        # exchange reactions with stoichiometric value of one. If the flux
        # bounds are not set but the reaction is unidirectional, the implicit
        # flux bounds must be used.
        lower_flux, upper_flux = None, None
        if reaction_id in model.limits:
            _, lower, upper = model.limits[reaction_id]
            if lower is not None:
                lower_flux = lower * abs(value)
            if upper is not None:
                upper_flux = upper * abs(value)

        if lower_flux is None and equation.direction == Direction.Forward:
            lower_flux = 0
        if upper_flux is None and equation.direction == Direction.Reverse:
            upper_flux = 0

        # If the stoichiometric value of the reaction is reversed, the flux
        # limits must be flipped.
        if value > 0:
            lower_flux, upper_flux = (
                -upper_flux if upper_flux is not None else None,
                -lower_flux if lower_flux is not None else None)

        model.exchange[compound] = (
            compound, reaction_id, lower_flux, upper_flux)
        model.reactions.discard(reaction_id)
        model.limits.pop(reaction_id, None)
[ "def", "convert_exchange_to_compounds", "(", "model", ")", ":", "# Build set of exchange reactions", "exchanges", "=", "set", "(", ")", "for", "reaction", "in", "model", ".", "reactions", ":", "equation", "=", "reaction", ".", "properties", ".", "get", "(", "'equation'", ")", "if", "equation", "is", "None", ":", "continue", "if", "len", "(", "equation", ".", "compounds", ")", "!=", "1", ":", "# Provide warning for exchange reactions with more than", "# one compound, they won't be put into the exchange definition", "if", "(", "len", "(", "equation", ".", "left", ")", "==", "0", ")", "!=", "(", "len", "(", "equation", ".", "right", ")", "==", "0", ")", ":", "logger", ".", "warning", "(", "'Exchange reaction {} has more than one'", "' compound, it was not converted to'", "' exchange compound'", ".", "format", "(", "reaction", ".", "id", ")", ")", "continue", "exchanges", ".", "add", "(", "reaction", ".", "id", ")", "# Convert exchange reactions into exchange compounds", "for", "reaction_id", "in", "exchanges", ":", "equation", "=", "model", ".", "reactions", "[", "reaction_id", "]", ".", "equation", "compound", ",", "value", "=", "equation", ".", "compounds", "[", "0", "]", "if", "compound", ".", "compartment", "!=", "model", ".", "extracellular_compartment", ":", "continue", "if", "compound", "in", "model", ".", "exchange", ":", "logger", ".", "warning", "(", "'Compound {} is already defined in the exchange'", "' definition'", ".", "format", "(", "compound", ")", ")", "continue", "# We multiply the flux bounds by value in order to create equivalent", "# exchange reactions with stoichiometric value of one. If the flux", "# bounds are not set but the reaction is unidirectional, the implicit", "# flux bounds must be used.", "lower_flux", ",", "upper_flux", "=", "None", ",", "None", "if", "reaction_id", "in", "model", ".", "limits", ":", "_", ",", "lower", ",", "upper", "=", "model", ".", "limits", "[", "reaction_id", "]", "if", "lower", "is", "not", "None", ":", "lower_flux", "=", "lower", "*", "abs", "(", "value", ")", "if", "upper", "is", "not", "None", ":", "upper_flux", "=", "upper", "*", "abs", "(", "value", ")", "if", "lower_flux", "is", "None", "and", "equation", ".", "direction", "==", "Direction", ".", "Forward", ":", "lower_flux", "=", "0", "if", "upper_flux", "is", "None", "and", "equation", ".", "direction", "==", "Direction", ".", "Reverse", ":", "upper_flux", "=", "0", "# If the stoichiometric value of the reaction is reversed, the flux", "# limits must be flipped.", "if", "value", ">", "0", ":", "lower_flux", ",", "upper_flux", "=", "(", "-", "upper_flux", "if", "upper_flux", "is", "not", "None", "else", "None", ",", "-", "lower_flux", "if", "lower_flux", "is", "not", "None", "else", "None", ")", "model", ".", "exchange", "[", "compound", "]", "=", "(", "compound", ",", "reaction_id", ",", "lower_flux", ",", "upper_flux", ")", "model", ".", "reactions", ".", "discard", "(", "reaction_id", ")", "model", ".", "limits", ".", "pop", "(", "reaction_id", ",", "None", ")" ]
39.391304
19.826087
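A small worked instance of the bound scaling and flipping above (numbers invented): for an exchange reaction producing two units of the compound (value = 2) with stored limits (-10, 20):

value = 2
lower_flux, upper_flux = -10 * abs(value), 20 * abs(value)  # (-20, 40)
if value > 0:  # reversed stoichiometry: swap and negate the bounds
    lower_flux, upper_flux = -upper_flux, -lower_flux
assert (lower_flux, upper_flux) == (-40, 20)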
def chunks(l, n): """ Yield n successive chunks from l. """ newn = int(len(l) / n) for i in xrange(0, n-1): yield l[i*newn:i*newn+newn] yield l[n*newn-newn:]
[ "def", "chunks", "(", "l", ",", "n", ")", ":", "newn", "=", "int", "(", "len", "(", "l", ")", "/", "n", ")", "for", "i", "in", "xrange", "(", "0", ",", "n", "-", "1", ")", ":", "yield", "l", "[", "i", "*", "newn", ":", "i", "*", "newn", "+", "newn", "]", "yield", "l", "[", "n", "*", "newn", "-", "newn", ":", "]" ]
25.571429
9.857143
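A Python 3 rendering of the generator above (range for xrange; note that l[n*newn-newn:] equals l[(n-1)*newn:], so the final chunk absorbs any remainder):

def chunks3(l, n):
    newn = len(l) // n
    for i in range(n - 1):
        yield l[i * newn:(i + 1) * newn]
    yield l[(n - 1) * newn:]

assert list(chunks3(list(range(10)), 3)) == [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]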
def items(self): """ Return a copy of the dictionary's list of (key, value) pairs. """ r = [] for key in self._safe_keys(): try: r.append((key, self[key])) except KeyError: pass return r
[ "def", "items", "(", "self", ")", ":", "r", "=", "[", "]", "for", "key", "in", "self", ".", "_safe_keys", "(", ")", ":", "try", ":", "r", ".", "append", "(", "(", "key", ",", "self", "[", "key", "]", ")", ")", "except", "KeyError", ":", "pass", "return", "r" ]
29.555556
15
def url(context, view, subdomain=UNSET, *args, **kwargs):
    """
    Resolves a URL in a template, using subdomain-based URL resolution.

    If no subdomain is provided and a ``request`` is in the template context
    when rendering, the URL will be resolved relative to the current request's
    subdomain. If no ``request`` is provided, the URL will be resolved relative
    to current domain with the ``settings.ROOT_URLCONF``.

    Usage::

        {% load subdomainurls %}
        {% url 'view-name' subdomain='subdomain' %}

    .. note:: This tag uses the variable URL syntax introduced in Django
       1.3 as ``{% load url from future %}`` and was made the standard in
       Django 1.5. If you are upgrading a legacy application from one of the
       previous template tag formats, make sure to quote your constant string
       URL names to avoid :exc:`~django.core.urlresolvers.NoReverseMatch`
       errors during template rendering.

    """
    if subdomain is UNSET:
        request = context.get('request')
        if request is not None:
            subdomain = getattr(request, 'subdomain', None)
        else:
            subdomain = None
    elif subdomain == '':
        subdomain = None

    return reverse(view, subdomain=subdomain, args=args, kwargs=kwargs)
[ "def", "url", "(", "context", ",", "view", ",", "subdomain", "=", "UNSET", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "subdomain", "is", "UNSET", ":", "request", "=", "context", ".", "get", "(", "'request'", ")", "if", "request", "is", "not", "None", ":", "subdomain", "=", "getattr", "(", "request", ",", "'subdomain'", ",", "None", ")", "else", ":", "subdomain", "=", "None", "elif", "subdomain", "is", "''", ":", "subdomain", "=", "None", "return", "reverse", "(", "view", ",", "subdomain", "=", "subdomain", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ")" ]
39.15625
25.21875
def __get_concurrency_maps(self, states, session=None):
    """
    Get the concurrency maps.

    :param states: List of states to query for
    :type states: list[airflow.utils.state.State]
    :return: A map from dag_id to # of task instances and a map from
        (dag_id, task_id) to # of task instances in the given state list
    :rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
    """
    TI = models.TaskInstance
    ti_concurrency_query = (
        session
        .query(TI.task_id, TI.dag_id, func.count('*'))
        .filter(TI.state.in_(states))
        .group_by(TI.task_id, TI.dag_id)
    ).all()
    dag_map = defaultdict(int)
    task_map = defaultdict(int)
    for result in ti_concurrency_query:
        task_id, dag_id, count = result
        dag_map[dag_id] += count
        task_map[(dag_id, task_id)] = count
    return dag_map, task_map
[ "def", "__get_concurrency_maps", "(", "self", ",", "states", ",", "session", "=", "None", ")", ":", "TI", "=", "models", ".", "TaskInstance", "ti_concurrency_query", "=", "(", "session", ".", "query", "(", "TI", ".", "task_id", ",", "TI", ".", "dag_id", ",", "func", ".", "count", "(", "'*'", ")", ")", ".", "filter", "(", "TI", ".", "state", ".", "in_", "(", "states", ")", ")", ".", "group_by", "(", "TI", ".", "task_id", ",", "TI", ".", "dag_id", ")", ")", ".", "all", "(", ")", "dag_map", "=", "defaultdict", "(", "int", ")", "task_map", "=", "defaultdict", "(", "int", ")", "for", "result", "in", "ti_concurrency_query", ":", "task_id", ",", "dag_id", ",", "count", "=", "result", "dag_map", "[", "dag_id", "]", "+=", "count", "task_map", "[", "(", "dag_id", ",", "task_id", ")", "]", "=", "count", "return", "dag_map", ",", "task_map" ]
36.88
12.96
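The shape of the two maps built above, reproduced on illustrative rows (each row is a (task_id, dag_id, count) tuple, as returned by the query):

from collections import defaultdict

rows = [("t1", "dag_a", 3), ("t2", "dag_a", 1)]
dag_map, task_map = defaultdict(int), defaultdict(int)
for task_id, dag_id, count in rows:
    dag_map[dag_id] += count             # per-DAG running total
    task_map[(dag_id, task_id)] = count  # per-task count
assert dag_map["dag_a"] == 4 and task_map[("dag_a", "t1")] == 3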
def build_module(project, env=None): '''Build project script as module''' from pyspider.libs import base_handler assert 'name' in project, 'need name of project' assert 'script' in project, 'need script of project' if env is None: env = {} # fix for old non-package version scripts pyspider_path = os.path.join(os.path.dirname(__file__), "..") if pyspider_path not in sys.path: sys.path.insert(1, pyspider_path) env = dict(env) env.update({ 'debug': project.get('status', 'DEBUG') == 'DEBUG', }) loader = ProjectLoader(project) module = loader.load_module(project['name']) # logger inject module.log_buffer = [] module.logging = module.logger = logging.Logger(project['name']) if env.get('enable_stdout_capture', True): handler = SaveLogHandler(module.log_buffer) handler.setFormatter(LogFormatter(color=False)) else: handler = logging.StreamHandler() handler.setFormatter(LogFormatter(color=True)) module.logger.addHandler(handler) if '__handler_cls__' not in module.__dict__: BaseHandler = module.__dict__.get('BaseHandler', base_handler.BaseHandler) for each in list(six.itervalues(module.__dict__)): if inspect.isclass(each) and each is not BaseHandler \ and issubclass(each, BaseHandler): module.__dict__['__handler_cls__'] = each _class = module.__dict__.get('__handler_cls__') assert _class is not None, "need BaseHandler in project module" instance = _class() instance.__env__ = env instance.project_name = project['name'] instance.project = project return { 'loader': loader, 'module': module, 'class': _class, 'instance': instance, 'exception': None, 'exception_log': '', 'info': project, 'load_time': time.time(), }
[ "def", "build_module", "(", "project", ",", "env", "=", "None", ")", ":", "from", "pyspider", ".", "libs", "import", "base_handler", "assert", "'name'", "in", "project", ",", "'need name of project'", "assert", "'script'", "in", "project", ",", "'need script of project'", "if", "env", "is", "None", ":", "env", "=", "{", "}", "# fix for old non-package version scripts", "pyspider_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"..\"", ")", "if", "pyspider_path", "not", "in", "sys", ".", "path", ":", "sys", ".", "path", ".", "insert", "(", "1", ",", "pyspider_path", ")", "env", "=", "dict", "(", "env", ")", "env", ".", "update", "(", "{", "'debug'", ":", "project", ".", "get", "(", "'status'", ",", "'DEBUG'", ")", "==", "'DEBUG'", ",", "}", ")", "loader", "=", "ProjectLoader", "(", "project", ")", "module", "=", "loader", ".", "load_module", "(", "project", "[", "'name'", "]", ")", "# logger inject", "module", ".", "log_buffer", "=", "[", "]", "module", ".", "logging", "=", "module", ".", "logger", "=", "logging", ".", "Logger", "(", "project", "[", "'name'", "]", ")", "if", "env", ".", "get", "(", "'enable_stdout_capture'", ",", "True", ")", ":", "handler", "=", "SaveLogHandler", "(", "module", ".", "log_buffer", ")", "handler", ".", "setFormatter", "(", "LogFormatter", "(", "color", "=", "False", ")", ")", "else", ":", "handler", "=", "logging", ".", "StreamHandler", "(", ")", "handler", ".", "setFormatter", "(", "LogFormatter", "(", "color", "=", "True", ")", ")", "module", ".", "logger", ".", "addHandler", "(", "handler", ")", "if", "'__handler_cls__'", "not", "in", "module", ".", "__dict__", ":", "BaseHandler", "=", "module", ".", "__dict__", ".", "get", "(", "'BaseHandler'", ",", "base_handler", ".", "BaseHandler", ")", "for", "each", "in", "list", "(", "six", ".", "itervalues", "(", "module", ".", "__dict__", ")", ")", ":", "if", "inspect", ".", "isclass", "(", "each", ")", "and", "each", "is", "not", "BaseHandler", "and", "issubclass", "(", "each", ",", "BaseHandler", ")", ":", "module", ".", "__dict__", "[", "'__handler_cls__'", "]", "=", "each", "_class", "=", "module", ".", "__dict__", ".", "get", "(", "'__handler_cls__'", ")", "assert", "_class", "is", "not", "None", ",", "\"need BaseHandler in project module\"", "instance", "=", "_class", "(", ")", "instance", ".", "__env__", "=", "env", "instance", ".", "project_name", "=", "project", "[", "'name'", "]", "instance", ".", "project", "=", "project", "return", "{", "'loader'", ":", "loader", ",", "'module'", ":", "module", ",", "'class'", ":", "_class", ",", "'instance'", ":", "instance", ",", "'exception'", ":", "None", ",", "'exception_log'", ":", "''", ",", "'info'", ":", "project", ",", "'load_time'", ":", "time", ".", "time", "(", ")", ",", "}" ]
36.892857
18.357143
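A hedged usage sketch for the loader above: per the asserts, the project dict needs at least 'name' and 'script'; the script source shown is a placeholder:

project = {
    "name": "demo_project",
    "script": "# project source code goes here\n",
    "status": "DEBUG",
}
# built = build_module(project, env={"enable_stdout_capture": True})
# built["instance"] is a ready handler object; built["module"].log_buffer
# collects the log records captured by SaveLogHandler.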
def invoke_hook_spout_ack(self, message_id, complete_latency_ns):
    """Invoke task hooks every time the spout acks a tuple

    :type message_id: str
    :param message_id: message id to which an acked tuple was anchored
    :type complete_latency_ns: float
    :param complete_latency_ns: complete latency in nano seconds
    """
    if len(self.task_hooks) > 0:
        spout_ack_info = SpoutAckInfo(message_id=message_id,
                                      spout_task_id=self.get_task_id(),
                                      complete_latency_ms=complete_latency_ns *
                                      system_constants.NS_TO_MS)
        for task_hook in self.task_hooks:
            task_hook.spout_ack(spout_ack_info)
[ "def", "invoke_hook_spout_ack", "(", "self", ",", "message_id", ",", "complete_latency_ns", ")", ":", "if", "len", "(", "self", ".", "task_hooks", ")", ">", "0", ":", "spout_ack_info", "=", "SpoutAckInfo", "(", "message_id", "=", "message_id", ",", "spout_task_id", "=", "self", ".", "get_task_id", "(", ")", ",", "complete_latency_ms", "=", "complete_latency_ns", "*", "system_constants", ".", "NS_TO_MS", ")", "for", "task_hook", "in", "self", ".", "task_hooks", ":", "task_hook", ".", "spout_ack", "(", "spout_ack_info", ")" ]
47
17.066667
def run(self): """Run command.""" onnx_script = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "tools/mypy-onnx.py")) returncode = subprocess.call([sys.executable, onnx_script]) sys.exit(returncode)
[ "def", "run", "(", "self", ")", ":", "onnx_script", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "__file__", ")", ")", ",", "\"tools/mypy-onnx.py\"", ")", ")", "returncode", "=", "subprocess", ".", "call", "(", "[", "sys", ".", "executable", ",", "onnx_script", "]", ")", "sys", ".", "exit", "(", "returncode", ")" ]
50.6
28.6
def _iter_module_files(): """This iterates over all relevant Python files. It goes through all loaded files from modules, all files in folders of already loaded modules as well as all files reachable through a package. """ # The list call is necessary on Python 3 in case the module # dictionary modifies during iteration. for module in list(sys.modules.values()): if module is None: continue filename = getattr(module, '__file__', None) if filename: old = None while not os.path.isfile(filename): old = filename filename = os.path.dirname(filename) if filename == old: break else: if filename[-4:] in ('.pyc', '.pyo'): filename = filename[:-1] yield filename
[ "def", "_iter_module_files", "(", ")", ":", "# The list call is necessary on Python 3 in case the module", "# dictionary modifies during iteration.", "for", "module", "in", "list", "(", "sys", ".", "modules", ".", "values", "(", ")", ")", ":", "if", "module", "is", "None", ":", "continue", "filename", "=", "getattr", "(", "module", ",", "'__file__'", ",", "None", ")", "if", "filename", ":", "old", "=", "None", "while", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "old", "=", "filename", "filename", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "if", "filename", "==", "old", ":", "break", "else", ":", "if", "filename", "[", "-", "4", ":", "]", "in", "(", "'.pyc'", ",", "'.pyo'", ")", ":", "filename", "=", "filename", "[", ":", "-", "1", "]", "yield", "filename" ]
39.045455
12.681818
def get_vnetwork_vswitches_output_vnetwork_vswitches_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches") config = get_vnetwork_vswitches output = ET.SubElement(get_vnetwork_vswitches, "output") vnetwork_vswitches = ET.SubElement(output, "vnetwork-vswitches") name = ET.SubElement(vnetwork_vswitches, "name") name.text = kwargs.pop('name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vnetwork_vswitches_output_vnetwork_vswitches_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_vswitches", "=", "ET", ".", "Element", "(", "\"get_vnetwork_vswitches\"", ")", "config", "=", "get_vnetwork_vswitches", "output", "=", "ET", ".", "SubElement", "(", "get_vnetwork_vswitches", ",", "\"output\"", ")", "vnetwork_vswitches", "=", "ET", ".", "SubElement", "(", "output", ",", "\"vnetwork-vswitches\"", ")", "name", "=", "ET", ".", "SubElement", "(", "vnetwork_vswitches", ",", "\"name\"", ")", "name", ".", "text", "=", "kwargs", ".", "pop", "(", "'name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
44.461538
15.923077
def _strongly_connected_subgraph(counts, weight=1, verbose=True):
    """Trim a transition count matrix down to its maximal
    strongly ergodic subgraph.

    From the counts matrix, we define a graph where there exists
    a directed edge between two nodes, `i` and `j` if
    `counts[i][j] >= weight`. We then find the nodes belonging to the largest
    strongly connected subgraph of this graph, and return a new counts
    matrix formed by these rows and columns of the input `counts` matrix.

    Parameters
    ----------
    counts : np.array, shape=(n_states_in, n_states_in)
        Input set of directed counts.
    weight : float
        Threshold by which ergodicity is judged in the input data. At least
        this many transition counts in both directions are required to
        include an edge in the ergodic subgraph.
    verbose : bool
        Print a short statement

    Returns
    -------
    counts_component : np.array
        "Trimmed" version of ``counts``, including only states in the
        maximal strongly ergodic subgraph.
    mapping : dict
        Mapping from "input" state indices to "output" state indices.
        The semantics of ``mapping[i] = j`` is that state ``i`` from the
        "input space" for the counts matrix is represented by the index
        ``j`` in counts_component.
    percent_retained : float
        Percentage of the input counts population retained in the selected
        component.
    """
    n_states_input = counts.shape[0]
    n_components, component_assignments = csgraph.connected_components(
        csr_matrix(counts >= weight), connection="strong")
    populations = np.array(counts.sum(0)).flatten()
    component_pops = np.array([populations[component_assignments == i].sum() for i in range(n_components)])
    which_component = component_pops.argmax()

    def cpop(which):
        csum = component_pops.sum()
        return 100 * component_pops[which] / csum if csum != 0 else np.nan

    percent_retained = cpop(which_component)
    if verbose:
        print("MSM contains %d strongly connected component%s "
              "above weight=%.2f. Component %d selected, with "
              "population %f%%" % (
                  n_components, 's' if (n_components != 1) else '',
                  weight, which_component, percent_retained))

    # keys are all of the "input states" which have a valid mapping to the output.
    keys = np.arange(n_states_input)[component_assignments == which_component]

    if n_components == n_states_input and counts[np.ix_(keys, keys)] == 0:
        # if we have a completely disconnected graph with no self-transitions
        return np.zeros((0, 0)), {}, percent_retained

    # values are the "output" state that these guys are mapped to
    values = np.arange(len(keys))
    mapping = dict(zip(keys, values))
    n_states_output = len(mapping)

    trimmed_counts = np.zeros((n_states_output, n_states_output),
                              dtype=counts.dtype)
    trimmed_counts[np.ix_(values, values)] = counts[np.ix_(keys, keys)]
    return trimmed_counts, mapping, percent_retained
[ "def", "_strongly_connected_subgraph", "(", "counts", ",", "weight", "=", "1", ",", "verbose", "=", "True", ")", ":", "n_states_input", "=", "counts", ".", "shape", "[", "0", "]", "n_components", ",", "component_assignments", "=", "csgraph", ".", "connected_components", "(", "csr_matrix", "(", "counts", ">=", "weight", ")", ",", "connection", "=", "\"strong\"", ")", "populations", "=", "np", ".", "array", "(", "counts", ".", "sum", "(", "0", ")", ")", ".", "flatten", "(", ")", "component_pops", "=", "np", ".", "array", "(", "[", "populations", "[", "component_assignments", "==", "i", "]", ".", "sum", "(", ")", "for", "i", "in", "range", "(", "n_components", ")", "]", ")", "which_component", "=", "component_pops", ".", "argmax", "(", ")", "def", "cpop", "(", "which", ")", ":", "csum", "=", "component_pops", ".", "sum", "(", ")", "return", "100", "*", "component_pops", "[", "which", "]", "/", "csum", "if", "csum", "!=", "0", "else", "np", ".", "nan", "percent_retained", "=", "cpop", "(", "which_component", ")", "if", "verbose", ":", "print", "(", "\"MSM contains %d strongly connected component%s \"", "\"above weight=%.2f. Component %d selected, with \"", "\"population %f%%\"", "%", "(", "n_components", ",", "'s'", "if", "(", "n_components", "!=", "1", ")", "else", "''", ",", "weight", ",", "which_component", ",", "percent_retained", ")", ")", "# keys are all of the \"input states\" which have a valid mapping to the output.", "keys", "=", "np", ".", "arange", "(", "n_states_input", ")", "[", "component_assignments", "==", "which_component", "]", "if", "n_components", "==", "n_states_input", "and", "counts", "[", "np", ".", "ix_", "(", "keys", ",", "keys", ")", "]", "==", "0", ":", "# if we have a completely disconnected graph with no self-transitions", "return", "np", ".", "zeros", "(", "(", "0", ",", "0", ")", ")", ",", "{", "}", ",", "percent_retained", "# values are the \"output\" state that these guys are mapped to", "values", "=", "np", ".", "arange", "(", "len", "(", "keys", ")", ")", "mapping", "=", "dict", "(", "zip", "(", "keys", ",", "values", ")", ")", "n_states_output", "=", "len", "(", "mapping", ")", "trimmed_counts", "=", "np", ".", "zeros", "(", "(", "n_states_output", ",", "n_states_output", ")", ",", "dtype", "=", "counts", ".", "dtype", ")", "trimmed_counts", "[", "np", ".", "ix_", "(", "values", ",", "values", ")", "]", "=", "counts", "[", "np", ".", "ix_", "(", "keys", ",", "keys", ")", "]", "return", "trimmed_counts", ",", "mapping", ",", "percent_retained" ]
43.308824
22.897059
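A small worked example of the trimming above (SciPy calls as in the snippet): states 0 and 1 exchange counts while state 2 only receives, so the strong component {0, 1} is kept:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

counts = np.array([[0, 5, 1],
                   [4, 0, 0],
                   [0, 0, 0]])
n_components, assignments = connected_components(
    csr_matrix(counts >= 1), connection="strong")
# n_components == 2; populations (column sums) are [4, 5, 1], so the
# component {0, 1} with population 9 wins and the trimmed matrix is
# counts[np.ix_([0, 1], [0, 1])] with mapping {0: 0, 1: 1}.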
def gff(args): """ %prog gff *.gff Draw exons for genes based on gff files. Each gff file should contain only one gene, and only the "mRNA" and "CDS" feature will be drawn on the canvas. """ align_choices = ("left", "center", "right") p = OptionParser(gff.__doc__) p.add_option("--align", default="left", choices=align_choices, help="Horizontal alignment [default: %default]") p.add_option("--noUTR", default=False, action="store_true", help="Do not plot UTRs [default: %default]") opts, args = p.parse_args(args) if len(args) < 1: sys.exit(not p.print_help()) fig = plt.figure(1, (8, 5)) root = fig.add_axes([0, 0, 1, 1]) gffiles = args ngenes = len(gffiles) canvas = .6 setups, ratio = get_setups(gffiles, canvas=canvas, noUTR=opts.noUTR) align = opts.align xs = .2 if align == "left" else .8 yinterval = canvas / ngenes ys = .8 tip = .01 for genename, mrnabed, cdsbeds in setups: ExonGlyph(root, xs, ys, mrnabed, cdsbeds, ratio=ratio, align=align) if align == "left": root.text(xs - tip, ys, genename, ha="right", va="center") elif align == "right": root.text(xs + tip, ys, genename, ha="left", va="center") ys -= yinterval root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() figname = "exons.pdf" savefig(figname, dpi=300)
[ "def", "gff", "(", "args", ")", ":", "align_choices", "=", "(", "\"left\"", ",", "\"center\"", ",", "\"right\"", ")", "p", "=", "OptionParser", "(", "gff", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--align\"", ",", "default", "=", "\"left\"", ",", "choices", "=", "align_choices", ",", "help", "=", "\"Horizontal alignment [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--noUTR\"", ",", "default", "=", "False", ",", "action", "=", "\"store_true\"", ",", "help", "=", "\"Do not plot UTRs [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "<", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "fig", "=", "plt", ".", "figure", "(", "1", ",", "(", "8", ",", "5", ")", ")", "root", "=", "fig", ".", "add_axes", "(", "[", "0", ",", "0", ",", "1", ",", "1", "]", ")", "gffiles", "=", "args", "ngenes", "=", "len", "(", "gffiles", ")", "canvas", "=", ".6", "setups", ",", "ratio", "=", "get_setups", "(", "gffiles", ",", "canvas", "=", "canvas", ",", "noUTR", "=", "opts", ".", "noUTR", ")", "align", "=", "opts", ".", "align", "xs", "=", ".2", "if", "align", "==", "\"left\"", "else", ".8", "yinterval", "=", "canvas", "/", "ngenes", "ys", "=", ".8", "tip", "=", ".01", "for", "genename", ",", "mrnabed", ",", "cdsbeds", "in", "setups", ":", "ExonGlyph", "(", "root", ",", "xs", ",", "ys", ",", "mrnabed", ",", "cdsbeds", ",", "ratio", "=", "ratio", ",", "align", "=", "align", ")", "if", "align", "==", "\"left\"", ":", "root", ".", "text", "(", "xs", "-", "tip", ",", "ys", ",", "genename", ",", "ha", "=", "\"right\"", ",", "va", "=", "\"center\"", ")", "elif", "align", "==", "\"right\"", ":", "root", ".", "text", "(", "xs", "+", "tip", ",", "ys", ",", "genename", ",", "ha", "=", "\"left\"", ",", "va", "=", "\"center\"", ")", "ys", "-=", "yinterval", "root", ".", "set_xlim", "(", "0", ",", "1", ")", "root", ".", "set_ylim", "(", "0", ",", "1", ")", "root", ".", "set_axis_off", "(", ")", "figname", "=", "\"exons.pdf\"", "savefig", "(", "figname", ",", "dpi", "=", "300", ")" ]
31.266667
21.088889
def _token_auth(self): """Add ThreatConnect Token Auth to Session.""" return TcExTokenAuth( self, self.args.tc_token, self.args.tc_token_expires, self.args.tc_api_path, self.tcex.log, )
[ "def", "_token_auth", "(", "self", ")", ":", "return", "TcExTokenAuth", "(", "self", ",", "self", ".", "args", ".", "tc_token", ",", "self", ".", "args", ".", "tc_token_expires", ",", "self", ".", "args", ".", "tc_api_path", ",", "self", ".", "tcex", ".", "log", ",", ")" ]
29
12.555556
def from_environment_or_defaults(cls, environment=None): """Create a Run object taking values from the local environment where possible. The run ID comes from WANDB_RUN_ID or is randomly generated. The run mode ("dryrun", or "run") comes from WANDB_MODE or defaults to "dryrun". The run directory comes from WANDB_RUN_DIR or is generated from the run ID. The Run will have a .config attribute but its run directory won't be set by default. """ if environment is None: environment = os.environ run_id = environment.get(env.RUN_ID) resume = environment.get(env.RESUME) storage_id = environment.get(env.RUN_STORAGE_ID) mode = environment.get(env.MODE) disabled = InternalApi().disabled() if not mode and disabled: mode = "dryrun" elif disabled and mode != "dryrun": wandb.termlog( "WARNING: WANDB_MODE is set to run, but W&B was disabled. Run `wandb on` to remove this message") elif disabled: wandb.termlog( 'W&B is disabled in this directory. Run `wandb on` to enable cloud syncing.') group = environment.get(env.RUN_GROUP) job_type = environment.get(env.JOB_TYPE) run_dir = environment.get(env.RUN_DIR) sweep_id = environment.get(env.SWEEP_ID) program = environment.get(env.PROGRAM) description = environment.get(env.DESCRIPTION) args = env.get_args() wandb_dir = env.get_dir() tags = env.get_tags() config = Config.from_environment_or_defaults() run = cls(run_id, mode, run_dir, group, job_type, config, sweep_id, storage_id, program=program, description=description, args=args, wandb_dir=wandb_dir, tags=tags, resume=resume) return run
[ "def", "from_environment_or_defaults", "(", "cls", ",", "environment", "=", "None", ")", ":", "if", "environment", "is", "None", ":", "environment", "=", "os", ".", "environ", "run_id", "=", "environment", ".", "get", "(", "env", ".", "RUN_ID", ")", "resume", "=", "environment", ".", "get", "(", "env", ".", "RESUME", ")", "storage_id", "=", "environment", ".", "get", "(", "env", ".", "RUN_STORAGE_ID", ")", "mode", "=", "environment", ".", "get", "(", "env", ".", "MODE", ")", "disabled", "=", "InternalApi", "(", ")", ".", "disabled", "(", ")", "if", "not", "mode", "and", "disabled", ":", "mode", "=", "\"dryrun\"", "elif", "disabled", "and", "mode", "!=", "\"dryrun\"", ":", "wandb", ".", "termlog", "(", "\"WARNING: WANDB_MODE is set to run, but W&B was disabled. Run `wandb on` to remove this message\"", ")", "elif", "disabled", ":", "wandb", ".", "termlog", "(", "'W&B is disabled in this directory. Run `wandb on` to enable cloud syncing.'", ")", "group", "=", "environment", ".", "get", "(", "env", ".", "RUN_GROUP", ")", "job_type", "=", "environment", ".", "get", "(", "env", ".", "JOB_TYPE", ")", "run_dir", "=", "environment", ".", "get", "(", "env", ".", "RUN_DIR", ")", "sweep_id", "=", "environment", ".", "get", "(", "env", ".", "SWEEP_ID", ")", "program", "=", "environment", ".", "get", "(", "env", ".", "PROGRAM", ")", "description", "=", "environment", ".", "get", "(", "env", ".", "DESCRIPTION", ")", "args", "=", "env", ".", "get_args", "(", ")", "wandb_dir", "=", "env", ".", "get_dir", "(", ")", "tags", "=", "env", ".", "get_tags", "(", ")", "config", "=", "Config", ".", "from_environment_or_defaults", "(", ")", "run", "=", "cls", "(", "run_id", ",", "mode", ",", "run_dir", ",", "group", ",", "job_type", ",", "config", ",", "sweep_id", ",", "storage_id", ",", "program", "=", "program", ",", "description", "=", "description", ",", "args", "=", "args", ",", "wandb_dir", "=", "wandb_dir", ",", "tags", "=", "tags", ",", "resume", "=", "resume", ")", "return", "run" ]
44.690476
17.690476
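A hedged usage sketch for the environment-driven constructor above: it passes an explicit dict rather than mutating os.environ, which the `environment` parameter supports. The WANDB_* variable names follow wandb's documented conventions, but the import path for Run and the `mode` attribute name are assumptions and may differ between versions.

# Sketch only: the import location of Run is assumed, not confirmed by the source.
from wandb.wandb_run import Run  # hypothetical module path

fake_env = {
    'WANDB_RUN_ID': 'abc123',  # read via env.RUN_ID
    'WANDB_MODE': 'dryrun',    # read via env.MODE; forces offline behaviour
}
run = Run.from_environment_or_defaults(environment=fake_env)
print(run.mode)  # expected: 'dryrun' (attribute name assumed from the constructor call)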
def _compute_equations(self, x, verbose=False): '''Compute the values and the normals (gradients) of active constraints. Arguments: | ``x`` -- The unknowns. ''' # compute the error and the normals. normals = [] values = [] signs = [] error = 0.0 if verbose: print() print(' '.join('% 10.3e' % val for val in x), end=' ') active_str = '' for i, (sign, equation) in enumerate(self.equations): value, normal = equation(x) if (i < len(self.lock) and self.lock[i]) or \ (sign==-1 and value > -self.threshold) or \ (sign==0) or (sign==1 and value < self.threshold): values.append(value) normals.append(normal) signs.append(sign) error += value**2 if verbose: active_str += 'X' if i < len(self.lock): self.lock[i] = True elif verbose: active_str += '-' error = np.sqrt(error) normals = np.array(normals, float) values = np.array(values, float) signs = np.array(signs, int) if verbose: print('[%s]' % active_str, end=' ') if error < self.threshold: print('OK') else: print('%.5e' % error) return normals, values, error, signs
[ "def", "_compute_equations", "(", "self", ",", "x", ",", "verbose", "=", "False", ")", ":", "# compute the error and the normals.", "normals", "=", "[", "]", "values", "=", "[", "]", "signs", "=", "[", "]", "error", "=", "0.0", "if", "verbose", ":", "print", "(", ")", "print", "(", "' '", ".", "join", "(", "'% 10.3e'", "%", "val", "for", "val", "in", "x", ")", ",", "end", "=", "' '", ")", "active_str", "=", "''", "for", "i", ",", "(", "sign", ",", "equation", ")", "in", "enumerate", "(", "self", ".", "equations", ")", ":", "value", ",", "normal", "=", "equation", "(", "x", ")", "if", "(", "i", "<", "len", "(", "self", ".", "lock", ")", "and", "self", ".", "lock", "[", "i", "]", ")", "or", "(", "sign", "==", "-", "1", "and", "value", ">", "-", "self", ".", "threshold", ")", "or", "(", "sign", "==", "0", ")", "or", "(", "sign", "==", "1", "and", "value", "<", "self", ".", "threshold", ")", ":", "values", ".", "append", "(", "value", ")", "normals", ".", "append", "(", "normal", ")", "signs", ".", "append", "(", "sign", ")", "error", "+=", "value", "**", "2", "if", "verbose", ":", "active_str", "+=", "'X'", "if", "i", "<", "len", "(", "self", ".", "lock", ")", ":", "self", ".", "lock", "[", "i", "]", "=", "True", "elif", "verbose", ":", "active_str", "+=", "'-'", "error", "=", "np", ".", "sqrt", "(", "error", ")", "normals", "=", "np", ".", "array", "(", "normals", ",", "float", ")", "values", "=", "np", ".", "array", "(", "values", ",", "float", ")", "signs", "=", "np", ".", "array", "(", "signs", ",", "int", ")", "if", "verbose", ":", "print", "(", "'[%s]'", "%", "active_str", ",", "end", "=", "' '", ")", "if", "error", "<", "self", ".", "threshold", ":", "print", "(", "'OK'", ")", "else", ":", "print", "(", "'%.5e'", "%", "error", ")", "return", "normals", ",", "values", ",", "error", ",", "signs" ]
35.195122
13.146341
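The solver above iterates over `self.equations` as (sign, equation) pairs, where each callable must return the constraint value and its gradient at x. A minimal sketch of one such pair, assuming the convention visible in the activity test (sign 0 for equalities, -1/+1 for one-sided constraints):

import numpy as np

def unit_circle(x):
    # equality constraint g(x) = x0^2 + x1^2 - 1 = 0, with its gradient
    value = x[0]**2 + x[1]**2 - 1.0
    normal = np.array([2.0 * x[0], 2.0 * x[1]])
    return value, normal

# sign 0 means the constraint is always active; -1/+1 constraints only
# activate near violation, per the threshold test in the loop above.
equations = [(0, unit_circle)]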
def calculate_angle_bw_tangents(self, base_step, cumulative=False, masked=False):
    """To calculate angle (Radian) between two tangent vectors of global helical axis.

    Parameters
    ----------
    base_step : 1D list
        List of two base-steps for which angle will be calculated.
        For example: **base_step** = ``[5, 50]`` either of following can be calculated.

        (1) angle between tangent vectors of the 5th and 50th base-steps.
        (2) summation over 44 angles that are formed between adjacent tangent
            vectors of the 5-50 bp DNA segment.

        See below to choose between these two types.

    cumulative : bool
        ``Default: False``. If it is false, the first type of angle is
        calculated; otherwise, the second type of angle is calculated, as
        explained in the above example for option ``base_step``.

    masked : bool
        ``Default=False``. To skip specific frames/snapshots.
        ``DNA.mask`` array should be set to use this functionality.
        This array contains boolean (either ``True`` or ``False``) value
        for each frame to mask the frames. Presently, mask array is
        automatically generated during :meth:`DNA.generate_smooth_axis` to
        skip those frames where 3D fitting curve was not successful within
        the given criteria.

    Returns
    -------
    angle : 1D array
        Array of calculated angles; its length equals the number of frames.
        When ``masked`` is applied, the length of this array can be smaller
        than the total number of frames.

    """

    if (len(base_step) != 2):
        raise ValueError("See, documentation for step usage!!!")

    if base_step[0] > base_step[1]:
        raise ValueError("See, documentation for step usage!!!")

    angle = []
    if cumulative:
        angle_tmp = []
        tangent, idx = self.get_parameters(
            'tangent', bp=base_step, bp_range=True, masked=masked)
        for i in range(len(idx) - 1):
            angle_tmp.append(vector_angle(tangent[i], tangent[i + 1]))
        angle = np.asarray(angle_tmp).sum(axis=0)

    else:
        tangent1, idx1 = self.get_parameters(
            'tangent', bp=[base_step[0]], bp_range=False, masked=masked)
        tangent2, idx2 = self.get_parameters(
            'tangent', bp=[base_step[1]], bp_range=False, masked=masked)
        angle = vector_angle(tangent1[0], tangent2[0])

    return np.asarray(angle)
[ "def", "calculate_angle_bw_tangents", "(", "self", ",", "base_step", ",", "cumulative", "=", "False", ",", "masked", "=", "False", ")", ":", "if", "(", "len", "(", "base_step", ")", "!=", "2", ")", ":", "raise", "ValueError", "(", "\"See, documentation for step usage!!!\"", ")", "if", "base_step", "[", "0", "]", ">", "base_step", "[", "1", "]", ":", "raise", "ValueError", "(", "\"See, documentation for step usage!!!\"", ")", "angle", "=", "[", "]", "if", "cumulative", ":", "angle_tmp", "=", "[", "]", "tangent", ",", "idx", "=", "self", ".", "get_parameters", "(", "'tangent'", ",", "bp", "=", "base_step", ",", "bp_range", "=", "True", ",", "masked", "=", "masked", ")", "for", "i", "in", "range", "(", "len", "(", "idx", ")", "-", "1", ")", ":", "angle_tmp", ".", "append", "(", "vector_angle", "(", "tangent", "[", "i", "]", ",", "tangent", "[", "i", "+", "1", "]", ")", ")", "angle", "=", "np", ".", "asarray", "(", "angle_tmp", ")", ".", "sum", "(", "axis", "=", "0", ")", "else", ":", "tangent1", ",", "idx1", "=", "self", ".", "get_parameters", "(", "'tangent'", ",", "bp", "=", "[", "base_step", "[", "0", "]", "]", ",", "bp_range", "=", "False", ",", "masked", "=", "masked", ")", "tangent2", ",", "idx2", "=", "self", ".", "get_parameters", "(", "'tangent'", ",", "bp", "=", "[", "base_step", "[", "1", "]", "]", ",", "bp_range", "=", "False", ",", "masked", "=", "masked", ")", "angle", "=", "vector_angle", "(", "tangent1", "[", "0", "]", ",", "tangent2", "[", "0", "]", ")", "return", "np", ".", "asarray", "(", "angle", ")" ]
41.868852
26.016393
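The method above delegates the geometry to a `vector_angle` helper. A self-contained illustration of the computation it presumably performs (the helper's implementation is an assumption, not taken from the source):

import numpy as np

def vector_angle(v1, v2):
    # assumed behaviour: angle via the normalised dot product
    cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return np.arccos(np.clip(cosang, -1.0, 1.0))

t1 = np.array([0.0, 0.0, 1.0])
t2 = np.array([0.0, 1.0, 1.0]) / np.sqrt(2.0)
print(np.degrees(vector_angle(t1, t2)))  # ~45.0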
def _basic_auth_str(username, password): """Returns a Basic Auth string.""" authstr = 'Basic ' + to_native_string( b64encode(('%s:%s' % (username, password)).encode('latin1')).strip() ) return authstr
[ "def", "_basic_auth_str", "(", "username", ",", "password", ")", ":", "authstr", "=", "'Basic '", "+", "to_native_string", "(", "b64encode", "(", "(", "'%s:%s'", "%", "(", "username", ",", "password", ")", ")", ".", "encode", "(", "'latin1'", ")", ")", ".", "strip", "(", ")", ")", "return", "authstr" ]
27.375
21.875
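A quick self-contained check of the construction above, inlining the base64 step without the library's `to_native_string` shim; base64('user:pass') is 'dXNlcjpwYXNz'.

from base64 import b64encode

def basic_auth_str(username, password):
    # same construction as above, decoded to str for display
    token = b64encode(('%s:%s' % (username, password)).encode('latin1')).strip()
    return 'Basic ' + token.decode('ascii')

print(basic_auth_str('user', 'pass'))  # Basic dXNlcjpwYXNz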
def apply(self, flag_set: AbstractSet[Flag], operand: AbstractSet[Flag]) \ -> FrozenSet[Flag]: """Apply the flag operation on the two sets, returning the result. Args: flag_set: The flag set being operated on. operand: The flags to use as the operand. """ if self == FlagOp.ADD: return frozenset(flag_set | operand) elif self == FlagOp.DELETE: return frozenset(flag_set - operand) else: # op == FlagOp.REPLACE return frozenset(operand)
[ "def", "apply", "(", "self", ",", "flag_set", ":", "AbstractSet", "[", "Flag", "]", ",", "operand", ":", "AbstractSet", "[", "Flag", "]", ")", "->", "FrozenSet", "[", "Flag", "]", ":", "if", "self", "==", "FlagOp", ".", "ADD", ":", "return", "frozenset", "(", "flag_set", "|", "operand", ")", "elif", "self", "==", "FlagOp", ".", "DELETE", ":", "return", "frozenset", "(", "flag_set", "-", "operand", ")", "else", ":", "# op == FlagOp.REPLACE", "return", "frozenset", "(", "operand", ")" ]
36.266667
14.2
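A behaviour sketch of the three branches above, using plain strings in place of Flag objects since the frozenset algebra is identical:

current = frozenset({'\\Seen', '\\Draft'})
operand = frozenset({'\\Flagged', '\\Draft'})

print(sorted(current | operand))  # ADD     -> ['\\Draft', '\\Flagged', '\\Seen']
print(sorted(current - operand))  # DELETE  -> ['\\Seen']
print(sorted(operand))            # REPLACE -> ['\\Draft', '\\Flagged']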
def read_eeprom_calibration(self, temperature=False): # use default values for temperature, EEPROM values are usually not calibrated and random '''Reading EEPROM calibration for power regulators and temperature ''' header = self.get_format() if header == self.HEADER_V1: data = self._read_eeprom(self.CAL_DATA_ADDR, size=calcsize(self.CAL_DATA_V1_FORMAT)) for idx, channel in enumerate(self._ch_cal.iterkeys()): ch_data = data[idx * calcsize(self.CAL_DATA_CH_V1_FORMAT):(idx + 1) * calcsize(self.CAL_DATA_CH_V1_FORMAT)] values = unpack_from(self.CAL_DATA_CH_V1_FORMAT, ch_data) self._ch_cal[channel]['name'] = "".join([c for c in values[0] if (c in string.printable)]) # values[0].strip() self._ch_cal[channel]['default'] = values[1] self._ch_cal[channel]['ADCI']['gain'] = values[2] self._ch_cal[channel]['ADCI']['offset'] = values[3] self._ch_cal[channel]['ADCI']['iq_gain'] = values[4] self._ch_cal[channel]['ADCI']['iq_offset'] = values[5] self._ch_cal[channel]['ADCV']['gain'] = values[6] self._ch_cal[channel]['ADCV']['offset'] = values[7] self._ch_cal[channel]['DACV']['gain'] = values[8] self._ch_cal[channel]['DACV']['offset'] = values[9] const_data = data[-calcsize(self.CAL_DATA_CONST_V1_FORMAT):] values = unpack_from(self.CAL_DATA_CONST_V1_FORMAT, const_data) if temperature: for channel in self._ch_cal.keys(): self._ch_cal[channel]['VNTC']['B_NTC'] = values[0] self._ch_cal[channel]['VNTC']['R1'] = values[1] self._ch_cal[channel]['VNTC']['R2'] = values[2] self._ch_cal[channel]['VNTC']['R4'] = values[3] self._ch_cal[channel]['VNTC']['R_NTC_25'] = values[4] self._ch_cal[channel]['VNTC']['VREF'] = values[5] else: raise ValueError('EEPROM data format not supported (header: %s)' % header)
[ "def", "read_eeprom_calibration", "(", "self", ",", "temperature", "=", "False", ")", ":", "# use default values for temperature, EEPROM values are usually not calibrated and random", "header", "=", "self", ".", "get_format", "(", ")", "if", "header", "==", "self", ".", "HEADER_V1", ":", "data", "=", "self", ".", "_read_eeprom", "(", "self", ".", "CAL_DATA_ADDR", ",", "size", "=", "calcsize", "(", "self", ".", "CAL_DATA_V1_FORMAT", ")", ")", "for", "idx", ",", "channel", "in", "enumerate", "(", "self", ".", "_ch_cal", ".", "iterkeys", "(", ")", ")", ":", "ch_data", "=", "data", "[", "idx", "*", "calcsize", "(", "self", ".", "CAL_DATA_CH_V1_FORMAT", ")", ":", "(", "idx", "+", "1", ")", "*", "calcsize", "(", "self", ".", "CAL_DATA_CH_V1_FORMAT", ")", "]", "values", "=", "unpack_from", "(", "self", ".", "CAL_DATA_CH_V1_FORMAT", ",", "ch_data", ")", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'name'", "]", "=", "\"\"", ".", "join", "(", "[", "c", "for", "c", "in", "values", "[", "0", "]", "if", "(", "c", "in", "string", ".", "printable", ")", "]", ")", "# values[0].strip()", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'default'", "]", "=", "values", "[", "1", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCI'", "]", "[", "'gain'", "]", "=", "values", "[", "2", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCI'", "]", "[", "'offset'", "]", "=", "values", "[", "3", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCI'", "]", "[", "'iq_gain'", "]", "=", "values", "[", "4", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCI'", "]", "[", "'iq_offset'", "]", "=", "values", "[", "5", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCV'", "]", "[", "'gain'", "]", "=", "values", "[", "6", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'ADCV'", "]", "[", "'offset'", "]", "=", "values", "[", "7", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'DACV'", "]", "[", "'gain'", "]", "=", "values", "[", "8", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'DACV'", "]", "[", "'offset'", "]", "=", "values", "[", "9", "]", "const_data", "=", "data", "[", "-", "calcsize", "(", "self", ".", "CAL_DATA_CONST_V1_FORMAT", ")", ":", "]", "values", "=", "unpack_from", "(", "self", ".", "CAL_DATA_CONST_V1_FORMAT", ",", "const_data", ")", "if", "temperature", ":", "for", "channel", "in", "self", ".", "_ch_cal", ".", "keys", "(", ")", ":", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'VNTC'", "]", "[", "'B_NTC'", "]", "=", "values", "[", "0", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'VNTC'", "]", "[", "'R1'", "]", "=", "values", "[", "1", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'VNTC'", "]", "[", "'R2'", "]", "=", "values", "[", "2", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'VNTC'", "]", "[", "'R4'", "]", "=", "values", "[", "3", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'VNTC'", "]", "[", "'R_NTC_25'", "]", "=", "values", "[", "4", "]", "self", ".", "_ch_cal", "[", "channel", "]", "[", "'VNTC'", "]", "[", "'VREF'", "]", "=", "values", "[", "5", "]", "else", ":", "raise", "ValueError", "(", "'EEPROM data format not supported (header: %s)'", "%", "header", ")" ]
68.258065
33.354839
def _set(self, path, value, build_dir=''):
    """Create and set a node by path

    This creates a node from a filename or pandas DataFrame.

    If `value` is a filename, it must be relative to `build_dir`.
        `value` is stored as the export path.
    `build_dir` defaults to the current directory, but may be any
        arbitrary directory path, including an absolute path.

    Example:
        # Set `pkg.graph_image` to the data in '/home/user/bin/graph.png'.
        # If exported, it would export to '<export_dir>/bin/graph.png'
        `pkg._set(['graph_image'], 'bin/graph.png', '/home/user')`

    :param path: Path list -- I.e. ['examples', 'new_node']
    :param value: Pandas dataframe, or a filename relative to build_dir
    :param build_dir: Directory containing `value` if value is a filename.
    """
    assert isinstance(path, list) and len(path) > 0

    if isinstance(value, pd.DataFrame):
        metadata = {SYSTEM_METADATA: {'target': TargetType.PANDAS.value}}
    elif isinstance(value, np.ndarray):
        metadata = {SYSTEM_METADATA: {'target': TargetType.NUMPY.value}}
    elif isinstance(value, string_types + (bytes,)):
        # bytes -> string for consistency when retrieving metadata
        value = value.decode() if isinstance(value, bytes) else value
        if os.path.isabs(value):
            raise ValueError("Invalid path: expected a relative path, but received {!r}".format(value))
        # Security: filepath does not and should not retain the build_dir's location!
        metadata = {SYSTEM_METADATA: {'filepath': value, 'transform': 'id'}}
        if build_dir:
            value = os.path.join(build_dir, value)
    else:
        accepted_types = tuple(set((pd.DataFrame, np.ndarray, bytes) + string_types))
        raise TypeError("Bad value type: Expected instance of any type {!r}, but received type {!r}"
                        .format(accepted_types, type(value)), repr(value)[0:100])

    for key in path:
        if not is_nodename(key):
            raise ValueError("Invalid name for node: {}".format(key))

    node = self
    for key in path[:-1]:
        child = node._get(key)
        if not isinstance(child, GroupNode):
            child = GroupNode({})
            node[key] = child

        node = child

    key = path[-1]
    node[key] = DataNode(None, None, value, metadata)
[ "def", "_set", "(", "self", ",", "path", ",", "value", ",", "build_dir", "=", "''", ")", ":", "assert", "isinstance", "(", "path", ",", "list", ")", "and", "len", "(", "path", ")", ">", "0", "if", "isinstance", "(", "value", ",", "pd", ".", "DataFrame", ")", ":", "metadata", "=", "{", "SYSTEM_METADATA", ":", "{", "'target'", ":", "TargetType", ".", "PANDAS", ".", "value", "}", "}", "elif", "isinstance", "(", "value", ",", "np", ".", "ndarray", ")", ":", "metadata", "=", "{", "SYSTEM_METADATA", ":", "{", "'target'", ":", "TargetType", ".", "NUMPY", ".", "value", "}", "}", "elif", "isinstance", "(", "value", ",", "string_types", "+", "(", "bytes", ",", ")", ")", ":", "# bytes -> string for consistency when retrieving metadata", "value", "=", "value", ".", "decode", "(", ")", "if", "isinstance", "(", "value", ",", "bytes", ")", "else", "value", "if", "os", ".", "path", ".", "isabs", "(", "value", ")", ":", "raise", "ValueError", "(", "\"Invalid path: expected a relative path, but received {!r}\"", ".", "format", "(", "value", ")", ")", "# Security: filepath does not and should not retain the build_dir's location!", "metadata", "=", "{", "SYSTEM_METADATA", ":", "{", "'filepath'", ":", "value", ",", "'transform'", ":", "'id'", "}", "}", "if", "build_dir", ":", "value", "=", "os", ".", "path", ".", "join", "(", "build_dir", ",", "value", ")", "else", ":", "accepted_types", "=", "tuple", "(", "set", "(", "(", "pd", ".", "DataFrame", ",", "np", ".", "ndarray", ",", "bytes", ")", "+", "string_types", ")", ")", "raise", "TypeError", "(", "\"Bad value type: Expected instance of any type {!r}, but received type {!r}\"", ".", "format", "(", "accepted_types", ",", "type", "(", "value", ")", ")", ",", "repr", "(", "value", ")", "[", "0", ":", "100", "]", ")", "for", "key", "in", "path", ":", "if", "not", "is_nodename", "(", "key", ")", ":", "raise", "ValueError", "(", "\"Invalid name for node: {}\"", ".", "format", "(", "key", ")", ")", "node", "=", "self", "for", "key", "in", "path", "[", ":", "-", "1", "]", ":", "child", "=", "node", ".", "_get", "(", "key", ")", "if", "not", "isinstance", "(", "child", ",", "GroupNode", ")", ":", "child", "=", "GroupNode", "(", "{", "}", ")", "node", "[", "key", "]", "=", "child", "node", "=", "child", "key", "=", "path", "[", "-", "1", "]", "node", "[", "key", "]", "=", "DataNode", "(", "None", ",", "None", ",", "value", ",", "metadata", ")" ]
45.018182
26.345455
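A hedged usage sketch for the setter above; `pkg` is a placeholder for a package root node exposing this method, and the paths mirror the docstring's example:

import pandas as pd

df = pd.DataFrame({'x': [1, 2, 3]})
pkg._set(['examples', 'numbers'], df)  # stores a DataFrame node (TargetType.PANDAS)
pkg._set(['examples', 'image'], 'bin/graph.png', '/home/user')  # stores a file reference
# absolute paths are rejected before any node is created:
# pkg._set(['bad'], '/etc/passwd')  -> ValueError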
def create_table(self, model):
    """Create model and table in database.

    >>> migrator.create_table(model)
    """
    self.orm[model._meta.table_name] = model
    model._meta.database = self.database
    self.ops.append(model.create_table)
    return model
[ "def", "create_table", "(", "self", ",", "model", ")", ":", "self", ".", "orm", "[", "model", ".", "_meta", ".", "table_name", "]", "=", "model", "model", ".", "_meta", ".", "database", "=", "self", ".", "database", "self", ".", "ops", ".", "append", "(", "model", ".", "create_table", ")", "return", "model" ]
31.222222
9.555556
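The migrator hook above only queues `model.create_table`; a later run step flushes `self.ops` against the database. A minimal peewee sketch of the call that eventually executes (the in-memory database is illustrative, not part of the migrator):

import peewee as pw

db = pw.SqliteDatabase(':memory:')

class Tag(pw.Model):
    name = pw.CharField()

    class Meta:
        database = db

db.connect()
Tag.create_table()  # the bound method the migrator appends to self.ops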
def set_server_setting(settings, server=_DEFAULT_SERVER):
    '''
    Set the value of the setting for the SMTP virtual server.

    .. note::

        The setting names are case-sensitive.

    :param str settings: A dictionary of the setting names and their values.
    :param str server: The SMTP server name.

    :return: A boolean representing whether all changes succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}"
    '''
    if not settings:
        _LOG.warning('No settings provided')
        return False

    # Some fields are formatted like '{data}'. Salt tries to convert these to dicts
    # automatically on input, so convert them back to the proper format.
    settings = _normalize_server_settings(**settings)

    current_settings = get_server_setting(settings=settings.keys(), server=server)

    if settings == current_settings:
        _LOG.debug('Settings already contain the provided values.')
        return True

    # Note that we must fetch all properties of IIsSmtpServerSetting below, since
    # filtering for specific properties and then attempting to set them will cause
    # an error like: wmi.x_wmi Unexpected COM Error -2147352567
    with salt.utils.winapi.Com():
        try:
            connection = wmi.WMI(namespace=_WMI_NAMESPACE)
            objs = connection.IIsSmtpServerSetting(Name=server)[0]
        except wmi.x_wmi as error:
            _LOG.error('Encountered WMI error: %s', error.com_error)
        except (AttributeError, IndexError) as error:
            _LOG.error('Error getting IIsSmtpServerSetting: %s', error)

        for setting in settings:
            if six.text_type(settings[setting]) != six.text_type(current_settings[setting]):
                try:
                    setattr(objs, setting, settings[setting])
                except wmi.x_wmi as error:
                    _LOG.error('Encountered WMI error: %s', error.com_error)
                except AttributeError as error:
                    _LOG.error('Error setting %s: %s', setting, error)

    # Get the settings post-change so that we can verify that all properties
    # were modified successfully. Track the ones that weren't.
    new_settings = get_server_setting(settings=settings.keys(), server=server)
    failed_settings = dict()

    for setting in settings:
        if six.text_type(settings[setting]) != six.text_type(new_settings[setting]):
            failed_settings[setting] = settings[setting]

    if failed_settings:
        _LOG.error('Failed to change settings: %s', failed_settings)
        return False

    _LOG.debug('Settings configured successfully: %s', settings.keys())
    return True
[ "def", "set_server_setting", "(", "settings", ",", "server", "=", "_DEFAULT_SERVER", ")", ":", "if", "not", "settings", ":", "_LOG", ".", "warning", "(", "'No settings provided'", ")", "return", "False", "# Some fields are formatted like '{data}'. Salt tries to convert these to dicts", "# automatically on input, so convert them back to the proper format.", "settings", "=", "_normalize_server_settings", "(", "*", "*", "settings", ")", "current_settings", "=", "get_server_setting", "(", "settings", "=", "settings", ".", "keys", "(", ")", ",", "server", "=", "server", ")", "if", "settings", "==", "current_settings", ":", "_LOG", ".", "debug", "(", "'Settings already contain the provided values.'", ")", "return", "True", "# Note that we must fetch all properties of IIsSmtpServerSetting below, since", "# filtering for specific properties and then attempting to set them will cause", "# an error like: wmi.x_wmi Unexpected COM Error -2147352567", "with", "salt", ".", "utils", ".", "winapi", ".", "Com", "(", ")", ":", "try", ":", "connection", "=", "wmi", ".", "WMI", "(", "namespace", "=", "_WMI_NAMESPACE", ")", "objs", "=", "connection", ".", "IIsSmtpServerSetting", "(", "Name", "=", "server", ")", "[", "0", "]", "except", "wmi", ".", "x_wmi", "as", "error", ":", "_LOG", ".", "error", "(", "'Encountered WMI error: %s'", ",", "error", ".", "com_error", ")", "except", "(", "AttributeError", ",", "IndexError", ")", "as", "error", ":", "_LOG", ".", "error", "(", "'Error getting IIsSmtpServerSetting: %s'", ",", "error", ")", "for", "setting", "in", "settings", ":", "if", "six", ".", "text_type", "(", "settings", "[", "setting", "]", ")", "!=", "six", ".", "text_type", "(", "current_settings", "[", "setting", "]", ")", ":", "try", ":", "setattr", "(", "objs", ",", "setting", ",", "settings", "[", "setting", "]", ")", "except", "wmi", ".", "x_wmi", "as", "error", ":", "_LOG", ".", "error", "(", "'Encountered WMI error: %s'", ",", "error", ".", "com_error", ")", "except", "AttributeError", "as", "error", ":", "_LOG", ".", "error", "(", "'Error setting %s: %s'", ",", "setting", ",", "error", ")", "# Get the settings post-change so that we can verify tht all properties", "# were modified successfully. Track the ones that weren't.", "new_settings", "=", "get_server_setting", "(", "settings", "=", "settings", ".", "keys", "(", ")", ",", "server", "=", "server", ")", "failed_settings", "=", "dict", "(", ")", "for", "setting", "in", "settings", ":", "if", "six", ".", "text_type", "(", "settings", "[", "setting", "]", ")", "!=", "six", ".", "text_type", "(", "new_settings", "[", "setting", "]", ")", ":", "failed_settings", "[", "setting", "]", "=", "settings", "[", "setting", "]", "if", "failed_settings", ":", "_LOG", ".", "error", "(", "'Failed to change settings: %s'", ",", "failed_settings", ")", "return", "False", "_LOG", ".", "debug", "(", "'Settings configured successfully: %s'", ",", "settings", ".", "keys", "(", ")", ")", "return", "True" ]
38.898551
27.217391
def create(self, store_id, product_id, data):
    """
    Add a new image to the product.

    :param store_id: The store id.
    :type store_id: :py:class:`str`
    :param product_id: The id for the product of a store.
    :type product_id: :py:class:`str`
    :param data: The request body parameters
    :type data: :py:class:`dict`
    data = {
        "id": string*,
        "url": string*
    }
    """
    self.store_id = store_id
    self.product_id = product_id
    if 'id' not in data:
        raise KeyError('The product image must have an id')
    if 'url' not in data:
        raise KeyError('The product image must have a url')
    response = self._mc_client._post(url=self._build_path(store_id, 'products', product_id, 'images'), data=data)
    if response is not None:
        self.image_id = response['id']
    else:
        self.image_id = None
    return response
[ "def", "create", "(", "self", ",", "store_id", ",", "product_id", ",", "data", ")", ":", "self", ".", "store_id", "=", "store_id", "self", ".", "product_id", "=", "product_id", "if", "'id'", "not", "in", "data", ":", "raise", "KeyError", "(", "'The product image must have an id'", ")", "if", "'title'", "not", "in", "data", ":", "raise", "KeyError", "(", "'The product image must have a url'", ")", "response", "=", "self", ".", "_mc_client", ".", "_post", "(", "url", "=", "self", ".", "_build_path", "(", "store_id", ",", "'products'", ",", "product_id", ",", "'images'", ")", ",", "data", "=", "data", ")", "if", "response", "is", "not", "None", ":", "self", ".", "image_id", "=", "response", "[", "'id'", "]", "else", ":", "self", ".", "image_id", "=", "None", "return", "response" ]
35.481481
14.222222
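A hedged usage sketch for the endpoint above; the client attribute path is an assumption about the wrapper's layout, and the ids are placeholders:

response = client.stores.products.images.create(
    store_id='store_1',
    product_id='prod_9',
    data={'id': 'img_1', 'url': 'https://example.com/shirt.png'},
)
# omitting 'id' or 'url' raises KeyError before any request is made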
def has_option(self, section, option): """Checks for the existence of a given option in a given section. Args: section (str): name of section option (str): name of option Returns: bool: whether the option exists in the given section """ if section not in self.sections(): return False else: option = self.optionxform(option) return option in self[section]
[ "def", "has_option", "(", "self", ",", "section", ",", "option", ")", ":", "if", "section", "not", "in", "self", ".", "sections", "(", ")", ":", "return", "False", "else", ":", "option", "=", "self", ".", "optionxform", "(", "option", ")", "return", "option", "in", "self", "[", "section", "]" ]
30.866667
14.066667
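The point of the override above is that a missing section yields False, whereas the standard ConfigParser.has_option raises NoSectionError. A self-contained demo of the same contract (it uses options() instead of self[section] so the demo does not depend on the original class's item access):

from configparser import ConfigParser

class SafeParser(ConfigParser):
    def has_option(self, section, option):
        if section not in self.sections():
            return False
        return self.optionxform(option) in self.options(section)

cfg = SafeParser()
cfg.read_string('[server]\nport = 8080\n')
print(cfg.has_option('server', 'PORT'))  # True  (optionxform lower-cases keys)
print(cfg.has_option('db', 'port'))      # False (no exception for a missing section)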
def normalise_correlation_coefficient(image_tile_dict, transformed_array, template, normed_tolerance=1):
    """As above, but for when the correlation coefficient matching method is used
    """
    template_mean = np.mean(template)
    template_minus_mean = template - template_mean
    template_norm = np.linalg.norm(template_minus_mean)
    image_norms = {(x, y): np.linalg.norm(image_tile_dict[(x, y)] - np.mean(image_tile_dict[(x, y)])) * template_norm for (x, y) in image_tile_dict.keys()}
    match_points = list(image_tile_dict.keys())
    # for correlation, we then need to transform back to get the correct value for division
    h, w = template.shape
    image_matches_normalised = {match_points[i]: transformed_array[match_points[i][0], match_points[i][1]] / image_norms[match_points[i]] for i in range(len(match_points))}
    normalised_matches = {key: value for key, value in image_matches_normalised.items() if np.round(value, decimals=3) >= normed_tolerance}
    return list(normalised_matches.keys())
[ "def", "normalise_correlation_coefficient", "(", "image_tile_dict", ",", "transformed_array", ",", "template", ",", "normed_tolerance", "=", "1", ")", ":", "template_mean", "=", "np", ".", "mean", "(", "template", ")", "template_minus_mean", "=", "template", "-", "template_mean", "template_norm", "=", "np", ".", "linalg", ".", "norm", "(", "template_minus_mean", ")", "image_norms", "=", "{", "(", "x", ",", "y", ")", ":", "np", ".", "linalg", ".", "norm", "(", "image_tile_dict", "[", "(", "x", ",", "y", ")", "]", "-", "np", ".", "mean", "(", "image_tile_dict", "[", "(", "x", ",", "y", ")", "]", ")", ")", "*", "template_norm", "for", "(", "x", ",", "y", ")", "in", "image_tile_dict", ".", "keys", "(", ")", "}", "match_points", "=", "image_tile_dict", ".", "keys", "(", ")", "# for correlation, then need to transofrm back to get correct value for division", "h", ",", "w", "=", "template", ".", "shape", "image_matches_normalised", "=", "{", "match_points", "[", "i", "]", ":", "transformed_array", "[", "match_points", "[", "i", "]", "[", "0", "]", ",", "match_points", "[", "i", "]", "[", "1", "]", "]", "/", "image_norms", "[", "match_points", "[", "i", "]", "]", "for", "i", "in", "range", "(", "len", "(", "match_points", ")", ")", "}", "normalised_matches", "=", "{", "key", ":", "value", "for", "key", ",", "value", "in", "image_matches_normalised", ".", "items", "(", ")", "if", "np", ".", "round", "(", "value", ",", "decimals", "=", "3", ")", ">=", "normed_tolerance", "}", "return", "normalised_matches", ".", "keys", "(", ")" ]
74.923077
37.692308
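A self-contained illustration of the normalisation above for a single tile, assuming `transformed_array` holds the raw cross-correlation of the image with the mean-subtracted template; dividing by the product of norms bounds a perfect match at 1.0:

import numpy as np

template = np.array([[1.0, 2.0], [3.0, 4.0]])
tile = template.copy()  # a perfect match

t0 = template - template.mean()
raw = np.sum(tile * t0)  # equals sum((tile - mean(tile)) * t0), since t0 sums to 0
norm = np.linalg.norm(tile - tile.mean()) * np.linalg.norm(t0)
print(raw / norm)  # 1.0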
def _kernel_versions_debian():
    '''
    Last installed kernel name, for Debian based systems.

    Returns:
        List with possible names of last installed kernel
        as they are probably interpreted in output of `uname -a` command.
    '''
    kernel_get_selections = __salt__['cmd.run']('dpkg --get-selections linux-image-*')
    kernels = []
    kernel_versions = []

    for line in kernel_get_selections.splitlines():
        kernels.append(line)

    try:
        kernel = kernels[-2]
    except IndexError:
        kernel = kernels[0]

    # selection lines look like 'linux-image-4.15.0-20-generic\t\tinstall';
    # keep only the package name (str.rstrip takes a character set, so
    # rstrip('\t\tinstall') would mangle names ending in those letters)
    kernel = kernel.split('\t')[0]
    kernel_get_version = __salt__['cmd.run']('apt-cache policy ' + kernel)

    for line in kernel_get_version.splitlines():
        if line.startswith(' Installed: '):
            # extract the version after 'Installed:' (str.strip with a
            # character set would also eat matching trailing characters)
            kernel_v = line.partition('Installed:')[2].strip()
            kernel_versions.append(kernel_v)
            break

    if __grains__['os'] == 'Ubuntu':
        kernel_v = kernel_versions[0].rsplit('.', 1)
        kernel_ubuntu_generic = kernel_v[0] + '-generic #' + kernel_v[1]
        kernel_ubuntu_lowlatency = kernel_v[0] + '-lowlatency #' + kernel_v[1]
        kernel_versions.extend([kernel_ubuntu_generic, kernel_ubuntu_lowlatency])

    return kernel_versions
[ "def", "_kernel_versions_debian", "(", ")", ":", "kernel_get_selections", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'dpkg --get-selections linux-image-*'", ")", "kernels", "=", "[", "]", "kernel_versions", "=", "[", "]", "for", "line", "in", "kernel_get_selections", ".", "splitlines", "(", ")", ":", "kernels", ".", "append", "(", "line", ")", "try", ":", "kernel", "=", "kernels", "[", "-", "2", "]", "except", "IndexError", ":", "kernel", "=", "kernels", "[", "0", "]", "kernel", "=", "kernel", ".", "rstrip", "(", "'\\t\\tinstall'", ")", "kernel_get_version", "=", "__salt__", "[", "'cmd.run'", "]", "(", "'apt-cache policy '", "+", "kernel", ")", "for", "line", "in", "kernel_get_version", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "' Installed: '", ")", ":", "kernel_v", "=", "line", ".", "strip", "(", "' Installed: '", ")", "kernel_versions", ".", "append", "(", "kernel_v", ")", "break", "if", "__grains__", "[", "'os'", "]", "==", "'Ubuntu'", ":", "kernel_v", "=", "kernel_versions", "[", "0", "]", ".", "rsplit", "(", "'.'", ",", "1", ")", "kernel_ubuntu_generic", "=", "kernel_v", "[", "0", "]", "+", "'-generic #'", "+", "kernel_v", "[", "1", "]", "kernel_ubuntu_lowlatency", "=", "kernel_v", "[", "0", "]", "+", "'-lowlatency #'", "+", "kernel_v", "[", "1", "]", "kernel_versions", ".", "extend", "(", "[", "kernel_ubuntu_generic", ",", "kernel_ubuntu_lowlatency", "]", ")", "return", "kernel_versions" ]
33.444444
24.111111
def from_json(cls, json):
    """Deserialize from json.

    Args:
      json: a dict of json compatible fields.

    Returns:
      a KeyRanges object.

    Raises:
      ValueError: if the json is invalid.
    """
    if json["name"] in _KEYRANGES_CLASSES:
        return _KEYRANGES_CLASSES[json["name"]].from_json(json)
    raise ValueError("Invalid json %s" % json)
[ "def", "from_json", "(", "cls", ",", "json", ")", ":", "if", "json", "[", "\"name\"", "]", "in", "_KEYRANGES_CLASSES", ":", "return", "_KEYRANGES_CLASSES", "[", "json", "[", "\"name\"", "]", "]", ".", "from_json", "(", "json", ")", "raise", "ValueError", "(", "\"Invalid json %s\"", ",", "json", ")" ]
23.466667
18.133333
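A minimal, self-contained model of the dispatch pattern above: a name-to-class registry routing deserialisation to the right subclass. The FixedKeyRanges class is illustrative, not one of the library's actual KeyRanges types.

_KEYRANGES_CLASSES = {}

class FixedKeyRanges(object):
    @classmethod
    def from_json(cls, json):
        return cls()

_KEYRANGES_CLASSES['FixedKeyRanges'] = FixedKeyRanges

def from_json(json):
    if json["name"] in _KEYRANGES_CLASSES:
        return _KEYRANGES_CLASSES[json["name"]].from_json(json)
    raise ValueError("Invalid json %s" % json)

print(type(from_json({"name": "FixedKeyRanges"})).__name__)  # FixedKeyRanges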
def next_down(x, context=None):
    """next_down(x): return the greatest representable float that's
    strictly less than x.

    This operation is quiet: flags are not affected.

    """
    x = BigFloat._implicit_convert(x)

    # make sure we don't alter any flags
    with _saved_flags():
        with (context if context is not None else EmptyContext):
            with RoundTowardNegative:
                # nan maps to itself
                if is_nan(x):
                    return +x

                # round to current context; if value changes, we're done
                y = +x
                if y != x:
                    return y

                # otherwise apply mpfr_nextbelow
                bf = y.copy()
                mpfr.mpfr_nextbelow(bf)

                # apply + one more time to deal with subnormals
                return +bf
[ "def", "next_down", "(", "x", ",", "context", "=", "None", ")", ":", "x", "=", "BigFloat", ".", "_implicit_convert", "(", "x", ")", "# make sure we don't alter any flags", "with", "_saved_flags", "(", ")", ":", "with", "(", "context", "if", "context", "is", "not", "None", "else", "EmptyContext", ")", ":", "with", "RoundTowardNegative", ":", "# nan maps to itself", "if", "is_nan", "(", "x", ")", ":", "return", "+", "x", "# round to current context; if value changes, we're done", "y", "=", "+", "x", "if", "y", "!=", "x", ":", "return", "y", "# otherwise apply mpfr_nextabove", "bf", "=", "y", ".", "copy", "(", ")", "mpfr", ".", "mpfr_nextbelow", "(", "bf", ")", "# apply + one more time to deal with subnormals", "return", "+", "bf" ]
32
15.461538
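A hedged usage sketch, assuming this is the `bigfloat` package's next_down; in a 53-bit context the value just below 1.0 is 1 - 2**-53:

from bigfloat import next_down, precision

with precision(53):  # an IEEE-double-like context
    x = next_down(1.0)
    print(x < 1.0)  # True
    print(x)        # ~0.9999999999999999 (i.e. 1 - 2**-53)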
def generate_form(args): """Generate form.""" form_name = args.get('<form>') logger.info('Start generating form.') _generate_form(form_name) logger.info('Finish generating form.')
[ "def", "generate_form", "(", "args", ")", ":", "form_name", "=", "args", ".", "get", "(", "'<form>'", ")", "logger", ".", "info", "(", "'Start generating form.'", ")", "_generate_form", "(", "form_name", ")", "logger", ".", "info", "(", "'Finish generating form.'", ")" ]
32.333333
6
def __parse_fc_data(fc_data): """Parse the forecast data from the json section.""" fc = [] for day in fc_data: fcdata = { CONDITION: __cond_from_desc( __get_str( day, __WEATHERDESCRIPTION) ), TEMPERATURE: __get_float(day, __MAXTEMPERATURE), MIN_TEMP: __get_float(day, __MINTEMPERATURE), MAX_TEMP: __get_float(day, __MAXTEMPERATURE), SUN_CHANCE: __get_int(day, __SUNCHANCE), RAIN_CHANCE: __get_int(day, __RAINCHANCE), RAIN: __get_float(day, __MMRAINMAX), MIN_RAIN: __get_float(day, __MMRAINMIN), # new MAX_RAIN: __get_float(day, __MMRAINMAX), # new SNOW: 0, # for compatibility WINDFORCE: __get_int(day, __WIND), WINDDIRECTION: __get_str(day, __WINDDIRECTION), # new DATETIME: __to_localdatetime(__get_str(day, __DAY)), } fcdata[CONDITION][IMAGE] = day[__ICONURL] fc.append(fcdata) return fc
[ "def", "__parse_fc_data", "(", "fc_data", ")", ":", "fc", "=", "[", "]", "for", "day", "in", "fc_data", ":", "fcdata", "=", "{", "CONDITION", ":", "__cond_from_desc", "(", "__get_str", "(", "day", ",", "__WEATHERDESCRIPTION", ")", ")", ",", "TEMPERATURE", ":", "__get_float", "(", "day", ",", "__MAXTEMPERATURE", ")", ",", "MIN_TEMP", ":", "__get_float", "(", "day", ",", "__MINTEMPERATURE", ")", ",", "MAX_TEMP", ":", "__get_float", "(", "day", ",", "__MAXTEMPERATURE", ")", ",", "SUN_CHANCE", ":", "__get_int", "(", "day", ",", "__SUNCHANCE", ")", ",", "RAIN_CHANCE", ":", "__get_int", "(", "day", ",", "__RAINCHANCE", ")", ",", "RAIN", ":", "__get_float", "(", "day", ",", "__MMRAINMAX", ")", ",", "MIN_RAIN", ":", "__get_float", "(", "day", ",", "__MMRAINMIN", ")", ",", "# new", "MAX_RAIN", ":", "__get_float", "(", "day", ",", "__MMRAINMAX", ")", ",", "# new", "SNOW", ":", "0", ",", "# for compatibility", "WINDFORCE", ":", "__get_int", "(", "day", ",", "__WIND", ")", ",", "WINDDIRECTION", ":", "__get_str", "(", "day", ",", "__WINDDIRECTION", ")", ",", "# new", "DATETIME", ":", "__to_localdatetime", "(", "__get_str", "(", "day", ",", "__DAY", ")", ")", ",", "}", "fcdata", "[", "CONDITION", "]", "[", "IMAGE", "]", "=", "day", "[", "__ICONURL", "]", "fc", ".", "append", "(", "fcdata", ")", "return", "fc" ]
38.703704
16.481481
def reverse(self, request, view_name): """ Returns the URL of this tenant. """ http_type = 'https://' if request.is_secure() else 'http://' domain = get_current_site(request).domain url = ''.join((http_type, self.schema_name, '.', domain, reverse(view_name))) return url
[ "def", "reverse", "(", "self", ",", "request", ",", "view_name", ")", ":", "http_type", "=", "'https://'", "if", "request", ".", "is_secure", "(", ")", "else", "'http://'", "domain", "=", "get_current_site", "(", "request", ")", ".", "domain", "url", "=", "''", ".", "join", "(", "(", "http_type", ",", "self", ".", "schema_name", ",", "'.'", ",", "domain", ",", "reverse", "(", "view_name", ")", ")", ")", "return", "url" ]
29
20.636364
def _checkSetupNeeded(self, message): """Check an id_res message to see if it is a checkid_immediate cancel response. @raises SetupNeededError: if it is a checkid_immediate cancellation """ # In OpenID 1, we check to see if this is a cancel from # immediate mode by the presence of the user_setup_url # parameter. if message.isOpenID1(): user_setup_url = message.getArg(OPENID1_NS, 'user_setup_url') if user_setup_url is not None: raise SetupNeededError(user_setup_url)
[ "def", "_checkSetupNeeded", "(", "self", ",", "message", ")", ":", "# In OpenID 1, we check to see if this is a cancel from", "# immediate mode by the presence of the user_setup_url", "# parameter.", "if", "message", ".", "isOpenID1", "(", ")", ":", "user_setup_url", "=", "message", ".", "getArg", "(", "OPENID1_NS", ",", "'user_setup_url'", ")", "if", "user_setup_url", "is", "not", "None", ":", "raise", "SetupNeededError", "(", "user_setup_url", ")" ]
43.230769
15.615385
def nested_to_ring(nested_index, nside): """ Convert a HEALPix 'nested' index to a HEALPix 'ring' index Parameters ---------- nested_index : int or `~numpy.ndarray` Healpix index using the 'nested' ordering nside : int or `~numpy.ndarray` Number of pixels along the side of each of the 12 top-level HEALPix tiles Returns ------- ring_index : int or `~numpy.ndarray` Healpix index using the 'ring' ordering """ nside = np.asarray(nside, dtype=np.intc) return _core.nested_to_ring(nested_index, nside)
[ "def", "nested_to_ring", "(", "nested_index", ",", "nside", ")", ":", "nside", "=", "np", ".", "asarray", "(", "nside", ",", "dtype", "=", "np", ".", "intc", ")", "return", "_core", ".", "nested_to_ring", "(", "nested_index", ",", "nside", ")" ]
27.8
18.6
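A sanity-check sketch: at nside=1 there are only the 12 base tiles, where the 'nested' and 'ring' numberings coincide, so the conversion is the identity. The import path assumes this is astropy-healpix's packaging of the function above.

import numpy as np
from astropy_healpix import nested_to_ring  # assumed packaging

idx = np.arange(12)
print(np.array_equal(nested_to_ring(idx, 1), idx))  # True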
async def status(dev: Device): """Display status information.""" power = await dev.get_power() click.echo(click.style("%s" % power, bold=power)) vol = await dev.get_volume_information() click.echo(vol.pop()) play_info = await dev.get_play_info() if not play_info.is_idle: click.echo("Playing %s" % play_info) else: click.echo("Not playing any media") outs = await dev.get_inputs() for out in outs: if out.active: click.echo("Active output: %s" % out) sysinfo = await dev.get_system_info() click.echo("System information: %s" % sysinfo)
[ "async", "def", "status", "(", "dev", ":", "Device", ")", ":", "power", "=", "await", "dev", ".", "get_power", "(", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "\"%s\"", "%", "power", ",", "bold", "=", "power", ")", ")", "vol", "=", "await", "dev", ".", "get_volume_information", "(", ")", "click", ".", "echo", "(", "vol", ".", "pop", "(", ")", ")", "play_info", "=", "await", "dev", ".", "get_play_info", "(", ")", "if", "not", "play_info", ".", "is_idle", ":", "click", ".", "echo", "(", "\"Playing %s\"", "%", "play_info", ")", "else", ":", "click", ".", "echo", "(", "\"Not playing any media\"", ")", "outs", "=", "await", "dev", ".", "get_inputs", "(", ")", "for", "out", "in", "outs", ":", "if", "out", ".", "active", ":", "click", ".", "echo", "(", "\"Active output: %s\"", "%", "out", ")", "sysinfo", "=", "await", "dev", ".", "get_system_info", "(", ")", "click", ".", "echo", "(", "\"System information: %s\"", "%", "sysinfo", ")" ]
28.714286
15.428571
def create_field(field_info): """ Create a field by field info dict. """ field_type = field_info.get('type') if field_type not in FIELDS_NAME_MAP: raise ValueError(_('not support this field: {}').format(field_type)) field_class = FIELDS_NAME_MAP.get(field_type) params = dict(field_info) params.pop('type') return field_class.from_dict(params)
[ "def", "create_field", "(", "field_info", ")", ":", "field_type", "=", "field_info", ".", "get", "(", "'type'", ")", "if", "field_type", "not", "in", "FIELDS_NAME_MAP", ":", "raise", "ValueError", "(", "_", "(", "'not support this field: {}'", ")", ".", "format", "(", "field_type", ")", ")", "field_class", "=", "FIELDS_NAME_MAP", ".", "get", "(", "field_type", ")", "params", "=", "dict", "(", "field_info", ")", "params", ".", "pop", "(", "'type'", ")", "return", "field_class", ".", "from_dict", "(", "params", ")" ]
34.272727
8.090909
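A minimal, self-contained model of the factory above; FIELDS_NAME_MAP maps a type string to a class exposing from_dict. The IntegerField here is illustrative, not the library's actual field class.

class IntegerField(object):
    def __init__(self, name=None):
        self.name = name

    @classmethod
    def from_dict(cls, params):
        return cls(**params)

FIELDS_NAME_MAP = {'integer': IntegerField}

def create_field(field_info):
    field_type = field_info.get('type')
    if field_type not in FIELDS_NAME_MAP:
        raise ValueError('not support this field: {}'.format(field_type))
    params = dict(field_info)
    params.pop('type')
    return FIELDS_NAME_MAP[field_type].from_dict(params)

print(create_field({'type': 'integer', 'name': 'age'}).name)  # age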
def get_signature(self, base_commit=None):
    """Get the signature of the current state of the repository

    TODO: right now `get_signature` is an effectful process in that
    it adds all untracked files to staging. This is the only way to get
    an accurate diff on new files. This is ok because we only use it on
    a disposable copy of the repo.

    Args:
        base_commit - the base commit ('HEAD', sha, etc.)

    Returns:
        str
    """
    if base_commit is None:
        base_commit = 'HEAD'

    self.run('add', '-A', self.path)
    sha = self.run('rev-parse', '--verify', base_commit).strip()

    diff = self.run('diff', sha).strip()
    if len(diff) == 0:
        try:
            return self.get_signature(base_commit + '~1')
        except CommandError:
            pass

    h = hashlib.sha1()
    h.update(sha)
    h.update(diff)
    return h.hexdigest()
[ "def", "get_signature", "(", "self", ",", "base_commit", "=", "None", ")", ":", "if", "base_commit", "is", "None", ":", "base_commit", "=", "'HEAD'", "self", ".", "run", "(", "'add'", ",", "'-A'", ",", "self", ".", "path", ")", "sha", "=", "self", ".", "run", "(", "'rev-parse'", ",", "'--verify'", ",", "base_commit", ")", ".", "strip", "(", ")", "diff", "=", "self", ".", "run", "(", "'diff'", ",", "sha", ")", ".", "strip", "(", ")", "if", "len", "(", "diff", ")", "==", "0", ":", "try", ":", "return", "self", ".", "get_signature", "(", "base_commit", "+", "'~1'", ")", "except", "CommandError", ":", "pass", "h", "=", "hashlib", ".", "sha1", "(", ")", "h", ".", "update", "(", "sha", ")", "h", ".", "update", "(", "diff", ")", "return", "h", ".", "hexdigest", "(", ")" ]
33.785714
18.571429
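A self-contained model of the signature step above: sha1 over the base sha followed by the diff, so a change to either input changes the digest. The .encode() calls are added because the original passes str directly (Python 2 semantics).

import hashlib

def signature(sha, diff):
    h = hashlib.sha1()
    h.update(sha.encode())
    h.update(diff.encode())
    return h.hexdigest()

a = signature('deadbeef', '')
b = signature('deadbeef', '+print("hi")\n')
print(a != b)  # True: any diff change produces a new signature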