Dataset fields for each sample below: text (string, lengths 89 to 104k), code_tokens (list of tokens), avg_line_len (float64, 7.91 to 980), score (float64, 0 to 630).
def plot(self, *args, **kwargs):
    """Plot data onto these axes

    Parameters
    ----------
    args
        a single instance of

        - `~gwpy.segments.DataQualityFlag`
        - `~gwpy.segments.Segment`
        - `~gwpy.segments.SegmentList`
        - `~gwpy.segments.SegmentListDict`

        or equivalent types upstream from :mod:`ligo.segments`

    kwargs
        keyword arguments applicable to `~matplotlib.axes.Axes.plot`

    Returns
    -------
    Line2D
        the `~matplotlib.lines.Line2D` for this line layer

    See Also
    --------
    :meth:`matplotlib.axes.Axes.plot`
        for a full description of acceptable ``*args`` and ``**kwargs``
    """
    out = []
    args = list(args)
    while args:
        try:
            plotter = self._plot_method(args[0])
        except TypeError:
            break
        out.append(plotter(args[0], **kwargs))
        args.pop(0)
    if args:
        out.extend(super(SegmentAxes, self).plot(*args, **kwargs))
    self.autoscale(enable=None, axis='both', tight=False)
    return out
avg_line_len: 28.463415, score: 21.219512
def ls_(path='/', profile=None, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Return all keys and dirs inside a specific path. Returns an empty
    dict on failure.

    CLI Example:

    .. code-block:: bash

        salt myminion etcd.ls /path/to/dir/
        salt myminion etcd.ls /path/to/dir/ profile=my_etcd_config
        salt myminion etcd.ls /path/to/dir/ host=127.0.0.1 port=2379
    '''
    client = __utils__['etcd_util.get_conn'](__opts__, profile, **kwargs)
    return client.ls(path)
avg_line_len: 27.277778, score: 27.055556
def update_or_create(self, attributes, values=None, joining=None, touch=True):
    """
    Create or update a related record matching the attributes, and fill it
    with values.

    :param attributes: The attributes
    :type attributes: dict

    :param values: The values
    :type values: dict

    :rtype: Model
    """
    if values is None:
        values = {}

    instance = self._query.where(attributes).first()

    if instance is None:
        return self.create(values, joining, touch)

    instance.fill(**values)
    instance.save({"touch": False})

    return instance
avg_line_len: 24.88, score: 22.4
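A usage sketch for update_or_create above, in the style of an ActiveRecord-like ORM relation (the user model and posts relation are hypothetical, not part of the sample): look up the related record by attributes, creating it with the extra values if no match exists.

post = user.posts().update_or_create(
    {'slug': 'hello-world'},     # attributes used to find the record
    {'title': 'Hello, world!'},  # values filled on the matched or new record
)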
def __gridconnections(self):
    """Level-2 parser for gridconnections.

    pattern:
    object 2 class gridconnections counts 97 93 99
    """
    try:
        tok = self.__consume()
    except DXParserNoTokens:
        return

    if tok.equals('counts'):
        shape = []
        try:
            while True:
                # raises exception if not an int
                self.__peek().value('INTEGER')
                tok = self.__consume()
                shape.append(tok.value('INTEGER'))
        except (DXParserNoTokens, ValueError):
            pass
        if len(shape) == 0:
            raise DXParseError('gridconnections: no shape parameters')
        self.currentobject['shape'] = shape
    else:
        raise DXParseError('gridconnections: ' + str(tok) + ' not recognized.')
avg_line_len: 33.076923, score: 16.961538
def pkginfo_to_metadata(egg_info_path, pkginfo_path):
    """
    Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
    """
    pkg_info = read_pkg_info(pkginfo_path)
    pkg_info.replace_header('Metadata-Version', '2.1')
    requires_path = os.path.join(egg_info_path, 'requires.txt')
    if os.path.exists(requires_path):
        with open(requires_path) as requires_file:
            requires = requires_file.read()
        for extra, reqs in sorted(pkg_resources.split_sections(requires),
                                  key=lambda x: x[0] or ''):
            for item in generate_requirements({extra: reqs}):
                pkg_info[item[0]] = item[1]

    description = pkg_info['Description']
    if description:
        pkg_info.set_payload(dedent_description(pkg_info))
        del pkg_info['Description']

    return pkg_info
avg_line_len: 39.857143, score: 15.380952
def to_html(self):
    """Render a Text MessageElement as html

    Args:
        None

    Returns:
        Str the html representation of the Text MessageElement

    Raises:
        Errors are propagated
    """
    if self.items is None:
        return
    else:
        html = '<ol%s>\n' % self.html_attributes()
        for item in self.items:
            html += '<li>%s</li>\n' % item.to_html()
        html += '</ol>'
    return html
avg_line_len: 24.55, score: 19.95
def assemble_phi5_works_filepaths():
    """Reads PHI5 index and builds a list of absolute filepaths."""
    plaintext_dir_rel = '~/cltk_data/latin/text/phi5/individual_works/'
    plaintext_dir = os.path.expanduser(plaintext_dir_rel)
    all_filepaths = []
    for author_code in PHI5_WORKS_INDEX:
        author_data = PHI5_WORKS_INDEX[author_code]
        works = author_data['works']
        for work in works:
            f = os.path.join(plaintext_dir,
                             author_code + '.TXT' + '-' + work + '.txt')
            all_filepaths.append(f)
    return all_filepaths
avg_line_len: 46, score: 13.916667
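To make the join in assemble_phi5_works_filepaths concrete, here is the filename pattern it produces for one index entry (the author code and work number below are illustrative, not read from the real index):

import os

plaintext_dir = os.path.expanduser('~/cltk_data/latin/text/phi5/individual_works/')
author_code, work = 'LAT0474', '001'  # hypothetical PHI5_WORKS_INDEX entries
path = os.path.join(plaintext_dir, author_code + '.TXT' + '-' + work + '.txt')
# e.g. /home/user/cltk_data/latin/text/phi5/individual_works/LAT0474.TXT-001.txt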
def real_time_statistics(self):
    """
    Access the real_time_statistics

    :returns: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsList
    :rtype: twilio.rest.taskrouter.v1.workspace.workflow.workflow_real_time_statistics.WorkflowRealTimeStatisticsList
    """
    if self._real_time_statistics is None:
        self._real_time_statistics = WorkflowRealTimeStatisticsList(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            workflow_sid=self._solution['sid'],
        )
    return self._real_time_statistics
avg_line_len: 46.5, score: 23.071429
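The accessor above is the usual lazy-initialization pattern: build the child object once, on first access, then reuse it. Stripped of the Twilio specifics, the pattern in isolation looks like this (a generic self-contained sketch, not the library's code):

class ExpensiveChild:
    """Stand-in for an object that is costly to construct."""

class Parent:
    def __init__(self):
        self._child = None  # created lazily

    @property
    def child(self):
        # Build once, on first access; reuse afterwards.
        if self._child is None:
            self._child = ExpensiveChild()
        return self._child

p = Parent()
assert p.child is p.child  # same cached instance on every access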
def create_silence(length):
    """Create a piece of silence."""
    data = bytearray(length)
    i = 0
    while i < length:
        data[i] = 128  # 128 is the zero level for unsigned 8-bit PCM samples
        i += 1
    return data
avg_line_len: 21.375, score: 18.125
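Because 128 is the midpoint of an unsigned 8-bit sample, the buffer above is digital silence. A loop-free equivalent, under the same unsigned 8-bit PCM assumption:

def create_silence_fast(length):
    """Loop-free equivalent of create_silence for unsigned 8-bit PCM."""
    return bytearray([128] * length)

assert create_silence_fast(4) == bytearray(b'\x80\x80\x80\x80')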
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'limit') and self.limit is not None:
        _dict['limit'] = self.limit
    return _dict
avg_line_len: 36.666667, score: 14.166667
def sync_blockchain(working_dir, bt_opts, last_block, server_state,
                    expected_snapshots={}, **virtualchain_args):
    """
    synchronize state with the blockchain.
    Return True on success
    Return False if we're supposed to stop indexing
    Abort on error
    """
    subdomain_index = server_state['subdomains']
    atlas_state = server_state['atlas']

    # make this usable even if we haven't explicitly configured virtualchain
    impl = sys.modules[__name__]

    log.info("Synchronizing database {} up to block {}".format(
        working_dir, last_block))

    # NOTE: this is the only place where a read-write handle should be created,
    # since this is the only place where the db should be modified.
    new_db = BlockstackDB.borrow_readwrite_instance(
        working_dir, last_block, expected_snapshots=expected_snapshots)

    # propagate runtime state to virtualchain callbacks
    new_db.subdomain_index = subdomain_index
    new_db.atlas_state = atlas_state

    rc = virtualchain.sync_virtualchain(bt_opts, last_block, new_db,
                                        expected_snapshots=expected_snapshots,
                                        **virtualchain_args)

    BlockstackDB.release_readwrite_instance(new_db, last_block)

    return rc
avg_line_len: 41.607143, score: 28.25
def BE8(value, min_value=None, max_value=None, fuzzable=True, name=None,
        full_range=False):
    '''8-bit field, Big endian encoded'''
    return UInt8(value, min_value=min_value, max_value=max_value,
                 encoder=ENC_INT_BE, fuzzable=fuzzable, name=name,
                 full_range=full_range)
avg_line_len: 90, score: 50
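BE8 reads like a field factory from a kitty-style fuzzing framework; assuming that library is available, usage would look like this sketch (the field name and bounds are illustrative):

# An 8-bit, big-endian 'version' field whose mutations stay in [0, 15].
version_field = BE8(value=1, min_value=0, max_value=15, name='version')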
def flag_message(current):
    """
    Flag inappropriate messages

    .. code-block:: python

        # request:
        {
            'view': '_zops_flag_message',
            'message_key': key,
        }

        # response:
        {
            'status': 'Created',
            'code': 201,
        }
    """
    current.output = {'status': 'Created', 'code': 201}
    FlaggedMessage.objects.get_or_create(user_id=current.user_id,
                                         message_id=current.input['key'])
avg_line_len: 23.227273, score: 20.409091
def variance(self) -> Optional[float]:  # , ddof: int = 0) -> float:
    """Statistical variance of all values entered into histogram.

    This number is precise, because we keep the necessary data
    separate from bin contents.

    Returns
    -------
    float
    """
    # TODO: Add DOF
    # http://stats.stackexchange.com/questions/6534/how-do-i-calculate-a-weighted-standard-deviation-in-excel
    if self._stats:
        if self.total > 0:
            return (self._stats["sum2"] -
                    self._stats["sum"] ** 2 / self.total) / self.total
        else:
            return np.nan
    else:
        return None
avg_line_len: 34.631579, score: 24.526316
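The return expression is the population variance computed from the stored moments, (sum2 - sum**2 / total) / total. A self-contained check against the direct definition, with unit weights:

values = [1.0, 2.0, 3.0, 4.0]
total = len(values)              # total weight (all ones here)
s = sum(values)                  # the "sum" moment
s2 = sum(v * v for v in values)  # the "sum2" moment
mean = s / total
assert (s2 - s ** 2 / total) / total == \
    sum((v - mean) ** 2 for v in values) / total  # both give 1.25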
def save_csv(self):
    """ Dump all results to CSV. """
    # Sort results so we can start to see patterns right in the raw CSV.
    self.results.sort_values(by=self.column_ids, inplace=True)
    # Gotcha: integers seem to be promoted to float64 because of
    # reindexation. See: https://pandas.pydata.org/pandas-docs/stable
    # /gotchas.html#na-type-promotions
    self.results.reindex(columns=self.column_ids).to_csv(
        self.csv_filepath, index=False)
avg_line_len: 54.333333, score: 19
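The "gotcha" comment refers to pandas' NA type promotion: a reindex that introduces missing labels upcasts integer columns to float64 so the holes can hold NaN. A minimal demonstration of the classic behaviour described on the linked gotchas page:

import pandas as pd

s = pd.Series([1, 2, 3])
print(s.reindex([0, 1, 2]).dtype)  # int64: all labels exist, no promotion
print(s.reindex([0, 1, 9]).dtype)  # float64: label 9 is missing, NaN forces the upcast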
def check_anchor(self, url_data):
    """If URL is valid, parseable and has an anchor, check it.
    A warning is logged and True is returned if the anchor is not found.
    """
    log.debug(LOG_PLUGIN, "checking anchor %r in %s",
              url_data.anchor, self.anchors)
    enc = lambda anchor: urlutil.url_quote_part(anchor,
                                                encoding=url_data.encoding)
    if any(x for x in self.anchors if enc(x[0]) == url_data.anchor):
        return
    if self.anchors:
        anchornames = sorted(set(u"`%s'" % x[0] for x in self.anchors))
        anchors = u", ".join(anchornames)
    else:
        anchors = u"-"
    args = {"name": url_data.anchor, "anchors": anchors}
    msg = u"%s %s" % (_("Anchor `%(name)s' not found.") % args,
                      _("Available anchors: %(anchors)s.") % args)
    url_data.add_warning(msg)
avg_line_len: 50.823529, score: 21.941176
def get_value_product_unique(self, pos):
    """
    Return all ProductUnique records related to this product in the POS's
    storage (only salable zones).
    """
    qs = ProductUnique.objects.filter(
        box__box_structure__zone__storage__in=pos.storage_stock.filter(
            storage_zones__salable=True),
        product_final=self
    )
    return qs
avg_line_len: 39, score: 19.666667
def update_course(self, course, enterprise_customer, enterprise_context):
    """
    Update course metadata of the given course and return updated course.

    Arguments:
        course (dict): Course Metadata returned by course catalog API
        enterprise_customer (EnterpriseCustomer): enterprise customer instance.
        enterprise_context (dict): Enterprise context to be added to course
            runs and URLs.

    Returns:
        (dict): Updated course metadata
    """
    course['course_runs'] = self.update_course_runs(
        course_runs=course.get('course_runs') or [],
        enterprise_customer=enterprise_customer,
        enterprise_context=enterprise_context,
    )

    # Update marketing urls in course metadata to include enterprise related
    # info (i.e. our global context).
    marketing_url = course.get('marketing_url')
    if marketing_url:
        query_parameters = dict(
            enterprise_context,
            **utils.get_enterprise_utm_context(enterprise_customer))
        course.update({'marketing_url': utils.update_query_parameters(
            marketing_url, query_parameters)})

    # Finally, add context to the course as a whole.
    course.update(enterprise_context)
    return course
avg_line_len: 46.259259, score: 28.481481
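utils.update_query_parameters is not shown in this sample; its role is to merge the enterprise/UTM parameters into the marketing URL's query string. A stand-in sketch of that behaviour using only the standard library (my own illustration, not the enterprise utils implementation):

from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

def update_query_parameters(url, params):
    # Merge ``params`` into the URL's existing query string.
    parts = urlparse(url)
    query = dict(parse_qsl(parts.query))
    query.update(params)
    return urlunparse(parts._replace(query=urlencode(query)))

print(update_query_parameters('https://example.com/course?run=1',
                              {'utm_source': 'acme'}))
# https://example.com/course?run=1&utm_source=acme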
def H13(self):
    "Information measure of correlation 2."
    # An imaginary result has been encountered once in the Matlab
    # version. The reason is unclear.
    return np.sqrt(1 - np.exp(-2 * (self.hxy2 - self.H9())))
avg_line_len: 47.2, score: 17.6
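In Haralick's texture-feature notation this is IMC2 = sqrt(1 - exp(-2 * (HXY2 - HXY))), where HXY is the joint entropy (H9 here). A standalone numeric sketch with made-up entropy values:

import numpy as np

hxy2, hxy = 2.0, 1.5  # hypothetical entropies; hxy2 >= hxy keeps the result real
imc2 = np.sqrt(1 - np.exp(-2 * (hxy2 - hxy)))
print(round(float(imc2), 4))  # 0.7951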
def _parse_metadata(response):
    '''
    Extracts out resource metadata information.
    '''
    if response is None or response.headers is None:
        return None

    metadata = _dict()

    for key, value in response.headers.items():
        if key.lower().startswith('x-ms-meta-'):
            metadata[key[10:]] = _to_str(value)

    return metadata
avg_line_len: 24.642857, score: 21.214286
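The header contract of _parse_metadata is easy to pin down with a stand-in response object (the real one comes from the storage service's HTTP layer): only 'x-ms-meta-' headers survive, with the 10-character prefix stripped from the key.

class FakeResponse:
    """Hypothetical stand-in for the storage HTTP response."""
    headers = {'x-ms-meta-category': 'docs', 'Content-Type': 'text/plain'}

# _parse_metadata(FakeResponse()) would return {'category': 'docs'},
# given the module's _dict/_to_str helpers (plain dict/str equivalents).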
def _init(self):
    """Create and grid the widgets."""
    for label in self.ticklabels:
        label.destroy()
    self.label.place_forget()
    self.ticks = []
    self.ticklabels = []
    if self._resolution > 0:
        nb_steps = round((self.scale.cget('to') - self.scale.cget('from'))
                         / self._resolution)
        self.scale.configure(to=self.scale.cget('from')
                             + nb_steps * self._resolution)
    self._extent = self.scale.cget('to') - self.scale.cget('from')
    if str(self.scale.cget('orient')) == "horizontal":
        self.get_scale_length = self.scale.winfo_width
        self.display_value = self._display_value_horizontal
        self._update_slider_length = self._update_slider_length_horizontal
        self.place_ticks = self._place_ticks_horizontal
        self._init_horizontal()
    else:
        self.get_scale_length = self.scale.winfo_height
        self.display_value = self._display_value_vertical
        self._update_slider_length = self._update_slider_length_vertical
        self.place_ticks = self._place_ticks_vertical
        self._init_vertical()
    self.scale.lift()
    try:
        self._var.trace_remove('write', self._trace)
        self._trace = self._var.trace_add('write', self._increment)
    except AttributeError:
        # backward compatibility
        self._var.trace_vdelete('w', self._trace)
        self._trace = self._var.trace('w', self._increment)
    self._update_slider_length()
avg_line_len: 47.875, score: 19.3125
def vor_to_am(vor):
    r"""
    Given a Voronoi tessellation object from Scipy's ``spatial`` module,
    converts to a sparse adjacency matrix network representation in COO
    format.

    Parameters
    ----------
    vor : Voronoi Tessellation object
        This object is produced by ``scipy.spatial.Voronoi``

    Returns
    -------
    A sparse adjacency matrix in COO format.  The network is undirected
    and unweighted, so the adjacency matrix is upper-triangular and all
    the weights are set to 1.
    """
    # Create adjacency matrix in lil format for quick matrix construction
    N = vor.vertices.shape[0]
    rc = [[], []]
    for ij in vor.ridge_dict.keys():
        row = vor.ridge_dict[ij].copy()
        # Make sure voronoi cell closes upon itself
        row.append(row[0])
        # Add connections to rc list
        rc[0].extend(row[:-1])
        rc[1].extend(row[1:])
    rc = sp.vstack(rc).T
    # Make adj mat upper triangular
    rc = sp.sort(rc, axis=1)
    # Remove any pairs with ends at infinity (-1)
    keep = ~sp.any(rc == -1, axis=1)
    rc = rc[keep]
    data = sp.ones_like(rc[:, 0])
    # Build adj mat in COO format
    M = N = sp.amax(rc) + 1
    am = sprs.coo_matrix((data, (rc[:, 0], rc[:, 1])), shape=(M, N))
    # Remove diagonal, and convert to csr to remove duplicates
    am = sp.sparse.triu(A=am, k=1, format='csr')
    # Then convert back to COO and return
    am = am.tocoo()
    return am
avg_line_len: 33.285714, score: 17.785714
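A usage sketch for vor_to_am with SciPy (this assumes the module's sp and sprs aliases for scipy and scipy.sparse, as the function body implies):

import numpy as np
from scipy.spatial import Voronoi

points = np.random.rand(30, 3)   # 30 random seed points in 3D
am = vor_to_am(Voronoi(points))  # upper-triangular COO adjacency of vertices
print(am.shape, am.nnz)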
def content():
    """Helper method that returns just the content.

    This method was added so that the text could be reused in the
    dock_help module.

    .. versionadded:: 3.2.2

    :returns: A message object without brand element.
    :rtype: safe.messaging.message.Message
    """
    message = m.Message()
    message.add(m.Paragraph(tr(
        'The InaSAFE options dialog is used to control various aspects of '
        'the InaSAFE analysis and reporting environment. Here are brief '
        'descriptions of all the options available, grouped by the tab '
        'page on which they occur.'
    )))
    header = m.Heading(tr('Organisation Profile tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-organisation-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    message.add(m.Paragraph(tr(
        'The Organisation Profile tab provides several general settings:'
    )))
    bullets = m.BulletedList()
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Organisation')),
        tr(' - Use this option to specify the name of your organisation.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Contact email')),
        tr(' - Use this option to specify the contact person\'s email '
           'address to use in the generated metadata document.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Website')),
        tr(' - Use this option to set the website address to be used in '
           'the generated metadata document.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Use custom organisation logo')),
        tr(' - By default, InaSAFE will add the supporters logo to each '
           'map template. The supporters logo is also used at the bottom '
           'of the dock panel if the \'show organisation logo in dock\' '
           'option is enabled. You can use this option to replace the '
           'organisation logo with that of your own organisation. The logo '
           'will be rescaled automatically to fill the space provided.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Currency')),
        tr(' - InaSAFE will use the selected currency for the analysis.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Analysis license')),
        tr(' - Use this to set the usage and redistribution license for the '
           'generated impact layer.')))
    message.add(bullets)

    header = m.Heading(tr('Population Parameters tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-population-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    message.add(m.Paragraph(tr(
        'In this tab you can define some parameters that will be used by '
        'InaSAFE in the analysis of exposed populations. You have the option '
        'to change the parameters for whether the exposed population is '
        'considered to be affected by each hazard type and class, and the '
        'displacement rate that will be used for affected people.'
    )))
    bullets = m.BulletedList()
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Affected')),
        tr(' - When this option is checked, people exposed to the hazard '
           'class will be included in the count of affected people.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Displacement Rate')),
        tr(' - The displacement rate is used to estimate the number of '
           'people displaced for each hazard class. People must be affected '
           'before they can be displaced. ')))
    message.add(bullets)
    message.add(m.Paragraph(tr(
        'Please refer to the InaSAFE manual for concept definitions and '
        'more information on the source of the hazard classifications and '
        'default settings. We really encourage you to consider these '
        'parameters carefully and to choose appropriate values for your '
        'local situation based on past events and expert knowledge.'
    )))

    header = m.Heading(tr('GIS Environment tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-environment-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    message.add(m.Paragraph(tr(
        'The GIS Environment tab provides several general settings:'
    )))
    bullets = m.BulletedList()
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Always show welcome message when opening QGIS with InaSAFE')),
        tr(' - When this option is enabled, the welcome message will be '
           'enabled when opening QGIS with InaSAFE. By default the Welcome '
           'message will be displayed.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Show organisation logo in InaSAFE dock')),
        tr(' - When this option is enabled, a logo will be displayed at the '
           'bottom of the InaSAFE dock widget. By default the logo used '
           'is the InaSAFE supporters logo, but you can alter this by '
           'setting the \'Use custom organisation logo\' option in '
           'the template options tab (see below).')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Show only visible layers in the InaSAFE dock')),
        tr(' - When this option is enabled layers that are not visible '
           'in the QGIS layers panel will not be shown in the hazard, '
           'exposure and aggregation combo boxes in the dock area.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Set QGIS layer name from title in keywords')),
        tr(' - If this option is enabled, the InaSAFE keywords title '
           'attribute will be used for the layer name in the QGIS Layers '
           'list when adding a layer.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Zoom to impact layer on scenario estimate completion')),
        tr(' - When this option is enabled, the map view extents will '
           'be updated to match the extents of the generated impact layer '
           'once the analysis completes.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Hide exposure on scenario estimate completion')),
        tr(' - Use this option if you prefer to not show the exposure '
           'layer as an underlay behind the generated impact layer.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Show only impact layer on report map')),
        tr('When this option is enabled, the map report created after an '
           'analysis completes will not show any other layers in your '
           'current project except for the impact layer. ')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Print atlas report on atlas driven template with the '
            'aggregation layer')),
        tr('When this option is enabled, InaSAFE will generate an atlas '
           'report based on aggregation area if the template has atlas '
           'generation flag enabled.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Use selected features only with the aggregation layer')),
        tr('If enabled, running an analysis with some features of the '
           'aggregation layer selected will constrain the analysis to only '
           'those selected aggregation areas, all others will be ignored.')))
    bullets.add(m.Text(
        m.ImportantText(tr('Location for results')),
        tr(' - By default, InaSAFE will write impact layer and intermediate '
           'outputs to the system temporary directory. On some operating '
           'systems, these temporary files will be deleted on each reboot. '
           'If you wish to, you can specify an alternative directory '
           'to use for storing these temporary files.')))
    message.add(bullets)

    header = m.Heading(tr('Earthquake tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-earthquake-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'In this tab you can select which earthquake fatality model to use '
        'when estimating earthquake impact on population. This option is '
        'global - it will affect all subsequent earthquake analyses carried '
        'out in InaSAFE.'
    ))
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'When selecting an earthquake analysis model, its details will be '
        'shown below in the text box area.'
    ))
    message.add(paragraph)

    header = m.Heading(tr('Template Options tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-template-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    message.add(m.Paragraph(tr(
        'This tab has options relating to the generation of map composer '
        'templates and how reports will be printed:'
    )))
    bullets = m.BulletedList()
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Use custom north arrow image')),
        tr(' - InaSAFE provides a basic north arrow which is placed on '
           'generated map compositions and rendered PDF reports. You can '
           'replace this north arrow with one of your own choosing using '
           'this option.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Use custom disclaimer text')),
        tr(' - By default, InaSAFE will display a disclaimer on reports '
           'advising readers of the report to exercise caution when '
           'interpreting the outputs presented. You can override this '
           'text using this option, though we do advise that you include '
           'a similar statement of caution in your overridden text.')))
    message.add(bullets)

    header = m.Heading(tr('Demographic Defaults tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-demographic-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'In this tab you will find options for setting the default ratios '
        'for demographic groups. There is more detailed help on demographic '
        'groups within the main help page for InaSAFE in the Field Mapping '
        'Tool section. Essentially default ratios for demographic groups '
        'determine what proportion of the population are within each '
        'population group (e.g. infants versus children etc.). The options '
        'defined in this tab are used in cases where you choose to use the '
        'global default ratios while configuring the keywords for an '
        'aggregation layer as shown below.'
    ))
    message.add(paragraph)
    paragraph = m.Paragraph(tr(
        'Note that the contents of this tab may change depending on what '
        'groups have been defined for demographic breakdowns.'
    ))
    message.add(paragraph)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'field-mapping-tool-default-ratios-screenshot.png'
            % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)

    header = m.Heading(tr('Advanced tab'), **INFO_STYLE)
    message.add(header)
    paragraph = m.Paragraph(
        m.Image(
            'file:///%s/img/screenshots/'
            'inasafe-options-advanced-screenshot.png' % resources_path()),
        style_class='text-center'
    )
    message.add(paragraph)
    message.add(m.Paragraph(tr(
        'This tab contains options intended for advanced users only: '
    )))
    bullets = m.BulletedList()
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Keyword cache for remote databases')),
        tr(' - When InaSAFE is used with remote layers (for example a '
           'database layer or a WFS layer), it is not possible to store the '
           'keywords for the layer with the layer itself. To accommodate for '
           'these types of layers, InaSAFE writes the keywords to a small '
           'file based database (using sqlite) and then retrieves them based '
           'on unique connection details used for that layer. You can '
           'specify a custom path to be used for storing the keywords '
           'database using this option.')))
    bullets.add(m.Text(
        m.ImportantText(tr(
            'Help to improve InaSAFE by submitting errors to a '
            'remote server')),
        tr(' - With this option enabled, InaSAFE will post any errors that '
           'occur to an online server for analysis by our development team. '
           'This option is disabled by default as some may consider some of '
           'the data submitted (IP Address, logged in user name) to be '
           'sensitive.')))
    bullets.add(m.Text(
        m.ImportantText(tr('Enable developer mode')),
        tr(' - When this option is enabled, right clicking on the webview '
           'widget in the dock will allow you to debug the generated HTML. '
           'In addition, if the metadata.txt for the running InaSAFE is '
           'set to \'alpha\', an additional icon will be added to the '
           'toolbar to add test layers to the QGIS project.')))
    bullets.add(m.Text(
        m.ImportantText(tr('Generate reports')),
        tr(' - When this option is enabled, InaSAFE will generate '
           'reports. ')))
    bullets.add(m.Text(
        m.ImportantText(tr('Show memory profile')),
        tr(' - When this option is enabled, InaSAFE will display the memory '
           'profile when it runs. ')))
    message.add(bullets)
    return message
41.017391
22.663768
def _condition_as_sql(self, qn, connection):
    '''
    Return the SQL for this condition, with string parameters escaped
    and quoted inline.
    '''
    def escape(value):
        if isinstance(value, bool):
            value = str(int(value))
        if isinstance(value, six.string_types):
            # Escape params used with LIKE
            if '%' in value:
                value = value.replace('%', '%%')
            # Escape single quotes
            if "'" in value:
                value = value.replace("'", "''")
            # Add single quote to text values
            value = "'" + value + "'"
        return value

    sql, param = self.condition.query.where.as_sql(qn, connection)
    param = map(escape, param)

    return sql % tuple(param)
[ "def", "_condition_as_sql", "(", "self", ",", "qn", ",", "connection", ")", ":", "def", "escape", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "bool", ")", ":", "value", "=", "str", "(", "int", "(", "value", ")", ")", "if", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", ":", "# Escape params used with LIKE", "if", "'%'", "in", "value", ":", "value", "=", "value", ".", "replace", "(", "'%'", ",", "'%%'", ")", "# Escape single quotes", "if", "\"'\"", "in", "value", ":", "value", "=", "value", ".", "replace", "(", "\"'\"", ",", "\"''\"", ")", "# Add single quote to text values", "value", "=", "\"'\"", "+", "value", "+", "\"'\"", "return", "value", "sql", ",", "param", "=", "self", ".", "condition", ".", "query", ".", "where", ".", "as_sql", "(", "qn", ",", "connection", ")", "param", "=", "map", "(", "escape", ",", "param", ")", "return", "sql", "%", "tuple", "(", "param", ")" ]
34.409091
13.318182
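A rough standalone illustration of the escaping rules above (this is a sketch, not part of the record; plain str stands in for six.string_types):

def escape(value):
    # Booleans become integer strings; strings get LIKE and quote escaping.
    if isinstance(value, bool):
        value = str(int(value))
    if isinstance(value, str):
        value = value.replace('%', '%%').replace("'", "''")
        value = "'" + value + "'"
    return value

# Note the quirk: a bool becomes a *quoted* "'1'", since str(int(True))
# falls through to the string branch.
print([escape(v) for v in ["O'Brien", "10%", True, 5]])
# ["'O''Brien'", "'10%%'", "'1'", 5]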
def keys(self, element=None, mode=None):
    r"""
    This subclass works exactly like ``keys`` when no arguments are passed,
    but optionally accepts an ``element`` and/or a ``mode``, which filters
    the output to only the requested keys.

    The default behavior is exactly equivalent to the normal ``keys``
    method.

    Parameters
    ----------
    element : string
        Can be either 'pore' or 'throat', which limits the returned list of
        keys to only 'pore' or 'throat' keys.  If neither is given, then
        both are assumed.

    mode : string (optional, default is ``None``)
        Controls which keys are returned.  Options are:

        **``None``** : This mode (default) bypasses this subclassed method
        and just returns the normal KeysView object.

        **'labels'** : Limits the returned list of keys to only 'labels'
        (boolean arrays)

        **'props'** : Limits the returned list of keys to only 'props'
        (numerical arrays).

        **'all'** : Returns both 'labels' and 'props'.  This is equivalent
        to sending a list of both 'labels' and 'props'.

    See Also
    --------
    props
    labels

    Notes
    -----
    This subclass can be used to get dictionary keys of specific kinds of
    data.  Its use augments ``props`` and ``labels`` by returning a list
    containing both types, but possibly limited by element type ('pores'
    or 'throats').

    Examples
    --------
    >>> import openpnm as op
    >>> pn = op.network.Cubic([5, 5, 5])
    >>> pn.keys(mode='props')  # Get all props
    ['pore.coords', 'throat.conns']
    >>> pn.keys(mode='props', element='pore')  # Get only pore props
    ['pore.coords']

    """
    if mode is None:
        return super().keys()
    element = self._parse_element(element=element)
    allowed = ['props', 'labels']
    if 'all' in mode:
        mode = allowed
    mode = self._parse_mode(mode=mode, allowed=allowed)
    keys = super().keys()
    temp = []
    if 'props' in mode:
        temp.extend([i for i in keys if self.get(i).dtype != bool])
    if 'labels' in mode:
        temp.extend([i for i in keys if self.get(i).dtype == bool])
    if element:
        temp = [i for i in temp if i.split('.')[0] in element]
    return temp
[ "def", "keys", "(", "self", ",", "element", "=", "None", ",", "mode", "=", "None", ")", ":", "if", "mode", "is", "None", ":", "return", "super", "(", ")", ".", "keys", "(", ")", "element", "=", "self", ".", "_parse_element", "(", "element", "=", "element", ")", "allowed", "=", "[", "'props'", ",", "'labels'", "]", "if", "'all'", "in", "mode", ":", "mode", "=", "allowed", "mode", "=", "self", ".", "_parse_mode", "(", "mode", "=", "mode", ",", "allowed", "=", "allowed", ")", "keys", "=", "super", "(", ")", ".", "keys", "(", ")", "temp", "=", "[", "]", "if", "'props'", "in", "mode", ":", "temp", ".", "extend", "(", "[", "i", "for", "i", "in", "keys", "if", "self", ".", "get", "(", "i", ")", ".", "dtype", "!=", "bool", "]", ")", "if", "'labels'", "in", "mode", ":", "temp", ".", "extend", "(", "[", "i", "for", "i", "in", "keys", "if", "self", ".", "get", "(", "i", ")", ".", "dtype", "==", "bool", "]", ")", "if", "element", ":", "temp", "=", "[", "i", "for", "i", "in", "temp", "if", "i", ".", "split", "(", "'.'", ")", "[", "0", "]", "in", "element", "]", "return", "temp" ]
35.057971
23.710145
def _filter_headers(self): """ Add headers designed for filtering messages based on objects. Returns: dict: Filter-related headers to be combined with the existing headers """ headers = {} for user in self.usernames: headers["fedora_messaging_user_{}".format(user)] = True for package in self.packages: headers["fedora_messaging_rpm_{}".format(package)] = True for container in self.containers: headers["fedora_messaging_container_{}".format(container)] = True for module in self.modules: headers["fedora_messaging_module_{}".format(module)] = True for flatpak in self.flatpaks: headers["fedora_messaging_flatpak_{}".format(flatpak)] = True return headers
[ "def", "_filter_headers", "(", "self", ")", ":", "headers", "=", "{", "}", "for", "user", "in", "self", ".", "usernames", ":", "headers", "[", "\"fedora_messaging_user_{}\"", ".", "format", "(", "user", ")", "]", "=", "True", "for", "package", "in", "self", ".", "packages", ":", "headers", "[", "\"fedora_messaging_rpm_{}\"", ".", "format", "(", "package", ")", "]", "=", "True", "for", "container", "in", "self", ".", "containers", ":", "headers", "[", "\"fedora_messaging_container_{}\"", ".", "format", "(", "container", ")", "]", "=", "True", "for", "module", "in", "self", ".", "modules", ":", "headers", "[", "\"fedora_messaging_module_{}\"", ".", "format", "(", "module", ")", "]", "=", "True", "for", "flatpak", "in", "self", ".", "flatpaks", ":", "headers", "[", "\"fedora_messaging_flatpak_{}\"", ".", "format", "(", "flatpak", ")", "]", "=", "True", "return", "headers" ]
42
18.947368
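A sketch of the headers such a message would produce, using a minimal stand-in object (fedora-messaging itself derives these attributes from real message schemas):

class FakeMessage:
    usernames = ["alice"]
    packages = ["kernel", "bash"]

headers = {}
for user in FakeMessage.usernames:
    headers["fedora_messaging_user_{}".format(user)] = True
for package in FakeMessage.packages:
    headers["fedora_messaging_rpm_{}".format(package)] = True
print(headers)
# {'fedora_messaging_user_alice': True,
#  'fedora_messaging_rpm_kernel': True,
#  'fedora_messaging_rpm_bash': True}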
def enable_request_loader():
    """
    Enable request loader
    Optional user loader based on the incoming request object. This is
    useful to enable on top of the default user loader if you want to
    authenticate API requests via a bearer token header.
    :return:
    """
    @login_manager.request_loader
    def load_user_from_request(request):
        user = None
        auth = request.headers.get('Authorization')
        if auth and auth.startswith('Bearer'):
            try:
                token = auth[7:]
                user = user_service.get_user_by_token(token)
            except x.UserException as exception:
                msg = 'JWT token login failed for [{ip}] with message: [{msg}]'
                msg = msg.format(
                    ip=request.environ['REMOTE_ADDR'],
                    msg=str(exception)
                )
                current_app.logger.log(msg=msg, level=logging.INFO)
                abort(401, description=str(exception))
        return user
[ "def", "enable_request_loader", "(", ")", ":", "@", "login_manager", ".", "request_loader", "def", "load_user_from_request", "(", "request", ")", ":", "user", "=", "None", "auth", "=", "request", ".", "headers", ".", "get", "(", "'Authorization'", ")", "if", "auth", "and", "auth", ".", "startswith", "(", "'Bearer'", ")", ":", "try", ":", "token", "=", "auth", "[", "7", ":", "]", "user", "=", "user_service", ".", "get_user_by_token", "(", "token", ")", "except", "x", ".", "UserException", "as", "exception", ":", "msg", "=", "'JWT token login failed for [{ip}] with message: [{msg}]'", "msg", "=", "msg", ".", "format", "(", "ip", "=", "request", ".", "environ", "[", "'REMOTE_ADDR'", "]", ",", "msg", "=", "str", "(", "exception", ")", ")", "current_app", ".", "logger", ".", "log", "(", "msg", "=", "msg", ",", "level", "=", "logging", ".", "INFO", ")", "abort", "(", "401", ",", "description", "=", "str", "(", "exception", ")", ")", "return", "user" ]
37.269231
16.038462
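The bearer-token slicing above works because "Bearer " is exactly 7 characters including the space; a self-contained check with a plain dict standing in for the Flask request headers:

headers = {"Authorization": "Bearer abc123"}
auth = headers.get("Authorization")
if auth and auth.startswith("Bearer"):
    token = auth[7:]  # drop the "Bearer " prefix
    print(token)      # -> abc123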
def evaluate(obj, array):
    """Evaluate a ROOT histogram, function, graph, or spline over an array.

    Parameters
    ----------
    obj : TH[1|2|3], TF[1|2|3], TFormula, TGraph, TSpline, or string
        A ROOT histogram, function, formula, graph, spline, or string. If a
        string is specified, a TFormula is created.
    array : ndarray
        An array containing the values to evaluate the ROOT object on. The
        shape must match the dimensionality of the ROOT object.

    Returns
    -------
    y : array
        An array containing the values of the ROOT object evaluated at each
        value in the input array.

    Raises
    ------
    TypeError
        If the ROOT object is not a histogram, function, graph, or spline.
    ValueError
        If the shape of the array is not compatible with the dimensionality
        of the ROOT object being evaluated. If the string expression does
        not compile to a valid TFormula expression.

    Examples
    --------
    >>> from root_numpy import evaluate
    >>> from ROOT import TF1, TF2
    >>> func = TF1("f1", "x*x")
    >>> evaluate(func, [1, 2, 3, 4])
    array([ 1., 4., 9., 16.])
    >>> func = TF2("f2", "x*y")
    >>> evaluate(func, [[1, 1], [1, 2], [3, 1]])
    array([ 1., 2., 3.])
    >>> evaluate("x*y", [[1, 1], [1, 2], [3, 1]])
    array([ 1., 2., 3.])

    """
    import ROOT
    array = np.asarray(array, dtype=np.double)
    if isinstance(obj, ROOT.TH1):
        if isinstance(obj, ROOT.TH3):
            if array.ndim != 2:
                raise ValueError("array must be 2-dimensional")
            if array.shape[1] != 3:
                raise ValueError(
                    "length of the second dimension must equal "
                    "the dimension of the histogram")
            return _librootnumpy.evaluate_h3(
                ROOT.AsCObject(obj), array)
        elif isinstance(obj, ROOT.TH2):
            if array.ndim != 2:
                raise ValueError("array must be 2-dimensional")
            if array.shape[1] != 2:
                raise ValueError(
                    "length of the second dimension must equal "
                    "the dimension of the histogram")
            return _librootnumpy.evaluate_h2(
                ROOT.AsCObject(obj), array)
        if array.ndim != 1:
            raise ValueError("array must be 1-dimensional")
        return _librootnumpy.evaluate_h1(
            ROOT.AsCObject(obj), array)
    elif isinstance(obj, ROOT.TF1):
        if isinstance(obj, ROOT.TF3):
            if array.ndim != 2:
                raise ValueError("array must be 2-dimensional")
            if array.shape[1] != 3:
                raise ValueError(
                    "length of the second dimension must equal "
                    "the dimension of the function")
            return _librootnumpy.evaluate_f3(
                ROOT.AsCObject(obj), array)
        elif isinstance(obj, ROOT.TF2):
            if array.ndim != 2:
                raise ValueError("array must be 2-dimensional")
            if array.shape[1] != 2:
                raise ValueError(
                    "length of the second dimension must equal "
                    "the dimension of the function")
            return _librootnumpy.evaluate_f2(
                ROOT.AsCObject(obj), array)
        if array.ndim != 1:
            raise ValueError("array must be 1-dimensional")
        return _librootnumpy.evaluate_f1(
            ROOT.AsCObject(obj), array)
    elif isinstance(obj, (string_types, ROOT.TFormula)):
        if isinstance(obj, string_types):
            # attempt to create a formula
            obj = ROOT.TFormula(uuid.uuid4().hex, obj)
        ndim = obj.GetNdim()
        if ndim == 0:
            raise ValueError("invalid formula expression")
        if ndim == 1:
            if array.ndim != 1:
                raise ValueError("array must be 1-dimensional")
            return _librootnumpy.evaluate_formula_1d(
                ROOT.AsCObject(obj), array)
        if array.ndim != 2:
            raise ValueError("array must be 2-dimensional")
        if array.shape[1] != ndim:
            raise ValueError(
                "length of the second dimension must equal "
                "the dimension of the function")
        if ndim == 2:
            return _librootnumpy.evaluate_formula_2d(
                ROOT.AsCObject(obj), array)
        elif ndim == 3:
            return _librootnumpy.evaluate_formula_3d(
                ROOT.AsCObject(obj), array)
        # 4d
        return _librootnumpy.evaluate_formula_4d(
            ROOT.AsCObject(obj), array)
    elif isinstance(obj, ROOT.TGraph):
        if array.ndim != 1:
            raise ValueError("array must be 1-dimensional")
        return _librootnumpy.evaluate_graph(
            ROOT.AsCObject(obj), array)
    elif isinstance(obj, ROOT.TSpline):
        if array.ndim != 1:
            raise ValueError("array must be 1-dimensional")
        return _librootnumpy.evaluate_spline(
            ROOT.AsCObject(obj), array)
    raise TypeError(
        "obj is not a ROOT histogram, function, formula, "
        "graph, spline or string")
[ "def", "evaluate", "(", "obj", ",", "array", ")", ":", "import", "ROOT", "array", "=", "np", ".", "asarray", "(", "array", ",", "dtype", "=", "np", ".", "double", ")", "if", "isinstance", "(", "obj", ",", "ROOT", ".", "TH1", ")", ":", "if", "isinstance", "(", "obj", ",", "ROOT", ".", "TH3", ")", ":", "if", "array", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"array must be 2-dimensional\"", ")", "if", "array", ".", "shape", "[", "1", "]", "!=", "3", ":", "raise", "ValueError", "(", "\"length of the second dimension must equal \"", "\"the dimension of the histogram\"", ")", "return", "_librootnumpy", ".", "evaluate_h3", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "isinstance", "(", "obj", ",", "ROOT", ".", "TH2", ")", ":", "if", "array", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"array must be 2-dimensional\"", ")", "if", "array", ".", "shape", "[", "1", "]", "!=", "2", ":", "raise", "ValueError", "(", "\"length of the second dimension must equal \"", "\"the dimension of the histogram\"", ")", "return", "_librootnumpy", ".", "evaluate_h2", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "if", "array", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"array must be 1-dimensional\"", ")", "return", "_librootnumpy", ".", "evaluate_h1", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "isinstance", "(", "obj", ",", "ROOT", ".", "TF1", ")", ":", "if", "isinstance", "(", "obj", ",", "ROOT", ".", "TF3", ")", ":", "if", "array", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"array must be 2-dimensional\"", ")", "if", "array", ".", "shape", "[", "1", "]", "!=", "3", ":", "raise", "ValueError", "(", "\"length of the second dimension must equal \"", "\"the dimension of the function\"", ")", "return", "_librootnumpy", ".", "evaluate_f3", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "isinstance", "(", "obj", ",", "ROOT", ".", "TF2", ")", ":", "if", "array", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"array must be 2-dimensional\"", ")", "if", "array", ".", "shape", "[", "1", "]", "!=", "2", ":", "raise", "ValueError", "(", "\"length of the second dimension must equal \"", "\"the dimension of the function\"", ")", "return", "_librootnumpy", ".", "evaluate_f2", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "if", "array", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"array must be 1-dimensional\"", ")", "return", "_librootnumpy", ".", "evaluate_f1", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "isinstance", "(", "obj", ",", "(", "string_types", ",", "ROOT", ".", "TFormula", ")", ")", ":", "if", "isinstance", "(", "obj", ",", "string_types", ")", ":", "# attempt to create a formula", "obj", "=", "ROOT", ".", "TFormula", "(", "uuid", ".", "uuid4", "(", ")", ".", "hex", ",", "obj", ")", "ndim", "=", "obj", ".", "GetNdim", "(", ")", "if", "ndim", "==", "0", ":", "raise", "ValueError", "(", "\"invalid formula expression\"", ")", "if", "ndim", "==", "1", ":", "if", "array", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"array must be 1-dimensional\"", ")", "return", "_librootnumpy", ".", "evaluate_formula_1d", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "if", "array", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "\"array must be 2-dimensional\"", ")", "if", "array", ".", "shape", "[", "1", "]", "!=", "ndim", ":", "raise", 
"ValueError", "(", "\"length of the second dimension must equal \"", "\"the dimension of the function\"", ")", "if", "ndim", "==", "2", ":", "return", "_librootnumpy", ".", "evaluate_formula_2d", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "ndim", "==", "3", ":", "return", "_librootnumpy", ".", "evaluate_formula_3d", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "# 4d", "return", "_librootnumpy", ".", "evaluate_formula_4d", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "isinstance", "(", "obj", ",", "ROOT", ".", "TGraph", ")", ":", "if", "array", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"array must be 1-dimensional\"", ")", "return", "_librootnumpy", ".", "evaluate_graph", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "elif", "isinstance", "(", "obj", ",", "ROOT", ".", "TSpline", ")", ":", "if", "array", ".", "ndim", "!=", "1", ":", "raise", "ValueError", "(", "\"array must be 1-dimensional\"", ")", "return", "_librootnumpy", ".", "evaluate_spline", "(", "ROOT", ".", "AsCObject", "(", "obj", ")", ",", "array", ")", "raise", "TypeError", "(", "\"obj is not a ROOT histogram, function, formula, \"", "\"graph, spline or string\"", ")" ]
39.03876
14.217054
def rest_get(self, url, params=None, headers=None, auth=None, verify=True, cert=None): """ Perform a GET request to url with optional authentication """ res = requests.get(url, params=params, headers=headers, auth=auth, verify=verify, cert=cert) return res.text, res.status_code
[ "def", "rest_get", "(", "self", ",", "url", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "auth", "=", "None", ",", "verify", "=", "True", ",", "cert", "=", "None", ")", ":", "res", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", "auth", "=", "auth", ",", "verify", "=", "verify", ",", "cert", "=", "cert", ")", "return", "res", ".", "text", ",", "res", ".", "status_code" ]
48.428571
17.571429
def list_cache_nodes_full(opts=None, provider=None, base=None):
    '''
    Return a list of minion data from the cloud cache, rather than from
    the cloud providers themselves. This is the cloud cache version of
    list_nodes_full().
    '''
    if opts is None:
        opts = __opts__
    if opts.get('update_cachedir', False) is False:
        return

    if base is None:
        base = os.path.join(opts['cachedir'], 'active')

    minions = {}
    # First, get a list of all drivers in use
    for driver in os.listdir(base):
        minions[driver] = {}
        prov_dir = os.path.join(base, driver)
        # Then, get a list of all providers per driver
        for prov in os.listdir(prov_dir):
            # If a specific provider is requested, filter out everyone else
            if provider and provider != prov:
                continue
            minions[driver][prov] = {}
            min_dir = os.path.join(prov_dir, prov)
            # Get a list of all nodes per provider
            for fname in os.listdir(min_dir):
                # Finally, get a list of full minion data
                fpath = os.path.join(min_dir, fname)
                minion_id = fname[:-2]  # strip '.p' from end of msgpack filename
                mode = 'rb' if six.PY3 else 'r'
                with salt.utils.files.fopen(fpath, mode) as fh_:
                    minions[driver][prov][minion_id] = salt.utils.data.decode(
                        salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING))

    return minions
[ "def", "list_cache_nodes_full", "(", "opts", "=", "None", ",", "provider", "=", "None", ",", "base", "=", "None", ")", ":", "if", "opts", "is", "None", ":", "opts", "=", "__opts__", "if", "opts", ".", "get", "(", "'update_cachedir'", ",", "False", ")", "is", "False", ":", "return", "if", "base", "is", "None", ":", "base", "=", "os", ".", "path", ".", "join", "(", "opts", "[", "'cachedir'", "]", ",", "'active'", ")", "minions", "=", "{", "}", "# First, get a list of all drivers in use", "for", "driver", "in", "os", ".", "listdir", "(", "base", ")", ":", "minions", "[", "driver", "]", "=", "{", "}", "prov_dir", "=", "os", ".", "path", ".", "join", "(", "base", ",", "driver", ")", "# Then, get a list of all providers per driver", "for", "prov", "in", "os", ".", "listdir", "(", "prov_dir", ")", ":", "# If a specific provider is requested, filter out everyone else", "if", "provider", "and", "provider", "!=", "prov", ":", "continue", "minions", "[", "driver", "]", "[", "prov", "]", "=", "{", "}", "min_dir", "=", "os", ".", "path", ".", "join", "(", "prov_dir", ",", "prov", ")", "# Get a list of all nodes per provider", "for", "fname", "in", "os", ".", "listdir", "(", "min_dir", ")", ":", "# Finally, get a list of full minion data", "fpath", "=", "os", ".", "path", ".", "join", "(", "min_dir", ",", "fname", ")", "minion_id", "=", "fname", "[", ":", "-", "2", "]", "# strip '.p' from end of msgpack filename", "mode", "=", "'rb'", "if", "six", ".", "PY3", "else", "'r'", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "fpath", ",", "mode", ")", "as", "fh_", ":", "minions", "[", "driver", "]", "[", "prov", "]", "[", "minion_id", "]", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "salt", ".", "utils", ".", "msgpack", ".", "load", "(", "fh_", ",", "encoding", "=", "MSGPACK_ENCODING", ")", ")", "return", "minions" ]
41.194444
20.638889
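The on-disk layout the function walks is cachedir/active/<driver>/<provider>/<minion>.p. A minimal sketch of the same traversal with pathlib (the cache path here is a hypothetical example; iterdir() raises if it does not exist):

from pathlib import Path

base = Path("/var/cache/salt/cloud/active")  # hypothetical cachedir
minions = {}
for prov_dir in base.iterdir():              # one directory per driver
    for min_dir in prov_dir.iterdir():       # one directory per provider
        for fpath in min_dir.glob("*.p"):
            minion_id = fpath.stem           # strip the '.p' suffix
            minions.setdefault(prov_dir.name, {}).setdefault(
                min_dir.name, {})[minion_id] = fpath
print(minions)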
def render_template(self, template_parameters, template_id): """RenderTemplate. [Preview API] :param :class:`<TemplateParameters> <azure.devops.v5_1.cix.models.TemplateParameters>` template_parameters: :param str template_id: :rtype: :class:`<Template> <azure.devops.v5_1.cix.models.Template>` """ route_values = {} if template_id is not None: route_values['templateId'] = self._serialize.url('template_id', template_id, 'str') content = self._serialize.body(template_parameters, 'TemplateParameters') response = self._send(http_method='POST', location_id='eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515', version='5.1-preview.1', route_values=route_values, content=content) return self._deserialize('Template', response)
[ "def", "render_template", "(", "self", ",", "template_parameters", ",", "template_id", ")", ":", "route_values", "=", "{", "}", "if", "template_id", "is", "not", "None", ":", "route_values", "[", "'templateId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'template_id'", ",", "template_id", ",", "'str'", ")", "content", "=", "self", ".", "_serialize", ".", "body", "(", "template_parameters", ",", "'TemplateParameters'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'POST'", ",", "location_id", "=", "'eb5d6d1d-98a2-4bbd-9028-f9a6b2d66515'", ",", "version", "=", "'5.1-preview.1'", ",", "route_values", "=", "route_values", ",", "content", "=", "content", ")", "return", "self", ".", "_deserialize", "(", "'Template'", ",", "response", ")" ]
53.823529
22
def _cartesian(arraySizes, out=None):
    """
    NAME:
       cartesian
    PURPOSE:
       Generate the cartesian product of index arrays; for each size in
       arraySizes, the index array arange(0, size) is formed.
    INPUT:
       arraySizes - list of array sizes
       out - Array to place the cartesian product in.
    OUTPUT:
       2-D array of shape (product(arraySizes), len(arraySizes)) containing
       cartesian products formed of input arrays.
    HISTORY:
       2016-06-02 - Obtained from http://stackoverflow.com/questions/1208118/using-numpy-to-build-an-array-of-all-combinations-of-two-arrays
    """
    arrays = []
    for i in range(len(arraySizes)):
        arrays.append(nu.arange(0, arraySizes[i]))
    arrays = [nu.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = nu.prod([x.size for x in arrays])
    if out is None:
        out = nu.zeros([n, len(arrays)], dtype=dtype)
    m = n // arrays[0].size
    out[:,0] = nu.repeat(arrays[0], m)
    if arrays[1:]:
        _cartesian(arraySizes[1:], out=out[0:m,1:])
        for j in range(1, arrays[0].size):
            out[j*m:(j+1)*m,1:] = out[0:m,1:]
    return out
[ "def", "_cartesian", "(", "arraySizes", ",", "out", "=", "None", ")", ":", "arrays", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "arraySizes", ")", ")", ":", "arrays", ".", "append", "(", "nu", ".", "arange", "(", "0", ",", "arraySizes", "[", "i", "]", ")", ")", "arrays", "=", "[", "nu", ".", "asarray", "(", "x", ")", "for", "x", "in", "arrays", "]", "dtype", "=", "arrays", "[", "0", "]", ".", "dtype", "n", "=", "nu", ".", "prod", "(", "[", "x", ".", "size", "for", "x", "in", "arrays", "]", ")", "if", "out", "is", "None", ":", "out", "=", "nu", ".", "zeros", "(", "[", "n", ",", "len", "(", "arrays", ")", "]", ",", "dtype", "=", "dtype", ")", "m", "=", "n", "//", "arrays", "[", "0", "]", ".", "size", "out", "[", ":", ",", "0", "]", "=", "nu", ".", "repeat", "(", "arrays", "[", "0", "]", ",", "m", ")", "if", "arrays", "[", "1", ":", "]", ":", "_cartesian", "(", "arraySizes", "[", "1", ":", "]", ",", "out", "=", "out", "[", "0", ":", "m", ",", "1", ":", "]", ")", "for", "j", "in", "range", "(", "1", ",", "arrays", "[", "0", "]", ".", "size", ")", ":", "out", "[", "j", "*", "m", ":", "(", "j", "+", "1", ")", "*", "m", ",", "1", ":", "]", "=", "out", "[", "0", ":", "m", ",", "1", ":", "]", "return", "out" ]
31.617647
18.5
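For example, sizes [2, 3] yield all (i, j) index pairs with i < 2, j < 3. This sketch reproduces the recursion's result directly with numpy:

import numpy as np

out = np.stack(np.meshgrid(np.arange(2), np.arange(3), indexing="ij"),
               axis=-1).reshape(-1, 2)
print(out)  # [[0 0] [0 1] [0 2] [1 0] [1 1] [1 2]]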
def run(locations, random, bikes, crime, nearby, json, update_bikes,
        api_server, cross_origin, host, port, db_path, verbose):
    """
    Runs the program.

    Takes a list of postcodes or coordinates and returns various information
    about them. If using the cli, make sure to update the bikes database
    with the -u command.

    Locations can be either a specific postcode, or a pair of coordinates.
    Coordinates are passed in the form "55.948824,-3.196425".

    :param locations: The list of postcodes or coordinates to search.
    :param random: The number of random postcodes to include.
    :param bikes: Includes a list of stolen bikes in that area.
    :param crime: Includes a list of committed crimes in that area.
    :param nearby: Includes a list of wikipedia articles in that area.
    :param json: Returns the data in json format.
    :param update_bikes: Whether to force update bikes.
    :param api_server: If given, the program will instead run a rest api.
    :param cross_origin: Whether to enable cross-origin (CORS) requests on the rest api.
    :param host: The host to bind the rest api to.
    :param port: Defines the port to run the rest api on.
    :param db_path: The path to the sqlite db to use.
    :param verbose: The verbosity.
    """
    log_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
    logging.basicConfig(level=log_levels[min(verbose, 2)])

    initialize_database(db_path)
    loop = get_event_loop()

    if update_bikes:
        logger.info("Force updating bikes.")
        loop.run_until_complete(util.update_bikes())

    if api_server:
        if cross_origin:
            enable_cross_origin(app)
        try:
            web.run_app(app, host=host, port=port)
        except CancelledError as e:
            if e.__context__ is not None:
                click.echo(Fore.RED + (
                    f"Could not bind to address {host}:{port}"
                    if e.__context__.errno == 48 else
                    e.__context__))
                exit(1)
            else:
                click.echo("Goodbye!")
    elif len(locations) > 0 or random > 0:
        exit(loop.run_until_complete(cli(
            locations, random, bikes=bikes, crime=crime,
            nearby=nearby, as_json=json)))
    else:
        click.echo(Fore.RED + "Either include a post code, or the --api-server flag.")
[ "def", "run", "(", "locations", ",", "random", ",", "bikes", ",", "crime", ",", "nearby", ",", "json", ",", "update_bikes", ",", "api_server", ",", "cross_origin", ",", "host", ",", "port", ",", "db_path", ",", "verbose", ")", ":", "log_levels", "=", "[", "logging", ".", "WARNING", ",", "logging", ".", "INFO", ",", "logging", ".", "DEBUG", "]", "logging", ".", "basicConfig", "(", "level", "=", "log_levels", "[", "min", "(", "verbose", ",", "2", ")", "]", ")", "initialize_database", "(", "db_path", ")", "loop", "=", "get_event_loop", "(", ")", "if", "update_bikes", ":", "logger", ".", "info", "(", "\"Force updating bikes.\"", ")", "loop", ".", "run_until_complete", "(", "util", ".", "update_bikes", "(", ")", ")", "if", "api_server", ":", "if", "cross_origin", ":", "enable_cross_origin", "(", "app", ")", "try", ":", "web", ".", "run_app", "(", "app", ",", "host", "=", "host", ",", "port", "=", "port", ")", "except", "CancelledError", "as", "e", ":", "if", "e", ".", "__context__", "is", "not", "None", ":", "click", ".", "echo", "(", "Fore", ".", "RED", "+", "(", "f\"Could not bind to address {host}:{port}\"", "if", "e", ".", "__context__", ".", "errno", "==", "48", "else", "e", ".", "__context__", ")", ")", "exit", "(", "1", ")", "else", ":", "click", ".", "echo", "(", "\"Goodbye!\"", ")", "elif", "len", "(", "locations", ")", ">", "0", "or", "random", ">", "0", ":", "exit", "(", "loop", ".", "run_until_complete", "(", "cli", "(", "locations", ",", "random", ",", "bikes", "=", "bikes", ",", "crime", "=", "crime", ",", "nearby", "=", "nearby", ",", "as_json", "=", "json", ")", ")", ")", "else", ":", "click", ".", "echo", "(", "Fore", ".", "RED", "+", "\"Either include a post code, or the --api-server flag.\"", ")" ]
39.925926
24.074074
def _toState(self, state, *args, **kwargs): """ Transition to the next state. @param state: Name of the next state. """ try: method = getattr(self, '_state_%s' % state) except AttributeError: raise ValueError("No such state %r" % state) log.msg("%s: to state %r" % (self.__class__.__name__, state)) self._state = state method(*args, **kwargs)
[ "def", "_toState", "(", "self", ",", "state", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "method", "=", "getattr", "(", "self", ",", "'_state_%s'", "%", "state", ")", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"No such state %r\"", "%", "state", ")", "log", ".", "msg", "(", "\"%s: to state %r\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "state", ")", ")", "self", ".", "_state", "=", "state", "method", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
30.5
15.071429
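The getattr dispatch above is a common lightweight state-machine pattern; a self-contained sketch of the same idea:

class Machine:
    def _toState(self, state, *args, **kwargs):
        # Look up the handler by naming convention, then transition.
        method = getattr(self, '_state_%s' % state, None)
        if method is None:
            raise ValueError("No such state %r" % state)
        self._state = state
        method(*args, **kwargs)

    def _state_connected(self, peer):
        print("connected to", peer)

Machine()._toState('connected', 'example.org')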
def pre_save(sender, instance, raw, using, update_fields, **kwargs): """https://docs.djangoproject.com/es/1.10/ref/signals/#post-save""" if raw: # Return if loading Fixtures return try: with transaction.atomic(): if not should_audit(instance): return False try: object_json_repr = serializers.serialize("json", [instance]) except Exception: # We need a better way for this to work. ManyToMany will fail on pre_save on create return None if instance.pk is None: created = True else: created = False # created or updated? if not created: old_model = sender.objects.get(pk=instance.pk) delta = model_delta(old_model, instance) changed_fields = json.dumps(delta) event_type = CRUDEvent.UPDATE # user try: user = get_current_user() # validate that the user still exists user = get_user_model().objects.get(pk=user.pk) except: user = None if isinstance(user, AnonymousUser): user = None # callbacks kwargs['request'] = get_current_request() # make request available for callbacks create_crud_event = all( callback(instance, object_json_repr, created, raw, using, update_fields, **kwargs) for callback in CRUD_DIFFERENCE_CALLBACKS if callable(callback)) # create crud event only if all callbacks returned True if create_crud_event and not created: c_t = ContentType.objects.get_for_model(instance) sid = transaction.savepoint() try: with transaction.atomic(): crud_event = CRUDEvent.objects.create( event_type=event_type, object_repr=str(instance), object_json_repr=object_json_repr, changed_fields=changed_fields, content_type_id=c_t.id, object_id=instance.pk, user_id=getattr(user, 'id', None), datetime=timezone.now(), user_pk_as_string=str(user.pk) if user else user ) except Exception as e: logger.exception( "easy audit had a pre-save exception on CRUDEvent creation. instance: {}, instance pk: {}".format( instance, instance.pk)) transaction.savepoint_rollback(sid) except Exception: logger.exception('easy audit had a pre-save exception.')
[ "def", "pre_save", "(", "sender", ",", "instance", ",", "raw", ",", "using", ",", "update_fields", ",", "*", "*", "kwargs", ")", ":", "if", "raw", ":", "# Return if loading Fixtures", "return", "try", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "if", "not", "should_audit", "(", "instance", ")", ":", "return", "False", "try", ":", "object_json_repr", "=", "serializers", ".", "serialize", "(", "\"json\"", ",", "[", "instance", "]", ")", "except", "Exception", ":", "# We need a better way for this to work. ManyToMany will fail on pre_save on create", "return", "None", "if", "instance", ".", "pk", "is", "None", ":", "created", "=", "True", "else", ":", "created", "=", "False", "# created or updated?", "if", "not", "created", ":", "old_model", "=", "sender", ".", "objects", ".", "get", "(", "pk", "=", "instance", ".", "pk", ")", "delta", "=", "model_delta", "(", "old_model", ",", "instance", ")", "changed_fields", "=", "json", ".", "dumps", "(", "delta", ")", "event_type", "=", "CRUDEvent", ".", "UPDATE", "# user", "try", ":", "user", "=", "get_current_user", "(", ")", "# validate that the user still exists", "user", "=", "get_user_model", "(", ")", ".", "objects", ".", "get", "(", "pk", "=", "user", ".", "pk", ")", "except", ":", "user", "=", "None", "if", "isinstance", "(", "user", ",", "AnonymousUser", ")", ":", "user", "=", "None", "# callbacks", "kwargs", "[", "'request'", "]", "=", "get_current_request", "(", ")", "# make request available for callbacks", "create_crud_event", "=", "all", "(", "callback", "(", "instance", ",", "object_json_repr", ",", "created", ",", "raw", ",", "using", ",", "update_fields", ",", "*", "*", "kwargs", ")", "for", "callback", "in", "CRUD_DIFFERENCE_CALLBACKS", "if", "callable", "(", "callback", ")", ")", "# create crud event only if all callbacks returned True", "if", "create_crud_event", "and", "not", "created", ":", "c_t", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "instance", ")", "sid", "=", "transaction", ".", "savepoint", "(", ")", "try", ":", "with", "transaction", ".", "atomic", "(", ")", ":", "crud_event", "=", "CRUDEvent", ".", "objects", ".", "create", "(", "event_type", "=", "event_type", ",", "object_repr", "=", "str", "(", "instance", ")", ",", "object_json_repr", "=", "object_json_repr", ",", "changed_fields", "=", "changed_fields", ",", "content_type_id", "=", "c_t", ".", "id", ",", "object_id", "=", "instance", ".", "pk", ",", "user_id", "=", "getattr", "(", "user", ",", "'id'", ",", "None", ")", ",", "datetime", "=", "timezone", ".", "now", "(", ")", ",", "user_pk_as_string", "=", "str", "(", "user", ".", "pk", ")", "if", "user", "else", "user", ")", "except", "Exception", "as", "e", ":", "logger", ".", "exception", "(", "\"easy audit had a pre-save exception on CRUDEvent creation. instance: {}, instance pk: {}\"", ".", "format", "(", "instance", ",", "instance", ".", "pk", ")", ")", "transaction", ".", "savepoint_rollback", "(", "sid", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "'easy audit had a pre-save exception.'", ")" ]
42.176471
20.455882
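In Django, a handler like this only fires once connected to the pre_save signal; a minimal sketch of the wiring (it assumes a configured Django project, and the dispatch_uid value is a hypothetical example):

from django.db.models.signals import pre_save as pre_save_signal

# Connect for every model; the handler decides per-instance via should_audit().
pre_save_signal.connect(pre_save, dispatch_uid="easy_audit_pre_save")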
def _fetchSequence(ac, startIndex=None, endIndex=None):
    """Fetch sequences from NCBI using the efetch interface.

    An interbase interval may be optionally provided with startIndex and
    endIndex. NCBI efetch will return just the requested subsequence, which
    might greatly reduce payload sizes (especially with chromosome-scale
    sequences). The sequence is returned as a list of response lines rather
    than as a concatenated string.

    >>> len(_fetchSequence('NP_056374.2'))
    1596

    Pass the desired interval rather than using Python's [] slice operator.

    >>> _fetchSequence('NP_056374.2',0,10)
    'MESRETLSSS'

    >>> _fetchSequence('NP_056374.2')[0:10]
    'MESRETLSSS'

    """
    urlFmt = (
        "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
        "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
    if startIndex is None or endIndex is None:
        url = urlFmt.format(ac=ac)
    else:
        urlFmt += "&seq_start={start}&seq_stop={stop}"
        url = urlFmt.format(ac=ac, start=startIndex + 1, stop=endIndex)
    resp = requests.get(url)
    resp.raise_for_status()
    seqlines = resp.content.splitlines()[1:]
    print("{ac}[{s},{e}) => {n} lines ({u})".format(
        ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
    # return response as list of lines, already line wrapped
    return seqlines
[ "def", "_fetchSequence", "(", "ac", ",", "startIndex", "=", "None", ",", "endIndex", "=", "None", ")", ":", "urlFmt", "=", "(", "\"http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?\"", "\"db=nucleotide&id={ac}&rettype=fasta&retmode=text\"", ")", "if", "startIndex", "is", "None", "or", "endIndex", "is", "None", ":", "url", "=", "urlFmt", ".", "format", "(", "ac", "=", "ac", ")", "else", ":", "urlFmt", "+=", "\"&seq_start={start}&seq_stop={stop}\"", "url", "=", "urlFmt", ".", "format", "(", "ac", "=", "ac", ",", "start", "=", "startIndex", "+", "1", ",", "stop", "=", "endIndex", ")", "resp", "=", "requests", ".", "get", "(", "url", ")", "resp", ".", "raise_for_status", "(", ")", "seqlines", "=", "resp", ".", "content", ".", "splitlines", "(", ")", "[", "1", ":", "]", "print", "(", "\"{ac}[{s},{e}) => {n} lines ({u})\"", ".", "format", "(", "ac", "=", "ac", ",", "s", "=", "startIndex", ",", "e", "=", "endIndex", ",", "n", "=", "len", "(", "seqlines", ")", ",", "u", "=", "url", ")", ")", "# return response as list of lines, already line wrapped", "return", "seqlines" ]
36.162162
21.675676
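The interbase-to-one-based shift is the key detail: NCBI's seq_start/seq_stop are inclusive and one-based, so the interval [0, 10) becomes start=1, stop=10. A sketch of just the URL construction:

ac, startIndex, endIndex = 'NP_056374.2', 0, 10
url = ("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
       "db=nucleotide&id={ac}&rettype=fasta&retmode=text"
       "&seq_start={start}&seq_stop={stop}").format(
           ac=ac, start=startIndex + 1, stop=endIndex)
print(url)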
def drop_schema(self, schema, cascade=False): """Drop specified schema """ if schema in self.schemas: sql = "DROP SCHEMA " + schema if cascade: sql = sql + " CASCADE" self.execute(sql)
[ "def", "drop_schema", "(", "self", ",", "schema", ",", "cascade", "=", "False", ")", ":", "if", "schema", "in", "self", ".", "schemas", ":", "sql", "=", "\"DROP SCHEMA \"", "+", "schema", "if", "cascade", ":", "sql", "=", "sql", "+", "\" CASCADE\"", "self", ".", "execute", "(", "sql", ")" ]
31.625
5.25
def change_sample(self, old_samp_name, new_samp_name, new_site_name=None,
                  new_er_data=None, new_pmag_data=None, replace_data=False):
    """
    Find actual data objects for sample and site.
    Then call Sample class change method to update sample name and data.
    """
    sample = self.find_by_name(old_samp_name, self.samples)
    if not sample:
        print('-W- {} is not a currently existing sample, so it cannot be updated'.format(old_samp_name))
        return False
    if new_site_name:
        new_site = self.find_by_name(new_site_name, self.sites)
        if not new_site:
            print("""-W- {} is not a currently existing site.
Adding site named: {}""".format(new_site_name, new_site_name))#sample.site or '*empty*', sample)
            new_site = self.add_site(new_site_name)
    else:
        new_site = None
    sample.change_sample(new_samp_name, new_site, new_er_data, new_pmag_data, replace_data)
    return sample
[ "def", "change_sample", "(", "self", ",", "old_samp_name", ",", "new_samp_name", ",", "new_site_name", "=", "None", ",", "new_er_data", "=", "None", ",", "new_pmag_data", "=", "None", ",", "replace_data", "=", "False", ")", ":", "sample", "=", "self", ".", "find_by_name", "(", "old_samp_name", ",", "self", ".", "samples", ")", "if", "not", "sample", ":", "print", "(", "'-W- {} is not a currently existing sample, so it cannot be updated'", ".", "format", "(", "old_samp_name", ")", ")", "return", "False", "if", "new_site_name", ":", "new_site", "=", "self", ".", "find_by_name", "(", "new_site_name", ",", "self", ".", "sites", ")", "if", "not", "new_site", ":", "print", "(", "\"\"\"-W- {} is not a currently existing site.\nAdding site named: {}\"\"\"", ".", "format", "(", "new_site_name", ",", "new_site_name", ")", ")", "#sample.site or '*empty*', sample)", "new_site", "=", "self", ".", "add_site", "(", "new_site_name", ")", "else", ":", "new_site", "=", "None", "sample", ".", "change_sample", "(", "new_samp_name", ",", "new_site", ",", "new_er_data", ",", "new_pmag_data", ",", "replace_data", ")", "return", "sample" ]
50.75
21.6
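find_by_name itself is not shown in this record; a generic sketch of what such a lookup helper typically does (an assumption for illustration, not the project's actual implementation):

def find_by_name(name, items):
    # Return the first item whose .name matches, else None.
    return next((i for i in items if getattr(i, "name", None) == name), None)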
def get(self, index: pd.Index, query: str='', omit_missing_columns: bool=False) -> pd.DataFrame:
    """For the rows in ``index``, get the columns from the simulation's
    population table with which this view is configured. The result may
    be further filtered by the view's query.

    Parameters
    ----------
    index :
        Index of the population to get.
    query :
        Conditions used to filter the index.  May use columns not in the requested view.
    omit_missing_columns :
        Silently skip loading columns which are not present in the population table. In general you want this to
        be False because that situation indicates an error but sometimes, like during population initialization,
        it can be convenient to just load whatever data is actually available.

    Returns
    -------
    pd.DataFrame
        A table with the subset of the population requested.
    """
    pop = self.manager.get_population(True).loc[index]

    if self._query:
        pop = pop.query(self._query)
    if query:
        pop = pop.query(query)

    if not self._columns:
        return pop
    else:
        if omit_missing_columns:
            columns = list(set(self._columns).intersection(pop.columns))
        else:
            columns = self._columns
        try:
            return pop[columns].copy()
        except KeyError:
            non_existent_columns = set(columns) - set(pop.columns)
            raise PopulationError(f'Requested column(s) {non_existent_columns} not in population table.')
[ "def", "get", "(", "self", ",", "index", ":", "pd", ".", "Index", ",", "query", ":", "str", "=", "''", ",", "omit_missing_columns", ":", "bool", "=", "False", ")", "->", "pd", ".", "DataFrame", ":", "pop", "=", "self", ".", "manager", ".", "get_population", "(", "True", ")", ".", "loc", "[", "index", "]", "if", "self", ".", "_query", ":", "pop", "=", "pop", ".", "query", "(", "self", ".", "_query", ")", "if", "query", ":", "pop", "=", "pop", ".", "query", "(", "query", ")", "if", "not", "self", ".", "_columns", ":", "return", "pop", "else", ":", "if", "omit_missing_columns", ":", "columns", "=", "list", "(", "set", "(", "self", ".", "_columns", ")", ".", "intersection", "(", "pop", ".", "columns", ")", ")", "else", ":", "columns", "=", "self", ".", "_columns", "try", ":", "return", "pop", "[", "columns", "]", ".", "copy", "(", ")", "except", "KeyError", ":", "non_existent_columns", "=", "set", "(", "columns", ")", "-", "set", "(", "pop", ".", "columns", ")", "raise", "PopulationError", "(", "f'Requested column(s) {non_existent_columns} not in population table.'", ")" ]
40.35
26.175
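The omit_missing_columns branch can be seen in isolation with a toy table (column names here are made up for illustration):

import pandas as pd

pop = pd.DataFrame({'age': [30, 62], 'alive': ['alive', 'dead']})
requested = ['age', 'sex']  # 'sex' is missing from the table

# omit_missing_columns=True keeps only what exists (order not guaranteed):
columns = list(set(requested).intersection(pop.columns))
print(pop[columns])

# omit_missing_columns=False surfaces the gap instead:
missing = set(requested) - set(pop.columns)
print(missing)  # {'sex'}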
def main(): """Simple test.""" from spyder.utils.qthelpers import qapplication app = qapplication() widget = NotebookClient(plugin=None, name='') widget.show() widget.set_url('http://google.com') sys.exit(app.exec_())
[ "def", "main", "(", ")", ":", "from", "spyder", ".", "utils", ".", "qthelpers", "import", "qapplication", "app", "=", "qapplication", "(", ")", "widget", "=", "NotebookClient", "(", "plugin", "=", "None", ",", "name", "=", "''", ")", "widget", ".", "show", "(", ")", "widget", ".", "set_url", "(", "'http://google.com'", ")", "sys", ".", "exit", "(", "app", ".", "exec_", "(", ")", ")" ]
29.75
13
def dim_lower_extent(self, *args, **kwargs): """ Returns the lower extent of the dimensions in args. .. code-block:: python t_ex, bl_ex, ch_ex = cube.dim_lower_extent('ntime', 'nbl', 'nchan') or .. code-block:: python t_ex, bl_ex, ch_ex, src_ex = cube.dim_lower_extent('ntime,nbl:nchan nsrc') """ # The lower extent of any integral dimension is 0 by default args = tuple(0 if isinstance(a, (int, np.integer)) else a for a in args) return self._dim_attribute('lower_extent', *args, **kwargs)
[ "def", "dim_lower_extent", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# The lower extent of any integral dimension is 0 by default", "args", "=", "tuple", "(", "0", "if", "isinstance", "(", "a", ",", "(", "int", ",", "np", ".", "integer", ")", ")", "else", "a", "for", "a", "in", "args", ")", "return", "self", ".", "_dim_attribute", "(", "'lower_extent'", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
30.842105
25.157895
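The integer-to-zero substitution in the first statement is the whole trick: integral dimensions are taken to start at 0, while named dimensions pass through. Standalone:

import numpy as np

args = ('ntime', 5, np.int64(3), 'nchan')
args = tuple(0 if isinstance(a, (int, np.integer)) else a for a in args)
print(args)  # ('ntime', 0, 0, 'nchan')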
def data_to_bytes(data, encoding):
    """\
    Converts the provided data into bytes. If the data is already a byte
    sequence, it will be left unchanged.

    This function tries to use the provided `encoding` (if not ``None``)
    or the default encoding (ISO/IEC 8859-1). It falls back to Shift JIS
    (for Kanji) and finally to UTF-8.

    Returns the (byte) data, the data length and the encoding of the data.

    :param data: The data to encode
    :type data: str or bytes
    :param encoding: str or ``None``
    :rtype: tuple: data, data length, encoding
    """
    if isinstance(data, bytes):
        return data, len(data), encoding or consts.DEFAULT_BYTE_ENCODING
    data = str(data)
    if encoding is not None:
        # Use the provided encoding; could raise an exception by intention
        data = data.encode(encoding)
    else:
        try:
            # Try to use the default byte encoding
            encoding = consts.DEFAULT_BYTE_ENCODING
            data = data.encode(encoding)
        except UnicodeError:
            try:
                # Try Kanji / Shift_JIS
                encoding = consts.KANJI_ENCODING
                data = data.encode(encoding)
            except UnicodeError:
                # Use UTF-8
                encoding = 'utf-8'
                data = data.encode(encoding)
    return data, len(data), encoding
[ "def", "data_to_bytes", "(", "data", ",", "encoding", ")", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "return", "data", ",", "len", "(", "data", ")", ",", "encoding", "or", "consts", ".", "DEFAULT_BYTE_ENCODING", "data", "=", "str", "(", "data", ")", "if", "encoding", "is", "not", "None", ":", "# Use the provided encoding; could raise an exception by intention", "data", "=", "data", ".", "encode", "(", "encoding", ")", "else", ":", "try", ":", "# Try to use the default byte encoding", "encoding", "=", "consts", ".", "DEFAULT_BYTE_ENCODING", "data", "=", "data", ".", "encode", "(", "encoding", ")", "except", "UnicodeError", ":", "try", ":", "# Try Kanji / Shift_JIS", "encoding", "=", "consts", ".", "KANJI_ENCODING", "data", "=", "data", ".", "encode", "(", "encoding", ")", "except", "UnicodeError", ":", "# Use UTF-8", "encoding", "=", "'utf-8'", "data", "=", "data", ".", "encode", "(", "encoding", ")", "return", "data", ",", "len", "(", "data", ")", ",", "encoding" ]
35.972222
15.5
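The fallback chain (ISO 8859-1, then Shift JIS, then UTF-8) can be exercised directly; the constants below are assumed stand-ins for the consts module:

DEFAULT_BYTE_ENCODING = 'iso-8859-1'  # assumed value of consts.DEFAULT_BYTE_ENCODING
KANJI_ENCODING = 'shift_jis'          # assumed value of consts.KANJI_ENCODING

def pick_encoding(s):
    # Return (encoded bytes, encoding name) for the first encoding that works.
    for enc in (DEFAULT_BYTE_ENCODING, KANJI_ENCODING, 'utf-8'):
        try:
            return s.encode(enc), enc
        except UnicodeError:
            continue

print(pick_encoding('abc')[1])   # iso-8859-1
print(pick_encoding('点')[1])    # shift_jis (kanji not in latin-1)
print(pick_encoding('😀')[1])    # utf-8 (not representable in shift_jis)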
def ajax_recalculate_records(self):
    """Recalculate all AR records and dependencies

        - samples
        - templates
        - profiles
        - services
        - dependencies

    XXX: This function has grown too much and needs refactoring!
    """
    out = {}

    # The sorted records from the request
    records = self.get_records()

    for n, record in enumerate(records):
        # Mapping of client UID -> client object info
        client_metadata = {}
        # Mapping of contact UID -> contact object info
        contact_metadata = {}
        # Mapping of sample UID -> sample object info
        sample_metadata = {}
        # Mapping of sampletype UID -> sampletype object info
        sampletype_metadata = {}
        # Mapping of specification UID -> specification object info
        specification_metadata = {}
        # Mapping of specification UID -> list of service UIDs
        specification_to_services = {}
        # Mapping of service UID -> list of specification UIDs
        service_to_specifications = {}
        # Mapping of template UID -> template object info
        template_metadata = {}
        # Mapping of template UID -> list of service UIDs
        template_to_services = {}
        # Mapping of service UID -> list of template UIDs
        service_to_templates = {}
        # Mapping of profile UID -> list of service UIDs
        profile_to_services = {}
        # Mapping of service UID -> list of profile UIDs
        service_to_profiles = {}
        # Profile metadata for UI purposes
        profile_metadata = {}
        # Mapping of service UID -> service object info
        service_metadata = {}
        # mapping of service UID -> unmet service dependency UIDs
        unmet_dependencies = {}

        # Mappings of UID -> object of selected items in this record
        _clients = self.get_objs_from_record(record, "Client_uid")
        _contacts = self.get_objs_from_record(record, "Contact_uid")
        _specifications = self.get_objs_from_record(
            record, "Specification_uid")
        _templates = self.get_objs_from_record(record, "Template_uid")
        _samples = self.get_objs_from_record(
            record, "PrimaryAnalysisRequest_uid")
        _profiles = self.get_objs_from_record(record, "Profiles_uid")
        _services = self.get_objs_from_record(record, "Analyses")
        _sampletypes = self.get_objs_from_record(record, "SampleType_uid")

        # CLIENTS
        for uid, obj in _clients.iteritems():
            # get the client metadata
            metadata = self.get_client_info(obj)
            # remember the sampletype metadata
            client_metadata[uid] = metadata

        # CONTACTS
        for uid, obj in _contacts.iteritems():
            # get the client metadata
            metadata = self.get_contact_info(obj)
            # remember the sampletype metadata
            contact_metadata[uid] = metadata

        # SPECIFICATIONS
        for uid, obj in _specifications.iteritems():
            # get the specification metadata
            metadata = self.get_specification_info(obj)
            # remember the metadata of this specification
            specification_metadata[uid] = metadata
            # get the spec'd service UIDs
            service_uids = metadata["service_uids"]
            # remember a mapping of specification uid -> spec'd services
            specification_to_services[uid] = service_uids
            # remember a mapping of service uid -> specifications
            for service_uid in service_uids:
                if service_uid in service_to_specifications:
                    service_to_specifications[service_uid].append(uid)
                else:
                    service_to_specifications[service_uid] = [uid]

        # AR TEMPLATES
        for uid, obj in _templates.iteritems():
            # get the template metadata
            metadata = self.get_template_info(obj)
            # remember the template metadata
            template_metadata[uid] = metadata
            # profile from the template
            profile = obj.getAnalysisProfile()
            # add the profile to the other profiles
            if profile is not None:
                profile_uid = api.get_uid(profile)
                _profiles[profile_uid] = profile
            # get the template analyses
            # [{'partition': 'part-1', 'service_uid': '...'},
            #  {'partition': 'part-1', 'service_uid': '...'}]
            analyses = obj.getAnalyses() or []
            # get all UIDs of the template records
            service_uids = map(
                lambda rec: rec.get("service_uid"), analyses)
            # remember a mapping of template uid -> service
            template_to_services[uid] = service_uids
            # remember a mapping of service uid -> templates
            for service_uid in service_uids:
                # append service to services mapping
                service = self.get_object_by_uid(service_uid)
                # remember the template of all services
                if service_uid in service_to_templates:
                    service_to_templates[service_uid].append(uid)
                else:
                    service_to_templates[service_uid] = [uid]
                # remember the service metadata
                if service_uid not in service_metadata:
                    metadata = self.get_service_info(service)
                    service_metadata[service_uid] = metadata

        # PROFILES
        for uid, obj in _profiles.iteritems():
            # get the profile metadata
            metadata = self.get_profile_info(obj)
            # remember the profile metadata
            profile_metadata[uid] = metadata
            # get all services of this profile
            services = obj.getService()
            # get all UIDs of the profile services
            service_uids = map(api.get_uid, services)
            # remember all services of this profile
            profile_to_services[uid] = service_uids
            # remember a mapping of service uid -> profiles
            for service in services:
                # get the UID of this service
                service_uid = api.get_uid(service)
                # add the service to the other services
                _services[service_uid] = service
                # remember the profiles of this service
                if service_uid in service_to_profiles:
                    service_to_profiles[service_uid].append(uid)
                else:
                    service_to_profiles[service_uid] = [uid]

        # PRIMARY ANALYSIS REQUESTS
        for uid, obj in _samples.iteritems():
            # get the sample metadata
            metadata = self.get_sample_info(obj)
            # remember the sample metadata
            sample_metadata[uid] = metadata

        # SAMPLETYPES
        for uid, obj in _sampletypes.iteritems():
            # get the sampletype metadata
            metadata = self.get_sampletype_info(obj)
            # remember the sampletype metadata
            sampletype_metadata[uid] = metadata

        # SERVICES
        for uid, obj in _services.iteritems():
            # get the service metadata
            metadata = self.get_service_info(obj)
            # remember the services' metadata
            service_metadata[uid] = metadata

        # DEPENDENCIES
        for uid, obj in _services.iteritems():
            # get the dependencies of this service
            deps = get_service_dependencies_for(obj)
            # check for unmet dependencies
            for dep in deps["dependencies"]:
                # we use the UID to test for equality
                dep_uid = api.get_uid(dep)
                if dep_uid not in _services.keys():
                    if uid in unmet_dependencies:
                        unmet_dependencies[uid].append(
                            self.get_base_info(dep))
                    else:
                        unmet_dependencies[uid] = [self.get_base_info(dep)]
            # remember the dependencies in the service metadata
            service_metadata[uid].update({
                "dependencies": map(
                    self.get_base_info, deps["dependencies"]),
            })

        # Each key `n` (1,2,3...) contains the form data for one AR Add
        # column in the UI.
        # All relevant form data will be set accoriding to this data.
        out[n] = {
            "client_metadata": client_metadata,
            "contact_metadata": contact_metadata,
            "sample_metadata": sample_metadata,
            "sampletype_metadata": sampletype_metadata,
            "specification_metadata": specification_metadata,
            "specification_to_services": specification_to_services,
            "service_to_specifications": service_to_specifications,
            "template_metadata": template_metadata,
            "template_to_services": template_to_services,
            "service_to_templates": service_to_templates,
            "profile_metadata": profile_metadata,
            "profile_to_services": profile_to_services,
            "service_to_profiles": service_to_profiles,
            "service_metadata": service_metadata,
            "unmet_dependencies": unmet_dependencies,
        }

    return out
[ "def", "ajax_recalculate_records", "(", "self", ")", ":", "out", "=", "{", "}", "# The sorted records from the request", "records", "=", "self", ".", "get_records", "(", ")", "for", "n", ",", "record", "in", "enumerate", "(", "records", ")", ":", "# Mapping of client UID -> client object info", "client_metadata", "=", "{", "}", "# Mapping of contact UID -> contact object info", "contact_metadata", "=", "{", "}", "# Mapping of sample UID -> sample object info", "sample_metadata", "=", "{", "}", "# Mapping of sampletype UID -> sampletype object info", "sampletype_metadata", "=", "{", "}", "# Mapping of specification UID -> specification object info", "specification_metadata", "=", "{", "}", "# Mapping of specification UID -> list of service UIDs", "specification_to_services", "=", "{", "}", "# Mapping of service UID -> list of specification UIDs", "service_to_specifications", "=", "{", "}", "# Mapping of template UID -> template object info", "template_metadata", "=", "{", "}", "# Mapping of template UID -> list of service UIDs", "template_to_services", "=", "{", "}", "# Mapping of service UID -> list of template UIDs", "service_to_templates", "=", "{", "}", "# Mapping of profile UID -> list of service UIDs", "profile_to_services", "=", "{", "}", "# Mapping of service UID -> list of profile UIDs", "service_to_profiles", "=", "{", "}", "# Profile metadata for UI purposes", "profile_metadata", "=", "{", "}", "# Mapping of service UID -> service object info", "service_metadata", "=", "{", "}", "# mapping of service UID -> unmet service dependency UIDs", "unmet_dependencies", "=", "{", "}", "# Mappings of UID -> object of selected items in this record", "_clients", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"Client_uid\"", ")", "_contacts", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"Contact_uid\"", ")", "_specifications", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"Specification_uid\"", ")", "_templates", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"Template_uid\"", ")", "_samples", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"PrimaryAnalysisRequest_uid\"", ")", "_profiles", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"Profiles_uid\"", ")", "_services", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"Analyses\"", ")", "_sampletypes", "=", "self", ".", "get_objs_from_record", "(", "record", ",", "\"SampleType_uid\"", ")", "# CLIENTS", "for", "uid", ",", "obj", "in", "_clients", ".", "iteritems", "(", ")", ":", "# get the client metadata", "metadata", "=", "self", ".", "get_client_info", "(", "obj", ")", "# remember the sampletype metadata", "client_metadata", "[", "uid", "]", "=", "metadata", "# CONTACTS", "for", "uid", ",", "obj", "in", "_contacts", ".", "iteritems", "(", ")", ":", "# get the client metadata", "metadata", "=", "self", ".", "get_contact_info", "(", "obj", ")", "# remember the sampletype metadata", "contact_metadata", "[", "uid", "]", "=", "metadata", "# SPECIFICATIONS", "for", "uid", ",", "obj", "in", "_specifications", ".", "iteritems", "(", ")", ":", "# get the specification metadata", "metadata", "=", "self", ".", "get_specification_info", "(", "obj", ")", "# remember the metadata of this specification", "specification_metadata", "[", "uid", "]", "=", "metadata", "# get the spec'd service UIDs", "service_uids", "=", "metadata", "[", "\"service_uids\"", "]", "# remember a mapping of specification uid -> spec'd services", 
"specification_to_services", "[", "uid", "]", "=", "service_uids", "# remember a mapping of service uid -> specifications", "for", "service_uid", "in", "service_uids", ":", "if", "service_uid", "in", "service_to_specifications", ":", "service_to_specifications", "[", "service_uid", "]", ".", "append", "(", "uid", ")", "else", ":", "service_to_specifications", "[", "service_uid", "]", "=", "[", "uid", "]", "# AR TEMPLATES", "for", "uid", ",", "obj", "in", "_templates", ".", "iteritems", "(", ")", ":", "# get the template metadata", "metadata", "=", "self", ".", "get_template_info", "(", "obj", ")", "# remember the template metadata", "template_metadata", "[", "uid", "]", "=", "metadata", "# profile from the template", "profile", "=", "obj", ".", "getAnalysisProfile", "(", ")", "# add the profile to the other profiles", "if", "profile", "is", "not", "None", ":", "profile_uid", "=", "api", ".", "get_uid", "(", "profile", ")", "_profiles", "[", "profile_uid", "]", "=", "profile", "# get the template analyses", "# [{'partition': 'part-1', 'service_uid': '...'},", "# {'partition': 'part-1', 'service_uid': '...'}]", "analyses", "=", "obj", ".", "getAnalyses", "(", ")", "or", "[", "]", "# get all UIDs of the template records", "service_uids", "=", "map", "(", "lambda", "rec", ":", "rec", ".", "get", "(", "\"service_uid\"", ")", ",", "analyses", ")", "# remember a mapping of template uid -> service", "template_to_services", "[", "uid", "]", "=", "service_uids", "# remember a mapping of service uid -> templates", "for", "service_uid", "in", "service_uids", ":", "# append service to services mapping", "service", "=", "self", ".", "get_object_by_uid", "(", "service_uid", ")", "# remember the template of all services", "if", "service_uid", "in", "service_to_templates", ":", "service_to_templates", "[", "service_uid", "]", ".", "append", "(", "uid", ")", "else", ":", "service_to_templates", "[", "service_uid", "]", "=", "[", "uid", "]", "# remember the service metadata", "if", "service_uid", "not", "in", "service_metadata", ":", "metadata", "=", "self", ".", "get_service_info", "(", "service", ")", "service_metadata", "[", "service_uid", "]", "=", "metadata", "# PROFILES", "for", "uid", ",", "obj", "in", "_profiles", ".", "iteritems", "(", ")", ":", "# get the profile metadata", "metadata", "=", "self", ".", "get_profile_info", "(", "obj", ")", "# remember the profile metadata", "profile_metadata", "[", "uid", "]", "=", "metadata", "# get all services of this profile", "services", "=", "obj", ".", "getService", "(", ")", "# get all UIDs of the profile services", "service_uids", "=", "map", "(", "api", ".", "get_uid", ",", "services", ")", "# remember all services of this profile", "profile_to_services", "[", "uid", "]", "=", "service_uids", "# remember a mapping of service uid -> profiles", "for", "service", "in", "services", ":", "# get the UID of this service", "service_uid", "=", "api", ".", "get_uid", "(", "service", ")", "# add the service to the other services", "_services", "[", "service_uid", "]", "=", "service", "# remember the profiles of this service", "if", "service_uid", "in", "service_to_profiles", ":", "service_to_profiles", "[", "service_uid", "]", ".", "append", "(", "uid", ")", "else", ":", "service_to_profiles", "[", "service_uid", "]", "=", "[", "uid", "]", "# PRIMARY ANALYSIS REQUESTS", "for", "uid", ",", "obj", "in", "_samples", ".", "iteritems", "(", ")", ":", "# get the sample metadata", "metadata", "=", "self", ".", "get_sample_info", "(", "obj", ")", "# remember the sample 
metadata", "sample_metadata", "[", "uid", "]", "=", "metadata", "# SAMPLETYPES", "for", "uid", ",", "obj", "in", "_sampletypes", ".", "iteritems", "(", ")", ":", "# get the sampletype metadata", "metadata", "=", "self", ".", "get_sampletype_info", "(", "obj", ")", "# remember the sampletype metadata", "sampletype_metadata", "[", "uid", "]", "=", "metadata", "# SERVICES", "for", "uid", ",", "obj", "in", "_services", ".", "iteritems", "(", ")", ":", "# get the service metadata", "metadata", "=", "self", ".", "get_service_info", "(", "obj", ")", "# remember the services' metadata", "service_metadata", "[", "uid", "]", "=", "metadata", "# DEPENDENCIES", "for", "uid", ",", "obj", "in", "_services", ".", "iteritems", "(", ")", ":", "# get the dependencies of this service", "deps", "=", "get_service_dependencies_for", "(", "obj", ")", "# check for unmet dependencies", "for", "dep", "in", "deps", "[", "\"dependencies\"", "]", ":", "# we use the UID to test for equality", "dep_uid", "=", "api", ".", "get_uid", "(", "dep", ")", "if", "dep_uid", "not", "in", "_services", ".", "keys", "(", ")", ":", "if", "uid", "in", "unmet_dependencies", ":", "unmet_dependencies", "[", "uid", "]", ".", "append", "(", "self", ".", "get_base_info", "(", "dep", ")", ")", "else", ":", "unmet_dependencies", "[", "uid", "]", "=", "[", "self", ".", "get_base_info", "(", "dep", ")", "]", "# remember the dependencies in the service metadata", "service_metadata", "[", "uid", "]", ".", "update", "(", "{", "\"dependencies\"", ":", "map", "(", "self", ".", "get_base_info", ",", "deps", "[", "\"dependencies\"", "]", ")", ",", "}", ")", "# Each key `n` (1,2,3...) contains the form data for one AR Add", "# column in the UI.", "# All relevant form data will be set accoriding to this data.", "out", "[", "n", "]", "=", "{", "\"client_metadata\"", ":", "client_metadata", ",", "\"contact_metadata\"", ":", "contact_metadata", ",", "\"sample_metadata\"", ":", "sample_metadata", ",", "\"sampletype_metadata\"", ":", "sampletype_metadata", ",", "\"specification_metadata\"", ":", "specification_metadata", ",", "\"specification_to_services\"", ":", "specification_to_services", ",", "\"service_to_specifications\"", ":", "service_to_specifications", ",", "\"template_metadata\"", ":", "template_metadata", ",", "\"template_to_services\"", ":", "template_to_services", ",", "\"service_to_templates\"", ":", "service_to_templates", ",", "\"profile_metadata\"", ":", "profile_metadata", ",", "\"profile_to_services\"", ":", "profile_to_services", ",", "\"service_to_profiles\"", ":", "service_to_profiles", ",", "\"service_metadata\"", ":", "service_metadata", ",", "\"unmet_dependencies\"", ":", "unmet_dependencies", ",", "}", "return", "out" ]
45.155963
17.655963
def _get_stddevs(self, C, rup, shape, stddev_types):
        """
        Return standard deviations as defined in p. 971.
        """
        weight = self._compute_weight_std(C, rup.mag)
        std_intra = weight * C["sd1"] * np.ones(shape)
        std_inter = weight * C["sd2"] * np.ones(shape)
        stddevs = []
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
            if stddev_type == const.StdDev.TOTAL:
                stddevs.append(np.sqrt(std_intra ** 2. + std_inter ** 2.))
            elif stddev_type == const.StdDev.INTRA_EVENT:
                stddevs.append(std_intra)
            elif stddev_type == const.StdDev.INTER_EVENT:
                stddevs.append(std_inter)
        return stddevs
[ "def", "_get_stddevs", "(", "self", ",", "C", ",", "rup", ",", "shape", ",", "stddev_types", ")", ":", "weight", "=", "self", ".", "_compute_weight_std", "(", "C", ",", "rup", ".", "mag", ")", "std_intra", "=", "weight", "*", "C", "[", "\"sd1\"", "]", "*", "np", ".", "ones", "(", "shape", ")", "std_inter", "=", "weight", "*", "C", "[", "\"sd2\"", "]", "*", "np", ".", "ones", "(", "shape", ")", "stddevs", "=", "[", "]", "for", "stddev_type", "in", "stddev_types", ":", "assert", "stddev_type", "in", "self", ".", "DEFINED_FOR_STANDARD_DEVIATION_TYPES", "if", "stddev_type", "==", "const", ".", "StdDev", ".", "TOTAL", ":", "stddevs", ".", "append", "(", "np", ".", "sqrt", "(", "std_intra", "**", "2.", "+", "std_inter", "**", "2.", ")", ")", "elif", "stddev_type", "==", "const", ".", "StdDev", ".", "INTRA_EVENT", ":", "stddevs", ".", "append", "(", "std_intra", ")", "elif", "stddev_type", "==", "const", ".", "StdDev", ".", "INTER_EVENT", ":", "stddevs", ".", "append", "(", "std_inter", ")", "return", "stddevs" ]
44
14.666667
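A minimal numeric sketch of how the three standard-deviation types in the record above relate; the weight, sd1 and sd2 values are made up for illustration, not taken from any coefficient table.

import numpy as np

# Hypothetical values standing in for C["sd1"], C["sd2"] and the magnitude weight.
weight, sd1, sd2 = 1.0, 0.25, 0.30
std_intra = weight * sd1 * np.ones(4)
std_inter = weight * sd2 * np.ones(4)
std_total = np.sqrt(std_intra ** 2. + std_inter ** 2.)
print(std_total[0])  # ~0.3905: the total is the quadrature sum of the two components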
def is_bifurcating(self, include_root=True):
        """
        Returns False if there is a polytomy in the tree, including if the
        tree is unrooted (basal polytomy), unless you use the
        include_root=False argument.
        """
        ctn1 = -1 + (2 * len(self))
        ctn2 = -2 + (2 * len(self))
        if self.is_rooted():
            return bool(ctn1 == sum(1 for i in self.treenode.traverse()))
        if include_root:
            return bool(ctn2 == -1 + sum(1 for i in self.treenode.traverse()))
        return bool(ctn2 == sum(1 for i in self.treenode.traverse()))
[ "def", "is_bifurcating", "(", "self", ",", "include_root", "=", "True", ")", ":", "ctn1", "=", "-", "1", "+", "(", "2", "*", "len", "(", "self", ")", ")", "ctn2", "=", "-", "2", "+", "(", "2", "*", "len", "(", "self", ")", ")", "if", "self", ".", "is_rooted", "(", ")", ":", "return", "bool", "(", "ctn1", "==", "sum", "(", "1", "for", "i", "in", "self", ".", "treenode", ".", "traverse", "(", ")", ")", ")", "if", "include_root", ":", "return", "bool", "(", "ctn2", "==", "-", "1", "+", "sum", "(", "1", "for", "i", "in", "self", ".", "treenode", ".", "traverse", "(", ")", ")", ")", "return", "bool", "(", "ctn2", "==", "sum", "(", "1", "for", "i", "in", "self", ".", "treenode", ".", "traverse", "(", ")", ")", ")" ]
44.538462
18.384615
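The node-count test above rests on a standard identity: a strictly bifurcating rooted tree with n tips has 2n - 1 nodes, and an unrooted one (basal trifurcation) has 2n - 2. A quick check of the arithmetic, independent of any tree library:

# For n tips in a strictly bifurcating tree:
n = 5
rooted_nodes = 2 * n - 1    # 9: 5 tips plus 4 internal nodes, including the root
unrooted_nodes = 2 * n - 2  # 8: the basal trifurcation removes one internal node
print(rooted_nodes, unrooted_nodes)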
def OnPreferences(self, event):
        """Preferences event handler that launches preferences dialog"""
        preferences = self.interfaces.get_preferences_from_user()
        if preferences:
            for key in preferences:
                if type(config[key]) in (type(u""), type("")):
                    config[key] = preferences[key]
                else:
                    config[key] = ast.literal_eval(preferences[key])
        self.main_window.grid.grid_renderer.cell_cache.clear()
        self.main_window.grid.ForceRefresh()
[ "def", "OnPreferences", "(", "self", ",", "event", ")", ":", "preferences", "=", "self", ".", "interfaces", ".", "get_preferences_from_user", "(", ")", "if", "preferences", ":", "for", "key", "in", "preferences", ":", "if", "type", "(", "config", "[", "key", "]", ")", "in", "(", "type", "(", "u\"\"", ")", ",", "type", "(", "\"\"", ")", ")", ":", "config", "[", "key", "]", "=", "preferences", "[", "key", "]", "else", ":", "config", "[", "key", "]", "=", "ast", ".", "literal_eval", "(", "preferences", "[", "key", "]", ")", "self", ".", "main_window", ".", "grid", ".", "grid_renderer", ".", "cell_cache", ".", "clear", "(", ")", "self", ".", "main_window", ".", "grid", ".", "ForceRefresh", "(", ")" ]
38.071429
20.071429
def client_sends_binary(self, message, name=None, label=None):
        """Send raw binary `message`.

        If client `name` is not given, uses the latest client. Optional
        message `label` is shown on logs.

        Examples:
        | Client sends binary | Hello! |
        | Client sends binary | ${some binary} | Client1 | label=DebugMessage |
        """
        client, name = self._clients.get_with_name(name)
        client.send(message)
        self._register_send(client, label, name)
[ "def", "client_sends_binary", "(", "self", ",", "message", ",", "name", "=", "None", ",", "label", "=", "None", ")", ":", "client", ",", "name", "=", "self", ".", "_clients", ".", "get_with_name", "(", "name", ")", "client", ".", "send", "(", "message", ")", "self", ".", "_register_send", "(", "client", ",", "label", ",", "name", ")" ]
37.692308
18.923077
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, z_shift=None):
        """Generates an orbital path around the data scene

        Parameters
        ----------
        factor : float
            A scaling factor when building the orbital extent

        n_points : int
            number of points on the orbital path

        viewup : list(float)
            the normal to the orbital plane

        z_shift : float, optional
            shift the plane up/down from the center of the scene by this amount
        """
        if viewup is None:
            viewup = rcParams['camera']['viewup']
        center = list(self.center)
        bnds = list(self.bounds)
        if z_shift is None:
            z_shift = (bnds[5] - bnds[4]) * factor
        center[2] = center[2] + z_shift
        radius = (bnds[1] - bnds[0]) * factor
        y = (bnds[3] - bnds[2]) * factor
        if y > radius:
            radius = y
        return vtki.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
[ "def", "generate_orbital_path", "(", "self", ",", "factor", "=", "3.", ",", "n_points", "=", "20", ",", "viewup", "=", "None", ",", "z_shift", "=", "None", ")", ":", "if", "viewup", "is", "None", ":", "viewup", "=", "rcParams", "[", "'camera'", "]", "[", "'viewup'", "]", "center", "=", "list", "(", "self", ".", "center", ")", "bnds", "=", "list", "(", "self", ".", "bounds", ")", "if", "z_shift", "is", "None", ":", "z_shift", "=", "(", "bnds", "[", "5", "]", "-", "bnds", "[", "4", "]", ")", "*", "factor", "center", "[", "2", "]", "=", "center", "[", "2", "]", "+", "z_shift", "radius", "=", "(", "bnds", "[", "1", "]", "-", "bnds", "[", "0", "]", ")", "*", "factor", "y", "=", "(", "bnds", "[", "3", "]", "-", "bnds", "[", "2", "]", ")", "*", "factor", "if", "y", ">", "radius", ":", "radius", "=", "y", "return", "vtki", ".", "Polygon", "(", "center", "=", "center", ",", "radius", "=", "radius", ",", "normal", "=", "viewup", ",", "n_sides", "=", "n_points", ")" ]
34.448276
18.103448
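A hedged sketch of the geometry in generate_orbital_path, using made-up bounds; it reproduces the center shift and radius selection without vtki.

factor = 3.
bounds = [0., 4., 0., 2., 0., 1.]            # xmin, xmax, ymin, ymax, zmin, zmax (hypothetical)
center = [2., 1., 0.5]
z_shift = (bounds[5] - bounds[4]) * factor   # 3.0
center[2] += z_shift                         # orbital plane lifted above the scene
radius = (bounds[1] - bounds[0]) * factor    # 12.0
y = (bounds[3] - bounds[2]) * factor         # 6.0
radius = max(radius, y)                      # keep the larger planar extent
print(center, radius)                        # [2.0, 1.0, 3.5] 12.0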
def list(self, teamId=None, type=None, sortBy=None, max=None, **request_parameters):
        """List rooms.

        By default, lists rooms to which the authenticated user belongs.

        This method supports Webex Teams's implementation of RFC5988 Web
        Linking to provide pagination support. It returns a generator
        container that incrementally yields all rooms returned by the query.

        The generator will automatically request additional 'pages' of
        responses from Webex as needed until all responses have been returned.
        The container makes the generator safe for reuse. A new API call will
        be made, using the same parameters that were specified when the
        generator was created, every time a new iterator is requested from
        the container.

        Args:
            teamId(basestring): Limit the rooms to those associated with a
                team, by ID.
            type(basestring): 'direct' returns all 1-to-1 rooms. `group`
                returns all group rooms. If not specified or values not
                matched, will return all room types.
            sortBy(basestring): Sort results by room ID (`id`), most recent
                activity (`lastactivity`), or most recently created
                (`created`).
            max(int): Limit the maximum number of items returned from the
                Webex Teams service per request.
            **request_parameters: Additional request parameters (provides
                support for parameters that may be added in the future).

        Returns:
            GeneratorContainer: A GeneratorContainer which, when iterated,
            yields the rooms returned by the Webex Teams query.

        Raises:
            TypeError: If the parameter types are incorrect.
            ApiError: If the Webex Teams cloud returns an error.
        """
        check_type(teamId, basestring)
        check_type(type, basestring)
        check_type(sortBy, basestring)
        check_type(max, int)

        params = dict_from_items_with_values(
            request_parameters,
            teamId=teamId,
            type=type,
            sortBy=sortBy,
            max=max,
        )

        # API request - get items
        items = self._session.get_items(API_ENDPOINT, params=params)

        # Yield room objects created from the returned items JSON objects
        for item in items:
            yield self._object_factory(OBJECT_TYPE, item)
[ "def", "list", "(", "self", ",", "teamId", "=", "None", ",", "type", "=", "None", ",", "sortBy", "=", "None", ",", "max", "=", "None", ",", "*", "*", "request_parameters", ")", ":", "check_type", "(", "teamId", ",", "basestring", ")", "check_type", "(", "type", ",", "basestring", ")", "check_type", "(", "sortBy", ",", "basestring", ")", "check_type", "(", "max", ",", "int", ")", "params", "=", "dict_from_items_with_values", "(", "request_parameters", ",", "teamId", "=", "teamId", ",", "type", "=", "type", ",", "sortBy", "=", "sortBy", ",", "max", "=", "max", ",", ")", "# API request - get items", "items", "=", "self", ".", "_session", ".", "get_items", "(", "API_ENDPOINT", ",", "params", "=", "params", ")", "# Yield room objects created from the returned items JSON objects", "for", "item", "in", "items", ":", "yield", "self", ".", "_object_factory", "(", "OBJECT_TYPE", ",", "item", ")" ]
41.844828
24.517241
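A possible usage sketch for the generator container described in the docstring above; the WebexTeamsAPI construction and the token are assumptions here (following webexteamssdk conventions), not taken from the record.

from webexteamssdk import WebexTeamsAPI  # assumed package exposing this rooms API

api = WebexTeamsAPI(access_token="YOUR_TOKEN")  # hypothetical credentials
rooms = api.rooms.list(type="group", sortBy="lastactivity")
for room in rooms:
    # pagination via RFC5988 Link headers happens transparently inside the generator
    print(room.id, room.title)
# Iterating the container again re-issues the query with the same parameters.
for room in rooms:
    pass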
def _complete_original_tasks(self, setName):
        """*mark original tasks as completed if they are marked as complete
        in the index taskpaper document*

        **Key Arguments:**
            - ``setName`` -- the name of the sync tag set
        """
        self.log.info('starting the ``_complete_original_tasks`` method')

        if self.editorialRootPath:
            taskpaperDocPath = self.syncFolder + "/e-" + \
                self.workspaceName + "-" + setName + "-tasks.taskpaper"
        else:
            taskpaperDocPath = self.syncFolder + "/" + \
                self.workspaceName + "-" + setName + "-tasks.taskpaper"

        exists = os.path.exists(taskpaperDocPath)
        if not exists:
            return

        # OPEN TASKPAPER INDEX FILE
        doc = document(taskpaperDocPath)
        doneTasks = doc.tagged_tasks("@done")

        for t in doneTasks:
            theseNotes = t.notes
            parent = t.parent
            while not len(theseNotes) and parent and parent.parent:
                theseNotes = parent.notes
                parent = parent.parent

            if self.editorialRootPath:
                theseNotes[0].title = theseNotes[0].title.replace(
                    "editorial://open", self.editorialRootPath).replace("?root=dropbox", "")
                theseNotes[0].title = urllib.unquote(
                    theseNotes[0].title).replace("%40", "@")

            originalFile = theseNotes[0].title.split(" > ")[0].strip()
            if len(theseNotes[0].title.split(" > ")) > 1:
                projectName = theseNotes[0].title.split(" > ")[1].strip()
            else:
                projectName = False

            odoc = document(originalFile)
            odoc.tidy()
            odoc.save()
            odoc = document(originalFile)

            if projectName:
                thisObject = odoc.get_project(projectName)
            else:
                thisObject = odoc

            oTask = thisObject.get_task(t.title)
            if oTask:
                oTask.done("all")
                odoc.save()

        self.log.info('completed the ``_complete_original_tasks`` method')
        return None
[ "def", "_complete_original_tasks", "(", "self", ",", "setName", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``_complete_original_tasks`` method'", ")", "if", "self", ".", "editorialRootPath", ":", "taskpaperDocPath", "=", "self", ".", "syncFolder", "+", "\"/e-\"", "+", "self", ".", "workspaceName", "+", "\"-\"", "+", "setName", "+", "\"-tasks.taskpaper\"", "else", ":", "taskpaperDocPath", "=", "self", ".", "syncFolder", "+", "\"/\"", "+", "self", ".", "workspaceName", "+", "\"-\"", "+", "setName", "+", "\"-tasks.taskpaper\"", "exists", "=", "os", ".", "path", ".", "exists", "(", "taskpaperDocPath", ")", "if", "not", "exists", ":", "return", "# OPEN TASKPAPER INDEX FILE", "doc", "=", "document", "(", "taskpaperDocPath", ")", "doneTasks", "=", "doc", ".", "tagged_tasks", "(", "\"@done\"", ")", "for", "t", "in", "doneTasks", ":", "theseNotes", "=", "t", ".", "notes", "parent", "=", "t", ".", "parent", "while", "not", "len", "(", "theseNotes", ")", "and", "parent", "and", "parent", ".", "parent", ":", "theseNotes", "=", "parent", ".", "notes", "parent", "=", "parent", ".", "parent", "if", "self", ".", "editorialRootPath", ":", "theseNotes", "[", "0", "]", ".", "title", "=", "theseNotes", "[", "0", "]", ".", "title", ".", "replace", "(", "\"editorial://open\"", ",", "self", ".", "editorialRootPath", ")", ".", "replace", "(", "\"?root=dropbox\"", ",", "\"\"", ")", "theseNotes", "[", "0", "]", ".", "title", "=", "urllib", ".", "unquote", "(", "theseNotes", "[", "0", "]", ".", "title", ")", ".", "replace", "(", "\"%40\"", ",", "\"@\"", ")", "originalFile", "=", "theseNotes", "[", "0", "]", ".", "title", ".", "split", "(", "\" > \"", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "len", "(", "theseNotes", "[", "0", "]", ".", "title", ".", "split", "(", "\" > \"", ")", ")", ">", "1", ":", "projectName", "=", "theseNotes", "[", "0", "]", ".", "title", ".", "split", "(", "\" > \"", ")", "[", "1", "]", ".", "strip", "(", ")", "else", ":", "projectName", "=", "False", "odoc", "=", "document", "(", "originalFile", ")", "odoc", ".", "tidy", "(", ")", "odoc", ".", "save", "(", ")", "odoc", "=", "document", "(", "originalFile", ")", "if", "projectName", ":", "thisObject", "=", "odoc", ".", "get_project", "(", "projectName", ")", "else", ":", "thisObject", "=", "odoc", "oTask", "=", "thisObject", ".", "get_task", "(", "t", ".", "title", ")", "if", "oTask", ":", "oTask", ".", "done", "(", "\"all\"", ")", "odoc", ".", "save", "(", ")", "self", ".", "log", ".", "info", "(", "'completed the ``_complete_original_tasks`` method'", ")", "return", "None" ]
36.169492
19.440678
def appliance_device_read_community(self):
        """
        Gets the ApplianceDeviceReadCommunity API client.

        Returns:
            ApplianceDeviceReadCommunity:
        """
        if not self.__appliance_device_read_community:
            self.__appliance_device_read_community = ApplianceDeviceReadCommunity(self.__connection)
        return self.__appliance_device_read_community
[ "def", "appliance_device_read_community", "(", "self", ")", ":", "if", "not", "self", ".", "__appliance_device_read_community", ":", "self", ".", "__appliance_device_read_community", "=", "ApplianceDeviceReadCommunity", "(", "self", ".", "__connection", ")", "return", "self", ".", "__appliance_device_read_community" ]
38.5
17.1
def _update_param(self):
        r"""Update parameters

        This method updates the values of the algorithm parameters with the
        methods provided
        """
        # Update relaxation parameter.
        if not isinstance(self._rho_update, type(None)):
            self._rho = self._rho_update(self._rho)

        # Update proximal dual parameter.
        if not isinstance(self._sigma_update, type(None)):
            self._sigma = self._sigma_update(self._sigma)

        # Update proximal primal parameter.
        if not isinstance(self._tau_update, type(None)):
            self._tau = self._tau_update(self._tau)
[ "def", "_update_param", "(", "self", ")", ":", "# Update relaxation parameter.", "if", "not", "isinstance", "(", "self", ".", "_rho_update", ",", "type", "(", "None", ")", ")", ":", "self", ".", "_rho", "=", "self", ".", "_rho_update", "(", "self", ".", "_rho", ")", "# Update proximal dual parameter.", "if", "not", "isinstance", "(", "self", ".", "_sigma_update", ",", "type", "(", "None", ")", ")", ":", "self", ".", "_sigma", "=", "self", ".", "_sigma_update", "(", "self", ".", "_sigma", ")", "# Update proximal primal parameter.", "if", "not", "isinstance", "(", "self", ".", "_tau_update", ",", "type", "(", "None", ")", ")", ":", "self", ".", "_tau", "=", "self", ".", "_tau_update", "(", "self", ".", "_tau", ")" ]
32.263158
19
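The update hooks above are plain callables; a sketch of plugging in a decaying relaxation schedule (the schedule itself is hypothetical):

rho_update = lambda rho: max(0.5, 0.9 * rho)  # assumed decay with a floor
rho = 1.8
for _ in range(3):
    rho = rho_update(rho)
print(round(rho, 4))  # 1.3122: three applications of the 0.9 decay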
def frames(self, key=None, orig_order=False):
        """Returns a list of frames in this tag.

        If KEY is None, returns all frames in the tag; otherwise returns
        all frames whose frameid matches KEY.

        If ORIG_ORDER is True, then the frames are returned in their
        original order. Otherwise the frames are sorted in canonical order
        according to the frame_order field of this tag.
        """
        if key is not None:
            # If there are multiple frames, then they are already in original order.
            key = self._normalize_key(key)
            if len(self._frames[key]) == 0:
                raise KeyError("Key not found: " + repr(key))
            return self._frames[key]
        frames = []
        for frameid in self._frames.keys():
            for frame in self._frames[frameid]:
                frames.append(frame)
        if orig_order:
            key = (lambda frame:
                   (0, frame.frameno) if frame.frameno is not None else (1,))
        else:
            key = self.frame_order.key
        frames.sort(key=key)
        return frames
[ "def", "frames", "(", "self", ",", "key", "=", "None", ",", "orig_order", "=", "False", ")", ":", "if", "key", "is", "not", "None", ":", "# If there are multiple frames, then they are already in original order.", "key", "=", "self", ".", "_normalize_key", "(", "key", ")", "if", "len", "(", "self", ".", "_frames", "[", "key", "]", ")", "==", "0", ":", "raise", "KeyError", "(", "\"Key not found: \"", "+", "repr", "(", "key", ")", ")", "return", "self", ".", "_frames", "[", "key", "]", "frames", "=", "[", "]", "for", "frameid", "in", "self", ".", "_frames", ".", "keys", "(", ")", ":", "for", "frame", "in", "self", ".", "_frames", "[", "frameid", "]", ":", "frames", ".", "append", "(", "frame", ")", "if", "orig_order", ":", "key", "=", "(", "lambda", "frame", ":", "(", "0", ",", "frame", ".", "frameno", ")", "if", "frame", ".", "frameno", "is", "not", "None", "else", "(", "1", ",", ")", ")", "else", ":", "key", "=", "self", ".", "frame_order", ".", "key", "frames", ".", "sort", "(", "key", "=", "key", ")", "return", "frames" ]
39.37931
15.551724
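The two sort keys above differ only in how frames without a recorded position are handled; a standalone sketch of the orig_order key with (frameid, frameno) stand-ins:

# Frames with a known frameno sort first, in file order; unplaced frames go last.
frames = [("TIT2", 3), ("TALB", None), ("TPE1", 1)]
key = lambda f: (0, f[1]) if f[1] is not None else (1,)
print(sorted(frames, key=key))  # [('TPE1', 1), ('TIT2', 3), ('TALB', None)]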
def _setup(self):
        """ Run setup tasks after initialization """
        self._populate_local()
        try:
            self._populate_latest()
        except Exception as e:
            self.log.exception('Unable to retrieve latest %s version information', self.meta_name)
        self._sort()
[ "def", "_setup", "(", "self", ")", ":", "self", ".", "_populate_local", "(", ")", "try", ":", "self", ".", "_populate_latest", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "log", ".", "exception", "(", "'Unable to retrieve latest %s version information'", ",", "self", ".", "meta_name", ")", "self", ".", "_sort", "(", ")" ]
30.8
15.8
def gist_diff():
    """Diff this file with the gist on github"""
    remote_file = wget(RAW_GIST)
    proc = subprocess.Popen(('diff - %s'%MY_PATH).split(),
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(remote_file)
    return stdout
[ "def", "gist_diff", "(", ")", ":", "remote_file", "=", "wget", "(", "RAW_GIST", ")", "proc", "=", "subprocess", ".", "Popen", "(", "(", "'diff - %s'", "%", "MY_PATH", ")", ".", "split", "(", ")", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "proc", ".", "communicate", "(", "remote_file", ")", "return", "stdout" ]
40.25
13
def verb_chain_ends(self):
        """The end positions of ``verb_chains`` elements."""
        if not self.is_tagged(VERB_CHAINS):
            self.tag_verb_chains()
        return self.ends(VERB_CHAINS)
[ "def", "verb_chain_ends", "(", "self", ")", ":", "if", "not", "self", ".", "is_tagged", "(", "VERB_CHAINS", ")", ":", "self", ".", "tag_verb_chains", "(", ")", "return", "self", ".", "ends", "(", "VERB_CHAINS", ")" ]
40
5.2
def reducer_count(self, key, values):
        """ count occurrences for each (metro, POI) record """
        total = sum(values)
        metro, poi = key
        # group data by metro areas for final output
        yield metro, (total, poi)
[ "def", "reducer_count", "(", "self", ",", "key", ",", "values", ")", ":", "total", "=", "sum", "(", "values", ")", "metro", ",", "poi", "=", "key", "# group data by metro areas for final output ", "yield", "metro", ",", "(", "total", ",", "poi", ")" ]
39.666667
9.166667
def frequencies(self, sides=None):
        """Return the frequency vector according to :attr:`sides`"""
        # use the attribute sides except if a valid sides argument is provided
        if sides is None:
            sides = self.sides
        if sides not in self._sides_choices:
            raise errors.SpectrumChoiceError(sides, self._sides_choices)
        if sides == 'onesided':
            return self._range.onesided()
        if sides == 'twosided':
            return self._range.twosided()
        if sides == 'centerdc':
            return self._range.centerdc()
[ "def", "frequencies", "(", "self", ",", "sides", "=", "None", ")", ":", "# use the attribute sides except if a valid sides argument is provided", "if", "sides", "is", "None", ":", "sides", "=", "self", ".", "sides", "if", "sides", "not", "in", "self", ".", "_sides_choices", ":", "raise", "errors", ".", "SpectrumChoiceError", "(", "sides", ",", "self", ".", "_sides_choices", ")", "if", "sides", "==", "'onesided'", ":", "return", "self", ".", "_range", ".", "onesided", "(", ")", "if", "sides", "==", "'twosided'", ":", "return", "self", ".", "_range", ".", "twosided", "(", ")", "if", "sides", "==", "'centerdc'", ":", "return", "self", ".", "_range", ".", "centerdc", "(", ")" ]
37.8
14.333333
def prob(self, pw):
        """
        returns the probability of pw in the model.
        P[pw] = n(pw)/n(__total__)
        """
        return float(self._T.get(pw, 0)) / self._T[TOTALF_W]
[ "def", "prob", "(", "self", ",", "pw", ")", ":", "return", "float", "(", "self", ".", "_T", ".", "get", "(", "pw", ",", "0", ")", ")", "/", "self", ".", "_T", "[", "TOTALF_W", "]" ]
31
9.666667
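A tiny worked example of the ratio above, using a hypothetical count table _T (TOTALF_W is assumed to be the key holding the corpus total, mirroring n(__total__) in the docstring):

TOTALF_W = '__total__'
_T = {'password': 3, 'letmein': 1, TOTALF_W: 4}
print(float(_T.get('password', 0)) / _T[TOTALF_W])  # 0.75
print(float(_T.get('unseen', 0)) / _T[TOTALF_W])    # 0.0 for passwords outside the model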
def from_string(date_str):
        """
        construction from the following string patterns
        '%Y-%m-%d'
        '%d.%m.%Y'
        '%m/%d/%Y'
        '%Y%m%d'

        :param str date_str:
        :return BusinessDate:
        """
        if date_str.count('-'):
            str_format = '%Y-%m-%d'
        elif date_str.count('.'):
            str_format = '%d.%m.%Y'
        elif date_str.count('/'):
            str_format = '%m/%d/%Y'
        elif len(date_str) == 8:
            str_format = '%Y%m%d'
        elif len(date_str) == 4:
            year = ord(date_str[0]) * 256 + ord(date_str[1])
            month = ord(date_str[2])
            day = ord(date_str[3])
            return BusinessDate.from_ymd(year, month, day)
        else:
            msg = "the date string " + date_str + " has not the right format"
            raise ValueError(msg)
        d = datetime.strptime(date_str, str_format)
        return BusinessDate.from_ymd(d.year, d.month, d.day)
[ "def", "from_string", "(", "date_str", ")", ":", "if", "date_str", ".", "count", "(", "'-'", ")", ":", "str_format", "=", "'%Y-%m-%d'", "elif", "date_str", ".", "count", "(", "'.'", ")", ":", "str_format", "=", "'%d.%m.%Y'", "elif", "date_str", ".", "count", "(", "'/'", ")", ":", "str_format", "=", "'%m/%d/%Y'", "elif", "len", "(", "date_str", ")", "==", "8", ":", "str_format", "=", "'%Y%m%d'", "elif", "len", "(", "date_str", ")", "==", "4", ":", "year", "=", "ord", "(", "date_str", "[", "0", "]", ")", "*", "256", "+", "ord", "(", "date_str", "[", "1", "]", ")", "month", "=", "ord", "(", "date_str", "[", "2", "]", ")", "day", "=", "ord", "(", "date_str", "[", "3", "]", ")", "return", "BusinessDate", ".", "from_ymd", "(", "year", ",", "month", ",", "day", ")", "else", ":", "msg", "=", "\"the date string \"", "+", "date_str", "+", "\" has not the right format\"", "raise", "ValueError", "(", "msg", ")", "d", "=", "datetime", ".", "strptime", "(", "date_str", ",", "str_format", ")", "return", "BusinessDate", ".", "from_ymd", "(", "d", ".", "year", ",", "d", ".", "month", ",", "d", ".", "day", ")" ]
30.677419
15.258065
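The dispatch order above, checked standalone; BusinessDate.from_ymd is replaced with a plain tuple so only the format detection is exercised. The 4-character branch unpacks a byte-packed date: two bytes of year, one of month, one of day.

from datetime import datetime

def parse(date_str):
    if '-' in date_str:
        fmt = '%Y-%m-%d'
    elif '.' in date_str:
        fmt = '%d.%m.%Y'
    elif '/' in date_str:
        fmt = '%m/%d/%Y'
    elif len(date_str) == 8:
        fmt = '%Y%m%d'
    elif len(date_str) == 4:  # packed form
        return (ord(date_str[0]) * 256 + ord(date_str[1]), ord(date_str[2]), ord(date_str[3]))
    else:
        raise ValueError("unsupported format: " + date_str)
    d = datetime.strptime(date_str, fmt)
    return (d.year, d.month, d.day)

print(parse('2016-01-04'))                         # (2016, 1, 4)
print(parse('04.01.2016'))                         # (2016, 1, 4)
print(parse('20160104'))                           # (2016, 1, 4)
print(parse(chr(7) + chr(224) + chr(1) + chr(4)))  # (2016, 1, 4): 7 * 256 + 224 == 2016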
def get_name(self):
        """
        Return the name of the field

        :rtype: string
        """
        if self.name_idx_value is None:
            self.name_idx_value = self.CM.get_string(self.name_idx)

        return self.name_idx_value
[ "def", "get_name", "(", "self", ")", ":", "if", "self", ".", "name_idx_value", "==", "None", ":", "self", ".", "name_idx_value", "=", "self", ".", "CM", ".", "get_string", "(", "self", ".", "name_idx", ")", "return", "self", ".", "name_idx_value" ]
24.7
14.9
def redirect_to():
    """302/3XX Redirects to the given URL.
    ---
    tags:
      - Redirects
    produces:
      - text/html
    get:
      parameters:
        - in: query
          name: url
          type: string
          required: true
        - in: query
          name: status_code
          type: int
    post:
      consumes:
        - application/x-www-form-urlencoded
      parameters:
        - in: formData
          name: url
          type: string
          required: true
        - in: formData
          name: status_code
          type: int
          required: false
    patch:
      consumes:
        - application/x-www-form-urlencoded
      parameters:
        - in: formData
          name: url
          type: string
          required: true
        - in: formData
          name: status_code
          type: int
          required: false
    put:
      consumes:
        - application/x-www-form-urlencoded
      parameters:
        - in: formData
          name: url
          type: string
          required: true
        - in: formData
          name: status_code
          type: int
          required: false
    responses:
      302:
        description: A redirection.
    """
    args_dict = request.args.items()
    args = CaseInsensitiveDict(args_dict)

    # We need to build the response manually and convert to UTF-8 to prevent
    # werkzeug from "fixing" the URL. This endpoint should set the Location
    # header to the exact string supplied.
    response = app.make_response("")
    response.status_code = 302
    if "status_code" in args:
        status_code = int(args["status_code"])
        if status_code >= 300 and status_code < 400:
            response.status_code = status_code
    response.headers["Location"] = args["url"].encode("utf-8")

    return response
[ "def", "redirect_to", "(", ")", ":", "args_dict", "=", "request", ".", "args", ".", "items", "(", ")", "args", "=", "CaseInsensitiveDict", "(", "args_dict", ")", "# We need to build the response manually and convert to UTF-8 to prevent", "# werkzeug from \"fixing\" the URL. This endpoint should set the Location", "# header to the exact string supplied.", "response", "=", "app", ".", "make_response", "(", "\"\"", ")", "response", ".", "status_code", "=", "302", "if", "\"status_code\"", "in", "args", ":", "status_code", "=", "int", "(", "args", "[", "\"status_code\"", "]", ")", "if", "status_code", ">=", "300", "and", "status_code", "<", "400", ":", "response", ".", "status_code", "=", "status_code", "response", ".", "headers", "[", "\"Location\"", "]", "=", "args", "[", "\"url\"", "]", ".", "encode", "(", "\"utf-8\"", ")", "return", "response" ]
24.236111
18.916667
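One way to exercise this view, assuming the surrounding httpbin-style Flask app registers it at /redirect-to; the route path is an assumption here, since only the handler logic is given in the record.

# Hypothetical smoke test with Flask's built-in test client.
client = app.test_client()
resp = client.get("/redirect-to?url=http://example.com/&status_code=307")
print(resp.status_code, resp.headers["Location"])  # 307 http://example.com/ (3XX accepted)
resp = client.get("/redirect-to?url=http://example.com/&status_code=200")
print(resp.status_code)                            # 302: out-of-range codes keep the default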
def hash_vector(self, v, querying=False):
        """
        Hashes the vector and returns the binary bucket key as string.
        """
        if scipy.sparse.issparse(v):
            # If vector is sparse, make sure we have the CSR representation
            # of the projection matrix
            if self.normals_csr == None:
                self.normals_csr = scipy.sparse.csr_matrix(self.normals)
            # Make sure that we are using CSR format for multiplication
            if not scipy.sparse.isspmatrix_csr(v):
                v = scipy.sparse.csr_matrix(v)
            # Project vector onto all hyperplane normals
            projection = self.normals_csr.dot(v)
        else:
            # Project vector onto all hyperplane normals
            projection = numpy.dot(self.normals, v)
        # Build binary key
        binary_key = ''.join(['1' if x > 0.0 else '0' for x in projection])
        if querying:
            #print 'Querying...'
            # Make sure returned buckets keys contain at least N results
            return self.tree_root.bucket_keys_to_guarantee_result_set_size(binary_key, self.minimum_result_size, 0)
        else:
            # We are indexing, so adapt bucket key counter in binary tree
            self.tree_root.insert_entry_for_bucket(binary_key, 0)
            # Return binary key
            return [binary_key]
[ "def", "hash_vector", "(", "self", ",", "v", ",", "querying", "=", "False", ")", ":", "if", "scipy", ".", "sparse", ".", "issparse", "(", "v", ")", ":", "# If vector is sparse, make sure we have the CSR representation", "# of the projection matrix", "if", "self", ".", "normals_csr", "==", "None", ":", "self", ".", "normals_csr", "=", "scipy", ".", "sparse", ".", "csr_matrix", "(", "self", ".", "normals", ")", "# Make sure that we are using CSR format for multiplication", "if", "not", "scipy", ".", "sparse", ".", "isspmatrix_csr", "(", "v", ")", ":", "v", "=", "scipy", ".", "sparse", ".", "csr_matrix", "(", "v", ")", "# Project vector onto all hyperplane normals", "projection", "=", "self", ".", "normals_csr", ".", "dot", "(", "v", ")", "else", ":", "# Project vector onto all hyperplane normals", "projection", "=", "numpy", ".", "dot", "(", "self", ".", "normals", ",", "v", ")", "# Build binary key", "binary_key", "=", "''", ".", "join", "(", "[", "'1'", "if", "x", ">", "0.0", "else", "'0'", "for", "x", "in", "projection", "]", ")", "if", "querying", ":", "#print 'Querying...'", "# Make sure returned buckets keys contain at least N results", "return", "self", ".", "tree_root", ".", "bucket_keys_to_guarantee_result_set_size", "(", "binary_key", ",", "self", ".", "minimum_result_size", ",", "0", ")", "else", ":", "# We are indexing, so adapt bucket key counter in binary tree", "self", ".", "tree_root", ".", "insert_entry_for_bucket", "(", "binary_key", ",", "0", ")", "# Return binary key", "return", "[", "binary_key", "]" ]
43.16129
20.516129
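The bucket key construction above reduces to a sign test on the projections; a minimal dense-only sketch with random hyperplane normals (all sizes are made up):

import numpy as np

rng = np.random.default_rng(0)
normals = rng.standard_normal((8, 16))  # 8 hypothetical hyperplanes in 16-d space
v = rng.standard_normal(16)
projection = np.dot(normals, v)
binary_key = ''.join('1' if x > 0.0 else '0' for x in projection)
print(binary_key)  # e.g. '10110010': one bucket bit per hyperplane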
def start(self, func=None):
        """Start the roaster control process.

        This function will kick off the processing thread for the Hottop and
        register any user-defined callback function. By default, it will not
        begin collecting any reading information or saving it. In order to do
        that, users must issue the monitor/record bit via `set_monitor`.

        :param func: Callback function for Hottop stream data
        :type func: function
        :returns: None
        """
        self._user_callback = func
        if not self._simulate:
            self._process = ControlProcess(self._conn, self._config, self._q,
                                           self._log, callback=self._callback)
        else:
            self._process = MockProcess(self._config, self._q, self._log,
                                        callback=self._callback)
        self._process.start()
        self._roasting = True
[ "def", "start", "(", "self", ",", "func", "=", "None", ")", ":", "self", ".", "_user_callback", "=", "func", "if", "not", "self", ".", "_simulate", ":", "self", ".", "_process", "=", "ControlProcess", "(", "self", ".", "_conn", ",", "self", ".", "_config", ",", "self", ".", "_q", ",", "self", ".", "_log", ",", "callback", "=", "self", ".", "_callback", ")", "else", ":", "self", ".", "_process", "=", "MockProcess", "(", "self", ".", "_config", ",", "self", ".", "_q", ",", "self", ".", "_log", ",", "callback", "=", "self", ".", "_callback", ")", "self", ".", "_process", ".", "start", "(", ")", "self", ".", "_roasting", "=", "True" ]
43.904762
22.952381
def fo_pct_by_zone(self):
        """
        Get the by-team face-off win % by zone.

        :returns: dict in the format ``{ 'home/away': { 'off/def/neut': % } }``
        """
        bz = self.by_zone
        return {
            t: {
                z: bz[t][z]['won']/(1.0*bz[t][z]['total']) if bz[t][z]['total'] else 0.0
                for z in self.__zones if z != 'all'
            }
            for t in ['home', 'away']
        }
[ "def", "fo_pct_by_zone", "(", "self", ")", ":", "bz", "=", "self", ".", "by_zone", "return", "{", "t", ":", "{", "z", ":", "bz", "[", "t", "]", "[", "z", "]", "[", "'won'", "]", "/", "(", "1.0", "*", "bz", "[", "t", "]", "[", "z", "]", "[", "'total'", "]", ")", "if", "bz", "[", "t", "]", "[", "z", "]", "[", "'total'", "]", "else", "0.0", "for", "z", "in", "self", ".", "__zones", "if", "z", "!=", "'all'", "}", "for", "t", "in", "[", "'home'", ",", "'away'", "]", "}" ]
29.0625
18.5625
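The nested comprehension above is just a guarded ratio; a numeric sketch with a hypothetical by_zone table:

bz = {
    'home': {'off': {'won': 6, 'total': 10}, 'def': {'won': 0, 'total': 0}, 'neut': {'won': 3, 'total': 4}},
    'away': {'off': {'won': 4, 'total': 10}, 'def': {'won': 1, 'total': 4}, 'neut': {'won': 2, 'total': 4}},
}
pct = {
    t: {z: bz[t][z]['won'] / (1.0 * bz[t][z]['total']) if bz[t][z]['total'] else 0.0
        for z in ('off', 'def', 'neut')}
    for t in ('home', 'away')
}
print(pct['home'])  # {'off': 0.6, 'def': 0.0, 'neut': 0.75}: empty zones fall back to 0.0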
def parserunstats(self):
        """Parses the XML run statistics file (GenerateFASTQRunStatistics.xml). In some cases, the file is not
        available. Equivalent data can be pulled from Basespace. Generate a text file named indexingQC.txt
        containing the copied tables from the Indexing QC tab of the run on Basespace"""
        # metadata = GenObject()
        # If the default file GenerateFASTQRunStatistics.xml is present, parse it
        if os.path.isfile(os.path.join(self.path, "GenerateFASTQRunStatistics.xml")):
            # Create a list of keys for which values are to be extracted
            datalist = ["SampleNumber", "SampleID", "SampleName", "NumberOfClustersPF"]
            # Load the file as an xml ElementTree object
            runstatistics = ElementTree.ElementTree(file=os.path.join(self.path, "GenerateFASTQRunStatistics.xml"))
            # Iterate through all the elements in the object
            # .iterfind() allow for the matching and iterating though matches
            # This is stored as a float to allow subsequent calculations
            tclusterspf = [float(element.text) for element in runstatistics.iterfind("RunStats/NumberOfClustersPF")][0]
            # Iterate through all the elements (strains) in the OverallSamples/SummarizedSampleStatistics category
            for element in runstatistics.iterfind("OverallSamples/SummarizedSampleStatistics"):
                # List comprehension. Essentially iterate through each element for each category in datalist:
                # (element.iter(category) and pull out the value for nestedelement
                straindata = [nestedelement.text for category in datalist for nestedelement in element.iter(category)]
                # Try and replicate the Illumina rules to create file names from "Sample_Name"
                samplename = samplenamer(straindata, 1)
                # Calculate the percentage of clusters associated with each strain
                # noinspection PyTypeChecker
                percentperstrain = "{:.2f}".format((float(straindata[3]) / tclusterspf * 100))
                try:
                    # Use the sample number -1 as the index in the list of objects created in parsesamplesheet
                    strainindex = int(straindata[0]) - 1
                    # Set run to the .run object of self.samples[index]
                    run = self.samples[strainindex].run
                    # An assertion that compares the sample computed above to the previously entered sample name
                    # to ensure that the samples are the same
                    assert self.samples[strainindex].name == samplename, \
                        "Sample name does not match object name {0!r:s}".format(straindata[1])
                    # Add the appropriate values to the strain metadata object
                    run.SampleNumber = straindata[0]
                    run.NumberofClustersPF = straindata[3]
                    run.TotalClustersinRun = tclusterspf
                    run.PercentOfClusters = percentperstrain
                    run.flowcell = self.flowcell
                    run.instrument = self.instrument
                except IndexError:
                    pass
        else:
            strainindex = 0
            for i in range(len(self.samples)):
                # Set run to the .run object of self.samples[index]
                run = self.samples[strainindex].run
                # Update the object with the variables
                run.SampleNumber = strainindex + 1
                run.NumberofClustersPF = 'NA'
                run.TotalClustersinRun = 'NA'
                run.PercentOfClusters = 'NA'
                run.flowcell = self.flowcell
                run.instrument = self.instrument
                strainindex += 1
[ "def", "parserunstats", "(", "self", ")", ":", "# metadata = GenObject()", "# If the default file GenerateFASTQRunStatistics.xml is present, parse it", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "\"GenerateFASTQRunStatistics.xml\"", ")", ")", ":", "# Create a list of keys for which values are to be extracted", "datalist", "=", "[", "\"SampleNumber\"", ",", "\"SampleID\"", ",", "\"SampleName\"", ",", "\"NumberOfClustersPF\"", "]", "# Load the file as an xml ElementTree object", "runstatistics", "=", "ElementTree", ".", "ElementTree", "(", "file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "path", ",", "\"GenerateFASTQRunStatistics.xml\"", ")", ")", "# Iterate through all the elements in the object", "# .iterfind() allow for the matching and iterating though matches", "# This is stored as a float to allow subsequent calculations", "tclusterspf", "=", "[", "float", "(", "element", ".", "text", ")", "for", "element", "in", "runstatistics", ".", "iterfind", "(", "\"RunStats/NumberOfClustersPF\"", ")", "]", "[", "0", "]", "# Iterate through all the elements (strains) in the OverallSamples/SummarizedSampleStatistics category", "for", "element", "in", "runstatistics", ".", "iterfind", "(", "\"OverallSamples/SummarizedSampleStatistics\"", ")", ":", "# List comprehension. Essentially iterate through each element for each category in datalist:", "# (element.iter(category) and pull out the value for nestedelement", "straindata", "=", "[", "nestedelement", ".", "text", "for", "category", "in", "datalist", "for", "nestedelement", "in", "element", ".", "iter", "(", "category", ")", "]", "# Try and replicate the Illumina rules to create file names from \"Sample_Name\"", "samplename", "=", "samplenamer", "(", "straindata", ",", "1", ")", "# Calculate the percentage of clusters associated with each strain", "# noinspection PyTypeChecker", "percentperstrain", "=", "\"{:.2f}\"", ".", "format", "(", "(", "float", "(", "straindata", "[", "3", "]", ")", "/", "tclusterspf", "*", "100", ")", ")", "try", ":", "# Use the sample number -1 as the index in the list of objects created in parsesamplesheet", "strainindex", "=", "int", "(", "straindata", "[", "0", "]", ")", "-", "1", "# Set run to the .run object of self.samples[index]", "run", "=", "self", ".", "samples", "[", "strainindex", "]", ".", "run", "# An assertion that compares the sample computed above to the previously entered sample name", "# to ensure that the samples are the same", "assert", "self", ".", "samples", "[", "strainindex", "]", ".", "name", "==", "samplename", ",", "\"Sample name does not match object name {0!r:s}\"", ".", "format", "(", "straindata", "[", "1", "]", ")", "# Add the appropriate values to the strain metadata object", "run", ".", "SampleNumber", "=", "straindata", "[", "0", "]", "run", ".", "NumberofClustersPF", "=", "straindata", "[", "3", "]", "run", ".", "TotalClustersinRun", "=", "tclusterspf", "run", ".", "PercentOfClusters", "=", "percentperstrain", "run", ".", "flowcell", "=", "self", ".", "flowcell", "run", ".", "instrument", "=", "self", ".", "instrument", "except", "IndexError", ":", "pass", "else", ":", "strainindex", "=", "0", "for", "i", "in", "range", "(", "len", "(", "self", ".", "samples", ")", ")", ":", "# Set run to the .run object of self.samples[index]", "run", "=", "self", ".", "samples", "[", "strainindex", "]", ".", "run", "# Update the object with the variables", "run", ".", "SampleNumber", "=", "strainindex", "+", "1", "run", 
".", "NumberofClustersPF", "=", "'NA'", "run", ".", "TotalClustersinRun", "=", "'NA'", "run", ".", "PercentOfClusters", "=", "'NA'", "run", ".", "flowcell", "=", "self", ".", "flowcell", "run", ".", "instrument", "=", "self", ".", "instrument", "strainindex", "+=", "1" ]
66.982143
29.142857
def notification_sm_changed(self, model, prop_name, info):
        """Remove references to non-existing state machines"""
        for state_machine_id in list(self._expansion_state.keys()):
            if state_machine_id not in self.model.state_machines:
                del self._expansion_state[state_machine_id]
[ "def", "notification_sm_changed", "(", "self", ",", "model", ",", "prop_name", ",", "info", ")", ":", "for", "state_machine_id", "in", "list", "(", "self", ".", "_expansion_state", ".", "keys", "(", ")", ")", ":", "if", "state_machine_id", "not", "in", "self", ".", "model", ".", "state_machines", ":", "del", "self", ".", "_expansion_state", "[", "state_machine_id", "]" ]
62.2
17.8
def formatDuration(self, duration):
        """Format the duration.

        This method could be overridden if really needed, as the duration
        format in gerrit is an arbitrary string.

        :param duration: duration in timedelta
        """
        days = duration.days
        hours, remainder = divmod(duration.seconds, 3600)
        minutes, seconds = divmod(remainder, 60)
        if days:
            return '{} day{} {}h {}m {}s'.format(days, "s" if days > 1 else "",
                                                 hours, minutes, seconds)
        elif hours:
            return '{}h {}m {}s'.format(hours, minutes, seconds)
        return '{}m {}s'.format(minutes, seconds)
[ "def", "formatDuration", "(", "self", ",", "duration", ")", ":", "days", "=", "duration", ".", "days", "hours", ",", "remainder", "=", "divmod", "(", "duration", ".", "seconds", ",", "3600", ")", "minutes", ",", "seconds", "=", "divmod", "(", "remainder", ",", "60", ")", "if", "days", ":", "return", "'{} day{} {}h {}m {}s'", ".", "format", "(", "days", ",", "\"s\"", "if", "days", ">", "1", "else", "\"\"", ",", "hours", ",", "minutes", ",", "seconds", ")", "elif", "hours", ":", "return", "'{}h {}m {}s'", ".", "format", "(", "hours", ",", "minutes", ",", "seconds", ")", "return", "'{}m {}s'", ".", "format", "(", "minutes", ",", "seconds", ")" ]
42.3125
18.5625
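The three formatting branches above, exercised with concrete timedeltas in a standalone copy (the divmod arithmetic is the only moving part):

from datetime import timedelta

def fmt(duration):
    days = duration.days
    hours, remainder = divmod(duration.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    if days:
        return '{} day{} {}h {}m {}s'.format(days, "s" if days > 1 else "",
                                             hours, minutes, seconds)
    elif hours:
        return '{}h {}m {}s'.format(hours, minutes, seconds)
    return '{}m {}s'.format(minutes, seconds)

print(fmt(timedelta(days=2, hours=3, minutes=4, seconds=5)))  # 2 days 3h 4m 5s
print(fmt(timedelta(hours=1, minutes=2, seconds=3)))          # 1h 2m 3s
print(fmt(timedelta(seconds=75)))                             # 1m 15s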
def allocate(self):
        """Initializes libvirt resources."""
        disk_path = self.provider_image

        self._hypervisor = libvirt.open(
            self.configuration.get('hypervisor', 'vbox:///session'))

        self._domain = domain_create(self._hypervisor, self.identifier,
                                     self.configuration['domain'], disk_path)
[ "def", "allocate", "(", "self", ")", ":", "disk_path", "=", "self", ".", "provider_image", "self", ".", "_hypervisor", "=", "libvirt", ".", "open", "(", "self", ".", "configuration", ".", "get", "(", "'hypervisor'", ",", "'vbox:///session'", ")", ")", "self", ".", "_domain", "=", "domain_create", "(", "self", ".", "_hypervisor", ",", "self", ".", "identifier", ",", "self", ".", "configuration", "[", "'domain'", "]", ",", "disk_path", ")" ]
39.777778
22
def replay_detection_negotiated(self):
        """
        After :meth:`step` has been called, this property will be set to
        True if the security context can use replay detection for messages
        protected by :meth:`get_mic` and :meth:`wrap`. False if replay
        detection cannot be used.
        """
        return (
            self.flags & C.GSS_C_REPLAY_FLAG
        ) and (
            self.established or (self.flags & C.GSS_C_PROT_READY_FLAG)
        )
[ "def", "replay_detection_negotiated", "(", "self", ")", ":", "return", "(", "self", ".", "flags", "&", "C", ".", "GSS_C_REPLAY_FLAG", ")", "and", "(", "self", ".", "established", "or", "(", "self", ".", "flags", "&", "C", ".", "GSS_C_PROT_READY_FLAG", ")", ")" ]
41.454545
21.636364
def get_policy_type(self, project, type_id):
        """GetPolicyType.
        Retrieve a specific policy type by ID.
        :param str project: Project ID or project name
        :param str type_id: The policy ID.
        :rtype: :class:`<PolicyType> <azure.devops.v5_0.policy.models.PolicyType>`
        """
        route_values = {}
        if project is not None:
            route_values['project'] = self._serialize.url('project', project, 'str')
        if type_id is not None:
            route_values['typeId'] = self._serialize.url('type_id', type_id, 'str')
        response = self._send(http_method='GET',
                              location_id='44096322-2d3d-466a-bb30-d1b7de69f61f',
                              version='5.0',
                              route_values=route_values)
        return self._deserialize('PolicyType', response)
[ "def", "get_policy_type", "(", "self", ",", "project", ",", "type_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "if", "type_id", "is", "not", "None", ":", "route_values", "[", "'typeId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'type_id'", ",", "type_id", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'44096322-2d3d-466a-bb30-d1b7de69f61f'", ",", "version", "=", "'5.0'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'PolicyType'", ",", "response", ")" ]
49.588235
16.058824
def cancel(self, mark_completed_as_cancelled=False):
        """
        Cancel the future. If the future has not been started yet, it will
        never start running. If the future is already running, it will run
        until the worker function exits. The worker function can check if
        the future has been cancelled using the :meth:`cancelled` method.

        If the future has already been completed, it will not be marked as
        cancelled unless you set *mark_completed_as_cancelled* to
        :const:`True`.

        :param mark_completed_as_cancelled: If this is :const:`True` and the
            future has already completed, it will be marked as cancelled anyway.
        """
        with self._lock:
            if not self._completed or mark_completed_as_cancelled:
                self._cancelled = True
            callbacks = self._prepare_done_callbacks()
        callbacks()
[ "def", "cancel", "(", "self", ",", "mark_completed_as_cancelled", "=", "False", ")", ":", "with", "self", ".", "_lock", ":", "if", "not", "self", ".", "_completed", "or", "mark_completed_as_cancelled", ":", "self", ".", "_cancelled", "=", "True", "callbacks", "=", "self", ".", "_prepare_done_callbacks", "(", ")", "callbacks", "(", ")" ]
42.631579
24.526316
def data_x_range(self):
        """Return a 2-tuple giving the minimum and maximum x-axis
        data range.
        """
        try:
            lower = min([min(self._filter_none(s)) for type, s in self.annotated_data() if type == 'x'])
            upper = max([max(self._filter_none(s)) for type, s in self.annotated_data() if type == 'x'])
            return (lower, upper)
        except ValueError:
            return None
[ "def", "data_x_range", "(", "self", ")", ":", "try", ":", "lower", "=", "min", "(", "[", "min", "(", "self", ".", "_filter_none", "(", "s", ")", ")", "for", "type", ",", "s", "in", "self", ".", "annotated_data", "(", ")", "if", "type", "==", "'x'", "]", ")", "upper", "=", "max", "(", "[", "max", "(", "self", ".", "_filter_none", "(", "s", ")", ")", "for", "type", ",", "s", "in", "self", ".", "annotated_data", "(", ")", "if", "type", "==", "'x'", "]", ")", "return", "(", "lower", ",", "upper", ")", "except", "ValueError", ":", "return", "None" ]
36.857143
12
def get_clamav_conf(filename):
    """Initialize clamav configuration."""
    if os.path.isfile(filename):
        return ClamavConfig(filename)
    log.warn(LOG_PLUGIN, "No ClamAV config file found at %r.", filename)
[ "def", "get_clamav_conf", "(", "filename", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "ClamavConfig", "(", "filename", ")", "log", ".", "warn", "(", "LOG_PLUGIN", ",", "\"No ClamAV config file found at %r.\"", ",", "filename", ")" ]
42.6
10.6
def processResponse(cls, soapdata, **kw):
        """called by deferred, returns pyobj representing reply.

        Parameters and Key Words:
          soapdata -- SOAP Data
          replytype -- reply type of response
        """
        if len(soapdata) == 0:
            raise TypeError('Received empty response')

        # log.msg("_" * 33, time.ctime(time.time()),
        #         "RESPONSE: \n%s" %soapdata, debug=True)
        ps = ParsedSoap(soapdata, readerclass=cls.readerClass)
        if ps.IsAFault() is True:
            log.msg('Received SOAP:Fault', debug=True)
            raise FaultFromFaultMessage(ps)
        return ps
[ "def", "processResponse", "(", "cls", ",", "soapdata", ",", "*", "*", "kw", ")", ":", "if", "len", "(", "soapdata", ")", "==", "0", ":", "raise", "TypeError", "(", "'Received empty response'", ")", "# log.msg(\"_\" * 33, time.ctime(time.time()), ", "# \"RESPONSE: \\n%s\" %soapdata, debug=True)", "ps", "=", "ParsedSoap", "(", "soapdata", ",", "readerclass", "=", "cls", ".", "readerClass", ")", "if", "ps", ".", "IsAFault", "(", ")", "is", "True", ":", "log", ".", "msg", "(", "'Received SOAP:Fault'", ",", "debug", "=", "True", ")", "raise", "FaultFromFaultMessage", "(", "ps", ")", "return", "ps" ]
34.888889
14.722222
def groups_unarchive(self, room_id, **kwargs):
        """Unarchives a private group."""
        return self.__call_api_post('groups.unarchive', roomId=room_id, kwargs=kwargs)
[ "def", "groups_unarchive", "(", "self", ",", "room_id", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'groups.unarchive'", ",", "roomId", "=", "room_id", ",", "kwargs", "=", "kwargs", ")" ]
57.666667
17.333333
def execute(self, conn, app="", release_version="", pset_hash="", output_label="",
                global_tag='', transaction=False):
        """
        returns id for a given application
        """
        sql = self.sql
        binds = {}
        setAnd = False
        if not app == "":
            sql += " A.APP_NAME=:app_name"
            binds["app_name"] = app
            setAnd = True
        if not release_version == "":
            if setAnd:
                sql += " AND "
            sql += " R.RELEASE_VERSION=:release_version"
            binds["release_version"] = release_version
            setAnd = True
        if not pset_hash == "":
            if setAnd:
                sql += " AND "
            sql += " P.PSET_HASH=:pset_hash"
            binds["pset_hash"] = pset_hash
            setAnd = True
        if not output_label == "":
            if setAnd:
                sql += " AND "
            sql += " O.OUTPUT_MODULE_LABEL=:output_module_label"
            binds["output_module_label"] = output_label
            setAnd = True
        if not global_tag == "":
            if setAnd:
                sql += " AND "
            sql += " O.GLOBAL_TAG=:global_tag"
            binds["global_tag"] = global_tag
        if app == release_version == pset_hash == global_tag == "":
            dbsExceptionHandler('dbsException-invalid-input',
                                "%s Either app_name, release_version, pset_hash or global_tag must be provided",
                                self.logger.exception)
        result = self.dbi.processData(sql, binds, conn, transaction)
        plist = self.formatDict(result)
        if len(plist) < 1:
            return -1
        return plist[0]["output_mod_config_id"]
[ "def", "execute", "(", "self", ",", "conn", ",", "app", "=", "\"\"", ",", "release_version", "=", "\"\"", ",", "pset_hash", "=", "\"\"", ",", "output_label", "=", "\"\"", ",", "global_tag", "=", "''", ",", "transaction", "=", "False", ")", ":", "sql", "=", "self", ".", "sql", "binds", "=", "{", "}", "setAnd", "=", "False", "if", "not", "app", "==", "\"\"", ":", "sql", "+=", "\" A.APP_NAME=:app_name\"", "binds", "[", "\"app_name\"", "]", "=", "app", "setAnd", "=", "True", "if", "not", "release_version", "==", "\"\"", ":", "if", "setAnd", ":", "sql", "+=", "\" AND \"", "sql", "+=", "\" R.RELEASE_VERSION=:release_version\"", "binds", "[", "\"release_version\"", "]", "=", "release_version", "setAnd", "=", "True", "if", "not", "pset_hash", "==", "\"\"", ":", "if", "setAnd", ":", "sql", "+=", "\" AND \"", "sql", "+=", "\" P.PSET_HASH=:pset_hash\"", "binds", "[", "\"pset_hash\"", "]", "=", "pset_hash", "setAnd", "=", "True", "if", "not", "output_label", "==", "\"\"", ":", "if", "setAnd", ":", "sql", "+=", "\" AND \"", "sql", "+=", "\" O.OUTPUT_MODULE_LABEL=:output_module_label\"", "binds", "[", "\"output_module_label\"", "]", "=", "output_label", "setAnd", "=", "True", "if", "not", "global_tag", "==", "\"\"", ":", "if", "setAnd", ":", "sql", "+=", "\" AND \"", "sql", "+=", "\" O.GLOBAL_TAG=:global_tag\"", "binds", "[", "\"global_tag\"", "]", "=", "global_tag", "if", "app", "==", "release_version", "==", "pset_hash", "==", "global_tag", "==", "\"\"", ":", "dbsExceptionHandler", "(", "'dbsException-invalid-input'", ",", "\"%s Either app_name, release_version, pset_hash or global_tag must be provided\"", ",", "self", ".", "logger", ".", "exception", ")", "result", "=", "self", ".", "dbi", ".", "processData", "(", "sql", ",", "binds", ",", "conn", ",", "transaction", ")", "plist", "=", "self", ".", "formatDict", "(", "result", ")", "if", "len", "(", "plist", ")", "<", "1", ":", "return", "-", "1", "return", "plist", "[", "0", "]", "[", "\"output_mod_config_id\"", "]" ]
36.447368
16.868421
def create_dvportgroup(portgroup_dict, portgroup_name, dvs, service_instance=None):
    '''
    Creates a distributed virtual portgroup.

    Note: The ``portgroup_name`` param will override any name already set
    in ``portgroup_dict``.

    portgroup_dict
        Dictionary with the config values the portgroup should be created
        with (example in salt.states.dvs).

    portgroup_name
        Name of the portgroup to be created.

    dvs
        Name of the DVS that will contain the portgroup.

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.create_dvportgroup portgroup_dict=<dict>
            portgroup_name=pg1 dvs=dvs1
    '''
    log.trace('Creating portgroup\'%s\' in dvs \'%s\' '
              'with dict = %s', portgroup_name, dvs, portgroup_dict)
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    dvs_refs = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
    if not dvs_refs:
        raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                         'retrieved'.format(dvs))
    # Make the name of the dvportgroup consistent with the parameter
    portgroup_dict['name'] = portgroup_name
    spec = vim.DVPortgroupConfigSpec()
    _apply_dvportgroup_config(portgroup_name, spec, portgroup_dict)
    salt.utils.vmware.create_dvportgroup(dvs_refs[0], spec)
    return True
[ "def", "create_dvportgroup", "(", "portgroup_dict", ",", "portgroup_name", ",", "dvs", ",", "service_instance", "=", "None", ")", ":", "log", ".", "trace", "(", "'Creating portgroup\\'%s\\' in dvs \\'%s\\' '", "'with dict = %s'", ",", "portgroup_name", ",", "dvs", ",", "portgroup_dict", ")", "proxy_type", "=", "get_proxy_type", "(", ")", "if", "proxy_type", "==", "'esxdatacenter'", ":", "datacenter", "=", "__salt__", "[", "'esxdatacenter.get_details'", "]", "(", ")", "[", "'datacenter'", "]", "dc_ref", "=", "_get_proxy_target", "(", "service_instance", ")", "elif", "proxy_type", "==", "'esxcluster'", ":", "datacenter", "=", "__salt__", "[", "'esxcluster.get_details'", "]", "(", ")", "[", "'datacenter'", "]", "dc_ref", "=", "salt", ".", "utils", ".", "vmware", ".", "get_datacenter", "(", "service_instance", ",", "datacenter", ")", "dvs_refs", "=", "salt", ".", "utils", ".", "vmware", ".", "get_dvss", "(", "dc_ref", ",", "dvs_names", "=", "[", "dvs", "]", ")", "if", "not", "dvs_refs", ":", "raise", "VMwareObjectRetrievalError", "(", "'DVS \\'{0}\\' was not '", "'retrieved'", ".", "format", "(", "dvs", ")", ")", "# Make the name of the dvportgroup consistent with the parameter", "portgroup_dict", "[", "'name'", "]", "=", "portgroup_name", "spec", "=", "vim", ".", "DVPortgroupConfigSpec", "(", ")", "_apply_dvportgroup_config", "(", "portgroup_name", ",", "spec", ",", "portgroup_dict", ")", "salt", ".", "utils", ".", "vmware", ".", "create_dvportgroup", "(", "dvs_refs", "[", "0", "]", ",", "spec", ")", "return", "True" ]
38.326087
22.413043
def _clean_algorithm(data):
    """Clean algorithm keys, handling items that can be specified as lists or single items.
    """
    # convert single items to lists
    for key in ["variantcaller", "jointcaller", "svcaller"]:
        val = tz.get_in(["algorithm", key], data)
        if val:
            if not isinstance(val, (list, tuple)) and isinstance(val, six.string_types):
                val = [val]
            # check for cases like [false] or [None]
            if isinstance(val, (list, tuple)):
                if len(val) == 1 and not val[0] or (isinstance(val[0], six.string_types) and
                                                    val[0].lower() in ["none", "false"]):
                    val = False
            data["algorithm"][key] = val
    return data
[ "def", "_clean_algorithm", "(", "data", ")", ":", "# convert single items to lists", "for", "key", "in", "[", "\"variantcaller\"", ",", "\"jointcaller\"", ",", "\"svcaller\"", "]", ":", "val", "=", "tz", ".", "get_in", "(", "[", "\"algorithm\"", ",", "key", "]", ",", "data", ")", "if", "val", ":", "if", "not", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", "and", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", ":", "val", "=", "[", "val", "]", "# check for cases like [false] or [None]", "if", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", ":", "if", "len", "(", "val", ")", "==", "1", "and", "not", "val", "[", "0", "]", "or", "(", "isinstance", "(", "val", "[", "0", "]", ",", "six", ".", "string_types", ")", "and", "val", "[", "0", "]", ".", "lower", "(", ")", "in", "[", "\"none\"", ",", "\"false\"", "]", ")", ":", "val", "=", "False", "data", "[", "\"algorithm\"", "]", "[", "key", "]", "=", "val", "return", "data" ]
47.75
17.875
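How the cleaning above behaves on typical inputs, sketched with plain dicts and str (tz.get_in simply walks nested keys here, and six.string_types reduces to str on Python 3):

# Hypothetical inputs -> normalized outputs, mirroring the rules above:
#   "gatk"                -> ["gatk"]  (single item wrapped in a list)
#   ["gatk", "freebayes"] -> unchanged
#   [False] / ["none"] / ["False"] -> False (caller disabled)
data = {"algorithm": {"variantcaller": "gatk", "svcaller": ["None"], "jointcaller": None}}
for key in ["variantcaller", "jointcaller", "svcaller"]:
    val = data["algorithm"].get(key)
    if val:
        if not isinstance(val, (list, tuple)) and isinstance(val, str):
            val = [val]
        if isinstance(val, (list, tuple)):
            if len(val) == 1 and not val[0] or (isinstance(val[0], str) and
                                                val[0].lower() in ["none", "false"]):
                val = False
        data["algorithm"][key] = val
print(data["algorithm"])  # {'variantcaller': ['gatk'], 'svcaller': False, 'jointcaller': None}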
def _set_clear_mpls_rsvp_statistics(self, v, load=False):
    """
    Setter method for clear_mpls_rsvp_statistics, mapped from YANG variable /brocade_mpls_rpc/clear_mpls_rsvp_statistics (rpc)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_clear_mpls_rsvp_statistics is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_clear_mpls_rsvp_statistics() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=clear_mpls_rsvp_statistics.clear_mpls_rsvp_statistics, is_leaf=True, yang_name="clear-mpls-rsvp-statistics", rest_name="clear-mpls-rsvp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsRsvpStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """clear_mpls_rsvp_statistics must be of a type compatible with rpc""",
          'defined-type': "rpc",
          'generated-type': """YANGDynClass(base=clear_mpls_rsvp_statistics.clear_mpls_rsvp_statistics, is_leaf=True, yang_name="clear-mpls-rsvp-statistics", rest_name="clear-mpls-rsvp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsRsvpStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
        })

    self.__clear_mpls_rsvp_statistics = t
    if hasattr(self, '_set'):
      self._set()
[ "def", "_set_clear_mpls_rsvp_statistics", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "clear_mpls_rsvp_statistics", ".", "clear_mpls_rsvp_statistics", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"clear-mpls-rsvp-statistics\"", ",", "rest_name", "=", "\"clear-mpls-rsvp-statistics\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "False", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'hidden'", ":", "u'rpccmd'", ",", "u'actionpoint'", ":", "u'clearMplsRsvpStatistics'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls'", ",", "defining_module", "=", "'brocade-mpls'", ",", "yang_type", "=", "'rpc'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"clear_mpls_rsvp_statistics must be of a type compatible with rpc\"\"\"", ",", "'defined-type'", ":", "\"rpc\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=clear_mpls_rsvp_statistics.clear_mpls_rsvp_statistics, is_leaf=True, yang_name=\"clear-mpls-rsvp-statistics\", rest_name=\"clear-mpls-rsvp-statistics\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'rpccmd', u'actionpoint': u'clearMplsRsvpStatistics'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__clear_mpls_rsvp_statistics", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
81.954545
39
def source_model_info(nodes): """ Extract information about NRML/0.5 source models. Returns a table with TRTs as rows and source classes as columns. """ c = collections.Counter() for node in nodes: for src_group in node: trt = src_group['tectonicRegion'] for src in src_group: src_class = src.tag.split('}')[1] c[trt, src_class] += 1 trts, classes = zip(*c) trts = sorted(set(trts)) classes = sorted(set(classes)) dtlist = [('TRT', (bytes, 30))] + [(name, int) for name in classes] out = numpy.zeros(len(trts) + 1, dtlist) # +1 for the totals for i, trt in enumerate(trts): out[i]['TRT'] = trt for src_class in classes: out[i][src_class] = c[trt, src_class] out[-1]['TRT'] = 'Total' for name in out.dtype.names[1:]: out[-1][name] = out[name][:-1].sum() return rst_table(out)
[ "def", "source_model_info", "(", "nodes", ")", ":", "c", "=", "collections", ".", "Counter", "(", ")", "for", "node", "in", "nodes", ":", "for", "src_group", "in", "node", ":", "trt", "=", "src_group", "[", "'tectonicRegion'", "]", "for", "src", "in", "src_group", ":", "src_class", "=", "src", ".", "tag", ".", "split", "(", "'}'", ")", "[", "1", "]", "c", "[", "trt", ",", "src_class", "]", "+=", "1", "trts", ",", "classes", "=", "zip", "(", "*", "c", ")", "trts", "=", "sorted", "(", "set", "(", "trts", ")", ")", "classes", "=", "sorted", "(", "set", "(", "classes", ")", ")", "dtlist", "=", "[", "(", "'TRT'", ",", "(", "bytes", ",", "30", ")", ")", "]", "+", "[", "(", "name", ",", "int", ")", "for", "name", "in", "classes", "]", "out", "=", "numpy", ".", "zeros", "(", "len", "(", "trts", ")", "+", "1", ",", "dtlist", ")", "# +1 for the totals", "for", "i", ",", "trt", "in", "enumerate", "(", "trts", ")", ":", "out", "[", "i", "]", "[", "'TRT'", "]", "=", "trt", "for", "src_class", "in", "classes", ":", "out", "[", "i", "]", "[", "src_class", "]", "=", "c", "[", "trt", ",", "src_class", "]", "out", "[", "-", "1", "]", "[", "'TRT'", "]", "=", "'Total'", "for", "name", "in", "out", ".", "dtype", ".", "names", "[", "1", ":", "]", ":", "out", "[", "-", "1", "]", "[", "name", "]", "=", "out", "[", "name", "]", "[", ":", "-", "1", "]", ".", "sum", "(", ")", "return", "rst_table", "(", "out", ")" ]
36.44
10.84
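The counting step in isolation; the (TRT, source class) pairs below stand in for what the NRML node walk would produce:

import collections

c = collections.Counter()
for trt, src_class in [('Active Shallow Crust', 'pointSource'),
                       ('Active Shallow Crust', 'pointSource'),
                       ('Stable Continental', 'areaSource')]:
    c[trt, src_class] += 1

trts, classes = zip(*c)   # unzip the (TRT, class) keys
sorted(set(trts))         # ['Active Shallow Crust', 'Stable Continental']
sorted(set(classes))      # ['areaSource', 'pointSource']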
def add_ms1_quant_from_top3_mzidtsv(proteins, psms, headerfields, protcol):
    """Collects PSMs with the highest precursor quant values,
    adds sum of the top 3 of these to a protein table"""
    if not protcol:
        protcol = mzidtsvdata.HEADER_MASTER_PROT
    top_ms1_psms = generate_top_psms(psms, protcol)
    for protein in proteins:
        prot_acc = protein[prottabledata.HEADER_PROTEIN]
        prec_area = calculate_protein_precursor_quant(top_ms1_psms, prot_acc)
        outprotein = {k: v for k, v in protein.items()}
        outprotein[headerfields['precursorquant'][
            prottabledata.HEADER_AREA][None]] = str(prec_area)
        yield outprotein
[ "def", "add_ms1_quant_from_top3_mzidtsv", "(", "proteins", ",", "psms", ",", "headerfields", ",", "protcol", ")", ":", "if", "not", "protcol", ":", "protcol", "=", "mzidtsvdata", ".", "HEADER_MASTER_PROT", "top_ms1_psms", "=", "generate_top_psms", "(", "psms", ",", "protcol", ")", "for", "protein", "in", "proteins", ":", "prot_acc", "=", "protein", "[", "prottabledata", ".", "HEADER_PROTEIN", "]", "prec_area", "=", "calculate_protein_precursor_quant", "(", "top_ms1_psms", ",", "prot_acc", ")", "outprotein", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "protein", ".", "items", "(", ")", "}", "outprotein", "[", "headerfields", "[", "'precursorquant'", "]", "[", "prottabledata", ".", "HEADER_AREA", "]", "[", "None", "]", "]", "=", "str", "(", "prec_area", ")", "yield", "outprotein" ]
50.846154
15.615385
def _get_ref_info_helper(cls, repo, ref_path):
    """Return: (str(sha), str(target_ref_path)) if available, the sha the
    file at ref_path points to, or None. target_ref_path is the reference we
    point to, or None"""
    tokens = None
    repodir = _git_dir(repo, ref_path)
    try:
        with open(osp.join(repodir, ref_path), 'rt') as fp:
            value = fp.read().rstrip()
        # Don't only split on spaces, but on whitespace, which allows to parse lines like
        # 60b64ef992065e2600bfef6187a97f92398a9144 branch 'master' of git-server:/path/to/repo
        tokens = value.split()
        assert(len(tokens) != 0)
    except (OSError, IOError):
        # Probably we are just packed, find our entry in the packed refs file
        # NOTE: We are not a symbolic ref if we are in a packed file, as these
        # are excluded explicitly
        for sha, path in cls._iter_packed_refs(repo):
            if path != ref_path:
                continue
            # sha will be used
            tokens = sha, path
            break
        # END for each packed ref
    # END handle packed refs
    if tokens is None:
        raise ValueError("Reference at %r does not exist" % ref_path)

    # is it a reference ?
    if tokens[0] == 'ref:':
        return (None, tokens[1])

    # its a commit
    if repo.re_hexsha_only.match(tokens[0]):
        return (tokens[0], None)

    raise ValueError("Failed to parse reference information from %r" % ref_path)
[ "def", "_get_ref_info_helper", "(", "cls", ",", "repo", ",", "ref_path", ")", ":", "tokens", "=", "None", "repodir", "=", "_git_dir", "(", "repo", ",", "ref_path", ")", "try", ":", "with", "open", "(", "osp", ".", "join", "(", "repodir", ",", "ref_path", ")", ",", "'rt'", ")", "as", "fp", ":", "value", "=", "fp", ".", "read", "(", ")", ".", "rstrip", "(", ")", "# Don't only split on spaces, but on whitespace, which allows to parse lines like", "# 60b64ef992065e2600bfef6187a97f92398a9144 branch 'master' of git-server:/path/to/repo", "tokens", "=", "value", ".", "split", "(", ")", "assert", "(", "len", "(", "tokens", ")", "!=", "0", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "# Probably we are just packed, find our entry in the packed refs file", "# NOTE: We are not a symbolic ref if we are in a packed file, as these", "# are excluded explicitly", "for", "sha", ",", "path", "in", "cls", ".", "_iter_packed_refs", "(", "repo", ")", ":", "if", "path", "!=", "ref_path", ":", "continue", "# sha will be used", "tokens", "=", "sha", ",", "path", "break", "# END for each packed ref", "# END handle packed refs", "if", "tokens", "is", "None", ":", "raise", "ValueError", "(", "\"Reference at %r does not exist\"", "%", "ref_path", ")", "# is it a reference ?", "if", "tokens", "[", "0", "]", "==", "'ref:'", ":", "return", "(", "None", ",", "tokens", "[", "1", "]", ")", "# its a commit", "if", "repo", ".", "re_hexsha_only", ".", "match", "(", "tokens", "[", "0", "]", ")", ":", "return", "(", "tokens", "[", "0", "]", ",", "None", ")", "raise", "ValueError", "(", "\"Failed to parse reference information from %r\"", "%", "ref_path", ")" ]
42.864865
18.405405
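The two token shapes the parser distinguishes, shown standalone:

# symbolic ref file content -> returns (None, target_ref_path)
'ref: refs/heads/master'.split()
# -> ['ref:', 'refs/heads/master']

# detached HEAD / packed-refs entry -> returns (hexsha, None)
'60b64ef992065e2600bfef6187a97f92398a9144'.split()
# -> a single 40-character sha token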
def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=False,
        score_table=None, count_table=None, methods=None,
        ncpus=None):
    """Run maelstrom on an input table.

    Parameters
    ----------
    infile : str
        Filename of input table. Can be either a tab-separated text file or a
        feather file.

    genome : str
        Genome name. Can be either the name of a FASTA-formatted file or a
        genomepy genome name.

    outdir : str
        Output directory for all results.

    pwmfile : str, optional
        Specify a PFM file for scanning.

    plot : bool, optional
        Create heatmaps.

    cluster : bool, optional
        If True and if the input table has more than one column, the data is
        clustered and the cluster activity methods are also run. Not
        well-tested.

    score_table : str, optional
        Filename of pre-calculated table with motif scores.

    count_table : str, optional
        Filename of pre-calculated table with motif counts.

    methods : list, optional
        Activity methods to use. By default, all are used.

    ncpus : int, optional
        If defined, this specifies the number of cores to use.
    """
    logger.info("Starting maelstrom")
    if infile.endswith("feather"):
        df = pd.read_feather(infile)
        df = df.set_index(df.columns[0])
    else:
        df = pd.read_table(infile, index_col=0, comment="#")

    # Check for duplicates
    if df.index.duplicated(keep=False).any():
        raise ValueError("Input file contains duplicate regions! "
                         "Please remove them.")

    if not os.path.exists(outdir):
        os.mkdir(outdir)

    if methods is None:
        methods = Moap.list_predictors()
    methods = [m.lower() for m in methods]

    shutil.copyfile(infile, os.path.join(outdir, "input.table.txt"))

    # Copy the motif information
    pwmfile = pwmfile_location(pwmfile)
    if pwmfile:
        shutil.copy2(pwmfile, outdir)
        mapfile = re.sub(".p[fw]m$", ".motif2factors.txt", pwmfile)
        if os.path.exists(mapfile):
            shutil.copy2(mapfile, outdir)

    # Create a file with the number of motif matches
    if not count_table:
        count_table = os.path.join(outdir, "motif.count.txt.gz")
        if not os.path.exists(count_table):
            logger.info("Motif scanning (counts)")
            counts = scan_to_table(infile, genome, "count",
                                   pwmfile=pwmfile, ncpus=ncpus)
            counts.to_csv(count_table, sep="\t", compression="gzip")
    else:
        logger.info("Counts, using: %s", count_table)

    # Create a file with the score of the best motif match
    if not score_table:
        score_table = os.path.join(outdir, "motif.score.txt.gz")
        if not os.path.exists(score_table):
            logger.info("Motif scanning (scores)")
            scores = scan_to_table(infile, genome, "score",
                                   pwmfile=pwmfile, ncpus=ncpus)
            scores.to_csv(score_table, sep="\t", float_format="%.3f",
                          compression="gzip")
    else:
        logger.info("Scores, using: %s", score_table)

    if cluster:
        cluster = False
        for method in methods:
            m = Moap.create(method, ncpus=ncpus)
            if m.ptype == "classification":
                cluster = True
                break
        if not cluster:
            logger.info("Skipping clustering, no classification methods")

    exps = []
    clusterfile = infile
    if df.shape[1] != 1:
        # More than one column
        for method in Moap.list_regression_predictors():
            if method in methods:
                m = Moap.create(method, ncpus=ncpus)
                exps.append([method, m.pref_table, infile])
                logger.debug("Adding %s", method)

        if cluster:
            clusterfile = os.path.join(outdir,
                                       os.path.basename(infile) + ".cluster.txt")

            df[:] = scale(df, axis=0)
            names = df.columns
            df_changed = pd.DataFrame(index=df.index)
            df_changed["cluster"] = np.nan
            for name in names:
                df_changed.loc[(df[name] - df.loc[:,df.columns != name].max(1)) > 0.5, "cluster"] = name
            df_changed.dropna().to_csv(clusterfile, sep="\t")

    if df.shape[1] == 1 or cluster:
        for method in Moap.list_classification_predictors():
            if method in methods:
                m = 
Moap.create(method, ncpus=ncpus) exps.append([method, m.pref_table, clusterfile]) if len(exps) == 0: logger.error("No method to run.") sys.exit(1) for method, scoring, fname in exps: try: if scoring == "count" and count_table: moap_with_table(fname, count_table, outdir, method, scoring, ncpus=ncpus) elif scoring == "score" and score_table: moap_with_table(fname, score_table, outdir, method, scoring, ncpus=ncpus) else: moap_with_bg(fname, genome, outdir, method, scoring, pwmfile=pwmfile, ncpus=ncpus) except Exception as e: logger.warn("Method %s with scoring %s failed", method, scoring) logger.warn(e) logger.warn("Skipping") raise dfs = {} for method, scoring,fname in exps: t = "{}.{}".format(method,scoring) fname = os.path.join(outdir, "activity.{}.{}.out.txt".format( method, scoring)) try: dfs[t] = pd.read_table(fname, index_col=0, comment="#") except: logging.warn("Activity file for {} not found!\n".format(t)) if len(methods) > 1: logger.info("Rank aggregation") df_p = df_rank_aggregation(df, dfs, exps) df_p.to_csv(os.path.join(outdir, "final.out.csv"), sep="\t") #df_p = df_p.join(m2f) # Write motif frequency table if df.shape[1] == 1: mcount = df.join(pd.read_table(count_table, index_col=0, comment="#")) m_group = mcount.groupby(df.columns[0]) freq = m_group.sum() / m_group.count() freq.to_csv(os.path.join(outdir, "motif.freq.txt"), sep="\t") if plot and len(methods) > 1: logger.info("html report") maelstrom_html_report( outdir, os.path.join(outdir, "final.out.csv"), pwmfile ) logger.info(os.path.join(outdir, "gimme.maelstrom.report.html"))
[ "def", "run_maelstrom", "(", "infile", ",", "genome", ",", "outdir", ",", "pwmfile", "=", "None", ",", "plot", "=", "True", ",", "cluster", "=", "False", ",", "score_table", "=", "None", ",", "count_table", "=", "None", ",", "methods", "=", "None", ",", "ncpus", "=", "None", ")", ":", "logger", ".", "info", "(", "\"Starting maelstrom\"", ")", "if", "infile", ".", "endswith", "(", "\"feather\"", ")", ":", "df", "=", "pd", ".", "read_feather", "(", "infile", ")", "df", "=", "df", ".", "set_index", "(", "df", ".", "columns", "[", "0", "]", ")", "else", ":", "df", "=", "pd", ".", "read_table", "(", "infile", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "# Check for duplicates", "if", "df", ".", "index", ".", "duplicated", "(", "keep", "=", "False", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"Input file contains duplicate regions! \"", "\"Please remove them.\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "if", "methods", "is", "None", ":", "methods", "=", "Moap", ".", "list_predictors", "(", ")", "methods", "=", "[", "m", ".", "lower", "(", ")", "for", "m", "in", "methods", "]", "shutil", ".", "copyfile", "(", "infile", ",", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"input.table.txt\"", ")", ")", "# Copy the motif informatuon", "pwmfile", "=", "pwmfile_location", "(", "pwmfile", ")", "if", "pwmfile", ":", "shutil", ".", "copy2", "(", "pwmfile", ",", "outdir", ")", "mapfile", "=", "re", ".", "sub", "(", "\".p[fw]m$\"", ",", "\".motif2factors.txt\"", ",", "pwmfile", ")", "if", "os", ".", "path", ".", "exists", "(", "mapfile", ")", ":", "shutil", ".", "copy2", "(", "mapfile", ",", "outdir", ")", "# Create a file with the number of motif matches", "if", "not", "count_table", ":", "count_table", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.count.txt.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "count_table", ")", ":", "logger", ".", "info", "(", "\"Motif scanning (counts)\"", ")", "counts", "=", "scan_to_table", "(", "infile", ",", "genome", ",", "\"count\"", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "counts", ".", "to_csv", "(", "count_table", ",", "sep", "=", "\"\\t\"", ",", "compression", "=", "\"gzip\"", ")", "else", ":", "logger", ".", "info", "(", "\"Counts, using: %s\"", ",", "count_table", ")", "# Create a file with the score of the best motif match", "if", "not", "score_table", ":", "score_table", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.score.txt.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "score_table", ")", ":", "logger", ".", "info", "(", "\"Motif scanning (scores)\"", ")", "scores", "=", "scan_to_table", "(", "infile", ",", "genome", ",", "\"score\"", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "scores", ".", "to_csv", "(", "score_table", ",", "sep", "=", "\"\\t\"", ",", "float_format", "=", "\"%.3f\"", ",", "compression", "=", "\"gzip\"", ")", "else", ":", "logger", ".", "info", "(", "\"Scores, using: %s\"", ",", "score_table", ")", "if", "cluster", ":", "cluster", "=", "False", "for", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "if", "m", ".", "ptype", "==", "\"classification\"", ":", "cluster", "=", "True", "break", "if", "not", "cluster", ":", "logger", ".", "info", "(", "\"Skipping clustering, no classification methods\"", ")", "exps", "=", "[", "]", 
"clusterfile", "=", "infile", "if", "df", ".", "shape", "[", "1", "]", "!=", "1", ":", "# More than one column", "for", "method", "in", "Moap", ".", "list_regression_predictors", "(", ")", ":", "if", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "exps", ".", "append", "(", "[", "method", ",", "m", ".", "pref_table", ",", "infile", "]", ")", "logger", ".", "debug", "(", "\"Adding %s\"", ",", "method", ")", "if", "cluster", ":", "clusterfile", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "os", ".", "path", ".", "basename", "(", "infile", ")", "+", "\".cluster.txt\"", ")", "df", "[", ":", "]", "=", "scale", "(", "df", ",", "axis", "=", "0", ")", "names", "=", "df", ".", "columns", "df_changed", "=", "pd", ".", "DataFrame", "(", "index", "=", "df", ".", "index", ")", "df_changed", "[", "\"cluster\"", "]", "=", "np", ".", "nan", "for", "name", "in", "names", ":", "df_changed", ".", "loc", "[", "(", "df", "[", "name", "]", "-", "df", ".", "loc", "[", ":", ",", "df", ".", "columns", "!=", "name", "]", ".", "max", "(", "1", ")", ")", ">", "0.5", ",", "\"cluster\"", "]", "=", "name", "df_changed", ".", "dropna", "(", ")", ".", "to_csv", "(", "clusterfile", ",", "sep", "=", "\"\\t\"", ")", "if", "df", ".", "shape", "[", "1", "]", "==", "1", "or", "cluster", ":", "for", "method", "in", "Moap", ".", "list_classification_predictors", "(", ")", ":", "if", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "exps", ".", "append", "(", "[", "method", ",", "m", ".", "pref_table", ",", "clusterfile", "]", ")", "if", "len", "(", "exps", ")", "==", "0", ":", "logger", ".", "error", "(", "\"No method to run.\"", ")", "sys", ".", "exit", "(", "1", ")", "for", "method", ",", "scoring", ",", "fname", "in", "exps", ":", "try", ":", "if", "scoring", "==", "\"count\"", "and", "count_table", ":", "moap_with_table", "(", "fname", ",", "count_table", ",", "outdir", ",", "method", ",", "scoring", ",", "ncpus", "=", "ncpus", ")", "elif", "scoring", "==", "\"score\"", "and", "score_table", ":", "moap_with_table", "(", "fname", ",", "score_table", ",", "outdir", ",", "method", ",", "scoring", ",", "ncpus", "=", "ncpus", ")", "else", ":", "moap_with_bg", "(", "fname", ",", "genome", ",", "outdir", ",", "method", ",", "scoring", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warn", "(", "\"Method %s with scoring %s failed\"", ",", "method", ",", "scoring", ")", "logger", ".", "warn", "(", "e", ")", "logger", ".", "warn", "(", "\"Skipping\"", ")", "raise", "dfs", "=", "{", "}", "for", "method", ",", "scoring", ",", "fname", "in", "exps", ":", "t", "=", "\"{}.{}\"", ".", "format", "(", "method", ",", "scoring", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"activity.{}.{}.out.txt\"", ".", "format", "(", "method", ",", "scoring", ")", ")", "try", ":", "dfs", "[", "t", "]", "=", "pd", ".", "read_table", "(", "fname", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "except", ":", "logging", ".", "warn", "(", "\"Activity file for {} not found!\\n\"", ".", "format", "(", "t", ")", ")", "if", "len", "(", "methods", ")", ">", "1", ":", "logger", ".", "info", "(", "\"Rank aggregation\"", ")", "df_p", "=", "df_rank_aggregation", "(", "df", ",", "dfs", ",", "exps", ")", "df_p", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"final.out.csv\"", ")", ",", "sep", 
"=", "\"\\t\"", ")", "#df_p = df_p.join(m2f)", "# Write motif frequency table", "if", "df", ".", "shape", "[", "1", "]", "==", "1", ":", "mcount", "=", "df", ".", "join", "(", "pd", ".", "read_table", "(", "count_table", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", ")", "m_group", "=", "mcount", ".", "groupby", "(", "df", ".", "columns", "[", "0", "]", ")", "freq", "=", "m_group", ".", "sum", "(", ")", "/", "m_group", ".", "count", "(", ")", "freq", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.freq.txt\"", ")", ",", "sep", "=", "\"\\t\"", ")", "if", "plot", "and", "len", "(", "methods", ")", ">", "1", ":", "logger", ".", "info", "(", "\"html report\"", ")", "maelstrom_html_report", "(", "outdir", ",", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"final.out.csv\"", ")", ",", "pwmfile", ")", "logger", ".", "info", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"gimme.maelstrom.report.html\"", ")", ")" ]
35.653631
19.821229
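A sketch of a typical call; the file names and the method subset are illustrative only:

run_maelstrom("enhancer_clusters.txt",       # regions x experiments table
              "hg38",                        # genomepy name or FASTA path
              "maelstrom.out",
              methods=["hypergeom", "mwu"],  # hypothetical subset of predictors
              ncpus=4)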
def GetService(self, service_name, version=None, server=None): """Creates a service client for the given service. Args: service_name: A string identifying which AdWords service to create a service client for. [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: A googleads.common.GoogleSoapService instance which has the headers and proxy configured for use. Raises: A GoogleAdsValueError if the service or version provided do not exist. """ if not server: server = _DEFAULT_ENDPOINT server = server.rstrip('/') if not version: version = sorted(_SERVICE_MAP.keys())[-1] try: version_service_mapping = _SERVICE_MAP[version][service_name] except KeyError: msg_fmt = 'Unrecognized %s for the AdWords API. Given: %s Supported: %s' if version in _SERVICE_MAP: raise googleads.errors.GoogleAdsValueError( msg_fmt % ('service', service_name, _SERVICE_MAP[version].keys())) else: raise googleads.errors.GoogleAdsValueError( msg_fmt % ('version', version, _SERVICE_MAP.keys())) service = googleads.common.GetServiceClassForLibrary(self.soap_impl)( self._SOAP_SERVICE_FORMAT % ( server, version_service_mapping, version, service_name), _AdWordsHeaderHandler( self, version, self.enable_compression, self.custom_http_headers), _AdWordsPacker, self.proxy_config, self.timeout, version, cache=self.cache) return service
[ "def", "GetService", "(", "self", ",", "service_name", ",", "version", "=", "None", ",", "server", "=", "None", ")", ":", "if", "not", "server", ":", "server", "=", "_DEFAULT_ENDPOINT", "server", "=", "server", ".", "rstrip", "(", "'/'", ")", "if", "not", "version", ":", "version", "=", "sorted", "(", "_SERVICE_MAP", ".", "keys", "(", ")", ")", "[", "-", "1", "]", "try", ":", "version_service_mapping", "=", "_SERVICE_MAP", "[", "version", "]", "[", "service_name", "]", "except", "KeyError", ":", "msg_fmt", "=", "'Unrecognized %s for the AdWords API. Given: %s Supported: %s'", "if", "version", "in", "_SERVICE_MAP", ":", "raise", "googleads", ".", "errors", ".", "GoogleAdsValueError", "(", "msg_fmt", "%", "(", "'service'", ",", "service_name", ",", "_SERVICE_MAP", "[", "version", "]", ".", "keys", "(", ")", ")", ")", "else", ":", "raise", "googleads", ".", "errors", ".", "GoogleAdsValueError", "(", "msg_fmt", "%", "(", "'version'", ",", "version", ",", "_SERVICE_MAP", ".", "keys", "(", ")", ")", ")", "service", "=", "googleads", ".", "common", ".", "GetServiceClassForLibrary", "(", "self", ".", "soap_impl", ")", "(", "self", ".", "_SOAP_SERVICE_FORMAT", "%", "(", "server", ",", "version_service_mapping", ",", "version", ",", "service_name", ")", ",", "_AdWordsHeaderHandler", "(", "self", ",", "version", ",", "self", ".", "enable_compression", ",", "self", ".", "custom_http_headers", ")", ",", "_AdWordsPacker", ",", "self", ".", "proxy_config", ",", "self", ".", "timeout", ",", "version", ",", "cache", "=", "self", ".", "cache", ")", "return", "service" ]
35.64
24.82
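Typical use, assuming a client configured from storage; the version string is illustrative and must be one of the keys in _SERVICE_MAP:

client = adwords.AdWordsClient.LoadFromStorage()   # reads googleads.yaml
campaign_service = client.GetService('CampaignService', version='v201809')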
def glob(cls, files=None):
    '''
    Glob a pattern or a list of patterns, relative to the static storage.
    '''
    files = files or []
    if isinstance(files, str):
        files = os.path.normpath(files)
        matches = lambda path: matches_patterns(path, [files])
        return [path for path in cls.get_static_files() if matches(path)]
    elif isinstance(files, (list, tuple)):
        all_files = cls.get_static_files()
        files = [os.path.normpath(f) for f in files]
        sorted_result = []
        for pattern in files:
            sorted_result.extend([f for f in all_files if matches_patterns(f, [pattern])])
        return sorted_result
[ "def", "glob", "(", "cls", ",", "files", "=", "None", ")", ":", "files", "=", "files", "or", "[", "]", "if", "isinstance", "(", "files", ",", "str", ")", ":", "files", "=", "os", ".", "path", ".", "normpath", "(", "files", ")", "matches", "=", "lambda", "path", ":", "matches_patterns", "(", "path", ",", "[", "files", "]", ")", "return", "[", "path", "for", "path", "in", "cls", ".", "get_static_files", "(", ")", "if", "matches", "(", "path", ")", "]", "elif", "isinstance", "(", "files", ",", "(", "list", ",", "tuple", ")", ")", ":", "all_files", "=", "cls", ".", "get_static_files", "(", ")", "files", "=", "[", "os", ".", "path", ".", "normpath", "(", "f", ")", "for", "f", "in", "files", "]", "sorted_result", "=", "[", "]", "for", "pattern", "in", "files", ":", "sorted_result", ".", "extend", "(", "[", "f", "for", "f", "in", "all_files", "if", "matches_patterns", "(", "f", ",", "[", "pattern", "]", ")", "]", ")", "return", "sorted_result" ]
43.9375
18.4375
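Sketched usage on a hypothetical owner class Assets; note the branching: a single pattern filters get_static_files() in storage order, while a list of patterns returns matches grouped per pattern:

Assets.glob('js/*.js')                        # one pattern -> filtered list
Assets.glob(['js/vendor/*.js', 'js/app.js'])  # list -> results ordered by pattern
Assets.glob()                                 # no patterns -> []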
def _determine_username(self, ip): """SSH in as root and determine the username.""" ssh = subprocess.Popen([ "ssh", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "root@%s" % ip], stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL) first_line = ssh.stdout.readline() ssh.kill() ssh.wait() if first_line: match = re.search( r"Please login as the user \"(\w+)\" rather than " r"the user \"root\".", first_line.decode('utf-8')) if match: return match.groups()[0] else: return None
[ "def", "_determine_username", "(", "self", ",", "ip", ")", ":", "ssh", "=", "subprocess", ".", "Popen", "(", "[", "\"ssh\"", ",", "\"-o\"", ",", "\"UserKnownHostsFile=/dev/null\"", ",", "\"-o\"", ",", "\"StrictHostKeyChecking=no\"", ",", "\"root@%s\"", "%", "ip", "]", ",", "stdin", "=", "subprocess", ".", "DEVNULL", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "DEVNULL", ")", "first_line", "=", "ssh", ".", "stdout", ".", "readline", "(", ")", "ssh", ".", "kill", "(", ")", "ssh", ".", "wait", "(", ")", "if", "first_line", ":", "match", "=", "re", ".", "search", "(", "r\"Please login as the user \\\"(\\w+)\\\" rather than \"", "r\"the user \\\"root\\\".\"", ",", "first_line", ".", "decode", "(", "'utf-8'", ")", ")", "if", "match", ":", "return", "match", ".", "groups", "(", ")", "[", "0", "]", "else", ":", "return", "None" ]
34.809524
12.428571
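The regex in isolation, applied to the kind of banner cloud images print when root logins are redirected (banner text assumed):

import re

banner = 'Please login as the user "ubuntu" rather than the user "root".'
m = re.search(r'Please login as the user "(\w+)" rather than the user "root"\.',
              banner)
m.groups()[0]  # 'ubuntu'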
def hacking_docstring_summary(physical_line, previous_logical, tokens): r"""Check multi line docstring summary is separated with empty line. OpenStack HACKING guide recommendation for docstring: Docstring should start with a one-line summary, less than 80 characters. Okay: def foo():\n a = '''\nnot\na docstring\n''' Okay: '''foobar\n\nfoo\nbar\n''' H405: def foo():\n '''foobar\nfoo\nbar\n''' H405: def foo():\n r'''foobar\nfoo\nbar\n''' H405: def foo():\n '''foobar\n''' """ docstring = is_docstring(tokens, previous_logical) if docstring: if '\n' not in docstring: # not a multi line docstring return lines = docstring.split('\n') if len(lines) > 1 and len(lines[1].strip()) is not 0: # docstrings get tokenized on the last line of the docstring, so # we don't know the exact position. return (0, "H405: multi line docstring " "summary not separated with an empty line")
[ "def", "hacking_docstring_summary", "(", "physical_line", ",", "previous_logical", ",", "tokens", ")", ":", "docstring", "=", "is_docstring", "(", "tokens", ",", "previous_logical", ")", "if", "docstring", ":", "if", "'\\n'", "not", "in", "docstring", ":", "# not a multi line docstring", "return", "lines", "=", "docstring", ".", "split", "(", "'\\n'", ")", "if", "len", "(", "lines", ")", ">", "1", "and", "len", "(", "lines", "[", "1", "]", ".", "strip", "(", ")", ")", "is", "not", "0", ":", "# docstrings get tokenized on the last line of the docstring, so", "# we don't know the exact position.", "return", "(", "0", ",", "\"H405: multi line docstring \"", "\"summary not separated with an empty line\"", ")" ]
44.086957
16.217391
def comment_lines(lines): """Comment out the given list of lines and return them. The hash mark will be inserted before the first non-whitespace character on each line.""" ret = [] for line in lines: ws_prefix, rest, ignore = RE_LINE_SPLITTER_COMMENT.match(line).groups() ret.append(ws_prefix + '#' + rest) return ''.join(ret)
[ "def", "comment_lines", "(", "lines", ")", ":", "ret", "=", "[", "]", "for", "line", "in", "lines", ":", "ws_prefix", ",", "rest", ",", "ignore", "=", "RE_LINE_SPLITTER_COMMENT", ".", "match", "(", "line", ")", ".", "groups", "(", ")", "ret", ".", "append", "(", "ws_prefix", "+", "'#'", "+", "rest", ")", "return", "''", ".", "join", "(", "ret", ")" ]
44.5
14.875
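A self-contained sketch: RE_LINE_SPLITTER_COMMENT lives elsewhere in the real module, so a stand-in pattern is assumed here, and the newline group is re-appended so the joined result stays line-structured:

import re

# Assumed stand-in: (leading indent, body, optional trailing newline).
RE_LINE_SPLITTER_COMMENT = re.compile(r'^([ \t]*)(.*?)(\n?)$')

def comment_lines(lines):
    ret = []
    for line in lines:
        ws_prefix, rest, nl = RE_LINE_SPLITTER_COMMENT.match(line).groups()
        ret.append(ws_prefix + '#' + rest + nl)
    return ''.join(ret)

comment_lines(['x = 1\n', '    y = 2\n'])  # '#x = 1\n    #y = 2\n'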
def get_filebase(path, pattern): """Get the end of *path* of same length as *pattern*.""" # A pattern can include directories tail_len = len(pattern.split(os.path.sep)) return os.path.join(*str(path).split(os.path.sep)[-tail_len:])
[ "def", "get_filebase", "(", "path", ",", "pattern", ")", ":", "# A pattern can include directories", "tail_len", "=", "len", "(", "pattern", ".", "split", "(", "os", ".", "path", ".", "sep", ")", ")", "return", "os", ".", "path", ".", "join", "(", "*", "str", "(", "path", ")", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "tail_len", ":", "]", ")" ]
48.6
8.2
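Worked example (POSIX separators assumed):

# the pattern has two path components, so the last two of path are kept
get_filebase('/data/2019/noaa19/hrpt_noaa19_20190301.l1b',
             '{platform}/hrpt_{platform}_{start_time}.l1b')
# -> 'noaa19/hrpt_noaa19_20190301.l1b'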
def __get_global_options(cmd_line_options, conf_file_options=None): """ Get all global options :type cmd_line_options: dict :param cmd_line_options: Dictionary with all command line options :type conf_file_options: dict :param conf_file_options: Dictionary with all config file options :returns: dict """ options = {} for option in DEFAULT_OPTIONS['global'].keys(): options[option] = DEFAULT_OPTIONS['global'][option] if conf_file_options and option in conf_file_options: options[option] = conf_file_options[option] if cmd_line_options and option in cmd_line_options: options[option] = cmd_line_options[option] return options
[ "def", "__get_global_options", "(", "cmd_line_options", ",", "conf_file_options", "=", "None", ")", ":", "options", "=", "{", "}", "for", "option", "in", "DEFAULT_OPTIONS", "[", "'global'", "]", ".", "keys", "(", ")", ":", "options", "[", "option", "]", "=", "DEFAULT_OPTIONS", "[", "'global'", "]", "[", "option", "]", "if", "conf_file_options", "and", "option", "in", "conf_file_options", ":", "options", "[", "option", "]", "=", "conf_file_options", "[", "option", "]", "if", "cmd_line_options", "and", "option", "in", "cmd_line_options", ":", "options", "[", "option", "]", "=", "cmd_line_options", "[", "option", "]", "return", "options" ]
33.238095
22.238095
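Illustration of the precedence (defaults < config file < command line); the DEFAULT_OPTIONS shape is assumed:

DEFAULT_OPTIONS = {'global': {'region': 'us-east-1', 'dry_run': False}}  # assumed

cmd_line_options = {'dry_run': True}
conf_file_options = {'region': 'eu-west-1', 'dry_run': False}
# merging as above yields
# {'region': 'eu-west-1', 'dry_run': True}   # command line wins over config file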
def addTextOut(self, text): """add black text""" self._currentColor = self._black self.addText(text)
[ "def", "addTextOut", "(", "self", ",", "text", ")", ":", "self", ".", "_currentColor", "=", "self", ".", "_black", "self", ".", "addText", "(", "text", ")" ]
30.25
6.75
def upgrade(self): """Upgrade deployment.""" if not self.is_valid: raise PolyaxonDeploymentConfigError( 'Deployment type `{}` not supported'.format(self.deployment_type)) if self.is_kubernetes: self.upgrade_on_kubernetes() elif self.is_docker_compose: self.upgrade_on_docker_compose() elif self.is_docker: self.upgrade_on_docker() elif self.is_heroku: self.upgrade_on_heroku()
[ "def", "upgrade", "(", "self", ")", ":", "if", "not", "self", ".", "is_valid", ":", "raise", "PolyaxonDeploymentConfigError", "(", "'Deployment type `{}` not supported'", ".", "format", "(", "self", ".", "deployment_type", ")", ")", "if", "self", ".", "is_kubernetes", ":", "self", ".", "upgrade_on_kubernetes", "(", ")", "elif", "self", ".", "is_docker_compose", ":", "self", ".", "upgrade_on_docker_compose", "(", ")", "elif", "self", ".", "is_docker", ":", "self", ".", "upgrade_on_docker", "(", ")", "elif", "self", ".", "is_heroku", ":", "self", ".", "upgrade_on_heroku", "(", ")" ]
34.857143
12.357143
def shutdown(self):
    """Shuts down the scheduler and immediately ends all pending callbacks.
    """
    # Drop all pending items from the executor. Without this, the executor
    # will block until all pending items are complete, which is
    # undesirable.
    try:
        while True:
            self._executor._work_queue.get(block=False)
    except queue.Empty:
        pass
    self._executor.shutdown()
[ "def", "shutdown", "(", "self", ")", ":", "# Drop all pending items from the executor. Without this, the executor", "# will block until all pending items are complete, which is", "# undesirable.", "try", ":", "while", "True", ":", "self", ".", "_executor", ".", "_work_queue", ".", "get", "(", "block", "=", "False", ")", "except", "queue", ".", "Empty", ":", "pass", "self", ".", "_executor", ".", "shutdown", "(", ")" ]
37
17.583333
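The drain idiom on its own, with a plain queue standing in for the executor's private _work_queue:

import queue

q = queue.Queue()
for i in range(3):
    q.put(i)
try:
    while True:          # pull until the queue raises Empty
        q.get(block=False)
except queue.Empty:
    pass
q.qsize()  # 0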
def RemoveMultiLineCommentsFromRange(lines, begin, end): """Clears a range of lines for multi-line comments.""" # Having // dummy comments makes the lines non-empty, so we will not get # unnecessary blank line warnings later in the code. for i in range(begin, end): lines[i] = '/**/'
[ "def", "RemoveMultiLineCommentsFromRange", "(", "lines", ",", "begin", ",", "end", ")", ":", "# Having // dummy comments makes the lines non-empty, so we will not get", "# unnecessary blank line warnings later in the code.", "for", "i", "in", "range", "(", "begin", ",", "end", ")", ":", "lines", "[", "i", "]", "=", "'/**/'" ]
48.333333
15.666667
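In-place effect on a small buffer:

lines = ['int x;', '/* start', 'middle', 'end */', 'int y;']
RemoveMultiLineCommentsFromRange(lines, 1, 4)
lines  # ['int x;', '/**/', '/**/', '/**/', 'int y;']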
def _from_nested_schema(self, obj, field): """Support nested field.""" if isinstance(field.nested, basestring): nested = get_class(field.nested) else: nested = field.nested name = nested.__name__ outer_name = obj.__class__.__name__ only = field.only exclude = field.exclude # If this is not a schema we've seen, and it's not this schema, # put it in our list of schema defs if name not in self._nested_schema_classes and name != outer_name: wrapped_nested = self.__class__(nested=True) wrapped_dumped = wrapped_nested.dump( nested(only=only, exclude=exclude) ) # Handle change in return value type between Marshmallow # versions 2 and 3. if marshmallow.__version__.split('.', 1)[0] >= '3': self._nested_schema_classes[name] = wrapped_dumped else: self._nested_schema_classes[name] = wrapped_dumped.data self._nested_schema_classes.update( wrapped_nested._nested_schema_classes ) # and the schema is just a reference to the def schema = { 'type': 'object', '$ref': '#/definitions/{}'.format(name) } # NOTE: doubled up to maintain backwards compatibility metadata = field.metadata.get('metadata', {}) metadata.update(field.metadata) for md_key, md_val in metadata.items(): if md_key == 'metadata': continue schema[md_key] = md_val if field.many: schema = { 'type': ["array"] if field.required else ['array', 'null'], 'items': schema, } return schema
[ "def", "_from_nested_schema", "(", "self", ",", "obj", ",", "field", ")", ":", "if", "isinstance", "(", "field", ".", "nested", ",", "basestring", ")", ":", "nested", "=", "get_class", "(", "field", ".", "nested", ")", "else", ":", "nested", "=", "field", ".", "nested", "name", "=", "nested", ".", "__name__", "outer_name", "=", "obj", ".", "__class__", ".", "__name__", "only", "=", "field", ".", "only", "exclude", "=", "field", ".", "exclude", "# If this is not a schema we've seen, and it's not this schema,", "# put it in our list of schema defs", "if", "name", "not", "in", "self", ".", "_nested_schema_classes", "and", "name", "!=", "outer_name", ":", "wrapped_nested", "=", "self", ".", "__class__", "(", "nested", "=", "True", ")", "wrapped_dumped", "=", "wrapped_nested", ".", "dump", "(", "nested", "(", "only", "=", "only", ",", "exclude", "=", "exclude", ")", ")", "# Handle change in return value type between Marshmallow", "# versions 2 and 3.", "if", "marshmallow", ".", "__version__", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", ">=", "'3'", ":", "self", ".", "_nested_schema_classes", "[", "name", "]", "=", "wrapped_dumped", "else", ":", "self", ".", "_nested_schema_classes", "[", "name", "]", "=", "wrapped_dumped", ".", "data", "self", ".", "_nested_schema_classes", ".", "update", "(", "wrapped_nested", ".", "_nested_schema_classes", ")", "# and the schema is just a reference to the def", "schema", "=", "{", "'type'", ":", "'object'", ",", "'$ref'", ":", "'#/definitions/{}'", ".", "format", "(", "name", ")", "}", "# NOTE: doubled up to maintain backwards compatibility", "metadata", "=", "field", ".", "metadata", ".", "get", "(", "'metadata'", ",", "{", "}", ")", "metadata", ".", "update", "(", "field", ".", "metadata", ")", "for", "md_key", ",", "md_val", "in", "metadata", ".", "items", "(", ")", ":", "if", "md_key", "==", "'metadata'", ":", "continue", "schema", "[", "md_key", "]", "=", "md_val", "if", "field", ".", "many", ":", "schema", "=", "{", "'type'", ":", "[", "\"array\"", "]", "if", "field", ".", "required", "else", "[", "'array'", ",", "'null'", "]", ",", "'items'", ":", "schema", ",", "}", "return", "schema" ]
34.076923
19.326923
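The shape of the returned fragment for a required, many=True nested field whose schema class is (hypothetically) named AddressSchema; any extra field metadata is merged into the inner object before the array wrapping:

{
    'type': ['array'],
    'items': {'type': 'object', '$ref': '#/definitions/AddressSchema'},
}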
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None, timeout=None): """ Delete an object. """ # We could detect quorum_controls here but HTTP ignores # unknown flags/params. params = {'rw': rw, 'r': r, 'w': w, 'dw': dw, 'pr': pr, 'pw': pw, 'timeout': timeout} headers = {} bucket_type = self._get_bucket_type(robj.bucket.bucket_type) url = self.object_path(robj.bucket.name, robj.key, bucket_type=bucket_type, **params) use_vclocks = (self.tombstone_vclocks() and hasattr(robj, 'vclock') and robj.vclock is not None) if use_vclocks: headers['X-Riak-Vclock'] = robj.vclock.encode('base64') response = self._request('DELETE', url, headers) self.check_http_code(response[0], [204, 404]) return self
[ "def", "delete", "(", "self", ",", "robj", ",", "rw", "=", "None", ",", "r", "=", "None", ",", "w", "=", "None", ",", "dw", "=", "None", ",", "pr", "=", "None", ",", "pw", "=", "None", ",", "timeout", "=", "None", ")", ":", "# We could detect quorum_controls here but HTTP ignores", "# unknown flags/params.", "params", "=", "{", "'rw'", ":", "rw", ",", "'r'", ":", "r", ",", "'w'", ":", "w", ",", "'dw'", ":", "dw", ",", "'pr'", ":", "pr", ",", "'pw'", ":", "pw", ",", "'timeout'", ":", "timeout", "}", "headers", "=", "{", "}", "bucket_type", "=", "self", ".", "_get_bucket_type", "(", "robj", ".", "bucket", ".", "bucket_type", ")", "url", "=", "self", ".", "object_path", "(", "robj", ".", "bucket", ".", "name", ",", "robj", ".", "key", ",", "bucket_type", "=", "bucket_type", ",", "*", "*", "params", ")", "use_vclocks", "=", "(", "self", ".", "tombstone_vclocks", "(", ")", "and", "hasattr", "(", "robj", ",", "'vclock'", ")", "and", "robj", ".", "vclock", "is", "not", "None", ")", "if", "use_vclocks", ":", "headers", "[", "'X-Riak-Vclock'", "]", "=", "robj", ".", "vclock", ".", "encode", "(", "'base64'", ")", "response", "=", "self", ".", "_request", "(", "'DELETE'", ",", "url", ",", "headers", ")", "self", ".", "check_http_code", "(", "response", "[", "0", "]", ",", "[", "204", ",", "404", "]", ")", "return", "self" ]
41.318182
19.954545
def current_time_is_in_interval(start, end): """ Determine whether the current time is on the interval [start, end]. """ interval_start = parse_lms_api_datetime(start or UNIX_MIN_DATE_STRING) interval_end = parse_lms_api_datetime(end or UNIX_MAX_DATE_STRING) return interval_start <= timezone.now() <= interval_end
[ "def", "current_time_is_in_interval", "(", "start", ",", "end", ")", ":", "interval_start", "=", "parse_lms_api_datetime", "(", "start", "or", "UNIX_MIN_DATE_STRING", ")", "interval_end", "=", "parse_lms_api_datetime", "(", "end", "or", "UNIX_MAX_DATE_STRING", ")", "return", "interval_start", "<=", "timezone", ".", "now", "(", ")", "<=", "interval_end" ]
47.428571
16.857143
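Usage sketch; None bounds fall back to the UNIX min/max sentinels, and the timestamp format is whatever parse_lms_api_datetime accepts (ISO 8601 assumed here):

current_time_is_in_interval(None, '3000-01-01T00:00:00Z')  # True: open start
current_time_is_in_interval('3000-01-01T00:00:00Z', None)  # False: starts later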
def rn_theory(af, b): """ R(n) ratio expected from theory for given noise type alpha = b + 2 """ # From IEEE1139-2008 # alpha beta ADEV_mu MDEV_mu Rn_mu # -2 -4 1 1 0 Random Walk FM # -1 -3 0 0 0 Flicker FM # 0 -2 -1 -1 0 White FM # 1 -1 -2 -2 0 Flicker PM # 2 0 -2 -3 -1 White PM # (a=-3 flicker walk FM) # (a=-4 random run FM) if b==0: return pow(af,-1) elif b==-1: # f_h = 0.5/tau0 (assumed!) # af = tau/tau0 # so f_h*tau = 0.5/tau0 * af*tau0 = 0.5*af avar = (1.038+3*np.log(2*np.pi*0.5*af)) / (4.0*pow(np.pi,2)) mvar = 3*np.log(256.0/27.0)/(8.0*pow(np.pi,2)) return mvar/avar else: return pow(af,0)
[ "def", "rn_theory", "(", "af", ",", "b", ")", ":", "# From IEEE1139-2008", "# alpha beta ADEV_mu MDEV_mu Rn_mu", "# -2 -4 1 1 0 Random Walk FM", "# -1 -3 0 0 0 Flicker FM", "# 0 -2 -1 -1 0 White FM", "# 1 -1 -2 -2 0 Flicker PM", "# 2 0 -2 -3 -1 White PM", "# (a=-3 flicker walk FM)", "# (a=-4 random run FM)", "if", "b", "==", "0", ":", "return", "pow", "(", "af", ",", "-", "1", ")", "elif", "b", "==", "-", "1", ":", "# f_h = 0.5/tau0 (assumed!)", "# af = tau/tau0", "# so f_h*tau = 0.5/tau0 * af*tau0 = 0.5*af", "avar", "=", "(", "1.038", "+", "3", "*", "np", ".", "log", "(", "2", "*", "np", ".", "pi", "*", "0.5", "*", "af", ")", ")", "/", "(", "4.0", "*", "pow", "(", "np", ".", "pi", ",", "2", ")", ")", "mvar", "=", "3", "*", "np", ".", "log", "(", "256.0", "/", "27.0", ")", "/", "(", "8.0", "*", "pow", "(", "np", ".", "pi", ",", "2", ")", ")", "return", "mvar", "/", "avar", "else", ":", "return", "pow", "(", "af", ",", "0", ")" ]
33.461538
17.346154
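Quick checks of the three branches, with the noise types read off the docstring table:

rn_theory(4, 0)    # white PM (beta = 0): pow(4, -1) == 0.25
rn_theory(4, -1)   # flicker PM: log-corrected mvar/avar ratio
rn_theory(4, -3)   # flicker FM and everything else here: pow(4, 0) == 1.0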
def data(self, index, role=Qt.DisplayRole): """return data depending on index, Qt::ItemDataRole and data type of the column. Args: index (QtCore.QModelIndex): Index to define column and row you want to return role (Qt::ItemDataRole): Define which data you want to return. Returns: None if index is invalid None if role is none of: DisplayRole, EditRole, CheckStateRole, DATAFRAME_ROLE if role DisplayRole: unmodified _dataFrame value if column dtype is object (string or unicode). _dataFrame value as int or long if column dtype is in _intDtypes. _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision). None if column dtype is in _boolDtypes. QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template. if role EditRole: unmodified _dataFrame value if column dtype is object (string or unicode). _dataFrame value as int or long if column dtype is in _intDtypes. _dataFrame value as float if column dtype is in _floatDtypes. Rounds to defined precision (look at: _float16_precision, _float32_precision). _dataFrame value as bool if column dtype is in _boolDtypes. QDateTime if column dtype is numpy.timestamp64[ns]. Uses timestampFormat as conversion template. if role CheckStateRole: Qt.Checked or Qt.Unchecked if dtype is numpy.bool_ otherwise None for all other dtypes. if role DATAFRAME_ROLE: unmodified _dataFrame value. raises TypeError if an unhandled dtype is found in column. """ if not index.isValid(): return None def convertValue(row, col, columnDtype): value = None if columnDtype == object: value = self._dataFrame.ix[row, col] elif columnDtype in self._floatDtypes: value = round(float(self._dataFrame.ix[row, col]), self._float_precisions[str(columnDtype)]) elif columnDtype in self._intDtypes: value = int(self._dataFrame.ix[row, col]) elif columnDtype in self._boolDtypes: # TODO this will most likely always be true # See: http://stackoverflow.com/a/715455 # well no: I am mistaken here, the data is already in the dataframe # so its already converted to a bool value = bool(self._dataFrame.ix[row, col]) elif columnDtype in self._dateDtypes: #print numpy.datetime64(self._dataFrame.ix[row, col]) value = pandas.Timestamp(self._dataFrame.ix[row, col]) value = QtCore.QDateTime.fromString(str(value), self.timestampFormat) #print value # else: # raise TypeError, "returning unhandled data type" return value row = self._dataFrame.index[index.row()] col = self._dataFrame.columns[index.column()] columnDtype = self._dataFrame[col].dtype if role == Qt.DisplayRole: # return the value if you wanne show True/False as text if columnDtype == numpy.bool: result = self._dataFrame.ix[row, col] else: result = convertValue(row, col, columnDtype) elif role == Qt.EditRole: result = convertValue(row, col, columnDtype) elif role == Qt.CheckStateRole: if columnDtype == numpy.bool_: if convertValue(row, col, columnDtype): result = Qt.Checked else: result = Qt.Unchecked else: result = None elif role == DATAFRAME_ROLE: result = self._dataFrame.ix[row, col] else: result = None return result
[ "def", "data", "(", "self", ",", "index", ",", "role", "=", "Qt", ".", "DisplayRole", ")", ":", "if", "not", "index", ".", "isValid", "(", ")", ":", "return", "None", "def", "convertValue", "(", "row", ",", "col", ",", "columnDtype", ")", ":", "value", "=", "None", "if", "columnDtype", "==", "object", ":", "value", "=", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", "elif", "columnDtype", "in", "self", ".", "_floatDtypes", ":", "value", "=", "round", "(", "float", "(", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", ")", ",", "self", ".", "_float_precisions", "[", "str", "(", "columnDtype", ")", "]", ")", "elif", "columnDtype", "in", "self", ".", "_intDtypes", ":", "value", "=", "int", "(", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", ")", "elif", "columnDtype", "in", "self", ".", "_boolDtypes", ":", "# TODO this will most likely always be true", "# See: http://stackoverflow.com/a/715455", "# well no: I am mistaken here, the data is already in the dataframe", "# so its already converted to a bool", "value", "=", "bool", "(", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", ")", "elif", "columnDtype", "in", "self", ".", "_dateDtypes", ":", "#print numpy.datetime64(self._dataFrame.ix[row, col])", "value", "=", "pandas", ".", "Timestamp", "(", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", ")", "value", "=", "QtCore", ".", "QDateTime", ".", "fromString", "(", "str", "(", "value", ")", ",", "self", ".", "timestampFormat", ")", "#print value", "# else:", "# raise TypeError, \"returning unhandled data type\"", "return", "value", "row", "=", "self", ".", "_dataFrame", ".", "index", "[", "index", ".", "row", "(", ")", "]", "col", "=", "self", ".", "_dataFrame", ".", "columns", "[", "index", ".", "column", "(", ")", "]", "columnDtype", "=", "self", ".", "_dataFrame", "[", "col", "]", ".", "dtype", "if", "role", "==", "Qt", ".", "DisplayRole", ":", "# return the value if you wanne show True/False as text", "if", "columnDtype", "==", "numpy", ".", "bool", ":", "result", "=", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", "else", ":", "result", "=", "convertValue", "(", "row", ",", "col", ",", "columnDtype", ")", "elif", "role", "==", "Qt", ".", "EditRole", ":", "result", "=", "convertValue", "(", "row", ",", "col", ",", "columnDtype", ")", "elif", "role", "==", "Qt", ".", "CheckStateRole", ":", "if", "columnDtype", "==", "numpy", ".", "bool_", ":", "if", "convertValue", "(", "row", ",", "col", ",", "columnDtype", ")", ":", "result", "=", "Qt", ".", "Checked", "else", ":", "result", "=", "Qt", ".", "Unchecked", "else", ":", "result", "=", "None", "elif", "role", "==", "DATAFRAME_ROLE", ":", "result", "=", "self", ".", "_dataFrame", ".", "ix", "[", "row", ",", "col", "]", "else", ":", "result", "=", "None", "return", "result" ]
46.511628
25.174419