text: string (lengths 89 to 104k)
code_tokens: list
avg_line_len: float64 (range 7.91 to 980)
score: float64 (range 0 to 630)
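Each record below holds one Python function (text), its token sequence (code_tokens), and the two numeric columns. avg_line_len is derivable from text, which makes it a handy sanity check when loading the dump. A minimal sketch, assuming the records sit in a hypothetical JSON-lines file and that the metric is the mean character count of non-empty lines; the preview flattens newlines, but the stored text fields keep them, and how score is produced is not reconstructed here:

    import json

    def avg_line_len(source: str) -> float:
        # mean character length over the non-empty lines of a snippet
        lines = [line for line in source.splitlines() if line.strip()]
        return sum(len(line) for line in lines) / len(lines)

    with open("code_records.jsonl") as fp:  # hypothetical dump of this dataset
        for row in map(json.loads, fp):
            print(row["avg_line_len"], round(avg_line_len(row["text"]), 6))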
def hardware_info(): """ Returns basic hardware information about the computer. Gives the actual number of CPUs in the machine, even when hyperthreading is turned on. Returns ------- info : dict Dictionary containing cpu and memory information. """ try: if sys.platform == 'darwin': out = _mac_hardware_info() elif sys.platform == 'win32': out = _win_hardware_info() elif sys.platform in ['linux', 'linux2']: out = _linux_hardware_info() else: out = {} except: return {} else: return out
[ "def", "hardware_info", "(", ")", ":", "try", ":", "if", "sys", ".", "platform", "==", "'darwin'", ":", "out", "=", "_mac_hardware_info", "(", ")", "elif", "sys", ".", "platform", "==", "'win32'", ":", "out", "=", "_win_hardware_info", "(", ")", "elif", "sys", ".", "platform", "in", "[", "'linux'", ",", "'linux2'", "]", ":", "out", "=", "_linux_hardware_info", "(", ")", "else", ":", "out", "=", "{", "}", "except", ":", "return", "{", "}", "else", ":", "return", "out" ]
23.461538
20.153846
def autodiscover_siteprefs(admin_site=None): """Automatically discovers and registers all preferences available in all apps. :param admin.AdminSite admin_site: Custom AdminSite object. """ if admin_site is None: admin_site = admin.site # Do not discover anything if called from manage.py (e.g. executing commands from cli). if 'manage' not in sys.argv[0] or (len(sys.argv) > 1 and sys.argv[1] in MANAGE_SAFE_COMMANDS): import_prefs() Preference.read_prefs(get_prefs()) register_admin_models(admin_site)
[ "def", "autodiscover_siteprefs", "(", "admin_site", "=", "None", ")", ":", "if", "admin_site", "is", "None", ":", "admin_site", "=", "admin", ".", "site", "# Do not discover anything if called from manage.py (e.g. executing commands from cli).", "if", "'manage'", "not", "in", "sys", ".", "argv", "[", "0", "]", "or", "(", "len", "(", "sys", ".", "argv", ")", ">", "1", "and", "sys", ".", "argv", "[", "1", "]", "in", "MANAGE_SAFE_COMMANDS", ")", ":", "import_prefs", "(", ")", "Preference", ".", "read_prefs", "(", "get_prefs", "(", ")", ")", "register_admin_models", "(", "admin_site", ")" ]
39.142857
21.428571
def load(cls, database, doc_id): """Load a specific document from the given database. :param database: the `Database` object to retrieve the document from :param doc_id: the document ID :return: the `Document` instance, or `None` if no document with the given ID was found """ doc = database.get(doc_id) if doc is None: return None return cls.wrap(doc)
[ "def", "load", "(", "cls", ",", "database", ",", "doc_id", ")", ":", "doc", "=", "database", ".", "get", "(", "doc_id", ")", "if", "doc", "is", "None", ":", "return", "None", "return", "cls", ".", "wrap", "(", "doc", ")" ]
36.25
14.833333
def __process_by_python(self): """! @brief Performs cluster analysis using python code. """ maximum_change = float('inf') iteration = 0 if self.__observer is not None: initial_clusters = self.__update_clusters() self.__observer.notify(initial_clusters, self.__centers.tolist()) while maximum_change > self.__tolerance and iteration < self.__itermax: self.__clusters = self.__update_clusters() updated_centers = self.__update_centers() # changes should be calculated before assignment if self.__observer is not None: self.__observer.notify(self.__clusters, updated_centers.tolist()) maximum_change = self.__calculate_changes(updated_centers) self.__centers = updated_centers # assign center after change calculation iteration += 1 self.__calculate_total_wce()
[ "def", "__process_by_python", "(", "self", ")", ":", "maximum_change", "=", "float", "(", "'inf'", ")", "iteration", "=", "0", "if", "self", ".", "__observer", "is", "not", "None", ":", "initial_clusters", "=", "self", ".", "__update_clusters", "(", ")", "self", ".", "__observer", ".", "notify", "(", "initial_clusters", ",", "self", ".", "__centers", ".", "tolist", "(", ")", ")", "while", "maximum_change", ">", "self", ".", "__tolerance", "and", "iteration", "<", "self", ".", "__itermax", ":", "self", ".", "__clusters", "=", "self", ".", "__update_clusters", "(", ")", "updated_centers", "=", "self", ".", "__update_centers", "(", ")", "# changes should be calculated before assignment\r", "if", "self", ".", "__observer", "is", "not", "None", ":", "self", ".", "__observer", ".", "notify", "(", "self", ".", "__clusters", ",", "updated_centers", ".", "tolist", "(", ")", ")", "maximum_change", "=", "self", ".", "__calculate_changes", "(", "updated_centers", ")", "self", ".", "__centers", "=", "updated_centers", "# assign center after change calculation\r", "iteration", "+=", "1", "self", ".", "__calculate_total_wce", "(", ")" ]
36.384615
26.038462
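The __process_by_python record above is the classic k-means main loop: reassign points, recompute centers, and stop once the largest center movement falls below a tolerance or an iteration cap is hit; note its comment that the change must be measured before the centers are overwritten. A self-contained NumPy sketch of the same loop, illustrative only and not the library's implementation (the observer hook and WCE bookkeeping are dropped):

    import numpy as np

    def kmeans(points, centers, tolerance=0.001, itermax=100):
        points = np.asarray(points, dtype=float)
        centers = np.asarray(centers, dtype=float)
        labels = np.zeros(len(points), dtype=int)
        for _ in range(itermax):
            # cluster update: index of the nearest center for every point
            dists = np.linalg.norm(points[:, None, :] - centers[None, :, :], axis=2)
            labels = dists.argmin(axis=1)
            # center update; an empty cluster keeps its previous center
            updated = np.array([points[labels == i].mean(axis=0)
                                if np.any(labels == i) else centers[i]
                                for i in range(len(centers))])
            # measure the change before overwriting the centers
            change = np.linalg.norm(updated - centers, axis=1).max()
            centers = updated
            if change < tolerance:
                break
        return centers, labels

    pts = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [5.2, 4.9]])
    print(kmeans(pts, [[0.0, 0.0], [5.0, 5.0]]))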
def read_all(self, n, check_rekey=False): """ Read as close to N bytes as possible, blocking as long as necessary. @param n: number of bytes to read @type n: int @return: the data read @rtype: str @raise EOFError: if the socket was closed before all the bytes could be read """ out = '' # handle over-reading from reading the banner line if len(self.__remainder) > 0: out = self.__remainder[:n] self.__remainder = self.__remainder[n:] n -= len(out) if PY22: return self._py22_read_all(n, out) while n > 0: got_timeout = False try: x = self.__socket.recv(n) if len(x) == 0: raise EOFError() out += x n -= len(x) except socket.timeout: got_timeout = True except socket.error, e: # on Linux, sometimes instead of socket.timeout, we get # EAGAIN. this is a bug in recent (> 2.6.9) kernels but # we need to work around it. if (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EAGAIN): got_timeout = True elif (type(e.args) is tuple) and (len(e.args) > 0) and (e.args[0] == errno.EINTR): # syscall interrupted; try again pass elif self.__closed: raise EOFError() else: raise if got_timeout: if self.__closed: raise EOFError() if check_rekey and (len(out) == 0) and self.__need_rekey: raise NeedRekeyException() self._check_keepalive() return out
[ "def", "read_all", "(", "self", ",", "n", ",", "check_rekey", "=", "False", ")", ":", "out", "=", "''", "# handle over-reading from reading the banner line", "if", "len", "(", "self", ".", "__remainder", ")", ">", "0", ":", "out", "=", "self", ".", "__remainder", "[", ":", "n", "]", "self", ".", "__remainder", "=", "self", ".", "__remainder", "[", "n", ":", "]", "n", "-=", "len", "(", "out", ")", "if", "PY22", ":", "return", "self", ".", "_py22_read_all", "(", "n", ",", "out", ")", "while", "n", ">", "0", ":", "got_timeout", "=", "False", "try", ":", "x", "=", "self", ".", "__socket", ".", "recv", "(", "n", ")", "if", "len", "(", "x", ")", "==", "0", ":", "raise", "EOFError", "(", ")", "out", "+=", "x", "n", "-=", "len", "(", "x", ")", "except", "socket", ".", "timeout", ":", "got_timeout", "=", "True", "except", "socket", ".", "error", ",", "e", ":", "# on Linux, sometimes instead of socket.timeout, we get", "# EAGAIN. this is a bug in recent (> 2.6.9) kernels but", "# we need to work around it.", "if", "(", "type", "(", "e", ".", "args", ")", "is", "tuple", ")", "and", "(", "len", "(", "e", ".", "args", ")", ">", "0", ")", "and", "(", "e", ".", "args", "[", "0", "]", "==", "errno", ".", "EAGAIN", ")", ":", "got_timeout", "=", "True", "elif", "(", "type", "(", "e", ".", "args", ")", "is", "tuple", ")", "and", "(", "len", "(", "e", ".", "args", ")", ">", "0", ")", "and", "(", "e", ".", "args", "[", "0", "]", "==", "errno", ".", "EINTR", ")", ":", "# syscall interrupted; try again", "pass", "elif", "self", ".", "__closed", ":", "raise", "EOFError", "(", ")", "else", ":", "raise", "if", "got_timeout", ":", "if", "self", ".", "__closed", ":", "raise", "EOFError", "(", ")", "if", "check_rekey", "and", "(", "len", "(", "out", ")", "==", "0", ")", "and", "self", ".", "__need_rekey", ":", "raise", "NeedRekeyException", "(", ")", "self", ".", "_check_keepalive", "(", ")", "return", "out" ]
37.653061
15.163265
def cond_init(m:nn.Module, init_func:LayerFunc): "Initialize the non-batchnorm layers of `m` with `init_func`." if (not isinstance(m, bn_types)) and requires_grad(m): init_default(m, init_func)
[ "def", "cond_init", "(", "m", ":", "nn", ".", "Module", ",", "init_func", ":", "LayerFunc", ")", ":", "if", "(", "not", "isinstance", "(", "m", ",", "bn_types", ")", ")", "and", "requires_grad", "(", "m", ")", ":", "init_default", "(", "m", ",", "init_func", ")" ]
66.333333
26.333333
def ssd(p1, p2): """Calculates motif position similarity based on sum of squared distances. Parameters ---------- p1 : list Motif position 1. p2 : list Motif position 2. Returns ------- score : float """ return 2 - np.sum([(a-b)**2 for a,b in zip(p1,p2)])
[ "def", "ssd", "(", "p1", ",", "p2", ")", ":", "return", "2", "-", "np", ".", "sum", "(", "[", "(", "a", "-", "b", ")", "**", "2", "for", "a", ",", "b", "in", "zip", "(", "p1", ",", "p2", ")", "]", ")" ]
19.4375
22.75
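The ssd record above reduces to score = 2 - sum((a - b)^2) over the two position vectors, so for probability columns the score runs from 0 (disjoint one-hot columns) to 2 (identical columns). A quick check of that range, re-using the function exactly as defined in the record:

    import numpy as np

    def ssd(p1, p2):
        # score = 2 minus the sum of squared distances between the positions
        return 2 - np.sum([(a - b) ** 2 for a, b in zip(p1, p2)])

    print(ssd([1, 0, 0, 0], [1, 0, 0, 0]))  # 2 (identical columns)
    print(ssd([1, 0, 0, 0], [0, 1, 0, 0]))  # 0 (disjoint one-hot columns)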
def query_ehs( self, data: Optional[pd.DataFrame] = None, failure_mode: str = "warning", progressbar: Optional[Callable[[Iterable], Iterable]] = None, ) -> "Flight": """Extend data with extra columns from EHS messages. By default, raw messages are requested from the OpenSky Impala server. Making a lot of small requests can be very inefficient and may look like a denial of service. If you get the raw messages using a different channel, you can provide the resulting dataframe as a parameter. The data parameter expects three columns: icao24, rawmsg and mintime, in conformance with the OpenSky API. """ from ..data import opensky, ModeS_Decoder if not isinstance(self.icao24, str): raise RuntimeError("Several icao24 for this flight") def fail_warning(): """Called when nothing can be added to data.""" id_ = self.flight_id if id_ is None: id_ = self.callsign logging.warn(f"No data on Impala for flight {id_}.") return self def fail_silent(): return self failure_dict = dict(warning=fail_warning, silent=fail_silent) failure = failure_dict[failure_mode] if data is None: df = opensky.extended(self.start, self.stop, icao24=self.icao24) else: df = data.query("icao24 == @self.icao24").sort_values("mintime") if df is None: return failure() timestamped_df = df.sort_values("mintime").assign( timestamp=lambda df: df.mintime.dt.round("s") ) referenced_df = ( timestamped_df.merge(self.data, on="timestamp", how="outer") .sort_values("timestamp") .rename( columns=dict( altitude="alt", altitude_y="alt", groundspeed="spd", track="trk", ) )[["timestamp", "alt", "spd", "trk"]] .ffill() .drop_duplicates() # bugfix! NEVER ERASE THAT LINE! .merge( timestamped_df[["timestamp", "icao24", "rawmsg"]], on="timestamp", how="right", ) ) identifier = ( self.flight_id if self.flight_id is not None else self.callsign ) # who cares about default lat0, lon0 with EHS decoder = ModeS_Decoder((0, 0)) if progressbar is None: progressbar = lambda x: tqdm( # noqa: E731 x, total=referenced_df.shape[0], desc=f"{identifier}:", leave=False, ) for _, line in progressbar(referenced_df.iterrows()): decoder.process( line.timestamp, line.rawmsg, spd=line.spd, trk=line.trk, alt=line.alt, ) if decoder.traffic is None: return failure() extended = decoder.traffic[self.icao24] if extended is None: return failure() # fix for https://stackoverflow.com/q/53657210/1595335 if "last_position" in self.data.columns: extended = extended.assign(last_position=pd.NaT) if "start" in self.data.columns: extended = extended.assign(start=pd.NaT) if "stop" in self.data.columns: extended = extended.assign(stop=pd.NaT) t = extended + self if "flight_id" in self.data.columns: t.data.flight_id = self.flight_id # sometimes weird callsigns are decoded and should be discarded # so it seems better to filter on callsign rather than on icao24 flight = t[self.callsign] if flight is None: return failure() return flight.sort_values("timestamp")
[ "def", "query_ehs", "(", "self", ",", "data", ":", "Optional", "[", "pd", ".", "DataFrame", "]", "=", "None", ",", "failure_mode", ":", "str", "=", "\"warning\"", ",", "progressbar", ":", "Optional", "[", "Callable", "[", "[", "Iterable", "]", ",", "Iterable", "]", "]", "=", "None", ",", ")", "->", "\"Flight\"", ":", "from", ".", ".", "data", "import", "opensky", ",", "ModeS_Decoder", "if", "not", "isinstance", "(", "self", ".", "icao24", ",", "str", ")", ":", "raise", "RuntimeError", "(", "\"Several icao24 for this flight\"", ")", "def", "fail_warning", "(", ")", ":", "\"\"\"Called when nothing can be added to data.\"\"\"", "id_", "=", "self", ".", "flight_id", "if", "id_", "is", "None", ":", "id_", "=", "self", ".", "callsign", "logging", ".", "warn", "(", "f\"No data on Impala for flight {id_}.\"", ")", "return", "self", "def", "fail_silent", "(", ")", ":", "return", "self", "failure_dict", "=", "dict", "(", "warning", "=", "fail_warning", ",", "silent", "=", "fail_silent", ")", "failure", "=", "failure_dict", "[", "failure_mode", "]", "if", "data", "is", "None", ":", "df", "=", "opensky", ".", "extended", "(", "self", ".", "start", ",", "self", ".", "stop", ",", "icao24", "=", "self", ".", "icao24", ")", "else", ":", "df", "=", "data", ".", "query", "(", "\"icao24 == @self.icao24\"", ")", ".", "sort_values", "(", "\"mintime\"", ")", "if", "df", "is", "None", ":", "return", "failure", "(", ")", "timestamped_df", "=", "df", ".", "sort_values", "(", "\"mintime\"", ")", ".", "assign", "(", "timestamp", "=", "lambda", "df", ":", "df", ".", "mintime", ".", "dt", ".", "round", "(", "\"s\"", ")", ")", "referenced_df", "=", "(", "timestamped_df", ".", "merge", "(", "self", ".", "data", ",", "on", "=", "\"timestamp\"", ",", "how", "=", "\"outer\"", ")", ".", "sort_values", "(", "\"timestamp\"", ")", ".", "rename", "(", "columns", "=", "dict", "(", "altitude", "=", "\"alt\"", ",", "altitude_y", "=", "\"alt\"", ",", "groundspeed", "=", "\"spd\"", ",", "track", "=", "\"trk\"", ",", ")", ")", "[", "[", "\"timestamp\"", ",", "\"alt\"", ",", "\"spd\"", ",", "\"trk\"", "]", "]", ".", "ffill", "(", ")", ".", "drop_duplicates", "(", ")", "# bugfix! 
NEVER ERASE THAT LINE!", ".", "merge", "(", "timestamped_df", "[", "[", "\"timestamp\"", ",", "\"icao24\"", ",", "\"rawmsg\"", "]", "]", ",", "on", "=", "\"timestamp\"", ",", "how", "=", "\"right\"", ",", ")", ")", "identifier", "=", "(", "self", ".", "flight_id", "if", "self", ".", "flight_id", "is", "not", "None", "else", "self", ".", "callsign", ")", "# who cares about default lat0, lon0 with EHS", "decoder", "=", "ModeS_Decoder", "(", "(", "0", ",", "0", ")", ")", "if", "progressbar", "is", "None", ":", "progressbar", "=", "lambda", "x", ":", "tqdm", "(", "# noqa: E731", "x", ",", "total", "=", "referenced_df", ".", "shape", "[", "0", "]", ",", "desc", "=", "f\"{identifier}:\"", ",", "leave", "=", "False", ",", ")", "for", "_", ",", "line", "in", "progressbar", "(", "referenced_df", ".", "iterrows", "(", ")", ")", ":", "decoder", ".", "process", "(", "line", ".", "timestamp", ",", "line", ".", "rawmsg", ",", "spd", "=", "line", ".", "spd", ",", "trk", "=", "line", ".", "trk", ",", "alt", "=", "line", ".", "alt", ",", ")", "if", "decoder", ".", "traffic", "is", "None", ":", "return", "failure", "(", ")", "extended", "=", "decoder", ".", "traffic", "[", "self", ".", "icao24", "]", "if", "extended", "is", "None", ":", "return", "failure", "(", ")", "# fix for https://stackoverflow.com/q/53657210/1595335", "if", "\"last_position\"", "in", "self", ".", "data", ".", "columns", ":", "extended", "=", "extended", ".", "assign", "(", "last_position", "=", "pd", ".", "NaT", ")", "if", "\"start\"", "in", "self", ".", "data", ".", "columns", ":", "extended", "=", "extended", ".", "assign", "(", "start", "=", "pd", ".", "NaT", ")", "if", "\"stop\"", "in", "self", ".", "data", ".", "columns", ":", "extended", "=", "extended", ".", "assign", "(", "stop", "=", "pd", ".", "NaT", ")", "t", "=", "extended", "+", "self", "if", "\"flight_id\"", "in", "self", ".", "data", ".", "columns", ":", "t", ".", "data", ".", "flight_id", "=", "self", ".", "flight_id", "# sometimes weird callsigns are decoded and should be discarded", "# so it seems better to filter on callsign rather than on icao24", "flight", "=", "t", "[", "self", ".", "callsign", "]", "if", "flight", "is", "None", ":", "return", "failure", "(", ")", "return", "flight", ".", "sort_values", "(", "\"timestamp\"", ")" ]
32.754237
20.144068
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None): """ Updates given bookmark. The requested bookmark must belong to the current user. :param bookmark_id: ID of the bookmark to update. :param favorite (optional): Whether this article is favorited or not. :param archive (optional): Whether this article is archived or not. :param read_percent (optional): The read progress made in this article, where 1.0 means the bottom and 0.0 means the very top. """ rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id)) params = {} if favorite is not None: params['favorite'] = 1 if favorite == True else 0 if archive is not None: params['archive'] = 1 if archive == True else 0 if read_percent is not None: try: params['read_percent'] = float(read_percent) except ValueError: pass return self.post(rdb_url, params)
[ "def", "update_bookmark", "(", "self", ",", "bookmark_id", ",", "favorite", "=", "None", ",", "archive", "=", "None", ",", "read_percent", "=", "None", ")", ":", "rdb_url", "=", "self", ".", "_generate_url", "(", "'bookmarks/{0}'", ".", "format", "(", "bookmark_id", ")", ")", "params", "=", "{", "}", "if", "favorite", "is", "not", "None", ":", "params", "[", "'favorite'", "]", "=", "1", "if", "favorite", "==", "True", "else", "0", "if", "archive", "is", "not", "None", ":", "params", "[", "'archive'", "]", "=", "1", "if", "archive", "==", "True", "else", "0", "if", "read_percent", "is", "not", "None", ":", "try", ":", "params", "[", "'read_percent'", "]", "=", "float", "(", "read_percent", ")", "except", "ValueError", ":", "pass", "return", "self", ".", "post", "(", "rdb_url", ",", "params", ")" ]
45
21
def build_link(href, text, cls=None, icon_class=None, **attrs): """Builds an html link. :param href: link for the anchor element :param text: text for the anchor element :param attrs: other attribute kwargs >>> build_link('xyz.com', 'hello', 'big') u'<a href="xyz.com" class="big">hello</a>' >>> build_link('xyz.com', 'hello', 'big', 'fa fa-times') u'<a href="xyz.com" class="big"><i class="fa fa-times"></i> hello</a>' """ return build_html_element(tag='a', text=text, href=href, cls=cls, icon_class=icon_class, **attrs)
[ "def", "build_link", "(", "href", ",", "text", ",", "cls", "=", "None", ",", "icon_class", "=", "None", ",", "*", "*", "attrs", ")", ":", "return", "build_html_element", "(", "tag", "=", "'a'", ",", "text", "=", "text", ",", "href", "=", "href", ",", "cls", "=", "cls", ",", "icon_class", "=", "icon_class", ",", "*", "*", "attrs", ")" ]
38.666667
10.777778
def calc_scene_bbox(self): """Calculate scene bbox""" bbox_min, bbox_max = None, None for node in self.root_nodes: bbox_min, bbox_max = node.calc_global_bbox( matrix44.create_identity(), bbox_min, bbox_max ) self.bbox_min = bbox_min self.bbox_max = bbox_max self.diagonal_size = vector3.length(self.bbox_max - self.bbox_min)
[ "def", "calc_scene_bbox", "(", "self", ")", ":", "bbox_min", ",", "bbox_max", "=", "None", ",", "None", "for", "node", "in", "self", ".", "root_nodes", ":", "bbox_min", ",", "bbox_max", "=", "node", ".", "calc_global_bbox", "(", "matrix44", ".", "create_identity", "(", ")", ",", "bbox_min", ",", "bbox_max", ")", "self", ".", "bbox_min", "=", "bbox_min", "self", ".", "bbox_max", "=", "bbox_max", "self", ".", "diagonal_size", "=", "vector3", ".", "length", "(", "self", ".", "bbox_max", "-", "self", ".", "bbox_min", ")" ]
30.928571
16.071429
def get_grounded_agent(gene_name): """Return a grounded Agent based on an HGNC symbol.""" db_refs = {'TEXT': gene_name} if gene_name in hgnc_map: gene_name = hgnc_map[gene_name] hgnc_id = hgnc_client.get_hgnc_id(gene_name) if hgnc_id: db_refs['HGNC'] = hgnc_id up_id = hgnc_client.get_uniprot_id(hgnc_id) if up_id: db_refs['UP'] = up_id agent = Agent(gene_name, db_refs=db_refs) return agent
[ "def", "get_grounded_agent", "(", "gene_name", ")", ":", "db_refs", "=", "{", "'TEXT'", ":", "gene_name", "}", "if", "gene_name", "in", "hgnc_map", ":", "gene_name", "=", "hgnc_map", "[", "gene_name", "]", "hgnc_id", "=", "hgnc_client", ".", "get_hgnc_id", "(", "gene_name", ")", "if", "hgnc_id", ":", "db_refs", "[", "'HGNC'", "]", "=", "hgnc_id", "up_id", "=", "hgnc_client", ".", "get_uniprot_id", "(", "hgnc_id", ")", "if", "up_id", ":", "db_refs", "[", "'UP'", "]", "=", "up_id", "agent", "=", "Agent", "(", "gene_name", ",", "db_refs", "=", "db_refs", ")", "return", "agent" ]
34.692308
10.384615
def _generate_input(options): """First sends strings from any given file, one string per line, then sends any strings provided on the command line. :param options: ArgumentParser or equivalent to provide options.input and options.strings. :return: string """ if options.input: fp = open(options.input) if options.input != "-" else sys.stdin for string in fp.readlines(): yield string if options.strings: for string in options.strings: yield string
[ "def", "_generate_input", "(", "options", ")", ":", "if", "options", ".", "input", ":", "fp", "=", "open", "(", "options", ".", "input", ")", "if", "options", ".", "input", "!=", "\"-\"", "else", "sys", ".", "stdin", "for", "string", "in", "fp", ".", "readlines", "(", ")", ":", "yield", "string", "if", "options", ".", "strings", ":", "for", "string", "in", "options", ".", "strings", ":", "yield", "string" ]
32
15.125
def delete(self, endpoint, json=None, params=None, **kwargs): """ DELETE from DHIS2 :param endpoint: DHIS2 API endpoint :param json: HTTP payload :param params: HTTP parameters (dict) :return: requests.Response object """ json = kwargs['data'] if 'data' in kwargs else json return self._make_request('delete', endpoint, data=json, params=params)
[ "def", "delete", "(", "self", ",", "endpoint", ",", "json", "=", "None", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "json", "=", "kwargs", "[", "'data'", "]", "if", "'data'", "in", "kwargs", "else", "json", "return", "self", ".", "_make_request", "(", "'delete'", ",", "endpoint", ",", "data", "=", "json", ",", "params", "=", "params", ")" ]
40.8
11
def apply_to(self, db: Union[BaseDB, ABC_Mutable_Mapping], apply_deletes: bool = True) -> None: """ Apply the changes in this diff to the given database. You may choose to opt out of deleting any underlying keys. :param apply_deletes: whether the pending deletes should be applied to the database """ for key, value in self._changes.items(): if value is DELETED: if apply_deletes: try: del db[key] except KeyError: pass else: pass else: db[key] = value
[ "def", "apply_to", "(", "self", ",", "db", ":", "Union", "[", "BaseDB", ",", "ABC_Mutable_Mapping", "]", ",", "apply_deletes", ":", "bool", "=", "True", ")", "->", "None", ":", "for", "key", ",", "value", "in", "self", ".", "_changes", ".", "items", "(", ")", ":", "if", "value", "is", "DELETED", ":", "if", "apply_deletes", ":", "try", ":", "del", "db", "[", "key", "]", "except", "KeyError", ":", "pass", "else", ":", "pass", "else", ":", "db", "[", "key", "]", "=", "value" ]
33.666667
14.142857
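The apply_to record above relies on a sentinel object to mark pending deletions inside the same mapping that holds pending writes. A minimal sketch of that pattern with plain dicts; DELETED, changes, and db here are stand-ins rather than the library's objects, and the apply_deletes switch is dropped for brevity:

    # a unique object that cannot collide with any real stored value
    DELETED = object()

    changes = {"a": 1, "b": DELETED}   # pending writes and deletes in one dict
    db = {"b": 2, "c": 3}

    for key, value in changes.items():
        if value is DELETED:
            db.pop(key, None)          # tolerate keys that were never present
        else:
            db[key] = value

    print(db)  # {'c': 3, 'a': 1}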
def string(name, value, expire=None, expireat=None, **connection_args): ''' Ensure that the key exists in redis with the value specified name Redis key to manage value Data to persist in key expire Sets time to live for key in seconds expireat Sets expiration time for key via UNIX timestamp, overrides `expire` ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Key already set to defined value'} old_key = __salt__['redis.get_key'](name, **connection_args) if old_key != value: __salt__['redis.set_key'](name, value, **connection_args) ret['changes'][name] = 'Value updated' ret['comment'] = 'Key updated to new value' if expireat: __salt__['redis.expireat'](name, expireat, **connection_args) ret['changes']['expireat'] = 'Key expires at {0}'.format(expireat) elif expire: __salt__['redis.expire'](name, expire, **connection_args) ret['changes']['expire'] = 'TTL set to {0} seconds'.format(expire) return ret
[ "def", "string", "(", "name", ",", "value", ",", "expire", "=", "None", ",", "expireat", "=", "None", ",", "*", "*", "connection_args", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "True", ",", "'comment'", ":", "'Key already set to defined value'", "}", "old_key", "=", "__salt__", "[", "'redis.get_key'", "]", "(", "name", ",", "*", "*", "connection_args", ")", "if", "old_key", "!=", "value", ":", "__salt__", "[", "'redis.set_key'", "]", "(", "name", ",", "value", ",", "*", "*", "connection_args", ")", "ret", "[", "'changes'", "]", "[", "name", "]", "=", "'Value updated'", "ret", "[", "'comment'", "]", "=", "'Key updated to new value'", "if", "expireat", ":", "__salt__", "[", "'redis.expireat'", "]", "(", "name", ",", "expireat", ",", "*", "*", "connection_args", ")", "ret", "[", "'changes'", "]", "[", "'expireat'", "]", "=", "'Key expires at {0}'", ".", "format", "(", "expireat", ")", "elif", "expire", ":", "__salt__", "[", "'redis.expire'", "]", "(", "name", ",", "expire", ",", "*", "*", "connection_args", ")", "ret", "[", "'changes'", "]", "[", "'expire'", "]", "=", "'TTL set to {0} seconds'", ".", "format", "(", "expire", ")", "return", "ret" ]
29.833333
26.777778
def base_address(self): """Returns the base address without payment id. :rtype: :class:`Address` """ prefix = 53 if self.is_testnet() else 24 if self.is_stagenet() else 18 data = bytearray([prefix]) + self._decoded[1:65] checksum = keccak_256(data).digest()[:4] return Address(base58.encode(hexlify(data + checksum)))
[ "def", "base_address", "(", "self", ")", ":", "prefix", "=", "53", "if", "self", ".", "is_testnet", "(", ")", "else", "24", "if", "self", ".", "is_stagenet", "(", ")", "else", "18", "data", "=", "bytearray", "(", "[", "prefix", "]", ")", "+", "self", ".", "_decoded", "[", "1", ":", "65", "]", "checksum", "=", "keccak_256", "(", "data", ")", ".", "digest", "(", ")", "[", ":", "4", "]", "return", "Address", "(", "base58", ".", "encode", "(", "hexlify", "(", "data", "+", "checksum", ")", ")", ")" ]
45.75
13.75
def namespace_for_prefix(self, prefix): """Get the namespace the given prefix maps to. Args: prefix (str): The prefix Returns: str: The namespace, or None if the prefix isn't mapped to anything in this set. """ try: ni = self.__lookup_prefix(prefix) except PrefixNotFoundError: return None else: return ni.uri
[ "def", "namespace_for_prefix", "(", "self", ",", "prefix", ")", ":", "try", ":", "ni", "=", "self", ".", "__lookup_prefix", "(", "prefix", ")", "except", "PrefixNotFoundError", ":", "return", "None", "else", ":", "return", "ni", ".", "uri" ]
26.75
16.5625
def parse(cls, parser, text, pos): """Imitates parsing a list grammar. Specifically, this grammar = [ SimpleValueUnit.date_specifiers_regex, SimpleValueUnit.arxiv_token_regex, SimpleValueUnit.token_regex, SimpleValueUnit.parenthesized_token_grammar ]. Parses plaintext which matches date specifiers or arxiv_identifier syntax, or is comprised of either 1) simple terminal (no parentheses) or 2) a parenthesized SimpleValue. For example, "e(+)" will be parsed in two steps, first, "e" token will be recognized and then "(+)", as a parenthesized SimpleValue. """ found = False # Attempt to parse date specifier match = cls.date_specifiers_regex.match(text) if match: remaining_text, token, found = text[len(match.group(0)):], match.group(0), True else: # Attempt to parse arxiv identifier match = cls.arxiv_token_regex.match(text) if match: remaining_text, token, found = text[len(match.group()):], match.group(2), True else: # Attempt to parse a terminal token remaining_text, token = SimpleValueUnit.parse_terminal_token(parser, text) if type(token) != SyntaxError: found = True else: # Attempt to parse a terminal with parentheses try: # Enable parsing a parenthesized terminal so that we can accept {+, -, |} as terminals. parser._parsing_parenthesized_terminal = True remaining_text, token = parser.parse(text, cls.parenthesized_token_grammar, pos) found = True except SyntaxError: pass except GrammarValueError: raise except ValueError: pass finally: parser._parsing_parenthesized_terminal = False if found: result = remaining_text, SimpleValueUnit(token) else: result = text, SyntaxError("expecting match on " + cls.__name__) return result
[ "def", "parse", "(", "cls", ",", "parser", ",", "text", ",", "pos", ")", ":", "found", "=", "False", "# Attempt to parse date specifier", "match", "=", "cls", ".", "date_specifiers_regex", ".", "match", "(", "text", ")", "if", "match", ":", "remaining_text", ",", "token", ",", "found", "=", "text", "[", "len", "(", "match", ".", "group", "(", "0", ")", ")", ":", "]", ",", "match", ".", "group", "(", "0", ")", ",", "True", "else", ":", "# Attempt to parse arxiv identifier", "match", "=", "cls", ".", "arxiv_token_regex", ".", "match", "(", "text", ")", "if", "match", ":", "remaining_text", ",", "token", ",", "found", "=", "text", "[", "len", "(", "match", ".", "group", "(", ")", ")", ":", "]", ",", "match", ".", "group", "(", "2", ")", ",", "True", "else", ":", "# Attempt to parse a terminal token", "remaining_text", ",", "token", "=", "SimpleValueUnit", ".", "parse_terminal_token", "(", "parser", ",", "text", ")", "if", "type", "(", "token", ")", "!=", "SyntaxError", ":", "found", "=", "True", "else", ":", "# Attempt to parse a terminal with parentheses", "try", ":", "# Enable parsing a parenthesized terminal so that we can accept {+, -, |} as terminals.", "parser", ".", "_parsing_parenthesized_terminal", "=", "True", "remaining_text", ",", "token", "=", "parser", ".", "parse", "(", "text", ",", "cls", ".", "parenthesized_token_grammar", ",", "pos", ")", "found", "=", "True", "except", "SyntaxError", ":", "pass", "except", "GrammarValueError", ":", "raise", "except", "ValueError", ":", "pass", "finally", ":", "parser", ".", "_parsing_parenthesized_terminal", "=", "False", "if", "found", ":", "result", "=", "remaining_text", ",", "SimpleValueUnit", "(", "token", ")", "else", ":", "result", "=", "text", ",", "SyntaxError", "(", "\"expecting match on \"", "+", "cls", ".", "__name__", ")", "return", "result" ]
40.642857
23.75
def add_widget(self): """ Adds the Component Widget to the engine. :return: Method success. :rtype: bool """ LOGGER.debug("> Adding '{0}' Component Widget.".format(self.__class__.__name__)) self.__preferences_manager.Others_Preferences_gridLayout.addWidget(self.TCP_Client_Ui_groupBox) return True
[ "def", "add_widget", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"> Adding '{0}' Component Widget.\"", ".", "format", "(", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "__preferences_manager", ".", "Others_Preferences_gridLayout", ".", "addWidget", "(", "self", ".", "TCP_Client_Ui_groupBox", ")", "return", "True" ]
27.153846
26.692308
def __search_ca_path(self): """ Get CA Path to check the validity of the server host certificate on the client side """ if "X509_CERT_DIR" in os.environ: self._ca_path = os.environ['X509_CERT_DIR'] elif os.path.exists('/etc/grid-security/certificates'): self._ca_path = '/etc/grid-security/certificates' else: raise ClientAuthException("Could not find a valid CA path")
[ "def", "__search_ca_path", "(", "self", ")", ":", "if", "\"X509_CERT_DIR\"", "in", "os", ".", "environ", ":", "self", ".", "_ca_path", "=", "os", ".", "environ", "[", "'X509_CERT_DIR'", "]", "elif", "os", ".", "path", ".", "exists", "(", "'/etc/grid-security/certificates'", ")", ":", "self", ".", "_ca_path", "=", "'/etc/grid-security/certificates'", "else", ":", "raise", "ClientAuthException", "(", "\"Could not find a valid CA path\"", ")" ]
37
21.833333
def params_for_label(instruction): """Get the params and format them to add them to a label. None if there are no params or if the params are numpy.ndarrays.""" if not hasattr(instruction.op, 'params'): return None if all([isinstance(param, ndarray) for param in instruction.op.params]): return None ret = [] for param in instruction.op.params: if isinstance(param, (sympy.Number, float)): ret.append('%.5g' % param) else: ret.append('%s' % param) return ret
[ "def", "params_for_label", "(", "instruction", ")", ":", "if", "not", "hasattr", "(", "instruction", ".", "op", ",", "'params'", ")", ":", "return", "None", "if", "all", "(", "[", "isinstance", "(", "param", ",", "ndarray", ")", "for", "param", "in", "instruction", ".", "op", ".", "params", "]", ")", ":", "return", "None", "ret", "=", "[", "]", "for", "param", "in", "instruction", ".", "op", ".", "params", ":", "if", "isinstance", "(", "param", ",", "(", "sympy", ".", "Number", ",", "float", ")", ")", ":", "ret", ".", "append", "(", "'%.5g'", "%", "param", ")", "else", ":", "ret", ".", "append", "(", "'%s'", "%", "param", ")", "return", "ret" ]
36.375
16.1875
def remove_user(self, workspace, params={}, **options): """The user making this call must be an admin in the workspace. Returns an empty data record. Parameters ---------- workspace : {Id} The workspace or organization to remove the user from. [data] : {Object} Data for the request - user : {String} An identifier for the user. Can be one of an email address, the globally unique identifier for the user, or the keyword `me` to indicate the current user making the request. """ path = "/workspaces/%s/removeUser" % (workspace) return self.client.post(path, params, **options)
[ "def", "remove_user", "(", "self", ",", "workspace", ",", "params", "=", "{", "}", ",", "*", "*", "options", ")", ":", "path", "=", "\"/workspaces/%s/removeUser\"", "%", "(", "workspace", ")", "return", "self", ".", "client", ".", "post", "(", "path", ",", "params", ",", "*", "*", "options", ")" ]
47.5
19.785714
def generate_pws_in_order(self, n, filter_func=None, N_max=1e6): """ Generates passwords in order. @N_max is the maximum size of the priority queue that will be tolerated, so if the size of the queue is bigger than 1.5 * N_max, it will shrink the size to 0.75 * N_max @n is the number of passwords to generate. **This function is expensive, and should be called only if necessary. Cache its call as much as possible** # TODO: Need to recheck how to make sure this is working. """ # assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta) states = [(-1.0, helper.START)] # get the topk first p_min = 1e-9 / (n**2) # max 1 million entries in the heap ret = [] done = set() already_added_in_heap = set() while len(ret) < n and len(states) > 0: # while n > 0 and len(states) > 0: p, s = heapq.heappop(states) if p < 0: p = -p if s in done: continue assert s[0] == helper.START, "Broken s: {!r}".format(s) if s[-1] == helper.END: done.add(s) clean_s = s[1:-1] if filter_func is None or filter_func(clean_s): ret.append((clean_s, p)) # n -= 1 # yield (clean_s, p) else: for c, f in self._get_next(s).items(): if (f*p < p_min or (s+c) in done or (s+c) in already_added_in_heap): continue already_added_in_heap.add(s+c) heapq.heappush(states, (-f*p, s+c)) if len(states) > N_max * 3 / 2: print("Heap size: {}. ret={}. (expected: {}) s={!r}" .format(len(states), len(ret), n, s)) print("The size of states={}. Still need={} pws. Truncating" .format(len(states), n - len(ret))) states = heapq.nsmallest(int(N_max * 3/4), states) print("Done") return ret
[ "def", "generate_pws_in_order", "(", "self", ",", "n", ",", "filter_func", "=", "None", ",", "N_max", "=", "1e6", ")", ":", "# assert alpha < beta, 'alpha={} must be less than beta={}'.format(alpha, beta)", "states", "=", "[", "(", "-", "1.0", ",", "helper", ".", "START", ")", "]", "# get the topk first", "p_min", "=", "1e-9", "/", "(", "n", "**", "2", ")", "# max 1 million entries in the heap ", "ret", "=", "[", "]", "done", "=", "set", "(", ")", "already_added_in_heap", "=", "set", "(", ")", "while", "len", "(", "ret", ")", "<", "n", "and", "len", "(", "states", ")", ">", "0", ":", "# while n > 0 and len(states) > 0:", "p", ",", "s", "=", "heapq", ".", "heappop", "(", "states", ")", "if", "p", "<", "0", ":", "p", "=", "-", "p", "if", "s", "in", "done", ":", "continue", "assert", "s", "[", "0", "]", "==", "helper", ".", "START", ",", "\"Broken s: {!r}\"", ".", "format", "(", "s", ")", "if", "s", "[", "-", "1", "]", "==", "helper", ".", "END", ":", "done", ".", "add", "(", "s", ")", "clean_s", "=", "s", "[", "1", ":", "-", "1", "]", "if", "filter_func", "is", "None", "or", "filter_func", "(", "clean_s", ")", ":", "ret", ".", "append", "(", "(", "clean_s", ",", "p", ")", ")", "# n -= 1", "# yield (clean_s, p)", "else", ":", "for", "c", ",", "f", "in", "self", ".", "_get_next", "(", "s", ")", ".", "items", "(", ")", ":", "if", "(", "f", "*", "p", "<", "p_min", "or", "(", "s", "+", "c", ")", "in", "done", "or", "(", "s", "+", "c", ")", "in", "already_added_in_heap", ")", ":", "continue", "already_added_in_heap", ".", "add", "(", "s", "+", "c", ")", "heapq", ".", "heappush", "(", "states", ",", "(", "-", "f", "*", "p", ",", "s", "+", "c", ")", ")", "if", "len", "(", "states", ")", ">", "N_max", "*", "3", "/", "2", ":", "print", "(", "\"Heap size: {}. ret={}. (expected: {}) s={!r}\"", ".", "format", "(", "len", "(", "states", ")", ",", "len", "(", "ret", ")", ",", "n", ",", "s", ")", ")", "print", "(", "\"The size of states={}. Still need={} pws. Truncating\"", ".", "format", "(", "len", "(", "states", ")", ",", "n", "-", "len", "(", "ret", ")", ")", ")", "states", "=", "heapq", ".", "nsmallest", "(", "int", "(", "N_max", "*", "3", "/", "4", ")", ",", "states", ")", "print", "(", "\"Done\"", ")", "return", "ret" ]
45.425532
15.680851
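The generate_pws_in_order record above is a best-first search: candidate strings are pushed onto a heap with negated probabilities so that Python's min-heap always pops the most likely prefix first. A toy version of the same idea over a made-up two-character model, where '' stands in for helper.START and '$' for helper.END; the p_min cutoff, dedup sets, and heap truncation are omitted:

    import heapq

    # invented first-order model: next-char probabilities given the last char
    model = {"": {"a": 0.7, "b": 0.3}, "a": {"$": 1.0}, "b": {"a": 0.6, "$": 0.4}}

    def top_k(k):
        out, heap = [], [(-1.0, "")]
        while heap and len(out) < k:
            p, s = heapq.heappop(heap)       # most probable prefix so far
            if s.endswith("$"):
                out.append((s[:-1], -p))     # terminal: emit the password
                continue
            for c, f in model[s[-1:]].items():
                heapq.heappush(heap, (p * f, s + c))  # p is negative, f > 0
        return out

    print(top_k(3))  # [('a', 0.7), ('ba', 0.18), ('b', 0.12)]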
def worker_stop(obj, worker_ids): """ Stop running workers. \b WORKER_IDS: The IDs of the workers that should be stopped, or none to stop them all. """ if len(worker_ids) == 0: msg = 'Would you like to stop all workers?' else: msg = '\n{}\n\n{}'.format('\n'.join(worker_ids), 'Would you like to stop these workers?') if click.confirm(msg, default=True, abort=True): stop_worker(obj['config'], worker_ids=list(worker_ids) if len(worker_ids) > 0 else None)
[ "def", "worker_stop", "(", "obj", ",", "worker_ids", ")", ":", "if", "len", "(", "worker_ids", ")", "==", "0", ":", "msg", "=", "'Would you like to stop all workers?'", "else", ":", "msg", "=", "'\\n{}\\n\\n{}'", ".", "format", "(", "'\\n'", ".", "join", "(", "worker_ids", ")", ",", "'Would you like to stop these workers?'", ")", "if", "click", ".", "confirm", "(", "msg", ",", "default", "=", "True", ",", "abort", "=", "True", ")", ":", "stop_worker", "(", "obj", "[", "'config'", "]", ",", "worker_ids", "=", "list", "(", "worker_ids", ")", "if", "len", "(", "worker_ids", ")", ">", "0", "else", "None", ")" ]
36.4
22
def addNotice(self, data): """ Add a custom notice to the front-end for this NodeServer. :param data: String of characters to add as a notification in the front-end. """ LOGGER.info('Sending addnotice to Polyglot: {}'.format(data)) message = { 'addnotice': data } self.send(message)
[ "def", "addNotice", "(", "self", ",", "data", ")", ":", "LOGGER", ".", "info", "(", "'Sending addnotice to Polyglot: {}'", ".", "format", "(", "data", ")", ")", "message", "=", "{", "'addnotice'", ":", "data", "}", "self", ".", "send", "(", "message", ")" ]
36.111111
17.888889
def backends(self, back=None): ''' Return the backend list ''' if not back: back = self.opts['fileserver_backend'] else: if not isinstance(back, list): try: back = back.split(',') except AttributeError: back = six.text_type(back).split(',') if isinstance(back, Sequence): # The test suite uses an ImmutableList type (based on # collections.Sequence) for lists, which breaks this function in # the test suite. This normalizes the value from the opts into a # list if it is based on collections.Sequence. back = list(back) ret = [] if not isinstance(back, list): return ret # Avoid error logging when performing lookups in the LazyDict by # instead doing the membership check on the result of a call to its # .keys() attribute rather than on the LazyDict itself. server_funcs = self.servers.keys() try: subtract_only = all((x.startswith('-') for x in back)) except AttributeError: pass else: if subtract_only: # Only subtracting backends from enabled ones ret = self.opts['fileserver_backend'] for sub in back: if '{0}.envs'.format(sub[1:]) in server_funcs: ret.remove(sub[1:]) elif '{0}.envs'.format(sub[1:-2]) in server_funcs: ret.remove(sub[1:-2]) return ret for sub in back: if '{0}.envs'.format(sub) in server_funcs: ret.append(sub) elif '{0}.envs'.format(sub[:-2]) in server_funcs: ret.append(sub[:-2]) return ret
[ "def", "backends", "(", "self", ",", "back", "=", "None", ")", ":", "if", "not", "back", ":", "back", "=", "self", ".", "opts", "[", "'fileserver_backend'", "]", "else", ":", "if", "not", "isinstance", "(", "back", ",", "list", ")", ":", "try", ":", "back", "=", "back", ".", "split", "(", "','", ")", "except", "AttributeError", ":", "back", "=", "six", ".", "text_type", "(", "back", ")", ".", "split", "(", "','", ")", "if", "isinstance", "(", "back", ",", "Sequence", ")", ":", "# The test suite uses an ImmutableList type (based on", "# collections.Sequence) for lists, which breaks this function in", "# the test suite. This normalizes the value from the opts into a", "# list if it is based on collections.Sequence.", "back", "=", "list", "(", "back", ")", "ret", "=", "[", "]", "if", "not", "isinstance", "(", "back", ",", "list", ")", ":", "return", "ret", "# Avoid error logging when performing lookups in the LazyDict by", "# instead doing the membership check on the result of a call to its", "# .keys() attribute rather than on the LazyDict itself.", "server_funcs", "=", "self", ".", "servers", ".", "keys", "(", ")", "try", ":", "subtract_only", "=", "all", "(", "(", "x", ".", "startswith", "(", "'-'", ")", "for", "x", "in", "back", ")", ")", "except", "AttributeError", ":", "pass", "else", ":", "if", "subtract_only", ":", "# Only subtracting backends from enabled ones", "ret", "=", "self", ".", "opts", "[", "'fileserver_backend'", "]", "for", "sub", "in", "back", ":", "if", "'{0}.envs'", ".", "format", "(", "sub", "[", "1", ":", "]", ")", "in", "server_funcs", ":", "ret", ".", "remove", "(", "sub", "[", "1", ":", "]", ")", "elif", "'{0}.envs'", ".", "format", "(", "sub", "[", "1", ":", "-", "2", "]", ")", "in", "server_funcs", ":", "ret", ".", "remove", "(", "sub", "[", "1", ":", "-", "2", "]", ")", "return", "ret", "for", "sub", "in", "back", ":", "if", "'{0}.envs'", ".", "format", "(", "sub", ")", "in", "server_funcs", ":", "ret", ".", "append", "(", "sub", ")", "elif", "'{0}.envs'", ".", "format", "(", "sub", "[", ":", "-", "2", "]", ")", "in", "server_funcs", ":", "ret", ".", "append", "(", "sub", "[", ":", "-", "2", "]", ")", "return", "ret" ]
37.163265
19.040816
def sentry_feature(app): """ Sentry feature Adds basic integration with Sentry via the raven library """ # get keys sentry_public_key = app.config.get('SENTRY_PUBLIC_KEY') sentry_project_id = app.config.get('SENTRY_PROJECT_ID') if not sentry_public_key or not sentry_project_id: return # prepare dsn dsn = 'https://{key}@sentry.io/{project_id}' dsn = dsn.format(key=sentry_public_key, project_id=sentry_project_id) # init sentry sentry.init_app(app=app, dsn=dsn)
[ "def", "sentry_feature", "(", "app", ")", ":", "# get keys", "sentry_public_key", "=", "app", ".", "config", ".", "get", "(", "'SENTRY_PUBLIC_KEY'", ")", "sentry_project_id", "=", "app", ".", "config", ".", "get", "(", "'SENTRY_PROJECT_ID'", ")", "if", "not", "sentry_public_key", "or", "not", "sentry_project_id", ":", "return", "# prepare dsn", "dsn", "=", "'https://{key}@sentry.io/{project_id}'", "dsn", "=", "dsn", ".", "format", "(", "key", "=", "sentry_public_key", ",", "project_id", "=", "sentry_project_id", ")", "# init sentry", "sentry", ".", "init_app", "(", "app", "=", "app", ",", "dsn", "=", "dsn", ")" ]
28.222222
20.666667
def consume(self, args): """ Consume the arguments we support. The args are modified inline. The return value is the number of args eaten. """ consumable = args[:self.max_args] self.consumed = len(consumable) del args[:self.consumed] return self.consumed
[ "def", "consume", "(", "self", ",", "args", ")", ":", "consumable", "=", "args", "[", ":", "self", ".", "max_args", "]", "self", ".", "consumed", "=", "len", "(", "consumable", ")", "del", "args", "[", ":", "self", ".", "consumed", "]", "return", "self", ".", "consumed" ]
42.428571
5.428571
def from_hsv(cls, h, s, v): """Constructs a :class:`Colour` from an HSV tuple.""" rgb = colorsys.hsv_to_rgb(h, s, v) return cls.from_rgb(*(int(x * 255) for x in rgb))
[ "def", "from_hsv", "(", "cls", ",", "h", ",", "s", ",", "v", ")", ":", "rgb", "=", "colorsys", ".", "hsv_to_rgb", "(", "h", ",", "s", ",", "v", ")", "return", "cls", ".", "from_rgb", "(", "*", "(", "int", "(", "x", "*", "255", ")", "for", "x", "in", "rgb", ")", ")" ]
46.75
8
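colorsys works in unit-interval floats on both sides, which is why the from_hsv record above scales each channel by 255 before handing it to the RGB constructor. A quick check with pure red (h=0, s=1, v=1):

    import colorsys

    rgb = colorsys.hsv_to_rgb(0.0, 1.0, 1.0)   # floats in [0, 1]
    print(tuple(int(x * 255) for x in rgb))    # (255, 0, 0)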
def junos_copy_file(src, dst, **kwargs): ''' .. versionadded:: 2019.2.0 Copies the file on the remote Junos device. src The source file path. This argument accepts the usual Salt URIs (e.g., ``salt://``, ``http://``, ``https://``, ``s3://``, ``ftp://``, etc.). dst The destination path on the device where to copy the file. CLI Example: .. code-block:: bash salt '*' napalm.junos_copy_file https://example.com/junos.cfg /var/tmp/myjunos.cfg ''' prep = _junos_prep_fun(napalm_device) # pylint: disable=undefined-variable if not prep['result']: return prep cached_src = __salt__['cp.cache_file'](src) return __salt__['junos.file_copy'](cached_src, dst)
[ "def", "junos_copy_file", "(", "src", ",", "dst", ",", "*", "*", "kwargs", ")", ":", "prep", "=", "_junos_prep_fun", "(", "napalm_device", ")", "# pylint: disable=undefined-variable", "if", "not", "prep", "[", "'result'", "]", ":", "return", "prep", "cached_src", "=", "__salt__", "[", "'cp.cache_file'", "]", "(", "src", ")", "return", "__salt__", "[", "'junos.file_copy'", "]", "(", "cached_src", ",", "dst", ")" ]
30.083333
28.166667
def hurst_rs(data, nvals=None, fit="RANSAC", debug_plot=False, debug_data=False, plot_file=None, corrected=True, unbiased=True): """ Calculates the Hurst exponent by a standard rescaled range (R/S) approach. Explanation of Hurst exponent: The Hurst exponent is a measure for the "long-term memory" of a time series, meaning the long statistical dependencies in the data that do not originate from cycles. It originates from H.E. Hurst's observations of the problem of long-term storage in water reservoirs. If x_i is the discharge of a river in year i and we observe this discharge for N years, we can calculate the storage capacity that would be required to keep the discharge steady at its mean value. To do so, we first subtract the mean over all x_i from the individual x_i to obtain the departures x'_i from the mean for each year i. As the excess or deficit in discharge always carries over from year i to year i+1, we need to examine the cumulative sum of x'_i, denoted by y_i. This cumulative sum represents the filling of our hypothetical storage. If the sum is above 0, we are storing excess discharge from the river, if it is below zero we have compensated a deficit in discharge by releasing water from the storage. The range (maximum - minimum) R of y_i therefore represents the total capacity required for the storage. Hurst showed that this value follows a steady trend for varying N if it is normalized by the standard deviation sigma over the x_i. Namely he obtained the following formula: R/sigma = (N/2)^K In this equation, K is called the Hurst exponent. Its value is 0.5 for white noise, but becomes greater for time series that exhibit some positive dependency on previous values. For negative dependencies it becomes less than 0.5. Explanation of the algorithm: The rescaled range (R/S) approach is directly derived from Hurst's definition. The time series of length N is split into non-overlapping subseries of length n. Then, R and S (S = sigma) are calculated for each subseries and the mean is taken over all subseries yielding (R/S)_n. This process is repeated for several lengths n. Finally, the exponent K is obtained by fitting a straight line to the plot of log((R/S)_n) vs log(n). There seems to be no consensus how to choose the subseries lengths n. This function therefore leaves the choice to the user. The module provides some utility functions for "typical" values: * binary_n: N/2, N/4, N/8, ... * logarithmic_n: min_n, min_n * f, min_n * f^2, ... References: .. [h_1] H. E. Hurst, “The problem of long-term storage in reservoirs,” International Association of Scientific Hydrology. Bulletin, vol. 1, no. 3, pp. 13–27, 1956. .. [h_2] H. E. Hurst, “A suggested statistical model of some time series which occur in nature,” Nature, vol. 180, p. 494, 1957. .. [h_3] R. Weron, “Estimating long-range dependence: finite sample properties and confidence intervals,” Physica A: Statistical Mechanics and its Applications, vol. 312, no. 1, pp. 285–299, 2002. Reference Code: .. [h_a] "hurst" function in R-package "pracma", url: https://cran.r-project.org/web/packages/pracma/pracma.pdf Note: Pracma yields several estimates of the Hurst exponent, which are listed below. Unless otherwise stated they use the divisors of the length of the sequence as n. The length is reduced by at most 1% to find the value that has the most divisors. * The "Simple R/S" estimate is just log((R/S)_n) / log(n) for n = N. * The "theoretical Hurst exponent" is the value that would be expected of an uncorrected rescaled range approach for random noise of the size of the input data. * The "empirical Hurst exponent" is the uncorrected Hurst exponent obtained by the rescaled range approach. * The "corrected empirical Hurst exponent" is the Anis-Lloyd-Peters corrected Hurst exponent, but with sqrt(1/2 * pi * n) added to the (R/S)_n before the log. * The "corrected R over S Hurst exponent" uses the R-function "lm" instead of pracma's own "polyfit" and uses n = N/2, N/4, N/8, ... by successively halving the subsequences (which means that some subsequences may be one element longer than others). In contrast to its name it does not use the Anis-Lloyd-Peters correction factor. If you want to compare the output of pracma to the output of nolds, the "empirical hurst exponent" is the only measure that exactly corresponds to the Hurst measure implemented in nolds (by choosing corrected=False, fit="poly" and employing the same strategy for choosing n as the divisors of the (reduced) sequence length). .. [h_b] Rafael Weron, "HURST: MATLAB function to compute the Hurst exponent using R/S Analysis", url: https://ideas.repec.org/c/wuu/hscode/m11003.html Note: When the same values for nvals are used and fit is set to "poly", nolds yields exactly the same results as this implementation. .. [h_c] Bill Davidson, "Hurst exponent", url: http://www.mathworks.com/matlabcentral/fileexchange/9842-hurst-exponent .. [h_d] Tomaso Aste, "Generalized Hurst exponent", url: http://de.mathworks.com/matlabcentral/fileexchange/30076-generalized-hurst-exponent Args: data (array-like of float): time series Kwargs: nvals (iterable of int): sizes of subseries to use (default: logmid_n(total_N, ratio=1/4.0, nsteps=15), that is 15 logarithmically spaced values in the medium 25% of the logarithmic range) Generally, the choice for n is a trade-off between the length and the number of the subsequences that are used for the calculation of the (R/S)_n. Very low values of n lead to high variance in the ``r`` and ``s`` while very high values may leave too few subsequences for the mean over them to still be meaningful. Logarithmic spacing makes sense, because it translates to even spacing in the log-log-plot. fit (str): the fitting method to use for the line fit, either 'poly' for normal least squares polynomial fitting or 'RANSAC' for RANSAC-fitting which is more robust to outliers debug_plot (boolean): if True, a simple plot of the final line-fitting step will be shown debug_data (boolean): if True, debugging data will be returned alongside the result plot_file (str): if debug_plot is True and plot_file is not None, the plot will be saved under the given file name instead of directly showing it through ``plt.show()`` corrected (boolean): if True, the Anis-Lloyd-Peters correction factor will be applied to the output according to the expected value for the individual (R/S)_n (see [h_3]_) unbiased (boolean): if True, the standard deviation based on the unbiased variance (1/(N-1) instead of 1/N) will be used. This should be the default choice, since the true mean of the sequences is not known. This parameter should only be changed to recreate results of other implementations. Returns: float: estimated Hurst exponent K using a rescaled range approach (if K = 0.5 there are no long-range correlations in the data, if K < 0.5 there are negative long-range correlations, if K > 0.5 there are positive long-range correlations) (1d-vector, 1d-vector, list): only present if debug_data is True: debug data of the form ``(nvals, rsvals, poly)`` where ``nvals`` are the values used for log(n), ``rsvals`` are the corresponding log((R/S)_n) and ``poly`` are the line coefficients (``[slope, intercept]``) """ data = np.asarray(data) total_N = len(data) if nvals is None: # chooses a default value for nvals that will give 15 logarithmically # spaced datapoints leaning towards the middle of the logarithmic range # (since both too small and too large n introduce too much variance) nvals = logmid_n(total_N, ratio=1/4.0, nsteps=15) # get individual values for (R/S)_n rsvals = np.array([rs(data, n, unbiased=unbiased) for n in nvals]) # filter NaNs (zeros should not be possible, because if R is 0 then # S is also zero) not_nan = np.logical_not(np.isnan(rsvals)) rsvals = rsvals[not_nan] nvals = np.asarray(nvals)[not_nan] # it may happen that no rsvals are left (if all values of data are the same) if len(rsvals) == 0: poly = [np.nan, np.nan] if debug_plot: warnings.warn("Cannot display debug plot, all (R/S)_n are NaN") else: # fit a line to the logarithm of the obtained (R/S)_n xvals = np.log(nvals) yvals = np.log(rsvals) if corrected: yvals -= np.log([expected_rs(n) for n in nvals]) poly = poly_fit(xvals, yvals, 1, fit=fit) if debug_plot: plot_reg(xvals, yvals, poly, "log(n)", "log((R/S)_n)", fname=plot_file) # account for correction if necessary h = poly[0] + 0.5 if corrected else poly[0] # return line slope (+ correction) as hurst exponent if debug_data: return (h, (np.log(nvals), np.log(rsvals), poly)) else: return h
[ "def", "hurst_rs", "(", "data", ",", "nvals", "=", "None", ",", "fit", "=", "\"RANSAC\"", ",", "debug_plot", "=", "False", ",", "debug_data", "=", "False", ",", "plot_file", "=", "None", ",", "corrected", "=", "True", ",", "unbiased", "=", "True", ")", ":", "data", "=", "np", ".", "asarray", "(", "data", ")", "total_N", "=", "len", "(", "data", ")", "if", "nvals", "is", "None", ":", "# chooses a default value for nvals that will give 15 logarithmically", "# spaced datapoints leaning towards the middle of the logarithmic range", "# (since both too small and too large n introduce too much variance)", "nvals", "=", "logmid_n", "(", "total_N", ",", "ratio", "=", "1", "/", "4.0", ",", "nsteps", "=", "15", ")", "# get individual values for (R/S)_n", "rsvals", "=", "np", ".", "array", "(", "[", "rs", "(", "data", ",", "n", ",", "unbiased", "=", "unbiased", ")", "for", "n", "in", "nvals", "]", ")", "# filter NaNs (zeros should not be possible, because if R is 0 then", "# S is also zero)", "not_nan", "=", "np", ".", "logical_not", "(", "np", ".", "isnan", "(", "rsvals", ")", ")", "rsvals", "=", "rsvals", "[", "not_nan", "]", "nvals", "=", "np", ".", "asarray", "(", "nvals", ")", "[", "not_nan", "]", "# it may happen that no rsvals are left (if all values of data are the same)", "if", "len", "(", "rsvals", ")", "==", "0", ":", "poly", "=", "[", "np", ".", "nan", ",", "np", ".", "nan", "]", "if", "debug_plot", ":", "warnings", ".", "warn", "(", "\"Cannot display debug plot, all (R/S)_n are NaN\"", ")", "else", ":", "# fit a line to the logarithm of the obtained (R/S)_n", "xvals", "=", "np", ".", "log", "(", "nvals", ")", "yvals", "=", "np", ".", "log", "(", "rsvals", ")", "if", "corrected", ":", "yvals", "-=", "np", ".", "log", "(", "[", "expected_rs", "(", "n", ")", "for", "n", "in", "nvals", "]", ")", "poly", "=", "poly_fit", "(", "xvals", ",", "yvals", ",", "1", ",", "fit", "=", "fit", ")", "if", "debug_plot", ":", "plot_reg", "(", "xvals", ",", "yvals", ",", "poly", ",", "\"log(n)\"", ",", "\"log((R/S)_n)\"", ",", "fname", "=", "plot_file", ")", "# account for correction if necessary", "h", "=", "poly", "[", "0", "]", "+", "0.5", "if", "corrected", "else", "poly", "[", "0", "]", "# return line slope (+ correction) as hurst exponent", "if", "debug_data", ":", "return", "(", "h", ",", "(", "np", ".", "log", "(", "nvals", ")", ",", "np", ".", "log", "(", "rsvals", ")", ",", "poly", ")", ")", "else", ":", "return", "h" ]
49.47644
27.183246
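A minimal usage sketch for the record above. It assumes the function ships as nolds.hurst_rs (the signature here matches that package); the seed, series length, and the H ≈ 0.5 expectation for uncorrelated noise follow from the docstring rather than any verified run.

import numpy as np
import nolds  # assumption: the function above is exported as nolds.hurst_rs

rng = np.random.default_rng(42)
white_noise = rng.standard_normal(10000)  # uncorrelated data, so H should sit near 0.5

h, (log_n, log_rs, poly) = nolds.hurst_rs(white_noise, debug_data=True)
print("Hurst exponent:", h)        # expect a value close to 0.5
print("line coefficients:", poly)  # [slope, intercept] of the log-log fit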
def auto_discretize(self, max_freq=50., wave_frac=0.2):
    """Subdivide the layers to capture strain variation.

    Parameters
    ----------
    max_freq: float
        Maximum frequency of interest [Hz].
    wave_frac: float
        Fraction of the wavelength permitted per layer; typically
        1/3 to 1/5.

    Returns
    -------
    profile: Profile
        A new profile with nonlinear layers subdivided into thinner,
        equal sublayers; the water table depth is carried over unchanged.
    """
    layers = []
    for l in self:
        if l.soil_type.is_nonlinear:
            opt_thickness = l.shear_vel / max_freq * wave_frac
            count = np.ceil(l.thickness / opt_thickness).astype(int)
            thickness = l.thickness / count
            for _ in range(count):
                layers.append(Layer(l.soil_type, thickness, l.shear_vel))
        else:
            layers.append(l)
    return Profile(layers, wt_depth=self.wt_depth)
[ "def", "auto_discretize", "(", "self", ",", "max_freq", "=", "50.", ",", "wave_frac", "=", "0.2", ")", ":", "layers", "=", "[", "]", "for", "l", "in", "self", ":", "if", "l", ".", "soil_type", ".", "is_nonlinear", ":", "opt_thickness", "=", "l", ".", "shear_vel", "/", "max_freq", "*", "wave_frac", "count", "=", "np", ".", "ceil", "(", "l", ".", "thickness", "/", "opt_thickness", ")", ".", "astype", "(", "int", ")", "thickness", "=", "l", ".", "thickness", "/", "count", "for", "_", "in", "range", "(", "count", ")", ":", "layers", ".", "append", "(", "Layer", "(", "l", ".", "soil_type", ",", "thickness", ",", "l", ".", "shear_vel", ")", ")", "else", ":", "layers", ".", "append", "(", "l", ")", "return", "Profile", "(", "layers", ",", "wt_depth", "=", "self", ".", "wt_depth", ")" ]
33.962963
19.111111
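The subdivision rule above is simple enough to check in isolation: each nonlinear layer is split into equal sublayers no thicker than wave_frac times the shortest wavelength of interest (shear_vel / max_freq). A standalone sketch of just that arithmetic, with made-up layer values:

import math

def sublayer_thickness(thickness, shear_vel, max_freq=50.0, wave_frac=0.2):
    """Return (count, thickness) for the equal sublayers of one layer."""
    opt_thickness = shear_vel / max_freq * wave_frac
    count = math.ceil(thickness / opt_thickness)
    return count, thickness / count

# A layer 10 units thick with shear_vel = 200: the wavelength at 50 Hz is 4,
# so sublayers must be <= 0.8 thick -> 13 sublayers of ~0.769 each.
print(sublayer_thickness(10.0, 200.0))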
def get_key(dotenv_path, key_to_get, verbose=False):
    """
    Gets the value of a given key from the given .env file.

    If the .env path given doesn't exist, returns None (and emits a
    warning when verbose is set).

    :param dotenv_path: path to the .env file
    :param key_to_get: key to look up
    :param verbose: verbosity flag; raise a warning if the path does not
        exist or the key is not found
    :return: value of the variable from the environment file, or None
    """
    key_to_get = str(key_to_get)
    if not os.path.exists(dotenv_path):
        if verbose:
            warnings.warn(f"Can't read {dotenv_path}, it doesn't exist.")
        return None
    dotenv_as_dict = dotenv_values(dotenv_path)
    if key_to_get in dotenv_as_dict:
        return dotenv_as_dict[key_to_get]
    else:
        if verbose:
            warnings.warn(f"key {key_to_get} not found in {dotenv_path}.")
        return None
[ "def", "get_key", "(", "dotenv_path", ",", "key_to_get", ",", "verbose", "=", "False", ")", ":", "key_to_get", "=", "str", "(", "key_to_get", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "dotenv_path", ")", ":", "if", "verbose", ":", "warnings", ".", "warn", "(", "f\"Can't read {dotenv_path}, it doesn't exist.\"", ")", "return", "None", "dotenv_as_dict", "=", "dotenv_values", "(", "dotenv_path", ")", "if", "key_to_get", "in", "dotenv_as_dict", ":", "return", "dotenv_as_dict", "[", "key_to_get", "]", "else", ":", "if", "verbose", ":", "warnings", ".", "warn", "(", "f\"key {key_to_get} not found in {dotenv_path}.\"", ")", "return", "None" ]
35.409091
16.045455
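A hedged usage sketch for get_key. It assumes a python-dotenv release exposing this exact signature (newer releases have dropped the verbose flag), so treat the import and the keyword argument as illustrative:

import os
import tempfile
from dotenv import get_key  # assumption: a release matching the signature above

with tempfile.NamedTemporaryFile("w", suffix=".env", delete=False) as fh:
    fh.write("API_TOKEN=abc123\n")
    path = fh.name

print(get_key(path, "API_TOKEN"))              # -> "abc123"
print(get_key(path, "MISSING", verbose=True))  # warns, then returns None
os.remove(path)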
def get_settings(self): """ Returns current settings. Only accessible if authenticated as the user. """ url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name) return self._imgur._send_request(url)
[ "def", "get_settings", "(", "self", ")", ":", "url", "=", "self", ".", "_imgur", ".", "_base_url", "+", "\"/3/account/{0}/settings\"", ".", "format", "(", "self", ".", "name", ")", "return", "self", ".", "_imgur", ".", "_send_request", "(", "url", ")" ]
32.125
15.375
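Since the method only formats an endpoint and delegates to the client's request helper, a self-contained stub can show the URL it builds without the real Imgur client. The stub classes below are stand-ins; only the path template comes from the code above:

class StubImgur:
    _base_url = "https://api.imgur.com"

    def _send_request(self, url):
        print("GET", url)  # the real client would perform an authenticated HTTP GET
        return {}

class StubUser:
    def __init__(self, imgur, name):
        self._imgur = imgur
        self.name = name

    def get_settings(self):
        url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
        return self._imgur._send_request(url)

StubUser(StubImgur(), "example").get_settings()
# -> GET https://api.imgur.com/3/account/example/settings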
def get_current_term():
    """
    Returns a uw_sws.models.Term object, for the current term.
    """
    url = "{}/current.json".format(term_res_url_prefix)
    term = _json_to_term_model(get_resource(url))
    # A term doesn't become "current" until 2 days before the start of
    # classes. That's too late to be useful, so if we're after the last
    # day of the grade submission window, use the next term resource.
    if datetime.now() > term.grade_submission_deadline:
        return get_next_term()
    return term
[ "def", "get_current_term", "(", ")", ":", "url", "=", "\"{}/current.json\"", ".", "format", "(", "term_res_url_prefix", ")", "term", "=", "_json_to_term_model", "(", "get_resource", "(", "url", ")", ")", "# A term doesn't become \"current\" until 2 days before the start of", "# classes. That's too late to be useful, so if we're after the last", "# day of the grade submission window, use the next term resource.", "if", "datetime", ".", "now", "(", ")", ">", "term", ".", "grade_submission_deadline", ":", "return", "get_next_term", "(", ")", "return", "term" ]
34.2
18.2
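The interesting part of get_current_term is the fallback rule, which can be exercised without the SWS service. A sketch with a stand-in Term class (pick_term and the dates below are hypothetical; only the deadline comparison mirrors the code above):

from datetime import datetime

class Term:  # stand-in for uw_sws.models.Term
    def __init__(self, grade_submission_deadline):
        self.grade_submission_deadline = grade_submission_deadline

def pick_term(current, next_term, now):
    # past the grade-submission deadline, the "current" resource is stale
    return next_term if now > current.grade_submission_deadline else current

spring = Term(datetime(2024, 6, 18))
summer = Term(datetime(2024, 8, 27))
print(pick_term(spring, summer, now=datetime(2024, 7, 1)) is summer)  # True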
def _process_genotype_features(self, limit=None): """ Here we process the genotype_features file, which lists genotypes together with any intrinsic sequence alterations, their zygosity, and affected gene. Because we don't necessarily get allele pair (VSLC) ids in a single row, we iterate through the file and build up a hash that contains all of a genotype's partonomy. We then assemble a genotype based on that partonomy. This table does not list the background genotype/strain: that is listed elsewhere. ZFIN "ALT" objects are mapped to sequence alterations in our system. By the end of this method, we have built up the intrinsic genotype, with Monarch-style labels. All ZFIN labels are added as synonyms (including the "sup" html tags). We make assumptions here that any variants that affect the same locus are in trans. All of the genotype parts are created as BNodes at the moment, to avoid minting new Monarch ids, which means for anyone consuming this data they are inherently unstable. This may change in the future. :param limit: :return: """ raw = '/'.join((self.rawdir, self.files['geno']['file'])) if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) taxon_id = self.globaltt['Danio rerio'] geno_hash = {} # This is used to store the genotype partonomy gvc_hash = {} LOG.info("Processing Genotypes") line_counter = 0 geno = Genotype(graph) with open(raw, 'r', encoding="utf8") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (genotype_num, genotype_name, genotype_unique_name, allele_num, allele_name, allele_ab, allele_type, allele_disp_type, gene_symbol, gene_num, zygosity, construct_name, construct_num # , empty ) = row if self.test_mode and genotype_num not in self.test_ids['genotype']: continue # add the genotype to the graph # not adding the genotype label here, # since it doesn't include the background # that will be done in another method genotype_id = 'ZFIN:' + genotype_num.strip() geno.addGenotype(genotype_id, None) # add the given name and uniquename as synonyms model.addSynonym(genotype_id, genotype_name) model.addSynonym(genotype_id, genotype_unique_name) # store the alleles of the genotype, # in order to use when processing fish if genotype_num not in self.geno_alleles: self.geno_alleles[genotype_num] = set() self.geno_alleles[genotype_num].add(allele_num) if genotype_id not in geno_hash: geno_hash[genotype_id] = {} genoparts = geno_hash[genotype_id] # reassign the allele_type to a proper GENO or SO class # allele_type = self._map_allele_type_to_geno(allele_type) allele_type_id = self.resolve(allele_type, False) if allele_type_id == allele_type: allele_type_id = self.globaltt['unspecified'] # is geno: not zfa: allele_id = 'ZFIN:' + allele_num.strip() if allele_num != '': self.id_label_map[allele_id] = allele_name # alleles in zfin are really sequence alterations in our system geno.addSequenceAlteration(allele_id, allele_name, allele_type_id) model.addSynonym(allele_id, allele_ab) # here, we assemble the items into a genotype hash # we need to do this because each row only holds one allele # of a gene but a genotype may have many alleles and therefore # many rows so we loop through the file once to build a hash of # genotype components if gene_num is not None and gene_num.strip() != '': # add the gene to the graph, along with it's symbol # as the primary label gene_id = 'ZFIN:' + gene_num.strip() geno.addGene(gene_id, gene_symbol) self.id_label_map[gene_id] = gene_symbol # if it's a 
transgenic construct, # then we'll have to get the other bits if construct_num is not None and construct_num.strip() != '': construct_id = 'ZFIN:' + construct_num.strip() geno.addSequenceDerivesFrom(allele_id, construct_id) self.id_label_map[construct_id] = construct_name # allele to gene if allele_id not in self.variant_loci_genes: self.variant_loci_genes[allele_id] = [gene_id] else: if gene_id not in self.variant_loci_genes[allele_id]: self.variant_loci_genes[allele_id] += [gene_id] if gene_id not in genoparts: genoparts[gene_id] = [allele_id] else: genoparts[gene_id] += [allele_id] other_allele = self._get_other_allele_by_zygosity( allele_id, zygosity) if other_allele is not None: genoparts[gene_id] += [other_allele] else: # if the gene is not known, # still need to add the allele to the genotype hash # these will be added as sequence alterations. genoparts[allele_id] = [allele_id] other_allele = self._get_other_allele_by_zygosity( allele_id, zygosity) if other_allele is not None: genoparts[allele_id] += [other_allele] geno_hash[genotype_id] = genoparts # fetch the other affected genes, # and make sure they are in the geno hash # we have to do this because some of the affected genes # are not listed in this file genes_from_hash = None if allele_id in self.variant_loci_genes: genes_from_hash = self.variant_loci_genes[allele_id] else: pass # LOG.info('no gene found for %s', allele_id) if genes_from_hash is not None \ and genes_from_hash != [gene_id] \ and gene_id not in genes_from_hash: LOG.info( "***Found genes not in genotype_features for %s: %s", allele_id, genes_from_hash) for gh in genes_from_hash: if gh not in genoparts: genoparts[gh] = [allele_id] else: genoparts[gh] += [allele_id] other_allele = self._get_other_allele_by_zygosity( allele_id, zygosity) if other_allele is not None: genoparts[gh].append(other_allele) if not self.test_mode and limit is not None and line_counter > limit: break # end loop through file csvfile.close() LOG.info("Finished parsing file") # ############## BUILD THE INTRINSIC GENOTYPES ############### # using the geno_hash, build the genotype parts, # and add them to the graph # the hash is organized like: # genotype_id : { # gene_id : [list, of, alleles], # for located things # allele_id : [list, of, alleles] # for unlocated things # } # now loop through the geno_hash, and build the vslcs LOG.info("Building intrinsic genotypes from partonomy") for gt in geno_hash: if self.test_mode and re.sub(r'ZFIN:', '', gt) \ not in self.test_ids['genotype']: print('skipping ', gt) continue if gt not in gvc_hash: gvc_hash[gt] = [] gvcparts = gvc_hash[gt] for locus_id in geno_hash[gt]: # LOG.info("locus id %s",locus_id) locus_label = self.id_label_map[locus_id] variant_locus_parts = geno_hash.get(gt).get(locus_id) # LOG.info( # 'vl parts: %s',pprint.pformat(variant_locus_parts)) # if the locus == part, then it isn't a gene, # rather a variant not in a specific gene if locus_id in variant_locus_parts: # set the gene_id to none gene_id = None else: gene_id = locus_id allele1_id = variant_locus_parts[0] if allele1_id not in self.id_label_map: allele1_label = allele1_id LOG.error('allele1 %s not in hash', allele1_id) else: allele1_label = self.id_label_map[allele1_id] allele2_id = None allele2_label = None zygosity_id = None if len(variant_locus_parts) > 2: LOG.error( "There may be a problem. 
>2 parts for this locus (%s): %s", locus_id, variant_locus_parts) elif len(variant_locus_parts) > 1: allele2_id = variant_locus_parts[1] if allele2_id not in ['0', '?']: allele2_label = self.id_label_map[allele2_id] else: allele2_label = allele2_id if allele2_id is not None: if allele2_id == '?': zygosity_id = self.globaltt['indeterminate'] allele2_id = 'UN' elif allele2_id == '0': zygosity_id = self.globaltt['hemizygous'] elif allele1_id != allele2_id: zygosity_id = self.globaltt['compound heterozygous'] elif allele1_id == allele2_id: zygosity_id = self.globaltt['homozygous'] else: zygosity_id = self.globaltt['simple heterozygous'] allele2_label = '+' allele2_id = 'WT' # make variant_loci vloci2 = vloci2_label = None if gene_id is not None: vloci1 = self._make_variant_locus_id(gene_id, allele1_id) vloci1_label = geno.make_variant_locus_label( locus_label, allele1_label) geno.addSequenceAlterationToVariantLocus( allele1_id, vloci1) geno.addAlleleOfGene(vloci1, gene_id) model.addIndividualToGraph( vloci1, vloci1_label, self.globaltt['variant_locus']) if allele2_id is not None and allele2_id not in ['WT', '0', 'UN']: vloci2 = self._make_variant_locus_id( gene_id, allele2_id) vloci2_label = geno.make_variant_locus_label( locus_label, allele2_label) geno.addSequenceAlterationToVariantLocus( allele2_id, vloci2) model.addIndividualToGraph( vloci2, vloci2_label, self.globaltt['variant_locus']) geno.addAlleleOfGene(vloci2, gene_id) else: vloci1 = allele1_id vloci1_label = allele1_label vloci2 = None if allele2_id not in ['WT', '0', 'UN']: vloci2 = allele2_id vloci2_label = allele2_label # create the vslc gene_label = '' if gene_id is None: gn = 'UN' else: gn = gene_id gene_label = self.id_label_map[gene_id] # TODO also consider adding this to Genotype.py vslc_id = '-'.join((gn, allele1_id, allele2_id)) vslc_id = '_:' + re.sub(r'(ZFIN)?:', '', vslc_id) vslc_label = geno.make_vslc_label( gene_label, allele1_label, allele2_label) # add to global hash self.id_label_map[vslc_id] = vslc_label model.addIndividualToGraph( vslc_id, vslc_label, self.globaltt['variant single locus complement']) geno.addPartsToVSLC( vslc_id, vloci1, vloci2, zygosity_id, self.globaltt['has_variant_part'], self.globaltt['has_variant_part']) gvcparts += [vslc_id] gvc_hash[gt] = gvcparts # end loop through geno_hash LOG.info('Finished finding all the intrinsic genotype parts') LOG.info('Build pretty genotype labels') # now loop through the gvc_hash, and build the gvc for gt in gvc_hash: if self.test_mode and re.sub(r'ZFIN:', '', gt) \ not in self.test_ids['genotype']: continue gvc_parts = gvc_hash[gt] # only need to make a gvc specifically if there's >1 vslc if len(gvc_parts) > 1: gvc_labels = [] # put these in order so they will always make the same id gvc_parts.sort() gvc_id = '-'.join(gvc_parts) gvc_id = re.sub(r'(ZFIN)?:', '', gvc_id) gvc_id = '_:' + re.sub(r'^_*', '', gvc_id) for vslc_id in gvc_parts: # add the vslc to the gvc geno.addVSLCtoParent(vslc_id, gvc_id) # build the gvc label vslc_label = self.id_label_map[vslc_id] if vslc_label is not None: gvc_labels += [vslc_label] else: gvc_labels += [vslc_id] gvc_labels.sort() gvc_label = '; '.join(gvc_labels) # add the gvc to the id-label hash self.id_label_map[gvc_id] = gvc_label # add the gvc model.addIndividualToGraph( gvc_id, gvc_label, self.globaltt['genomic_variation_complement']) elif len(gvc_parts) == 1: # assign the vslc to be also a gvc vslc_id = gvc_parts[0] gvc_id = vslc_id gvc_label = self.id_label_map[vslc_id] model.addType(vslc_id, 
self.globaltt['genomic_variation_complement']) else: gvc_id = None gvc_label = '' LOG.error("No GVC parts for %s", gt) if gt in self.genotype_backgrounds: background_id = self.genotype_backgrounds[gt] if background_id in self.id_label_map: background_label = self.id_label_map[background_id] else: background_label = background_id LOG.error("We don't have the label for %s stored", background_id) else: background_num = re.sub(r'ZFIN:', '', gt) background_id = '_:bkgd-'+background_num background_label = 'n.s. (' + background_num + ')' background_desc = 'This genomic background is unknown. ' +\ 'This is a placeholder background for ' + gt + '.' # there is no background for this genotype; # need to add the taxon to this one! # make an anonymous background for this genotype geno.addGenomicBackground( background_id, background_label, None, background_desc) geno.addGenomicBackgroundToGenotype(background_id, gt) background_label = 'n.s.' geno.addTaxon(taxon_id, background_id) genotype_name = gvc_label + ' [' + background_label + ']' geno.addGenotype(gt, genotype_name) self.id_label_map[gt] = genotype_name # Add the GVC to the genotype geno.addParts(gvc_id, gt, self.globaltt['has_variant_part']) # end of gvc loop # end of genotype loop # TODO this is almost complete; # deficiencies with >1 locus deleted are still not right LOG.info("Finished building genotype labels") LOG.info("Done with genotypes") return
[ "def", "_process_genotype_features", "(", "self", ",", "limit", "=", "None", ")", ":", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'geno'", "]", "[", "'file'", "]", ")", ")", "if", "self", ".", "test_mode", ":", "graph", "=", "self", ".", "testgraph", "else", ":", "graph", "=", "self", ".", "graph", "model", "=", "Model", "(", "graph", ")", "taxon_id", "=", "self", ".", "globaltt", "[", "'Danio rerio'", "]", "geno_hash", "=", "{", "}", "# This is used to store the genotype partonomy", "gvc_hash", "=", "{", "}", "LOG", ".", "info", "(", "\"Processing Genotypes\"", ")", "line_counter", "=", "0", "geno", "=", "Genotype", "(", "graph", ")", "with", "open", "(", "raw", ",", "'r'", ",", "encoding", "=", "\"utf8\"", ")", "as", "csvfile", ":", "filereader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "for", "row", "in", "filereader", ":", "line_counter", "+=", "1", "(", "genotype_num", ",", "genotype_name", ",", "genotype_unique_name", ",", "allele_num", ",", "allele_name", ",", "allele_ab", ",", "allele_type", ",", "allele_disp_type", ",", "gene_symbol", ",", "gene_num", ",", "zygosity", ",", "construct_name", ",", "construct_num", "# , empty", ")", "=", "row", "if", "self", ".", "test_mode", "and", "genotype_num", "not", "in", "self", ".", "test_ids", "[", "'genotype'", "]", ":", "continue", "# add the genotype to the graph", "# not adding the genotype label here,", "# since it doesn't include the background", "# that will be done in another method", "genotype_id", "=", "'ZFIN:'", "+", "genotype_num", ".", "strip", "(", ")", "geno", ".", "addGenotype", "(", "genotype_id", ",", "None", ")", "# add the given name and uniquename as synonyms", "model", ".", "addSynonym", "(", "genotype_id", ",", "genotype_name", ")", "model", ".", "addSynonym", "(", "genotype_id", ",", "genotype_unique_name", ")", "# store the alleles of the genotype,", "# in order to use when processing fish", "if", "genotype_num", "not", "in", "self", ".", "geno_alleles", ":", "self", ".", "geno_alleles", "[", "genotype_num", "]", "=", "set", "(", ")", "self", ".", "geno_alleles", "[", "genotype_num", "]", ".", "add", "(", "allele_num", ")", "if", "genotype_id", "not", "in", "geno_hash", ":", "geno_hash", "[", "genotype_id", "]", "=", "{", "}", "genoparts", "=", "geno_hash", "[", "genotype_id", "]", "# reassign the allele_type to a proper GENO or SO class", "# allele_type = self._map_allele_type_to_geno(allele_type)", "allele_type_id", "=", "self", ".", "resolve", "(", "allele_type", ",", "False", ")", "if", "allele_type_id", "==", "allele_type", ":", "allele_type_id", "=", "self", ".", "globaltt", "[", "'unspecified'", "]", "# is geno: not zfa:", "allele_id", "=", "'ZFIN:'", "+", "allele_num", ".", "strip", "(", ")", "if", "allele_num", "!=", "''", ":", "self", ".", "id_label_map", "[", "allele_id", "]", "=", "allele_name", "# alleles in zfin are really sequence alterations in our system", "geno", ".", "addSequenceAlteration", "(", "allele_id", ",", "allele_name", ",", "allele_type_id", ")", "model", ".", "addSynonym", "(", "allele_id", ",", "allele_ab", ")", "# here, we assemble the items into a genotype hash", "# we need to do this because each row only holds one allele", "# of a gene but a genotype may have many alleles and therefore", "# many rows so we loop through the file once to build a hash of", "# genotype components", "if", "gene_num", "is", "not", "None", "and", "gene_num", ".", "strip", "(", 
")", "!=", "''", ":", "# add the gene to the graph, along with it's symbol", "# as the primary label", "gene_id", "=", "'ZFIN:'", "+", "gene_num", ".", "strip", "(", ")", "geno", ".", "addGene", "(", "gene_id", ",", "gene_symbol", ")", "self", ".", "id_label_map", "[", "gene_id", "]", "=", "gene_symbol", "# if it's a transgenic construct,", "# then we'll have to get the other bits", "if", "construct_num", "is", "not", "None", "and", "construct_num", ".", "strip", "(", ")", "!=", "''", ":", "construct_id", "=", "'ZFIN:'", "+", "construct_num", ".", "strip", "(", ")", "geno", ".", "addSequenceDerivesFrom", "(", "allele_id", ",", "construct_id", ")", "self", ".", "id_label_map", "[", "construct_id", "]", "=", "construct_name", "# allele to gene", "if", "allele_id", "not", "in", "self", ".", "variant_loci_genes", ":", "self", ".", "variant_loci_genes", "[", "allele_id", "]", "=", "[", "gene_id", "]", "else", ":", "if", "gene_id", "not", "in", "self", ".", "variant_loci_genes", "[", "allele_id", "]", ":", "self", ".", "variant_loci_genes", "[", "allele_id", "]", "+=", "[", "gene_id", "]", "if", "gene_id", "not", "in", "genoparts", ":", "genoparts", "[", "gene_id", "]", "=", "[", "allele_id", "]", "else", ":", "genoparts", "[", "gene_id", "]", "+=", "[", "allele_id", "]", "other_allele", "=", "self", ".", "_get_other_allele_by_zygosity", "(", "allele_id", ",", "zygosity", ")", "if", "other_allele", "is", "not", "None", ":", "genoparts", "[", "gene_id", "]", "+=", "[", "other_allele", "]", "else", ":", "# if the gene is not known,", "# still need to add the allele to the genotype hash", "# these will be added as sequence alterations.", "genoparts", "[", "allele_id", "]", "=", "[", "allele_id", "]", "other_allele", "=", "self", ".", "_get_other_allele_by_zygosity", "(", "allele_id", ",", "zygosity", ")", "if", "other_allele", "is", "not", "None", ":", "genoparts", "[", "allele_id", "]", "+=", "[", "other_allele", "]", "geno_hash", "[", "genotype_id", "]", "=", "genoparts", "# fetch the other affected genes,", "# and make sure they are in the geno hash", "# we have to do this because some of the affected genes", "# are not listed in this file", "genes_from_hash", "=", "None", "if", "allele_id", "in", "self", ".", "variant_loci_genes", ":", "genes_from_hash", "=", "self", ".", "variant_loci_genes", "[", "allele_id", "]", "else", ":", "pass", "# LOG.info('no gene found for %s', allele_id)", "if", "genes_from_hash", "is", "not", "None", "and", "genes_from_hash", "!=", "[", "gene_id", "]", "and", "gene_id", "not", "in", "genes_from_hash", ":", "LOG", ".", "info", "(", "\"***Found genes not in genotype_features for %s: %s\"", ",", "allele_id", ",", "genes_from_hash", ")", "for", "gh", "in", "genes_from_hash", ":", "if", "gh", "not", "in", "genoparts", ":", "genoparts", "[", "gh", "]", "=", "[", "allele_id", "]", "else", ":", "genoparts", "[", "gh", "]", "+=", "[", "allele_id", "]", "other_allele", "=", "self", ".", "_get_other_allele_by_zygosity", "(", "allele_id", ",", "zygosity", ")", "if", "other_allele", "is", "not", "None", ":", "genoparts", "[", "gh", "]", ".", "append", "(", "other_allele", ")", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "# end loop through file", "csvfile", ".", "close", "(", ")", "LOG", ".", "info", "(", "\"Finished parsing file\"", ")", "# ############## BUILD THE INTRINSIC GENOTYPES ###############", "# using the geno_hash, build the genotype parts,", "# and add them to the graph", "# the hash 
is organized like:", "# genotype_id : {", "# gene_id : [list, of, alleles], # for located things", "# allele_id : [list, of, alleles] # for unlocated things", "# }", "# now loop through the geno_hash, and build the vslcs", "LOG", ".", "info", "(", "\"Building intrinsic genotypes from partonomy\"", ")", "for", "gt", "in", "geno_hash", ":", "if", "self", ".", "test_mode", "and", "re", ".", "sub", "(", "r'ZFIN:'", ",", "''", ",", "gt", ")", "not", "in", "self", ".", "test_ids", "[", "'genotype'", "]", ":", "print", "(", "'skipping '", ",", "gt", ")", "continue", "if", "gt", "not", "in", "gvc_hash", ":", "gvc_hash", "[", "gt", "]", "=", "[", "]", "gvcparts", "=", "gvc_hash", "[", "gt", "]", "for", "locus_id", "in", "geno_hash", "[", "gt", "]", ":", "# LOG.info(\"locus id %s\",locus_id)", "locus_label", "=", "self", ".", "id_label_map", "[", "locus_id", "]", "variant_locus_parts", "=", "geno_hash", ".", "get", "(", "gt", ")", ".", "get", "(", "locus_id", ")", "# LOG.info(", "# 'vl parts: %s',pprint.pformat(variant_locus_parts))", "# if the locus == part, then it isn't a gene,", "# rather a variant not in a specific gene", "if", "locus_id", "in", "variant_locus_parts", ":", "# set the gene_id to none", "gene_id", "=", "None", "else", ":", "gene_id", "=", "locus_id", "allele1_id", "=", "variant_locus_parts", "[", "0", "]", "if", "allele1_id", "not", "in", "self", ".", "id_label_map", ":", "allele1_label", "=", "allele1_id", "LOG", ".", "error", "(", "'allele1 %s not in hash'", ",", "allele1_id", ")", "else", ":", "allele1_label", "=", "self", ".", "id_label_map", "[", "allele1_id", "]", "allele2_id", "=", "None", "allele2_label", "=", "None", "zygosity_id", "=", "None", "if", "len", "(", "variant_locus_parts", ")", ">", "2", ":", "LOG", ".", "error", "(", "\"There may be a problem. 
>2 parts for this locus (%s): %s\"", ",", "locus_id", ",", "variant_locus_parts", ")", "elif", "len", "(", "variant_locus_parts", ")", ">", "1", ":", "allele2_id", "=", "variant_locus_parts", "[", "1", "]", "if", "allele2_id", "not", "in", "[", "'0'", ",", "'?'", "]", ":", "allele2_label", "=", "self", ".", "id_label_map", "[", "allele2_id", "]", "else", ":", "allele2_label", "=", "allele2_id", "if", "allele2_id", "is", "not", "None", ":", "if", "allele2_id", "==", "'?'", ":", "zygosity_id", "=", "self", ".", "globaltt", "[", "'indeterminate'", "]", "allele2_id", "=", "'UN'", "elif", "allele2_id", "==", "'0'", ":", "zygosity_id", "=", "self", ".", "globaltt", "[", "'hemizygous'", "]", "elif", "allele1_id", "!=", "allele2_id", ":", "zygosity_id", "=", "self", ".", "globaltt", "[", "'compound heterozygous'", "]", "elif", "allele1_id", "==", "allele2_id", ":", "zygosity_id", "=", "self", ".", "globaltt", "[", "'homozygous'", "]", "else", ":", "zygosity_id", "=", "self", ".", "globaltt", "[", "'simple heterozygous'", "]", "allele2_label", "=", "'+'", "allele2_id", "=", "'WT'", "# make variant_loci", "vloci2", "=", "vloci2_label", "=", "None", "if", "gene_id", "is", "not", "None", ":", "vloci1", "=", "self", ".", "_make_variant_locus_id", "(", "gene_id", ",", "allele1_id", ")", "vloci1_label", "=", "geno", ".", "make_variant_locus_label", "(", "locus_label", ",", "allele1_label", ")", "geno", ".", "addSequenceAlterationToVariantLocus", "(", "allele1_id", ",", "vloci1", ")", "geno", ".", "addAlleleOfGene", "(", "vloci1", ",", "gene_id", ")", "model", ".", "addIndividualToGraph", "(", "vloci1", ",", "vloci1_label", ",", "self", ".", "globaltt", "[", "'variant_locus'", "]", ")", "if", "allele2_id", "is", "not", "None", "and", "allele2_id", "not", "in", "[", "'WT'", ",", "'0'", ",", "'UN'", "]", ":", "vloci2", "=", "self", ".", "_make_variant_locus_id", "(", "gene_id", ",", "allele2_id", ")", "vloci2_label", "=", "geno", ".", "make_variant_locus_label", "(", "locus_label", ",", "allele2_label", ")", "geno", ".", "addSequenceAlterationToVariantLocus", "(", "allele2_id", ",", "vloci2", ")", "model", ".", "addIndividualToGraph", "(", "vloci2", ",", "vloci2_label", ",", "self", ".", "globaltt", "[", "'variant_locus'", "]", ")", "geno", ".", "addAlleleOfGene", "(", "vloci2", ",", "gene_id", ")", "else", ":", "vloci1", "=", "allele1_id", "vloci1_label", "=", "allele1_label", "vloci2", "=", "None", "if", "allele2_id", "not", "in", "[", "'WT'", ",", "'0'", ",", "'UN'", "]", ":", "vloci2", "=", "allele2_id", "vloci2_label", "=", "allele2_label", "# create the vslc", "gene_label", "=", "''", "if", "gene_id", "is", "None", ":", "gn", "=", "'UN'", "else", ":", "gn", "=", "gene_id", "gene_label", "=", "self", ".", "id_label_map", "[", "gene_id", "]", "# TODO also consider adding this to Genotype.py", "vslc_id", "=", "'-'", ".", "join", "(", "(", "gn", ",", "allele1_id", ",", "allele2_id", ")", ")", "vslc_id", "=", "'_:'", "+", "re", ".", "sub", "(", "r'(ZFIN)?:'", ",", "''", ",", "vslc_id", ")", "vslc_label", "=", "geno", ".", "make_vslc_label", "(", "gene_label", ",", "allele1_label", ",", "allele2_label", ")", "# add to global hash", "self", ".", "id_label_map", "[", "vslc_id", "]", "=", "vslc_label", "model", ".", "addIndividualToGraph", "(", "vslc_id", ",", "vslc_label", ",", "self", ".", "globaltt", "[", "'variant single locus complement'", "]", ")", "geno", ".", "addPartsToVSLC", "(", "vslc_id", ",", "vloci1", ",", "vloci2", ",", "zygosity_id", ",", "self", ".", "globaltt", "[", "'has_variant_part'", "]", 
",", "self", ".", "globaltt", "[", "'has_variant_part'", "]", ")", "gvcparts", "+=", "[", "vslc_id", "]", "gvc_hash", "[", "gt", "]", "=", "gvcparts", "# end loop through geno_hash", "LOG", ".", "info", "(", "'Finished finding all the intrinsic genotype parts'", ")", "LOG", ".", "info", "(", "'Build pretty genotype labels'", ")", "# now loop through the gvc_hash, and build the gvc", "for", "gt", "in", "gvc_hash", ":", "if", "self", ".", "test_mode", "and", "re", ".", "sub", "(", "r'ZFIN:'", ",", "''", ",", "gt", ")", "not", "in", "self", ".", "test_ids", "[", "'genotype'", "]", ":", "continue", "gvc_parts", "=", "gvc_hash", "[", "gt", "]", "# only need to make a gvc specifically if there's >1 vslc", "if", "len", "(", "gvc_parts", ")", ">", "1", ":", "gvc_labels", "=", "[", "]", "# put these in order so they will always make the same id", "gvc_parts", ".", "sort", "(", ")", "gvc_id", "=", "'-'", ".", "join", "(", "gvc_parts", ")", "gvc_id", "=", "re", ".", "sub", "(", "r'(ZFIN)?:'", ",", "''", ",", "gvc_id", ")", "gvc_id", "=", "'_:'", "+", "re", ".", "sub", "(", "r'^_*'", ",", "''", ",", "gvc_id", ")", "for", "vslc_id", "in", "gvc_parts", ":", "# add the vslc to the gvc", "geno", ".", "addVSLCtoParent", "(", "vslc_id", ",", "gvc_id", ")", "# build the gvc label", "vslc_label", "=", "self", ".", "id_label_map", "[", "vslc_id", "]", "if", "vslc_label", "is", "not", "None", ":", "gvc_labels", "+=", "[", "vslc_label", "]", "else", ":", "gvc_labels", "+=", "[", "vslc_id", "]", "gvc_labels", ".", "sort", "(", ")", "gvc_label", "=", "'; '", ".", "join", "(", "gvc_labels", ")", "# add the gvc to the id-label hash", "self", ".", "id_label_map", "[", "gvc_id", "]", "=", "gvc_label", "# add the gvc", "model", ".", "addIndividualToGraph", "(", "gvc_id", ",", "gvc_label", ",", "self", ".", "globaltt", "[", "'genomic_variation_complement'", "]", ")", "elif", "len", "(", "gvc_parts", ")", "==", "1", ":", "# assign the vslc to be also a gvc", "vslc_id", "=", "gvc_parts", "[", "0", "]", "gvc_id", "=", "vslc_id", "gvc_label", "=", "self", ".", "id_label_map", "[", "vslc_id", "]", "model", ".", "addType", "(", "vslc_id", ",", "self", ".", "globaltt", "[", "'genomic_variation_complement'", "]", ")", "else", ":", "gvc_id", "=", "None", "gvc_label", "=", "''", "LOG", ".", "error", "(", "\"No GVC parts for %s\"", ",", "gt", ")", "if", "gt", "in", "self", ".", "genotype_backgrounds", ":", "background_id", "=", "self", ".", "genotype_backgrounds", "[", "gt", "]", "if", "background_id", "in", "self", ".", "id_label_map", ":", "background_label", "=", "self", ".", "id_label_map", "[", "background_id", "]", "else", ":", "background_label", "=", "background_id", "LOG", ".", "error", "(", "\"We don't have the label for %s stored\"", ",", "background_id", ")", "else", ":", "background_num", "=", "re", ".", "sub", "(", "r'ZFIN:'", ",", "''", ",", "gt", ")", "background_id", "=", "'_:bkgd-'", "+", "background_num", "background_label", "=", "'n.s. ('", "+", "background_num", "+", "')'", "background_desc", "=", "'This genomic background is unknown. 
'", "+", "'This is a placeholder background for '", "+", "gt", "+", "'.'", "# there is no background for this genotype;", "# need to add the taxon to this one!", "# make an anonymous background for this genotype", "geno", ".", "addGenomicBackground", "(", "background_id", ",", "background_label", ",", "None", ",", "background_desc", ")", "geno", ".", "addGenomicBackgroundToGenotype", "(", "background_id", ",", "gt", ")", "background_label", "=", "'n.s.'", "geno", ".", "addTaxon", "(", "taxon_id", ",", "background_id", ")", "genotype_name", "=", "gvc_label", "+", "' ['", "+", "background_label", "+", "']'", "geno", ".", "addGenotype", "(", "gt", ",", "genotype_name", ")", "self", ".", "id_label_map", "[", "gt", "]", "=", "genotype_name", "# Add the GVC to the genotype", "geno", ".", "addParts", "(", "gvc_id", ",", "gt", ",", "self", ".", "globaltt", "[", "'has_variant_part'", "]", ")", "# end of gvc loop", "# end of genotype loop", "# TODO this is almost complete;", "# deficiencies with >1 locus deleted are still not right", "LOG", ".", "info", "(", "\"Finished building genotype labels\"", ")", "LOG", ".", "info", "(", "\"Done with genotypes\"", ")", "return" ]
42.169951
20.79064
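The core of the parser above is the partonomy hash and the deterministic blank-node id for each VSLC. A toy walk-through of just that assembly; the hash structure and the id-stripping regex come from the code above, while the ZFIN-style identifiers are made up:

import re

# genotype -> locus -> allele pair, as described in the comments above
geno_hash = {
    "ZFIN:ZDB-GENO-1": {
        "ZFIN:ZDB-GENE-1": ["ZFIN:ZDB-ALT-1", "ZFIN:ZDB-ALT-1"],  # homozygous pair
    },
}

for genotype_id, loci in geno_hash.items():
    for gene_id, alleles in loci.items():
        allele1 = alleles[0]
        allele2 = alleles[1] if len(alleles) > 1 else "WT"
        vslc_id = "_:" + re.sub(r"(ZFIN)?:", "", "-".join((gene_id, allele1, allele2)))
        zygosity = "homozygous" if allele1 == allele2 else "heterozygous"
        print(genotype_id, vslc_id, zygosity)
# -> ZFIN:ZDB-GENO-1 _:ZDB-GENE-1-ZDB-ALT-1-ZDB-ALT-1 homozygous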
def distance_drive(self, x, y, angle): """Call this from your :func:`PhysicsEngine.update_sim` function. Will update the robot's position on the simulation field. This moves the robot some relative distance and angle from its current position. :param x: Feet to move the robot in the x direction :param y: Feet to move the robot in the y direction :param angle: Radians to turn the robot """ with self._lock: self.vx += x self.vy += y self.angle += angle c = math.cos(self.angle) s = math.sin(self.angle) self.x += x * c - y * s self.y += x * s + y * c self._update_gyros(angle)
[ "def", "distance_drive", "(", "self", ",", "x", ",", "y", ",", "angle", ")", ":", "with", "self", ".", "_lock", ":", "self", ".", "vx", "+=", "x", "self", ".", "vy", "+=", "y", "self", ".", "angle", "+=", "angle", "c", "=", "math", ".", "cos", "(", "self", ".", "angle", ")", "s", "=", "math", ".", "sin", "(", "self", ".", "angle", ")", "self", ".", "x", "+=", "x", "*", "c", "-", "y", "*", "s", "self", ".", "y", "+=", "x", "*", "s", "+", "y", "*", "c", "self", ".", "_update_gyros", "(", "angle", ")" ]
33.782609
16.73913
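The rotation in distance_drive is worth isolating: the heading is updated first, and the relative (x, y) displacement is then rotated by the new heading before being added to the field position. A pure-function sketch of that math (advance is a hypothetical name):

import math

def advance(pose, dx, dy, dangle):
    x, y, angle = pose
    angle += dangle                   # turn first, as in the method above
    c, s = math.cos(angle), math.sin(angle)
    return (x + dx * c - dy * s,      # rotate the displacement into
            y + dx * s + dy * c,      # field coordinates, then translate
            angle)

pose = advance((0.0, 0.0, 0.0), 1.0, 0.0, math.pi / 2)
print(pose)  # ~ (0.0, 1.0, 1.5708): after a 90-degree turn, "forward" is +y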
def zeq_magic(meas_file='measurements.txt', spec_file='',crd='s',input_dir_path='.', angle=0, n_plots=5, save_plots=True, fmt="svg", interactive=False, specimen="", samp_file='samples.txt', contribution=None,fignum=1): """ zeq_magic makes zijderveld and equal area plots for magic formatted measurements files. Parameters ---------- meas_file : str input measurement file spec_file : str input specimen interpretation file samp_file : str input sample orientations file crd : str coordinate system [s,g,t] for specimen, geographic, tilt corrected g,t options require a sample file with specimen and bedding orientation input_dir_path : str input directory of meas_file, default "." angle : float angle of X direction with respect to specimen X n_plots : int, default 5 maximum number of plots to make if you want to make all possible plots, specify "all" save_plots : bool, default True if True, create and save all requested plots fmt : str, default "svg" format for figures, [svg, jpg, pdf, png] interactive : bool, default False interactively plot and display for each specimen (this is best used on the command line only) specimen : str, default "" specimen name to plot samp_file : str, default 'samples.txt' name of samples file contribution : cb.Contribution, default None if provided, use Contribution object instead of reading in data from files fignum : matplotlib figure number """ def plot_interpretations(ZED, spec_container, this_specimen, this_specimen_measurements, datablock): if cb.is_null(spec_container) or cb.is_null(this_specimen_measurements) or cb.is_null(datablock): return ZED if 'method_codes' not in spec_container.df.columns: return ZED prior_spec_data = spec_container.get_records_for_code( 'LP-DIR', strict_match=False) # look up all prior directional interpretations prior_specimen_interpretations=[] if not len(prior_spec_data): return ZED mpars = {"specimen_direction_type": "Error"} if len(prior_spec_data): prior_specimen_interpretations = prior_spec_data[prior_spec_data['specimen'].astype(str) == this_specimen] #.str.match(this_specimen) == True] if len(prior_specimen_interpretations): if len(prior_specimen_interpretations)>0: beg_pcas = pd.to_numeric( prior_specimen_interpretations.meas_step_min.values).tolist() end_pcas = pd.to_numeric( prior_specimen_interpretations.meas_step_max.values).tolist() spec_methods = prior_specimen_interpretations.method_codes.tolist() # step through all prior interpretations and plot them for ind in range(len(beg_pcas)): spec_meths = spec_methods[ind].split(':') for m in spec_meths: if 'DE-BFL' in m: calculation_type = 'DE-BFL' # best fit line if 'DE-BFP' in m: calculation_type = 'DE-BFP' # best fit plane if 'DE-FM' in m: calculation_type = 'DE-FM' # fisher mean if 'DE-BFL-A' in m: calculation_type = 'DE-BFL-A' # anchored best fit line treatments = pd.to_numeric(this_specimen_measurements.treatment).tolist() if len(beg_pcas)!=0: try: # getting the starting and ending points start, end = treatments.index(beg_pcas[ind]), treatments.index(end_pcas[ind]) mpars = pmag.domean( datablock, start, end, calculation_type) except ValueError as ex: mpars['specimen_direction_type'] = "Error" try: if beg_pcas[ind] == 0: start = 0 else: start = treatments.index(beg_pcas[ind]) if end_pcas[ind] == 0: end = 0 else: end = treatments.index(end_pcas[ind]) mpars = pmag.domean( datablock, start, end, calculation_type) except ValueError: mpars['specimen_direction_type'] = "Error" # calculate direction/plane if mpars["specimen_direction_type"] != "Error": # put it on the plot 
pmagplotlib.plot_dir(ZED, mpars, datablock, angle) #if interactive: # pmagplotlib.draw_figs(ZED) else: print('\n-W- Specimen {} record contains invalid start/stop bounds:'.format(this_specimen)) print(prior_spec_data.loc[this_specimen][['meas_step_min', 'meas_step_max']]) print('\n Measurement records:') cols = list(set(['treat_ac_field', 'treat_temp']).intersection(this_specimen_measurements.columns)) print(this_specimen_measurements[cols]) print('\n Data will be plotted without interpretations\n') return ZED def make_plots(spec, cnt, meas_df, spec_container, samp_container=None): # get sample data for orientation if spec_container: try: samps = spec_container.df.loc[spec, 'sample'] except KeyError: samps = "" samp_df = [] if isinstance(samps, int) or isinstance(samps, float) or isinstance(samps, np.int64): if np.isnan(samps): samp = "" samp_df = [] else: samp = str(samps) samp_container.df.index = samp_container.df.index.astype(str) samp_df = samp_container.df[samp_container.df.index == samp] elif isinstance(samps, type(None)): samp = "" samp_df = [] elif len(samps): if isinstance(samps, str): samp = samps else: samp = samps.iloc[0] samp_df = samp_container.df[samp_container.df.index == samp] else: samp_df = [] # we can make the figure dictionary that pmagplotlib likes: ZED = {'eqarea': cnt, 'zijd': cnt+1, 'demag': cnt+2} # make datablock # get the relevant data spec_df = meas_df[meas_df.specimen == s] # remove ARM data spec_df = spec_df[- spec_df.method_codes.str.contains( 'LP-*[\w]*-ARM')] # split data into NRM, thermal, and af dataframes spec_df_nrm = spec_df[spec_df.method_codes.str.contains( 'LT-NO')] # get the NRM data spec_df_th = spec_df[spec_df.method_codes.str.contains( 'LT-T-Z')] # zero field thermal demag steps try: cond = spec_df.method_codes.str.contains('(^|[\s\:])LT-PTRM') spec_df_th = spec_df_th[-cond] # get rid of some pTRM steps except ValueError: keep_inds = [] n = 0 for ind, row in spec_df_th.copy().iterrows(): if 'LT-PTRM' in row['method_codes'] and 'ALT-PTRM' not in row['method_codes']: keep_inds.append(n) else: pass n += 1 if len(keep_inds) < n: spec_df_th = spec_df_th.iloc[keep_inds] spec_df_af = spec_df[spec_df.method_codes.str.contains('LT-AF-Z')] this_spec_meas_df = None datablock = None if (not len(spec_df_th.index) > 1) and (not len(spec_df_af.index) > 1): return if len(spec_df_th.index) > 1: # this is a thermal run this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_th]) # make sure all decs/incs are filled in n_rows = len(this_spec_meas_df) this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment']) if n_rows > len(this_spec_meas_df): print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df))) # geographic transformation if coord != "-1" and len(samp_df): this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord) units = 'K' # units are kelvin try: this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float) this_spec_meas_df['treat_temp'] = this_spec_meas_df['treat_temp'].astype(float) except: print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec)) return datablock = this_spec_meas_df[['treat_temp', 'dir_dec', 'dir_inc', 'magn_moment', 'blank', 'quality']].values.tolist() ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units) if len(spec_df_af.index) > 1: # this is an af run this_spec_meas_df = pd.concat([spec_df_nrm, spec_df_af]) # make sure 
all decs/incs are filled in n_rows = len(this_spec_meas_df) this_spec_meas_df = this_spec_meas_df.dropna(how='any', subset=['dir_dec', 'dir_inc', 'magn_moment']) if n_rows > len(this_spec_meas_df): print('-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'.format(s, n_rows - len(this_spec_meas_df))) # geographic transformation if coord != "-1" and len(samp_df): this_spec_meas_df = transform_to_geographic(this_spec_meas_df, samp_df, samp, coord) units = 'T' # these are AF data try: this_spec_meas_df['magn_moment'] = this_spec_meas_df['magn_moment'].astype(float) this_spec_meas_df['treat_ac_field'] = this_spec_meas_df['treat_ac_field'].astype(float) except: print('-W- There are malformed or missing data for specimen {}, skipping'.format(spec)) return datablock = this_spec_meas_df[['treat_ac_field', 'dir_dec', 'dir_inc', 'magn_moment', 'blank', 'quality']].values.tolist() ZED = pmagplotlib.plot_zed(ZED, datablock, angle, s, units) return plot_interpretations(ZED, spec_container, s, this_spec_meas_df, datablock) if interactive: save_plots = False # read in MagIC formatted data if contribution object not provided if not isinstance(contribution, cb.Contribution): input_dir_path = os.path.realpath(input_dir_path) file_path = pmag.resolve_file_name(meas_file, input_dir_path) # read in magic formatted data if not os.path.exists(file_path): print('No such file:', file_path) return False, [] custom_filenames = {'measurements': file_path, 'specimens': spec_file, 'samples': samp_file} contribution = cb.Contribution(input_dir_path, custom_filenames=custom_filenames, read_tables=['measurements', 'specimens', 'contribution', 'samples']) if pmagplotlib.isServer: try: contribution.propagate_location_to_samples() contribution.propagate_location_to_specimens() contribution.propagate_location_to_measurements() except KeyError as ex: pass meas_container = contribution.tables['measurements'] meas_df = contribution.tables['measurements'].df # #meas_df=pd.read_csv(file_path, sep='\t', header=1) spec_container = contribution.tables.get('specimens', None) samp_container = contribution.tables.get('samples', None) #if not spec_file: # spec_file = os.path.join(os.path.split(file_path)[0], "specimens.txt") #if os.path.exists(spec_file): # spec_container = cb.MagicDataFrame(spec_file, dtype="specimens") #else: # spec_container = None meas_df['blank'] = "" # this is a dummy variable expected by plotZED if 'treat_ac_field' in meas_df.columns: # create 'treatment' column. # uses treat_temp if treat_ac_field is missing OR zero. 
# (have to take this into account for plotting later) if 'treat_temp' in meas_df.columns: meas_df['treatment'] = meas_df['treat_ac_field'].where( cond=meas_df['treat_ac_field'].astype(bool), other=meas_df['treat_temp']) else: meas_df['treatment'] = meas_df['treat_ac_field'] else: meas_df['treatment'] = meas_df['treat_temp'] if crd == "s": coord = "-1" elif crd == "t": coord = "100" else: coord = "0" specimens = meas_df.specimen.unique() # list of specimen names if len(specimens) == 0: print('there are no data for plotting') return False, [] # check measurement table for req'd fields missing = [] reqd_cols_present = meas_df.columns.intersection(['dir_dec', 'dir_inc', 'magn_moment']) for col in ['dir_dec', 'dir_inc', 'magn_moment']: if col not in reqd_cols_present: missing.append(col) if missing: print('-W- Missing required column(s) {}, cannot run zeq_magic'.format(', '.join(missing))) return False, [] cnt = fignum if n_plots != "all": if len(specimens) > n_plots: specimens = specimens[:n_plots] saved = [] if specimen: specimens = [specimen] for s in specimens: ZED = make_plots(s, cnt, meas_df, spec_container, samp_container) if not ZED: if pmagplotlib.verbose: print('No plots could be created for specimen:', s) continue titles = {key: s + "_" + key + "." + fmt for key in ZED} if pmagplotlib.isServer: titles = {} titles['eqarea'] = 'Equal Area Plot' titles['zijd'] = 'Zijderveld Plot' titles['demag'] = 'Demagnetization Plot' con_id = "" if 'contribution' in contribution.tables: if 'id' in contribution.tables['contribution'].df.columns: con_id = contribution.tables['contribution'].df['id'].values[0] pmagplotlib.add_borders(ZED, titles, con_id=con_id) for title in titles: # try to get the full hierarchy for plot names df_slice = meas_container.df[meas_container.df['specimen'] == s] location = str(meas_container.get_name('location', df_slice)) site = str(meas_container.get_name('site', df_slice)) sample = str(meas_container.get_name('sample', df_slice)) # add coord here! filename = 'LO:_'+location+'_SI:_'+site+'_SA:_'+sample + \ '_SP:_'+str(s)+'_CO:_' + '_TY:_'+title+'_.png' titles[title] = filename if save_plots: saved.extend(pmagplotlib.save_plots(ZED, titles)) elif interactive: pmagplotlib.draw_figs(ZED) ans = pmagplotlib.save_or_quit() if ans == 'a': saved.extend(pmagplotlib.save_plots(ZED, titles)) else: continue else: cnt += 3 return True, saved
[ "def", "zeq_magic", "(", "meas_file", "=", "'measurements.txt'", ",", "spec_file", "=", "''", ",", "crd", "=", "'s'", ",", "input_dir_path", "=", "'.'", ",", "angle", "=", "0", ",", "n_plots", "=", "5", ",", "save_plots", "=", "True", ",", "fmt", "=", "\"svg\"", ",", "interactive", "=", "False", ",", "specimen", "=", "\"\"", ",", "samp_file", "=", "'samples.txt'", ",", "contribution", "=", "None", ",", "fignum", "=", "1", ")", ":", "def", "plot_interpretations", "(", "ZED", ",", "spec_container", ",", "this_specimen", ",", "this_specimen_measurements", ",", "datablock", ")", ":", "if", "cb", ".", "is_null", "(", "spec_container", ")", "or", "cb", ".", "is_null", "(", "this_specimen_measurements", ")", "or", "cb", ".", "is_null", "(", "datablock", ")", ":", "return", "ZED", "if", "'method_codes'", "not", "in", "spec_container", ".", "df", ".", "columns", ":", "return", "ZED", "prior_spec_data", "=", "spec_container", ".", "get_records_for_code", "(", "'LP-DIR'", ",", "strict_match", "=", "False", ")", "# look up all prior directional interpretations", "prior_specimen_interpretations", "=", "[", "]", "if", "not", "len", "(", "prior_spec_data", ")", ":", "return", "ZED", "mpars", "=", "{", "\"specimen_direction_type\"", ":", "\"Error\"", "}", "if", "len", "(", "prior_spec_data", ")", ":", "prior_specimen_interpretations", "=", "prior_spec_data", "[", "prior_spec_data", "[", "'specimen'", "]", ".", "astype", "(", "str", ")", "==", "this_specimen", "]", "#.str.match(this_specimen) == True]", "if", "len", "(", "prior_specimen_interpretations", ")", ":", "if", "len", "(", "prior_specimen_interpretations", ")", ">", "0", ":", "beg_pcas", "=", "pd", ".", "to_numeric", "(", "prior_specimen_interpretations", ".", "meas_step_min", ".", "values", ")", ".", "tolist", "(", ")", "end_pcas", "=", "pd", ".", "to_numeric", "(", "prior_specimen_interpretations", ".", "meas_step_max", ".", "values", ")", ".", "tolist", "(", ")", "spec_methods", "=", "prior_specimen_interpretations", ".", "method_codes", ".", "tolist", "(", ")", "# step through all prior interpretations and plot them", "for", "ind", "in", "range", "(", "len", "(", "beg_pcas", ")", ")", ":", "spec_meths", "=", "spec_methods", "[", "ind", "]", ".", "split", "(", "':'", ")", "for", "m", "in", "spec_meths", ":", "if", "'DE-BFL'", "in", "m", ":", "calculation_type", "=", "'DE-BFL'", "# best fit line", "if", "'DE-BFP'", "in", "m", ":", "calculation_type", "=", "'DE-BFP'", "# best fit plane", "if", "'DE-FM'", "in", "m", ":", "calculation_type", "=", "'DE-FM'", "# fisher mean", "if", "'DE-BFL-A'", "in", "m", ":", "calculation_type", "=", "'DE-BFL-A'", "# anchored best fit line", "treatments", "=", "pd", ".", "to_numeric", "(", "this_specimen_measurements", ".", "treatment", ")", ".", "tolist", "(", ")", "if", "len", "(", "beg_pcas", ")", "!=", "0", ":", "try", ":", "# getting the starting and ending points", "start", ",", "end", "=", "treatments", ".", "index", "(", "beg_pcas", "[", "ind", "]", ")", ",", "treatments", ".", "index", "(", "end_pcas", "[", "ind", "]", ")", "mpars", "=", "pmag", ".", "domean", "(", "datablock", ",", "start", ",", "end", ",", "calculation_type", ")", "except", "ValueError", "as", "ex", ":", "mpars", "[", "'specimen_direction_type'", "]", "=", "\"Error\"", "try", ":", "if", "beg_pcas", "[", "ind", "]", "==", "0", ":", "start", "=", "0", "else", ":", "start", "=", "treatments", ".", "index", "(", "beg_pcas", "[", "ind", "]", ")", "if", "end_pcas", "[", "ind", "]", "==", "0", ":", "end", "=", "0", "else", ":", "end", "=", 
"treatments", ".", "index", "(", "end_pcas", "[", "ind", "]", ")", "mpars", "=", "pmag", ".", "domean", "(", "datablock", ",", "start", ",", "end", ",", "calculation_type", ")", "except", "ValueError", ":", "mpars", "[", "'specimen_direction_type'", "]", "=", "\"Error\"", "# calculate direction/plane", "if", "mpars", "[", "\"specimen_direction_type\"", "]", "!=", "\"Error\"", ":", "# put it on the plot", "pmagplotlib", ".", "plot_dir", "(", "ZED", ",", "mpars", ",", "datablock", ",", "angle", ")", "#if interactive:", "# pmagplotlib.draw_figs(ZED)", "else", ":", "print", "(", "'\\n-W- Specimen {} record contains invalid start/stop bounds:'", ".", "format", "(", "this_specimen", ")", ")", "print", "(", "prior_spec_data", ".", "loc", "[", "this_specimen", "]", "[", "[", "'meas_step_min'", ",", "'meas_step_max'", "]", "]", ")", "print", "(", "'\\n Measurement records:'", ")", "cols", "=", "list", "(", "set", "(", "[", "'treat_ac_field'", ",", "'treat_temp'", "]", ")", ".", "intersection", "(", "this_specimen_measurements", ".", "columns", ")", ")", "print", "(", "this_specimen_measurements", "[", "cols", "]", ")", "print", "(", "'\\n Data will be plotted without interpretations\\n'", ")", "return", "ZED", "def", "make_plots", "(", "spec", ",", "cnt", ",", "meas_df", ",", "spec_container", ",", "samp_container", "=", "None", ")", ":", "# get sample data for orientation", "if", "spec_container", ":", "try", ":", "samps", "=", "spec_container", ".", "df", ".", "loc", "[", "spec", ",", "'sample'", "]", "except", "KeyError", ":", "samps", "=", "\"\"", "samp_df", "=", "[", "]", "if", "isinstance", "(", "samps", ",", "int", ")", "or", "isinstance", "(", "samps", ",", "float", ")", "or", "isinstance", "(", "samps", ",", "np", ".", "int64", ")", ":", "if", "np", ".", "isnan", "(", "samps", ")", ":", "samp", "=", "\"\"", "samp_df", "=", "[", "]", "else", ":", "samp", "=", "str", "(", "samps", ")", "samp_container", ".", "df", ".", "index", "=", "samp_container", ".", "df", ".", "index", ".", "astype", "(", "str", ")", "samp_df", "=", "samp_container", ".", "df", "[", "samp_container", ".", "df", ".", "index", "==", "samp", "]", "elif", "isinstance", "(", "samps", ",", "type", "(", "None", ")", ")", ":", "samp", "=", "\"\"", "samp_df", "=", "[", "]", "elif", "len", "(", "samps", ")", ":", "if", "isinstance", "(", "samps", ",", "str", ")", ":", "samp", "=", "samps", "else", ":", "samp", "=", "samps", ".", "iloc", "[", "0", "]", "samp_df", "=", "samp_container", ".", "df", "[", "samp_container", ".", "df", ".", "index", "==", "samp", "]", "else", ":", "samp_df", "=", "[", "]", "# we can make the figure dictionary that pmagplotlib likes:", "ZED", "=", "{", "'eqarea'", ":", "cnt", ",", "'zijd'", ":", "cnt", "+", "1", ",", "'demag'", ":", "cnt", "+", "2", "}", "# make datablock", "# get the relevant data", "spec_df", "=", "meas_df", "[", "meas_df", ".", "specimen", "==", "s", "]", "# remove ARM data", "spec_df", "=", "spec_df", "[", "-", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LP-*[\\w]*-ARM'", ")", "]", "# split data into NRM, thermal, and af dataframes", "spec_df_nrm", "=", "spec_df", "[", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LT-NO'", ")", "]", "# get the NRM data", "spec_df_th", "=", "spec_df", "[", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LT-T-Z'", ")", "]", "# zero field thermal demag steps", "try", ":", "cond", "=", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'(^|[\\s\\:])LT-PTRM'", ")", 
"spec_df_th", "=", "spec_df_th", "[", "-", "cond", "]", "# get rid of some pTRM steps", "except", "ValueError", ":", "keep_inds", "=", "[", "]", "n", "=", "0", "for", "ind", ",", "row", "in", "spec_df_th", ".", "copy", "(", ")", ".", "iterrows", "(", ")", ":", "if", "'LT-PTRM'", "in", "row", "[", "'method_codes'", "]", "and", "'ALT-PTRM'", "not", "in", "row", "[", "'method_codes'", "]", ":", "keep_inds", ".", "append", "(", "n", ")", "else", ":", "pass", "n", "+=", "1", "if", "len", "(", "keep_inds", ")", "<", "n", ":", "spec_df_th", "=", "spec_df_th", ".", "iloc", "[", "keep_inds", "]", "spec_df_af", "=", "spec_df", "[", "spec_df", ".", "method_codes", ".", "str", ".", "contains", "(", "'LT-AF-Z'", ")", "]", "this_spec_meas_df", "=", "None", "datablock", "=", "None", "if", "(", "not", "len", "(", "spec_df_th", ".", "index", ")", ">", "1", ")", "and", "(", "not", "len", "(", "spec_df_af", ".", "index", ")", ">", "1", ")", ":", "return", "if", "len", "(", "spec_df_th", ".", "index", ")", ">", "1", ":", "# this is a thermal run", "this_spec_meas_df", "=", "pd", ".", "concat", "(", "[", "spec_df_nrm", ",", "spec_df_th", "]", ")", "# make sure all decs/incs are filled in", "n_rows", "=", "len", "(", "this_spec_meas_df", ")", "this_spec_meas_df", "=", "this_spec_meas_df", ".", "dropna", "(", "how", "=", "'any'", ",", "subset", "=", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ")", "if", "n_rows", ">", "len", "(", "this_spec_meas_df", ")", ":", "print", "(", "'-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'", ".", "format", "(", "s", ",", "n_rows", "-", "len", "(", "this_spec_meas_df", ")", ")", ")", "# geographic transformation", "if", "coord", "!=", "\"-1\"", "and", "len", "(", "samp_df", ")", ":", "this_spec_meas_df", "=", "transform_to_geographic", "(", "this_spec_meas_df", ",", "samp_df", ",", "samp", ",", "coord", ")", "units", "=", "'K'", "# units are kelvin", "try", ":", "this_spec_meas_df", "[", "'magn_moment'", "]", "=", "this_spec_meas_df", "[", "'magn_moment'", "]", ".", "astype", "(", "float", ")", "this_spec_meas_df", "[", "'treat_temp'", "]", "=", "this_spec_meas_df", "[", "'treat_temp'", "]", ".", "astype", "(", "float", ")", "except", ":", "print", "(", "'-W- There are malformed or missing data for specimen {}, skipping'", ".", "format", "(", "spec", ")", ")", "return", "datablock", "=", "this_spec_meas_df", "[", "[", "'treat_temp'", ",", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", ",", "'blank'", ",", "'quality'", "]", "]", ".", "values", ".", "tolist", "(", ")", "ZED", "=", "pmagplotlib", ".", "plot_zed", "(", "ZED", ",", "datablock", ",", "angle", ",", "s", ",", "units", ")", "if", "len", "(", "spec_df_af", ".", "index", ")", ">", "1", ":", "# this is an af run", "this_spec_meas_df", "=", "pd", ".", "concat", "(", "[", "spec_df_nrm", ",", "spec_df_af", "]", ")", "# make sure all decs/incs are filled in", "n_rows", "=", "len", "(", "this_spec_meas_df", ")", "this_spec_meas_df", "=", "this_spec_meas_df", ".", "dropna", "(", "how", "=", "'any'", ",", "subset", "=", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ")", "if", "n_rows", ">", "len", "(", "this_spec_meas_df", ")", ":", "print", "(", "'-W- Some dec/inc/moment data were missing for specimen {}, so {} measurement row(s) were excluded'", ".", "format", "(", "s", ",", "n_rows", "-", "len", "(", "this_spec_meas_df", ")", ")", ")", "# geographic transformation", "if", "coord", "!=", "\"-1\"", "and", "len", "(", "samp_df", ")", ":", 
"this_spec_meas_df", "=", "transform_to_geographic", "(", "this_spec_meas_df", ",", "samp_df", ",", "samp", ",", "coord", ")", "units", "=", "'T'", "# these are AF data", "try", ":", "this_spec_meas_df", "[", "'magn_moment'", "]", "=", "this_spec_meas_df", "[", "'magn_moment'", "]", ".", "astype", "(", "float", ")", "this_spec_meas_df", "[", "'treat_ac_field'", "]", "=", "this_spec_meas_df", "[", "'treat_ac_field'", "]", ".", "astype", "(", "float", ")", "except", ":", "print", "(", "'-W- There are malformed or missing data for specimen {}, skipping'", ".", "format", "(", "spec", ")", ")", "return", "datablock", "=", "this_spec_meas_df", "[", "[", "'treat_ac_field'", ",", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", ",", "'blank'", ",", "'quality'", "]", "]", ".", "values", ".", "tolist", "(", ")", "ZED", "=", "pmagplotlib", ".", "plot_zed", "(", "ZED", ",", "datablock", ",", "angle", ",", "s", ",", "units", ")", "return", "plot_interpretations", "(", "ZED", ",", "spec_container", ",", "s", ",", "this_spec_meas_df", ",", "datablock", ")", "if", "interactive", ":", "save_plots", "=", "False", "# read in MagIC formatted data if contribution object not provided", "if", "not", "isinstance", "(", "contribution", ",", "cb", ".", "Contribution", ")", ":", "input_dir_path", "=", "os", ".", "path", ".", "realpath", "(", "input_dir_path", ")", "file_path", "=", "pmag", ".", "resolve_file_name", "(", "meas_file", ",", "input_dir_path", ")", "# read in magic formatted data", "if", "not", "os", ".", "path", ".", "exists", "(", "file_path", ")", ":", "print", "(", "'No such file:'", ",", "file_path", ")", "return", "False", ",", "[", "]", "custom_filenames", "=", "{", "'measurements'", ":", "file_path", ",", "'specimens'", ":", "spec_file", ",", "'samples'", ":", "samp_file", "}", "contribution", "=", "cb", ".", "Contribution", "(", "input_dir_path", ",", "custom_filenames", "=", "custom_filenames", ",", "read_tables", "=", "[", "'measurements'", ",", "'specimens'", ",", "'contribution'", ",", "'samples'", "]", ")", "if", "pmagplotlib", ".", "isServer", ":", "try", ":", "contribution", ".", "propagate_location_to_samples", "(", ")", "contribution", ".", "propagate_location_to_specimens", "(", ")", "contribution", ".", "propagate_location_to_measurements", "(", ")", "except", "KeyError", "as", "ex", ":", "pass", "meas_container", "=", "contribution", ".", "tables", "[", "'measurements'", "]", "meas_df", "=", "contribution", ".", "tables", "[", "'measurements'", "]", ".", "df", "#", "#meas_df=pd.read_csv(file_path, sep='\\t', header=1)", "spec_container", "=", "contribution", ".", "tables", ".", "get", "(", "'specimens'", ",", "None", ")", "samp_container", "=", "contribution", ".", "tables", ".", "get", "(", "'samples'", ",", "None", ")", "#if not spec_file:", "# spec_file = os.path.join(os.path.split(file_path)[0], \"specimens.txt\")", "#if os.path.exists(spec_file):", "# spec_container = cb.MagicDataFrame(spec_file, dtype=\"specimens\")", "#else:", "# spec_container = None", "meas_df", "[", "'blank'", "]", "=", "\"\"", "# this is a dummy variable expected by plotZED", "if", "'treat_ac_field'", "in", "meas_df", ".", "columns", ":", "# create 'treatment' column.", "# uses treat_temp if treat_ac_field is missing OR zero.", "# (have to take this into account for plotting later)", "if", "'treat_temp'", "in", "meas_df", ".", "columns", ":", "meas_df", "[", "'treatment'", "]", "=", "meas_df", "[", "'treat_ac_field'", "]", ".", "where", "(", "cond", "=", "meas_df", "[", "'treat_ac_field'", "]", ".", 
"astype", "(", "bool", ")", ",", "other", "=", "meas_df", "[", "'treat_temp'", "]", ")", "else", ":", "meas_df", "[", "'treatment'", "]", "=", "meas_df", "[", "'treat_ac_field'", "]", "else", ":", "meas_df", "[", "'treatment'", "]", "=", "meas_df", "[", "'treat_temp'", "]", "if", "crd", "==", "\"s\"", ":", "coord", "=", "\"-1\"", "elif", "crd", "==", "\"t\"", ":", "coord", "=", "\"100\"", "else", ":", "coord", "=", "\"0\"", "specimens", "=", "meas_df", ".", "specimen", ".", "unique", "(", ")", "# list of specimen names", "if", "len", "(", "specimens", ")", "==", "0", ":", "print", "(", "'there are no data for plotting'", ")", "return", "False", ",", "[", "]", "# check measurement table for req'd fields", "missing", "=", "[", "]", "reqd_cols_present", "=", "meas_df", ".", "columns", ".", "intersection", "(", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ")", "for", "col", "in", "[", "'dir_dec'", ",", "'dir_inc'", ",", "'magn_moment'", "]", ":", "if", "col", "not", "in", "reqd_cols_present", ":", "missing", ".", "append", "(", "col", ")", "if", "missing", ":", "print", "(", "'-W- Missing required column(s) {}, cannot run zeq_magic'", ".", "format", "(", "', '", ".", "join", "(", "missing", ")", ")", ")", "return", "False", ",", "[", "]", "cnt", "=", "fignum", "if", "n_plots", "!=", "\"all\"", ":", "if", "len", "(", "specimens", ")", ">", "n_plots", ":", "specimens", "=", "specimens", "[", ":", "n_plots", "]", "saved", "=", "[", "]", "if", "specimen", ":", "specimens", "=", "[", "specimen", "]", "for", "s", "in", "specimens", ":", "ZED", "=", "make_plots", "(", "s", ",", "cnt", ",", "meas_df", ",", "spec_container", ",", "samp_container", ")", "if", "not", "ZED", ":", "if", "pmagplotlib", ".", "verbose", ":", "print", "(", "'No plots could be created for specimen:'", ",", "s", ")", "continue", "titles", "=", "{", "key", ":", "s", "+", "\"_\"", "+", "key", "+", "\".\"", "+", "fmt", "for", "key", "in", "ZED", "}", "if", "pmagplotlib", ".", "isServer", ":", "titles", "=", "{", "}", "titles", "[", "'eqarea'", "]", "=", "'Equal Area Plot'", "titles", "[", "'zijd'", "]", "=", "'Zijderveld Plot'", "titles", "[", "'demag'", "]", "=", "'Demagnetization Plot'", "con_id", "=", "\"\"", "if", "'contribution'", "in", "contribution", ".", "tables", ":", "if", "'id'", "in", "contribution", ".", "tables", "[", "'contribution'", "]", ".", "df", ".", "columns", ":", "con_id", "=", "contribution", ".", "tables", "[", "'contribution'", "]", ".", "df", "[", "'id'", "]", ".", "values", "[", "0", "]", "pmagplotlib", ".", "add_borders", "(", "ZED", ",", "titles", ",", "con_id", "=", "con_id", ")", "for", "title", "in", "titles", ":", "# try to get the full hierarchy for plot names", "df_slice", "=", "meas_container", ".", "df", "[", "meas_container", ".", "df", "[", "'specimen'", "]", "==", "s", "]", "location", "=", "str", "(", "meas_container", ".", "get_name", "(", "'location'", ",", "df_slice", ")", ")", "site", "=", "str", "(", "meas_container", ".", "get_name", "(", "'site'", ",", "df_slice", ")", ")", "sample", "=", "str", "(", "meas_container", ".", "get_name", "(", "'sample'", ",", "df_slice", ")", ")", "# add coord here!", "filename", "=", "'LO:_'", "+", "location", "+", "'_SI:_'", "+", "site", "+", "'_SA:_'", "+", "sample", "+", "'_SP:_'", "+", "str", "(", "s", ")", "+", "'_CO:_'", "+", "'_TY:_'", "+", "title", "+", "'_.png'", "titles", "[", "title", "]", "=", "filename", "if", "save_plots", ":", "saved", ".", "extend", "(", "pmagplotlib", ".", "save_plots", "(", "ZED", ",", "titles", ")", 
")", "elif", "interactive", ":", "pmagplotlib", ".", "draw_figs", "(", "ZED", ")", "ans", "=", "pmagplotlib", ".", "save_or_quit", "(", ")", "if", "ans", "==", "'a'", ":", "saved", ".", "extend", "(", "pmagplotlib", ".", "save_plots", "(", "ZED", ",", "titles", ")", ")", "else", ":", "continue", "else", ":", "cnt", "+=", "3", "return", "True", ",", "saved" ]
49.015385
22.96
def prefix_shared_name_attributes(meta_graph, absolute_import_scope):
  """Prefixes the shared_name attributes of nodes, in place."""
  shared_name_attr = "shared_name"
  for node in meta_graph.graph_def.node:
    shared_name_value = node.attr.get(shared_name_attr, None)
    if shared_name_value and shared_name_value.HasField("s"):
      if shared_name_value.s:
        node.attr[shared_name_attr].s = tf.compat.as_bytes(
            prepend_name_scope(
                shared_name_value.s, import_scope=absolute_import_scope))
[ "def", "prefix_shared_name_attributes", "(", "meta_graph", ",", "absolute_import_scope", ")", ":", "shared_name_attr", "=", "\"shared_name\"", "for", "node", "in", "meta_graph", ".", "graph_def", ".", "node", ":", "shared_name_value", "=", "node", ".", "attr", ".", "get", "(", "shared_name_attr", ",", "None", ")", "if", "shared_name_value", "and", "shared_name_value", ".", "HasField", "(", "\"s\"", ")", ":", "if", "shared_name_value", ".", "s", ":", "node", ".", "attr", "[", "shared_name_attr", "]", ".", "s", "=", "tf", ".", "compat", ".", "as_bytes", "(", "prepend_name_scope", "(", "shared_name_value", ".", "s", ",", "import_scope", "=", "absolute_import_scope", ")", ")" ]
51.5
14.9
def summary(self): """ m.summary() -- Return a one-line text summary of the motif and its metrics. """ m = self txt = "%-34s (Bits: %5.2f MAP: %7.2f D: %5.3f %3d) E: %7.3f"%( m, m.totalbits, m.MAP, m.seeddist, m.seednum, nlog10(m.pvalue)) if m.binomial!=None: txt = txt + ' Bi: %6.2f'%(nlog10(m.binomial)) if m.church != None: txt = txt + ' ch: %6.2f'%(nlog10(m.church)) if m.frac != None: txt = txt + ' f: %5.3f'%(m.frac) if m.E_site != None: txt = txt + ' Es: %6.2f'%(nlog10(m.E_site)) if m.E_seq != None: txt = txt + ' Eq: %6.2f'%(nlog10(m.E_seq)) if m.MNCP != None: txt = txt + ' mn: %6.2f'%(m.MNCP) if m.ROC_auc!= None: txt = txt + ' Ra: %6.4f'%(m.ROC_auc) if m.E_chi2 != None: if m.E_chi2 == 0: m.E_chi2=1e-20 txt = txt + ' x2: %5.2f'%(nlog10(m.E_chi2)) if m.CRA != None: txt = txt + ' cR: %6.4f'%(m.CRA) if m.Cfrac != None: txt = txt + ' Cf: %5.3f'%(m.Cfrac) if m.realpvalue != None: txt = txt + ' P: %6.4e'%(m.realpvalue) if m.kellis != None: txt = txt + ' k: %6.2f'%(m.kellis) if m.numbound : txt = txt + ' b: %3d'%(m.numbound) if m.nummotif : txt = txt + ' nG: %3d'%(m.nummotif) if m.numboundmotif : txt = txt + ' bn: %3d'%(m.numboundmotif) return txt
[ "def", "summary", "(", "self", ")", ":", "m", "=", "self", "txt", "=", "\"%-34s (Bits: %5.2f MAP: %7.2f D: %5.3f %3d) E: %7.3f\"", "%", "(", "m", ",", "m", ".", "totalbits", ",", "m", ".", "MAP", ",", "m", ".", "seeddist", ",", "m", ".", "seednum", ",", "nlog10", "(", "m", ".", "pvalue", ")", ")", "if", "m", ".", "binomial", "!=", "None", ":", "txt", "=", "txt", "+", "' Bi: %6.2f'", "%", "(", "nlog10", "(", "m", ".", "binomial", ")", ")", "if", "m", ".", "church", "!=", "None", ":", "txt", "=", "txt", "+", "' ch: %6.2f'", "%", "(", "nlog10", "(", "m", ".", "church", ")", ")", "if", "m", ".", "frac", "!=", "None", ":", "txt", "=", "txt", "+", "' f: %5.3f'", "%", "(", "m", ".", "frac", ")", "if", "m", ".", "E_site", "!=", "None", ":", "txt", "=", "txt", "+", "' Es: %6.2f'", "%", "(", "nlog10", "(", "m", ".", "E_site", ")", ")", "if", "m", ".", "E_seq", "!=", "None", ":", "txt", "=", "txt", "+", "' Eq: %6.2f'", "%", "(", "nlog10", "(", "m", ".", "E_seq", ")", ")", "if", "m", ".", "MNCP", "!=", "None", ":", "txt", "=", "txt", "+", "' mn: %6.2f'", "%", "(", "m", ".", "MNCP", ")", "if", "m", ".", "ROC_auc", "!=", "None", ":", "txt", "=", "txt", "+", "' Ra: %6.4f'", "%", "(", "m", ".", "ROC_auc", ")", "if", "m", ".", "E_chi2", "!=", "None", ":", "if", "m", ".", "E_chi2", "==", "0", ":", "m", ".", "E_chi2", "=", "1e-20", "txt", "=", "txt", "+", "' x2: %5.2f'", "%", "(", "nlog10", "(", "m", ".", "E_chi2", ")", ")", "if", "m", ".", "CRA", "!=", "None", ":", "txt", "=", "txt", "+", "' cR: %6.4f'", "%", "(", "m", ".", "CRA", ")", "if", "m", ".", "Cfrac", "!=", "None", ":", "txt", "=", "txt", "+", "' Cf: %5.3f'", "%", "(", "m", ".", "Cfrac", ")", "if", "m", ".", "realpvalue", "!=", "None", ":", "txt", "=", "txt", "+", "' P: %6.4e'", "%", "(", "m", ".", "realpvalue", ")", "if", "m", ".", "kellis", "!=", "None", ":", "txt", "=", "txt", "+", "' k: %6.2f'", "%", "(", "m", ".", "kellis", ")", "if", "m", ".", "numbound", ":", "txt", "=", "txt", "+", "' b: %3d'", "%", "(", "m", ".", "numbound", ")", "if", "m", ".", "nummotif", ":", "txt", "=", "txt", "+", "' nG: %3d'", "%", "(", "m", ".", "nummotif", ")", "if", "m", ".", "numboundmotif", ":", "txt", "=", "txt", "+", "' bn: %3d'", "%", "(", "m", ".", "numboundmotif", ")", "return", "txt" ]
53.769231
25.230769
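The conditional-append pattern above (build a base string, then tack on only the metrics that are set) is easy to reuse outside the Motif class. A minimal standalone sketch with hypothetical metric names, not the original API:

def one_line_summary(name, metrics):
    """Build a one-line summary, appending only the metrics that are set."""
    txt = "%-20s" % name
    if metrics.get("MAP") is not None:
        txt += " MAP: %7.2f" % metrics["MAP"]
    if metrics.get("pvalue") is not None:
        txt += " P: %6.4e" % metrics["pvalue"]
    if metrics.get("frac") is not None:
        txt += " f: %5.3f" % metrics["frac"]
    return txt

print(one_line_summary("motif_1", {"MAP": 12.5, "pvalue": 1e-8}))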
def on_preliminary_config_changed(self, config_m, prop_name, info): """Callback when a preliminary config value has been changed Mainly collects information, delegates handling further to _handle_config_update :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'preliminary_config' :param dict info: Information e.g. about the changed config key """ self.check_for_preliminary_config() method_name = info['method_name'] # __setitem__, __delitem__, clear, ... if method_name in ['__setitem__', '__delitem__']: config_key = info['args'][0] self._handle_config_update(config_m, config_key) # Probably the preliminary config has been cleared, update corresponding list stores elif config_m is self.core_config_model: self.update_core_config_list_store() self.update_libraries_list_store() else: self.update_gui_config_list_store() self.update_shortcut_settings()
[ "def", "on_preliminary_config_changed", "(", "self", ",", "config_m", ",", "prop_name", ",", "info", ")", ":", "self", ".", "check_for_preliminary_config", "(", ")", "method_name", "=", "info", "[", "'method_name'", "]", "# __setitem__, __delitem__, clear, ...", "if", "method_name", "in", "[", "'__setitem__'", ",", "'__delitem__'", "]", ":", "config_key", "=", "info", "[", "'args'", "]", "[", "0", "]", "self", ".", "_handle_config_update", "(", "config_m", ",", "config_key", ")", "# Probably the preliminary config has been cleared, update corresponding list stores", "elif", "config_m", "is", "self", ".", "core_config_model", ":", "self", ".", "update_core_config_list_store", "(", ")", "self", ".", "update_libraries_list_store", "(", ")", "else", ":", "self", ".", "update_gui_config_list_store", "(", ")", "self", ".", "update_shortcut_settings", "(", ")" ]
46.304348
22.608696
def set_flask_metadata(app, version, repository, description, api_version="1.0", name=None, auth=None, route=None): """ Sets metadata on the application to be returned via metadata routes. Parameters ---------- app : :class:`flask.Flask` instance Flask application for the microservice you're adding metadata to. version: `str` Version of your microservice. repository: `str` URL of the repository containing your microservice's source code. description: `str` Description of the microservice. api_version: `str`, optional Version of the SQuaRE service API framework. Defaults to '1.0'. name : `str`, optional Microservice name. Defaults to the Flask app name. If set, changes the Flask app name to match. auth : `dict`, `str`, or `None` The 'auth' parameter must be None, the empty string, the string 'none', or a dict containing a 'type' key, which must be 'none', 'basic', or 'bitly-proxy'. If the type is not 'none', there must also be a 'data' key containing a dict which holds authentication information appropriate to the authentication type. The legal non-dict 'auth' values are equivalent to a 'type' key of 'none'. route : `None`, `str`, or list of `str`, optional The 'route' parameter must be None, a string, or a list of strings. If supplied, each string will be prepended to the metadata route. Raises ------ TypeError If arguments are not of the appropriate type. ValueError If arguments are the right type but have illegal values. Returns ------- Nothing, but sets `app` metadata and decorates it with `/metadata` and `/v{app_version}/metadata` routes. """ errstr = set_flask_metadata.__doc__ if not isinstance(app, Flask): raise TypeError(errstr) if name is None: name = app.name app.config["NAME"] = name if app.name != name: app.name = name app.config["VERSION"] = version app.config["REPOSITORY"] = repository app.config["DESCRIPTION"] = description app.config["API_VERSION"] = api_version if not (isinstance(name, str) and isinstance(description, str) and isinstance(repository, str) and isinstance(version, str) and isinstance(api_version, str)): raise TypeError(errstr) if not (name and description and repository and version and api_version): raise ValueError(errstr) if auth is None or (isinstance(auth, str) and ((auth == "none") or (auth == ""))): auth = {"type": "none", "data": None} if not isinstance(auth, dict): raise TypeError(errstr) if "type" not in auth: raise ValueError(errstr) atp = auth["type"] if atp == "none": app.config["AUTH"] = {"type": "none", "data": None} else: if atp not in ["basic", "bitly-proxy"] or "data" not in auth: raise ValueError(errstr) app.config["AUTH"] = auth add_metadata_route(app, route)
[ "def", "set_flask_metadata", "(", "app", ",", "version", ",", "repository", ",", "description", ",", "api_version", "=", "\"1.0\"", ",", "name", "=", "None", ",", "auth", "=", "None", ",", "route", "=", "None", ")", ":", "errstr", "=", "set_flask_metadata", ".", "__doc__", "if", "not", "isinstance", "(", "app", ",", "Flask", ")", ":", "raise", "TypeError", "(", "errstr", ")", "if", "name", "is", "None", ":", "name", "=", "app", ".", "name", "app", ".", "config", "[", "\"NAME\"", "]", "=", "name", "if", "app", ".", "name", "!=", "name", ":", "app", ".", "name", "=", "name", "app", ".", "config", "[", "\"VERSION\"", "]", "=", "version", "app", ".", "config", "[", "\"REPOSITORY\"", "]", "=", "repository", "app", ".", "config", "[", "\"DESCRIPTION\"", "]", "=", "description", "app", ".", "config", "[", "\"API_VERSION\"", "]", "=", "api_version", "if", "not", "(", "isinstance", "(", "name", ",", "str", ")", "and", "isinstance", "(", "description", ",", "str", ")", "and", "isinstance", "(", "repository", ",", "str", ")", "and", "isinstance", "(", "version", ",", "str", ")", "and", "isinstance", "(", "api_version", ",", "str", ")", ")", ":", "raise", "TypeError", "(", "errstr", ")", "if", "not", "(", "name", "and", "description", "and", "repository", "and", "version", "and", "api_version", ")", ":", "raise", "ValueError", "(", "errstr", ")", "if", "auth", "is", "None", "or", "(", "isinstance", "(", "auth", ",", "str", ")", "and", "(", "(", "auth", "==", "\"none\"", ")", "or", "(", "auth", "==", "\"\"", ")", ")", ")", ":", "auth", "=", "{", "\"type\"", ":", "\"none\"", ",", "\"data\"", ":", "None", "}", "if", "not", "isinstance", "(", "auth", ",", "dict", ")", ":", "raise", "TypeError", "(", "errstr", ")", "if", "\"type\"", "not", "in", "auth", ":", "raise", "ValueError", "(", "errstr", ")", "atp", "=", "auth", "[", "\"type\"", "]", "if", "atp", "==", "\"none\"", ":", "app", ".", "config", "[", "\"AUTH\"", "]", "=", "{", "\"type\"", ":", "\"none\"", ",", "\"data\"", ":", "None", "}", "else", ":", "if", "atp", "not", "in", "[", "\"basic\"", ",", "\"bitly-proxy\"", "]", "or", "\"data\"", "not", "in", "auth", ":", "raise", "ValueError", "(", "errstr", ")", "app", ".", "config", "[", "\"AUTH\"", "]", "=", "auth", "add_metadata_route", "(", "app", ",", "route", ")" ]
36.172414
20.54023
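Typical usage of set_flask_metadata as documented above. A sketch assuming Flask is installed and the function is importable; the app name, repository URL and route list are hypothetical examples:

from flask import Flask

app = Flask("mymicroservice")
set_flask_metadata(app,
                   version="0.1.0",
                   repository="https://example.com/org/mymicroservice",
                   description="Demo microservice",
                   auth="none",          # equivalent to {"type": "none", "data": None}
                   route=["/api"])
# Per the docstring, the app now answers on /metadata and /v0.1.0/metadata,
# plus the same routes prefixed with each entry of `route`.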
async def create_tunnel_connection(self, req): """Create a tunnel connection """ tunnel_address = req.tunnel_address connection = await self.create_connection(tunnel_address) response = connection.current_consumer() for event in response.events().values(): event.clear() response.start(HttpTunnel(self, req)) await response.event('post_request').waiter() if response.status_code != 200: raise ConnectionRefusedError( 'Cannot connect to tunnel: status code %s' % response.status_code ) raw_sock = connection.transport.get_extra_info('socket') if raw_sock is None: raise RuntimeError('Transport without socket') # duplicate socket so we can close transport raw_sock = raw_sock.dup() connection.transport.close() await connection.event('connection_lost').waiter() self.sessions -= 1 self.requests_processed -= 1 # connection = await self.create_connection( sock=raw_sock, ssl=req.ssl(self), server_hostname=req.netloc ) return connection
[ "async", "def", "create_tunnel_connection", "(", "self", ",", "req", ")", ":", "tunnel_address", "=", "req", ".", "tunnel_address", "connection", "=", "await", "self", ".", "create_connection", "(", "tunnel_address", ")", "response", "=", "connection", ".", "current_consumer", "(", ")", "for", "event", "in", "response", ".", "events", "(", ")", ".", "values", "(", ")", ":", "event", ".", "clear", "(", ")", "response", ".", "start", "(", "HttpTunnel", "(", "self", ",", "req", ")", ")", "await", "response", ".", "event", "(", "'post_request'", ")", ".", "waiter", "(", ")", "if", "response", ".", "status_code", "!=", "200", ":", "raise", "ConnectionRefusedError", "(", "'Cannot connect to tunnel: status code %s'", "%", "response", ".", "status_code", ")", "raw_sock", "=", "connection", ".", "transport", ".", "get_extra_info", "(", "'socket'", ")", "if", "raw_sock", "is", "None", ":", "raise", "RuntimeError", "(", "'Transport without socket'", ")", "# duplicate socket so we can close transport", "raw_sock", "=", "raw_sock", ".", "dup", "(", ")", "connection", ".", "transport", ".", "close", "(", ")", "await", "connection", ".", "event", "(", "'connection_lost'", ")", ".", "waiter", "(", ")", "self", ".", "sessions", "-=", "1", "self", ".", "requests_processed", "-=", "1", "#", "connection", "=", "await", "self", ".", "create_connection", "(", "sock", "=", "raw_sock", ",", "ssl", "=", "req", ".", "ssl", "(", "self", ")", ",", "server_hostname", "=", "req", ".", "netloc", ")", "return", "connection" ]
40.206897
12.551724
def delete_message(self, queue, message): """ Delete a message from a queue. :type queue: A :class:`boto.sqs.queue.Queue` object :param queue: The Queue from which messages are read. :type message: A :class:`boto.sqs.message.Message` object :param message: The Message to be deleted :rtype: bool :return: True if successful, False otherwise. """ params = {'ReceiptHandle' : message.receipt_handle} return self.get_status('DeleteMessage', params, queue.id)
[ "def", "delete_message", "(", "self", ",", "queue", ",", "message", ")", ":", "params", "=", "{", "'ReceiptHandle'", ":", "message", ".", "receipt_handle", "}", "return", "self", ".", "get_status", "(", "'DeleteMessage'", ",", "params", ",", "queue", ".", "id", ")" ]
36.533333
17.2
def get_end_date(self, obj): """ Returns the end date for a model instance """ obj_date = getattr(obj, self.get_end_date_field()) try: obj_date = obj_date.date() except AttributeError: # It's a date rather than datetime, so we use it as is pass return obj_date
[ "def", "get_end_date", "(", "self", ",", "obj", ")", ":", "obj_date", "=", "getattr", "(", "obj", ",", "self", ".", "get_end_date_field", "(", ")", ")", "try", ":", "obj_date", "=", "obj_date", ".", "date", "(", ")", "except", "AttributeError", ":", "# It's a date rather than datetime, so we use it as is", "pass", "return", "obj_date" ]
31.090909
13.272727
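The try/except AttributeError trick above — call .date() and fall back when the value is already a plain date — works standalone; a minimal sketch:

import datetime

def as_date(value):
    """Return a date whether given a date or a datetime (same trick as above)."""
    try:
        return value.date()
    except AttributeError:
        # it's already a plain date, use it as is
        return value

print(as_date(datetime.datetime(2024, 1, 2, 15, 30)))  # 2024-01-02
print(as_date(datetime.date(2024, 1, 2)))              # 2024-01-02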
def list(self, binding_type=values.unset, limit=None, page_size=None): """ Lists UserBindingInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param UserBindingInstance.BindingType binding_type: The push technology used by the User Binding resources to read :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.chat.v2.service.user.user_binding.UserBindingInstance] """ return list(self.stream(binding_type=binding_type, limit=limit, page_size=page_size, ))
[ "def", "list", "(", "self", ",", "binding_type", "=", "values", ".", "unset", ",", "limit", "=", "None", ",", "page_size", "=", "None", ")", ":", "return", "list", "(", "self", ".", "stream", "(", "binding_type", "=", "binding_type", ",", "limit", "=", "limit", ",", "page_size", "=", "page_size", ",", ")", ")" ]
64.944444
37.944444
def _all_get_or_create_table(self, where, tablename, description, expectedrows=None): """Creates a new table, or if the table already exists, returns it.""" where_node = self._hdf5file.get_node(where) if not tablename in where_node: if not expectedrows is None: table = self._hdf5file.create_table(where=where_node, name=tablename, description=description, title=tablename, expectedrows=expectedrows, filters=self._all_get_filters()) else: table = self._hdf5file.create_table(where=where_node, name=tablename, description=description, title=tablename, filters=self._all_get_filters()) else: table = where_node._f_get_child(tablename) return table
[ "def", "_all_get_or_create_table", "(", "self", ",", "where", ",", "tablename", ",", "description", ",", "expectedrows", "=", "None", ")", ":", "where_node", "=", "self", ".", "_hdf5file", ".", "get_node", "(", "where", ")", "if", "not", "tablename", "in", "where_node", ":", "if", "not", "expectedrows", "is", "None", ":", "table", "=", "self", ".", "_hdf5file", ".", "create_table", "(", "where", "=", "where_node", ",", "name", "=", "tablename", ",", "description", "=", "description", ",", "title", "=", "tablename", ",", "expectedrows", "=", "expectedrows", ",", "filters", "=", "self", ".", "_all_get_filters", "(", ")", ")", "else", ":", "table", "=", "self", ".", "_hdf5file", ".", "create_table", "(", "where", "=", "where_node", ",", "name", "=", "tablename", ",", "description", "=", "description", ",", "title", "=", "tablename", ",", "filters", "=", "self", ".", "_all_get_filters", "(", ")", ")", "else", ":", "table", "=", "where_node", ".", "_f_get_child", "(", "tablename", ")", "return", "table" ]
53.833333
28.5
def convert_args(self, rem_path, args): """Splits the rest of a URL into its argument parts. The URL is assumed to start with the dynamic request prefix already removed. Parameters ---------- rem_path : string The URL to parse. The URL must start with the dynamic request prefix already removed. args : map The map to fill. Returns ------- args enriched with 'paths', an array containing the remaining path segments, 'query', a map containing the query fields and flags, and 'fragment' containing the fragment part as string. """ fragment_split = rem_path.split('#', 1) query_split = fragment_split[0].split('?', 1) segs = filter( lambda p: len(p) and p != '.', os.path.normpath(query_split[0]).split('/')) paths = [urlparse_unquote(p) for p in segs] query = self.convert_argmap(query_split[1]) \ if len(query_split) > 1 else {} args['paths'] = paths args['query'] = query args['fragment'] = urlparse_unquote(fragment_split[1]).decode('utf8') \ if len(fragment_split) > 1 else '' return args
[ "def", "convert_args", "(", "self", ",", "rem_path", ",", "args", ")", ":", "fragment_split", "=", "rem_path", ".", "split", "(", "'#'", ",", "1", ")", "query_split", "=", "fragment_split", "[", "0", "]", ".", "split", "(", "'?'", ",", "1", ")", "segs", "=", "filter", "(", "lambda", "p", ":", "len", "(", "p", ")", "and", "p", "!=", "'.'", ",", "os", ".", "path", ".", "normpath", "(", "query_split", "[", "0", "]", ")", ".", "split", "(", "'/'", ")", ")", "paths", "=", "[", "urlparse_unquote", "(", "p", ")", "for", "p", "in", "segs", "]", "query", "=", "self", ".", "convert_argmap", "(", "query_split", "[", "1", "]", ")", "if", "len", "(", "query_split", ")", ">", "1", "else", "{", "}", "args", "[", "'paths'", "]", "=", "paths", "args", "[", "'query'", "]", "=", "query", "args", "[", "'fragment'", "]", "=", "urlparse_unquote", "(", "fragment_split", "[", "1", "]", ")", ".", "decode", "(", "'utf8'", ")", "if", "len", "(", "fragment_split", ")", ">", "1", "else", "''", "return", "args" ]
38.0625
18.375
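A self-contained sketch of the same split order (fragment first, then query, then normalized path segments), using urllib.parse.unquote in place of the module's urlparse_unquote helper and leaving the query as a raw string rather than the map convert_argmap would build:

import os
from urllib.parse import unquote

def split_url(rem_path):
    fragment_split = rem_path.split('#', 1)
    query_split = fragment_split[0].split('?', 1)
    segs = [p for p in os.path.normpath(query_split[0]).split('/')
            if p and p != '.']
    return {
        'paths': [unquote(p) for p in segs],
        'query': query_split[1] if len(query_split) > 1 else '',
        'fragment': unquote(fragment_split[1]) if len(fragment_split) > 1 else '',
    }

print(split_url('a/b/../c?x=1&flag#sec%201'))
# {'paths': ['a', 'c'], 'query': 'x=1&flag', 'fragment': 'sec 1'}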
def get_best_candidate(self): """ Returns ---------- best_candidate : the best candidate hyper-parameters as defined by the acquisition function """ # TODO make this best mean response self.incumbent = self.surrogate.Y.max() # Objective function def z(x): # TODO make spread of points around x and take mean value. x = x.copy().reshape(-1, self.n_dims) y_mean, y_var = self.surrogate.predict(x) af = self._acquisition_function(y_mean=y_mean, y_var=y_var) # TODO make -1 dependent on flag in inputs for either max or minimization return (-1) * af # Optimization loop af_values = [] af_args = [] for i in range(self.n_iter): init = self._get_random_point() res = minimize(z, init, bounds=self.n_dims * [(0., 1.)], options={'maxiter': int(self.max_iter), 'disp': 0}) af_args.append(res.x) af_values.append(res.fun) # Choose the best af_values = np.array(af_values).flatten() af_args = np.array(af_args) best_index = int(np.argmin(af_values)) best_candidate = af_args[best_index] return best_candidate
[ "def", "get_best_candidate", "(", "self", ")", ":", "# TODO make this best mean response", "self", ".", "incumbent", "=", "self", ".", "surrogate", ".", "Y", ".", "max", "(", ")", "# Objective function", "def", "z", "(", "x", ")", ":", "# TODO make spread of points around x and take mean value.", "x", "=", "x", ".", "copy", "(", ")", ".", "reshape", "(", "-", "1", ",", "self", ".", "n_dims", ")", "y_mean", ",", "y_var", "=", "self", ".", "surrogate", ".", "predict", "(", "x", ")", "af", "=", "self", ".", "_acquisition_function", "(", "y_mean", "=", "y_mean", ",", "y_var", "=", "y_var", ")", "# TODO make -1 dependent on flag in inputs for either max or minimization", "return", "(", "-", "1", ")", "*", "af", "# Optimization loop", "af_values", "=", "[", "]", "af_args", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_iter", ")", ":", "init", "=", "self", ".", "_get_random_point", "(", ")", "res", "=", "minimize", "(", "z", ",", "init", ",", "bounds", "=", "self", ".", "n_dims", "*", "[", "(", "0.", ",", "1.", ")", "]", ",", "options", "=", "{", "'maxiter'", ":", "int", "(", "self", ".", "max_iter", ")", ",", "'disp'", ":", "0", "}", ")", "af_args", ".", "append", "(", "res", ".", "x", ")", "af_values", ".", "append", "(", "res", ".", "fun", ")", "# Choose the best", "af_values", "=", "np", ".", "array", "(", "af_values", ")", ".", "flatten", "(", ")", "af_args", "=", "np", ".", "array", "(", "af_args", ")", "best_index", "=", "int", "(", "np", ".", "argmin", "(", "af_values", ")", ")", "best_candidate", "=", "af_args", "[", "best_index", "]", "return", "best_candidate" ]
35.457143
17.742857
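The structure of get_best_candidate — random restarts of a bounded local minimization over the negated acquisition function — shown with a stand-in objective instead of a Gaussian-process surrogate; assumes numpy and scipy are available:

import numpy as np
from scipy.optimize import minimize

def toy_negative_acquisition(x):
    # stand-in for -acquisition(x); any smooth function works for the demo
    return float(np.sum((x - 0.3) ** 2))

n_dims, n_restarts = 2, 5
rng = np.random.default_rng(0)
best_x, best_f = None, np.inf
for _ in range(n_restarts):
    init = rng.random(n_dims)                       # random restart in [0, 1]^d
    res = minimize(toy_negative_acquisition, init,
                   bounds=n_dims * [(0.0, 1.0)])
    if res.fun < best_f:
        best_x, best_f = res.x, res.fun
print(best_x)  # ~ [0.3, 0.3]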
def refreshContents( self ): """ Refreshes the contents tab with the latest selection from the browser. """ item = self.uiContentsTREE.currentItem() if not isinstance(item, XdkEntryItem): return item.load() url = item.url() if url: self.gotoUrl(url)
[ "def", "refreshContents", "(", "self", ")", ":", "item", "=", "self", ".", "uiContentsTREE", ".", "currentItem", "(", ")", "if", "not", "isinstance", "(", "item", ",", "XdkEntryItem", ")", ":", "return", "item", ".", "load", "(", ")", "url", "=", "item", ".", "url", "(", ")", "if", "url", ":", "self", ".", "gotoUrl", "(", "url", ")" ]
28.333333
15.666667
def is_ancestor_of(self, other, inclusive=False): """ Class- or instance-level method which returns True if self is an ancestor (closer to the root) of other, else False. The optional flag `inclusive` controls whether self counts as an ancestor of itself. For example see: * :mod:`sqlalchemy_mptt.tests.cases.integrity.test_hierarchy_structure` """ if inclusive: return (self.tree_id == other.tree_id) \ & (self.left <= other.left) \ & (other.right <= self.right) return (self.tree_id == other.tree_id) \ & (self.left < other.left) \ & (other.right < self.right)
[ "def", "is_ancestor_of", "(", "self", ",", "other", ",", "inclusive", "=", "False", ")", ":", "if", "inclusive", ":", "return", "(", "self", ".", "tree_id", "==", "other", ".", "tree_id", ")", "&", "(", "self", ".", "left", "<=", "other", ".", "left", ")", "&", "(", "other", ".", "right", "<=", "self", ".", "right", ")", "return", "(", "self", ".", "tree_id", "==", "other", ".", "tree_id", ")", "&", "(", "self", ".", "left", "<", "other", ".", "left", ")", "&", "(", "other", ".", "right", "<", "self", ".", "right", ")" ]
41.625
15.8125
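The interval test behind is_ancestor_of comes from the nested-set (MPTT) encoding: an ancestor's (left, right) interval strictly contains the descendant's. A sketch on plain tuples instead of SQLAlchemy columns:

def is_ancestor(a, b, inclusive=False):
    """a and b are (tree_id, left, right) triples from a nested-set tree."""
    same_tree = a[0] == b[0]
    if inclusive:
        return same_tree and a[1] <= b[1] and b[2] <= a[2]
    return same_tree and a[1] < b[1] and b[2] < a[2]

root, child = (1, 1, 6), (1, 2, 3)
print(is_ancestor(root, child))         # True: [2, 3] lies inside [1, 6]
print(is_ancestor(child, child))        # False
print(is_ancestor(child, child, True))  # True with the inclusive flag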
def list_images(self): """ Return the list of available images for this node type :returns: Array of hash """ try: return list_images(self._NODE_TYPE) except OSError as e: raise aiohttp.web.HTTPConflict(text="Can not list images {}".format(e))
[ "def", "list_images", "(", "self", ")", ":", "try", ":", "return", "list_images", "(", "self", ".", "_NODE_TYPE", ")", "except", "OSError", "as", "e", ":", "raise", "aiohttp", ".", "web", ".", "HTTPConflict", "(", "text", "=", "\"Can not list images {}\"", ".", "format", "(", "e", ")", ")" ]
27.909091
19.909091
def _backup(self): """ Back up the database into its file. """ if self._authorization(): # We are authorized to work. # We back up the current state of the database. Dict(PyFunceble.INTERN["whois_db"]).to_json(self.whois_db_path)
[ "def", "_backup", "(", "self", ")", ":", "if", "self", ".", "_authorization", "(", ")", ":", "# We are authorized to work.", "# We backup the current state of the datbase.", "Dict", "(", "PyFunceble", ".", "INTERN", "[", "\"whois_db\"", "]", ")", ".", "to_json", "(", "self", ".", "whois_db_path", ")" ]
28.7
16.3
def _accumulateFrequencyCounts(values, freqCounts=None): """ Accumulate a list of values 'values' into the frequency counts 'freqCounts', and return the updated frequency counts For example, if values contained the following: [1,1,3,5,1,3,5], and the initial freqCounts was None, then the return value would be: [0,3,0,2,0,2] which corresponds to how many of each value we saw in the input, i.e. there were 0 0's, 3 1's, 0 2's, 2 3's, 0 4's, and 2 5's. If freqCounts is not None, the values will be added to the existing counts and the length of freqCounts will be automatically extended as necessary. Parameters: ----------------------------------------------- values: The values to accumulate into the frequency counts freqCounts: Accumulated frequency counts so far, or none """ # How big does our freqCounts vector need to be? values = numpy.array(values) numEntries = values.max() + 1 if freqCounts is not None: numEntries = max(numEntries, freqCounts.size) # Where do we accumulate the results? if freqCounts is not None: if freqCounts.size != numEntries: newCounts = numpy.zeros(numEntries, dtype='int32') newCounts[0:freqCounts.size] = freqCounts else: newCounts = freqCounts else: newCounts = numpy.zeros(numEntries, dtype='int32') # Accumulate the new values for v in values: newCounts[v] += 1 return newCounts
[ "def", "_accumulateFrequencyCounts", "(", "values", ",", "freqCounts", "=", "None", ")", ":", "# How big does our freqCounts vector need to be?", "values", "=", "numpy", ".", "array", "(", "values", ")", "numEntries", "=", "values", ".", "max", "(", ")", "+", "1", "if", "freqCounts", "is", "not", "None", ":", "numEntries", "=", "max", "(", "numEntries", ",", "freqCounts", ".", "size", ")", "# Where do we accumulate the results?", "if", "freqCounts", "is", "not", "None", ":", "if", "freqCounts", ".", "size", "!=", "numEntries", ":", "newCounts", "=", "numpy", ".", "zeros", "(", "numEntries", ",", "dtype", "=", "'int32'", ")", "newCounts", "[", "0", ":", "freqCounts", ".", "size", "]", "=", "freqCounts", "else", ":", "newCounts", "=", "freqCounts", "else", ":", "newCounts", "=", "numpy", ".", "zeros", "(", "numEntries", ",", "dtype", "=", "'int32'", ")", "# Accumulate the new values", "for", "v", "in", "values", ":", "newCounts", "[", "v", "]", "+=", "1", "return", "newCounts" ]
34.04878
21.560976
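A roughly equivalent accumulation can be written with numpy.bincount, using minlength so an existing counts vector can be added in. A sketch, not the original helper (which returns int32 and extends in place):

import numpy as np

def accumulate_counts(values, freq_counts=None):
    values = np.asarray(values)
    minlength = 0 if freq_counts is None else freq_counts.size
    new = np.bincount(values, minlength=minlength)
    if freq_counts is not None:
        # minlength guarantees new is at least as long as freq_counts
        new[:freq_counts.size] += freq_counts
    return new

prev = accumulate_counts([1, 1, 3, 5, 1, 3, 5])
print(prev)                                   # [0 3 0 2 0 2]
print(accumulate_counts([0, 6], freq_counts=prev))  # [1 3 0 2 0 2 1]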
def wrap(self, func): """ Wrap ``func`` to perform aggregation when ``func`` is called. Should be called with view instance methods. """ @six.wraps(func) def wrapper(*args, **kwargs): try: return self.aggregate() except KeyError: return func(*args, **kwargs) return wrapper
[ "def", "wrap", "(", "self", ",", "func", ")", ":", "@", "six", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "self", ".", "aggregate", "(", ")", "except", "KeyError", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
29.666667
12.416667
def update_attribute(self, attr, value): """Set the value of a workspace attribute.""" update = [fapi._attr_up(attr, value)] r = fapi.update_workspace_attributes(self.namespace, self.name, update, self.api_url) fapi._check_response_code(r, 200)
[ "def", "update_attribute", "(", "self", ",", "attr", ",", "value", ")", ":", "update", "=", "[", "fapi", ".", "_attr_up", "(", "attr", ",", "value", ")", "]", "r", "=", "fapi", ".", "update_workspace_attributes", "(", "self", ".", "namespace", ",", "self", ".", "name", ",", "update", ",", "self", ".", "api_url", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "200", ")" ]
52.666667
10.5
def mask_and_mean_loss(input_tensor, binary_tensor, axis=None): """ Mask a loss by using a tensor filled with 0 or 1 and average correctly. :param input_tensor: A float tensor of shape [batch_size, ...] representing the loss/cross_entropy :param binary_tensor: A float tensor of shape [batch_size, ...] representing the mask. :param axis: The dimensions to reduce. If None (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)). :return: A float tensor representing the masked loss, averaged over the unmasked entries. """ return mean_on_masked(mask_loss(input_tensor, binary_tensor), binary_tensor, axis=axis)
[ "def", "mask_and_mean_loss", "(", "input_tensor", ",", "binary_tensor", ",", "axis", "=", "None", ")", ":", "return", "mean_on_masked", "(", "mask_loss", "(", "input_tensor", ",", "binary_tensor", ")", ",", "binary_tensor", ",", "axis", "=", "axis", ")" ]
62.545455
35.818182
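The semantics of a masked mean in plain numpy — zero out the ignored positions, then divide by the number of kept ones. A sketch of the idea, not the module's mask_loss/mean_on_masked helpers (the epsilon guard against empty masks is an added assumption):

import numpy as np

def mask_and_mean(loss, mask, axis=None):
    masked = loss * mask                            # zero out ignored positions
    denom = np.maximum(mask.sum(axis=axis), 1e-8)   # avoid division by zero
    return masked.sum(axis=axis) / denom

loss = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])
mask = np.array([[1.0, 1.0, 0.0],
                 [0.0, 1.0, 1.0]])
print(mask_and_mean(loss, mask))          # (1+2+5+6)/4 = 3.5
print(mask_and_mean(loss, mask, axis=1))  # [1.5, 5.5]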
def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True
[ "def", "_setup_core", "(", "self", ")", ":", "if", "not", "self", ".", "ready", ":", "# First call. Initialize.", "self", ".", "functions", ",", "self", ".", "returners", ",", "self", ".", "function_errors", ",", "self", ".", "executors", "=", "self", ".", "_load_modules", "(", ")", "self", ".", "serial", "=", "salt", ".", "payload", ".", "Serial", "(", "self", ".", "opts", ")", "self", ".", "mod_opts", "=", "self", ".", "_prep_mod_opts", "(", ")", "# self.matcher = Matcher(self.opts, self.functions)", "self", ".", "matchers", "=", "salt", ".", "loader", ".", "matchers", "(", "self", ".", "opts", ")", "self", ".", "beacons", "=", "salt", ".", "beacons", ".", "Beacon", "(", "self", ".", "opts", ",", "self", ".", "functions", ")", "uid", "=", "salt", ".", "utils", ".", "user", ".", "get_uid", "(", "user", "=", "self", ".", "opts", ".", "get", "(", "'user'", ",", "None", ")", ")", "self", ".", "proc_dir", "=", "get_proc_dir", "(", "self", ".", "opts", "[", "'cachedir'", "]", ",", "uid", "=", "uid", ")", "self", ".", "grains_cache", "=", "self", ".", "opts", "[", "'grains'", "]", "self", ".", "ready", "=", "True" ]
48.352941
20.588235
def check_events(self): '''check for events, calling registered callbacks as needed''' while self.event_count() > 0: event = self.get_event() for callback in self._callbacks: callback(event)
[ "def", "check_events", "(", "self", ")", ":", "while", "self", ".", "event_count", "(", ")", ">", "0", ":", "event", "=", "self", ".", "get_event", "(", ")", "for", "callback", "in", "self", ".", "_callbacks", ":", "callback", "(", "event", ")" ]
40.166667
11.166667
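A minimal standalone version of the drain-and-dispatch loop in check_events, with hypothetical event_count/get_event equivalents backed by a deque:

from collections import deque

class EventBus:
    def __init__(self):
        self._events = deque()
        self._callbacks = []

    def subscribe(self, callback):
        self._callbacks.append(callback)

    def post(self, event):
        self._events.append(event)

    def check_events(self):
        # drain the queue, fanning each event out to every registered callback
        while self._events:
            event = self._events.popleft()
            for callback in self._callbacks:
                callback(event)

bus = EventBus()
bus.subscribe(lambda e: print("got", e))
bus.post("heartbeat")
bus.check_events()  # got heartbeat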
def create_signed_entities_descriptor(entity_descriptors, security_context, valid_for=None): """ :param entity_descriptors: the entity descriptors to put in in an EntitiesDescriptor tag and sign :param security_context: security context for the signature :param valid_for: number of hours the metadata should be valid :return: the signed XML document :type entity_descriptors: Sequence[saml2.md.EntityDescriptor]] :type security_context: saml2.sigver.SecurityContext :type valid_for: Optional[int] """ entities_desc, xmldoc = entities_descriptor(entity_descriptors, valid_for=valid_for, name=None, ident=None, sign=True, secc=security_context) if not valid_instance(entities_desc): raise ValueError("Could not construct valid EntitiesDescriptor tag") return xmldoc
[ "def", "create_signed_entities_descriptor", "(", "entity_descriptors", ",", "security_context", ",", "valid_for", "=", "None", ")", ":", "entities_desc", ",", "xmldoc", "=", "entities_descriptor", "(", "entity_descriptors", ",", "valid_for", "=", "valid_for", ",", "name", "=", "None", ",", "ident", "=", "None", ",", "sign", "=", "True", ",", "secc", "=", "security_context", ")", "if", "not", "valid_instance", "(", "entities_desc", ")", ":", "raise", "ValueError", "(", "\"Could not construct valid EntitiesDescriptor tag\"", ")", "return", "xmldoc" ]
50.235294
27.411765
def rename(self, name): """Renames app to given name.""" r = self._h._http_resource( method='PUT', resource=('apps', self.name), data={'app[name]': name} ) return r.ok
[ "def", "rename", "(", "self", ",", "name", ")", ":", "r", "=", "self", ".", "_h", ".", "_http_resource", "(", "method", "=", "'PUT'", ",", "resource", "=", "(", "'apps'", ",", "self", ".", "name", ")", ",", "data", "=", "{", "'app[name]'", ":", "name", "}", ")", "return", "r", ".", "ok" ]
25.333333
14.888889
def delete_branch(self, repo, branch, prefix): """ Deletes a branch. :param repo: github.Repository :param branch: string name of the branch to delete :param prefix: string prefix the branch name must start with (safety check) """ # make sure that the name of the branch begins with pyup. assert branch.startswith(prefix) obj = repo.branches.get(branch) obj.delete()
[ "def", "delete_branch", "(", "self", ",", "repo", ",", "branch", ",", "prefix", ")", ":", "# make sure that the name of the branch begins with pyup.", "assert", "branch", ".", "startswith", "(", "prefix", ")", "obj", "=", "repo", ".", "branches", ".", "get", "(", "branch", ")", "obj", ".", "delete", "(", ")" ]
35.3
8.7
def xrefs(self, nid, bidirectional=False): """ Fetches xrefs for a node Arguments --------- nid : str Node identifier for entity to be queried bidirectional : bool If True, include nodes xreffed to nid Return ------ list[str] """ if self.xref_graph is not None: xg = self.xref_graph if nid not in xg: return [] if bidirectional: return list(xg.neighbors(nid)) else: return [x for x in xg.neighbors(nid) if xg[nid][x][0]['source'] == nid] return []
[ "def", "xrefs", "(", "self", ",", "nid", ",", "bidirectional", "=", "False", ")", ":", "if", "self", ".", "xref_graph", "is", "not", "None", ":", "xg", "=", "self", ".", "xref_graph", "if", "nid", "not", "in", "xg", ":", "return", "[", "]", "if", "bidirectional", ":", "return", "list", "(", "xg", ".", "neighbors", "(", "nid", ")", ")", "else", ":", "return", "[", "x", "for", "x", "in", "xg", ".", "neighbors", "(", "nid", ")", "if", "xg", "[", "nid", "]", "[", "x", "]", "[", "0", "]", "[", "'source'", "]", "==", "nid", "]", "return", "[", "]" ]
25.6
18.16
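The xg[nid][x][0]['source'] == nid filter assumes a networkx MultiGraph whose edges carry a 'source' attribute recording which side asserted the xref. A small demo of the directional vs. bidirectional views, with made-up identifiers:

import networkx as nx

xg = nx.MultiGraph()
xg.add_edge('GO:1', 'X:1', source='GO:1')   # xref asserted on GO:1
xg.add_edge('Y:2', 'GO:1', source='Y:2')    # xref asserted on Y:2

nid = 'GO:1'
outgoing = [x for x in xg.neighbors(nid)
            if xg[nid][x][0]['source'] == nid]
print(outgoing)                   # ['X:1'] -- only edges asserted on nid
print(list(xg.neighbors(nid)))    # ['X:1', 'Y:2'] -- the bidirectional view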
def remove_file(fpath, verbose=None, ignore_errors=True, dryrun=False, quiet=QUIET): """ Removes a file """ if verbose is None: verbose = not quiet if dryrun: if verbose: print('[util_path] Dryrem %r' % fpath) return else: try: os.remove(fpath) if verbose: print('[util_path] Removed %r' % fpath) except OSError: print('[util_path.remove_file] Misrem %r' % fpath) #warnings.warn('OSError: %s,\n Could not delete %s' % (str(e), fpath)) if not ignore_errors: raise return False return True
[ "def", "remove_file", "(", "fpath", ",", "verbose", "=", "None", ",", "ignore_errors", "=", "True", ",", "dryrun", "=", "False", ",", "quiet", "=", "QUIET", ")", ":", "if", "verbose", "is", "None", ":", "verbose", "=", "not", "quiet", "if", "dryrun", ":", "if", "verbose", ":", "print", "(", "'[util_path] Dryrem %r'", "%", "fpath", ")", "return", "else", ":", "try", ":", "os", ".", "remove", "(", "fpath", ")", "if", "verbose", ":", "print", "(", "'[util_path] Removed %r'", "%", "fpath", ")", "except", "OSError", ":", "print", "(", "'[util_path.remove_file] Misrem %r'", "%", "fpath", ")", "#warnings.warn('OSError: %s,\\n Could not delete %s' % (str(e), fpath))", "if", "not", "ignore_errors", ":", "raise", "return", "False", "return", "True" ]
31.380952
19.285714
def nsDefs(self): """Get the namespace definitions of a node """ ret = libxml2mod.xmlNodeGetNsDefs(self._o) if ret is None:return None __tmp = xmlNs(_obj=ret) return __tmp
[ "def", "nsDefs", "(", "self", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNodeGetNsDefs", "(", "self", ".", "_o", ")", "if", "ret", "is", "None", ":", "return", "None", "__tmp", "=", "xmlNs", "(", "_obj", "=", "ret", ")", "return", "__tmp" ]
32.333333
11.333333
def iter_languages(self, number=-1, etag=None): """Iterate over the programming languages used in the repository. :param int number: (optional), number of languages to return. Default: -1 returns all used languages :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of tuples """ url = self._build_url('languages', base_url=self._api) return self._iter(int(number), url, tuple, etag=etag)
[ "def", "iter_languages", "(", "self", ",", "number", "=", "-", "1", ",", "etag", "=", "None", ")", ":", "url", "=", "self", ".", "_build_url", "(", "'languages'", ",", "base_url", "=", "self", ".", "_api", ")", "return", "self", ".", "_iter", "(", "int", "(", "number", ")", ",", "url", ",", "tuple", ",", "etag", "=", "etag", ")" ]
46.090909
17.181818
def retrieve_public_key(user_repo): """Retrieve the public key from the Travis API. The Travis API response is accessed as JSON so that Travis-Encrypt can easily find the public key that is to be passed to cryptography's load_pem_public_key function. Due to issues with some public keys being returned from the Travis API as PKCS8 encoded, the key is returned with RSA removed from the header and footer. Parameters ---------- user_repo: str the repository in the format of 'username/repository' Returns ------- response: str the public RSA key of the username's repository Raises ------ InvalidCredentialsError raised when an invalid 'username/repository' is given """ url = 'https://api.travis-ci.org/repos/{}/key' .format(user_repo) response = requests.get(url) try: return response.json()['key'].replace(' RSA ', ' ') except KeyError: username, repository = user_repo.split('/') raise InvalidCredentialsError("Either the username: '{}' or the repository: '{}' does not exist. Please enter a valid username or repository name. The username and repository name are both case sensitive." .format(username, repository))
[ "def", "retrieve_public_key", "(", "user_repo", ")", ":", "url", "=", "'https://api.travis-ci.org/repos/{}/key'", ".", "format", "(", "user_repo", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "try", ":", "return", "response", ".", "json", "(", ")", "[", "'key'", "]", ".", "replace", "(", "' RSA '", ",", "' '", ")", "except", "KeyError", ":", "username", ",", "repository", "=", "user_repo", ".", "split", "(", "'/'", ")", "raise", "InvalidCredentialsError", "(", "\"Either the username: '{}' or the repository: '{}' does not exist. Please enter a valid username or repository name. The username and repository name are both case sensitive.\"", ".", "format", "(", "username", ",", "repository", ")", ")" ]
38.15625
29.65625
def count_blank_positions(self): """ return a count of blank cells """ blanks = 0 for row_ndx in range(self.grid_height - 0): for col_ndx in range(self.grid_width - 0): if self.get_tile(row_ndx, col_ndx) == EMPTY: blanks += 1 return blanks
[ "def", "count_blank_positions", "(", "self", ")", ":", "blanks", "=", "0", "for", "row_ndx", "in", "range", "(", "self", ".", "grid_height", "-", "0", ")", ":", "for", "col_ndx", "in", "range", "(", "self", ".", "grid_width", "-", "0", ")", ":", "if", "self", ".", "get_tile", "(", "row_ndx", ",", "col_ndx", ")", "==", "EMPTY", ":", "blanks", "+=", "1", "return", "blanks" ]
32.6
10.6
def pexpireat(self, name, when): """ Set an expire flag on key ``name``. ``when`` can be represented as an integer representing unix time in milliseconds (unix time * 1000) or a Python datetime object. """ if isinstance(when, datetime.datetime): ms = int(when.microsecond / 1000) when = int(mod_time.mktime(when.timetuple())) * 1000 + ms return self.execute_command('PEXPIREAT', name, when)
[ "def", "pexpireat", "(", "self", ",", "name", ",", "when", ")", ":", "if", "isinstance", "(", "when", ",", "datetime", ".", "datetime", ")", ":", "ms", "=", "int", "(", "when", ".", "microsecond", "/", "1000", ")", "when", "=", "int", "(", "mod_time", ".", "mktime", "(", "when", ".", "timetuple", "(", ")", ")", ")", "*", "1000", "+", "ms", "return", "self", ".", "execute_command", "(", "'PEXPIREAT'", ",", "name", ",", "when", ")" ]
46.1
14.3
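The datetime-to-milliseconds conversion pexpireat performs, extracted into a standalone helper:

import datetime
import time

def to_unix_ms(when):
    """Mirror pexpireat's conversion of a datetime to epoch milliseconds."""
    if isinstance(when, datetime.datetime):
        ms = int(when.microsecond / 1000)
        return int(time.mktime(when.timetuple())) * 1000 + ms
    return when  # already an integer timestamp in milliseconds

print(to_unix_ms(datetime.datetime(2030, 1, 1, 12, 0, 0, 250000)))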
def set_imu_config(self, compass_enabled, gyro_enabled, accel_enabled): """ Enables and disables the gyroscope, accelerometer and/or magnetometer input to the orientation functions """ # If the consuming code always calls this just before reading the IMU # the IMU consistently fails to read. So prevent unnecessary calls to # IMU config functions using state variables self._init_imu() # Ensure imu is initialised if (not isinstance(compass_enabled, bool) or not isinstance(gyro_enabled, bool) or not isinstance(accel_enabled, bool)): raise TypeError('All set_imu_config parameters must be of boolean type') if self._compass_enabled != compass_enabled: self._compass_enabled = compass_enabled self._imu.setCompassEnable(self._compass_enabled) if self._gyro_enabled != gyro_enabled: self._gyro_enabled = gyro_enabled self._imu.setGyroEnable(self._gyro_enabled) if self._accel_enabled != accel_enabled: self._accel_enabled = accel_enabled self._imu.setAccelEnable(self._accel_enabled)
[ "def", "set_imu_config", "(", "self", ",", "compass_enabled", ",", "gyro_enabled", ",", "accel_enabled", ")", ":", "# If the consuming code always calls this just before reading the IMU", "# the IMU consistently fails to read. So prevent unnecessary calls to", "# IMU config functions using state variables", "self", ".", "_init_imu", "(", ")", "# Ensure imu is initialised", "if", "(", "not", "isinstance", "(", "compass_enabled", ",", "bool", ")", "or", "not", "isinstance", "(", "gyro_enabled", ",", "bool", ")", "or", "not", "isinstance", "(", "accel_enabled", ",", "bool", ")", ")", ":", "raise", "TypeError", "(", "'All set_imu_config parameters must be of boolean type'", ")", "if", "self", ".", "_compass_enabled", "!=", "compass_enabled", ":", "self", ".", "_compass_enabled", "=", "compass_enabled", "self", ".", "_imu", ".", "setCompassEnable", "(", "self", ".", "_compass_enabled", ")", "if", "self", ".", "_gyro_enabled", "!=", "gyro_enabled", ":", "self", ".", "_gyro_enabled", "=", "gyro_enabled", "self", ".", "_imu", ".", "setGyroEnable", "(", "self", ".", "_gyro_enabled", ")", "if", "self", ".", "_accel_enabled", "!=", "accel_enabled", ":", "self", ".", "_accel_enabled", "=", "accel_enabled", "self", ".", "_imu", ".", "setAccelEnable", "(", "self", ".", "_accel_enabled", ")" ]
41.392857
20.607143
def edits1(word): "All edits that are one edit away from `word`." letters = 'qwertyuiopasdfghjklzxcvbnm' splits = [(word[:i], word[i:]) for i in range(len(word) + 1)] print('splits = ', splits) deletes = [L + R[1:] for L, R in splits if R] print('deletes = ', deletes) transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R) > 1] print('transposes = ', transposes) replaces = [L + c + R[1:] for L, R in splits if R for c in letters] print('replaces = ', replaces) inserts = [L + c + R for L, R in splits for c in letters] print('inserts = ', inserts) print(deletes + transposes + replaces + inserts) print(len(set(deletes + transposes + replaces + inserts))) return deletes + transposes + replaces + inserts
[ "def", "edits1", "(", "word", ")", ":", "letters", "=", "'qwertyuiopasdfghjklzxcvbnm'", "splits", "=", "[", "(", "word", "[", ":", "i", "]", ",", "word", "[", "i", ":", "]", ")", "for", "i", "in", "range", "(", "len", "(", "word", ")", "+", "1", ")", "]", "print", "(", "'splits = '", ",", "splits", ")", "deletes", "=", "[", "L", "+", "R", "[", "1", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "R", "]", "print", "(", "'deletes = '", ",", "deletes", ")", "transposes", "=", "[", "L", "+", "R", "[", "1", "]", "+", "R", "[", "0", "]", "+", "R", "[", "2", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "len", "(", "R", ")", ">", "1", "]", "print", "(", "'transposes = '", ",", "transposes", ")", "replaces", "=", "[", "L", "+", "c", "+", "R", "[", "1", ":", "]", "for", "L", ",", "R", "in", "splits", "if", "R", "for", "c", "in", "letters", "]", "print", "(", "'replaces = '", ",", "replaces", ")", "inserts", "=", "[", "L", "+", "c", "+", "R", "for", "L", ",", "R", "in", "splits", "for", "c", "in", "letters", "]", "print", "(", "'inserts = '", ",", "inserts", ")", "print", "(", "deletes", "+", "transposes", "+", "replaces", "+", "inserts", ")", "print", "(", "len", "(", "set", "(", "deletes", "+", "transposes", "+", "replaces", "+", "inserts", ")", ")", ")", "return", "deletes", "+", "transposes", "+", "replaces", "+", "inserts" ]
47.6875
14.8125
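For a word of length n, the lists above contain n deletions, n-1 transpositions, 26n replacements and 26(n+1) insertions — 54n + 25 raw candidates before deduplication. Usage, assuming edits1 from above is in scope (note it also emits its debug prints):

candidates = edits1("cat")
print(len(candidates))        # 54*3 + 25 = 187 raw candidates
print(len(set(candidates)))   # fewer once duplicates are removed
print("cast" in candidates)   # True: one insertion away from "cat"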
def get_network_versions(self, name: str) -> Set[str]: """Return all of the versions of a network with the given name.""" return { version for version, in self.session.query(Network.version).filter(Network.name == name).all() }
[ "def", "get_network_versions", "(", "self", ",", "name", ":", "str", ")", "->", "Set", "[", "str", "]", ":", "return", "{", "version", "for", "version", ",", "in", "self", ".", "session", ".", "query", "(", "Network", ".", "version", ")", ".", "filter", "(", "Network", ".", "name", "==", "name", ")", ".", "all", "(", ")", "}" ]
45
24.666667
def _find_parent_directory(directory, filename): """Find a directory in the parent tree containing a specific filename :param directory: directory to start the search from :param filename: filename to look for :returns: absolute directory path """ parent_directory = directory absolute_directory = '.' while absolute_directory != os.path.abspath(parent_directory): absolute_directory = os.path.abspath(parent_directory) if os.path.isfile(os.path.join(absolute_directory, filename)): return absolute_directory if os.path.isabs(parent_directory): parent_directory = os.path.join(os.path.dirname(parent_directory), '..', os.path.basename(parent_directory)) else: parent_directory = os.path.join('..', parent_directory) return os.path.abspath(directory)
[ "def", "_find_parent_directory", "(", "directory", ",", "filename", ")", ":", "parent_directory", "=", "directory", "absolute_directory", "=", "'.'", "while", "absolute_directory", "!=", "os", ".", "path", ".", "abspath", "(", "parent_directory", ")", ":", "absolute_directory", "=", "os", ".", "path", ".", "abspath", "(", "parent_directory", ")", "if", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "absolute_directory", ",", "filename", ")", ")", ":", "return", "absolute_directory", "if", "os", ".", "path", ".", "isabs", "(", "parent_directory", ")", ":", "parent_directory", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "parent_directory", ")", ",", "'..'", ",", "os", ".", "path", ".", "basename", "(", "parent_directory", ")", ")", "else", ":", "parent_directory", "=", "os", ".", "path", ".", "join", "(", "'..'", ",", "parent_directory", ")", "return", "os", ".", "path", ".", "abspath", "(", "directory", ")" ]
48.526316
16.526316
def _find_value(ret_dict, key, path=None): ''' PRIVATE METHOD Traverses a dictionary of dictionaries/lists to find key and return the value stored. TODO: this method doesn't really work very well, and it's not really very useful in its current state. The purpose for this method is to simplify parsing the JSON output so you can just pass the key you want to find and have it return the value. ret : dict<str,obj> The dictionary to search through. Typically this will be a dict returned from solr. key : str The key (str) to find in the dictionary Return: list<dict<str,obj>>:: [{path: value}] ''' if path is None: path = key else: path = "{0}:{1}".format(path, key) ret = [] for ikey, val in six.iteritems(ret_dict): if ikey == key: ret.append({path: val}) if isinstance(val, list): for item in val: if isinstance(item, dict): ret = ret + _find_value(item, key, path) if isinstance(val, dict): ret = ret + _find_value(val, key, path) return ret
[ "def", "_find_value", "(", "ret_dict", ",", "key", ",", "path", "=", "None", ")", ":", "if", "path", "is", "None", ":", "path", "=", "key", "else", ":", "path", "=", "\"{0}:{1}\"", ".", "format", "(", "path", ",", "key", ")", "ret", "=", "[", "]", "for", "ikey", ",", "val", "in", "six", ".", "iteritems", "(", "ret_dict", ")", ":", "if", "ikey", "==", "key", ":", "ret", ".", "append", "(", "{", "path", ":", "val", "}", ")", "if", "isinstance", "(", "val", ",", "list", ")", ":", "for", "item", "in", "val", ":", "if", "isinstance", "(", "item", ",", "dict", ")", ":", "ret", "=", "ret", "+", "_find_value", "(", "item", ",", "key", ",", "path", ")", "if", "isinstance", "(", "val", ",", "dict", ")", ":", "ret", "=", "ret", "+", "_find_value", "(", "val", ",", "key", ",", "path", ")", "return", "ret" ]
33.228571
19.571429
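Calling the search on a nested Solr-style response shows the path-prefix accumulation the docstring warns about; this assumes _find_value from above (and its six import) is in scope:

doc = {'responseHeader': {'status': 0},
       'response': {'numFound': 2, 'status': 1}}
print(_find_value(doc, 'status'))
# [{'status:status': 0}, {'status:status': 1}]
# the path prefix repeats the key at every level of recursion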
def get_list_cluster_admins(self): """Get list of cluster admins.""" response = self.request( url="cluster_admins", method='GET', expected_response_code=200 ) return response.json()
[ "def", "get_list_cluster_admins", "(", "self", ")", ":", "response", "=", "self", ".", "request", "(", "url", "=", "\"cluster_admins\"", ",", "method", "=", "'GET'", ",", "expected_response_code", "=", "200", ")", "return", "response", ".", "json", "(", ")" ]
26.888889
13.222222
def _build_queries_and_headers(self): """ Build a list of query information and headers (pseudo-folders) for consumption by the template. Strategy: Look for queries with titles of the form "something - else" (eg. with a ' - ' in the middle) and split on the ' - ', treating the left side as a "header" (or folder). Interleave the headers into the ListView's object_list as appropriate. Ignore headers that only have one child. The front end uses bootstrap's JS Collapse plugin, which necessitates generating CSS classes to map the header onto the child rows, hence the collapse_target variable. To make the return object homogeneous, convert the object_list models into dictionaries for interleaving with the header "objects". This necessitates special handling of 'created_at' and 'created_by_user' because model_to_dict doesn't include non-editable fields (created_at) and will give the int representation of the user instead of the string representation. :return: A list of model dictionaries representing all the query objects, interleaved with header dictionaries. """ dict_list = [] rendered_headers = [] pattern = re.compile('[\W_]+') headers = Counter([q.title.split(' - ')[0] for q in self.object_list]) for q in self.object_list: model_dict = model_to_dict(q) header = q.title.split(' - ')[0] collapse_target = pattern.sub('', header) if headers[header] > 1 and header not in rendered_headers: dict_list.append({'title': header, 'is_header': True, 'is_in_category': False, 'collapse_target': collapse_target, 'count': headers[header]}) rendered_headers.append(header) model_dict.update({'is_in_category': headers[header] > 1, 'collapse_target': collapse_target, 'created_at': q.created_at, 'is_header': False, 'run_count': q.run_count, 'created_by_user': six.text_type(q.created_by_user) if q.created_by_user else None}) dict_list.append(model_dict) return dict_list
[ "def", "_build_queries_and_headers", "(", "self", ")", ":", "dict_list", "=", "[", "]", "rendered_headers", "=", "[", "]", "pattern", "=", "re", ".", "compile", "(", "'[\\W_]+'", ")", "headers", "=", "Counter", "(", "[", "q", ".", "title", ".", "split", "(", "' - '", ")", "[", "0", "]", "for", "q", "in", "self", ".", "object_list", "]", ")", "for", "q", "in", "self", ".", "object_list", ":", "model_dict", "=", "model_to_dict", "(", "q", ")", "header", "=", "q", ".", "title", ".", "split", "(", "' - '", ")", "[", "0", "]", "collapse_target", "=", "pattern", ".", "sub", "(", "''", ",", "header", ")", "if", "headers", "[", "header", "]", ">", "1", "and", "header", "not", "in", "rendered_headers", ":", "dict_list", ".", "append", "(", "{", "'title'", ":", "header", ",", "'is_header'", ":", "True", ",", "'is_in_category'", ":", "False", ",", "'collapse_target'", ":", "collapse_target", ",", "'count'", ":", "headers", "[", "header", "]", "}", ")", "rendered_headers", ".", "append", "(", "header", ")", "model_dict", ".", "update", "(", "{", "'is_in_category'", ":", "headers", "[", "header", "]", ">", "1", ",", "'collapse_target'", ":", "collapse_target", ",", "'created_at'", ":", "q", ".", "created_at", ",", "'is_header'", ":", "False", ",", "'run_count'", ":", "q", ".", "run_count", ",", "'created_by_user'", ":", "six", ".", "text_type", "(", "q", ".", "created_by_user", ")", "if", "q", ".", "created_by_user", "else", "None", "}", ")", "dict_list", ".", "append", "(", "model_dict", ")", "return", "dict_list" ]
54.822222
32.822222
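A self-contained sketch of the grouping idea in `_build_queries_and_headers` above: titles of the form "Header - Name" are tallied with a Counter, and a header row is emitted once per group that has more than one member. All names below are illustrative, not part of the original view class.

import re
from collections import Counter

def group_by_header(titles):
    """Interleave header rows with item rows, mirroring the ListView logic."""
    pattern = re.compile(r'[\W_]+')  # strips non-word chars for a CSS-safe collapse target
    headers = Counter(t.split(' - ')[0] for t in titles)
    rows, rendered = [], set()
    for title in titles:
        header = title.split(' - ')[0]
        target = pattern.sub('', header)
        if headers[header] > 1 and header not in rendered:
            rows.append({'title': header, 'is_header': True,
                         'collapse_target': target, 'count': headers[header]})
            rendered.add(header)
        rows.append({'title': title, 'is_header': False,
                     'is_in_category': headers[header] > 1, 'collapse_target': target})
    return rows

print(group_by_header(['Sales - Weekly', 'Sales - Monthly', 'Ad hoc query']))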
def delete_snapshot_range(self, start_id, end_id): """Starts deleting the specified snapshot range. This is limited to linear snapshot lists, which means there may not be any other child snapshots other than the direct sequence between the start and end snapshot. If the start and end snapshot point to the same snapshot this method is completely equivalent to :py:func:`delete_snapshot` . See :py:class:`ISnapshot` for an introduction to snapshots. The conditions and many details are the same as with :py:func:`delete_snapshot` . This operation is generally faster than deleting snapshots one by one and often also needs less extra disk space before freeing up disk space by deleting the removed disk images corresponding to the snapshot. This API method is right now not implemented! in start_id of type str UUID of the first snapshot to delete. in end_id of type str UUID of the last snapshot to delete. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`VBoxErrorInvalidVmState` The running virtual machine prevents deleting this snapshot. This happens only in very specific situations, usually snapshots can be deleted without trouble while a VM is running. The error message text explains the reason for the failure. raises :class:`OleErrorNotimpl` The method is not implemented yet. """ if not isinstance(start_id, basestring): raise TypeError("start_id can only be an instance of type basestring") if not isinstance(end_id, basestring): raise TypeError("end_id can only be an instance of type basestring") progress = self._call("deleteSnapshotRange", in_p=[start_id, end_id]) progress = IProgress(progress) return progress
[ "def", "delete_snapshot_range", "(", "self", ",", "start_id", ",", "end_id", ")", ":", "if", "not", "isinstance", "(", "start_id", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"start_id can only be an instance of type basestring\"", ")", "if", "not", "isinstance", "(", "end_id", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"end_id can only be an instance of type basestring\"", ")", "progress", "=", "self", ".", "_call", "(", "\"deleteSnapshotRange\"", ",", "in_p", "=", "[", "start_id", ",", "end_id", "]", ")", "progress", "=", "IProgress", "(", "progress", ")", "return", "progress" ]
45.976744
21.27907
def mutant(fn): """ Convenience decorator to isolate mutation to within the decorated function (with respect to the input arguments). All arguments to the decorated function will be frozen so that they are guaranteed not to change. The return value is also frozen. """ @wraps(fn) def inner_f(*args, **kwargs): return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items()))) return inner_f
[ "def", "mutant", "(", "fn", ")", ":", "@", "wraps", "(", "fn", ")", "def", "inner_f", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "freeze", "(", "fn", "(", "*", "[", "freeze", "(", "e", ")", "for", "e", "in", "args", "]", ",", "*", "*", "dict", "(", "freeze", "(", "item", ")", "for", "item", "in", "kwargs", ".", "items", "(", ")", ")", ")", ")", "return", "inner_f" ]
34.846154
27
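A usage sketch for the `mutant` decorator above. It assumes the decorator is in scope together with a `freeze` like pyrsistent's, which turns dicts and lists into immutable pmaps/pvectors; the `tally` function is illustrative.

from pyrsistent import freeze  # assumed provider of freeze(); pip install pyrsistent

@mutant
def tally(counts, word):
    # counts arrives frozen (a pmap), so in-place mutation would raise;
    # evolve a new mapping instead and return it (the result is frozen too).
    return counts.set(word, counts.get(word, 0) + 1)

counts = tally({'spam': 1}, 'spam')
print(counts)  # pmap({'spam': 2})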
def center_widget_on_screen(widget, screen=None): """ Centers given Widget on the screen. :param widget: Current Widget. :type widget: QWidget :param screen: Screen used for centering. :type screen: int :return: Definition success. :rtype: bool """ screen = screen and screen or QApplication.desktop().primaryScreen() desktop_width = QApplication.desktop().screenGeometry(screen).width() desktop_height = QApplication.desktop().screenGeometry(screen).height() widget.move(desktop_width / 2 - widget.sizeHint().width() / 2, desktop_height / 2 - widget.sizeHint().height() / 2) return True
[ "def", "center_widget_on_screen", "(", "widget", ",", "screen", "=", "None", ")", ":", "screen", "=", "screen", "and", "screen", "or", "QApplication", ".", "desktop", "(", ")", ".", "primaryScreen", "(", ")", "desktop_width", "=", "QApplication", ".", "desktop", "(", ")", ".", "screenGeometry", "(", "screen", ")", ".", "width", "(", ")", "desktop_height", "=", "QApplication", ".", "desktop", "(", ")", ".", "screenGeometry", "(", "screen", ")", ".", "height", "(", ")", "widget", ".", "move", "(", "desktop_width", "/", "2", "-", "widget", ".", "sizeHint", "(", ")", ".", "width", "(", ")", "/", "2", ",", "desktop_height", "/", "2", "-", "widget", ".", "sizeHint", "(", ")", ".", "height", "(", ")", "/", "2", ")", "return", "True" ]
37
21.823529
def libvlc_media_player_set_agl(p_mi, drawable): '''Set the agl handler where the media player should render its video output. @param p_mi: the Media Player. @param drawable: the agl handler. ''' f = _Cfunctions.get('libvlc_media_player_set_agl', None) or \ _Cfunction('libvlc_media_player_set_agl', ((1,), (1,),), None, None, MediaPlayer, ctypes.c_uint32) return f(p_mi, drawable)
[ "def", "libvlc_media_player_set_agl", "(", "p_mi", ",", "drawable", ")", ":", "f", "=", "_Cfunctions", ".", "get", "(", "'libvlc_media_player_set_agl'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_player_set_agl'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ",", ")", ",", "None", ",", "None", ",", "MediaPlayer", ",", "ctypes", ".", "c_uint32", ")", "return", "f", "(", "p_mi", ",", "drawable", ")" ]
47.222222
19.222222
def _set_exception(self):
    """Called by a Job object to tell that an exception occurred
    during the processing of the function. The object will become
    ready but not successful. The collector's notify_ready()
    method will be called, but NOT the callback method"""
    assert not self.ready()
    self._data = sys.exc_info()
    self._success = False
    self._event.set()
    if self._collector is not None:
        self._collector.notify_ready(self)
[ "def", "_set_exception", "(", "self", ")", ":", "assert", "not", "self", ".", "ready", "(", ")", "self", ".", "_data", "=", "sys", ".", "exc_info", "(", ")", "self", ".", "_success", "=", "False", "self", ".", "_event", ".", "set", "(", ")", "if", "self", ".", "_collector", "is", "not", "None", ":", "self", ".", "_collector", ".", "notify_ready", "(", "self", ")" ]
44.636364
10.454545
def getReference(self, id_): """ Returns the Reference with the specified ID or raises a ReferenceNotFoundException if it does not exist. """ if id_ not in self._referenceIdMap: raise exceptions.ReferenceNotFoundException(id_) return self._referenceIdMap[id_]
[ "def", "getReference", "(", "self", ",", "id_", ")", ":", "if", "id_", "not", "in", "self", ".", "_referenceIdMap", ":", "raise", "exceptions", ".", "ReferenceNotFoundException", "(", "id_", ")", "return", "self", ".", "_referenceIdMap", "[", "id_", "]" ]
39
9.25
def available(self): """ True if any of the supported modules from ``packages`` is available for use. :return: True if any modules from ``packages`` exist :rtype: bool """ for module_name in self.packages: if importlib.util.find_spec(module_name): return True return False
[ "def", "available", "(", "self", ")", ":", "for", "module_name", "in", "self", ".", "packages", ":", "if", "importlib", ".", "util", ".", "find_spec", "(", "module_name", ")", ":", "return", "True", "return", "False" ]
30.909091
17
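The availability check above hinges on `importlib.util.find_spec`, which returns None for an absent top-level module without importing anything. A standalone equivalent:

import importlib.util

def any_available(module_names):
    """True if at least one of the named top-level modules can be imported."""
    return any(importlib.util.find_spec(name) is not None for name in module_names)

print(any_available(['json', 'surely_not_installed']))  # True: json is stdlib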
def _search_archive(pattern, archive, verbosity=0, interactive=True): """Search for given pattern in an archive.""" grep = util.find_program("grep") if not grep: msg = "The grep(1) program is required for searching archive contents, please install it." raise util.PatoolError(msg) tmpdir = util.tmpdir() try: path = _extract_archive(archive, outdir=tmpdir, verbosity=-1) return util.run_checked([grep, "-r", "-e", pattern, "."], ret_ok=(0, 1), verbosity=1, cwd=path) finally: shutil.rmtree(tmpdir, onerror=rmtree_log_error)
[ "def", "_search_archive", "(", "pattern", ",", "archive", ",", "verbosity", "=", "0", ",", "interactive", "=", "True", ")", ":", "grep", "=", "util", ".", "find_program", "(", "\"grep\"", ")", "if", "not", "grep", ":", "msg", "=", "\"The grep(1) program is required for searching archive contents, please install it.\"", "raise", "util", ".", "PatoolError", "(", "msg", ")", "tmpdir", "=", "util", ".", "tmpdir", "(", ")", "try", ":", "path", "=", "_extract_archive", "(", "archive", ",", "outdir", "=", "tmpdir", ",", "verbosity", "=", "-", "1", ")", "return", "util", ".", "run_checked", "(", "[", "grep", ",", "\"-r\"", ",", "\"-e\"", ",", "pattern", ",", "\".\"", "]", ",", "ret_ok", "=", "(", "0", ",", "1", ")", ",", "verbosity", "=", "1", ",", "cwd", "=", "path", ")", "finally", ":", "shutil", ".", "rmtree", "(", "tmpdir", ",", "onerror", "=", "rmtree_log_error", ")" ]
48
25.083333
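A rough standard-library equivalent of the extract-then-grep flow in `_search_archive`, with `shutil.unpack_archive` standing in for patool's extractor. grep exits 0 on a match and 1 on no match, which is why both are accepted as success.

import shutil
import subprocess
import tempfile

def search_archive(pattern, archive):
    """Return True if pattern occurs anywhere inside the archive's files."""
    tmpdir = tempfile.mkdtemp()
    try:
        shutil.unpack_archive(archive, tmpdir)  # raises on unsupported formats
        proc = subprocess.run(['grep', '-r', '-e', pattern, '.'], cwd=tmpdir)
        if proc.returncode > 1:  # >1 signals a real grep error
            raise RuntimeError('grep failed with code %d' % proc.returncode)
        return proc.returncode == 0
    finally:
        shutil.rmtree(tmpdir, ignore_errors=True)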
def _validate_search_query(self, returning_query): """ Checks to see that the query will not exceed the max query depth :param returning_query: The PIF system or Dataset query to execute. :type returning_query: :class:`PifSystemReturningQuery` or :class: `DatasetReturningQuery` """ start_index = returning_query.from_index or 0 size = returning_query.size or 0 if start_index < 0: raise CitrinationClientError( "start_index cannot be negative. Please enter a value greater than or equal to zero") if size < 0: raise CitrinationClientError("Size cannot be negative. Please enter a value greater than or equal to zero") if start_index + size > MAX_QUERY_DEPTH: raise CitrinationClientError( "Citrination does not support pagination past the {0}th result. Please reduce either the from_index and/or size such that their sum is below {0}".format( MAX_QUERY_DEPTH))
[ "def", "_validate_search_query", "(", "self", ",", "returning_query", ")", ":", "start_index", "=", "returning_query", ".", "from_index", "or", "0", "size", "=", "returning_query", ".", "size", "or", "0", "if", "start_index", "<", "0", ":", "raise", "CitrinationClientError", "(", "\"start_index cannot be negative. Please enter a value greater than or equal to zero\"", ")", "if", "size", "<", "0", ":", "raise", "CitrinationClientError", "(", "\"Size cannot be negative. Please enter a value greater than or equal to zero\"", ")", "if", "start_index", "+", "size", ">", "MAX_QUERY_DEPTH", ":", "raise", "CitrinationClientError", "(", "\"Citrination does not support pagination past the {0}th result. Please reduce either the from_index and/or size such that their sum is below {0}\"", ".", "format", "(", "MAX_QUERY_DEPTH", ")", ")" ]
50.65
29.15
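The validation above reduces to three range checks on the paging window. A stripped-down version; MAX_QUERY_DEPTH here is an illustrative stand-in for the client's real constant:

MAX_QUERY_DEPTH = 10000  # illustrative; the actual limit is defined by the client library

def validate_pagination(from_index=0, size=0):
    if from_index < 0:
        raise ValueError("from_index cannot be negative")
    if size < 0:
        raise ValueError("size cannot be negative")
    if from_index + size > MAX_QUERY_DEPTH:
        raise ValueError("pagination past the %dth result is not supported" % MAX_QUERY_DEPTH)

validate_pagination(from_index=9990, size=10)  # fine: the sum equals the limit exactly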
def scrape_musicbed_url(url, login, password, num_tracks=sys.maxsize, folders=False, custom_path=''):
    """
    Scrapes provided MusicBed url.
    Uses requests' Session object in order to store cookies.
    Requires login and password information.
    If provided url is of pattern 'https://www.musicbed.com/artists/<string>/<number>' - a number of albums will be downloaded.
    If provided url is of pattern 'https://www.musicbed.com/albums/<string>/<number>' - only one album will be downloaded.
    If provided url is of pattern 'https://www.musicbed.com/songs/<string>/<number>' - will be treated as one album (but download only 1st track).
    Metadata and urls are obtained from JavaScript data that's treated as JSON data.

    Returns:
        list: filenames to open
    """
    session = requests.Session()
    response = session.get( url )
    if response.status_code != 200:
        puts( colored.red( 'scrape_musicbed_url: couldn\'t open provided url. Status code: ' + str( response.status_code ) + '. Aborting.' ) )
        session.close()
        return []

    albums = []
    # let's determine what url type we got
    # '/artists/' - search for and download many albums
    # '/albums/' - means we're downloading 1 album
    # '/songs/' - means 1 album as well, but we're forcing num_tracks=1 in order to download only first relevant track
    if url.startswith( 'https://www.musicbed.com/artists/' ):
        # a hackjob code to get a list of available albums
        main_index = 0
        while response.text.find( 'https://www.musicbed.com/albums/', main_index ) != -1:
            start_index = response.text.find( 'https://www.musicbed.com/albums/', main_index )
            end_index = response.text.find( '">', start_index )
            albums.append( response.text[start_index:end_index] )
            main_index = end_index
    elif url.startswith( 'https://www.musicbed.com/songs/' ):
        albums.append( url )
        num_tracks = 1
    else: # url.startswith( 'https://www.musicbed.com/albums/' )
        albums.append( url )

    # let's get our token and try to login (csrf_token seems to be present on every page)
    token = response.text.split( 'var csrf_token = "' )[1].split( '";' )[0]
    details = { '_token': token, 'login': login, 'password': password }
    response = session.post( 'https://www.musicbed.com/ajax/login', data=details )
    if response.status_code != 200:
        puts( colored.red( 'scrape_musicbed_url: couldn\'t login. Aborting. ' ) + colored.white( 'Couldn\'t access login page.' ) )
        session.close()
        return []
    login_response_data = demjson.decode( response.text )
    if not login_response_data['body']['status']:
        puts( colored.red( 'scrape_musicbed_url: couldn\'t login. Aborting. ' ) + colored.white( 'Did you provide correct login and password?' ) )
        session.close()
        return []

    # now let's actually scrape collected pages
    filenames = []
    for each_album_url in albums:
        response = session.get( each_album_url )
        if response.status_code != 200:
            puts_safe( colored.red( 'scrape_musicbed_url: couldn\'t open url: ' + each_album_url + '. Status code: ' + str( response.status_code ) + '. Skipping.' ) )
            continue

        # actually not a JSON, but a JS object, but so far so good
        json = response.text.split( 'App.components.SongRows = ' )[1].split( '</script>' )[0]
        data = demjson.decode( json )

        song_count = 1
        for each_song in data['loadedSongs']:
            if song_count > num_tracks:
                break
            try:
                url, params = each_song['playback_url'].split( '?' )
                details = dict()
                for each_param in params.split( '&' ):
                    name, value = each_param.split( '=' )
                    details.update( { name: value } )
                # musicbed warns about it if it's not fixed
                details['X-Amz-Credential'] = details['X-Amz-Credential'].replace( '%2F', '/' )

                directory = custom_path
                if folders:
                    sanitized_artist = sanitize_filename( each_song['album']['data']['artist']['data']['name'] )
                    sanitized_album = sanitize_filename( each_song['album']['data']['name'] )
                    directory = join( directory, sanitized_artist + ' - ' + sanitized_album )
                    if not exists( directory ):
                        mkdir( directory )

                filename = join( directory, str( song_count ) + ' - ' + sanitize_filename( each_song['name'] ) + '.mp3' )
                if exists( filename ):
                    puts_safe( colored.yellow( 'Skipping' ) + colored.white( ': ' + each_song['name'] + ' - it already exists!' ) )
                    song_count += 1
                    continue

                puts_safe( colored.green( 'Downloading' ) + colored.white( ': ' + each_song['name'] ) )
                path = download_file( url, filename, session=session, params=details )

                # example of genre_string:
                # "<a href=\"https://www.musicbed.com/genres/ambient/2\">Ambient</a> <a href=\"https://www.musicbed.com/genres/cinematic/4\">Cinematic</a>"
                genres = ''
                for each in each_song['genre_string'].split( '</a>' ):
                    if ( each != "" ):
                        genres += each.split( '">' )[1] + '/'
                genres = genres[:-1] # removing last '/

                tag_file(path,
                         each_song['album']['data']['artist']['data']['name'],
                         each_song['name'],
                         album=each_song['album']['data']['name'],
                         year=int( each_song['album']['data']['released_at'].split( '-' )[0] ),
                         genre=genres,
                         artwork_url=each_song['album']['data']['imageObject']['data']['paths']['original'],
                         track_number=str( song_count ),
                         url=each_song['song_url'])

                filenames.append( path )
                song_count += 1
            except:
                puts_safe( colored.red( 'Problem downloading ' ) + colored.white( each_song['name'] ) + '. Skipping.' )
                song_count += 1

    session.close()
    return filenames
[ "def", "scrape_musicbed_url", "(", "url", ",", "login", ",", "password", ",", "num_tracks", "=", "sys", ".", "maxsize", ",", "folders", "=", "False", ",", "custom_path", "=", "''", ")", ":", "session", "=", "requests", ".", "Session", "(", ")", "response", "=", "session", ".", "get", "(", "url", ")", "if", "response", ".", "status_code", "!=", "200", ":", "puts", "(", "colored", ".", "red", "(", "'scrape_musicbed_url: couldn\\'t open provided url. Status code: '", "+", "str", "(", "response", ".", "status_code", ")", "+", "'. Aborting.'", ")", ")", "session", ".", "close", "(", ")", "return", "[", "]", "albums", "=", "[", "]", "# let's determine what url type we got", "# '/artists/' - search for and download many albums", "# '/albums/' - means we're downloading 1 album", "# '/songs/' - means 1 album as well, but we're forcing num_tracks=1 in order to download only first relevant track", "if", "url", ".", "startswith", "(", "'https://www.musicbed.com/artists/'", ")", ":", "# a hackjob code to get a list of available albums", "main_index", "=", "0", "while", "response", ".", "text", ".", "find", "(", "'https://www.musicbed.com/albums/'", ",", "main_index", ")", "!=", "-", "1", ":", "start_index", "=", "response", ".", "text", ".", "find", "(", "'https://www.musicbed.com/albums/'", ",", "main_index", ")", "end_index", "=", "response", ".", "text", ".", "find", "(", "'\">'", ",", "start_index", ")", "albums", ".", "append", "(", "response", ".", "text", "[", "start_index", ":", "end_index", "]", ")", "main_index", "=", "end_index", "elif", "url", ".", "startswith", "(", "'https://www.musicbed.com/songs/'", ")", ":", "albums", ".", "append", "(", "url", ")", "num_tracks", "=", "1", "else", ":", "# url.startswith( 'https://www.musicbed.com/albums/' )", "albums", ".", "append", "(", "url", ")", "# let's get our token and try to login (csrf_token seems to be present on every page)", "token", "=", "response", ".", "text", ".", "split", "(", "'var csrf_token = \"'", ")", "[", "1", "]", ".", "split", "(", "'\";'", ")", "[", "0", "]", "details", "=", "{", "'_token'", ":", "token", ",", "'login'", ":", "login", ",", "'password'", ":", "password", "}", "response", "=", "session", ".", "post", "(", "'https://www.musicbed.com/ajax/login'", ",", "data", "=", "details", ")", "if", "response", ".", "status_code", "!=", "200", ":", "puts", "(", "colored", ".", "red", "(", "'scrape_musicbed_url: couldn\\'t login. Aborting. '", ")", "+", "colored", ".", "white", "(", "'Couldn\\'t access login page.'", ")", ")", "session", ".", "close", "(", ")", "return", "[", "]", "login_response_data", "=", "demjson", ".", "decode", "(", "response", ".", "text", ")", "if", "not", "login_response_data", "[", "'body'", "]", "[", "'status'", "]", ":", "puts", "(", "colored", ".", "red", "(", "'scrape_musicbed_url: couldn\\'t login. Aborting. '", ")", "+", "colored", ".", "white", "(", "'Did you provide correct login and password?'", ")", ")", "session", ".", "close", "(", ")", "return", "[", "]", "# now let's actually scrape collected pages", "filenames", "=", "[", "]", "for", "each_album_url", "in", "albums", ":", "response", "=", "session", ".", "get", "(", "each_album_url", ")", "if", "response", ".", "status_code", "!=", "200", ":", "puts_safe", "(", "colored", ".", "red", "(", "'scrape_musicbed_url: couldn\\'t open url: '", "+", "each_album_url", "+", "'. Status code: '", "+", "str", "(", "response", ".", "status_code", ")", "+", "'. 
Skipping.'", ")", ")", "continue", "# actually not a JSON, but a JS object, but so far so good", "json", "=", "response", ".", "text", ".", "split", "(", "'App.components.SongRows = '", ")", "[", "1", "]", ".", "split", "(", "'</script>'", ")", "[", "0", "]", "data", "=", "demjson", ".", "decode", "(", "json", ")", "song_count", "=", "1", "for", "each_song", "in", "data", "[", "'loadedSongs'", "]", ":", "if", "song_count", ">", "num_tracks", ":", "break", "try", ":", "url", ",", "params", "=", "each_song", "[", "'playback_url'", "]", ".", "split", "(", "'?'", ")", "details", "=", "dict", "(", ")", "for", "each_param", "in", "params", ".", "split", "(", "'&'", ")", ":", "name", ",", "value", "=", "each_param", ".", "split", "(", "'='", ")", "details", ".", "update", "(", "{", "name", ":", "value", "}", ")", "# musicbed warns about it if it's not fixed", "details", "[", "'X-Amz-Credential'", "]", "=", "details", "[", "'X-Amz-Credential'", "]", ".", "replace", "(", "'%2F'", ",", "'/'", ")", "directory", "=", "custom_path", "if", "folders", ":", "sanitized_artist", "=", "sanitize_filename", "(", "each_song", "[", "'album'", "]", "[", "'data'", "]", "[", "'artist'", "]", "[", "'data'", "]", "[", "'name'", "]", ")", "sanitized_album", "=", "sanitize_filename", "(", "each_song", "[", "'album'", "]", "[", "'data'", "]", "[", "'name'", "]", ")", "directory", "=", "join", "(", "directory", ",", "sanitized_artist", "+", "' - '", "+", "sanitized_album", ")", "if", "not", "exists", "(", "directory", ")", ":", "mkdir", "(", "directory", ")", "filename", "=", "join", "(", "directory", ",", "str", "(", "song_count", ")", "+", "' - '", "+", "sanitize_filename", "(", "each_song", "[", "'name'", "]", ")", "+", "'.mp3'", ")", "if", "exists", "(", "filename", ")", ":", "puts_safe", "(", "colored", ".", "yellow", "(", "'Skipping'", ")", "+", "colored", ".", "white", "(", "': '", "+", "each_song", "[", "'name'", "]", "+", "' - it already exists!'", ")", ")", "song_count", "+=", "1", "continue", "puts_safe", "(", "colored", ".", "green", "(", "'Downloading'", ")", "+", "colored", ".", "white", "(", "': '", "+", "each_song", "[", "'name'", "]", ")", ")", "path", "=", "download_file", "(", "url", ",", "filename", ",", "session", "=", "session", ",", "params", "=", "details", ")", "# example of genre_string:", "# \"<a href=\\\"https://www.musicbed.com/genres/ambient/2\\\">Ambient</a> <a href=\\\"https://www.musicbed.com/genres/cinematic/4\\\">Cinematic</a>\"", "genres", "=", "''", "for", "each", "in", "each_song", "[", "'genre_string'", "]", ".", "split", "(", "'</a>'", ")", ":", "if", "(", "each", "!=", "\"\"", ")", ":", "genres", "+=", "each", ".", "split", "(", "'\">'", ")", "[", "1", "]", "+", "'/'", "genres", "=", "genres", "[", ":", "-", "1", "]", "# removing last '/", "tag_file", "(", "path", ",", "each_song", "[", "'album'", "]", "[", "'data'", "]", "[", "'artist'", "]", "[", "'data'", "]", "[", "'name'", "]", ",", "each_song", "[", "'name'", "]", ",", "album", "=", "each_song", "[", "'album'", "]", "[", "'data'", "]", "[", "'name'", "]", ",", "year", "=", "int", "(", "each_song", "[", "'album'", "]", "[", "'data'", "]", "[", "'released_at'", "]", ".", "split", "(", "'-'", ")", "[", "0", "]", ")", ",", "genre", "=", "genres", ",", "artwork_url", "=", "each_song", "[", "'album'", "]", "[", "'data'", "]", "[", "'imageObject'", "]", "[", "'data'", "]", "[", "'paths'", "]", "[", "'original'", "]", ",", "track_number", "=", "str", "(", "song_count", ")", ",", "url", "=", "each_song", "[", "'song_url'", "]", ")", 
"filenames", ".", "append", "(", "path", ")", "song_count", "+=", "1", "except", ":", "puts_safe", "(", "colored", ".", "red", "(", "'Problem downloading '", ")", "+", "colored", ".", "white", "(", "each_song", "[", "'name'", "]", ")", "+", "'. Skipping.'", ")", "song_count", "+=", "1", "session", ".", "close", "(", ")", "return", "filenames" ]
49.606299
30
def save_notebook(self): """ Saves the current notebook by injecting JavaScript to save to .ipynb file. """ try: from IPython.display import display, Javascript except ImportError: log.warning("Could not import IPython Display Function") print("Make sure to save your notebook before sending it to OK!") return if self.mode == "jupyter": display(Javascript('IPython.notebook.save_checkpoint();')) display(Javascript('IPython.notebook.save_notebook();')) elif self.mode == "jupyterlab": display(Javascript('document.querySelector(\'[data-command="docmanager:save"]\').click();')) print('Saving notebook...', end=' ') ipynbs = [path for path in self.assignment.src if os.path.splitext(path)[1] == '.ipynb'] # Wait for first .ipynb to save if ipynbs: if wait_for_save(ipynbs[0]): print("Saved '{}'.".format(ipynbs[0])) else: log.warning("Timed out waiting for IPython save") print("Could not automatically save \'{}\'".format(ipynbs[0])) print("Make sure your notebook" " is correctly named and saved before submitting to OK!".format(ipynbs[0])) return False else: print("No valid file sources found") return True
[ "def", "save_notebook", "(", "self", ")", ":", "try", ":", "from", "IPython", ".", "display", "import", "display", ",", "Javascript", "except", "ImportError", ":", "log", ".", "warning", "(", "\"Could not import IPython Display Function\"", ")", "print", "(", "\"Make sure to save your notebook before sending it to OK!\"", ")", "return", "if", "self", ".", "mode", "==", "\"jupyter\"", ":", "display", "(", "Javascript", "(", "'IPython.notebook.save_checkpoint();'", ")", ")", "display", "(", "Javascript", "(", "'IPython.notebook.save_notebook();'", ")", ")", "elif", "self", ".", "mode", "==", "\"jupyterlab\"", ":", "display", "(", "Javascript", "(", "'document.querySelector(\\'[data-command=\"docmanager:save\"]\\').click();'", ")", ")", "print", "(", "'Saving notebook...'", ",", "end", "=", "' '", ")", "ipynbs", "=", "[", "path", "for", "path", "in", "self", ".", "assignment", ".", "src", "if", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", "==", "'.ipynb'", "]", "# Wait for first .ipynb to save", "if", "ipynbs", ":", "if", "wait_for_save", "(", "ipynbs", "[", "0", "]", ")", ":", "print", "(", "\"Saved '{}'.\"", ".", "format", "(", "ipynbs", "[", "0", "]", ")", ")", "else", ":", "log", ".", "warning", "(", "\"Timed out waiting for IPython save\"", ")", "print", "(", "\"Could not automatically save \\'{}\\'\"", ".", "format", "(", "ipynbs", "[", "0", "]", ")", ")", "print", "(", "\"Make sure your notebook\"", "\" is correctly named and saved before submitting to OK!\"", ".", "format", "(", "ipynbs", "[", "0", "]", ")", ")", "return", "False", "else", ":", "print", "(", "\"No valid file sources found\"", ")", "return", "True" ]
43.117647
20.235294
def update_shortlink(self, shortlink_id, callback_uri=None, description=None): """Update existing shortlink registration Arguments: shortlink_id: Shortlink id assigned by mCASH """ arguments = {'callback_uri': callback_uri, 'description': description} return self.do_req('PUT', self.merchant_api_base_url + '/shortlink/' + shortlink_id + '/', arguments)
[ "def", "update_shortlink", "(", "self", ",", "shortlink_id", ",", "callback_uri", "=", "None", ",", "description", "=", "None", ")", ":", "arguments", "=", "{", "'callback_uri'", ":", "callback_uri", ",", "'description'", ":", "description", "}", "return", "self", ".", "do_req", "(", "'PUT'", ",", "self", ".", "merchant_api_base_url", "+", "'/shortlink/'", "+", "shortlink_id", "+", "'/'", ",", "arguments", ")" ]
39.230769
13.692308
def timed_cache(**timed_cache_kwargs):
    """LRU cache decorator with timeout.

    Parameters
    ----------
    days: int
    seconds: int
    microseconds: int
    milliseconds: int
    minutes: int
    hours: int
    weeks: int
    maxsize: int [default: 128]
    typed: bool [default: False]
    """

    def _wrapper(f):
        maxsize = timed_cache_kwargs.pop('maxsize', 128)
        typed = timed_cache_kwargs.pop('typed', False)
        update_delta = timedelta(**timed_cache_kwargs)
        # nonlocal workaround to support Python 2
        # https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/
        d = {'next_update': datetime.utcnow() - update_delta}
        try:
            f = functools.lru_cache(maxsize=maxsize, typed=typed)(f)
        except AttributeError:
            print(
                "LRU caching is not available in Python 2.7, "
                "this will have no effect!"
            )
            pass

        @functools.wraps(f)
        def _wrapped(*args, **kwargs):
            now = datetime.utcnow()
            if now >= d['next_update']:
                try:
                    f.cache_clear()
                except AttributeError:
                    pass
                d['next_update'] = now + update_delta
            return f(*args, **kwargs)
        return _wrapped
    return _wrapper
[ "def", "timed_cache", "(", "*", "*", "timed_cache_kwargs", ")", ":", "def", "_wrapper", "(", "f", ")", ":", "maxsize", "=", "timed_cache_kwargs", ".", "pop", "(", "'maxsize'", ",", "128", ")", "typed", "=", "timed_cache_kwargs", ".", "pop", "(", "'typed'", ",", "False", ")", "update_delta", "=", "timedelta", "(", "*", "*", "timed_cache_kwargs", ")", "# nonlocal workaround to support Python 2", "# https://technotroph.wordpress.com/2012/10/01/python-closures-and-the-python-2-7-nonlocal-solution/", "d", "=", "{", "'next_update'", ":", "datetime", ".", "utcnow", "(", ")", "-", "update_delta", "}", "try", ":", "f", "=", "functools", ".", "lru_cache", "(", "maxsize", "=", "maxsize", ",", "typed", "=", "typed", ")", "(", "f", ")", "except", "AttributeError", ":", "print", "(", "\"LRU caching is not available in Pyton 2.7, \"", "\"this will have no effect!\"", ")", "pass", "@", "functools", ".", "wraps", "(", "f", ")", "def", "_wrapped", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "now", ">=", "d", "[", "'next_update'", "]", ":", "try", ":", "f", ".", "cache_clear", "(", ")", "except", "AttributeError", ":", "pass", "d", "[", "'next_update'", "]", "=", "now", "+", "update_delta", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "_wrapped", "return", "_wrapper" ]
29.173913
19.108696
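A usage sketch for the `timed_cache` decorator above, assuming it is in scope with its functools/datetime imports. Results come from the LRU cache until the timedelta elapses; the first call after expiry triggers cache_clear and a recompute.

import time

@timed_cache(seconds=2, maxsize=32)
def slow_square(x):
    time.sleep(0.5)  # stand-in for expensive work
    return x * x

slow_square(4)    # computed (slow)
slow_square(4)    # served from cache (fast)
time.sleep(2.1)
slow_square(4)    # cache expired and cleared, computed again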
def fixchars(self, text): """Find and replace problematic characters.""" keys = ''.join(Config.CHARFIXES.keys()) values = ''.join(Config.CHARFIXES.values()) fixed = text.translate(str.maketrans(keys, values)) if fixed != text: self.modified = True return fixed
[ "def", "fixchars", "(", "self", ",", "text", ")", ":", "keys", "=", "''", ".", "join", "(", "Config", ".", "CHARFIXES", ".", "keys", "(", ")", ")", "values", "=", "''", ".", "join", "(", "Config", ".", "CHARFIXES", ".", "values", "(", ")", ")", "fixed", "=", "text", ".", "translate", "(", "str", ".", "maketrans", "(", "keys", ",", "values", ")", ")", "if", "fixed", "!=", "text", ":", "self", ".", "modified", "=", "True", "return", "fixed" ]
39.125
11.875
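The `fixchars` pattern above is a single-pass character translation. A standalone sketch with an illustrative CHARFIXES table (the real mapping lives in Config); note that str.maketrans with two string arguments requires equal lengths, which holds for one-to-one fixes like these.

CHARFIXES = {'\u2018': "'", '\u2019': "'", '\u201c': '"', '\u201d': '"'}

table = str.maketrans(''.join(CHARFIXES.keys()), ''.join(CHARFIXES.values()))
text = '\u201csmart\u201d quotes and \u2018curly\u2019 apostrophes'
print(text.translate(table))  # "smart" quotes and 'curly' apostrophes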
def task_failed(sender=None, **kwargs): """ Update the status record accordingly when a :py:class:`UserTaskMixin` fails. """ if isinstance(sender, UserTaskMixin): exception = kwargs['exception'] if not isinstance(exception, TaskCanceledException): # Don't include traceback, since this is intended for end users sender.status.fail(str(exception)) user_task_stopped.send_robust(sender=UserTaskStatus, status=sender.status)
[ "def", "task_failed", "(", "sender", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "sender", ",", "UserTaskMixin", ")", ":", "exception", "=", "kwargs", "[", "'exception'", "]", "if", "not", "isinstance", "(", "exception", ",", "TaskCanceledException", ")", ":", "# Don't include traceback, since this is intended for end users", "sender", ".", "status", ".", "fail", "(", "str", "(", "exception", ")", ")", "user_task_stopped", ".", "send_robust", "(", "sender", "=", "UserTaskStatus", ",", "status", "=", "sender", ".", "status", ")" ]
47.6
14.6
def catalog(self): """Primary registered catalog for the wrapped portal type """ if self._catalog is None: logger.debug("SuperModel::catalog: *Fetch catalog*") self._catalog = self.get_catalog_for(self.brain) return self._catalog
[ "def", "catalog", "(", "self", ")", ":", "if", "self", ".", "_catalog", "is", "None", ":", "logger", ".", "debug", "(", "\"SuperModel::catalog: *Fetch catalog*\"", ")", "self", ".", "_catalog", "=", "self", ".", "get_catalog_for", "(", "self", ".", "brain", ")", "return", "self", ".", "_catalog" ]
39.857143
12.142857
def resize_preview(self, dw, dh): "Resizes preview that is currently dragged" # identify preview if self._objects_moving: id_ = self._objects_moving[0] tags = self.canvas.gettags(id_) for tag in tags: if tag.startswith('preview_'): _, ident = tag.split('preview_') preview = self.previews[ident] preview.resize_by(dw, dh) self.move_previews() break self._update_cregion()
[ "def", "resize_preview", "(", "self", ",", "dw", ",", "dh", ")", ":", "# identify preview", "if", "self", ".", "_objects_moving", ":", "id_", "=", "self", ".", "_objects_moving", "[", "0", "]", "tags", "=", "self", ".", "canvas", ".", "gettags", "(", "id_", ")", "for", "tag", "in", "tags", ":", "if", "tag", ".", "startswith", "(", "'preview_'", ")", ":", "_", ",", "ident", "=", "tag", ".", "split", "(", "'preview_'", ")", "preview", "=", "self", ".", "previews", "[", "ident", "]", "preview", ".", "resize_by", "(", "dw", ",", "dh", ")", "self", ".", "move_previews", "(", ")", "break", "self", ".", "_update_cregion", "(", ")" ]
36.133333
10.266667
def show(*actors, **options
#    at=None,
#    shape=(1, 1),
#    N=None,
#    pos=(0, 0),
#    size="auto",
#    screensize="auto",
#    title="",
#    bg="blackboard",
#    bg2=None,
#    axes=4,
#    infinity=False,
#    verbose=True,
#    interactive=None,
#    offscreen=False,
#    resetcam=True,
#    zoom=None,
#    viewup="",
#    azimuth=0,
#    elevation=0,
#    roll=0,
#    interactorStyle=0,
#    newPlotter=False,
#    depthpeeling=False,
#    q=False,
):
    """
    Create on the fly an instance of class ``Plotter`` and show the object(s) provided.

    Allowed input objects are: ``filename``, ``vtkPolyData``, ``vtkActor``,
    ``vtkActor2D``, ``vtkImageActor``, ``vtkAssembly`` or ``vtkVolume``.

    If filename is given, its type is guessed based on its extension.
    Supported formats are:
    `vtu, vts, vtp, ply, obj, stl, 3ds, xml, neutral, gmsh, pcd, xyz, txt, byu,
    tif, slc, vti, mhd, png, jpg`.

    :param bool newPlotter: if set to `True`, a call to ``show`` will instantiate
        a new ``Plotter`` object (a new window) instead of reusing the first created.
        See e.g.: |readVolumeAsIsoSurface.py|_
    :return: the current ``Plotter`` class instance.

    .. note:: With multiple renderers, keyword ``at`` can become a `list`, e.g.

        .. code-block:: python

            from vtkplotter import *
            s = Sphere()
            c = Cube()
            p = Paraboloid()
            show(s, c, at=[0, 1], shape=(3,1))
            show(p, at=2, interactive=True)
            #
            # is equivalent to:
            vp = Plotter(shape=(3,1))
            s = Sphere()
            c = Cube()
            p = Paraboloid()
            vp.show(s, at=0)
            vp.show(p, at=1)
            vp.show(c, at=2, interactive=True)
    """
    at = options.pop("at", None)
    shape = options.pop("shape", (1, 1))
    N = options.pop("N", None)
    pos = options.pop("pos", (0, 0))
    size = options.pop("size", "auto")
    screensize = options.pop("screensize", "auto")
    title = options.pop("title", "")
    bg = options.pop("bg", "blackboard")
    bg2 = options.pop("bg2", None)
    axes = options.pop("axes", 4)
    infinity = options.pop("infinity", False)
    verbose = options.pop("verbose", True)
    interactive = options.pop("interactive", None)
    offscreen = options.pop("offscreen", False)
    resetcam = options.pop("resetcam", True)
    zoom = options.pop("zoom", None)
    viewup = options.pop("viewup", "")
    azimuth = options.pop("azimuth", 0)
    elevation = options.pop("elevation", 0)
    roll = options.pop("roll", 0)
    interactorStyle = options.pop("interactorStyle", 0)
    newPlotter = options.pop("newPlotter", False)
    depthpeeling = options.pop("depthpeeling", False)
    q = options.pop("q", False)

    if len(actors) == 0:
        actors = None
    elif len(actors) == 1:
        actors = actors[0]
    else:
        actors = utils.flatten(actors)

    if settings.plotter_instance and newPlotter == False:
        vp = settings.plotter_instance
    else:
        if utils.isSequence(at):
            if not utils.isSequence(actors):
                colors.printc("~times show() Error: input must be a list.", c=1)
                exit()
            if len(at) != len(actors):
                colors.printc("~times show() Error: lists 'input' and 'at', must have equal lengths.", c=1)
                exit()
            if len(at) > 1 and (shape == (1, 1) and N == None):
                N = max(at) + 1
        elif at is None and (N or shape != (1, 1)):
            if not utils.isSequence(actors):
                colors.printc('~times show() Error: N or shape is set, but input is not a sequence.', c=1)
                colors.printc(' you may need to specify e.g. at=0', c=1)
                exit()
            at = range(len(actors))

        vp = Plotter(
            shape=shape,
            N=N,
            pos=pos,
            size=size,
            screensize=screensize,
            title=title,
            bg=bg,
            bg2=bg2,
            axes=axes,
            infinity=infinity,
            depthpeeling=depthpeeling,
            verbose=verbose,
            interactive=interactive,
            offscreen=offscreen,
        )

    if utils.isSequence(at):
        for i, a in enumerate(actors):
            vp.show(
                a,
                at=i,
                zoom=zoom,
                resetcam=resetcam,
                viewup=viewup,
                azimuth=azimuth,
                elevation=elevation,
                roll=roll,
                interactive=interactive,
                interactorStyle=interactorStyle,
                q=q,
            )
        vp.interactor.Start()
    else:
        vp.show(
            actors,
            at=at,
            zoom=zoom,
            resetcam=resetcam,
            viewup=viewup,
            azimuth=azimuth,
            elevation=elevation,
            roll=roll,
            interactive=interactive,
            interactorStyle=interactorStyle,
            q=q,
        )

    return vp
[ "def", "show", "(", "*", "actors", ",", "*", "*", "options", "# at=None,", "# shape=(1, 1),", "# N=None,", "# pos=(0, 0),", "# size=\"auto\",", "# screensize=\"auto\",", "# title=\"\",", "# bg=\"blackboard\",", "# bg2=None,", "# axes=4,", "# infinity=False,", "# verbose=True,", "# interactive=None,", "# offscreen=False,", "# resetcam=True,", "# zoom=None,", "# viewup=\"\",", "# azimuth=0,", "# elevation=0,", "# roll=0,", "# interactorStyle=0,", "# newPlotter=False,", "# depthpeeling=False,", "# q=False,", ")", ":", "at", "=", "options", ".", "pop", "(", "\"at\"", ",", "None", ")", "shape", "=", "options", ".", "pop", "(", "\"shape\"", ",", "(", "1", ",", "1", ")", ")", "N", "=", "options", ".", "pop", "(", "\"N\"", ",", "None", ")", "pos", "=", "options", ".", "pop", "(", "\"pos\"", ",", "(", "0", ",", "0", ")", ")", "size", "=", "options", ".", "pop", "(", "\"size\"", ",", "\"auto\"", ")", "screensize", "=", "options", ".", "pop", "(", "\"screensize\"", ",", "\"auto\"", ")", "title", "=", "options", ".", "pop", "(", "\"title\"", ",", "\"\"", ")", "bg", "=", "options", ".", "pop", "(", "\"bg\"", ",", "\"blackboard\"", ")", "bg2", "=", "options", ".", "pop", "(", "\"bg2\"", ",", "None", ")", "axes", "=", "options", ".", "pop", "(", "\"axes\"", ",", "4", ")", "infinity", "=", "options", ".", "pop", "(", "\"infinity\"", ",", "False", ")", "verbose", "=", "options", ".", "pop", "(", "\"verbose\"", ",", "True", ")", "interactive", "=", "options", ".", "pop", "(", "\"interactive\"", ",", "None", ")", "offscreen", "=", "options", ".", "pop", "(", "\"offscreen\"", ",", "False", ")", "resetcam", "=", "options", ".", "pop", "(", "\"resetcam\"", ",", "True", ")", "zoom", "=", "options", ".", "pop", "(", "\"zoom\"", ",", "None", ")", "viewup", "=", "options", ".", "pop", "(", "\"viewup\"", ",", "\"\"", ")", "azimuth", "=", "options", ".", "pop", "(", "\"azimuth\"", ",", "0", ")", "elevation", "=", "options", ".", "pop", "(", "\"elevation\"", ",", "0", ")", "roll", "=", "options", ".", "pop", "(", "\"roll\"", ",", "0", ")", "interactorStyle", "=", "options", ".", "pop", "(", "\"interactorStyle\"", ",", "0", ")", "newPlotter", "=", "options", ".", "pop", "(", "\"newPlotter\"", ",", "False", ")", "depthpeeling", "=", "options", ".", "pop", "(", "\"depthpeeling\"", ",", "False", ")", "q", "=", "options", ".", "pop", "(", "\"q\"", ",", "False", ")", "if", "len", "(", "actors", ")", "==", "0", ":", "actors", "=", "None", "elif", "len", "(", "actors", ")", "==", "1", ":", "actors", "=", "actors", "[", "0", "]", "else", ":", "actors", "=", "utils", ".", "flatten", "(", "actors", ")", "if", "settings", ".", "plotter_instance", "and", "newPlotter", "==", "False", ":", "vp", "=", "settings", ".", "plotter_instance", "else", ":", "if", "utils", ".", "isSequence", "(", "at", ")", ":", "if", "not", "utils", ".", "isSequence", "(", "actors", ")", ":", "colors", ".", "printc", "(", "\"~times show() Error: input must be a list.\"", ",", "c", "=", "1", ")", "exit", "(", ")", "if", "len", "(", "at", ")", "!=", "len", "(", "actors", ")", ":", "colors", ".", "printc", "(", "\"~times show() Error: lists 'input' and 'at', must have equal lengths.\"", ",", "c", "=", "1", ")", "exit", "(", ")", "if", "len", "(", "at", ")", ">", "1", "and", "(", "shape", "==", "(", "1", ",", "1", ")", "and", "N", "==", "None", ")", ":", "N", "=", "max", "(", "at", ")", "+", "1", "elif", "at", "is", "None", "and", "(", "N", "or", "shape", "!=", "(", "1", ",", "1", ")", ")", ":", "if", "not", "utils", ".", "isSequence", "(", "actors", ")", 
":", "colors", ".", "printc", "(", "'~times show() Error: N or shape is set, but input is not a sequence.'", ",", "c", "=", "1", ")", "colors", ".", "printc", "(", "' you may need to specify e.g. at=0'", ",", "c", "=", "1", ")", "exit", "(", ")", "at", "=", "range", "(", "len", "(", "actors", ")", ")", "vp", "=", "Plotter", "(", "shape", "=", "shape", ",", "N", "=", "N", ",", "pos", "=", "pos", ",", "size", "=", "size", ",", "screensize", "=", "screensize", ",", "title", "=", "title", ",", "bg", "=", "bg", ",", "bg2", "=", "bg2", ",", "axes", "=", "axes", ",", "infinity", "=", "infinity", ",", "depthpeeling", "=", "depthpeeling", ",", "verbose", "=", "verbose", ",", "interactive", "=", "interactive", ",", "offscreen", "=", "offscreen", ",", ")", "if", "utils", ".", "isSequence", "(", "at", ")", ":", "for", "i", ",", "a", "in", "enumerate", "(", "actors", ")", ":", "vp", ".", "show", "(", "a", ",", "at", "=", "i", ",", "zoom", "=", "zoom", ",", "resetcam", "=", "resetcam", ",", "viewup", "=", "viewup", ",", "azimuth", "=", "azimuth", ",", "elevation", "=", "elevation", ",", "roll", "=", "roll", ",", "interactive", "=", "interactive", ",", "interactorStyle", "=", "interactorStyle", ",", "q", "=", "q", ",", ")", "vp", ".", "interactor", ".", "Start", "(", ")", "else", ":", "vp", ".", "show", "(", "actors", ",", "at", "=", "at", ",", "zoom", "=", "zoom", ",", "resetcam", "=", "resetcam", ",", "viewup", "=", "viewup", ",", "azimuth", "=", "azimuth", ",", "elevation", "=", "elevation", ",", "roll", "=", "roll", ",", "interactive", "=", "interactive", ",", "interactorStyle", "=", "interactorStyle", ",", "q", "=", "q", ",", ")", "return", "vp" ]
30.475309
17.858025
def last(self): """ Returns the last object matched or None if there is no matching object. :: >>> iterator = Host.objects.iterator() >>> c = iterator.filter('kali') >>> if c.exists(): >>> print(c.last()) Host(name=kali-foo) :return: element or None """ if len(self): self._params.update(limit=1) if 'filter' not in self._params: return list(self)[-1] else: # Filter may not return results result = list(self) if result: return result[-1]
[ "def", "last", "(", "self", ")", ":", "if", "len", "(", "self", ")", ":", "self", ".", "_params", ".", "update", "(", "limit", "=", "1", ")", "if", "'filter'", "not", "in", "self", ".", "_params", ":", "return", "list", "(", "self", ")", "[", "-", "1", "]", "else", ":", "# Filter may not return results", "result", "=", "list", "(", "self", ")", "if", "result", ":", "return", "result", "[", "-", "1", "]" ]
30.272727
11.454545
def uint8sc(im): """Scale the image to uint8 Parameters: ----------- im: 2d array The image Returns: -------- im: 2d array (dtype uint8) The scaled image to uint8 """ im = np.asarray(im) immin = im.min() immax = im.max() imrange = immax - immin return cv2.convertScaleAbs(im - immin, alpha=255 / imrange)
[ "def", "uint8sc", "(", "im", ")", ":", "im", "=", "np", ".", "asarray", "(", "im", ")", "immin", "=", "im", ".", "min", "(", ")", "immax", "=", "im", ".", "max", "(", ")", "imrange", "=", "immax", "-", "immin", "return", "cv2", ".", "convertScaleAbs", "(", "im", "-", "immin", ",", "alpha", "=", "255", "/", "imrange", ")" ]
19.833333
20.388889
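`uint8sc` above stretches an image's dynamic range onto 0-255. The same idea in plain NumPy, skipping the OpenCV dependency; a guard is added for constant images, where the original's 255 / imrange would divide by zero.

import numpy as np

def to_uint8(im):
    im = np.asarray(im, dtype=np.float64)
    rng = im.max() - im.min()
    if rng == 0:  # constant image: avoid the divide-by-zero the cv2 version would hit
        return np.zeros(im.shape, np.uint8)
    return ((im - im.min()) * (255.0 / rng)).astype(np.uint8)

print(to_uint8([[0, 5], [10, 20]]))  # [[  0  63] [127 255]]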
def modify_job(self, name, schedule, persist=True): ''' Modify a job in the scheduler. Ignores jobs from pillar ''' # ensure job exists, then replace it if name in self.opts['schedule']: self.delete_job(name, persist) elif name in self._get_schedule(include_opts=False): log.warning("Cannot modify job %s, it's in the pillar!", name) return self.opts['schedule'][name] = schedule if persist: self.persist()
[ "def", "modify_job", "(", "self", ",", "name", ",", "schedule", ",", "persist", "=", "True", ")", ":", "# ensure job exists, then replace it", "if", "name", "in", "self", ".", "opts", "[", "'schedule'", "]", ":", "self", ".", "delete_job", "(", "name", ",", "persist", ")", "elif", "name", "in", "self", ".", "_get_schedule", "(", "include_opts", "=", "False", ")", ":", "log", ".", "warning", "(", "\"Cannot modify job %s, it's in the pillar!\"", ",", "name", ")", "return", "self", ".", "opts", "[", "'schedule'", "]", "[", "name", "]", "=", "schedule", "if", "persist", ":", "self", ".", "persist", "(", ")" ]
33.733333
19.733333
def rn(op, rc=None, r=None): # pylint: disable=redefined-outer-name, invalid-name """ This function is a wrapper for :meth:`~pywbem.WBEMConnection.ReferenceNames`. Instance-level use: Retrieve the instance paths of the association instances referencing a source instance. Class-level use: Retrieve the class paths of the association classes referencing a source class. Parameters: op (:class:`~pywbem.CIMInstanceName`): Source instance path; select instance-level use. op (:class:`~pywbem.CIMClassName`): Source class path; select class-level use. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. Returns: list of result objects: * For instance-level use, a list of :class:`~pywbem.CIMInstanceName` objects representing the retrieved instance paths, with their attributes set as follows: * `classname`: Name of the creation class of the instance. * `keybindings`: Keybindings of the instance. * `namespace`: Name of the CIM namespace containing the instance. * `host`: Host and optionally port of the WBEM server containing the CIM namespace, or `None` if the server did not return host information. * For class-level use, a list of :class:`~pywbem.CIMClassName` objects representing the retrieved class paths, with their attributes set as follows: * `classname`: Name of the class. * `namespace`: Name of the CIM namespace containing the class. * `host`: Host and optionally port of the WBEM server containing the CIM namespace, or `None` if the server did not return host information. """ return CONN.ReferenceNames(op, ResultClass=rc, Role=r)
[ "def", "rn", "(", "op", ",", "rc", "=", "None", ",", "r", "=", "None", ")", ":", "# pylint: disable=redefined-outer-name, invalid-name", "return", "CONN", ".", "ReferenceNames", "(", "op", ",", "ResultClass", "=", "rc", ",", "Role", "=", "r", ")" ]
35.114754
24
def get_property(self): """Establishes access of Property values""" scope = self def fget(self): """Call the HasProperties _get method""" return self._get(scope.name) def fset(self, value): """Validate value and call the HasProperties _set method""" if value is not undefined: value = scope.validate(self, value) self._set(scope.name, value) def fdel(self): """Set value to utils.undefined on delete""" self._set(scope.name, undefined) return property(fget=fget, fset=fset, fdel=fdel, doc=scope.sphinx())
[ "def", "get_property", "(", "self", ")", ":", "scope", "=", "self", "def", "fget", "(", "self", ")", ":", "\"\"\"Call the HasProperties _get method\"\"\"", "return", "self", ".", "_get", "(", "scope", ".", "name", ")", "def", "fset", "(", "self", ",", "value", ")", ":", "\"\"\"Validate value and call the HasProperties _set method\"\"\"", "if", "value", "is", "not", "undefined", ":", "value", "=", "scope", ".", "validate", "(", "self", ",", "value", ")", "self", ".", "_set", "(", "scope", ".", "name", ",", "value", ")", "def", "fdel", "(", "self", ")", ":", "\"\"\"Set value to utils.undefined on delete\"\"\"", "self", ".", "_set", "(", "scope", ".", "name", ",", "undefined", ")", "return", "property", "(", "fget", "=", "fget", ",", "fset", "=", "fset", ",", "fdel", "=", "fdel", ",", "doc", "=", "scope", ".", "sphinx", "(", ")", ")" ]
31.9
16.7
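`get_property` above is a property factory: the descriptor captures itself as `scope` and hands back a built-in property wired to the host's _get/_set/validate machinery. A minimal standalone rendition of the same pattern; the Typed/Point names are illustrative, not from the original library.

class Typed:
    """Builds a property that validates assignments against a type."""
    def __init__(self, name, kind):
        self.name, self.kind = name, kind

    def get_property(self):
        scope = self  # captured by the closures, like the original's scope

        def fget(obj):
            return obj.__dict__.get(scope.name)

        def fset(obj, value):
            if not isinstance(value, scope.kind):
                raise TypeError('%s must be %s' % (scope.name, scope.kind.__name__))
            obj.__dict__[scope.name] = value

        def fdel(obj):
            obj.__dict__.pop(scope.name, None)

        return property(fget=fget, fset=fset, fdel=fdel)


class Point:
    x = Typed('x', int).get_property()


p = Point()
p.x = 3
print(p.x)  # 3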