Dataset schema (each record below lists these fields, in order):

  text           string   (lengths 89 to 104k)
  code_tokens    list
  avg_line_len   float64  (7.91 to 980)
  score          float64  (0 to 630)
def _info_long(self) -> Optional[str]:
    """Extract journey information."""
    try:
        return str(
            html.unescape(self.journey.InfoTextList.InfoText.get("textL")).replace(
                "<br />", "\n"
            )
        )
    except AttributeError:
        return None
[ "def", "_info_long", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "try", ":", "return", "str", "(", "html", ".", "unescape", "(", "self", ".", "journey", ".", "InfoTextList", ".", "InfoText", ".", "get", "(", "\"textL\"", ")", ")", ".", "replace", "(", "\"<br />\"", ",", "\"\\n\"", ")", ")", "except", "AttributeError", ":", "return", "None" ]
avg_line_len: 31.9
score: 17.7
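A minimal standalone sketch of the unescape-and-replace idiom used in `_info_long` above; the sample string is hypothetical, not taken from the journey API:

import html

raw = "Delay &amp; detour<br />Platform 3"   # hypothetical "textL" value
print(html.unescape(raw).replace("<br />", "\n"))
# Delay & detour
# Platform 3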
def _aha_request(self, cmd, ain=None, param=None, rf=str):
    """Send an AHA request."""
    url = 'http://' + self._host + '/webservices/homeautoswitch.lua'
    params = {
        'switchcmd': cmd,
        'sid': self._sid
    }
    if param:
        params['param'] = param
    if ain:
        params['ain'] = ain

    plain = self._request(url, params)
    if plain == 'inval':
        raise InvalidError

    if rf == bool:
        return bool(int(plain))
    return rf(plain)
[ "def", "_aha_request", "(", "self", ",", "cmd", ",", "ain", "=", "None", ",", "param", "=", "None", ",", "rf", "=", "str", ")", ":", "url", "=", "'http://'", "+", "self", ".", "_host", "+", "'/webservices/homeautoswitch.lua'", "params", "=", "{", "'switchcmd'", ":", "cmd", ",", "'sid'", ":", "self", ".", "_sid", "}", "if", "param", ":", "params", "[", "'param'", "]", "=", "param", "if", "ain", ":", "params", "[", "'ain'", "]", "=", "ain", "plain", "=", "self", ".", "_request", "(", "url", ",", "params", ")", "if", "plain", "==", "'inval'", ":", "raise", "InvalidError", "if", "rf", "==", "bool", ":", "return", "bool", "(", "int", "(", "plain", ")", ")", "return", "rf", "(", "plain", ")" ]
avg_line_len: 27.736842
score: 17.421053
def get_interface_switchport_output_switchport_interface_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_switchport = ET.Element("get_interface_switchport")
    config = get_interface_switchport
    output = ET.SubElement(get_interface_switchport, "output")
    switchport = ET.SubElement(output, "switchport")
    interface_type_key = ET.SubElement(switchport, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name = ET.SubElement(switchport, "interface-name")
    interface_name.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_interface_switchport_output_switchport_interface_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_interface_switchport", "=", "ET", ".", "Element", "(", "\"get_interface_switchport\"", ")", "config", "=", "get_interface_switchport", "output", "=", "ET", ".", "SubElement", "(", "get_interface_switchport", ",", "\"output\"", ")", "switchport", "=", "ET", ".", "SubElement", "(", "output", ",", "\"switchport\"", ")", "interface_type_key", "=", "ET", ".", "SubElement", "(", "switchport", ",", "\"interface-type\"", ")", "interface_type_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_type'", ")", "interface_name", "=", "ET", ".", "SubElement", "(", "switchport", ",", "\"interface-name\"", ")", "interface_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'interface_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
avg_line_len: 49.333333
score: 18.866667
def send_activation_email(user, site):
    """
    Send an activation email to the ``user``.

    The activation email will make use of two templates:

    ``registration/activation_email_subject.txt``
        This template will be used for the subject line of the
        email. Because it is used as the subject line of an email,
        this template's output **must** be only a single line of
        text; output longer than one line will be forcibly joined
        into only a single line.

    ``registration/activation_email.txt``
        This template will be used for the body of the email.

    These templates will each receive the following context
    variables:

    ``activation_key``
        The activation key for the new account.

    ``expiration_days``
        The number of days remaining during which the account may
        be activated.

    ``site``
        An object representing the site on which the user
        registered; depending on whether ``django.contrib.sites``
        is installed, this may be an instance of either
        ``django.contrib.sites.models.Site`` (if the sites
        application is installed) or
        ``django.contrib.sites.models.RequestSite`` (if not).
        Consult the documentation for the Django sites framework
        for details regarding these objects' interfaces.
    """
    ctx_dict = {'activation_key': user.api_registration_profile.activation_key,
                'expiration_days': get_settings('REGISTRATION_API_ACCOUNT_ACTIVATION_DAYS'),
                'site': site}
    subject = render_to_string('registration_api/activation_email_subject.txt',
                               ctx_dict)
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    message = render_to_string('registration_api/activation_email.txt',
                               ctx_dict)

    user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
[ "def", "send_activation_email", "(", "user", ",", "site", ")", ":", "ctx_dict", "=", "{", "'activation_key'", ":", "user", ".", "api_registration_profile", ".", "activation_key", ",", "'expiration_days'", ":", "get_settings", "(", "'REGISTRATION_API_ACCOUNT_ACTIVATION_DAYS'", ")", ",", "'site'", ":", "site", "}", "subject", "=", "render_to_string", "(", "'registration_api/activation_email_subject.txt'", ",", "ctx_dict", ")", "# Email subject *must not* contain newlines", "subject", "=", "''", ".", "join", "(", "subject", ".", "splitlines", "(", ")", ")", "message", "=", "render_to_string", "(", "'registration_api/activation_email.txt'", ",", "ctx_dict", ")", "user", ".", "email_user", "(", "subject", ",", "message", ",", "settings", ".", "DEFAULT_FROM_EMAIL", ")" ]
avg_line_len: 39.695652
score: 19.608696
def ess(sim, n):
    """Calculate effective sample size

    Parameters
    ----------
    sim : chains
    n : int
        Parameter index starting from 0
    """
    try:
        ess = _chains.effective_sample_size(sim, n)
    except (ValueError, ZeroDivisionError):
        ess = nan
    return ess
[ "def", "ess", "(", "sim", ",", "n", ")", ":", "try", ":", "ess", "=", "_chains", ".", "effective_sample_size", "(", "sim", ",", "n", ")", "except", "(", "ValueError", ",", "ZeroDivisionError", ")", ":", "ess", "=", "nan", "return", "ess" ]
avg_line_len: 20.571429
score: 18.928571
def login(self, login, password, set_auth=False):
    """
    Attempts a login to the remote server and on success returns user id
    and session or None

    Warning: Do not depend on this. This will be deprecated with SSO.

    param set_auth: sets the authentication on the client
    """
    rv = self.session.post(
        self.host,
        dumps({
            "method": "common.db.login",
            "params": [login, password]
        }),
    )
    rv = loads(rv.content)['result']
    if set_auth:
        self.set_auth(
            SessionAuth(login, *rv)
        )
    return rv
[ "def", "login", "(", "self", ",", "login", ",", "password", ",", "set_auth", "=", "False", ")", ":", "rv", "=", "self", ".", "session", ".", "post", "(", "self", ".", "host", ",", "dumps", "(", "{", "\"method\"", ":", "\"common.db.login\"", ",", "\"params\"", ":", "[", "login", ",", "password", "]", "}", ")", ",", ")", "rv", "=", "loads", "(", "rv", ".", "content", ")", "[", "'result'", "]", "if", "set_auth", ":", "self", ".", "set_auth", "(", "SessionAuth", "(", "login", ",", "*", "rv", ")", ")", "return", "rv" ]
avg_line_len: 27.5
score: 16.333333
def setDisallowed(self, laneID, disallowedClasses):
    """setDisallowed(string, list) -> None

    Sets a list of disallowed vehicle classes.
    """
    if isinstance(disallowedClasses, str):
        disallowedClasses = [disallowedClasses]
    self._connection._beginMessage(
        tc.CMD_SET_LANE_VARIABLE, tc.LANE_DISALLOWED, laneID,
        1 + 4 + sum(map(len, disallowedClasses)) + 4 * len(disallowedClasses))
    self._connection._packStringList(disallowedClasses)
    self._connection._sendExact()
[ "def", "setDisallowed", "(", "self", ",", "laneID", ",", "disallowedClasses", ")", ":", "if", "isinstance", "(", "disallowedClasses", ",", "str", ")", ":", "disallowedClasses", "=", "[", "disallowedClasses", "]", "self", ".", "_connection", ".", "_beginMessage", "(", "tc", ".", "CMD_SET_LANE_VARIABLE", ",", "tc", ".", "LANE_DISALLOWED", ",", "laneID", ",", "1", "+", "4", "+", "sum", "(", "map", "(", "len", ",", "disallowedClasses", ")", ")", "+", "4", "*", "len", "(", "disallowedClasses", ")", ")", "self", ".", "_connection", ".", "_packStringList", "(", "disallowedClasses", ")", "self", ".", "_connection", ".", "_sendExact", "(", ")" ]
avg_line_len: 50.181818
score: 20.090909
def get_data(source, fields='*', env=None, first_row=0, count=-1, schema=None):
    """ A utility function to get a subset of data from a Table, Query, Pandas dataframe or List.

    Args:
      source: the source of the data. Can be a Table, Pandas DataFrame, List of dictionaries or
          lists, or a string, in which case it is expected to be the name of a table in BQ.
      fields: a list of fields that we want to return as a list of strings, comma-separated string,
          or '*' for all.
      env: if the data source is a Query module, this is the set of variable overrides for
          parameterizing the Query.
      first_row: the index of the first row to return; default 0. Only used if count is
          non-negative.
      count: the number of rows to return. If negative (the default), return all rows.
      schema: the schema of the data. Optional; if supplied this can be used to help do
          type-coercion.

    Returns:
      A tuple consisting of a dictionary and a count; the dictionary has two entries: 'cols'
      which is a list of column metadata entries for Google Charts, and 'rows' which is a list
      of lists of values. The count is the total number of rows in the source (independent of the
      first_row/count parameters).

    Raises:
      Exception if the request could not be fulfilled.
    """
    ipy = IPython.get_ipython()
    if env is None:
        env = {}
    env.update(ipy.user_ns)
    if isinstance(source, basestring):
        source = datalab.utils.get_item(ipy.user_ns, source, source)
        if isinstance(source, basestring):
            source = datalab.bigquery.Table(source)

    if isinstance(source, types.ModuleType) or isinstance(source, datalab.data.SqlStatement):
        source = datalab.bigquery.Query(source, values=env)

    if isinstance(source, list):
        if len(source) == 0:
            return _get_data_from_empty_list(source, fields, first_row, count, schema)
        elif isinstance(source[0], dict):
            return _get_data_from_list_of_dicts(source, fields, first_row, count, schema)
        elif isinstance(source[0], list):
            return _get_data_from_list_of_lists(source, fields, first_row, count, schema)
        else:
            raise Exception("To get tabular data from a list it must contain dictionaries or lists.")
    elif isinstance(source, pandas.DataFrame):
        return _get_data_from_dataframe(source, fields, first_row, count, schema)
    elif (isinstance(source, google.datalab.bigquery.Query) or
          isinstance(source, google.datalab.bigquery.Table)):
        return google.datalab.utils.commands._utils.get_data(
            source, fields, env, first_row, count, schema)
    elif isinstance(source, datalab.bigquery.Query):
        return _get_data_from_table(source.results(), fields, first_row, count, schema)
    elif isinstance(source, datalab.bigquery.Table):
        return _get_data_from_table(source, fields, first_row, count, schema)
    else:
        raise Exception("Cannot chart %s; unsupported object type" % source)
[ "def", "get_data", "(", "source", ",", "fields", "=", "'*'", ",", "env", "=", "None", ",", "first_row", "=", "0", ",", "count", "=", "-", "1", ",", "schema", "=", "None", ")", ":", "ipy", "=", "IPython", ".", "get_ipython", "(", ")", "if", "env", "is", "None", ":", "env", "=", "{", "}", "env", ".", "update", "(", "ipy", ".", "user_ns", ")", "if", "isinstance", "(", "source", ",", "basestring", ")", ":", "source", "=", "datalab", ".", "utils", ".", "get_item", "(", "ipy", ".", "user_ns", ",", "source", ",", "source", ")", "if", "isinstance", "(", "source", ",", "basestring", ")", ":", "source", "=", "datalab", ".", "bigquery", ".", "Table", "(", "source", ")", "if", "isinstance", "(", "source", ",", "types", ".", "ModuleType", ")", "or", "isinstance", "(", "source", ",", "datalab", ".", "data", ".", "SqlStatement", ")", ":", "source", "=", "datalab", ".", "bigquery", ".", "Query", "(", "source", ",", "values", "=", "env", ")", "if", "isinstance", "(", "source", ",", "list", ")", ":", "if", "len", "(", "source", ")", "==", "0", ":", "return", "_get_data_from_empty_list", "(", "source", ",", "fields", ",", "first_row", ",", "count", ",", "schema", ")", "elif", "isinstance", "(", "source", "[", "0", "]", ",", "dict", ")", ":", "return", "_get_data_from_list_of_dicts", "(", "source", ",", "fields", ",", "first_row", ",", "count", ",", "schema", ")", "elif", "isinstance", "(", "source", "[", "0", "]", ",", "list", ")", ":", "return", "_get_data_from_list_of_lists", "(", "source", ",", "fields", ",", "first_row", ",", "count", ",", "schema", ")", "else", ":", "raise", "Exception", "(", "\"To get tabular data from a list it must contain dictionaries or lists.\"", ")", "elif", "isinstance", "(", "source", ",", "pandas", ".", "DataFrame", ")", ":", "return", "_get_data_from_dataframe", "(", "source", ",", "fields", ",", "first_row", ",", "count", ",", "schema", ")", "elif", "(", "isinstance", "(", "source", ",", "google", ".", "datalab", ".", "bigquery", ".", "Query", ")", "or", "isinstance", "(", "source", ",", "google", ".", "datalab", ".", "bigquery", ".", "Table", ")", ")", ":", "return", "google", ".", "datalab", ".", "utils", ".", "commands", ".", "_utils", ".", "get_data", "(", "source", ",", "fields", ",", "env", ",", "first_row", ",", "count", ",", "schema", ")", "elif", "isinstance", "(", "source", ",", "datalab", ".", "bigquery", ".", "Query", ")", ":", "return", "_get_data_from_table", "(", "source", ".", "results", "(", ")", ",", "fields", ",", "first_row", ",", "count", ",", "schema", ")", "elif", "isinstance", "(", "source", ",", "datalab", ".", "bigquery", ".", "Table", ")", ":", "return", "_get_data_from_table", "(", "source", ",", "fields", ",", "first_row", ",", "count", ",", "schema", ")", "else", ":", "raise", "Exception", "(", "\"Cannot chart %s; unsupported object type\"", "%", "source", ")" ]
avg_line_len: 49.982456
score: 28.754386
def deleteThreads(self, thread_ids):
    """
    Deletes threads

    :param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
    :return: Whether the request was successful
    :raises: FBchatException if request failed
    """
    thread_ids = require_list(thread_ids)

    data_unpin = dict()
    data_delete = dict()
    for i, thread_id in enumerate(thread_ids):
        data_unpin["ids[{}]".format(thread_id)] = "false"
        data_delete["ids[{}]".format(i)] = thread_id
    r_unpin = self._post(self.req_url.PINNED_STATUS, data_unpin)
    r_delete = self._post(self.req_url.DELETE_THREAD, data_delete)
    return r_unpin.ok and r_delete.ok
[ "def", "deleteThreads", "(", "self", ",", "thread_ids", ")", ":", "thread_ids", "=", "require_list", "(", "thread_ids", ")", "data_unpin", "=", "dict", "(", ")", "data_delete", "=", "dict", "(", ")", "for", "i", ",", "thread_id", "in", "enumerate", "(", "thread_ids", ")", ":", "data_unpin", "[", "\"ids[{}]\"", ".", "format", "(", "thread_id", ")", "]", "=", "\"false\"", "data_delete", "[", "\"ids[{}]\"", ".", "format", "(", "i", ")", "]", "=", "thread_id", "r_unpin", "=", "self", ".", "_post", "(", "self", ".", "req_url", ".", "PINNED_STATUS", ",", "data_unpin", ")", "r_delete", "=", "self", ".", "_post", "(", "self", ".", "req_url", ".", "DELETE_THREAD", ",", "data_delete", ")", "return", "r_unpin", ".", "ok", "and", "r_delete", ".", "ok" ]
avg_line_len: 38.944444
score: 16.166667
def comment_create(self, post_id, comment_body, anonymous=None):
    """Action that lets you create a comment (Requires login).

    Parameters:
        post_id (int): The post id number to which you are responding.
        comment_body (str): The body of the comment.
        anonymous (int): Set to 1 if you want to post this comment
            anonymously.
    """
    params = {
        'comment[post_id]': post_id,
        'comment[body]': comment_body,
        'comment[anonymous]': anonymous
    }
    return self._get('comment/create', params, method='POST')
[ "def", "comment_create", "(", "self", ",", "post_id", ",", "comment_body", ",", "anonymous", "=", "None", ")", ":", "params", "=", "{", "'comment[post_id]'", ":", "post_id", ",", "'comment[body]'", ":", "comment_body", ",", "'comment[anonymous]'", ":", "anonymous", "}", "return", "self", ".", "_get", "(", "'comment/create'", ",", "params", ",", "method", "=", "'POST'", ")" ]
avg_line_len: 41.333333
score: 16.333333
def roundup(self, minimum_distance):
    """
    If the minimum_distance is nonzero, returns a copy of the
    DistancesContext with updated distances, i.e. the ones below
    minimum_distance are rounded up to the minimum_distance. Otherwise,
    returns the original DistancesContext unchanged.
    """
    if not minimum_distance:
        return self
    ctx = DistancesContext()
    for dist, array in vars(self).items():
        small_distances = array < minimum_distance
        if small_distances.any():
            array = array[:]  # make a copy first
            array[small_distances] = minimum_distance
        setattr(ctx, dist, array)
    return ctx
[ "def", "roundup", "(", "self", ",", "minimum_distance", ")", ":", "if", "not", "minimum_distance", ":", "return", "self", "ctx", "=", "DistancesContext", "(", ")", "for", "dist", ",", "array", "in", "vars", "(", "self", ")", ".", "items", "(", ")", ":", "small_distances", "=", "array", "<", "minimum_distance", "if", "small_distances", ".", "any", "(", ")", ":", "array", "=", "array", "[", ":", "]", "# make a copy first", "array", "[", "small_distances", "]", "=", "minimum_distance", "setattr", "(", "ctx", ",", "dist", ",", "array", ")", "return", "ctx" ]
avg_line_len: 41.823529
score: 12.882353
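The heart of `roundup` is clamping small distances from below. A standalone NumPy sketch (no DistancesContext needed); it copies explicitly, since slicing a NumPy array with `[:]` yields a view rather than a copy:

import numpy as np

distances = np.array([0.2, 5.0, 0.7])
minimum_distance = 1.0
small = distances < minimum_distance
clamped = distances.copy()        # explicit copy; distances[:] would be a view
clamped[small] = minimum_distance
print(clamped)                    # [1. 5. 1.]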
def assert_raises_regex(exception, regex, msg_fmt="{msg}"):
    """Fail unless an exception with a message that matches a regular
    expression is raised within the context.

    The regular expression can be a regular expression string or object.

    >>> with assert_raises_regex(ValueError, r"\\d+"):
    ...     raise ValueError("Error #42")
    ...
    >>> with assert_raises_regex(ValueError, r"\\d+"):
    ...     raise ValueError("Generic Error")
    ...
    Traceback (most recent call last):
        ...
    AssertionError: 'Generic Error' does not match '\\\\d+'

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * exc_type - exception type that is expected
    * exc_name - expected exception type name
    * text - actual error text
    * pattern - expected error message as regular expression string
    """

    def test(exc):
        compiled = re.compile(regex)
        if not exc.args:
            msg = "{} without message".format(exception.__name__)
            fail(
                msg_fmt.format(
                    msg=msg,
                    text=None,
                    pattern=compiled.pattern,
                    exc_type=exception,
                    exc_name=exception.__name__,
                )
            )
        text = exc.args[0]
        if not compiled.search(text):
            msg = "{!r} does not match {!r}".format(text, compiled.pattern)
            fail(
                msg_fmt.format(
                    msg=msg,
                    text=text,
                    pattern=compiled.pattern,
                    exc_type=exception,
                    exc_name=exception.__name__,
                )
            )

    context = AssertRaisesRegexContext(exception, regex, msg_fmt)
    context.add_test(test)
    return context
[ "def", "assert_raises_regex", "(", "exception", ",", "regex", ",", "msg_fmt", "=", "\"{msg}\"", ")", ":", "def", "test", "(", "exc", ")", ":", "compiled", "=", "re", ".", "compile", "(", "regex", ")", "if", "not", "exc", ".", "args", ":", "msg", "=", "\"{} without message\"", ".", "format", "(", "exception", ".", "__name__", ")", "fail", "(", "msg_fmt", ".", "format", "(", "msg", "=", "msg", ",", "text", "=", "None", ",", "pattern", "=", "compiled", ".", "pattern", ",", "exc_type", "=", "exception", ",", "exc_name", "=", "exception", ".", "__name__", ",", ")", ")", "text", "=", "exc", ".", "args", "[", "0", "]", "if", "not", "compiled", ".", "search", "(", "text", ")", ":", "msg", "=", "\"{!r} does not match {!r}\"", ".", "format", "(", "text", ",", "compiled", ".", "pattern", ")", "fail", "(", "msg_fmt", ".", "format", "(", "msg", "=", "msg", ",", "text", "=", "text", ",", "pattern", "=", "compiled", ".", "pattern", ",", "exc_type", "=", "exception", ",", "exc_name", "=", "exception", ".", "__name__", ",", ")", ")", "context", "=", "AssertRaisesRegexContext", "(", "exception", ",", "regex", ",", "msg_fmt", ")", "context", ".", "add_test", "(", "test", ")", "return", "context" ]
avg_line_len: 33.415094
score: 16.698113
def _point_scalar(self, name=None):
    """
    Returns point scalars of a vtk object

    Parameters
    ----------
    name : str
        Name of point scalars to retrieve.

    Returns
    -------
    scalars : np.ndarray
        Numpy array of scalars
    """
    if name is None:
        # use active scalar array
        field, name = self.active_scalar_info
        if field != POINT_DATA_FIELD:
            raise RuntimeError('Must specify an array to fetch.')

    vtkarr = self.GetPointData().GetArray(name)
    if vtkarr is None:
        raise AssertionError('({}) is not a point scalar'.format(name))

    # numpy does not support bit array data types
    if isinstance(vtkarr, vtk.vtkBitArray):
        vtkarr = vtk_bit_array_to_char(vtkarr)
        if name not in self._point_bool_array_names:
            self._point_bool_array_names.append(name)

    array = vtk_to_numpy(vtkarr)
    if array.dtype == np.uint8 and name in self._point_bool_array_names:
        array = array.view(np.bool)

    return array
[ "def", "_point_scalar", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "# use active scalar array", "field", ",", "name", "=", "self", ".", "active_scalar_info", "if", "field", "!=", "POINT_DATA_FIELD", ":", "raise", "RuntimeError", "(", "'Must specify an array to fetch.'", ")", "vtkarr", "=", "self", ".", "GetPointData", "(", ")", ".", "GetArray", "(", "name", ")", "if", "vtkarr", "is", "None", ":", "raise", "AssertionError", "(", "'({}) is not a point scalar'", ".", "format", "(", "name", ")", ")", "# numpy does not support bit array data types", "if", "isinstance", "(", "vtkarr", ",", "vtk", ".", "vtkBitArray", ")", ":", "vtkarr", "=", "vtk_bit_array_to_char", "(", "vtkarr", ")", "if", "name", "not", "in", "self", ".", "_point_bool_array_names", ":", "self", ".", "_point_bool_array_names", ".", "append", "(", "name", ")", "array", "=", "vtk_to_numpy", "(", "vtkarr", ")", "if", "array", ".", "dtype", "==", "np", ".", "uint8", "and", "name", "in", "self", ".", "_point_bool_array_names", ":", "array", "=", "array", ".", "view", "(", "np", ".", "bool", ")", "return", "array" ]
avg_line_len: 32.323529
score: 17.382353
def set_properties(self, path, mode):
    """Set file's properties (name and mode).

    This function is also in charge of swapping between textual and
    binary streams.
    """
    self.name = path
    self.mode = mode
    if 'b' in self.mode:
        if not isinstance(self.read_data, bytes):
            self.read_data = bytes(self.read_data, encoding='utf8')
    else:
        if not isinstance(self.read_data, str):
            self.read_data = str(self.read_data, encoding='utf8')
[ "def", "set_properties", "(", "self", ",", "path", ",", "mode", ")", ":", "self", ".", "name", "=", "path", "self", ".", "mode", "=", "mode", "if", "'b'", "in", "self", ".", "mode", ":", "if", "not", "isinstance", "(", "self", ".", "read_data", ",", "bytes", ")", ":", "self", ".", "read_data", "=", "bytes", "(", "self", ".", "read_data", ",", "encoding", "=", "'utf8'", ")", "else", ":", "if", "not", "isinstance", "(", "self", ".", "read_data", ",", "str", ")", ":", "self", ".", "read_data", "=", "str", "(", "self", ".", "read_data", ",", "encoding", "=", "'utf8'", ")" ]
avg_line_len: 34.933333
score: 19.066667
def get_base_branch():
    # type: () -> str
    """ Return the base branch for the current branch.

    This function will first try to guess the base branch and if it can't it
    will let the user choose the branch from the list of all local branches.

    Returns:
        str: The name of the branch the current branch is based on.
    """
    base_branch = git.guess_base_branch()

    if base_branch is None:
        log.info("Can't guess the base branch, you have to pick one yourself:")
        base_branch = choose_branch()

    return base_branch
[ "def", "get_base_branch", "(", ")", ":", "# type: () -> str", "base_branch", "=", "git", ".", "guess_base_branch", "(", ")", "if", "base_branch", "is", "None", ":", "log", ".", "info", "(", "\"Can't guess the base branch, you have to pick one yourself:\"", ")", "base_branch", "=", "choose_branch", "(", ")", "return", "base_branch" ]
avg_line_len: 31.823529
score: 23.411765
def all_finite(self, X):
    """returns true if X is finite, false otherwise"""
    # Adapted from sklearn utils: _assert_all_finite(X)
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in sum method.
    # Note: this is basically here because sklearn tree.py uses float32 internally,
    # and float64's that are finite are not finite in float32.
    if (X.dtype.char in np.typecodes['AllFloat']
            and not np.isfinite(np.asarray(X, dtype='float32').sum())
            and not np.isfinite(np.asarray(X, dtype='float32')).all()):
        return False
    return True
[ "def", "all_finite", "(", "self", ",", "X", ")", ":", "# Adapted from sklearn utils: _assert_all_finite(X)", "# First try an O(n) time, O(1) space solution for the common case that", "# everything is finite; fall back to O(n) space np.isfinite to prevent", "# false positives from overflow in sum method.", "# Note: this is basically here because sklearn tree.py uses float32 internally,", "# and float64's that are finite are not finite in float32.", "if", "(", "X", ".", "dtype", ".", "char", "in", "np", ".", "typecodes", "[", "'AllFloat'", "]", "and", "not", "np", ".", "isfinite", "(", "np", ".", "asarray", "(", "X", ",", "dtype", "=", "'float32'", ")", ".", "sum", "(", ")", ")", "and", "not", "np", ".", "isfinite", "(", "np", ".", "asarray", "(", "X", ",", "dtype", "=", "'float32'", ")", ")", ".", "all", "(", ")", ")", ":", "return", "False", "return", "True" ]
avg_line_len: 56.692308
score: 23.538462
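The comments in `all_finite` describe an O(1)-space fast path (test the sum) plus an element-wise fallback to avoid false positives from overflow. A short demonstration of why the fallback is needed:

import numpy as np

x = np.full(4, 1e38)                    # every element is finite, even as float32
x32 = np.asarray(x, dtype='float32')
print(np.isfinite(x32.sum()))           # False: the sum overflows to inf
print(np.isfinite(x32).all())           # True: the element-wise check rescues it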
def package(self):
    """ Find a package name from a build task's parameters.

    :returns: name of the package this build task is building.
    :raises: ValueError if we could not parse this task's request params.
    """
    if self.method == 'buildNotification':
        return self.params[1]['name']
    if self.method in ('createImage', 'image', 'livecd'):
        return self.params[0]
    if self.method == 'indirectionimage':
        return self.params[0]['name']
    # params[0] is the source URL for these tasks:
    if self.method not in ('build', 'buildArch', 'buildContainer',
                           'buildMaven', 'buildSRPMFromSCM', 'maven'):
        return None
    # (I wish there was a better way to do this.)
    source = self.params[0]
    o = urlparse(source)
    # build tasks can load an SRPM from a "cli-build" tmpdir:
    if source.endswith('.src.rpm'):
        srpm = os.path.basename(source)
        (name, version, release) = srpm.rsplit('-', 2)
        # Note we're throwing away version and release here. They could be
        # useful eventually, maybe in a "Package" class.
        return name
    # or an allowed SCM:
    elif o.scheme:
        package = os.path.basename(o.path)
        if package.endswith('.git'):
            package = package[:-4]
        if self.method == 'buildContainer':
            package += '-container'
        return package
    raise ValueError('could not parse source "%s"' % source)
[ "def", "package", "(", "self", ")", ":", "if", "self", ".", "method", "==", "'buildNotification'", ":", "return", "self", ".", "params", "[", "1", "]", "[", "'name'", "]", "if", "self", ".", "method", "in", "(", "'createImage'", ",", "'image'", ",", "'livecd'", ")", ":", "return", "self", ".", "params", "[", "0", "]", "if", "self", ".", "method", "==", "'indirectionimage'", ":", "return", "self", ".", "params", "[", "0", "]", "[", "'name'", "]", "# params[0] is the source URL for these tasks:", "if", "self", ".", "method", "not", "in", "(", "'build'", ",", "'buildArch'", ",", "'buildContainer'", ",", "'buildMaven'", ",", "'buildSRPMFromSCM'", ",", "'maven'", ")", ":", "return", "None", "# (I wish there was a better way to do this.)", "source", "=", "self", ".", "params", "[", "0", "]", "o", "=", "urlparse", "(", "source", ")", "# build tasks can load an SRPM from a \"cli-build\" tmpdir:", "if", "source", ".", "endswith", "(", "'.src.rpm'", ")", ":", "srpm", "=", "os", ".", "path", ".", "basename", "(", "source", ")", "(", "name", ",", "version", ",", "release", ")", "=", "srpm", ".", "rsplit", "(", "'-'", ",", "2", ")", "# Note we're throwing away version and release here. They could be", "# useful eventually, maybe in a \"Package\" class.", "return", "name", "# or an allowed SCM:", "elif", "o", ".", "scheme", ":", "package", "=", "os", ".", "path", ".", "basename", "(", "o", ".", "path", ")", "if", "package", ".", "endswith", "(", "'.git'", ")", ":", "package", "=", "package", "[", ":", "-", "4", "]", "if", "self", ".", "method", "==", "'buildContainer'", ":", "package", "+=", "'-container'", "return", "package", "raise", "ValueError", "(", "'could not parse source \"%s\"'", "%", "source", ")" ]
avg_line_len: 43.305556
score: 14.472222
def enrich(self, columns):
    """ This method appends at the end of the dataframe as many rows
    as items are found in the list of elements in the provided columns.

    This assumes that the length of the lists for the several specified
    columns is the same. As an example, for the row A
    {"C1":"V1", "C2":field1, "C3":field2, "C4":field3} we have three
    cells with a list of four elements each of them:
    * field1: [1,2,3,4]
    * field2: ["a", "b", "c", "d"]
    * field3: [1.1, 2.2, 3.3, 4.4]

    This method converts each of the elements of each cell in a new row
    keeping the columns name:
    {"C1":"V1", "C2":1, "C3":"a", "C4":1.1}
    {"C1":"V1", "C2":2, "C3":"b", "C4":2.2}
    {"C1":"V1", "C2":3, "C3":"c", "C4":3.3}
    {"C1":"V1", "C2":4, "C3":"d", "C4":4.4}

    :param columns: list of strings
    :rtype pandas.DataFrame
    """
    for column in columns:
        if column not in self.data.columns:
            return self.data

    # Looking for the rows with columns with lists of more
    # than one element
    first_column = list(self.data[columns[0]])
    count = 0
    append_df = pandas.DataFrame()
    for cell in first_column:
        if len(cell) >= 1:
            # Interested in those lists with more
            # than one element
            df = pandas.DataFrame()
            # Create a dataframe of N rows from the list
            for column in columns:
                df[column] = self.data.loc[count, column]
            # Repeat the original rows N times
            extra_df = pandas.DataFrame([self.data.loc[count]] * len(df))
            for column in columns:
                extra_df[column] = list(df[column])
            append_df = append_df.append(extra_df, ignore_index=True)
            extra_df = pandas.DataFrame()
        count = count + 1

    self.data = self.data.append(append_df, ignore_index=True)
    return self.data
[ "def", "enrich", "(", "self", ",", "columns", ")", ":", "for", "column", "in", "columns", ":", "if", "column", "not", "in", "self", ".", "data", ".", "columns", ":", "return", "self", ".", "data", "# Looking for the rows with columns with lists of more", "# than one element", "first_column", "=", "list", "(", "self", ".", "data", "[", "columns", "[", "0", "]", "]", ")", "count", "=", "0", "append_df", "=", "pandas", ".", "DataFrame", "(", ")", "for", "cell", "in", "first_column", ":", "if", "len", "(", "cell", ")", ">=", "1", ":", "# Interested in those lists with more", "# than one element", "df", "=", "pandas", ".", "DataFrame", "(", ")", "# Create a dataframe of N rows from the list", "for", "column", "in", "columns", ":", "df", "[", "column", "]", "=", "self", ".", "data", ".", "loc", "[", "count", ",", "column", "]", "# Repeat the original rows N times", "extra_df", "=", "pandas", ".", "DataFrame", "(", "[", "self", ".", "data", ".", "loc", "[", "count", "]", "]", "*", "len", "(", "df", ")", ")", "for", "column", "in", "columns", ":", "extra_df", "[", "column", "]", "=", "list", "(", "df", "[", "column", "]", ")", "append_df", "=", "append_df", ".", "append", "(", "extra_df", ",", "ignore_index", "=", "True", ")", "extra_df", "=", "pandas", ".", "DataFrame", "(", ")", "count", "=", "count", "+", "1", "self", ".", "data", "=", "self", ".", "data", ".", "append", "(", "append_df", ",", "ignore_index", "=", "True", ")", "return", "self", ".", "data" ]
avg_line_len: 37.555556
score: 17.259259
def lint(session):
    """Run flake8.

    Returns a failure if flake8 finds linting errors or sufficiently
    serious code quality issues.
    """
    session.interpreter = 'python3.6'
    session.install('flake8')

    # Install dev packages.
    _install_dev_packages(session)

    session.run(
        'flake8',
        '--exclude=contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/trace_exporter/gen/',
        'context/', 'contrib/', 'opencensus/', 'tests/', 'examples/')
[ "def", "lint", "(", "session", ")", ":", "session", ".", "interpreter", "=", "'python3.6'", "session", ".", "install", "(", "'flake8'", ")", "# Install dev packages.", "_install_dev_packages", "(", "session", ")", "session", ".", "run", "(", "'flake8'", ",", "'--exclude=contrib/opencensus-ext-ocagent/opencensus/ext/ocagent/trace_exporter/gen/'", ",", "'context/'", ",", "'contrib/'", ",", "'opencensus/'", ",", "'tests/'", ",", "'examples/'", ")" ]
avg_line_len: 31.066667
score: 20.066667
def new_transient(self, ext=''):
    '''Creates empty TransientFile with random name and given extension.
    File on FS is not created'''
    name = random_name(self.transient_length) + ext
    return TransientFile(self.transient_root, name, self)
[ "def", "new_transient", "(", "self", ",", "ext", "=", "''", ")", ":", "name", "=", "random_name", "(", "self", ".", "transient_length", ")", "+", "ext", "return", "TransientFile", "(", "self", ".", "transient_root", ",", "name", ",", "self", ")" ]
avg_line_len: 52
score: 16.8
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of reasons.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    XXX Consider this example code rather than the ultimate tool.
    """
    from shutil import copy2, Error, copystat
    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy2(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        if WindowsError is not None and isinstance(why, WindowsError):
            # Copying file access times may fail on Windows
            pass
        else:
            errors.extend((src, dst, str(why)))
    if errors:
        raise Error(errors)
[ "def", "copytree", "(", "src", ",", "dst", ",", "symlinks", "=", "False", ",", "ignore", "=", "None", ")", ":", "from", "shutil", "import", "copy2", ",", "Error", ",", "copystat", "names", "=", "os", ".", "listdir", "(", "src", ")", "if", "ignore", "is", "not", "None", ":", "ignored_names", "=", "ignore", "(", "src", ",", "names", ")", "else", ":", "ignored_names", "=", "set", "(", ")", "os", ".", "makedirs", "(", "dst", ")", "errors", "=", "[", "]", "for", "name", "in", "names", ":", "if", "name", "in", "ignored_names", ":", "continue", "srcname", "=", "os", ".", "path", ".", "join", "(", "src", ",", "name", ")", "dstname", "=", "os", ".", "path", ".", "join", "(", "dst", ",", "name", ")", "try", ":", "if", "symlinks", "and", "os", ".", "path", ".", "islink", "(", "srcname", ")", ":", "linkto", "=", "os", ".", "readlink", "(", "srcname", ")", "os", ".", "symlink", "(", "linkto", ",", "dstname", ")", "elif", "os", ".", "path", ".", "isdir", "(", "srcname", ")", ":", "copytree", "(", "srcname", ",", "dstname", ",", "symlinks", ",", "ignore", ")", "else", ":", "# Will raise a SpecialFileError for unsupported file types", "copy2", "(", "srcname", ",", "dstname", ")", "# catch the Error from the recursive copytree so that we can", "# continue with other files", "except", "Error", "as", "err", ":", "errors", ".", "extend", "(", "err", ".", "args", "[", "0", "]", ")", "except", "EnvironmentError", "as", "why", ":", "errors", ".", "append", "(", "(", "srcname", ",", "dstname", ",", "str", "(", "why", ")", ")", ")", "try", ":", "copystat", "(", "src", ",", "dst", ")", "except", "OSError", "as", "why", ":", "if", "WindowsError", "is", "not", "None", "and", "isinstance", "(", "why", ",", "WindowsError", ")", ":", "# Copying file access times may fail on Windows", "pass", "else", ":", "errors", ".", "extend", "(", "(", "src", ",", "dst", ",", "str", "(", "why", ")", ")", ")", "if", "errors", ":", "raise", "Error", "(", "errors", ")" ]
avg_line_len: 35.784615
score: 18.892308
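A usage sketch for the `copytree` above; the paths are illustrative, and `shutil.ignore_patterns` produces an ignore callable with the documented `callable(src, names) -> ignored_names` shape:

import shutil

# Destination must not exist yet, as documented above.
copytree('project_src', 'project_backup',
         ignore=shutil.ignore_patterns('*.pyc', '__pycache__'))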
def cmd_quick(action, action_space, ability_id, queued):
    """Do a quick command like 'Stop' or 'Stim'."""
    action_cmd = spatial(action, action_space).unit_command
    action_cmd.ability_id = ability_id
    action_cmd.queue_command = queued
[ "def", "cmd_quick", "(", "action", ",", "action_space", ",", "ability_id", ",", "queued", ")", ":", "action_cmd", "=", "spatial", "(", "action", ",", "action_space", ")", ".", "unit_command", "action_cmd", ".", "ability_id", "=", "ability_id", "action_cmd", ".", "queue_command", "=", "queued" ]
avg_line_len: 46.6
score: 8.4
def _get_variable_name_or_register(var, variables, names, params, prefix):
    '''
    Args:
        var (~nnabla.Variable)
        variables (OrderedDict)
        names (dict): Force name table, Variable -> str
        params (dict): NdArray -> str
        prefix(str)
    '''
    if var not in variables.values():
        vname = prefix
        if var.data in params:
            vname = params[var.data]
        elif var in names:
            vname = names[var]
        vname = _get_unique_variable_name(vname, variables)
        variables[vname] = var
    else:
        vname = list(variables.keys())[list(variables.values()).index(var)]
    return vname
[ "def", "_get_variable_name_or_register", "(", "var", ",", "variables", ",", "names", ",", "params", ",", "prefix", ")", ":", "if", "var", "not", "in", "variables", ".", "values", "(", ")", ":", "vname", "=", "prefix", "if", "var", ".", "data", "in", "params", ":", "vname", "=", "params", "[", "var", ".", "data", "]", "elif", "var", "in", "names", ":", "vname", "=", "names", "[", "var", "]", "vname", "=", "_get_unique_variable_name", "(", "vname", ",", "variables", ")", "variables", "[", "vname", "]", "=", "var", "else", ":", "vname", "=", "list", "(", "variables", ".", "keys", "(", ")", ")", "[", "list", "(", "variables", ".", "values", "(", ")", ")", ".", "index", "(", "var", ")", "]", "return", "vname" ]
avg_line_len: 30.428571
score: 19.380952
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                         max_wait_time=100, min_bytes=4096):
    """
    Encodes some FetchRequest structs

    Arguments:
        client_id: string
        correlation_id: int
        payloads: list of FetchRequest
        max_wait_time: int, how long to block waiting on min_bytes of data
        min_bytes: int, the minimum number of bytes to accumulate before
                   returning the response
    """

    payloads = [] if payloads is None else payloads
    grouped_payloads = group_by_topic_and_partition(payloads)

    message = []
    message.append(cls._encode_message_header(client_id, correlation_id,
                                              KafkaProtocol.FETCH_KEY))

    # -1 is the replica id
    message.append(struct.pack('>iiii', -1, max_wait_time, min_bytes,
                               len(grouped_payloads)))

    for topic, topic_payloads in grouped_payloads.items():
        message.append(write_short_string(topic))
        message.append(struct.pack('>i', len(topic_payloads)))
        for partition, payload in topic_payloads.items():
            message.append(struct.pack('>iqi', partition, payload.offset,
                                       payload.max_bytes))

    msg = b''.join(message)
    return struct.pack('>i%ds' % len(msg), len(msg), msg)
[ "def", "encode_fetch_request", "(", "cls", ",", "client_id", ",", "correlation_id", ",", "payloads", "=", "None", ",", "max_wait_time", "=", "100", ",", "min_bytes", "=", "4096", ")", ":", "payloads", "=", "[", "]", "if", "payloads", "is", "None", "else", "payloads", "grouped_payloads", "=", "group_by_topic_and_partition", "(", "payloads", ")", "message", "=", "[", "]", "message", ".", "append", "(", "cls", ".", "_encode_message_header", "(", "client_id", ",", "correlation_id", ",", "KafkaProtocol", ".", "FETCH_KEY", ")", ")", "# -1 is the replica id", "message", ".", "append", "(", "struct", ".", "pack", "(", "'>iiii'", ",", "-", "1", ",", "max_wait_time", ",", "min_bytes", ",", "len", "(", "grouped_payloads", ")", ")", ")", "for", "topic", ",", "topic_payloads", "in", "grouped_payloads", ".", "items", "(", ")", ":", "message", ".", "append", "(", "write_short_string", "(", "topic", ")", ")", "message", ".", "append", "(", "struct", ".", "pack", "(", "'>i'", ",", "len", "(", "topic_payloads", ")", ")", ")", "for", "partition", ",", "payload", "in", "topic_payloads", ".", "items", "(", ")", ":", "message", ".", "append", "(", "struct", ".", "pack", "(", "'>iqi'", ",", "partition", ",", "payload", ".", "offset", ",", "payload", ".", "max_bytes", ")", ")", "msg", "=", "b''", ".", "join", "(", "message", ")", "return", "struct", ".", "pack", "(", "'>i%ds'", "%", "len", "(", "msg", ")", ",", "len", "(", "msg", ")", ",", "msg", ")" ]
avg_line_len: 42.411765
score: 23
def sensoryCompute(self, activeMinicolumns, learn):
    """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    inputParams = {
        "activeColumns": activeMinicolumns,
        "basalInput": self.getLocationRepresentation(),
        "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
        "learn": learn
    }
    self.L4.compute(**inputParams)
    locationParams = {
        "anchorInput": self.L4.getActiveCells(),
        "anchorGrowthCandidates": self.L4.getWinnerCells(),
        "learn": learn,
    }
    for module in self.L6aModules:
        module.sensoryCompute(**locationParams)
    return (inputParams, locationParams)
[ "def", "sensoryCompute", "(", "self", ",", "activeMinicolumns", ",", "learn", ")", ":", "inputParams", "=", "{", "\"activeColumns\"", ":", "activeMinicolumns", ",", "\"basalInput\"", ":", "self", ".", "getLocationRepresentation", "(", ")", ",", "\"basalGrowthCandidates\"", ":", "self", ".", "getLearnableLocationRepresentation", "(", ")", ",", "\"learn\"", ":", "learn", "}", "self", ".", "L4", ".", "compute", "(", "*", "*", "inputParams", ")", "locationParams", "=", "{", "\"anchorInput\"", ":", "self", ".", "L4", ".", "getActiveCells", "(", ")", ",", "\"anchorGrowthCandidates\"", ":", "self", ".", "L4", ".", "getWinnerCells", "(", ")", ",", "\"learn\"", ":", "learn", ",", "}", "for", "module", "in", "self", ".", "L6aModules", ":", "module", ".", "sensoryCompute", "(", "*", "*", "locationParams", ")", "return", "(", "inputParams", ",", "locationParams", ")" ]
avg_line_len: 28.821429
score: 16.892857
def trim_tree(tree):
    """
    Remove the dead branches of tree, that is, the resulting tree accepts
    the same language as the original one (that is, the same words that end
    with the < character), but parts of the tree that lead to nothing are
    removed.

    :param tree: the tree;
    :return: the tree without dead branches.
    """
    # Remove empty nodes
    new_tree = {k: v for k, v in tree.items() if v}
    # Remove missing successors
    new_tree = {k: {successor: weight
                    for successor, weight in v.items()
                    if successor in new_tree or successor[0] == "<"}
                for k, v in new_tree.items()}

    while tree != new_tree:
        tree = new_tree
        # Remove empty nodes
        new_tree = {k: v for k, v in tree.items() if v}
        # Remove missing successors
        new_tree = {k: {successor: weight
                        for successor, weight in v.items()
                        if successor in new_tree or successor[0] == "<"}
                    for k, v in new_tree.items()}

    return new_tree
[ "def", "trim_tree", "(", "tree", ")", ":", "# Remove empty nodes", "new_tree", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "tree", ".", "items", "(", ")", "if", "v", "}", "# Remove missing successors", "new_tree", "=", "{", "k", ":", "{", "successor", ":", "weight", "for", "successor", ",", "weight", "in", "v", ".", "items", "(", ")", "if", "successor", "in", "new_tree", "or", "successor", "[", "0", "]", "==", "\"<\"", "}", "for", "k", ",", "v", "in", "new_tree", ".", "items", "(", ")", "}", "while", "tree", "!=", "new_tree", ":", "tree", "=", "new_tree", "# Remove empty nodes", "new_tree", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "tree", ".", "items", "(", ")", "if", "v", "}", "# Remove missing successors", "new_tree", "=", "{", "k", ":", "{", "successor", ":", "weight", "for", "successor", ",", "weight", "in", "v", ".", "items", "(", ")", "if", "successor", "in", "new_tree", "or", "successor", "[", "0", "]", "==", "\"<\"", "}", "for", "k", ",", "v", "in", "new_tree", ".", "items", "(", ")", "}", "return", "new_tree" ]
avg_line_len: 43.5
score: 17.961538
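A worked example for `trim_tree` above, using the stated convention that accepted words end with the "<" character: node "c" has no successors, so both it and the edge leading to it are pruned:

tree = {
    "a": {"b": 1, "c": 2},
    "b": {"<end": 1},
    "c": {},                # dead branch: no successors
}
print(trim_tree(tree))
# {'a': {'b': 1}, 'b': {'<end': 1}}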
def add_function(self, function_id=None, function=None, inputs=None,
                 outputs=None, input_domain=None, weight=None,
                 inp_weight=None, out_weight=None, description=None,
                 filters=None, await_domain=None, await_result=None,
                 **kwargs):
    """
    Add a single function node to dispatcher.

    :param function_id:
        Function node id. If None will be assigned as <fun.__name__>.
    :type function_id: str, optional

    :param function:
        Data node estimation function.
    :type function: callable, optional

    :param inputs:
        Ordered arguments (i.e., data node ids) needed by the function.
    :type inputs: list, optional

    :param outputs:
        Ordered results (i.e., data node ids) returned by the function.
    :type outputs: list, optional

    :param input_domain:
        A function that checks if input values satisfy the function domain.
        This can be any function that takes the same inputs of the function
        and returns True if input values satisfy the domain, otherwise False.
        In this case the dispatch algorithm doesn't pass on the node.
    :type input_domain: callable, optional

    :param weight:
        Node weight. It is a weight coefficient that is used by the dispatch
        algorithm to estimate the minimum workflow.
    :type weight: float, int, optional

    :param inp_weight:
        Edge weights from data nodes to the function node.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type inp_weight: dict[str, float | int], optional

    :param out_weight:
        Edge weights from the function node to data nodes.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type out_weight: dict[str, float | int], optional

    :param description:
        Function node's description.
    :type description: str, optional

    :param filters:
        A list of functions that are invoked after the invocation of the
        main function.
    :type filters: list[function], optional

    :param await_domain:
        If True the Dispatcher waits all input results before executing the
        `input_domain` function. If a number is defined this is used as
        `timeout` for `Future.result` method [default: True]. Note this is
        used when asynchronous or parallel execution is enabled.
    :type await_domain: bool|int|float, optional

    :param await_result:
        If True the Dispatcher waits output results before assigning them to
        the workflow. If a number is defined this is used as `timeout` for
        `Future.result` method [default: False]. Note this is used when
        asynchronous or parallel execution is enabled.
    :type await_result: bool|int|float, optional

    :param kwargs:
        Set additional node attributes using key=value.
    :type kwargs: keyword arguments, optional
    """
    kwargs.update(_call_kw(locals()))
    self.deferred.append(('add_function', kwargs))
    return self
[ "def", "add_function", "(", "self", ",", "function_id", "=", "None", ",", "function", "=", "None", ",", "inputs", "=", "None", ",", "outputs", "=", "None", ",", "input_domain", "=", "None", ",", "weight", "=", "None", ",", "inp_weight", "=", "None", ",", "out_weight", "=", "None", ",", "description", "=", "None", ",", "filters", "=", "None", ",", "await_domain", "=", "None", ",", "await_result", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "_call_kw", "(", "locals", "(", ")", ")", ")", "self", ".", "deferred", ".", "append", "(", "(", "'add_function'", ",", "kwargs", ")", ")", "return", "self" ]
avg_line_len: 42.582278
score: 22.962025
def HWProcess(cls, proc: HWProcess, ctx: ResourceContext) -> None:
    """
    Guess resource usage by HWProcess
    """
    seen = ctx.seen
    for stm in proc.statements:
        encl = stm._enclosed_for
        full_ev_dep = stm._is_completly_event_dependent
        now_ev_dep = stm._now_is_event_dependent
        ev_dep = full_ev_dep or now_ev_dep
        out_mux_dim = count_mux_inputs_for_outputs(stm)
        for o in stm._outputs:
            if o in seen:
                continue

            i = out_mux_dim[o]
            if isinstance(o._dtype, HArray):
                assert i == 1, (o, i, " only one ram port per HWProcess")
                for a in walk_assignments(stm, o):
                    assert len(a.indexes) == 1, "one address per RAM port"
                    addr = a.indexes[0]
                    ctx.registerRAM_write_port(o, addr, ev_dep)
            elif ev_dep:
                ctx.registerFF(o)
                if i > 1:
                    ctx.registerMUX(stm, o, i)
            elif o not in encl:
                ctx.registerLatch(o)
                if i > 1:
                    ctx.registerMUX(stm, o, i)
            elif i > 1:
                ctx.registerMUX(stm, o, i)
            else:
                # just a connection
                continue

        if isinstance(stm, SwitchContainer):
            caseEqs = set([stm.switchOn._eq(c[0]) for c in stm.cases])
            inputs = chain(
                [sig for sig in stm._inputs if sig not in caseEqs],
                [stm.switchOn])
        else:
            inputs = stm._inputs

        for i in inputs:
            # discover only internal signals in these statements for
            # operators
            if not i.hidden or i in seen:
                continue
            cls.HWProcess_operators(i, ctx, ev_dep)
[ "def", "HWProcess", "(", "cls", ",", "proc", ":", "HWProcess", ",", "ctx", ":", "ResourceContext", ")", "->", "None", ":", "seen", "=", "ctx", ".", "seen", "for", "stm", "in", "proc", ".", "statements", ":", "encl", "=", "stm", ".", "_enclosed_for", "full_ev_dep", "=", "stm", ".", "_is_completly_event_dependent", "now_ev_dep", "=", "stm", ".", "_now_is_event_dependent", "ev_dep", "=", "full_ev_dep", "or", "now_ev_dep", "out_mux_dim", "=", "count_mux_inputs_for_outputs", "(", "stm", ")", "for", "o", "in", "stm", ".", "_outputs", ":", "if", "o", "in", "seen", ":", "continue", "i", "=", "out_mux_dim", "[", "o", "]", "if", "isinstance", "(", "o", ".", "_dtype", ",", "HArray", ")", ":", "assert", "i", "==", "1", ",", "(", "o", ",", "i", ",", "\" only one ram port per HWProcess\"", ")", "for", "a", "in", "walk_assignments", "(", "stm", ",", "o", ")", ":", "assert", "len", "(", "a", ".", "indexes", ")", "==", "1", ",", "\"one address per RAM port\"", "addr", "=", "a", ".", "indexes", "[", "0", "]", "ctx", ".", "registerRAM_write_port", "(", "o", ",", "addr", ",", "ev_dep", ")", "elif", "ev_dep", ":", "ctx", ".", "registerFF", "(", "o", ")", "if", "i", ">", "1", ":", "ctx", ".", "registerMUX", "(", "stm", ",", "o", ",", "i", ")", "elif", "o", "not", "in", "encl", ":", "ctx", ".", "registerLatch", "(", "o", ")", "if", "i", ">", "1", ":", "ctx", ".", "registerMUX", "(", "stm", ",", "o", ",", "i", ")", "elif", "i", ">", "1", ":", "ctx", ".", "registerMUX", "(", "stm", ",", "o", ",", "i", ")", "else", ":", "# just a connection", "continue", "if", "isinstance", "(", "stm", ",", "SwitchContainer", ")", ":", "caseEqs", "=", "set", "(", "[", "stm", ".", "switchOn", ".", "_eq", "(", "c", "[", "0", "]", ")", "for", "c", "in", "stm", ".", "cases", "]", ")", "inputs", "=", "chain", "(", "[", "sig", "for", "sig", "in", "stm", ".", "_inputs", "if", "sig", "not", "in", "caseEqs", "]", ",", "[", "stm", ".", "switchOn", "]", ")", "else", ":", "inputs", "=", "stm", ".", "_inputs", "for", "i", "in", "inputs", ":", "# discover only internal signals in this statements for", "# operators", "if", "not", "i", ".", "hidden", "or", "i", "in", "seen", ":", "continue", "cls", ".", "HWProcess_operators", "(", "i", ",", "ctx", ",", "ev_dep", ")" ]
avg_line_len: 37.882353
score: 15.529412
def set_ctype(self, ctype, orig_ctype=None):
    """
    Set the selected content type. Will not override the value of the
    content type if that has already been determined.

    :param ctype: The content type string to set.
    :param orig_ctype: The original content type, as found in the
                       configuration.
    """
    if self.ctype is None:
        self.ctype = ctype
        self.orig_ctype = orig_ctype
[ "def", "set_ctype", "(", "self", ",", "ctype", ",", "orig_ctype", "=", "None", ")", ":", "if", "self", ".", "ctype", "is", "None", ":", "self", ".", "ctype", "=", "ctype", "self", ".", "orig_ctype", "=", "orig_ctype" ]
avg_line_len: 32.857143
score: 17
def start(self):
    """Public method for initiating connectivity with the emby server."""
    asyncio.ensure_future(self.register(), loop=self._event_loop)

    if self._own_loop:
        _LOGGER.info("Starting up our own event loop.")
        self._event_loop.run_forever()
        self._event_loop.close()
        _LOGGER.info("Connection shut down.")
[ "def", "start", "(", "self", ")", ":", "asyncio", ".", "ensure_future", "(", "self", ".", "register", "(", ")", ",", "loop", "=", "self", ".", "_event_loop", ")", "if", "self", ".", "_own_loop", ":", "_LOGGER", ".", "info", "(", "\"Starting up our own event loop.\"", ")", "self", ".", "_event_loop", ".", "run_forever", "(", ")", "self", ".", "_event_loop", ".", "close", "(", ")", "_LOGGER", ".", "info", "(", "\"Connection shut down.\"", ")" ]
avg_line_len: 41.555556
score: 15.666667
def appendText(self, text):
    '''
    appendText - append some inner text
    '''
    # self.text is just raw string of the text
    self.text += text
    self.isSelfClosing = False  # inner text means it can't self close anymore
    # self.blocks is either text or tags, in order of appearance
    self.blocks.append(text)
[ "def", "appendText", "(", "self", ",", "text", ")", ":", "# self.text is just raw string of the text", "self", ".", "text", "+=", "text", "self", ".", "isSelfClosing", "=", "False", "# inner text means it can't self close anymo", "# self.blocks is either text or tags, in order of appearance", "self", ".", "blocks", ".", "append", "(", "text", ")" ]
avg_line_len: 38.888889
score: 19.777778
def calculate_hash(options):
    """returns an option_collection_hash given a list of options"""
    options = sorted(list(options))
    sha_hash = sha1()
    # equivalent to loop over the options and call sha_hash.update()
    sha_hash.update(''.join(options).encode('utf-8'))
    return sha_hash.hexdigest()
[ "def", "calculate_hash", "(", "options", ")", ":", "options", "=", "sorted", "(", "list", "(", "options", ")", ")", "sha_hash", "=", "sha1", "(", ")", "# equivalent to loop over the options and call sha_hash.update()", "sha_hash", ".", "update", "(", "''", ".", "join", "(", "options", ")", ".", "encode", "(", "'utf-8'", ")", ")", "return", "sha_hash", ".", "hexdigest", "(", ")" ]
avg_line_len: 46.714286
score: 11.714286
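A quick usage sketch for `calculate_hash` above, assuming `sha1` is `hashlib.sha1` as the call pattern suggests; the sort makes the hash independent of option order:

from hashlib import sha1

assert calculate_hash(['debug', 'opt1']) == calculate_hash(['opt1', 'debug'])
print(calculate_hash(['debug', 'opt1']))   # a 40-character hex digest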
def oops(self):
    '''Out Of Phase Stereo effect. Mixes stereo to twin-mono where each
    mono channel contains the difference between the left and right stereo
    channels. This is sometimes known as the 'karaoke' effect as it often
    has the effect of removing most or all of the vocals from a recording.
    '''
    effect_args = ['oops']
    self.effects.extend(effect_args)
    self.effects_log.append('oops')
    return self
[ "def", "oops", "(", "self", ")", ":", "effect_args", "=", "[", "'oops'", "]", "self", ".", "effects", ".", "extend", "(", "effect_args", ")", "self", ".", "effects_log", ".", "append", "(", "'oops'", ")", "return", "self" ]
avg_line_len: 38.5
score: 26.166667
def add_event(self, source, reference, event_title, event_type, method='',
              description='', bucket_list=[], campaign='', confidence='',
              date=None):
    """
    Adds an event. If the event name already exists, it will return that
    event instead.

    Args:
        source: Source of the information
        reference: A reference where more information can be found
        event_title: The title of the event
        event_type: The type of event. See your CRITs vocabulary.
        method: The method for obtaining the event.
        description: A text description of the event.
        bucket_list: A list of bucket list items to add
        campaign: An associated campaign
        confidence: The campaign confidence
        date: A datetime.datetime object of when the event occurred.
    Returns:
        A JSON event object or None if there was an error.
    """
    # Check to see if the event already exists
    events = self.get_events(event_title)
    if events is not None:
        if events['meta']['total_count'] == 1:
            return events['objects'][0]
        if events['meta']['total_count'] > 1:
            log.error('Multiple events found while trying to add the event'
                      ': {}'.format(event_title))
            return None

    # Now we can create the event
    data = {
        'api_key': self.api_key,
        'username': self.username,
        'source': source,
        'reference': reference,
        'method': method,
        'campaign': campaign,
        'confidence': confidence,
        'description': description,
        'event_type': event_type,
        'date': date,
        'title': event_title,
        'bucket_list': ','.join(bucket_list),
    }
    r = requests.post('{}/events/'.format(self.url), data=data,
                      verify=self.verify, proxies=self.proxies)
    if r.status_code == 200:
        log.debug('Event created: {}'.format(event_title))
        json_obj = json.loads(r.text)
        if 'id' not in json_obj:
            log.error('Error adding event. id not returned.')
            return None
        return json_obj
    else:
        log.error('Event creation failed with status code: '
                  '{}'.format(r.status_code))
        return None
[ "def", "add_event", "(", "self", ",", "source", ",", "reference", ",", "event_title", ",", "event_type", ",", "method", "=", "''", ",", "description", "=", "''", ",", "bucket_list", "=", "[", "]", ",", "campaign", "=", "''", ",", "confidence", "=", "''", ",", "date", "=", "None", ")", ":", "# Check to see if the event already exists", "events", "=", "self", ".", "get_events", "(", "event_title", ")", "if", "events", "is", "not", "None", ":", "if", "events", "[", "'meta'", "]", "[", "'total_count'", "]", "==", "1", ":", "return", "events", "[", "'objects'", "]", "[", "0", "]", "if", "events", "[", "'meta'", "]", "[", "'total_count'", "]", ">", "1", ":", "log", ".", "error", "(", "'Multiple events found while trying to add the event'", "': {}'", ".", "format", "(", "event_title", ")", ")", "return", "None", "# Now we can create the event", "data", "=", "{", "'api_key'", ":", "self", ".", "api_key", ",", "'username'", ":", "self", ".", "username", ",", "'source'", ":", "source", ",", "'reference'", ":", "reference", ",", "'method'", ":", "method", ",", "'campaign'", ":", "campaign", ",", "'confidence'", ":", "confidence", ",", "'description'", ":", "description", ",", "'event_type'", ":", "event_type", ",", "'date'", ":", "date", ",", "'title'", ":", "event_title", ",", "'bucket_list'", ":", "','", ".", "join", "(", "bucket_list", ")", ",", "}", "r", "=", "requests", ".", "post", "(", "'{}/events/'", ".", "format", "(", "self", ".", "url", ")", ",", "data", "=", "data", ",", "verify", "=", "self", ".", "verify", ",", "proxies", "=", "self", ".", "proxies", ")", "if", "r", ".", "status_code", "==", "200", ":", "log", ".", "debug", "(", "'Event created: {}'", ".", "format", "(", "event_title", ")", ")", "json_obj", "=", "json", ".", "loads", "(", "r", ".", "text", ")", "if", "'id'", "not", "in", "json_obj", ":", "log", ".", "error", "(", "'Error adding event. id not returned.'", ")", "return", "None", "return", "json_obj", "else", ":", "log", ".", "error", "(", "'Event creation failed with status code: '", "'{}'", ".", "format", "(", "r", ".", "status_code", ")", ")", "return", "None" ]
38.402985
14.343284
def optimal_variational_posterior(
    kernel,
    inducing_index_points,
    observation_index_points,
    observations,
    observation_noise_variance,
    mean_fn=None,
    jitter=1e-6,
    name=None):
  """Model selection for optimal variational hyperparameters.

  Given the full training set (parameterized by `observations` and
  `observation_index_points`), compute the optimal variational location and
  scale for the VGP. This is based on the method suggested in
  [Titsias, 2009][1].

  Args:
    kernel: `PositiveSemidefiniteKernel`-like instance representing the GP's
      covariance function.
    inducing_index_points: `float` `Tensor` of locations of inducing points in
      the index set. Shape has the form `[b1, ..., bB, e2, f1, ..., fF]`, just
      like `observation_index_points`. The batch shape components needn't be
      identical to those of `observation_index_points`, but must be broadcast
      compatible with them.
    observation_index_points: `float` `Tensor` representing finite (batch of)
      vector(s) of points where observations are defined. Shape has the form
      `[b1, ..., bB, e1, f1, ..., fF]` where `F` is the number of feature
      dimensions and must equal `kernel.feature_ndims` and `e1` is the number
      (size) of index points in each batch (we denote it `e1` to distinguish
      it from the number of inducing index points, denoted `e2` below).
    observations: `float` `Tensor` representing collection, or batch of
      collections, of observations corresponding to
      `observation_index_points`. Shape has the form `[b1, ..., bB, e]`, which
      must be broadcastable with the batch and example shapes of
      `observation_index_points`. The batch shape `[b1, ..., bB]` must be
      broadcastable with the shapes of all other batched parameters
      (`kernel.batch_shape`, `observation_index_points`, etc.).
    observation_noise_variance: `float` `Tensor` representing the variance of
      the noise in the Normal likelihood distribution of the model. May be
      batched, in which case the batch shape must be broadcastable with the
      shapes of all other batched parameters (`kernel.batch_shape`,
      `index_points`, etc.).
      Default value: `0.`
    mean_fn: Python `callable` that acts on index points to produce a (batch
      of) vector(s) of mean values at those index points. Takes a `Tensor` of
      shape `[b1, ..., bB, f1, ..., fF]` and returns a `Tensor` whose shape is
      (broadcastable with) `[b1, ..., bB]`.
      Default value: `None` implies constant zero function.
    jitter: `float` scalar `Tensor` added to the diagonal of the covariance
      matrix to ensure positive definiteness of the covariance matrix.
      Default value: `1e-6`.
    name: Python `str` name prefixed to Ops created by this class.
      Default value: "optimal_variational_posterior".

  Returns:
    loc, scale: Tuple representing the variational location and scale.

  Raises:
    ValueError: if `mean_fn` is not `None` and is not callable.

  #### References

  [1]: Titsias, M. "Variational Model Selection for Sparse Gaussian Process
       Regression", 2009.
       http://proceedings.mlr.press/v5/titsias09a/titsias09a.pdf
  """
  with tf.name_scope(name or 'optimal_variational_posterior'):
    dtype = dtype_util.common_dtype(
        [inducing_index_points, observation_index_points, observations,
         observation_noise_variance, jitter], tf.float32)

    inducing_index_points = tf.convert_to_tensor(
        value=inducing_index_points, dtype=dtype,
        name='inducing_index_points')
    observation_index_points = tf.convert_to_tensor(
        value=observation_index_points, dtype=dtype,
        name='observation_index_points')
    observations = tf.convert_to_tensor(
        value=observations, dtype=dtype, name='observations')
    observation_noise_variance = tf.convert_to_tensor(
        value=observation_noise_variance, dtype=dtype,
        name='observation_noise_variance')
    jitter = tf.convert_to_tensor(
        value=jitter, dtype=dtype, name='jitter')

    # Default to a constant zero function.
    if mean_fn is None:
      mean_fn = lambda x: tf.zeros([1], dtype=dtype)
    else:
      if not callable(mean_fn):
        raise ValueError('`mean_fn` must be a Python callable')

    # z are the inducing points and x are the observation index points.
    kzz = kernel.matrix(inducing_index_points, inducing_index_points)
    kzx = kernel.matrix(inducing_index_points, observation_index_points)

    noise_var_inv = tf.math.reciprocal(observation_noise_variance)

    sigma_inv = _add_diagonal_shift(
        kzz + noise_var_inv * tf.matmul(kzx, kzx, adjoint_b=True),
        jitter)

    chol_sigma_inv = tf.linalg.cholesky(sigma_inv)

    kzx_lin_op = tf.linalg.LinearOperatorFullMatrix(kzx)
    kzx_obs = kzx_lin_op.matvec(
        observations - mean_fn(observation_index_points))
    kzz_lin_op = tf.linalg.LinearOperatorFullMatrix(kzz)
    loc = (mean_fn(inducing_index_points) +
           noise_var_inv * kzz_lin_op.matvec(
               _solve_cholesky_factored_system_vec(chol_sigma_inv, kzx_obs)))

    chol_sigma_inv_lin_op = tf.linalg.LinearOperatorLowerTriangular(
        chol_sigma_inv)
    scale = chol_sigma_inv_lin_op.solve(kzz)

    return loc, scale
[ "def", "optimal_variational_posterior", "(", "kernel", ",", "inducing_index_points", ",", "observation_index_points", ",", "observations", ",", "observation_noise_variance", ",", "mean_fn", "=", "None", ",", "jitter", "=", "1e-6", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "name_scope", "(", "name", "or", "'optimal_variational_posterior'", ")", ":", "dtype", "=", "dtype_util", ".", "common_dtype", "(", "[", "inducing_index_points", ",", "observation_index_points", ",", "observations", ",", "observation_noise_variance", ",", "jitter", "]", ",", "tf", ".", "float32", ")", "inducing_index_points", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "inducing_index_points", ",", "dtype", "=", "dtype", ",", "name", "=", "'inducing_index_points'", ")", "observation_index_points", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "observation_index_points", ",", "dtype", "=", "dtype", ",", "name", "=", "'observation_index_points'", ")", "observations", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "observations", ",", "dtype", "=", "dtype", ",", "name", "=", "'observations'", ")", "observation_noise_variance", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "observation_noise_variance", ",", "dtype", "=", "dtype", ",", "name", "=", "'observation_noise_variance'", ")", "jitter", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "jitter", ",", "dtype", "=", "dtype", ",", "name", "=", "'jitter'", ")", "# Default to a constant zero function.", "if", "mean_fn", "is", "None", ":", "mean_fn", "=", "lambda", "x", ":", "tf", ".", "zeros", "(", "[", "1", "]", ",", "dtype", "=", "dtype", ")", "else", ":", "if", "not", "callable", "(", "mean_fn", ")", ":", "raise", "ValueError", "(", "'`mean_fn` must be a Python callable'", ")", "# z are the inducing points and x are the observation index points.", "kzz", "=", "kernel", ".", "matrix", "(", "inducing_index_points", ",", "inducing_index_points", ")", "kzx", "=", "kernel", ".", "matrix", "(", "inducing_index_points", ",", "observation_index_points", ")", "noise_var_inv", "=", "tf", ".", "math", ".", "reciprocal", "(", "observation_noise_variance", ")", "sigma_inv", "=", "_add_diagonal_shift", "(", "kzz", "+", "noise_var_inv", "*", "tf", ".", "matmul", "(", "kzx", ",", "kzx", ",", "adjoint_b", "=", "True", ")", ",", "jitter", ")", "chol_sigma_inv", "=", "tf", ".", "linalg", ".", "cholesky", "(", "sigma_inv", ")", "kzx_lin_op", "=", "tf", ".", "linalg", ".", "LinearOperatorFullMatrix", "(", "kzx", ")", "kzx_obs", "=", "kzx_lin_op", ".", "matvec", "(", "observations", "-", "mean_fn", "(", "observation_index_points", ")", ")", "kzz_lin_op", "=", "tf", ".", "linalg", ".", "LinearOperatorFullMatrix", "(", "kzz", ")", "loc", "=", "(", "mean_fn", "(", "inducing_index_points", ")", "+", "noise_var_inv", "*", "kzz_lin_op", ".", "matvec", "(", "_solve_cholesky_factored_system_vec", "(", "chol_sigma_inv", ",", "kzx_obs", ")", ")", ")", "chol_sigma_inv_lin_op", "=", "tf", ".", "linalg", ".", "LinearOperatorLowerTriangular", "(", "chol_sigma_inv", ")", "scale", "=", "chol_sigma_inv_lin_op", ".", "solve", "(", "kzz", ")", "return", "loc", ",", "scale" ]
45.133333
23.25
def engine(log_file, no_distribute, yes, config_file, make_html_report,
           upgrade_db, db_version, what_if_I_upgrade,
           run, list_hazard_calculations, list_risk_calculations,
           delete_calculation, delete_uncompleted_calculations,
           hazard_calculation_id, list_outputs, show_log, export_output,
           export_outputs, exports='', log_level='info', reuse_hazard=False):
    """
    Run a calculation using the traditional command line API
    """
    if not run:
        # configure a basic logging
        logs.init()

    if config_file:
        config.read(os.path.abspath(os.path.expanduser(config_file)),
                    soft_mem_limit=int, hard_mem_limit=int, port=int,
                    multi_user=valid.boolean, multi_node=valid.boolean)

    if no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the datadir exists
    datadir = datastore.get_datadir()
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    dbserver.ensure_on()
    # check if we are talking to the right server
    err = dbserver.check_foreign()
    if err:
        sys.exit(err)

    if upgrade_db:
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        if msg.startswith('Your database is already updated'):
            pass
        elif yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if db_version:
        safeprint(logs.dbcmd('db_version'))
        sys.exit(0)

    if what_if_I_upgrade:
        safeprint(logs.dbcmd('what_if_I_upgrade', 'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if hazard_calculation_id == -1:
        # get the latest calculation of the current user
        hc_id = get_job_id(hazard_calculation_id, getpass.getuser())
    elif hazard_calculation_id:
        # make it possible to use calculations made by another user
        hc_id = get_job_id(hazard_calculation_id)
    else:
        hc_id = None
    if run:
        log_file = os.path.expanduser(log_file) \
            if log_file is not None else None
        job_inis = [os.path.expanduser(f) for f in run]
        if len(job_inis) == 1 and not hc_id:
            # init logs before calling get_oqparam
            logs.init('nojob', getattr(logging, log_level.upper()))
            # not using logs.handle that logs on the db
            oq = readinput.get_oqparam(job_inis[0])
            smart_run(job_inis[0], oq, log_level, log_file,
                      exports, reuse_hazard)
            return
        for i, job_ini in enumerate(job_inis):
            open(job_ini, 'rb').read()  # IOError if the file does not exist
            job_id = run_job(job_ini, log_level, log_file,
                             exports, hazard_calculation_id=hc_id)
            if not hc_id:
                # use the first calculation as base for the others
                hc_id = job_id
    # hazard
    elif list_hazard_calculations:
        for line in logs.dbcmd(
                'list_calculations', 'hazard', getpass.getuser()):
            safeprint(line)
    elif delete_calculation is not None:
        del_calculation(delete_calculation, yes)
    # risk
    elif list_risk_calculations:
        for line in logs.dbcmd('list_calculations', 'risk', getpass.getuser()):
            safeprint(line)
    # export
    elif make_html_report:
        safeprint('Written %s' % make_report(make_html_report))
        sys.exit(0)
    elif list_outputs is not None:
        hc_id = get_job_id(list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            safeprint(line)
    elif show_log is not None:
        hc_id = get_job_id(show_log)
        for line in logs.dbcmd('get_log', hc_id):
            safeprint(line)
    elif export_output is not None:
        output_id, target_dir = export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(
                dskey, calc_id, datadir, os.path.expanduser(target_dir),
                exports or 'csv,xml'):
            safeprint(line)
    elif export_outputs is not None:
        job_id, target_dir = export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(
                hc_id, os.path.expanduser(target_dir), exports or 'csv,xml'):
            safeprint(line)
    elif delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())
    else:
        engine.parentparser.prog = 'oq engine'
        engine.parentparser.print_usage()
[ "def", "engine", "(", "log_file", ",", "no_distribute", ",", "yes", ",", "config_file", ",", "make_html_report", ",", "upgrade_db", ",", "db_version", ",", "what_if_I_upgrade", ",", "run", ",", "list_hazard_calculations", ",", "list_risk_calculations", ",", "delete_calculation", ",", "delete_uncompleted_calculations", ",", "hazard_calculation_id", ",", "list_outputs", ",", "show_log", ",", "export_output", ",", "export_outputs", ",", "exports", "=", "''", ",", "log_level", "=", "'info'", ",", "reuse_hazard", "=", "False", ")", ":", "if", "not", "run", ":", "# configure a basic logging", "logs", ".", "init", "(", ")", "if", "config_file", ":", "config", ".", "read", "(", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "config_file", ")", ")", ",", "soft_mem_limit", "=", "int", ",", "hard_mem_limit", "=", "int", ",", "port", "=", "int", ",", "multi_user", "=", "valid", ".", "boolean", ",", "multi_node", "=", "valid", ".", "boolean", ")", "if", "no_distribute", ":", "os", ".", "environ", "[", "'OQ_DISTRIBUTE'", "]", "=", "'no'", "# check if the datadir exists", "datadir", "=", "datastore", ".", "get_datadir", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "datadir", ")", ":", "os", ".", "makedirs", "(", "datadir", ")", "dbserver", ".", "ensure_on", "(", ")", "# check if we are talking to the right server", "err", "=", "dbserver", ".", "check_foreign", "(", ")", "if", "err", ":", "sys", ".", "exit", "(", "err", ")", "if", "upgrade_db", ":", "msg", "=", "logs", ".", "dbcmd", "(", "'what_if_I_upgrade'", ",", "'read_scripts'", ")", "if", "msg", ".", "startswith", "(", "'Your database is already updated'", ")", ":", "pass", "elif", "yes", "or", "confirm", "(", "'Proceed? (y/n) '", ")", ":", "logs", ".", "dbcmd", "(", "'upgrade_db'", ")", "sys", ".", "exit", "(", "0", ")", "if", "db_version", ":", "safeprint", "(", "logs", ".", "dbcmd", "(", "'db_version'", ")", ")", "sys", ".", "exit", "(", "0", ")", "if", "what_if_I_upgrade", ":", "safeprint", "(", "logs", ".", "dbcmd", "(", "'what_if_I_upgrade'", ",", "'extract_upgrade_scripts'", ")", ")", "sys", ".", "exit", "(", "0", ")", "# check if the db is outdated", "outdated", "=", "logs", ".", "dbcmd", "(", "'check_outdated'", ")", "if", "outdated", ":", "sys", ".", "exit", "(", "outdated", ")", "# hazard or hazard+risk", "if", "hazard_calculation_id", "==", "-", "1", ":", "# get the latest calculation of the current user", "hc_id", "=", "get_job_id", "(", "hazard_calculation_id", ",", "getpass", ".", "getuser", "(", ")", ")", "elif", "hazard_calculation_id", ":", "# make it possible to use calculations made by another user", "hc_id", "=", "get_job_id", "(", "hazard_calculation_id", ")", "else", ":", "hc_id", "=", "None", "if", "run", ":", "log_file", "=", "os", ".", "path", ".", "expanduser", "(", "log_file", ")", "if", "log_file", "is", "not", "None", "else", "None", "job_inis", "=", "[", "os", ".", "path", ".", "expanduser", "(", "f", ")", "for", "f", "in", "run", "]", "if", "len", "(", "job_inis", ")", "==", "1", "and", "not", "hc_id", ":", "# init logs before calling get_oqparam", "logs", ".", "init", "(", "'nojob'", ",", "getattr", "(", "logging", ",", "log_level", ".", "upper", "(", ")", ")", ")", "# not using logs.handle that logs on the db", "oq", "=", "readinput", ".", "get_oqparam", "(", "job_inis", "[", "0", "]", ")", "smart_run", "(", "job_inis", "[", "0", "]", ",", "oq", ",", "log_level", ",", "log_file", ",", "exports", ",", "reuse_hazard", ")", "return", "for", "i", ",", "job_ini", 
"in", "enumerate", "(", "job_inis", ")", ":", "open", "(", "job_ini", ",", "'rb'", ")", ".", "read", "(", ")", "# IOError if the file does not exist", "job_id", "=", "run_job", "(", "job_ini", ",", "log_level", ",", "log_file", ",", "exports", ",", "hazard_calculation_id", "=", "hc_id", ")", "if", "not", "hc_id", ":", "# use the first calculation as base for the others", "hc_id", "=", "job_id", "# hazard", "elif", "list_hazard_calculations", ":", "for", "line", "in", "logs", ".", "dbcmd", "(", "'list_calculations'", ",", "'hazard'", ",", "getpass", ".", "getuser", "(", ")", ")", ":", "safeprint", "(", "line", ")", "elif", "delete_calculation", "is", "not", "None", ":", "del_calculation", "(", "delete_calculation", ",", "yes", ")", "# risk", "elif", "list_risk_calculations", ":", "for", "line", "in", "logs", ".", "dbcmd", "(", "'list_calculations'", ",", "'risk'", ",", "getpass", ".", "getuser", "(", ")", ")", ":", "safeprint", "(", "line", ")", "# export", "elif", "make_html_report", ":", "safeprint", "(", "'Written %s'", "%", "make_report", "(", "make_html_report", ")", ")", "sys", ".", "exit", "(", "0", ")", "elif", "list_outputs", "is", "not", "None", ":", "hc_id", "=", "get_job_id", "(", "list_outputs", ")", "for", "line", "in", "logs", ".", "dbcmd", "(", "'list_outputs'", ",", "hc_id", ")", ":", "safeprint", "(", "line", ")", "elif", "show_log", "is", "not", "None", ":", "hc_id", "=", "get_job_id", "(", "show_log", ")", "for", "line", "in", "logs", ".", "dbcmd", "(", "'get_log'", ",", "hc_id", ")", ":", "safeprint", "(", "line", ")", "elif", "export_output", "is", "not", "None", ":", "output_id", ",", "target_dir", "=", "export_output", "dskey", ",", "calc_id", ",", "datadir", "=", "logs", ".", "dbcmd", "(", "'get_output'", ",", "int", "(", "output_id", ")", ")", "for", "line", "in", "core", ".", "export_output", "(", "dskey", ",", "calc_id", ",", "datadir", ",", "os", ".", "path", ".", "expanduser", "(", "target_dir", ")", ",", "exports", "or", "'csv,xml'", ")", ":", "safeprint", "(", "line", ")", "elif", "export_outputs", "is", "not", "None", ":", "job_id", ",", "target_dir", "=", "export_outputs", "hc_id", "=", "get_job_id", "(", "job_id", ")", "for", "line", "in", "core", ".", "export_outputs", "(", "hc_id", ",", "os", ".", "path", ".", "expanduser", "(", "target_dir", ")", ",", "exports", "or", "'csv,xml'", ")", ":", "safeprint", "(", "line", ")", "elif", "delete_uncompleted_calculations", ":", "logs", ".", "dbcmd", "(", "'delete_uncompleted_calculations'", ",", "getpass", ".", "getuser", "(", ")", ")", "else", ":", "engine", ".", "parentparser", ".", "prog", "=", "'oq engine'", "engine", ".", "parentparser", ".", "print_usage", "(", ")" ]
35.757813
18.351563
def normalize_pts(pts, ymax, scaler=2):
    """
    scales all coordinates and flips the y axis due to different
    origin coordinates (top left vs. bottom left)
    """
    return [(x * scaler, ymax - (y * scaler)) for x, y in pts]
[ "def", "normalize_pts", "(", "pts", ",", "ymax", ",", "scaler", "=", "2", ")", ":", "return", "[", "(", "x", "*", "scaler", ",", "ymax", "-", "(", "y", "*", "scaler", ")", ")", "for", "x", ",", "y", "in", "pts", "]" ]
37.166667
8.5
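A worked example of the flip above: with ymax=100 and the default scaler of 2, a bottom-left-origin point (5, 10) becomes (10, 80) measured from the top edge.

def normalize_pts(pts, ymax, scaler=2):
    return [(x * scaler, ymax - (y * scaler)) for x, y in pts]

# (5, 10) -> (10, 80); (0, 50) doubles to y=100, i.e. the very top maps to 0.
print(normalize_pts([(5, 10), (0, 50)], ymax=100))  # [(10, 80), (0, 0)]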
def _initVirtualOutputs(self):
    """
    Sets up the structure to hold all the output data arrays for this
    image in memory.
    """
    self.virtualOutputs = {}
    for product in self.outputNames:
        self.virtualOutputs[product] = None
[ "def", "_initVirtualOutputs", "(", "self", ")", ":", "self", ".", "virtualOutputs", "=", "{", "}", "for", "product", "in", "self", ".", "outputNames", ":", "self", ".", "virtualOutputs", "[", "product", "]", "=", "None" ]
37.285714
5.142857
async def get_json(
        self, force: bool=False, silent: bool=False, cache: bool=True,
) -> Any:
    """Parses the body data as JSON and returns it.

    Arguments:
        force: Force JSON parsing even if the mimetype is not JSON.
        silent: Do not trigger error handling if parsing fails, without
            this the :meth:`on_json_loading_failed` will be called on
            error.
        cache: Cache the parsed JSON on this request object.
    """
    if cache and self._cached_json is not sentinel:
        return self._cached_json

    if not (force or self.is_json):
        return None

    data = await self._load_json_data()
    try:
        result = loads(data)
    except ValueError as error:
        if silent:
            result = None
        else:
            self.on_json_loading_failed(error)
    if cache:
        self._cached_json = result
    return result
[ "async", "def", "get_json", "(", "self", ",", "force", ":", "bool", "=", "False", ",", "silent", ":", "bool", "=", "False", ",", "cache", ":", "bool", "=", "True", ",", ")", "->", "Any", ":", "if", "cache", "and", "self", ".", "_cached_json", "is", "not", "sentinel", ":", "return", "self", ".", "_cached_json", "if", "not", "(", "force", "or", "self", ".", "is_json", ")", ":", "return", "None", "data", "=", "await", "self", ".", "_load_json_data", "(", ")", "try", ":", "result", "=", "loads", "(", "data", ")", "except", "ValueError", "as", "error", ":", "if", "silent", ":", "result", "=", "None", "else", ":", "self", ".", "on_json_loading_failed", "(", "error", ")", "if", "cache", ":", "self", ".", "_cached_json", "=", "result", "return", "result" ]
33.103448
18.896552
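A standalone sketch of the sentinel-based caching used above, with the async plumbing stripped out; the sentinel object and the request class here are local stand-ins, not the framework's own objects.

from json import loads

sentinel = object()  # stand-in for the framework's module-level sentinel

class FakeRequest:
    def __init__(self, body):
        self._body = body
        self._cached_json = sentinel  # "not parsed yet", distinct from None

    def get_json(self, cache=True):
        if cache and self._cached_json is not sentinel:
            return self._cached_json  # later calls skip re-parsing
        result = loads(self._body)
        if cache:
            self._cached_json = result
        return result

r = FakeRequest('{"ok": true}')
assert r.get_json() is r.get_json()  # the cached object is reused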
def define_charset(self, code, mode):
    """Define ``G0`` or ``G1`` charset.

    :param str code: character set code, should be a character
        from ``"B0UK"``, otherwise ignored.
    :param str mode: if ``"("`` ``G0`` charset is defined, if
        ``")"`` -- we operate on ``G1``.

    .. warning:: User-defined charsets are currently not supported.
    """
    if code in cs.MAPS:
        if mode == "(":
            self.g0_charset = cs.MAPS[code]
        elif mode == ")":
            self.g1_charset = cs.MAPS[code]
[ "def", "define_charset", "(", "self", ",", "code", ",", "mode", ")", ":", "if", "code", "in", "cs", ".", "MAPS", ":", "if", "mode", "==", "\"(\"", ":", "self", ".", "g0_charset", "=", "cs", ".", "MAPS", "[", "code", "]", "elif", "mode", "==", "\")\"", ":", "self", ".", "g1_charset", "=", "cs", ".", "MAPS", "[", "code", "]" ]
39.133333
16.866667
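A hedged usage sketch; the method matches pyte's Screen API, so the import and the meaning of charset code "0" (DEC special graphics in pyte's cs.MAPS) are assumptions.

import pyte  # assumption: the pyte terminal-emulator package

screen = pyte.Screen(80, 24)
screen.define_charset("0", "(")  # G0 -> DEC special graphics map
screen.define_charset("B", ")")  # G1 -> the default ASCII map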
def similarity_graph(self, k=5, radius=None, include_self_edges=False,
                     output_type='SGraph', verbose=True):
    """
    Construct the similarity graph on the reference dataset, which is
    already stored in the model. This is conceptually very similar to
    running `query` with the reference set, but this method is optimized
    for the purpose, syntactically simpler, and automatically removes
    self-edges.

    Parameters
    ----------
    k : int, optional
        Maximum number of neighbors to return for each point in the
        dataset. Setting this to ``None`` deactivates the constraint, so
        that all neighbors are returned within ``radius`` of a given
        point.

    radius : float, optional
        For a given point, only neighbors within this distance are
        returned. The default is ``None``, in which case the ``k``
        nearest neighbors are returned for each query point, regardless
        of distance.

    include_self_edges : bool, optional
        For most distance functions, each point in the model's reference
        dataset is its own nearest neighbor. If this parameter is set to
        False, this result is ignored, and the nearest neighbors are
        returned *excluding* the point itself.

    output_type : {'SGraph', 'SFrame'}, optional
        By default, the results are returned in the form of an SGraph,
        where each point in the reference dataset is a vertex and an edge
        A -> B indicates that vertex B is a nearest neighbor of vertex A.
        If 'output_type' is set to 'SFrame', the output is in the same
        form as the results of the 'query' method: an SFrame with columns
        indicating the query label (in this case the query data is the
        same as the reference data), reference label, distance between
        the two points, and the rank of the neighbor.

    verbose : bool, optional
        If True, print progress updates and model details.

    Returns
    -------
    out : SFrame or SGraph
        The type of the output object depends on the 'output_type'
        parameter. See the parameter description for more detail.

    Notes
    -----
    - If both ``k`` and ``radius`` are set to ``None``, each data point
      is matched to the entire dataset. If the reference dataset has
      :math:`n` rows, the output is an SFrame with :math:`n^2` rows (or
      an SGraph with :math:`n^2` edges).

    - For models created with the 'lsh' method, the output similarity
      graph may have fewer vertices than there are data points in the
      original reference set. Because LSH is an approximate method, a
      query point may have fewer than 'k' neighbors. If LSH returns no
      neighbors at all for a query and self-edges are excluded, the query
      point is omitted from the results.

    Examples
    --------
    First construct an SFrame and create a nearest neighbors model:

    >>> sf = turicreate.SFrame({'x1': [0.98, 0.62, 0.11],
    ...                         'x2': [0.69, 0.58, 0.36]})
    ...
    >>> model = turicreate.nearest_neighbors.create(sf, distance='euclidean')

    Unlike the ``query`` method, there is no need for a second dataset
    with ``similarity_graph``.

    >>> g = model.similarity_graph(k=1)  # an SGraph
    >>> g.edges
    +----------+----------+----------------+------+
    | __src_id | __dst_id |    distance    | rank |
    +----------+----------+----------------+------+
    |    0     |    1     | 0.376430604494 |  1   |
    |    2     |    1     | 0.55542776308  |  1   |
    |    1     |    0     | 0.376430604494 |  1   |
    +----------+----------+----------------+------+
    """
    ## Validate inputs.
    if k is not None:
        if not isinstance(k, int):
            raise ValueError("Input 'k' must be an integer.")

        if k <= 0:
            raise ValueError("Input 'k' must be larger than 0.")

    if radius is not None:
        if not isinstance(radius, (int, float)):
            raise ValueError("Input 'radius' must be an integer or float.")

        if radius < 0:
            raise ValueError("Input 'radius' must be non-negative.")

    ## Set k and radius to special values to indicate 'None'
    if k is None:
        k = -1

    if radius is None:
        radius = -1.0

    opts = {'model': self.__proxy__,
            'model_name': self.__name__,
            'k': k,
            'radius': radius,
            'include_self_edges': include_self_edges}

    with QuietProgress(verbose):
        result = _turicreate.extensions._nearest_neighbors.similarity_graph(opts)

    knn = result['neighbors']

    if output_type == "SFrame":
        return knn
    else:
        sg = _SGraph(edges=knn, src_field='query_label',
                     dst_field='reference_label')
        return sg
[ "def", "similarity_graph", "(", "self", ",", "k", "=", "5", ",", "radius", "=", "None", ",", "include_self_edges", "=", "False", ",", "output_type", "=", "'SGraph'", ",", "verbose", "=", "True", ")", ":", "## Validate inputs.", "if", "k", "is", "not", "None", ":", "if", "not", "isinstance", "(", "k", ",", "int", ")", ":", "raise", "ValueError", "(", "\"Input 'k' must be an integer.\"", ")", "if", "k", "<=", "0", ":", "raise", "ValueError", "(", "\"Input 'k' must be larger than 0.\"", ")", "if", "radius", "is", "not", "None", ":", "if", "not", "isinstance", "(", "radius", ",", "(", "int", ",", "float", ")", ")", ":", "raise", "ValueError", "(", "\"Input 'radius' must be an integer or float.\"", ")", "if", "radius", "<", "0", ":", "raise", "ValueError", "(", "\"Input 'radius' must be non-negative.\"", ")", "## Set k and radius to special values to indicate 'None'", "if", "k", "is", "None", ":", "k", "=", "-", "1", "if", "radius", "is", "None", ":", "radius", "=", "-", "1.0", "opts", "=", "{", "'model'", ":", "self", ".", "__proxy__", ",", "'model_name'", ":", "self", ".", "__name__", ",", "'k'", ":", "k", ",", "'radius'", ":", "radius", ",", "'include_self_edges'", ":", "include_self_edges", "}", "with", "QuietProgress", "(", "verbose", ")", ":", "result", "=", "_turicreate", ".", "extensions", ".", "_nearest_neighbors", ".", "similarity_graph", "(", "opts", ")", "knn", "=", "result", "[", "'neighbors'", "]", "if", "output_type", "==", "\"SFrame\"", ":", "return", "knn", "else", ":", "sg", "=", "_SGraph", "(", "edges", "=", "knn", ",", "src_field", "=", "'query_label'", ",", "dst_field", "=", "'reference_label'", ")", "return", "sg" ]
40.919355
25.854839
def generate_binding_credentials(self, binding):
    """Generate binding credentials

    This function defines the configuration used to connect to the
    instance. Those credentials will be stored in a secret and
    exposed to a Pod.

    We should at least return the 'username' and 'password'.

    Args:
        binding (AtlasServiceBinding.Binding): A binding

    Returns:
        dict: All credentials and secrets.

    Raises:
        ErrClusterConfig: Connection string to the cluster is not available.
    """
    uri = self.clusters.get(binding.instance.get_cluster(), None)

    if not uri:
        raise ErrClusterConfig(binding.instance.get_cluster())

    # partial credentials
    creds = {"username": self.generate_binding_username(binding),
             "password": pwgen(32, symbols=False),
             "database": binding.instance.get_dbname()}

    # uri
    uri = uri % (
        creds["username"],
        creds["password"],
        creds["database"])

    creds["uri"] = uri

    # return creds
    return creds
[ "def", "generate_binding_credentials", "(", "self", ",", "binding", ")", ":", "uri", "=", "self", ".", "clusters", ".", "get", "(", "binding", ".", "instance", ".", "get_cluster", "(", ")", ",", "None", ")", "if", "not", "uri", ":", "raise", "ErrClusterConfig", "(", "binding", ".", "instance", ".", "get_cluster", "(", ")", ")", "# partial credentials", "creds", "=", "{", "\"username\"", ":", "self", ".", "generate_binding_username", "(", "binding", ")", ",", "\"password\"", ":", "pwgen", "(", "32", ",", "symbols", "=", "False", ")", ",", "\"database\"", ":", "binding", ".", "instance", ".", "get_dbname", "(", ")", "}", "# uri", "uri", "=", "uri", "%", "(", "creds", "[", "\"username\"", "]", ",", "creds", "[", "\"password\"", "]", ",", "creds", "[", "\"database\"", "]", ")", "creds", "[", "\"uri\"", "]", "=", "uri", "# return creds", "return", "creds" ]
31.868421
21.921053
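The URI templating step in isolation; the connection-string shape below is an assumption modeled on MongoDB Atlas, since the real template comes from self.clusters.

# Hypothetical cluster URI template with three %s slots, as used above.
uri = 'mongodb+srv://%s:%s@cluster0.example.net/%s?retryWrites=true'
creds = {'username': 'svc-user', 'password': 's3cret', 'database': 'appdb'}
creds['uri'] = uri % (creds['username'], creds['password'], creds['database'])
print(creds['uri'])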
def hasKey(self, key, notNone=False):
    '''Return entries where the key is present.

    Example of use:

    >>> test = [
    ...     {"name": "Jim",   "age": 18, "income": 93000, "wigs": 68       },
    ...     {"name": "Larry", "age": 18,                  "wigs": [3, 2, 9]},
    ...     {"name": "Joe",   "age": 20, "income": None , "wigs": [1, 2, 3]},
    ...     {"name": "Bill",  "age": 19, "income": 29000                   },
    ... ]
    >>> print PLOD(test).hasKey("income").returnString()
    [
        {age: 18, income: 93000, name: 'Jim' , wigs: 68},
        {age: 20, income: None , name: 'Joe' , wigs: [1, 2, 3]},
        {age: 19, income: 29000, name: 'Bill', wigs: None }
    ]
    >>> print PLOD(test).hasKey("income", notNone=True).returnString()
    [
        {age: 18, income: 93000, name: 'Jim' , wigs: 68},
        {age: 19, income: 29000, name: 'Bill', wigs: None}
    ]

    .. versionadded:: 0.1.2

    :param key:
        The dictionary key (or cascading list of keys) to locate.
    :param notNone:
        If True, then None is the equivalent of a missing key.
        Otherwise, a key with a value of None is NOT considered missing.
    :returns: self
    '''
    result = []
    result_tracker = []
    for counter, row in enumerate(self.table):
        (target, _, value) = internal.dict_crawl(row, key)
        if target:
            if notNone==False or not value is None:
                result.append(row)
                result_tracker.append(self.index_track[counter])
    self.table = result
    self.index_track = result_tracker
    return self
[ "def", "hasKey", "(", "self", ",", "key", ",", "notNone", "=", "False", ")", ":", "result", "=", "[", "]", "result_tracker", "=", "[", "]", "for", "counter", ",", "row", "in", "enumerate", "(", "self", ".", "table", ")", ":", "(", "target", ",", "_", ",", "value", ")", "=", "internal", ".", "dict_crawl", "(", "row", ",", "key", ")", "if", "target", ":", "if", "notNone", "==", "False", "or", "not", "value", "is", "None", ":", "result", ".", "append", "(", "row", ")", "result_tracker", ".", "append", "(", "self", ".", "index_track", "[", "counter", "]", ")", "self", ".", "table", "=", "result", "self", ".", "index_track", "=", "result_tracker", "return", "self" ]
39.090909
24.409091
def getFrameDimensions(data, page_width, page_height):
    """Calculate dimensions of a frame

    Returns left, top, width and height of the frame in points.
    """
    box = data.get("-pdf-frame-box", [])
    if len(box) == 4:
        return [getSize(x) for x in box]
    top = getSize(data.get("top", 0))
    left = getSize(data.get("left", 0))
    bottom = getSize(data.get("bottom", 0))
    right = getSize(data.get("right", 0))
    if "height" in data:
        height = getSize(data["height"])
        if "top" in data:
            top = getSize(data["top"])
            bottom = page_height - (top + height)
        elif "bottom" in data:
            bottom = getSize(data["bottom"])
            top = page_height - (bottom + height)
    if "width" in data:
        width = getSize(data["width"])
        if "left" in data:
            left = getSize(data["left"])
            right = page_width - (left + width)
        elif "right" in data:
            right = getSize(data["right"])
            left = page_width - (right + width)
    top += getSize(data.get("margin-top", 0))
    left += getSize(data.get("margin-left", 0))
    bottom += getSize(data.get("margin-bottom", 0))
    right += getSize(data.get("margin-right", 0))
    width = page_width - (left + right)
    height = page_height - (top + bottom)
    return left, top, width, height
[ "def", "getFrameDimensions", "(", "data", ",", "page_width", ",", "page_height", ")", ":", "box", "=", "data", ".", "get", "(", "\"-pdf-frame-box\"", ",", "[", "]", ")", "if", "len", "(", "box", ")", "==", "4", ":", "return", "[", "getSize", "(", "x", ")", "for", "x", "in", "box", "]", "top", "=", "getSize", "(", "data", ".", "get", "(", "\"top\"", ",", "0", ")", ")", "left", "=", "getSize", "(", "data", ".", "get", "(", "\"left\"", ",", "0", ")", ")", "bottom", "=", "getSize", "(", "data", ".", "get", "(", "\"bottom\"", ",", "0", ")", ")", "right", "=", "getSize", "(", "data", ".", "get", "(", "\"right\"", ",", "0", ")", ")", "if", "\"height\"", "in", "data", ":", "height", "=", "getSize", "(", "data", "[", "\"height\"", "]", ")", "if", "\"top\"", "in", "data", ":", "top", "=", "getSize", "(", "data", "[", "\"top\"", "]", ")", "bottom", "=", "page_height", "-", "(", "top", "+", "height", ")", "elif", "\"bottom\"", "in", "data", ":", "bottom", "=", "getSize", "(", "data", "[", "\"bottom\"", "]", ")", "top", "=", "page_height", "-", "(", "bottom", "+", "height", ")", "if", "\"width\"", "in", "data", ":", "width", "=", "getSize", "(", "data", "[", "\"width\"", "]", ")", "if", "\"left\"", "in", "data", ":", "left", "=", "getSize", "(", "data", "[", "\"left\"", "]", ")", "right", "=", "page_width", "-", "(", "left", "+", "width", ")", "elif", "\"right\"", "in", "data", ":", "right", "=", "getSize", "(", "data", "[", "\"right\"", "]", ")", "left", "=", "page_width", "-", "(", "right", "+", "width", ")", "top", "+=", "getSize", "(", "data", ".", "get", "(", "\"margin-top\"", ",", "0", ")", ")", "left", "+=", "getSize", "(", "data", ".", "get", "(", "\"margin-left\"", ",", "0", ")", ")", "bottom", "+=", "getSize", "(", "data", ".", "get", "(", "\"margin-bottom\"", ",", "0", ")", ")", "right", "+=", "getSize", "(", "data", ".", "get", "(", "\"margin-right\"", ",", "0", ")", ")", "width", "=", "page_width", "-", "(", "left", "+", "right", ")", "height", "=", "page_height", "-", "(", "top", "+", "bottom", ")", "return", "left", ",", "top", ",", "width", ",", "height" ]
36.694444
8.555556
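Worked numbers for the common branch above (no -pdf-frame-box): on an A4 page, giving top/left plus width/height pins the frame, and the opposite margins fall out by subtraction. getSize is assumed to pass plain numbers through as points.

page_w, page_h = 595, 842        # A4 in points (assumed page size)
left, top = 72, 72               # one inch in from the left and top
width, height = 451, 698         # explicit frame size
right = page_w - (left + width)      # 72, mirroring the function
bottom = page_h - (top + height)     # 72
print(left, top, width, height)      # what the function would return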
def find_protoc(path=os.environ['PATH']):
    '''
    Traverse a path ($PATH by default) to find the protoc compiler
    '''
    protoc_filename = 'protoc'
    bin_search_paths = path.split(':') or []
    for search_path in bin_search_paths:
        bin_path = os.path.join(search_path, protoc_filename)
        if os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    raise ProtocNotFound("Protobuf compiler not found")
[ "def", "find_protoc", "(", "path", "=", "os", ".", "environ", "[", "'PATH'", "]", ")", ":", "protoc_filename", "=", "'protoc'", "bin_search_paths", "=", "path", ".", "split", "(", "':'", ")", "or", "[", "]", "for", "search_path", "in", "bin_search_paths", ":", "bin_path", "=", "os", ".", "path", ".", "join", "(", "search_path", ",", "protoc_filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "bin_path", ")", "and", "os", ".", "access", "(", "bin_path", ",", "os", ".", "X_OK", ")", ":", "return", "bin_path", "raise", "ProtocNotFound", "(", "\"Protobuf compiler not found\"", ")" ]
35.307692
20.384615
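A usage sketch for the function above (find_protoc and ProtocNotFound are the definitions in the preceding entry); note the default path argument is evaluated once at import time, so passing $PATH explicitly is safer.

import os

try:
    print('protoc at:', find_protoc(os.environ.get('PATH', '/usr/bin')))
except ProtocNotFound:
    print('protoc is not on the search path')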
def downstream(self, node):
    """ Returns a list of all nodes this node has edges towards.

    Args:
        node (str): The node whose downstream nodes you want to find.

    Returns:
        list: A list of nodes that are immediately downstream from the
            node.
    """
    graph = self.graph
    if node not in graph:
        raise KeyError('node %s is not in graph' % node)
    return list(graph[node])
[ "def", "downstream", "(", "self", ",", "node", ")", ":", "graph", "=", "self", ".", "graph", "if", "node", "not", "in", "graph", ":", "raise", "KeyError", "(", "'node %s is not in graph'", "%", "node", ")", "return", "list", "(", "graph", "[", "node", "]", ")" ]
32.285714
20.071429
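A standalone sketch of the same adjacency-dict lookup, with a minimal stand-in class supplying the .graph attribute the method reads.

class Dag:
    def __init__(self, graph):
        self.graph = graph

    def downstream(self, node):
        if node not in self.graph:
            raise KeyError('node %s is not in graph' % node)
        return list(self.graph[node])

d = Dag({'a': ['b', 'c'], 'b': ['c'], 'c': []})
print(d.downstream('a'))  # ['b', 'c']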
def load(self):
    "Loads the state from the state file"
    try:
        if os.path.getsize(self.state_file) <= 1:
            raise IOError("File is empty.")

        with open(self.state_file) as fh:
            state = json.load(fh)
            assert isinstance(state, dict)
            self.hosts = state['hosts']
            self.stats = state['stats']
            for key in self.stats:
                self.stats[key]['open_requests'] = 0
    except (IOError, OSError):
        # There is no state file; start empty.
        self.hosts = {}
        self.stats = {}
[ "def", "load", "(", "self", ")", ":", "try", ":", "if", "os", ".", "path", ".", "getsize", "(", "self", ".", "state_file", ")", "<=", "1", ":", "raise", "IOError", "(", "\"File is empty.\"", ")", "with", "open", "(", "self", ".", "state_file", ")", "as", "fh", ":", "state", "=", "json", ".", "load", "(", "fh", ")", "assert", "isinstance", "(", "state", ",", "dict", ")", "self", ".", "hosts", "=", "state", "[", "'hosts'", "]", "self", ".", "stats", "=", "state", "[", "'stats'", "]", "for", "key", "in", "self", ".", "stats", ":", "self", ".", "stats", "[", "key", "]", "[", "'open_requests'", "]", "=", "0", "except", "(", "IOError", ",", "OSError", ")", ":", "# There is no state file; start empty.", "self", ".", "hosts", "=", "{", "}", "self", ".", "stats", "=", "{", "}" ]
38.125
9.875
def tangent(f):
    """A decorator which removes the `with insert_grad_of` statement.

    This allows the function to be called as usual.

    Args:
        f: A function

    Returns:
        A function with any `with insert_grad_of` context managers removed.
    """
    node = annotate.resolve_calls(f)
    RemoveWith().visit(node)
    wrapped = functools.wraps(f)(compile_.compile_function(node))
    wrapped.tangent = f
    return wrapped
[ "def", "tangent", "(", "f", ")", ":", "node", "=", "annotate", ".", "resolve_calls", "(", "f", ")", "RemoveWith", "(", ")", ".", "visit", "(", "node", ")", "wrapped", "=", "functools", ".", "wraps", "(", "f", ")", "(", "compile_", ".", "compile_function", "(", "node", ")", ")", "wrapped", ".", "tangent", "=", "f", "return", "wrapped" ]
25.0625
22.3125
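A hedged usage sketch; the import path for insert_grad_of is an assumption (it is presumed to be the context manager exported by the same tangent package), and the decorated call behaves like the plain function because the with-block is compiled away.

from tangent import insert_grad_of  # assumption: exported by the package

@tangent
def f(x):
    with insert_grad_of(x) as dx:
        dx += 1.0  # only meaningful under differentiation
    return x * x

print(f(3.0))     # 9.0: the with-statement was stripped
print(f.tangent)  # the original, unstripped function is kept around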
def copy_ifcfg_file(source_interface, dest_interface):
    """Copies an existing ifcfg network script to another

    :param source_interface: String (e.g. 1)
    :param dest_interface: String (e.g. 0:0)
    :return: None
    :raises TypeError, OSError
    """
    log = logging.getLogger(mod_logger + '.copy_ifcfg_file')

    # Validate args
    if not isinstance(source_interface, basestring):
        msg = 'source_interface argument must be a string'
        log.error(msg)
        raise TypeError(msg)
    if not isinstance(dest_interface, basestring):
        msg = 'dest_interface argument must be a string'
        log.error(msg)
        raise TypeError(msg)

    network_script = '/etc/sysconfig/network-scripts/ifcfg-eth'
    source_file = network_script + source_interface
    dest_file = network_script + dest_interface

    command = ['cp', '-f', source_file, dest_file]
    try:
        result = run_command(command)
        code = result['code']
    except CommandError:
        _, ex, trace = sys.exc_info()
        msg = 'Unable to copy the ifcfg file from interface {s} to interface {d}\n{e}'.format(
            s=source_interface, d=dest_interface, e=str(ex))
        raise OSError, msg, trace

    log.info('Copy command exited with code: {c}'.format(c=code))

    if code != 0:
        msg = 'There was a problem copying file {s} to {d}'.format(s=source_file, d=dest_file)
        log.error(msg)
        raise OSError(msg)

    # Updating the destination network script DEVICE property
    try:
        sed(file_path=dest_file, pattern='^DEVICE=.*',
            replace_str='DEVICE="eth{i}"'.format(i=dest_interface))
    except CommandError:
        _, ex, trace = sys.exc_info()
        msg = 'Unable to update DEVICE in file: {d}\n{e}'.format(
            d=dest_file, e=str(ex))
        log.error(msg)
        raise CommandError, msg, trace

    log.info('Successfully created file: {d}'.format(d=dest_file))

    log.info('Restarting networking in 10 seconds to ensure the changes take effect...')
    time.sleep(10)
    retry_time = 10
    max_retries = 10
    for i in range(1, max_retries+2):
        if i > max_retries:
            msg = 'Unable to successfully start the networking service after {m} attempts'.format(m=max_retries)
            log.error(msg)
            raise OSError(msg)
        log.info('Attempting to restart the networking service, attempt #{i} of {m}'.format(i=i, m=max_retries))
        try:
            service_network_restart()
        except CommandError:
            _, ex, trace = sys.exc_info()
            log.warn('Attempted unsuccessfully to restart networking on attempt #{i} of {m}, trying again in {t} '
                     'seconds\n{e}'.format(i=i, m=max_retries, t=retry_time, e=str(ex)))
            time.sleep(retry_time)
        else:
            log.info('Successfully restarted networking')
            break
    log.info('Successfully configured interface: {d}'.format(d=dest_interface))
[ "def", "copy_ifcfg_file", "(", "source_interface", ",", "dest_interface", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "mod_logger", "+", "'.copy_ifcfg_file'", ")", "# Validate args", "if", "not", "isinstance", "(", "source_interface", ",", "basestring", ")", ":", "msg", "=", "'source_interface argument must be a string'", "log", ".", "error", "(", "msg", ")", "raise", "TypeError", "(", "msg", ")", "if", "not", "isinstance", "(", "dest_interface", ",", "basestring", ")", ":", "msg", "=", "'dest_interface argument must be a string'", "log", ".", "error", "(", "msg", ")", "raise", "TypeError", "(", "msg", ")", "network_script", "=", "'/etc/sysconfig/network-scripts/ifcfg-eth'", "source_file", "=", "network_script", "+", "source_interface", "dest_file", "=", "network_script", "+", "dest_interface", "command", "=", "[", "'cp'", ",", "'-f'", ",", "source_file", ",", "dest_file", "]", "try", ":", "result", "=", "run_command", "(", "command", ")", "code", "=", "result", "[", "'code'", "]", "except", "CommandError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'Unable to copy the ifcfg file from interface {s} to interface {d}\\n{e}'", ".", "format", "(", "s", "=", "source_interface", ",", "d", "=", "dest_interface", ",", "e", "=", "str", "(", "ex", ")", ")", "raise", "OSError", ",", "msg", ",", "trace", "log", ".", "info", "(", "'Copy command exited with code: {c}'", ".", "format", "(", "c", "=", "code", ")", ")", "if", "code", "!=", "0", ":", "msg", "=", "'There was a problem copying file {s} file to {d}'", ".", "format", "(", "s", "=", "source", ",", "d", "=", "dest_file", ")", "log", ".", "error", "(", "msg", ")", "raise", "OSError", "(", "msg", ")", "# Updating the destination network script DEVICE property", "try", ":", "sed", "(", "file_path", "=", "dest_file", ",", "pattern", "=", "'^DEVICE=.*'", ",", "replace_str", "=", "'DEVICE=\"eth{i}\"'", ".", "format", "(", "i", "=", "dest_interface", ")", ")", "except", "CommandError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "msg", "=", "'Unable to update DEVICE in file: {d}\\n{e}'", ".", "format", "(", "d", "=", "dest_file", ",", "e", "=", "str", "(", "ex", ")", ")", "log", ".", "error", "(", "msg", ")", "raise", "CommandError", ",", "msg", ",", "trace", "log", ".", "info", "(", "'Successfully created file: {d}'", ".", "format", "(", "d", "=", "dest_file", ")", ")", "log", ".", "info", "(", "'Restarting networking in 10 seconds to ensure the changes take effect...'", ")", "time", ".", "sleep", "(", "10", ")", "retry_time", "=", "10", "max_retries", "=", "10", "for", "i", "in", "range", "(", "1", ",", "max_retries", "+", "2", ")", ":", "if", "i", ">", "max_retries", ":", "msg", "=", "'Unable to successfully start the networking service after {m} attempts'", ".", "format", "(", "m", "=", "max_retries", ")", "log", ".", "error", "(", "msg", ")", "raise", "OSError", "(", "msg", ")", "log", ".", "info", "(", "'Attempting to restart the networking service, attempt #{i} of {m}'", ".", "format", "(", "i", "=", "i", ",", "m", "=", "max_retries", ")", ")", "try", ":", "service_network_restart", "(", ")", "except", "CommandError", ":", "_", ",", "ex", ",", "trace", "=", "sys", ".", "exc_info", "(", ")", "log", ".", "warn", "(", "'Attempted unsuccessfully to restart networking on attempt #{i} of {m}, trying again in {t} '", "'seconds\\n{e}'", ".", "format", "(", "i", "=", "i", ",", "m", "=", "max_retries", ",", "t", "=", "retry_time", ",", "e", "=", "str", "(", "ex", ")", 
")", ")", "time", ".", "sleep", "(", "retry_time", ")", "else", ":", "log", ".", "info", "(", "'Successfully restarted networking'", ")", "break", "log", ".", "info", "(", "'Successfully configured interface: {d}'", ".", "format", "(", "d", "=", "dest_interface", ")", ")" ]
40.71831
21.197183
def info(self):
    """Supplemental description of the list, with length and type"""
    itext = self.class_info
    if self.key_prop.info and self.value_prop.info:
        itext += ' (keys: {}; values: {})'.format(
            self.key_prop.info, self.value_prop.info
        )
    elif self.key_prop.info:
        itext += ' (keys: {})'.format(self.key_prop.info)
    elif self.value_prop.info:
        itext += ' (values: {})'.format(self.value_prop.info)
    return itext
[ "def", "info", "(", "self", ")", ":", "itext", "=", "self", ".", "class_info", "if", "self", ".", "key_prop", ".", "info", "and", "self", ".", "value_prop", ".", "info", ":", "itext", "+=", "' (keys: {}; values: {})'", ".", "format", "(", "self", ".", "key_prop", ".", "info", ",", "self", ".", "value_prop", ".", "info", ")", "elif", "self", ".", "key_prop", ".", "info", ":", "itext", "+=", "' (keys: {})'", ".", "format", "(", "self", ".", "key_prop", ".", "info", ")", "elif", "self", ".", "value_prop", ".", "info", ":", "itext", "+=", "' (values: {})'", ".", "format", "(", "self", ".", "value_prop", ".", "info", ")", "return", "itext" ]
42.333333
15.5
def get_edit_url(
    self,
    md_id: str = None,
    md_type: str = None,
    owner_id: str = None,
    tab: str = "identification",
):
    """Constructs the edition URL of a metadata.

    :param str md_id: metadata/resource UUID
    :param str owner_id: owner UUID
    :param str tab: target tab in the web form
    """
    # checks inputs
    if not checker.check_is_uuid(md_id) or not checker.check_is_uuid(owner_id):
        raise ValueError("One of md_id or owner_id is not a correct UUID.")
    else:
        pass
    if checker.check_edit_tab(tab, md_type=md_type):
        pass
    # construct URL
    return (
        "{}"
        "/groups/{}"
        "/resources/{}"
        "/{}".format(self.APP_URLS.get(self.platform), owner_id, md_id, tab)
    )
[ "def", "get_edit_url", "(", "self", ",", "md_id", ":", "str", "=", "None", ",", "md_type", ":", "str", "=", "None", ",", "owner_id", ":", "str", "=", "None", ",", "tab", ":", "str", "=", "\"identification\"", ",", ")", ":", "# checks inputs", "if", "not", "checker", ".", "check_is_uuid", "(", "md_id", ")", "or", "not", "checker", ".", "check_is_uuid", "(", "owner_id", ")", ":", "raise", "ValueError", "(", "\"One of md_id or owner_id is not a correct UUID.\"", ")", "else", ":", "pass", "if", "checker", ".", "check_edit_tab", "(", "tab", ",", "md_type", "=", "md_type", ")", ":", "pass", "# construct URL", "return", "(", "\"{}\"", "\"/groups/{}\"", "\"/resources/{}\"", "\"/{}\"", ".", "format", "(", "self", ".", "APP_URLS", ".", "get", "(", "self", ".", "platform", ")", ",", "owner_id", ",", "md_id", ",", "tab", ")", ")" ]
30.962963
19.962963
def get_ast_field_name(ast):
    """Return the normalized field name for the given AST node."""
    replacements = {
        # We always rewrite the following field names into their proper underlying counterparts.
        TYPENAME_META_FIELD_NAME: '@class'
    }
    base_field_name = ast.name.value
    normalized_name = replacements.get(base_field_name, base_field_name)
    return normalized_name
[ "def", "get_ast_field_name", "(", "ast", ")", ":", "replacements", "=", "{", "# We always rewrite the following field names into their proper underlying counterparts.", "TYPENAME_META_FIELD_NAME", ":", "'@class'", "}", "base_field_name", "=", "ast", ".", "name", ".", "value", "normalized_name", "=", "replacements", ".", "get", "(", "base_field_name", ",", "base_field_name", ")", "return", "normalized_name" ]
43.444444
19.444444
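A standalone sketch with a minimal AST stub; the '__typename' value for TYPENAME_META_FIELD_NAME is an assumption based on the GraphQL meta field of that name.

from types import SimpleNamespace

TYPENAME_META_FIELD_NAME = '__typename'  # assumed value of the constant

def get_ast_field_name(ast):
    replacements = {TYPENAME_META_FIELD_NAME: '@class'}
    base_field_name = ast.name.value
    return replacements.get(base_field_name, base_field_name)

node = SimpleNamespace(name=SimpleNamespace(value='__typename'))
print(get_ast_field_name(node))  # '@class'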
def expect_column_values_to_not_match_regex_list(self, column, regex_list,
                                                 mostly=None,
                                                 result_format=None,
                                                 include_config=False,
                                                 catch_exceptions=None,
                                                 meta=None):
    """Expect the column entries to be strings that do not match any of a list of regular expressions. Matches can \
    be anywhere in the string.

    expect_column_values_to_not_match_regex_list is a \
    :func:`column_map_expectation <great_expectations.data_asset.dataset.Dataset.column_map_expectation>`.

    Args:
        column (str): \
            The column name.
        regex_list (list): \
            The list of regular expressions which the column entries should not match

    Keyword Args:
        mostly (None or a float between 0 and 1): \
            Return `"success": True` if at least mostly percent of values match the expectation. \
            For more detail, see :ref:`mostly`.

    Other Parameters:
        result_format (str or None): \
            Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
            For more detail, see :ref:`result_format <result_format>`.
        include_config (boolean): \
            If True, then include the expectation config as part of the result object. \
            For more detail, see :ref:`include_config`.
        catch_exceptions (boolean or None): \
            If True, then catch exceptions and include them as part of the result object. \
            For more detail, see :ref:`catch_exceptions`.
        meta (dict or None): \
            A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
            modification. For more detail, see :ref:`meta`.

    Returns:
        A JSON-serializable expectation result object.

        Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
        :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.

    See Also:
        expect_column_values_to_match_regex_list
    """
    raise NotImplementedError
[ "def", "expect_column_values_to_not_match_regex_list", "(", "self", ",", "column", ",", "regex_list", ",", "mostly", "=", "None", ",", "result_format", "=", "None", ",", "include_config", "=", "False", ",", "catch_exceptions", "=", "None", ",", "meta", "=", "None", ")", ":", "raise", "NotImplementedError" ]
50.136364
30.659091
def _calendar_month_middles(year):
    """List of middle day of each month, used by Linke turbidity lookup"""
    # remove mdays[0] since January starts at mdays[1]
    # make local copy of mdays since we need to change
    # February for leap years
    mdays = np.array(calendar.mdays[1:])
    ydays = 365
    # handle leap years
    if calendar.isleap(year):
        mdays[1] = mdays[1] + 1
        ydays = 366
    middles = np.concatenate(
        [[-calendar.mdays[-1] / 2.0],  # Dec last year
         np.cumsum(mdays) - np.array(mdays) / 2.,  # this year
         [ydays + calendar.mdays[1] / 2.0]])  # Jan next year
    return middles
[ "def", "_calendar_month_middles", "(", "year", ")", ":", "# remove mdays[0] since January starts at mdays[1]", "# make local copy of mdays since we need to change", "# February for leap years", "mdays", "=", "np", ".", "array", "(", "calendar", ".", "mdays", "[", "1", ":", "]", ")", "ydays", "=", "365", "# handle leap years", "if", "calendar", ".", "isleap", "(", "year", ")", ":", "mdays", "[", "1", "]", "=", "mdays", "[", "1", "]", "+", "1", "ydays", "=", "366", "middles", "=", "np", ".", "concatenate", "(", "[", "[", "-", "calendar", ".", "mdays", "[", "-", "1", "]", "/", "2.0", "]", ",", "# Dec last year", "np", ".", "cumsum", "(", "mdays", ")", "-", "np", ".", "array", "(", "mdays", ")", "/", "2.", ",", "# this year", "[", "ydays", "+", "calendar", ".", "mdays", "[", "1", "]", "/", "2.0", "]", "]", ")", "# Jan next year", "return", "middles" ]
39.125
13.625
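A quick numeric check for a leap year, exercising the function defined above: the array has 14 entries (December of the previous year, the 12 months, January of the next), and the endpoints follow directly from the formulas.

import calendar
import numpy as np

mids = _calendar_month_middles(2020)
print(len(mids))          # 14
print(mids[0], mids[-1])  # -31/2 = -15.5 and 366 + 31/2 = 381.5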
def nodes_for_spec(self, spec):
    """
    Determine nodes for an input_algorithms spec
    Taking into account nested specs
    """
    tokens = []
    if isinstance(spec, sb.create_spec):
        container = nodes.container(classes=["option_spec_option shortline blue-back"])
        creates = spec.kls
        for name, option in sorted(spec.kwargs.items(), key=lambda x: len(x[0])):
            para = nodes.paragraph(classes=["option monospaced"])
            para += nodes.Text("{0} = ".format(name))
            self.nodes_for_signature(option, para)

            fields = {}
            if creates and hasattr(creates, 'fields') and isinstance(creates.fields, dict):
                for key, val in creates.fields.items():
                    if isinstance(key, tuple):
                        fields[key[0]] = val
                    else:
                        fields[key] = val

            txt = fields.get(name) or "No description"
            viewlist = ViewList()
            for line in dedent(txt).split('\n'):
                viewlist.append(line, name)
            desc = nodes.section(classes=["description monospaced"])
            self.state.nested_parse(viewlist, self.content_offset, desc)

            container += para
            container += desc
            container.extend(self.nodes_for_spec(option))
        tokens.append(container)
    elif isinstance(spec, sb.optional_spec):
        tokens.extend(self.nodes_for_spec(spec.spec))
    elif isinstance(spec, sb.container_spec):
        tokens.extend(self.nodes_for_spec(spec.spec))
    elif isinstance(spec, sb.dictof):
        tokens.extend(self.nodes_for_spec(spec.value_spec))
    return tokens
[ "def", "nodes_for_spec", "(", "self", ",", "spec", ")", ":", "tokens", "=", "[", "]", "if", "isinstance", "(", "spec", ",", "sb", ".", "create_spec", ")", ":", "container", "=", "nodes", ".", "container", "(", "classes", "=", "[", "\"option_spec_option shortline blue-back\"", "]", ")", "creates", "=", "spec", ".", "kls", "for", "name", ",", "option", "in", "sorted", "(", "spec", ".", "kwargs", ".", "items", "(", ")", ",", "key", "=", "lambda", "x", ":", "len", "(", "x", "[", "0", "]", ")", ")", ":", "para", "=", "nodes", ".", "paragraph", "(", "classes", "=", "[", "\"option monospaced\"", "]", ")", "para", "+=", "nodes", ".", "Text", "(", "\"{0} = \"", ".", "format", "(", "name", ")", ")", "self", ".", "nodes_for_signature", "(", "option", ",", "para", ")", "fields", "=", "{", "}", "if", "creates", "and", "hasattr", "(", "creates", ",", "'fields'", ")", "and", "isinstance", "(", "creates", ".", "fields", ",", "dict", ")", ":", "for", "key", ",", "val", "in", "creates", ".", "fields", ".", "items", "(", ")", ":", "if", "isinstance", "(", "key", ",", "tuple", ")", ":", "fields", "[", "key", "[", "0", "]", "]", "=", "val", "else", ":", "fields", "[", "key", "]", "=", "val", "txt", "=", "fields", ".", "get", "(", "name", ")", "or", "\"No description\"", "viewlist", "=", "ViewList", "(", ")", "for", "line", "in", "dedent", "(", "txt", ")", ".", "split", "(", "'\\n'", ")", ":", "viewlist", ".", "append", "(", "line", ",", "name", ")", "desc", "=", "nodes", ".", "section", "(", "classes", "=", "[", "\"description monospaced\"", "]", ")", "self", ".", "state", ".", "nested_parse", "(", "viewlist", ",", "self", ".", "content_offset", ",", "desc", ")", "container", "+=", "para", "container", "+=", "desc", "container", ".", "extend", "(", "self", ".", "nodes_for_spec", "(", "option", ")", ")", "tokens", ".", "append", "(", "container", ")", "elif", "isinstance", "(", "spec", ",", "sb", ".", "optional_spec", ")", ":", "tokens", ".", "extend", "(", "self", ".", "nodes_for_spec", "(", "spec", ".", "spec", ")", ")", "elif", "isinstance", "(", "spec", ",", "sb", ".", "container_spec", ")", ":", "tokens", ".", "extend", "(", "self", ".", "nodes_for_spec", "(", "spec", ".", "spec", ")", ")", "elif", "isinstance", "(", "spec", ",", "sb", ".", "dictof", ")", ":", "tokens", ".", "extend", "(", "self", ".", "nodes_for_spec", "(", "spec", ".", "value_spec", ")", ")", "return", "tokens" ]
43.804878
18.097561
def init_metadata(self, origin, backend_name, backend_version,
                  category, backend_params):
    """Init metadata information.

    Metadata is composed of basic information needed to identify
    where archived data came from and how it can be retrieved
    and built into Perceval items.

    :param: origin: identifier of the repository
    :param: backend_name: name of the backend
    :param: backend_version: version of the backend
    :param: category: category of the items fetched
    :param: backend_params: dict representation of the fetch parameters

    raises ArchiveError: when an error occurs initializing the
        metadata
    """
    created_on = datetime_to_utc(datetime_utcnow())
    created_on_dumped = created_on.isoformat()
    backend_params_dumped = pickle.dumps(backend_params, 0)

    metadata = (origin, backend_name, backend_version, category,
                backend_params_dumped, created_on_dumped,)

    try:
        cursor = self._db.cursor()
        insert_stmt = "INSERT INTO " + self.METADATA_TABLE + " "\
            "(origin, backend_name, backend_version, " \
            "category, backend_params, created_on) " \
            "VALUES (?, ?, ?, ?, ?, ?)"
        cursor.execute(insert_stmt, metadata)

        self._db.commit()
        cursor.close()
    except sqlite3.DatabaseError as e:
        msg = "metadata initialization error; cause: %s" % str(e)
        raise ArchiveError(cause=msg)

    self.origin = origin
    self.backend_name = backend_name
    self.backend_version = backend_version
    self.category = category
    self.backend_params = backend_params
    self.created_on = created_on

    logger.debug("Metadata of archive %s initialized to %s",
                 self.archive_path, metadata)
[ "def", "init_metadata", "(", "self", ",", "origin", ",", "backend_name", ",", "backend_version", ",", "category", ",", "backend_params", ")", ":", "created_on", "=", "datetime_to_utc", "(", "datetime_utcnow", "(", ")", ")", "created_on_dumped", "=", "created_on", ".", "isoformat", "(", ")", "backend_params_dumped", "=", "pickle", ".", "dumps", "(", "backend_params", ",", "0", ")", "metadata", "=", "(", "origin", ",", "backend_name", ",", "backend_version", ",", "category", ",", "backend_params_dumped", ",", "created_on_dumped", ",", ")", "try", ":", "cursor", "=", "self", ".", "_db", ".", "cursor", "(", ")", "insert_stmt", "=", "\"INSERT INTO \"", "+", "self", ".", "METADATA_TABLE", "+", "\" \"", "\"(origin, backend_name, backend_version, \"", "\"category, backend_params, created_on) \"", "\"VALUES (?, ?, ?, ?, ?, ?)\"", "cursor", ".", "execute", "(", "insert_stmt", ",", "metadata", ")", "self", ".", "_db", ".", "commit", "(", ")", "cursor", ".", "close", "(", ")", "except", "sqlite3", ".", "DatabaseError", "as", "e", ":", "msg", "=", "\"metadata initialization error; cause: %s\"", "%", "str", "(", "e", ")", "raise", "ArchiveError", "(", "cause", "=", "msg", ")", "self", ".", "origin", "=", "origin", "self", ".", "backend_name", "=", "backend_name", "self", ".", "backend_version", "=", "backend_version", "self", ".", "category", "=", "category", "self", ".", "backend_params", "=", "backend_params", "self", ".", "created_on", "=", "created_on", "logger", ".", "debug", "(", "\"Metadata of archive %s initialized to %s\"", ",", "self", ".", "archive_path", ",", "metadata", ")" ]
41.152174
19.282609
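A minimal, self-contained sketch of the same parameterized-INSERT pattern used above, against an in-memory SQLite database. The table name and columns here are illustrative stand-ins, not Perceval's actual schema:

import pickle
import sqlite3
from datetime import datetime, timezone

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE metadata (origin TEXT, backend_name TEXT, "
           "backend_version TEXT, category TEXT, backend_params BLOB, "
           "created_on TEXT)")

# Same serialization choices as the method: pickle protocol 0 for the
# params dict, ISO-8601 text for the UTC timestamp.
params = pickle.dumps({"tag": "test"}, 0)
created = datetime.now(timezone.utc).isoformat()

cursor = db.cursor()
cursor.execute("INSERT INTO metadata VALUES (?, ?, ?, ?, ?, ?)",
               ("https://example.com/repo", "git", "0.1", "commit",
                params, created))
db.commit()
cursor.close()

Passing the values as a tuple to execute() (rather than interpolating them into the SQL string) is what keeps the statement safe from injection and handles the pickled blob correctly.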
def _check_conflict(cls, dirPath, name):
        """
        Check whether the module of the given name conflicts with another module on the sys.path.

        :param dirPath: the directory from which the module was originally loaded
        :param name: the module name
        """
        old_sys_path = sys.path
        try:
            sys.path = [d for d in old_sys_path
                        if os.path.realpath(d) != os.path.realpath(dirPath)]
            try:
                colliding_module = importlib.import_module(name)
            except ImportError:
                pass
            else:
                raise ResourceException(
                    "The user module '%s' collides with module '%s' from '%s'." % (
                        name, colliding_module.__name__, colliding_module.__file__))
        finally:
            sys.path = old_sys_path
[ "def", "_check_conflict", "(", "cls", ",", "dirPath", ",", "name", ")", ":", "old_sys_path", "=", "sys", ".", "path", "try", ":", "sys", ".", "path", "=", "[", "d", "for", "d", "in", "old_sys_path", "if", "os", ".", "path", ".", "realpath", "(", "d", ")", "!=", "os", ".", "path", ".", "realpath", "(", "dirPath", ")", "]", "try", ":", "colliding_module", "=", "importlib", ".", "import_module", "(", "name", ")", "except", "ImportError", ":", "pass", "else", ":", "raise", "ResourceException", "(", "\"The user module '%s' collides with module '%s from '%s'.\"", "%", "(", "name", ",", "colliding_module", ".", "__name__", ",", "colliding_module", ".", "__file__", ")", ")", "finally", ":", "sys", ".", "path", "=", "old_sys_path" ]
41.2
22.7
def uint32_to_uint8(cls, img): """ Cast uint32 RGB image to 4 uint8 channels. """ return np.flipud(img.view(dtype=np.uint8).reshape(img.shape + (4,)))
[ "def", "uint32_to_uint8", "(", "cls", ",", "img", ")", ":", "return", "np", ".", "flipud", "(", "img", ".", "view", "(", "dtype", "=", "np", ".", "uint8", ")", ".", "reshape", "(", "img", ".", "shape", "+", "(", "4", ",", ")", ")", ")" ]
35.6
11.2
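A quick check of what the conversion above does; a sketch assuming a little-endian platform, where the four uint8 channels come out in the byte order the uint32 was packed in:

import numpy as np

img = np.array([[0x04030201, 0x08070605]], dtype=np.uint32)
out = np.flipud(img.view(dtype=np.uint8).reshape(img.shape + (4,)))
print(out.shape)  # (1, 2, 4): each uint32 pixel becomes four uint8 channels
print(out[0, 0])  # [1 2 3 4] on a little-endian machine

The view() is zero-copy: reinterpreting the buffer as uint8 quadruples the last axis, and the reshape restores the image shape with an extra channel axis before the vertical flip.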
def find_definition(name, relative_to=None, importer=__import__):
    """Find definition by name in module-space.

    The find algorithm will look for definitions by name relative to a
    message definition or by fully qualified name. If no definition is
    found relative to the relative_to parameter it will do the same
    search against the container of relative_to. If relative_to is a
    nested Message, it will search its message_definition(). If that
    message has no message_definition() it will search its module. If
    relative_to is a module, it will attempt to look for the containing
    module and search relative to it. If the module is a top-level
    module, it will look for a message using a fully qualified name. If
    no message is found, the search fails and DefinitionNotFoundError
    is raised.

    For example, when looking for any definition 'foo.bar.ADefinition'
    relative to an actual message definition abc.xyz.SomeMessage:

      find_definition('foo.bar.ADefinition', SomeMessage)

    It is like looking for the following fully qualified names:

      abc.xyz.SomeMessage. foo.bar.ADefinition
      abc.xyz. foo.bar.ADefinition
      abc. foo.bar.ADefinition
      foo.bar.ADefinition

    When resolving the name relative to Message definitions and modules,
    the algorithm searches any Messages or sub-modules found in its path.
    Non-Message values are not searched.

    A name that begins with '.' is considered to be a fully qualified
    name. The name is always searched for from the topmost package.
    For example, assume two message types:

      abc.xyz.SomeMessage
      xyz.SomeMessage

    Searching for '.xyz.SomeMessage' relative to 'abc' will resolve to
    'xyz.SomeMessage' and not 'abc.xyz.SomeMessage'. For this kind of
    name, the relative_to parameter is effectively ignored and always
    set to None.

    For more information about package name resolution, please see:

      http://code.google.com/apis/protocolbuffers/docs/proto.html#packages

    Args:
      name: Name of definition to find. May be fully qualified or
        relative name.
      relative_to: Search for definition relative to message definition
        or module. None will cause a fully qualified name search.
      importer: Import function to use for resolving modules.

    Returns:
      Enum or Message class definition associated with name.

    Raises:
      DefinitionNotFoundError if no definition is found in any search path.
    """
    # Check parameters.
    if not (relative_to is None or
            isinstance(relative_to, types.ModuleType) or
            isinstance(relative_to, type) and
            issubclass(relative_to, Message)):
        raise TypeError(
            'relative_to must be None, Message definition or module.'
            ' Found: %s' % relative_to)

    name_path = name.split('.')

    # Handle absolute path reference.
    if not name_path[0]:
        relative_to = None
        name_path = name_path[1:]

    def search_path():
        """Performs a single iteration searching the path from relative_to.

        This is the function that searches up the path from a relative object.

          fully.qualified.object . relative.or.nested.Definition
          ---------------------------->
                                 ^
                                 |
                this part of search --+

        Returns:
          Message or Enum at the end of name_path, else None.
        """
        next_part = relative_to
        for node in name_path:
            # Look for attribute first.
            attribute = getattr(next_part, node, None)

            if attribute is not None:
                next_part = attribute
            else:
                # If module, look for sub-module.
if (next_part is None or isinstance(next_part, types.ModuleType)): if next_part is None: module_name = node else: module_name = '%s.%s' % (next_part.__name__, node) try: fromitem = module_name.split('.')[-1] next_part = importer(module_name, '', '', [str(fromitem)]) except ImportError: return None else: return None if not isinstance(next_part, types.ModuleType): if not (isinstance(next_part, type) and issubclass(next_part, (Message, Enum))): return None return next_part while True: found = search_path() if isinstance(found, type) and issubclass(found, (Enum, Message)): return found else: # Find next relative_to to search against. # # fully.qualified.object . relative.or.nested.Definition # <--------------------- # ^ # | # does this part of search if relative_to is None: # Fully qualified search was done. Nothing found. Fail. raise DefinitionNotFoundError( 'Could not find definition for %s' % name) else: if isinstance(relative_to, types.ModuleType): # Find parent module. module_path = relative_to.__name__.split('.')[:-1] if not module_path: relative_to = None else: # Should not raise ImportError. If it does... # weird and unexpected. Propagate. relative_to = importer( '.'.join(module_path), '', '', [module_path[-1]]) elif (isinstance(relative_to, type) and issubclass(relative_to, Message)): parent = relative_to.message_definition() if parent is None: last_module_name = relative_to.__module__.split( '.')[-1] relative_to = importer( relative_to.__module__, '', '', [last_module_name]) else: relative_to = parent
[ "def", "find_definition", "(", "name", ",", "relative_to", "=", "None", ",", "importer", "=", "__import__", ")", ":", "# Check parameters.", "if", "not", "(", "relative_to", "is", "None", "or", "isinstance", "(", "relative_to", ",", "types", ".", "ModuleType", ")", "or", "isinstance", "(", "relative_to", ",", "type", ")", "and", "issubclass", "(", "relative_to", ",", "Message", ")", ")", ":", "raise", "TypeError", "(", "'relative_to must be None, Message definition or module.'", "' Found: %s'", "%", "relative_to", ")", "name_path", "=", "name", ".", "split", "(", "'.'", ")", "# Handle absolute path reference.", "if", "not", "name_path", "[", "0", "]", ":", "relative_to", "=", "None", "name_path", "=", "name_path", "[", "1", ":", "]", "def", "search_path", "(", ")", ":", "\"\"\"Performs a single iteration searching the path from relative_to.\n\n This is the function that searches up the path from a relative object.\n\n fully.qualified.object . relative.or.nested.Definition\n ---------------------------->\n ^\n |\n this part of search --+\n\n Returns:\n Message or Enum at the end of name_path, else None.\n \"\"\"", "next_part", "=", "relative_to", "for", "node", "in", "name_path", ":", "# Look for attribute first.", "attribute", "=", "getattr", "(", "next_part", ",", "node", ",", "None", ")", "if", "attribute", "is", "not", "None", ":", "next_part", "=", "attribute", "else", ":", "# If module, look for sub-module.", "if", "(", "next_part", "is", "None", "or", "isinstance", "(", "next_part", ",", "types", ".", "ModuleType", ")", ")", ":", "if", "next_part", "is", "None", ":", "module_name", "=", "node", "else", ":", "module_name", "=", "'%s.%s'", "%", "(", "next_part", ".", "__name__", ",", "node", ")", "try", ":", "fromitem", "=", "module_name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "next_part", "=", "importer", "(", "module_name", ",", "''", ",", "''", ",", "[", "str", "(", "fromitem", ")", "]", ")", "except", "ImportError", ":", "return", "None", "else", ":", "return", "None", "if", "not", "isinstance", "(", "next_part", ",", "types", ".", "ModuleType", ")", ":", "if", "not", "(", "isinstance", "(", "next_part", ",", "type", ")", "and", "issubclass", "(", "next_part", ",", "(", "Message", ",", "Enum", ")", ")", ")", ":", "return", "None", "return", "next_part", "while", "True", ":", "found", "=", "search_path", "(", ")", "if", "isinstance", "(", "found", ",", "type", ")", "and", "issubclass", "(", "found", ",", "(", "Enum", ",", "Message", ")", ")", ":", "return", "found", "else", ":", "# Find next relative_to to search against.", "#", "# fully.qualified.object . relative.or.nested.Definition", "# <---------------------", "# ^", "# |", "# does this part of search", "if", "relative_to", "is", "None", ":", "# Fully qualified search was done. Nothing found. Fail.", "raise", "DefinitionNotFoundError", "(", "'Could not find definition for %s'", "%", "name", ")", "else", ":", "if", "isinstance", "(", "relative_to", ",", "types", ".", "ModuleType", ")", ":", "# Find parent module.", "module_path", "=", "relative_to", ".", "__name__", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", "if", "not", "module_path", ":", "relative_to", "=", "None", "else", ":", "# Should not raise ImportError. If it does...", "# weird and unexpected. 
Propagate.", "relative_to", "=", "importer", "(", "'.'", ".", "join", "(", "module_path", ")", ",", "''", ",", "''", ",", "[", "module_path", "[", "-", "1", "]", "]", ")", "elif", "(", "isinstance", "(", "relative_to", ",", "type", ")", "and", "issubclass", "(", "relative_to", ",", "Message", ")", ")", ":", "parent", "=", "relative_to", ".", "message_definition", "(", ")", "if", "parent", "is", "None", ":", "last_module_name", "=", "relative_to", ".", "__module__", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "relative_to", "=", "importer", "(", "relative_to", ".", "__module__", ",", "''", ",", "''", ",", "[", "last_module_name", "]", ")", "else", ":", "relative_to", "=", "parent" ]
39.830189
21.264151
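To make the search order concrete, here is a small standalone sketch (not part of the library) that lists the fully qualified candidate names tried when resolving a relative name, mirroring the outer while-loop above, which strips one package level per iteration:

def candidate_names(name, package):
    # Yield the fully qualified names tried for `name` relative to `package`.
    parts = package.split('.') if package else []
    while True:
        yield '.'.join(parts + [name]) if parts else name
        if not parts:
            return
        parts.pop()

print(list(candidate_names('foo.bar.ADefinition', 'abc.xyz.SomeMessage')))
# ['abc.xyz.SomeMessage.foo.bar.ADefinition',
#  'abc.xyz.foo.bar.ADefinition',
#  'abc.foo.bar.ADefinition',
#  'foo.bar.ADefinition']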
def absorb_args(self, func): """ Calls a function without any arguments. The returned caller function accepts any arguments (and throws them away). """ @wraps(func) def wrapper(*args, **kwargs): return func() return wrapper
[ "def", "absorb_args", "(", "self", ",", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", ")", "return", "wrapper" ]
31.444444
13
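Usage is straightforward: the returned wrapper accepts (and discards) any call signature. A sketch using the decorator standalone, with self dropped since the method never touches instance state:

from functools import wraps

def absorb_args(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        return func()
    return wrapper

@absorb_args
def ping():
    return "pong"

print(ping(1, 2, flag=True))  # "pong" - all arguments are thrown away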
def _fulfills_version_spec(version, version_spec): ''' Check version number against version specification info and return a boolean value based on whether or not the version number meets the specified version. ''' for oper, spec in version_spec: if oper is None: continue if not salt.utils.versions.compare(ver1=version, oper=oper, ver2=spec, cmp_func=_pep440_version_cmp): return False return True
[ "def", "_fulfills_version_spec", "(", "version", ",", "version_spec", ")", ":", "for", "oper", ",", "spec", "in", "version_spec", ":", "if", "oper", "is", "None", ":", "continue", "if", "not", "salt", ".", "utils", ".", "versions", ".", "compare", "(", "ver1", "=", "version", ",", "oper", "=", "oper", ",", "ver2", "=", "spec", ",", "cmp_func", "=", "_pep440_version_cmp", ")", ":", "return", "False", "return", "True" ]
37.916667
25.583333
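The version_spec argument is a sequence of (operator, version) pairs that must all hold. A standalone illustration of that contract using the packaging library in place of Salt's comparison helper (an assumption for the sketch; the real code defers to salt.utils.versions.compare with a PEP 440 cmp_func, and PEP 440 wildcard semantics for '==' are not reproduced here):

import operator
from packaging.version import Version

OPS = {'>=': operator.ge, '<=': operator.le, '>': operator.gt,
       '<': operator.lt, '==': operator.eq, '!=': operator.ne}

def fulfills(version, version_spec):
    # Every (oper, spec) pair must hold; None operators are skipped,
    # matching the `continue` branch above.
    return all(OPS[oper](Version(version), Version(spec))
               for oper, spec in version_spec if oper is not None)

print(fulfills('1.4.2', [('>=', '1.0'), ('<', '2.0')]))  # True
print(fulfills('2.1', [('>=', '1.0'), ('<', '2.0')]))    # False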
def _peek_job(self, pos): """ Return the job currently at position `pos`, but still keep it in the job queue. An IndexError will be raised if that position does not currently exist in the job list. :param int pos: Position of the job to get. :return: The job """ if pos < len(self._job_info_queue): return self._job_info_queue[pos].job raise IndexError()
[ "def", "_peek_job", "(", "self", ",", "pos", ")", ":", "if", "pos", "<", "len", "(", "self", ".", "_job_info_queue", ")", ":", "return", "self", ".", "_job_info_queue", "[", "pos", "]", ".", "job", "raise", "IndexError", "(", ")" ]
32.923077
21.692308
def OnLinkBitmap(self, event): """Link bitmap event handler""" # Get file name wildcard = "*" message = _("Select bitmap for current cell") style = wx.OPEN | wx.CHANGE_DIR filepath, __ = \ self.grid.interfaces.get_filepath_findex_from_user(wildcard, message, style) try: bmp = wx.Bitmap(filepath) except TypeError: return if bmp.Size == (-1, -1): # Bitmap could not be read return code = "wx.Bitmap(r'{filepath}')".format(filepath=filepath) key = self.grid.actions.cursor self.grid.actions.set_code(key, code)
[ "def", "OnLinkBitmap", "(", "self", ",", "event", ")", ":", "# Get file name", "wildcard", "=", "\"*\"", "message", "=", "_", "(", "\"Select bitmap for current cell\"", ")", "style", "=", "wx", ".", "OPEN", "|", "wx", ".", "CHANGE_DIR", "filepath", ",", "__", "=", "self", ".", "grid", ".", "interfaces", ".", "get_filepath_findex_from_user", "(", "wildcard", ",", "message", ",", "style", ")", "try", ":", "bmp", "=", "wx", ".", "Bitmap", "(", "filepath", ")", "except", "TypeError", ":", "return", "if", "bmp", ".", "Size", "==", "(", "-", "1", ",", "-", "1", ")", ":", "# Bitmap could not be read", "return", "code", "=", "\"wx.Bitmap(r'{filepath}')\"", ".", "format", "(", "filepath", "=", "filepath", ")", "key", "=", "self", ".", "grid", ".", "actions", ".", "cursor", "self", ".", "grid", ".", "actions", ".", "set_code", "(", "key", ",", "code", ")" ]
30.869565
19.086957
def _find_neighbors(self, inst, avg_dist): """ Identify nearest as well as farthest hits and misses within radius defined by average distance over whole distance array. This works the same regardless of endpoint type. """ NN_near = [] NN_far = [] min_indices = [] max_indices = [] for i in range(self._datalen): if inst != i: locator = [inst, i] if i > inst: locator.reverse() d = self._distance_array[locator[0]][locator[1]] if d < avg_dist: min_indices.append(i) if d > avg_dist: max_indices.append(i) for i in range(len(min_indices)): NN_near.append(min_indices[i]) for i in range(len(max_indices)): NN_far.append(max_indices[i]) return np.array(NN_near, dtype=np.int32), np.array(NN_far, dtype=np.int32)
[ "def", "_find_neighbors", "(", "self", ",", "inst", ",", "avg_dist", ")", ":", "NN_near", "=", "[", "]", "NN_far", "=", "[", "]", "min_indices", "=", "[", "]", "max_indices", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "_datalen", ")", ":", "if", "inst", "!=", "i", ":", "locator", "=", "[", "inst", ",", "i", "]", "if", "i", ">", "inst", ":", "locator", ".", "reverse", "(", ")", "d", "=", "self", ".", "_distance_array", "[", "locator", "[", "0", "]", "]", "[", "locator", "[", "1", "]", "]", "if", "d", "<", "avg_dist", ":", "min_indices", ".", "append", "(", "i", ")", "if", "d", ">", "avg_dist", ":", "max_indices", ".", "append", "(", "i", ")", "for", "i", "in", "range", "(", "len", "(", "min_indices", ")", ")", ":", "NN_near", ".", "append", "(", "min_indices", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "max_indices", ")", ")", ":", "NN_far", ".", "append", "(", "max_indices", "[", "i", "]", ")", "return", "np", ".", "array", "(", "NN_near", ",", "dtype", "=", "np", ".", "int32", ")", ",", "np", ".", "array", "(", "NN_far", ",", "dtype", "=", "np", ".", "int32", ")" ]
37.68
12.84
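The same near/far split can be expressed with NumPy directly; a hedged sketch assuming the distances are available as a full square matrix rather than the triangular access pattern used above:

import numpy as np

def find_neighbors_vectorized(dist_matrix, inst, avg_dist):
    d = dist_matrix[inst].copy()
    d[inst] = avg_dist  # the strict comparisons then exclude the instance itself
    nn_near = np.where(d < avg_dist)[0].astype(np.int32)
    nn_far = np.where(d > avg_dist)[0].astype(np.int32)
    return nn_near, nn_far

dm = np.array([[0.0, 1.0, 5.0],
               [1.0, 0.0, 2.0],
               [5.0, 2.0, 0.0]])
print(find_neighbors_vectorized(dm, 0, 3.0))  # (array([1]), array([2]))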
def import_module(filename): """ Returns module object Source: https://www.blog.pythonlibrary.org/2016/05/27/python-201-an-intro-to-importlib/ """ module_name = "xyz" module_spec = importlib.util.spec_from_file_location(module_name, filename) if module_spec is None: raise RuntimeError("Python cannot import file '{}'".format(filename)) module = importlib.util.module_from_spec(module_spec) module_spec.loader.exec_module(module) # print(dir(module)) # # msg = 'The {module_name} module has the following methods:' \ # ' {methods}' # print(msg.format(module_name=module_name, # methods=dir(module))) return module
[ "def", "import_module", "(", "filename", ")", ":", "module_name", "=", "\"xyz\"", "module_spec", "=", "importlib", ".", "util", ".", "spec_from_file_location", "(", "module_name", ",", "filename", ")", "if", "module_spec", "is", "None", ":", "raise", "RuntimeError", "(", "\"Python cannot import file '{}'\"", ".", "format", "(", "filename", ")", ")", "module", "=", "importlib", ".", "util", ".", "module_from_spec", "(", "module_spec", ")", "module_spec", ".", "loader", ".", "exec_module", "(", "module", ")", "# print(dir(module))\r", "#\r", "# msg = 'The {module_name} module has the following methods:' \\\r", "# ' {methods}'\r", "# print(msg.format(module_name=module_name,\r", "# methods=dir(module)))\r", "return", "module" ]
31
22.913043
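A runnable usage sketch for the helper above: write a throwaway module to a temporary file and load it by path. Note the helper's name shadows importlib's own import_module, and the internal module name is hardcoded to "xyz":

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "plugin.py")
    with open(path, "w") as fh:
        fh.write("def greet():\n    return 'hello from plugin'\n")

    module = import_module(path)   # the helper defined above
    print(module.greet())          # 'hello from plugin'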
def search_seqs(self, seqrec, in_seq, locus, run=0, partial_ann=None):
    """
    search_seqs - method for annotating a BioPython sequence without alignment

    :param seqrec: The reference sequence
    :type seqrec: SeqRecord
    :param locus: The gene locus associated with the sequence.
    :type locus: str
    :param in_seq: The input sequence
    :type in_seq: SeqRecord
    :param run: The number of runs that have been done
    :type run: int
    :param partial_ann: A partial annotation from a previous step
    :type partial_ann: :ref:`ann`
    :rtype: :ref:`ann`

    Example usage:

        >>> from Bio.Seq import Seq
        >>> from seqann.seq_search import SeqSearch
        >>> inseq = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
        >>> sqsrch = SeqSearch()
        >>> ann = sqsrch.search_seqs(refseqs, inseq)
    """
    # Extract out the sequences and feature names
    # from the reference sequences
    # The mapped features will be subtracted from seq_covered
    # so the final seq_covered number will reflect the remaining
    # number of base pairs that haven't been mapped.
    #
    # The coordinates and mapping will help determine what positions
    # in the sequence have been mapped and to what features. The
    # missing blocks variable will be generated using these.
    structures = get_structures()
    seq_covered = len(in_seq.seq)
    coordinates = dict(map(lambda x: [x, 1],
                           [i for i in range(0, len(in_seq.seq)+1)]))

    mapping = dict(map(lambda x: [x, 1],
                       [i for i in range(0, len(in_seq.seq)+1)]))

    ambig_map = {}
    found_feats = {}
    feat_missing = {}

    method = "nt_search" if not partial_ann else partial_ann.method

    # If the partial annotation is provided
    # then make the found_feats equal to
    # what has already been annotated
    feats = get_features(seqrec)
    if partial_ann:
        found_feats = partial_ann.features

        if self.verbose and self.verbosity > 4:
            self.logger.info("Found partial features:")
            for f in found_feats:
                self.logger.info(f)

        # Skip references that only have features
        # that have already been annotated
        if len([f for f in feats if f in found_feats]) == len(feats):
            if self.verbose:
                self.logger.info("Skipping incomplete refseq")
            return partial_ann

        if self.verbose and self.verbosity > 1:
            self.logger.info("Using partial annotation | " + locus
                             + " " + str(len(partial_ann.features)))

        coordinates = dict(map(lambda l: [l, 1],
                               [item for sublist in partial_ann.blocks
                                for item in sublist]))
        seq_covered = partial_ann.covered
        mapping = partial_ann.mapping

        if self.verbose and self.verbosity > 2:
            self.logger.info("Partial sequence coverage = "
                             + str(seq_covered))
            self.logger.info("Partial sequence method = " + method)

    added_feat = {}
    deleted_coords = {}
    for feat_name in sorted(feats, key=lambda k: structures[locus][k]):

        # skip if partial annotation is provided
        # and the feat name is not one of the
        # missing features
        if partial_ann and feat_name not in partial_ann.refmissing:
            if self.verbose and self.verbosity > 1:
                self.logger.info("Skipping " + feat_name
                                 + " - Already annotated")
            continue

        if self.verbose and self.verbosity > 1:
            self.logger.info("Running seqsearch for " + feat_name)

        # Search for the reference feature sequence in the
        # input sequence. Record the coordinates if it's
        # found and if it's found in multiple spots. If it
        # is not found, then record that feature as missing.
        seq_search = nt_search(str(in_seq.seq), str(feats[feat_name]))
        if len(seq_search) == 2:
            if self.verbose and self.verbosity > 0:
                self.logger.info("Found exact match for " + feat_name)
            seq_covered -= len(str(feats[feat_name]))
            end = int(len(str(feats[feat_name])) + seq_search[1])

            if feat_name == 'three_prime_UTR' \
                    and len(str(in_seq.seq)) > end:
                end = len(str(in_seq.seq))

            # If the feature is found and it's a five_prime_UTR then
            # the start should always be 0, so insertions at the
            # beginning of the sequence will be found.
            start = seq_search[1] if feat_name != 'five_prime_UTR' else 0
            si = seq_search[1]+1 if seq_search[1] != 0 and \
                feat_name != 'five_prime_UTR' else 0

            # check if this feature has already been mapped
            mapcheck = set([0 if i in coordinates else 1
                            for i in range(si, end+1)])

            # Don't map features if they are out of order
            skip = False
            if found_feats and len(found_feats) > 0:
                for f in found_feats:
                    o1 = structures[locus][feat_name]
                    o2 = structures[locus][f]
                    loctyp = loctype(found_feats[f].location.start,
                                     found_feats[f].location.end,
                                     start, end)

                    if o1 < o2 and loctyp:
                        skip = True
                        if self.verbose:
                            self.logger.info("Skipping map for "
                                             + feat_name)
                    elif o2 < o1 and not loctyp:
                        skip = True
                        if self.verbose:
                            self.logger.info("Skipping map for "
                                             + feat_name)

            if 1 not in mapcheck and not skip:
                for i in range(si, end+1):
                    if i in coordinates:
                        if feat_name == "exon_8" or \
                                feat_name == 'three_prime_UTR':
                            deleted_coords.update({i: coordinates[i]})
                        del coordinates[i]
                    else:
                        if self.verbose:
                            self.logger.error("seqsearch - shouldn't be here "
                                              + locus + " - "
                                              + " - " + feat_name)
                    mapping[i] = feat_name

                found_feats.update({feat_name:
                                    SeqFeature(
                                        FeatureLocation(
                                            ExactPosition(start),
                                            ExactPosition(end),
                                            strand=1),
                                        type=feat_name)})

                if feat_name == "exon_8" or \
                        feat_name == 'three_prime_UTR':
                    added_feat.update({feat_name: feats[feat_name]})

                if self.verbose and self.verbosity > 3:
                    self.logger.info("Coordinates | Start = " + str(start)
                                     + " - End = " + str(end))

        elif(len(seq_search) > 2):
            if self.verbose and self.verbosity > 1:
                self.logger.info("Found " + str(len(seq_search))
                                 + " matches for " + feat_name)

            new_seq = [seq_search[0]]
            for i in range(1, len(seq_search)):
                tnp = seq_search[i]+1
                if seq_search[i] in coordinates or tnp in coordinates:
                    new_seq.append(seq_search[i])

            seq_search = new_seq
            if(partial_ann and feat_name == "exon_8" and run > 0):
                missing_feats = sorted(list(partial_ann.missing.keys()))

                # * HARD CODED LOGIC * #
                # > exon8 in class I maps to multiple spots in a sequence,
                #   often in the 3' UTR. These features need to be mapped
                #   last to make sure it's not mapping exon8 incorrectly.
                if(missing_feats == ['exon_8', 'three_prime_UTR']
                        and len(seq_search) <= 3):
                    if self.verbose and self.verbosity > 0:
                        self.logger.info("Resolving exon_8")
                    seq_covered -= len(str(feats[feat_name]))
                    end = int(len(str(feats[feat_name])) + seq_search[1])

                    # If the feature is found and it's a five_prime_UTR then
                    # the start should always be 0, so insertions at the
                    # beginning of the sequence will be found.
                    start = seq_search[1]
                    si = seq_search[1]+1 if seq_search[1] != 0 else 0

                    # check if this feature has already been mapped
                    mapcheck = set([0 if i in coordinates else 1
                                    for i in range(si, end+1)])

                    for i in range(si, end+1):
                        if i in coordinates:
                            del coordinates[i]
                        else:
                            if self.verbose:
                                self.logger.error("seqsearch - shouldn't be here "
                                                  + locus + " - "
                                                  + " - " + feat_name)
                        mapping[i] = feat_name

                    found_feats.update({feat_name:
                                        SeqFeature(
                                            FeatureLocation(
                                                ExactPosition(start),
                                                ExactPosition(end),
                                                strand=1),
                                            type=feat_name)})

                    if self.verbose and self.verbosity > 0:
                        self.logger.info("Coordinates | Start = " + str(start)
                                         + " - End = " + str(end))
                else:
                    if self.verbose and self.verbosity > 0:
                        self.logger.info("Adding ambig feature " + feat_name)
                    feat_missing.update({feat_name: feats[feat_name]})
                    ambig_map.update({feat_name:
                                      seq_search[1:len(seq_search)]})
            else:
                if self.verbose and self.verbosity > 0:
                    self.logger.info("Adding ambig feature " + feat_name)
                feat_missing.update({feat_name: feats[feat_name]})
                ambig_map.update({feat_name:
                                  seq_search[1:len(seq_search)]})
        else:
            if self.verbose and self.verbosity > 1:
                self.logger.info("No match for " + feat_name)
            feat_missing.update({feat_name: feats[feat_name]})

    blocks = getblocks(coordinates)
    exact_matches = list(found_feats.keys())

    # * HARD CODED LOGIC * #
    # >
    #
    # HLA-DRB1 exon3 exact match - with intron1 and 3 missing
    if('exon_3' in exact_matches and run == 99
            and locus == 'HLA-DRB1'
            and 'exon_2' in feat_missing
            and (len(blocks) == 1 or len(blocks) == 2)):
        for b in blocks:
            x = b[len(b)-1]
            if x == max(list(mapping.keys())):
                featname = "intron_3"
                found_feats.update({featname:
                                    SeqFeature(
                                        FeatureLocation(
                                            ExactPosition(b[0]-1),
                                            ExactPosition(b[len(b)-1]),
                                            strand=1),
                                        type=featname)})
            else:
                featname = "exon_2"
                found_feats.update({featname:
                                    SeqFeature(
                                        FeatureLocation(
                                            ExactPosition(b[0]),
                                            ExactPosition(b[len(b)-1]),
                                            strand=1),
                                        type=featname)})
            seq_covered -= len(b)
        if self.verbose and self.verbosity > 1:
            self.logger.info("Successfully annotated class DRB1 II sequence")
        return Annotation(features=found_feats,
                          covered=seq_covered,
                          seq=in_seq,
                          missing=feat_missing,
                          ambig=ambig_map,
                          method=method,
                          mapping=mapping,
                          exact_match=exact_matches)

    # If it's a class II sequence and
    # exon_2 is an exact match
    # * HARD CODED LOGIC * #
    # > It's common for exon2 to be fully sequenced
    #   but intron_2 and intron_1 to be partially sequenced,
    #   which can make it hard to annotate those features.
    #   If there are two missing blocks that are small enough
    #   and they are before and after exon2, then it's very
    #   likely to be intron_2 and intron_1.
    if 'exon_2' in exact_matches and len(blocks) == 2 \
            and is_classII(locus) and seq_covered < 300:
        if self.verbose and self.verbosity > 1:
            self.logger.info("Running search for class II sequence")
        r = True
        for b in blocks:
            x = b[len(b)-1]
            if x == max(list(mapping.keys())):
                x = b[0]-1
            else:
                x += 1
            f = mapping[x]
            if f != 'exon_2':
                r = False
        if r:
            for b in blocks:
                x = b[len(b)-1]
                if x == max(list(mapping.keys())):
                    featname = "intron_2"
                    found_feats.update({featname:
                                        SeqFeature(
                                            FeatureLocation(
                                                ExactPosition(b[0]-1),
                                                ExactPosition(b[len(b)-1]),
                                                strand=1),
                                            type=featname)})
                else:
                    featname = "intron_1"
                    found_feats.update({featname:
                                        SeqFeature(
                                            FeatureLocation(
                                                ExactPosition(b[0]),
                                                ExactPosition(b[len(b)-1]),
                                                strand=1),
                                            type=featname)})
                seq_covered -= len(b)
            if self.verbose and self.verbosity > 1:
                self.logger.info("Successfully annotated class II sequence")
            return Annotation(features=found_feats,
                              covered=seq_covered,
                              seq=in_seq,
                              missing=feat_missing,
                              ambig=ambig_map,
                              method=method,
                              mapping=mapping,
                              exact_match=exact_matches)

    annotated_feats, mb, mapping = self._resolve_unmapped(blocks,
                                                          feat_missing,
                                                          ambig_map,
                                                          mapping,
                                                          found_feats,
                                                          locus,
                                                          seq_covered)

    # * HARD CODED LOGIC * #
    if(not mb and blocks
            and len(feat_missing.keys()) == 0
            and len(ambig_map.keys()) == 0):
        mb = blocks

    if mb:
        # Unmap exon 8
        if locus in ['HLA-C', 'HLA-A'] and len(in_seq.seq) < 3000 \
                and 'exon_8' in exact_matches:
            for i in deleted_coords:
                mapping[i] = 1
            coordinates.update(deleted_coords)
            mb = getblocks(coordinates)
            feat_missing.update(added_feat)

            # Delete from found features
            del exact_matches[exact_matches.index('exon_8')]
            del found_feats['exon_8']
            if 'exon_8' in annotated_feats:
                del annotated_feats['exon_8']
            if 'three_prime_UTR' in found_feats:
                del found_feats['three_prime_UTR']
            if 'three_prime_UTR' in annotated_feats:
                del annotated_feats['three_prime_UTR']

        refmissing = [f for f in structures[locus]
                      if f not in annotated_feats]

        if self.verbose and self.verbosity > 1:
            self.logger.info("* Annotation not complete *")

        # Print out what features were missing by the ref
        if self.verbose and self.verbosity > 2:
            self.logger.info("Refseq was missing these features = "
                             + ",".join(list(refmissing)))

        # Print out what features were ambig matches
        if self.verbose and self.verbosity > 1 and len(ambig_map) > 1:
            self.logger.info("Features with ambig matches = "
                             + ",".join(list(ambig_map)))

        # Print out what features were exact matches
        if self.verbose and self.verbosity > 2 and len(exact_matches) > 1:
            self.logger.info("Features exact matches = "
                             + ",".join(list(exact_matches)))

        # Print out what features have been annotated
        if self.verbose and self.verbosity > 1 and len(annotated_feats) > 1:
            self.logger.info("Features annotated = "
                             + ",".join(list(annotated_feats)))

        # Print out what features are missing
        if self.verbose and self.verbosity > 1 and len(feat_missing) > 1:
            self.logger.info("Features missing = "
                             + ",".join(list(feat_missing)))

        annotation = Annotation(features=annotated_feats,
                                covered=seq_covered,
                                seq=in_seq,
                                missing=feat_missing,
                                ambig=ambig_map,
                                blocks=mb,
                                method=method,
                                refmissing=refmissing,
                                mapping=mapping,
                                exact_match=exact_matches,
                                annotation=None)
    else:
        mb = None

        # Unmap exon 8
        if locus in ['HLA-C', 'HLA-A'] and len(in_seq.seq) < 600 \
                and 'exon_8' in exact_matches \
                and 'three_prime_UTR' in annotated_feats\
                and 'three_prime_UTR' not in exact_matches:
            for i in deleted_coords:
                mapping[i] = 1
            coordinates.update(deleted_coords)
            mb = getblocks(coordinates)
            feat_missing.update(added_feat)

            del exact_matches[exact_matches.index('exon_8')]
del found_feats['exon_8'] if 'exon_8' in annotated_feats: del annotated_feats['exon_8'] if 'three_prime_UTR' in found_feats: del found_feats['three_prime_UTR'] if 'three_prime_UTR' in annotated_feats: del annotated_feats['three_prime_UTR'] if self.verbose: self.logger.info("* No missing blocks after seq_search *") # Print out what features were ambig matches if self.verbose and self.verbosity > 0 and len(ambig_map) > 1: self.logger.info("Features with ambig matches = " + ",".join(list(ambig_map))) # Print out what features were exact matches if self.verbose and self.verbosity > 0 and len(exact_matches) > 1: self.logger.info("Features exact matches = " + ",".join(list(exact_matches))) # Print out what features have been annotated if self.verbose and self.verbosity > 0 and len(annotated_feats) > 1: self.logger.info("Features annotated = " + ",".join(list(annotated_feats))) # Print out what features are missing if self.verbose and self.verbosity > 0 and len(feat_missing) > 1: self.logger.info("Features missing = " + ",".join(list(feat_missing))) annotation = Annotation(features=annotated_feats, covered=seq_covered, seq=in_seq, missing=feat_missing, ambig=ambig_map, method=method, blocks=mb, mapping=mapping, exact_match=exact_matches, annotation=None) return annotation
[ "def", "search_seqs", "(", "self", ",", "seqrec", ",", "in_seq", ",", "locus", ",", "run", "=", "0", ",", "partial_ann", "=", "None", ")", ":", "# Extract out the sequences and feature names", "# from the reference sequences", "# The mapped features will be subtracted from seq_covered", "# so the final seq_covered number will reflect the remaining", "# number of base pairs that haven't been mapped.", "#", "# The coordinates and mapping will help determine what positions", "# in the sequence have been mapped and to what features. The", "# missing blocks variable will be generated using these.", "structures", "=", "get_structures", "(", ")", "seq_covered", "=", "len", "(", "in_seq", ".", "seq", ")", "coordinates", "=", "dict", "(", "map", "(", "lambda", "x", ":", "[", "x", ",", "1", "]", ",", "[", "i", "for", "i", "in", "range", "(", "0", ",", "len", "(", "in_seq", ".", "seq", ")", "+", "1", ")", "]", ")", ")", "mapping", "=", "dict", "(", "map", "(", "lambda", "x", ":", "[", "x", ",", "1", "]", ",", "[", "i", "for", "i", "in", "range", "(", "0", ",", "len", "(", "in_seq", ".", "seq", ")", "+", "1", ")", "]", ")", ")", "ambig_map", "=", "{", "}", "found_feats", "=", "{", "}", "feat_missing", "=", "{", "}", "method", "=", "\"nt_search\"", "if", "not", "partial_ann", "else", "partial_ann", ".", "method", "# If the partial annotation is provided", "# then make the found_feats equal to", "# what has already been annotated", "feats", "=", "get_features", "(", "seqrec", ")", "if", "partial_ann", ":", "found_feats", "=", "partial_ann", ".", "features", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "4", ":", "self", ".", "logger", ".", "info", "(", "\"Found partial features:\"", ")", "for", "f", "in", "found_feats", ":", "self", ".", "logger", ".", "info", "(", "f", ")", "# Skip references that only have features", "# that have already been annoated", "if", "len", "(", "[", "f", "for", "f", "in", "feats", "if", "f", "in", "found_feats", "]", ")", "==", "len", "(", "feats", ")", ":", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "\"Skipping incomplete refseq\"", ")", "return", "partial_ann", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Using partial annotation | \"", "+", "locus", "+", "\" \"", "+", "str", "(", "len", "(", "partial_ann", ".", "features", ")", ")", ")", "coordinates", "=", "dict", "(", "map", "(", "lambda", "l", ":", "[", "l", ",", "1", "]", ",", "[", "item", "for", "sublist", "in", "partial_ann", ".", "blocks", "for", "item", "in", "sublist", "]", ")", ")", "seq_covered", "=", "partial_ann", ".", "covered", "mapping", "=", "partial_ann", ".", "mapping", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "2", ":", "self", ".", "logger", ".", "info", "(", "\"Partial sequence coverage = \"", "+", "str", "(", "seq_covered", ")", ")", "self", ".", "logger", ".", "info", "(", "\"Partial sequence metho = \"", "+", "method", ")", "added_feat", "=", "{", "}", "deleted_coords", "=", "{", "}", "for", "feat_name", "in", "sorted", "(", "feats", ",", "key", "=", "lambda", "k", ":", "structures", "[", "locus", "]", "[", "k", "]", ")", ":", "# skip if partial annotation is provided", "# and the feat name is not one of the", "# missing features", "if", "partial_ann", "and", "feat_name", "not", "in", "partial_ann", ".", "refmissing", ":", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", 
"\"Skipping \"", "+", "feat_name", "+", "\" - Already annotated\"", ")", "continue", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Running seqsearch for \"", "+", "feat_name", ")", "# Search for the reference feature sequence in the", "# input sequence. Record the coordinates if it's", "# found and if it's found in multiple spots. If it", "# is not found, then record that feature as missing.", "seq_search", "=", "nt_search", "(", "str", "(", "in_seq", ".", "seq", ")", ",", "str", "(", "feats", "[", "feat_name", "]", ")", ")", "if", "len", "(", "seq_search", ")", "==", "2", ":", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"Found exact match for \"", "+", "feat_name", ")", "seq_covered", "-=", "len", "(", "str", "(", "feats", "[", "feat_name", "]", ")", ")", "end", "=", "int", "(", "len", "(", "str", "(", "feats", "[", "feat_name", "]", ")", ")", "+", "seq_search", "[", "1", "]", ")", "if", "feat_name", "==", "'three_prime_UTR'", "and", "len", "(", "str", "(", "in_seq", ".", "seq", ")", ")", ">", "end", ":", "end", "=", "len", "(", "str", "(", "in_seq", ".", "seq", ")", ")", "# If the feature is found and it's a five_prime_UTR then", "# the start should always be 0, so insertions at the", "# beinging of the sequence will be found.", "start", "=", "seq_search", "[", "1", "]", "if", "feat_name", "!=", "'five_prime_UTR'", "else", "0", "si", "=", "seq_search", "[", "1", "]", "+", "1", "if", "seq_search", "[", "1", "]", "!=", "0", "and", "feat_name", "!=", "'five_prime_UTR'", "else", "0", "# check if this features has already been mapped", "mapcheck", "=", "set", "(", "[", "0", "if", "i", "in", "coordinates", "else", "1", "for", "i", "in", "range", "(", "si", ",", "end", "+", "1", ")", "]", ")", "# Dont map features if they are out of order", "skip", "=", "False", "if", "found_feats", "and", "len", "(", "found_feats", ")", ">", "0", ":", "for", "f", "in", "found_feats", ":", "o1", "=", "structures", "[", "locus", "]", "[", "feat_name", "]", "o2", "=", "structures", "[", "locus", "]", "[", "f", "]", "loctyp", "=", "loctype", "(", "found_feats", "[", "f", "]", ".", "location", ".", "start", ",", "found_feats", "[", "f", "]", ".", "location", ".", "end", ",", "start", ",", "end", ")", "if", "o1", "<", "o2", "and", "loctyp", ":", "skip", "=", "True", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "\"Skipping map for \"", "+", "feat_name", ")", "elif", "o2", "<", "o1", "and", "not", "loctyp", ":", "skip", "=", "True", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "\"Skipping map for \"", "+", "feat_name", ")", "if", "1", "not", "in", "mapcheck", "and", "not", "skip", ":", "for", "i", "in", "range", "(", "si", ",", "end", "+", "1", ")", ":", "if", "i", "in", "coordinates", ":", "if", "feat_name", "==", "\"exon_8\"", "or", "feat_name", "==", "'three_prime_UTR'", ":", "deleted_coords", ".", "update", "(", "{", "i", ":", "coordinates", "[", "i", "]", "}", ")", "del", "coordinates", "[", "i", "]", "else", ":", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "error", "(", "\"seqsearch - should't be here \"", "+", "locus", "+", "\" - \"", "+", "\" - \"", "+", "feat_name", ")", "mapping", "[", "i", "]", "=", "feat_name", "found_feats", ".", "update", "(", "{", "feat_name", ":", "SeqFeature", "(", "FeatureLocation", "(", "ExactPosition", "(", "start", ")", ",", "ExactPosition", "(", 
"end", ")", ",", "strand", "=", "1", ")", ",", "type", "=", "feat_name", ")", "}", ")", "if", "feat_name", "==", "\"exon_8\"", "or", "feat_name", "==", "'three_prime_UTR'", ":", "added_feat", ".", "update", "(", "{", "feat_name", ":", "feats", "[", "feat_name", "]", "}", ")", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "3", ":", "self", ".", "logger", ".", "info", "(", "\"Coordinates | Start = \"", "+", "str", "(", "start", ")", "+", "\" - End = \"", "+", "str", "(", "end", ")", ")", "elif", "(", "len", "(", "seq_search", ")", ">", "2", ")", ":", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Found \"", "+", "str", "(", "len", "(", "seq_search", ")", ")", "+", "\" matches for \"", "+", "feat_name", ")", "new_seq", "=", "[", "seq_search", "[", "0", "]", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "seq_search", ")", ")", ":", "tnp", "=", "seq_search", "[", "i", "]", "+", "1", "if", "seq_search", "[", "i", "]", "in", "coordinates", "or", "tnp", "in", "coordinates", ":", "new_seq", ".", "append", "(", "seq_search", "[", "i", "]", ")", "seq_search", "=", "new_seq", "if", "(", "partial_ann", "and", "feat_name", "==", "\"exon_8\"", "and", "run", ">", "0", ")", ":", "missing_feats", "=", "sorted", "(", "list", "(", "partial_ann", ".", "missing", ".", "keys", "(", ")", ")", ")", "# * HARD CODED LOGIC * #", "# > exon8 in class I maps to multiple spots in a sequence,", "# often in the 3' UTR. These features need to be mapped", "# last to make sure it's not mapping exon8 incorrectly.", "if", "(", "missing_feats", "==", "[", "'exon_8'", ",", "'three_prime_UTR'", "]", "and", "len", "(", "seq_search", ")", "<=", "3", ")", ":", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"Resolving exon_8\"", ")", "seq_covered", "-=", "len", "(", "str", "(", "feats", "[", "feat_name", "]", ")", ")", "end", "=", "int", "(", "len", "(", "str", "(", "feats", "[", "feat_name", "]", ")", ")", "+", "seq_search", "[", "1", "]", ")", "# If the feature is found and it's a five_prime_UTR then", "# the start should always be 0, so insertions at the", "# beinging of the sequence will be found.", "start", "=", "seq_search", "[", "1", "]", "si", "=", "seq_search", "[", "1", "]", "+", "1", "if", "seq_search", "[", "1", "]", "!=", "0", "else", "0", "# check if this features has already been mapped", "mapcheck", "=", "set", "(", "[", "0", "if", "i", "in", "coordinates", "else", "1", "for", "i", "in", "range", "(", "si", ",", "end", "+", "1", ")", "]", ")", "for", "i", "in", "range", "(", "si", ",", "end", "+", "1", ")", ":", "if", "i", "in", "coordinates", ":", "del", "coordinates", "[", "i", "]", "else", ":", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "error", "(", "\"seqsearch - should't be here \"", "+", "locus", "+", "\" - \"", "+", "\" - \"", "+", "feat_name", ")", "mapping", "[", "i", "]", "=", "feat_name", "found_feats", ".", "update", "(", "{", "feat_name", ":", "SeqFeature", "(", "FeatureLocation", "(", "ExactPosition", "(", "start", ")", ",", "ExactPosition", "(", "end", ")", ",", "strand", "=", "1", ")", ",", "type", "=", "feat_name", ")", "}", ")", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"Coordinates | Start = \"", "+", "str", "(", "start", ")", "+", "\" - End = \"", "+", "str", "(", "end", ")", ")", "else", ":", "if", "self", ".", 
"verbose", "and", "self", ".", "verbosity", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"Adding ambig feature \"", "+", "feat_name", ")", "feat_missing", ".", "update", "(", "{", "feat_name", ":", "feats", "[", "feat_name", "]", "}", ")", "ambig_map", ".", "update", "(", "{", "feat_name", ":", "seq_search", "[", "1", ":", "len", "(", "seq_search", ")", "]", "}", ")", "else", ":", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", ":", "self", ".", "logger", ".", "info", "(", "\"Adding ambig feature \"", "+", "feat_name", ")", "feat_missing", ".", "update", "(", "{", "feat_name", ":", "feats", "[", "feat_name", "]", "}", ")", "ambig_map", ".", "update", "(", "{", "feat_name", ":", "seq_search", "[", "1", ":", "len", "(", "seq_search", ")", "]", "}", ")", "else", ":", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"No match for \"", "+", "feat_name", ")", "feat_missing", ".", "update", "(", "{", "feat_name", ":", "feats", "[", "feat_name", "]", "}", ")", "blocks", "=", "getblocks", "(", "coordinates", ")", "exact_matches", "=", "list", "(", "found_feats", ".", "keys", "(", ")", ")", "# * HARD CODED LOGIC * #", "# > ", "#", "# HLA-DRB1 exon3 exact match - with intron1 and 3 missing", "if", "(", "'exon_3'", "in", "exact_matches", "and", "run", "==", "99", "and", "locus", "==", "'HLA-DRB1'", "and", "'exon_2'", "in", "feat_missing", "and", "(", "len", "(", "blocks", ")", "==", "1", "or", "len", "(", "blocks", ")", "==", "2", ")", ")", ":", "for", "b", "in", "blocks", ":", "x", "=", "b", "[", "len", "(", "b", ")", "-", "1", "]", "if", "x", "==", "max", "(", "list", "(", "mapping", ".", "keys", "(", ")", ")", ")", ":", "featname", "=", "\"intron_3\"", "found_feats", ".", "update", "(", "{", "featname", ":", "SeqFeature", "(", "FeatureLocation", "(", "ExactPosition", "(", "b", "[", "0", "]", "-", "1", ")", ",", "ExactPosition", "(", "b", "[", "len", "(", "b", ")", "-", "1", "]", ")", ",", "strand", "=", "1", ")", ",", "type", "=", "featname", ")", "}", ")", "else", ":", "featname", "=", "\"exon_2\"", "found_feats", ".", "update", "(", "{", "featname", ":", "SeqFeature", "(", "FeatureLocation", "(", "ExactPosition", "(", "b", "[", "0", "]", ")", ",", "ExactPosition", "(", "b", "[", "len", "(", "b", ")", "-", "1", "]", ")", ",", "strand", "=", "1", ")", ",", "type", "=", "featname", ")", "}", ")", "seq_covered", "-=", "len", "(", "b", ")", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Successfully annotated class DRB1 II sequence\"", ")", "return", "Annotation", "(", "features", "=", "found_feats", ",", "covered", "=", "seq_covered", ",", "seq", "=", "in_seq", ",", "missing", "=", "feat_missing", ",", "ambig", "=", "ambig_map", ",", "method", "=", "method", ",", "mapping", "=", "mapping", ",", "exact_match", "=", "exact_matches", ")", "# If it's a class II sequence and", "# exon_2 is an exact match", "# * HARD CODED LOGIC * #", "# > It's common for exon2 to be fully sequenced", "# but intron_2 and intron_1 to be partially sequenced,", "# which can make it hard to annotate those to features.", "# If there are two missing blocks that is small enough", "# and they are before and after exon2, then it's very", "# very likely to be intron_2 and intron_1.", "if", "'exon_2'", "in", "exact_matches", "and", "len", "(", "blocks", ")", "==", "2", "and", "is_classII", "(", "locus", ")", "and", "seq_covered", "<", "300", ":", "if", 
"self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Running search for class II sequence\"", ")", "r", "=", "True", "for", "b", "in", "blocks", ":", "x", "=", "b", "[", "len", "(", "b", ")", "-", "1", "]", "if", "x", "==", "max", "(", "list", "(", "mapping", ".", "keys", "(", ")", ")", ")", ":", "x", "=", "b", "[", "0", "]", "-", "1", "else", ":", "x", "+=", "1", "f", "=", "mapping", "[", "x", "]", "if", "f", "!=", "'exon_2'", ":", "r", "=", "False", "if", "r", ":", "for", "b", "in", "blocks", ":", "x", "=", "b", "[", "len", "(", "b", ")", "-", "1", "]", "if", "x", "==", "max", "(", "list", "(", "mapping", ".", "keys", "(", ")", ")", ")", ":", "featname", "=", "\"intron_2\"", "found_feats", ".", "update", "(", "{", "featname", ":", "SeqFeature", "(", "FeatureLocation", "(", "ExactPosition", "(", "b", "[", "0", "]", "-", "1", ")", ",", "ExactPosition", "(", "b", "[", "len", "(", "b", ")", "-", "1", "]", ")", ",", "strand", "=", "1", ")", ",", "type", "=", "featname", ")", "}", ")", "else", ":", "featname", "=", "\"intron_1\"", "found_feats", ".", "update", "(", "{", "featname", ":", "SeqFeature", "(", "FeatureLocation", "(", "ExactPosition", "(", "b", "[", "0", "]", ")", ",", "ExactPosition", "(", "b", "[", "len", "(", "b", ")", "-", "1", "]", ")", ",", "strand", "=", "1", ")", ",", "type", "=", "featname", ")", "}", ")", "seq_covered", "-=", "len", "(", "b", ")", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Successfully annotated class II sequence\"", ")", "return", "Annotation", "(", "features", "=", "found_feats", ",", "covered", "=", "seq_covered", ",", "seq", "=", "in_seq", ",", "missing", "=", "feat_missing", ",", "ambig", "=", "ambig_map", ",", "method", "=", "method", ",", "mapping", "=", "mapping", ",", "exact_match", "=", "exact_matches", ")", "annotated_feats", ",", "mb", ",", "mapping", "=", "self", ".", "_resolve_unmapped", "(", "blocks", ",", "feat_missing", ",", "ambig_map", ",", "mapping", ",", "found_feats", ",", "locus", ",", "seq_covered", ")", "# * HARD CODED LOGIC * #", "if", "(", "not", "mb", "and", "blocks", "and", "len", "(", "feat_missing", ".", "keys", "(", ")", ")", "==", "0", "and", "len", "(", "ambig_map", ".", "keys", "(", ")", ")", "==", "0", ")", ":", "mb", "=", "blocks", "if", "mb", ":", "# Unmap exon 8", "if", "locus", "in", "[", "'HLA-C'", ",", "'HLA-A'", "]", "and", "len", "(", "in_seq", ".", "seq", ")", "<", "3000", "and", "'exon_8'", "in", "exact_matches", ":", "for", "i", "in", "deleted_coords", ":", "mapping", "[", "i", "]", "=", "1", "coordinates", ".", "update", "(", "deleted_coords", ")", "mb", "=", "getblocks", "(", "coordinates", ")", "feat_missing", ".", "update", "(", "added_feat", ")", "# Delte from found features", "del", "exact_matches", "[", "exact_matches", ".", "index", "(", "'exon_8'", ")", "]", "del", "found_feats", "[", "'exon_8'", "]", "if", "'exon_8'", "in", "annotated_feats", ":", "del", "annotated_feats", "[", "'exon_8'", "]", "if", "'three_prime_UTR'", "in", "found_feats", ":", "del", "found_feats", "[", "'three_prime_UTR'", "]", "if", "'three_prime_UTR'", "in", "annotated_feats", ":", "del", "annotated_feats", "[", "'three_prime_UTR'", "]", "refmissing", "=", "[", "f", "for", "f", "in", "structures", "[", "locus", "]", "if", "f", "not", "in", "annotated_feats", "]", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"* 
Annotation not complete *\"", ")", "# Print out what features were missing by the ref", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "2", ":", "self", ".", "logger", ".", "info", "(", "\"Refseq was missing these features = \"", "+", "\",\"", ".", "join", "(", "list", "(", "refmissing", ")", ")", ")", "# Print out what features were ambig matches", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", "and", "len", "(", "ambig_map", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features with ambig matches = \"", "+", "\",\"", ".", "join", "(", "list", "(", "ambig_map", ")", ")", ")", "# Print out what features were exact matches", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "2", "and", "len", "(", "exact_matches", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features exact matches = \"", "+", "\",\"", ".", "join", "(", "list", "(", "exact_matches", ")", ")", ")", "# Print out what features have been annotated", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", "and", "len", "(", "annotated_feats", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features annotated = \"", "+", "\",\"", ".", "join", "(", "list", "(", "annotated_feats", ")", ")", ")", "# Print out what features are missing", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "1", "and", "len", "(", "feat_missing", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features missing = \"", "+", "\",\"", ".", "join", "(", "list", "(", "feat_missing", ")", ")", ")", "annotation", "=", "Annotation", "(", "features", "=", "annotated_feats", ",", "covered", "=", "seq_covered", ",", "seq", "=", "in_seq", ",", "missing", "=", "feat_missing", ",", "ambig", "=", "ambig_map", ",", "blocks", "=", "mb", ",", "method", "=", "method", ",", "refmissing", "=", "refmissing", ",", "mapping", "=", "mapping", ",", "exact_match", "=", "exact_matches", ",", "annotation", "=", "None", ")", "else", ":", "mb", "=", "None", "# Unmap exon 8", "if", "locus", "in", "[", "'HLA-C'", ",", "'HLA-A'", "]", "and", "len", "(", "in_seq", ".", "seq", ")", "<", "600", "and", "'exon_8'", "in", "exact_matches", "and", "'three_prime_UTR'", "in", "annotated_feats", "and", "'three_prime_UTR'", "not", "in", "exact_matches", ":", "for", "i", "in", "deleted_coords", ":", "mapping", "[", "i", "]", "=", "1", "coordinates", ".", "update", "(", "deleted_coords", ")", "mb", "=", "getblocks", "(", "coordinates", ")", "feat_missing", ".", "update", "(", "added_feat", ")", "del", "exact_matches", "[", "exact_matches", ".", "index", "(", "'exon_8'", ")", "]", "del", "found_feats", "[", "'exon_8'", "]", "if", "'exon_8'", "in", "annotated_feats", ":", "del", "annotated_feats", "[", "'exon_8'", "]", "if", "'three_prime_UTR'", "in", "found_feats", ":", "del", "found_feats", "[", "'three_prime_UTR'", "]", "if", "'three_prime_UTR'", "in", "annotated_feats", ":", "del", "annotated_feats", "[", "'three_prime_UTR'", "]", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "\"* No missing blocks after seq_search *\"", ")", "# Print out what features were ambig matches", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", "and", "len", "(", "ambig_map", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features with ambig matches = \"", "+", "\",\"", ".", "join", "(", "list", "(", "ambig_map", ")", ")", ")", "# Print out what features were exact matches", "if", "self", 
".", "verbose", "and", "self", ".", "verbosity", ">", "0", "and", "len", "(", "exact_matches", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features exact matches = \"", "+", "\",\"", ".", "join", "(", "list", "(", "exact_matches", ")", ")", ")", "# Print out what features have been annotated", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", "and", "len", "(", "annotated_feats", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features annotated = \"", "+", "\",\"", ".", "join", "(", "list", "(", "annotated_feats", ")", ")", ")", "# Print out what features are missing", "if", "self", ".", "verbose", "and", "self", ".", "verbosity", ">", "0", "and", "len", "(", "feat_missing", ")", ">", "1", ":", "self", ".", "logger", ".", "info", "(", "\"Features missing = \"", "+", "\",\"", ".", "join", "(", "list", "(", "feat_missing", ")", ")", ")", "annotation", "=", "Annotation", "(", "features", "=", "annotated_feats", ",", "covered", "=", "seq_covered", ",", "seq", "=", "in_seq", ",", "missing", "=", "feat_missing", ",", "ambig", "=", "ambig_map", ",", "method", "=", "method", ",", "blocks", "=", "mb", ",", "mapping", "=", "mapping", ",", "exact_match", "=", "exact_matches", ",", "annotation", "=", "None", ")", "return", "annotation" ]
46.273292
22.070393
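Much of the branching above keys off the shape of nt_search's return value. Biopython's Bio.SeqUtils.nt_search (present in the Biopython versions this code targets) returns a list whose first element is the expanded search pattern, followed by every 0-based match position, so length 2 means exactly one hit and length greater than 2 means an ambiguous, multi-site match:

from Bio.SeqUtils import nt_search

print(nt_search("ACGTACGT", "ACG"))   # ['ACG', 0, 4] -> two hits, ambiguous
print(nt_search("ACGTACGT", "GTAC"))  # ['GTAC', 2]   -> exactly one hit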
def write_study(self, study_id, file_content, branch, author):
        """Given a study_id, temporary filename of content, branch and auth_info.

        Deprecated but needed until we merge api local-dep to master...
        """
        gh_user = branch.split('_study_')[0]
        msg = "Update Study #%s via OpenTree API" % study_id
        return self.write_document(gh_user,
                                   study_id,
                                   file_content,
                                   branch,
                                   author,
                                   commit_msg=msg)
[ "def", "write_study", "(", "self", ",", "study_id", ",", "file_content", ",", "branch", ",", "author", ")", ":", "gh_user", "=", "branch", ".", "split", "(", "'_study_'", ")", "[", "0", "]", "msg", "=", "\"Update Study #%s via OpenTree API\"", "%", "study_id", "return", "self", ".", "write_document", "(", "gh_user", ",", "study_id", ",", "file_content", ",", "branch", ",", "author", ",", "commit_msg", "=", "msg", ")" ]
42.642857
13.285714
def asluav_status_encode(self, LED_status, SATCOM_status, Servo_status, Motor_rpm): ''' Extended state information for ASLUAVs LED_status : Status of the position-indicator LEDs (uint8_t) SATCOM_status : Status of the IRIDIUM satellite communication system (uint8_t) Servo_status : Status vector for up to 8 servos (uint8_t) Motor_rpm : Motor RPM (float) ''' return MAVLink_asluav_status_message(LED_status, SATCOM_status, Servo_status, Motor_rpm)
[ "def", "asluav_status_encode", "(", "self", ",", "LED_status", ",", "SATCOM_status", ",", "Servo_status", ",", "Motor_rpm", ")", ":", "return", "MAVLink_asluav_status_message", "(", "LED_status", ",", "SATCOM_status", ",", "Servo_status", ",", "Motor_rpm", ")" ]
56.636364
38.818182
def run_migration(connection, queries, engine): """ Apply a migration to the SQL server """ # Execute query with connection.cursor() as cursorMig: # Parse statements queries = parse_statements(queries, engine) for query in queries: cursorMig.execute(query) connection.commit() return True
[ "def", "run_migration", "(", "connection", ",", "queries", ",", "engine", ")", ":", "# Execute query", "with", "connection", ".", "cursor", "(", ")", "as", "cursorMig", ":", "# Parse statements", "queries", "=", "parse_statements", "(", "queries", ",", "engine", ")", "for", "query", "in", "queries", ":", "cursorMig", ".", "execute", "(", "query", ")", "connection", ".", "commit", "(", ")", "return", "True" ]
26.076923
17.538462
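A self-contained sketch of the same execute-then-commit pattern against sqlite3; parse_statements is stubbed here as a naive semicolon split, which is an assumption, not the real parser.

import sqlite3

def parse_statements(sql, engine):
    # naive stand-in for the real statement parser
    return [s.strip() for s in sql.split(";") if s.strip()]

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
for query in parse_statements("CREATE TABLE t (x INT); INSERT INTO t VALUES (1)", "sqlite"):
    cur.execute(query)
conn.commit()
print(cur.execute("SELECT x FROM t").fetchall())  # [(1,)]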
def createDenseCNNModel(self): """ Create a standard network composed of two CNN / MaxPool layers followed by a linear layer, using ReLU activation between the layers """ # Create denseCNN2 model model = nn.Sequential( nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels[0], kernel_size=self.kernel_size[0], stride=self.stride[0], padding=self.padding[0]), nn.MaxPool2d(kernel_size=2), nn.ReLU(), nn.Conv2d(in_channels=self.out_channels[0], out_channels=self.out_channels[1], kernel_size=self.kernel_size[1], stride=self.stride[1], padding=self.padding[1]), nn.MaxPool2d(kernel_size=2), nn.ReLU(), Flatten(), nn.Linear(self.cnn_output_len[1], self.n), nn.ReLU(), nn.Linear(self.n, self.output_size), nn.LogSoftmax(dim=1) ) model.to(self.device) if torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) return model
[ "def", "createDenseCNNModel", "(", "self", ")", ":", "# Create denseCNN2 model", "model", "=", "nn", ".", "Sequential", "(", "nn", ".", "Conv2d", "(", "in_channels", "=", "self", ".", "in_channels", ",", "out_channels", "=", "self", ".", "out_channels", "[", "0", "]", ",", "kernel_size", "=", "self", ".", "kernel_size", "[", "0", "]", ",", "stride", "=", "self", ".", "stride", "[", "0", "]", ",", "padding", "=", "self", ".", "padding", "[", "0", "]", ")", ",", "nn", ".", "MaxPool2d", "(", "kernel_size", "=", "2", ")", ",", "nn", ".", "ReLU", "(", ")", ",", "nn", ".", "Conv2d", "(", "in_channels", "=", "self", ".", "out_channels", "[", "0", "]", ",", "out_channels", "=", "self", ".", "out_channels", "[", "1", "]", ",", "kernel_size", "=", "self", ".", "kernel_size", "[", "1", "]", ",", "stride", "=", "self", ".", "stride", "[", "1", "]", ",", "padding", "=", "self", ".", "padding", "[", "1", "]", ")", ",", "nn", ".", "MaxPool2d", "(", "kernel_size", "=", "2", ")", ",", "nn", ".", "ReLU", "(", ")", ",", "Flatten", "(", ")", ",", "nn", ".", "Linear", "(", "self", ".", "cnn_output_len", "[", "1", "]", ",", "self", ".", "n", ")", ",", "nn", ".", "ReLU", "(", ")", ",", "nn", ".", "Linear", "(", "self", ".", "n", ",", "self", ".", "output_size", ")", ",", "nn", ".", "LogSoftmax", "(", "dim", "=", "1", ")", ")", "model", ".", "to", "(", "self", ".", "device", ")", "if", "torch", ".", "cuda", ".", "device_count", "(", ")", ">", "1", ":", "model", "=", "torch", ".", "nn", ".", "DataParallel", "(", "model", ")", "return", "model" ]
30.333333
21.121212
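A runnable sketch of the same two-conv/pool-plus-linear topology with made-up hyperparameters for a 1x28x28 input; nn.Flatten stands in for the custom Flatten module, and the 64 * 7 * 7 linear input is derived from those assumed shapes.

import torch
import torch.nn as nn

# With 3x3 convs (padding=1) and two 2x2 max-pools, a 28x28 input
# shrinks to 7x7, so the flattened feature length is 64 * 7 * 7.
model = nn.Sequential(
    nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
    nn.MaxPool2d(kernel_size=2), nn.ReLU(),
    nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
    nn.MaxPool2d(kernel_size=2), nn.ReLU(),
    nn.Flatten(),
    nn.Linear(64 * 7 * 7, 128), nn.ReLU(),
    nn.Linear(128, 10),
    nn.LogSoftmax(dim=1),
)
print(model(torch.randn(4, 1, 28, 28)).shape)  # torch.Size([4, 10])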
def remove_allocated_node_name(self, name): """ Removes an allocated node name :param name: allocated node name """ if name in self._allocated_node_names: self._allocated_node_names.remove(name)
[ "def", "remove_allocated_node_name", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_allocated_node_names", ":", "self", ".", "_allocated_node_names", ".", "remove", "(", "name", ")" ]
26.666667
11.333333
def setOrientation( self, orientation ): """ Sets the orientation for this toolbar to the input value, and \ updates the contents margins and collapse button based on the value. :param orientation | <Qt.Orientation> """ super(XToolBar, self).setOrientation(orientation) self.refreshButton()
[ "def", "setOrientation", "(", "self", ",", "orientation", ")", ":", "super", "(", "XToolBar", ",", "self", ")", ".", "setOrientation", "(", "orientation", ")", "self", ".", "refreshButton", "(", ")" ]
39.333333
15.555556
def _calc_eddy_time(self): """ estimate the eddy turn-over time in days """ ens = 0. for j in range(self.nz): ens = .5*self.Hi[j] * self.spec_var(self.wv2*self.ph[j]) return 2.*pi*np.sqrt( self.H / ens.sum() ) / 86400
[ "def", "_calc_eddy_time", "(", "self", ")", ":", "ens", "=", "0.", "for", "j", "in", "range", "(", "self", ".", "nz", ")", ":", "ens", "=", ".5", "*", "self", ".", "Hi", "[", "j", "]", "*", "self", ".", "spec_var", "(", "self", ".", "wv2", "*", "self", ".", "ph", "[", "j", "]", ")", "return", "2.", "*", "pi", "*", "np", ".", "sqrt", "(", "self", ".", "H", "/", "ens", ".", "sum", "(", ")", ")", "/", "86400" ]
36.571429
18.857143
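A numeric sketch of the turnover-time formula with invented layer data; note the stored loop assigns ens on each iteration rather than accumulating, so the sum below reflects the docstring's apparent intent rather than the literal code.

import numpy as np

Hi = np.array([500.0, 1500.0])        # hypothetical layer depths (m)
var_q = np.array([1.2e-11, 4.0e-12])  # hypothetical spectral variances
H = Hi.sum()
ens = (0.5 * Hi * var_q).sum()        # depth-weighted enstrophy
eddy_time_days = 2 * np.pi * np.sqrt(H / ens) / 86400
print(round(eddy_time_days, 2))       # ~42 days for these made-up values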
def _get_destcache(self, graph, orig, branch, turn, tick, *, forward): """Return a set of destination nodes succeeding ``orig``""" destcache, destcache_lru, get_keycachelike, successors, adds_dels_sucpred = self._get_destcache_stuff lru_append(destcache, destcache_lru, ((graph, orig, branch), turn, tick), KEYCACHE_MAXSIZE) return get_keycachelike( destcache, successors, adds_dels_sucpred, (graph, orig), branch, turn, tick, forward=forward )
[ "def", "_get_destcache", "(", "self", ",", "graph", ",", "orig", ",", "branch", ",", "turn", ",", "tick", ",", "*", ",", "forward", ")", ":", "destcache", ",", "destcache_lru", ",", "get_keycachelike", ",", "successors", ",", "adds_dels_sucpred", "=", "self", ".", "_get_destcache_stuff", "lru_append", "(", "destcache", ",", "destcache_lru", ",", "(", "(", "graph", ",", "orig", ",", "branch", ")", ",", "turn", ",", "tick", ")", ",", "KEYCACHE_MAXSIZE", ")", "return", "get_keycachelike", "(", "destcache", ",", "successors", ",", "adds_dels_sucpred", ",", "(", "graph", ",", "orig", ")", ",", "branch", ",", "turn", ",", "tick", ",", "forward", "=", "forward", ")" ]
62.625
29
def parse_addr(addr, *, proto=None, host=None): """Parses an address Returns: Address: the parsed address """ port = None if isinstance(addr, Address): return addr elif isinstance(addr, str): if addr.startswith('http://'): proto, addr = 'http', addr[7:] if addr.startswith('udp://'): proto, addr = 'udp', addr[6:] elif addr.startswith('tcp://'): proto, addr = 'tcp', addr[6:] elif addr.startswith('unix://'): proto, addr = 'unix', addr[7:] a, _, b = addr.partition(':') host = a or host port = b or port elif isinstance(addr, (tuple, list)): # list is not good a, b = addr host = a or host port = b or port elif isinstance(addr, int): port = addr else: raise ValueError('bad value') if port is not None: port = int(port) return Address(proto, host, port)
[ "def", "parse_addr", "(", "addr", ",", "*", ",", "proto", "=", "None", ",", "host", "=", "None", ")", ":", "port", "=", "None", "if", "isinstance", "(", "addr", ",", "Address", ")", ":", "return", "addr", "elif", "isinstance", "(", "addr", ",", "str", ")", ":", "if", "addr", ".", "startswith", "(", "'http://'", ")", ":", "proto", ",", "addr", "=", "'http'", ",", "addr", "[", "7", ":", "]", "if", "addr", ".", "startswith", "(", "'udp://'", ")", ":", "proto", ",", "addr", "=", "'udp'", ",", "addr", "[", "6", ":", "]", "elif", "addr", ".", "startswith", "(", "'tcp://'", ")", ":", "proto", ",", "addr", "=", "'tcp'", ",", "addr", "[", "6", ":", "]", "elif", "addr", ".", "startswith", "(", "'unix://'", ")", ":", "proto", ",", "addr", "=", "'unix'", ",", "addr", "[", "7", ":", "]", "a", ",", "_", ",", "b", "=", "addr", ".", "partition", "(", "':'", ")", "host", "=", "a", "or", "host", "port", "=", "b", "or", "port", "elif", "isinstance", "(", "addr", ",", "(", "tuple", ",", "list", ")", ")", ":", "# list is not good", "a", ",", "b", "=", "addr", "host", "=", "a", "or", "host", "port", "=", "b", "or", "port", "elif", "isinstance", "(", "addr", ",", "int", ")", ":", "port", "=", "addr", "else", ":", "raise", "ValueError", "(", "'bad value'", ")", "if", "port", "is", "not", "None", ":", "port", "=", "int", "(", "port", ")", "return", "Address", "(", "proto", ",", "host", ",", "port", ")" ]
26.942857
12.457143
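A condensed, runnable rendition of the string-handling branch above; the Address namedtuple is a stand-in for whatever type the surrounding module defines, so its fields are assumptions.

from collections import namedtuple

Address = namedtuple("Address", "proto host port")  # assumed shape

def parse_addr_str(addr, *, proto=None, host=None):
    # condensed copy of the string branch, for demonstration only
    for p in ("http", "udp", "tcp", "unix"):
        if addr.startswith(p + "://"):
            proto, addr = p, addr[len(p) + 3:]
            break
    a, _, b = addr.partition(":")
    return Address(proto, a or host, int(b) if b else None)

print(parse_addr_str("tcp://127.0.0.1:8080"))  # Address(proto='tcp', host='127.0.0.1', port=8080)
print(parse_addr_str("localhost:80"))          # Address(proto=None, host='localhost', port=80)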
def _run_command_in_extended_path(syslog_ng_sbin_dir, command, params): ''' Runs the specified command with the syslog_ng_sbin_dir in the PATH ''' orig_path = os.environ.get('PATH', '') env = None if syslog_ng_sbin_dir: # Custom environment variables should be str types. This code # normalizes the paths to unicode to join them together, and then # converts back to a str type. env = { str('PATH'): salt.utils.stringutils.to_str( # future lint: disable=blacklisted-function os.pathsep.join( salt.utils.data.decode( (orig_path, syslog_ng_sbin_dir) ) ) ) } return _run_command(command, options=params, env=env)
[ "def", "_run_command_in_extended_path", "(", "syslog_ng_sbin_dir", ",", "command", ",", "params", ")", ":", "orig_path", "=", "os", ".", "environ", ".", "get", "(", "'PATH'", ",", "''", ")", "env", "=", "None", "if", "syslog_ng_sbin_dir", ":", "# Custom environment variables should be str types. This code", "# normalizes the paths to unicode to join them together, and then", "# converts back to a str type.", "env", "=", "{", "str", "(", "'PATH'", ")", ":", "salt", ".", "utils", ".", "stringutils", ".", "to_str", "(", "# future lint: disable=blacklisted-function", "os", ".", "pathsep", ".", "join", "(", "salt", ".", "utils", ".", "data", ".", "decode", "(", "(", "orig_path", ",", "syslog_ng_sbin_dir", ")", ")", ")", ")", "}", "return", "_run_command", "(", "command", ",", "options", "=", "params", ",", "env", "=", "env", ")" ]
38.95
23.05
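The PATH-extension idea in isolation, with plain stdlib calls standing in for salt's encoding helpers; the sbin directory is a hypothetical path.

import os

sbin_dir = "/opt/syslog-ng/sbin"  # hypothetical install location
env = dict(os.environ)
env["PATH"] = os.pathsep.join([env.get("PATH", ""), sbin_dir])
print(env["PATH"].split(os.pathsep)[-1])  # /opt/syslog-ng/sbin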
def scale_points(df_points, scale=INKSCAPE_PPmm.magnitude, inplace=False): ''' Translate points such that bounding box is anchored at (0, 0) and scale ``x`` and ``y`` columns of input frame by specified :data:`scale`. Parameters ---------- df_points : pandas.DataFrame Table of ``x``/``y`` point positions. Must have at least the following columns: - ``x``: x-coordinate - ``y``: y-coordinate scale : float, optional Factor to scale points by. By default, scale to millimeters based on Inkscape default of 90 pixels-per-inch. inplace : bool, optional If ``True``, input frame will be modified. Otherwise, the scaled points are written to a new frame, leaving the input frame unmodified. Returns ------- pandas.DataFrame Input frame with the points translated such that bounding box is anchored at (0, 0) and ``x`` and ``y`` values scaled by specified :data:`scale`. ''' if not inplace: df_points = df_points.copy() # Offset device, such that all coordinates are >= 0. df_points.x -= df_points.x.min() df_points.y -= df_points.y.min() # Scale path coordinates. df_points.x /= scale df_points.y /= scale return df_points
[ "def", "scale_points", "(", "df_points", ",", "scale", "=", "INKSCAPE_PPmm", ".", "magnitude", ",", "inplace", "=", "False", ")", ":", "if", "not", "inplace", ":", "df_points", "=", "df_points", ".", "copy", "(", ")", "# Offset device, such that all coordinates are >= 0.", "df_points", ".", "x", "-=", "df_points", ".", "x", ".", "min", "(", ")", "df_points", ".", "y", "-=", "df_points", ".", "y", ".", "min", "(", ")", "# Scale path coordinates.", "df_points", ".", "x", "/=", "scale", "df_points", ".", "y", "/=", "scale", "return", "df_points" ]
29.777778
22.311111
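A worked example on a two-point frame; the scale value assumes INKSCAPE_PPmm is Inkscape's 90 pixels-per-inch expressed per millimeter (90 / 25.4), which is an inference from the docstring.

import pandas as pd

df = pd.DataFrame({"x": [90.0, 180.0], "y": [45.0, 90.0]})
scale = 90 / 25.4                        # assumed px-per-mm at 90 ppi
df.x -= df.x.min(); df.y -= df.y.min()   # anchor bounding box at (0, 0)
df.x /= scale; df.y /= scale
print(df.x.tolist(), df.y.tolist())      # [0.0, 25.4] [0.0, 12.7]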
def do_usufy(self, query, **kwargs): """ Verifying a usufy query in this platform. This might be redefined in any class inheriting from Platform. Args: ----- query: The element to be searched. Return: ------- A list of elements to be appended. """ results = [] test = self.check_usufy(query, **kwargs) if test: r = { "type": "i3visio.profile", "value": self.platformName + " - " + query, "attributes": [] } # Appending platform URI aux = {} aux["type"] = "i3visio.uri" aux["value"] = self.createURL(word=query, mode="usufy") aux["attributes"] = [] r["attributes"].append(aux) # Appending the alias aux = {} aux["type"] = "i3visio.alias" aux["value"] = query aux["attributes"] = [] r["attributes"].append(aux) # Appending platform name aux = {} aux["type"] = "i3visio.platform" aux["value"] = self.platformName aux["attributes"] = [] r["attributes"].append(aux) r["attributes"] += self.process_usufy(test) results.append(r) return results
[ "def", "do_usufy", "(", "self", ",", "query", ",", "*", "*", "kwargs", ")", ":", "results", "=", "[", "]", "test", "=", "self", ".", "check_usufy", "(", "query", ",", "*", "*", "kwargs", ")", "if", "test", ":", "r", "=", "{", "\"type\"", ":", "\"i3visio.profile\"", ",", "\"value\"", ":", "self", ".", "platformName", "+", "\" - \"", "+", "query", ",", "\"attributes\"", ":", "[", "]", "}", "# Appending platform URI", "aux", "=", "{", "}", "aux", "[", "\"type\"", "]", "=", "\"i3visio.uri\"", "aux", "[", "\"value\"", "]", "=", "self", ".", "createURL", "(", "word", "=", "query", ",", "mode", "=", "\"usufy\"", ")", "aux", "[", "\"attributes\"", "]", "=", "[", "]", "r", "[", "\"attributes\"", "]", ".", "append", "(", "aux", ")", "# Appending the alias", "aux", "=", "{", "}", "aux", "[", "\"type\"", "]", "=", "\"i3visio.alias\"", "aux", "[", "\"value\"", "]", "=", "query", "aux", "[", "\"attributes\"", "]", "=", "[", "]", "r", "[", "\"attributes\"", "]", ".", "append", "(", "aux", ")", "# Appending platform name", "aux", "=", "{", "}", "aux", "[", "\"type\"", "]", "=", "\"i3visio.platform\"", "aux", "[", "\"value\"", "]", "=", "self", ".", "platformName", "aux", "[", "\"attributes\"", "]", "=", "[", "]", "r", "[", "\"attributes\"", "]", ".", "append", "(", "aux", ")", "r", "[", "\"attributes\"", "]", "+=", "self", ".", "process_usufy", "(", "test", ")", "results", ".", "append", "(", "r", ")", "return", "results" ]
27.708333
16.541667
def insurance_limit(self, loss_type, dummy=None): """ :returns: the limit fraction of the asset cost for `loss_type` """ val = self.calc(loss_type, self.insurance_limits, self.area, self.number) if self.calc.limit_abs: # convert to relative value return val / self.calc(loss_type, self.values, self.area, self.number) else: return val
[ "def", "insurance_limit", "(", "self", ",", "loss_type", ",", "dummy", "=", "None", ")", ":", "val", "=", "self", ".", "calc", "(", "loss_type", ",", "self", ".", "insurance_limits", ",", "self", ".", "area", ",", "self", ".", "number", ")", "if", "self", ".", "calc", ".", "limit_abs", ":", "# convert to relative value", "return", "val", "/", "self", ".", "calc", "(", "loss_type", ",", "self", ".", "values", ",", "self", ".", "area", ",", "self", ".", "number", ")", "else", ":", "return", "val" ]
41.454545
15.636364
def serialize_message(self, message: SegmentSequence) -> bytes: """Serialize a message (as SegmentSequence, list of FinTS3Segment, or FinTS3Segment) into a byte array""" if isinstance(message, FinTS3Segment): message = SegmentSequence([message]) if isinstance(message, (list, tuple, Iterable)): message = SegmentSequence(list(message)) result = [] for segment in message.segments: result.append(self.serialize_segment(segment)) return self.implode_segments(result)
[ "def", "serialize_message", "(", "self", ",", "message", ":", "SegmentSequence", ")", "->", "bytes", ":", "if", "isinstance", "(", "message", ",", "FinTS3Segment", ")", ":", "message", "=", "SegmentSequence", "(", "[", "message", "]", ")", "if", "isinstance", "(", "message", ",", "(", "list", ",", "tuple", ",", "Iterable", ")", ")", ":", "message", "=", "SegmentSequence", "(", "list", "(", "message", ")", ")", "result", "=", "[", "]", "for", "segment", "in", "message", ".", "segments", ":", "result", ".", "append", "(", "self", ".", "serialize_segment", "(", "segment", ")", ")", "return", "self", ".", "implode_segments", "(", "result", ")" ]
41.461538
17.538462
def get(self, *keys: str, default: Any = NOT_SET) -> Any: """ Returns values from the settings in the order of keys, the first value encountered is used. Example: >>> settings = Settings({"ARCA_ONE": 1, "ARCA_TWO": 2}) >>> settings.get("one") 1 >>> settings.get("one", "two") 1 >>> settings.get("two", "one") 2 >>> settings.get("three", "one") 1 >>> settings.get("three", default=3) 3 >>> settings.get("three") Traceback (most recent call last): ... KeyError: :param keys: One or more keys to get from settings. If multiple keys are provided, the value of the first key that has a value is returned. :param default: If none of the ``keys`` are set, return this value. :return: A value from the settings or the default. :raise ValueError: If no keys are provided. :raise KeyError: If none of the keys are set and no default is provided. """ if not len(keys): raise ValueError("At least one key must be provided.") for option in keys: key = f"{self.PREFIX}_{option.upper()}" if key in self._data: return self._data[key] if default is NOT_SET: raise KeyError("None of the following key is present in settings and no default is set: {}".format( ", ".join(keys) )) return default
[ "def", "get", "(", "self", ",", "*", "keys", ":", "str", ",", "default", ":", "Any", "=", "NOT_SET", ")", "->", "Any", ":", "if", "not", "len", "(", "keys", ")", ":", "raise", "ValueError", "(", "\"At least one key must be provided.\"", ")", "for", "option", "in", "keys", ":", "key", "=", "f\"{self.PREFIX}_{option.upper()}\"", "if", "key", "in", "self", ".", "_data", ":", "return", "self", ".", "_data", "[", "key", "]", "if", "default", "is", "NOT_SET", ":", "raise", "KeyError", "(", "\"None of the following key is present in settings and no default is set: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "keys", ")", ")", ")", "return", "default" ]
33.295455
23.022727
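A self-contained re-creation of the prefixed-key lookup so the doctest behavior can be reproduced; PREFIX is assumed to be "ARCA", as the ARCA_* keys in the doctest suggest.

NOT_SET = object()

class Settings:
    PREFIX = "ARCA"  # assumed from the ARCA_* keys in the doctest

    def __init__(self, data):
        self._data = data

    def get(self, *keys, default=NOT_SET):
        if not keys:
            raise ValueError("At least one key must be provided.")
        for option in keys:
            key = f"{self.PREFIX}_{option.upper()}"
            if key in self._data:
                return self._data[key]
        if default is NOT_SET:
            raise KeyError(", ".join(keys))
        return default

s = Settings({"ARCA_ONE": 1, "ARCA_TWO": 2})
print(s.get("one"), s.get("two", "one"), s.get("three", default=3))  # 1 2 3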
def Dump(self, key = None): """ Using the standard Python pretty printer, return the contents of the scons build environment as a string. If the key passed in is anything other than None, then that will be used as an index into the build environment dictionary and whatever is found there will be fed into the pretty printer. Note that this key is case sensitive. """ import pprint pp = pprint.PrettyPrinter(indent=2) if key: dict = self.Dictionary(key) else: dict = self.Dictionary() return pp.pformat(dict)
[ "def", "Dump", "(", "self", ",", "key", "=", "None", ")", ":", "import", "pprint", "pp", "=", "pprint", ".", "PrettyPrinter", "(", "indent", "=", "2", ")", "if", "key", ":", "dict", "=", "self", ".", "Dictionary", "(", "key", ")", "else", ":", "dict", "=", "self", ".", "Dictionary", "(", ")", "return", "pp", ".", "pformat", "(", "dict", ")" ]
36.529412
16.176471
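The same pretty-printing pattern outside SCons, on an ordinary dict standing in for the construction environment.

import pprint

env = {"CC": "gcc", "CFLAGS": ["-O2", "-Wall"]}  # stand-in environment
pp = pprint.PrettyPrinter(indent=2)
print(pp.pformat(env))            # whole dictionary
print(pp.pformat(env["CFLAGS"]))  # single key, as Dump(key) would show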
def _render_child_fragment(self, child, context, view='student_view'): """ Helper method to overcome html block rendering quirks """ try: child_fragment = child.render(view, context) except NoSuchViewError: if child.scope_ids.block_type == 'html' and getattr(self.runtime, 'is_author_mode', False): # html block doesn't support preview_view, and if we use student_view Studio will wrap # it in HTML that we don't want in the preview. So just render its HTML directly: child_fragment = Fragment(child.data) else: child_fragment = child.render('student_view', context) return child_fragment
[ "def", "_render_child_fragment", "(", "self", ",", "child", ",", "context", ",", "view", "=", "'student_view'", ")", ":", "try", ":", "child_fragment", "=", "child", ".", "render", "(", "view", ",", "context", ")", "except", "NoSuchViewError", ":", "if", "child", ".", "scope_ids", ".", "block_type", "==", "'html'", "and", "getattr", "(", "self", ".", "runtime", ",", "'is_author_mode'", ",", "False", ")", ":", "# html block doesn't support preview_view, and if we use student_view Studio will wrap", "# it in HTML that we don't want in the preview. So just render its HTML directly:", "child_fragment", "=", "Fragment", "(", "child", ".", "data", ")", "else", ":", "child_fragment", "=", "child", ".", "render", "(", "'student_view'", ",", "context", ")", "return", "child_fragment" ]
48.2
26.866667
def _matches_patterns(path, patterns): """Given a list of patterns, returns True if the path matches any pattern.""" for glob in patterns: try: if PurePath(path).match(glob): return True except TypeError: pass return False
[ "def", "_matches_patterns", "(", "path", ",", "patterns", ")", ":", "for", "glob", "in", "patterns", ":", "try", ":", "if", "PurePath", "(", "path", ")", ".", "match", "(", "glob", ")", ":", "return", "True", "except", "TypeError", ":", "pass", "return", "False" ]
34.333333
11.444444
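A quick demonstration of the PurePath.match semantics the helper depends on; match anchors glob patterns at the right-hand end of the path.

from pathlib import PurePath

patterns = ["*.py", "docs/*.md"]
for path in ("a/b/c.py", "docs/readme.md", "image.png"):
    print(path, any(PurePath(path).match(g) for g in patterns))
# a/b/c.py True / docs/readme.md True / image.png False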
def close_application(self): """Closes the current application and also close webdriver session.""" self._debug('Closing application with session id %s' % self._current_application().session_id) self._cache.close()
[ "def", "close_application", "(", "self", ")", ":", "self", ".", "_debug", "(", "'Closing application with session id %s'", "%", "self", ".", "_current_application", "(", ")", ".", "session_id", ")", "self", ".", "_cache", ".", "close", "(", ")" ]
59.5
21.75
def plot_reaction_scheme(df, temperature, pressure, potential, pH, e_lim=None): """Returns a matplotlib object with the plotted reaction path. Parameters ---------- df : Pandas DataFrame generated by reaction_network temperature : numeric temperature in K pressure : numeric pressure in mbar pH : pH in bulk solution potential : Electric potential vs. SHE in eV e_lim: Limits for the energy axis. Returns ------- fig: matplotlib object. """ ncols = int((df.shape[0]/20)) +1 fig_width = ncols + 1.5*len(df['intermediate_labels'][0]) figsize = (fig_width, 6) fig, ax = plt.subplots(figsize=figsize) if pressure == None: pressure_label = '0' else: pressure_label = str(pressure) lines = [] for j, energy_list in enumerate(df['reaction_energy']): ts = df['transition_states'][j] R = df['reaction_coordinate'][j] E = [[x, x] for x in energy_list] labels = df['system_label'] for i, n in enumerate(R): if i == 0: line = Line2D([0], [0], color=colors[j], lw=4) lines.append(line) ax.plot(n, E[i], ls='-', color=colors[j], linewidth=3.25, solid_capstyle='round', path_effects=[pe.Stroke(linewidth=6, foreground=edge_colors[j]), pe.Normal()], label=labels[j]) ax.plot([n[1], n[1] + 0.5], [E[i], E[i + 1]], ls='--', dashes=(3, 2), color=colors[j], linewidth=1.) else: if ts[i]: xts = [R[i-1][1], R[i][0], R[i+1][0]] yts = [energy_list[i-1], energy_list[i], energy_list[i+1]] z1 = np.polyfit(xts, yts, 2) xp1 = np.linspace(xts[0], xts[2], 100) p1 = np.poly1d(z1) ax.plot(xp1, p1(xp1), ls='--', color=colors[j], linewidth=2.) ax.plot(xts[1], yts[1], marker = 'o', c=colors[j], mec = edge_colors[j], lw=1.5, markersize=7) else: ax.plot(n, E[i], ls='-', color=colors[j], linewidth=3.25, solid_capstyle='round', path_effects=[pe.Stroke(linewidth=6, foreground=edge_colors[j]), pe.Normal()]) if i < len(R) - 1: ax.plot([n[1], n[1] + 0.5], [E[i], E[i + 1]], ls='--', dashes=(3, 2), color=colors[j], linewidth=1.) ax.legend(handlelength=0.4, ncol=ncols, loc=2, frameon=False, bbox_to_anchor=(1.05, 1), borderaxespad=0., fontsize=12) if e_lim: ax.set_ylim(e_lim) ax.set_xlabel('Reaction coordinate') ax.set_ylabel('Reaction free energy (eV)') reaction_labels = df['intermediate_labels'][0] reaction_labels = [sub(w) for w in reaction_labels] plt.xticks(np.arange(len(reaction_labels)) + 0.25, tuple(reaction_labels), rotation=45) # plt.tight_layout() a = ax.get_xlim()[1]+0.05*ax.get_xlim()[1] b = ax.get_ylim()[0]+0.05*ax.get_ylim()[1] if potential is not None and pH is not None: ax.text(a,b, 'U = '+str(potential)+' eV vs. SHE \n pH = ' +str(pH)+' \n T = '+str(temperature) +' K \n p = '+pressure_label+' mbar',fontsize=12) else: ax.text(a,b, 'T = '+str(temperature)+' \n p = '+pressure_label+' mbar',fontsize=12) plt.tight_layout() return(fig)
[ "def", "plot_reaction_scheme", "(", "df", ",", "temperature", ",", "pressure", ",", "potential", ",", "pH", ",", "e_lim", "=", "None", ")", ":", "ncols", "=", "int", "(", "(", "df", ".", "shape", "[", "0", "]", "/", "20", ")", ")", "+", "1", "fig_width", "=", "ncols", "+", "1.5", "*", "len", "(", "df", "[", "'intermediate_labels'", "]", "[", "0", "]", ")", "figsize", "=", "(", "fig_width", ",", "6", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "if", "pressure", "==", "None", ":", "pressure_label", "=", "'0'", "else", ":", "pressure_label", "=", "str", "(", "pressure", ")", "lines", "=", "[", "]", "for", "j", ",", "energy_list", "in", "enumerate", "(", "df", "[", "'reaction_energy'", "]", ")", ":", "ts", "=", "df", "[", "'transition_states'", "]", "[", "j", "]", "R", "=", "df", "[", "'reaction_coordinate'", "]", "[", "j", "]", "E", "=", "[", "[", "x", ",", "x", "]", "for", "x", "in", "energy_list", "]", "labels", "=", "df", "[", "'system_label'", "]", "for", "i", ",", "n", "in", "enumerate", "(", "R", ")", ":", "if", "i", "==", "0", ":", "line", "=", "Line2D", "(", "[", "0", "]", ",", "[", "0", "]", ",", "color", "=", "colors", "[", "j", "]", ",", "lw", "=", "4", ")", "lines", ".", "append", "(", "line", ")", "ax", ".", "plot", "(", "n", ",", "E", "[", "i", "]", ",", "ls", "=", "'-'", ",", "color", "=", "colors", "[", "j", "]", ",", "linewidth", "=", "3.25", ",", "solid_capstyle", "=", "'round'", ",", "path_effects", "=", "[", "pe", ".", "Stroke", "(", "linewidth", "=", "6", ",", "foreground", "=", "edge_colors", "[", "j", "]", ")", ",", "pe", ".", "Normal", "(", ")", "]", ",", "label", "=", "labels", "[", "j", "]", ")", "ax", ".", "plot", "(", "[", "n", "[", "1", "]", ",", "n", "[", "1", "]", "+", "0.5", "]", ",", "[", "E", "[", "i", "]", ",", "E", "[", "i", "+", "1", "]", "]", ",", "ls", "=", "'--'", ",", "dashes", "=", "(", "3", ",", "2", ")", ",", "color", "=", "colors", "[", "j", "]", ",", "linewidth", "=", "1.", ")", "else", ":", "if", "ts", "[", "i", "]", ":", "xts", "=", "[", "R", "[", "i", "-", "1", "]", "[", "1", "]", ",", "R", "[", "i", "]", "[", "0", "]", ",", "R", "[", "i", "+", "1", "]", "[", "0", "]", "]", "yts", "=", "[", "energy_list", "[", "i", "-", "1", "]", ",", "energy_list", "[", "i", "]", ",", "energy_list", "[", "i", "+", "1", "]", "]", "z1", "=", "np", ".", "polyfit", "(", "xts", ",", "yts", ",", "2", ")", "xp1", "=", "np", ".", "linspace", "(", "xts", "[", "0", "]", ",", "xts", "[", "2", "]", ",", "100", ")", "p1", "=", "np", ".", "poly1d", "(", "z1", ")", "ax", ".", "plot", "(", "xp1", ",", "p1", "(", "xp1", ")", ",", "ls", "=", "'--'", ",", "color", "=", "colors", "[", "j", "]", ",", "linewidth", "=", "2.", ")", "ax", ".", "plot", "(", "xts", "[", "1", "]", ",", "yts", "[", "1", "]", ",", "marker", "=", "'o'", ",", "c", "=", "colors", "[", "j", "]", ",", "mec", "=", "edge_colors", "[", "j", "]", ",", "lw", "=", "1.5", ",", "markersize", "=", "7", ")", "else", ":", "ax", ".", "plot", "(", "n", ",", "E", "[", "i", "]", ",", "ls", "=", "'-'", ",", "color", "=", "colors", "[", "j", "]", ",", "linewidth", "=", "3.25", ",", "solid_capstyle", "=", "'round'", ",", "path_effects", "=", "[", "pe", ".", "Stroke", "(", "linewidth", "=", "6", ",", "foreground", "=", "edge_colors", "[", "j", "]", ")", ",", "pe", ".", "Normal", "(", ")", "]", ")", "if", "i", "<", "len", "(", "R", ")", "-", "1", ":", "ax", ".", "plot", "(", "[", "n", "[", "1", "]", ",", "n", "[", "1", "]", "+", "0.5", "]", ",", "[", "E", "[", "i", "]", ",", "E", "[", 
"i", "+", "1", "]", "]", ",", "ls", "=", "'--'", ",", "dashes", "=", "(", "3", ",", "2", ")", ",", "color", "=", "colors", "[", "j", "]", ",", "linewidth", "=", "1.", ")", "ax", ".", "legend", "(", "handlelength", "=", "0.4", ",", "ncol", "=", "ncols", ",", "loc", "=", "2", ",", "frameon", "=", "False", ",", "bbox_to_anchor", "=", "(", "1.05", ",", "1", ")", ",", "borderaxespad", "=", "0.", ",", "fontsize", "=", "12", ")", "if", "e_lim", ":", "ax", ".", "set_ylim", "(", "e_lim", ")", "ax", ".", "set_xlabel", "(", "'Reaction coordinate'", ")", "ax", ".", "set_ylabel", "(", "'Reaction free energy (eV)'", ")", "reaction_labels", "=", "df", "[", "'intermediate_labels'", "]", "[", "0", "]", "reaction_labels", "=", "[", "sub", "(", "w", ")", "for", "w", "in", "reaction_labels", "]", "plt", ".", "xticks", "(", "np", ".", "arange", "(", "len", "(", "reaction_labels", ")", ")", "+", "0.25", ",", "tuple", "(", "reaction_labels", ")", ",", "rotation", "=", "45", ")", "# plt.tight_layout()", "a", "=", "ax", ".", "get_xlim", "(", ")", "[", "1", "]", "+", "0.05", "*", "ax", ".", "get_xlim", "(", ")", "[", "1", "]", "b", "=", "ax", ".", "get_ylim", "(", ")", "[", "0", "]", "+", "0.05", "*", "ax", ".", "get_ylim", "(", ")", "[", "1", "]", "if", "potential", "is", "not", "None", "and", "pH", "is", "not", "None", ":", "ax", ".", "text", "(", "a", ",", "b", ",", "'U = '", "+", "str", "(", "potential", ")", "+", "' eV vs. SHE \\n pH = '", "+", "str", "(", "pH", ")", "+", "' \\n T = '", "+", "str", "(", "temperature", ")", "+", "' K \\n p = '", "+", "pressure_label", "+", "' mbar'", ",", "fontsize", "=", "12", ")", "else", ":", "ax", ".", "text", "(", "a", ",", "b", ",", "'T = '", "+", "str", "(", "temperature", ")", "+", "' \\n p = '", "+", "pressure_label", "+", "' mbar'", ",", "fontsize", "=", "12", ")", "plt", ".", "tight_layout", "(", ")", "return", "(", "fig", ")" ]
39.872093
22.627907
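A sketch of the input frame shape the plotter reads, with one hypothetical system; each reaction_coordinate entry is a [start, end] x-span for a step, and the column names are taken from the lookups in the function above.

import pandas as pd

df = pd.DataFrame({
    "reaction_energy":     [[0.0, 0.8, 0.3]],       # eV per step
    "transition_states":   [[False, True, False]],  # step 2 is a TS
    "reaction_coordinate": [[[0, 0.5], [1, 1.5], [2, 2.5]]],
    "intermediate_labels": [["*", "TS", "P*"]],
    "system_label":        ["Pt(111)"],             # hypothetical system
})
print(df.columns.tolist())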
def authenticate(self): """ Handles authentication, and persists the X-APPLE-WEB-KB cookie so that subsequent logins will not cause additional e-mails from Apple. """ logger.info("Authenticating as %s", self.user['apple_id']) data = dict(self.user) # We authenticate every time, so "remember me" is not needed data.update({'extended_login': False}) try: req = self.session.post( self._base_login_url, params=self.params, data=json.dumps(data) ) except PyiCloudAPIResponseError as error: msg = 'Invalid email/password combination.' raise PyiCloudFailedLoginException(msg, error) resp = req.json() self.params.update({'dsid': resp['dsInfo']['dsid']}) if not os.path.exists(self._cookie_directory): os.mkdir(self._cookie_directory) self.session.cookies.save() logger.debug("Cookies saved to %s", self._get_cookiejar_path()) self.data = resp self.webservices = self.data['webservices'] logger.info("Authentication completed successfully") logger.debug(self.params)
[ "def", "authenticate", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Authenticating as %s\"", ",", "self", ".", "user", "[", "'apple_id'", "]", ")", "data", "=", "dict", "(", "self", ".", "user", ")", "# We authenticate every time, so \"remember me\" is not needed", "data", ".", "update", "(", "{", "'extended_login'", ":", "False", "}", ")", "try", ":", "req", "=", "self", ".", "session", ".", "post", "(", "self", ".", "_base_login_url", ",", "params", "=", "self", ".", "params", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "except", "PyiCloudAPIResponseError", "as", "error", ":", "msg", "=", "'Invalid email/password combination.'", "raise", "PyiCloudFailedLoginException", "(", "msg", ",", "error", ")", "resp", "=", "req", ".", "json", "(", ")", "self", ".", "params", ".", "update", "(", "{", "'dsid'", ":", "resp", "[", "'dsInfo'", "]", "[", "'dsid'", "]", "}", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_cookie_directory", ")", ":", "os", ".", "mkdir", "(", "self", ".", "_cookie_directory", ")", "self", ".", "session", ".", "cookies", ".", "save", "(", ")", "logger", ".", "debug", "(", "\"Cookies saved to %s\"", ",", "self", ".", "_get_cookiejar_path", "(", ")", ")", "self", ".", "data", "=", "resp", "self", ".", "webservices", "=", "self", ".", "data", "[", "'webservices'", "]", "logger", ".", "info", "(", "\"Authentication completed successfully\"", ")", "logger", ".", "debug", "(", "self", ".", "params", ")" ]
33.138889
20.305556
def _post_json(self, url, data, **kw): ''' Makes a POST request, setting Authorization and Content-Type headers by default ''' data = json.dumps(data) headers = kw.pop('headers', {}) headers.setdefault('Content-Type', 'application/json') headers.setdefault('Accept', 'application/json') kw['headers'] = headers kw['data'] = data return self._post(url, **kw)
[ "def", "_post_json", "(", "self", ",", "url", ",", "data", ",", "*", "*", "kw", ")", ":", "data", "=", "json", ".", "dumps", "(", "data", ")", "headers", "=", "kw", ".", "pop", "(", "'headers'", ",", "{", "}", ")", "headers", ".", "setdefault", "(", "'Content-Type'", ",", "'application/json'", ")", "headers", ".", "setdefault", "(", "'Accept'", ",", "'application/json'", ")", "kw", "[", "'headers'", "]", "=", "headers", "kw", "[", "'data'", "]", "=", "data", "return", "self", ".", "_post", "(", "url", ",", "*", "*", "kw", ")" ]
33.384615
14.615385
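The header-defaulting logic extracted into a pure function so it can be exercised without an HTTP call; prepare_json_post is a hypothetical name, not part of the original class.

import json

def prepare_json_post(data, **kw):
    headers = kw.pop("headers", {})
    headers.setdefault("Content-Type", "application/json")
    headers.setdefault("Accept", "application/json")
    kw["headers"] = headers
    kw["data"] = json.dumps(data)
    return kw

# caller-supplied Accept wins; Content-Type is filled in
print(prepare_json_post({"a": 1}, headers={"Accept": "text/plain"}))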
def get_Cpt_params(A, Cnodes, AggOp, T): """Return C and F pts. Helper function that returns a dictionary of sparse matrices and arrays which allow us to easily operate on Cpts and Fpts separately. Parameters ---------- A : {csr_matrix, bsr_matrix} Operator Cnodes : {array} Array of all root node indices. This is an array of nodal indices, not degree-of-freedom indices. If the blocksize of T is 1, then nodal indices and degree-of-freedom indices coincide. AggOp : {csr_matrix} Aggregation operator corresponding to A T : {bsr_matrix} Tentative prolongator based on AggOp Returns ------- Dictionary containing these parameters: P_I : {bsr_matrix} Interpolation operator that carries out only simple injection from the coarse grid to fine grid Cpts nodes I_F : {bsr_matrix} Identity operator on Fpts, i.e., the action of this matrix zeros out entries in a vector at all Cpts, leaving Fpts untouched I_C : {bsr_matrix} Identity operator on Cpts nodes, i.e., the action of this matrix zeros out entries in a vector at all Fpts, leaving Cpts untouched Cpts : {array} An array of all root node dofs, corresponding to the F/C splitting Fpts : {array} An array of all non root node dofs, corresponding to the F/C splitting Examples -------- >>> from numpy import array >>> from pyamg.util.utils import get_Cpt_params >>> from pyamg.gallery import poisson >>> from scipy.sparse import csr_matrix, bsr_matrix >>> A = poisson((10,), format='csr') >>> Cpts = array([3, 7]) >>> AggOp = ([[ 1., 0.], [ 1., 0.], ... [ 1., 0.], [ 1., 0.], ... [ 1., 0.], [ 0., 1.], ... [ 0., 1.], [ 0., 1.], ... [ 0., 1.], [ 0., 1.]]) >>> AggOp = csr_matrix(AggOp) >>> T = AggOp.copy().tobsr() >>> params = get_Cpt_params(A, Cpts, AggOp, T) >>> params['P_I'].todense() matrix([[ 0., 0.], [ 0., 0.], [ 0., 0.], [ 1., 0.], [ 0., 0.], [ 0., 0.], [ 0., 0.], [ 0., 1.], [ 0., 0.], [ 0., 0.]]) Notes ----- The principal calling routine is aggregation.smooth.energy_prolongation_smoother, which uses the Cpt_param dictionary for root-node style prolongation smoothing """ if not isspmatrix_bsr(A) and not isspmatrix_csr(A): raise TypeError('Expected BSR or CSR matrix A') if not isspmatrix_csr(AggOp): raise TypeError('Expected CSR matrix AggOp') if not isspmatrix_bsr(T): raise TypeError('Expected BSR matrix T') if T.blocksize[0] != T.blocksize[1]: raise TypeError('Expected square blocksize for BSR matrix T') if A.shape[0] != A.shape[1]: raise TypeError('Expected square matrix A') if T.shape[0] != A.shape[0]: raise TypeError('Expected compatible dimensions for T and A,\ T.shape[0] = A.shape[0]') if Cnodes.shape[0] != AggOp.shape[1]: if AggOp.shape[1] > 1: raise TypeError('Number of columns in AggOp must equal number\ of Cnodes') if isspmatrix_bsr(A) and A.blocksize[0] > 1: # Expand the list of Cpt nodes to a list of Cpt dofs blocksize = A.blocksize[0] Cpts = np.repeat(blocksize*Cnodes, blocksize) for k in range(1, blocksize): Cpts[list(range(k, Cpts.shape[0], blocksize))] += k else: blocksize = 1 Cpts = Cnodes Cpts = np.array(Cpts, dtype=int) # More input checking if Cpts.shape[0] != T.shape[1]: if T.shape[1] > blocksize: raise ValueError('Expected number of Cpts to match T.shape[1]') if blocksize != T.blocksize[0]: raise ValueError('Expected identical blocksize in A and T') if AggOp.shape[0] != int(T.shape[0]/blocksize): raise ValueError('Number of rows in AggOp must equal number of\ fine-grid nodes') # Create two maps, one for F points and one for C points ncoarse = T.shape[1] I_C = eye(A.shape[0], A.shape[1], format='csr') I_F = I_C.copy() I_F.data[Cpts] = 0.0 
I_F.eliminate_zeros() I_C = I_C - I_F I_C.eliminate_zeros() # Find Fpts, the complement of Cpts Fpts = I_F.indices.copy() # P_I only injects from Cpts on the coarse grid to the fine grid, but # because of it's later uses, it must have the CSC indices ordered as # in Cpts if I_C.nnz > 0: indices = Cpts.copy() indptr = np.arange(indices.shape[0]+1) else: indices = np.zeros((0,), dtype=T.indices.dtype) indptr = np.zeros((ncoarse+1,), dtype=T.indptr.dtype) P_I = csc_matrix((I_C.data.copy(), indices, indptr), shape=(I_C.shape[0], ncoarse)) P_I = P_I.tobsr(T.blocksize) # Use same blocksize as A if isspmatrix_bsr(A): I_C = I_C.tobsr(A.blocksize) I_F = I_F.tobsr(A.blocksize) else: I_C = I_C.tobsr(blocksize=(1, 1)) I_F = I_F.tobsr(blocksize=(1, 1)) return {'P_I': P_I, 'I_F': I_F, 'I_C': I_C, 'Cpts': Cpts, 'Fpts': Fpts}
[ "def", "get_Cpt_params", "(", "A", ",", "Cnodes", ",", "AggOp", ",", "T", ")", ":", "if", "not", "isspmatrix_bsr", "(", "A", ")", "and", "not", "isspmatrix_csr", "(", "A", ")", ":", "raise", "TypeError", "(", "'Expected BSR or CSR matrix A'", ")", "if", "not", "isspmatrix_csr", "(", "AggOp", ")", ":", "raise", "TypeError", "(", "'Expected CSR matrix AggOp'", ")", "if", "not", "isspmatrix_bsr", "(", "T", ")", ":", "raise", "TypeError", "(", "'Expected BSR matrix T'", ")", "if", "T", ".", "blocksize", "[", "0", "]", "!=", "T", ".", "blocksize", "[", "1", "]", ":", "raise", "TypeError", "(", "'Expected square blocksize for BSR matrix T'", ")", "if", "A", ".", "shape", "[", "0", "]", "!=", "A", ".", "shape", "[", "1", "]", ":", "raise", "TypeError", "(", "'Expected square matrix A'", ")", "if", "T", ".", "shape", "[", "0", "]", "!=", "A", ".", "shape", "[", "0", "]", ":", "raise", "TypeError", "(", "'Expected compatible dimensions for T and A,\\\n T.shape[0] = A.shape[0]'", ")", "if", "Cnodes", ".", "shape", "[", "0", "]", "!=", "AggOp", ".", "shape", "[", "1", "]", ":", "if", "AggOp", ".", "shape", "[", "1", "]", ">", "1", ":", "raise", "TypeError", "(", "'Number of columns in AggOp must equal number\\\n of Cnodes'", ")", "if", "isspmatrix_bsr", "(", "A", ")", "and", "A", ".", "blocksize", "[", "0", "]", ">", "1", ":", "# Expand the list of Cpt nodes to a list of Cpt dofs", "blocksize", "=", "A", ".", "blocksize", "[", "0", "]", "Cpts", "=", "np", ".", "repeat", "(", "blocksize", "*", "Cnodes", ",", "blocksize", ")", "for", "k", "in", "range", "(", "1", ",", "blocksize", ")", ":", "Cpts", "[", "list", "(", "range", "(", "k", ",", "Cpts", ".", "shape", "[", "0", "]", ",", "blocksize", ")", ")", "]", "+=", "k", "else", ":", "blocksize", "=", "1", "Cpts", "=", "Cnodes", "Cpts", "=", "np", ".", "array", "(", "Cpts", ",", "dtype", "=", "int", ")", "# More input checking", "if", "Cpts", ".", "shape", "[", "0", "]", "!=", "T", ".", "shape", "[", "1", "]", ":", "if", "T", ".", "shape", "[", "1", "]", ">", "blocksize", ":", "raise", "ValueError", "(", "'Expected number of Cpts to match T.shape[1]'", ")", "if", "blocksize", "!=", "T", ".", "blocksize", "[", "0", "]", ":", "raise", "ValueError", "(", "'Expected identical blocksize in A and T'", ")", "if", "AggOp", ".", "shape", "[", "0", "]", "!=", "int", "(", "T", ".", "shape", "[", "0", "]", "/", "blocksize", ")", ":", "raise", "ValueError", "(", "'Number of rows in AggOp must equal number of\\\n fine-grid nodes'", ")", "# Create two maps, one for F points and one for C points", "ncoarse", "=", "T", ".", "shape", "[", "1", "]", "I_C", "=", "eye", "(", "A", ".", "shape", "[", "0", "]", ",", "A", ".", "shape", "[", "1", "]", ",", "format", "=", "'csr'", ")", "I_F", "=", "I_C", ".", "copy", "(", ")", "I_F", ".", "data", "[", "Cpts", "]", "=", "0.0", "I_F", ".", "eliminate_zeros", "(", ")", "I_C", "=", "I_C", "-", "I_F", "I_C", ".", "eliminate_zeros", "(", ")", "# Find Fpts, the complement of Cpts", "Fpts", "=", "I_F", ".", "indices", ".", "copy", "(", ")", "# P_I only injects from Cpts on the coarse grid to the fine grid, but", "# because of it's later uses, it must have the CSC indices ordered as", "# in Cpts", "if", "I_C", ".", "nnz", ">", "0", ":", "indices", "=", "Cpts", ".", "copy", "(", ")", "indptr", "=", "np", ".", "arange", "(", "indices", ".", "shape", "[", "0", "]", "+", "1", ")", "else", ":", "indices", "=", "np", ".", "zeros", "(", "(", "0", ",", ")", ",", "dtype", "=", "T", ".", "indices", ".", "dtype", ")", "indptr", "=", "np", ".", 
"zeros", "(", "(", "ncoarse", "+", "1", ",", ")", ",", "dtype", "=", "T", ".", "indptr", ".", "dtype", ")", "P_I", "=", "csc_matrix", "(", "(", "I_C", ".", "data", ".", "copy", "(", ")", ",", "indices", ",", "indptr", ")", ",", "shape", "=", "(", "I_C", ".", "shape", "[", "0", "]", ",", "ncoarse", ")", ")", "P_I", "=", "P_I", ".", "tobsr", "(", "T", ".", "blocksize", ")", "# Use same blocksize as A", "if", "isspmatrix_bsr", "(", "A", ")", ":", "I_C", "=", "I_C", ".", "tobsr", "(", "A", ".", "blocksize", ")", "I_F", "=", "I_F", ".", "tobsr", "(", "A", ".", "blocksize", ")", "else", ":", "I_C", "=", "I_C", ".", "tobsr", "(", "blocksize", "=", "(", "1", ",", "1", ")", ")", "I_F", "=", "I_F", ".", "tobsr", "(", "blocksize", "=", "(", "1", ",", "1", ")", ")", "return", "{", "'P_I'", ":", "P_I", ",", "'I_F'", ":", "I_F", ",", "'I_C'", ":", "I_C", ",", "'Cpts'", ":", "Cpts", ",", "'Fpts'", ":", "Fpts", "}" ]
35.054422
18.29932
def set_location(self, time, latitude, longitude): ''' Sets the location for the query. Parameters ---------- time: datetime or DatetimeIndex Time range of the query. latitude: float Latitude of the location. longitude: float Longitude of the location. ''' if isinstance(time, datetime.datetime): tzinfo = time.tzinfo else: tzinfo = time.tz if tzinfo is None: self.location = Location(latitude, longitude) else: self.location = Location(latitude, longitude, tz=tzinfo)
[ "def", "set_location", "(", "self", ",", "time", ",", "latitude", ",", "longitude", ")", ":", "if", "isinstance", "(", "time", ",", "datetime", ".", "datetime", ")", ":", "tzinfo", "=", "time", ".", "tzinfo", "else", ":", "tzinfo", "=", "time", ".", "tz", "if", "tzinfo", "is", "None", ":", "self", ".", "location", "=", "Location", "(", "latitude", ",", "longitude", ")", "else", ":", "self", ".", "location", "=", "Location", "(", "latitude", ",", "longitude", ",", "tz", "=", "tzinfo", ")" ]
28.166667
18.722222
def pdf_to_text(pdf_filepath='', **kwargs): """ Parse a pdf into extracted text using the pdfminer lib; returns the extracted text, or an empty list on failure. Args: no_laparams=False, all_texts=None, detect_vertical=None, word_margin=None, char_margin=None, line_margin=None, boxes_flow=None, codec='utf-8', strip_control=False, maxpages=0, page_numbers=None, password="", scale=1.0, rotation=0, layoutmode='normal', debug=False, disable_caching=False, """ result = [] try: if not os.path.exists(pdf_filepath): raise ValueError("No valid pdf filepath introduced..") # TODO: REVIEW THIS PARAMS # update params if not defined kwargs['outfp'] = kwargs.get('outfp', StringIO()) kwargs['laparams'] = kwargs.get('laparams', pdfminer.layout.LAParams()) kwargs['imagewriter'] = kwargs.get('imagewriter', None) kwargs['output_type'] = kwargs.get('output_type', "text") kwargs['codec'] = kwargs.get('codec', 'utf-8') kwargs['disable_caching'] = kwargs.get('disable_caching', False) with open(pdf_filepath, "rb") as f_pdf: pdfminer.high_level.extract_text_to_fp(f_pdf, **kwargs) result = kwargs.get('outfp').getvalue() except Exception: logger.error('fail pdf to text parsing') return result
[ "def", "pdf_to_text", "(", "pdf_filepath", "=", "''", ",", "*", "*", "kwargs", ")", ":", "result", "=", "[", "]", "try", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "pdf_filepath", ")", ":", "raise", "ValueError", "(", "\"No valid pdf filepath introduced..\"", ")", "# TODO: REVIEW THIS PARAMS", "# update params if not defined", "kwargs", "[", "'outfp'", "]", "=", "kwargs", ".", "get", "(", "'outfp'", ",", "StringIO", "(", ")", ")", "kwargs", "[", "'laparams'", "]", "=", "kwargs", ".", "get", "(", "'laparams'", ",", "pdfminer", ".", "layout", ".", "LAParams", "(", ")", ")", "kwargs", "[", "'imagewriter'", "]", "=", "kwargs", ".", "get", "(", "'imagewriter'", ",", "None", ")", "kwargs", "[", "'output_type'", "]", "=", "kwargs", ".", "get", "(", "'output_type'", ",", "\"text\"", ")", "kwargs", "[", "'codec'", "]", "=", "kwargs", ".", "get", "(", "'codec'", ",", "'utf-8'", ")", "kwargs", "[", "'disable_caching'", "]", "=", "kwargs", ".", "get", "(", "'disable_caching'", ",", "False", ")", "with", "open", "(", "pdf_filepath", ",", "\"rb\"", ")", "as", "f_pdf", ":", "pdfminer", ".", "high_level", ".", "extract_text_to_fp", "(", "f_pdf", ",", "*", "*", "kwargs", ")", "result", "=", "kwargs", ".", "get", "(", "'outfp'", ")", ".", "getvalue", "(", ")", "except", "Exception", ":", "logger", ".", "error", "(", "'fail pdf to text parsing'", ")", "return", "result" ]
35.216216
21.810811
def find_by_uuid(self, si, uuid, is_vm=True, path=None, data_center=None): """ Finds a vm/host by its uuid in the vCenter, or returns None :param si: pyvmomi 'ServiceInstance' :param uuid: the object uuid :param path: the path to find the object ('dc' or 'dc/folder' or 'dc/folder/folder/etc...') :param is_vm: if true, search for virtual machines, otherwise search for hosts :param data_center: the datacenter to search in (resolved from path when path is given) """ if uuid is None: return None if path is not None: data_center = self.find_item_in_path_by_type(si, path, vim.Datacenter) search_index = si.content.searchIndex return search_index.FindByUuid(data_center, uuid, is_vm)
[ "def", "find_by_uuid", "(", "self", ",", "si", ",", "uuid", ",", "is_vm", "=", "True", ",", "path", "=", "None", ",", "data_center", "=", "None", ")", ":", "if", "uuid", "is", "None", ":", "return", "None", "if", "path", "is", "not", "None", ":", "data_center", "=", "self", ".", "find_item_in_path_by_type", "(", "si", ",", "path", ",", "vim", ".", "Datacenter", ")", "search_index", "=", "si", ".", "content", ".", "searchIndex", "return", "search_index", ".", "FindByUuid", "(", "data_center", ",", "uuid", ",", "is_vm", ")" ]
41.333333
24.333333
def balance(ctx): """ Show Zebra balance. Like the hours balance, vacation left, etc. """ backend = plugins_registry.get_backends_by_class(ZebraBackend)[0] timesheet_collection = get_timesheet_collection_for_context(ctx, None) hours_to_be_pushed = timesheet_collection.get_hours(pushed=False, ignored=False, unmapped=False) today = datetime.date.today() user_info = backend.get_user_info() timesheets = backend.get_timesheets(get_first_dow(today), get_last_dow(today)) total_duration = sum([float(timesheet['time']) for timesheet in timesheets]) vacation = hours_to_days(user_info['vacation']['difference']) vacation_balance = '{} days, {:.2f} hours'.format(*vacation) hours_balance = user_info['hours']['hours']['balance'] click.echo("Hours balance: {}".format(signed_number(hours_balance))) click.echo("Hours balance after push: {}".format(signed_number(hours_balance + hours_to_be_pushed))) click.echo("Hours done this week: {:.2f}".format(total_duration)) click.echo("Vacation left: {}".format(vacation_balance))
[ "def", "balance", "(", "ctx", ")", ":", "backend", "=", "plugins_registry", ".", "get_backends_by_class", "(", "ZebraBackend", ")", "[", "0", "]", "timesheet_collection", "=", "get_timesheet_collection_for_context", "(", "ctx", ",", "None", ")", "hours_to_be_pushed", "=", "timesheet_collection", ".", "get_hours", "(", "pushed", "=", "False", ",", "ignored", "=", "False", ",", "unmapped", "=", "False", ")", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", "user_info", "=", "backend", ".", "get_user_info", "(", ")", "timesheets", "=", "backend", ".", "get_timesheets", "(", "get_first_dow", "(", "today", ")", ",", "get_last_dow", "(", "today", ")", ")", "total_duration", "=", "sum", "(", "[", "float", "(", "timesheet", "[", "'time'", "]", ")", "for", "timesheet", "in", "timesheets", "]", ")", "vacation", "=", "hours_to_days", "(", "user_info", "[", "'vacation'", "]", "[", "'difference'", "]", ")", "vacation_balance", "=", "'{} days, {:.2f} hours'", ".", "format", "(", "*", "vacation", ")", "hours_balance", "=", "user_info", "[", "'hours'", "]", "[", "'hours'", "]", "[", "'balance'", "]", "click", ".", "echo", "(", "\"Hours balance: {}\"", ".", "format", "(", "signed_number", "(", "hours_balance", ")", ")", ")", "click", ".", "echo", "(", "\"Hours balance after push: {}\"", ".", "format", "(", "signed_number", "(", "hours_balance", "+", "hours_to_be_pushed", ")", ")", ")", "click", ".", "echo", "(", "\"Hours done this week: {:.2f}\"", ".", "format", "(", "total_duration", ")", ")", "click", ".", "echo", "(", "\"Vacation left: {}\"", ".", "format", "(", "vacation_balance", ")", ")" ]
42.8
28.48
def _compute_C_matrix(self): """See Fouss et al. (2006) and von Luxburg et al. (2007). This is the commute-time matrix. It's a squared-euclidian distance matrix in :math:`\\mathbb{R}^n`. """ self.C = np.repeat(np.diag(self.Lp)[:, np.newaxis], self.Lp.shape[0], axis=1) self.C += np.repeat(np.diag(self.Lp)[np.newaxis, :], self.Lp.shape[0], axis=0) self.C -= 2*self.Lp # the following is much slower # self.C = np.zeros(self.Lp.shape) # for i in range(self.Lp.shape[0]): # for j in range(self.Lp.shape[1]): # self.C[i, j] = self.Lp[i, i] + self.Lp[j, j] - 2*self.Lp[i, j] volG = np.sum(self.z) self.C *= volG settings.mt(0, 'computed commute distance matrix') self.distances_dpt = self.C
[ "def", "_compute_C_matrix", "(", "self", ")", ":", "self", ".", "C", "=", "np", ".", "repeat", "(", "np", ".", "diag", "(", "self", ".", "Lp", ")", "[", ":", ",", "np", ".", "newaxis", "]", ",", "self", ".", "Lp", ".", "shape", "[", "0", "]", ",", "axis", "=", "1", ")", "self", ".", "C", "+=", "np", ".", "repeat", "(", "np", ".", "diag", "(", "self", ".", "Lp", ")", "[", "np", ".", "newaxis", ",", ":", "]", ",", "self", ".", "Lp", ".", "shape", "[", "0", "]", ",", "axis", "=", "0", ")", "self", ".", "C", "-=", "2", "*", "self", ".", "Lp", "# the following is much slower", "# self.C = np.zeros(self.Lp.shape)", "# for i in range(self.Lp.shape[0]):", "# for j in range(self.Lp.shape[1]):", "# self.C[i, j] = self.Lp[i, i] + self.Lp[j, j] - 2*self.Lp[i, j]", "volG", "=", "np", ".", "sum", "(", "self", ".", "z", ")", "self", ".", "C", "*=", "volG", "settings", ".", "mt", "(", "0", ",", "'computed commute distance matrix'", ")", "self", ".", "distances_dpt", "=", "self", ".", "C" ]
43.15
13.45
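The commute-distance construction on a tiny invented pseudoinverse Laplacian; broadcasting gives the same C[i, j] = Lp[i, i] + Lp[j, j] - 2*Lp[i, j] as the repeat-based version above.

import numpy as np

Lp = np.array([[2.0, 1.0],
               [1.0, 3.0]])           # hypothetical pseudoinverse Laplacian
d = np.diag(Lp)
C = d[:, None] + d[None, :] - 2 * Lp  # broadcasting instead of np.repeat
print(C)  # [[0. 3.] [3. 0.]]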
def _is_match(self, response, answer): """For MC, can call through to MultiChoice Item Record?""" # TODO: this varies depending on question type if self._only_generic_right_feedback(): return str(answer.genus_type) == str(RIGHT_ANSWER_GENUS) elif self._is_multiple_choice(): return MultiChoiceItemRecord._is_match(self, response, answer) elif self._is_image_sequence() or self._is_mw_sentence(): return OrderedChoiceItemRecord._is_match(self, response, answer) elif self._is_numeric_response(): return CalculationInteractionItemRecord._is_match(self, response, answer) elif self._is_fitb(): return MagicRandomizedInlineChoiceItemRecord._is_match(self, response, answer) return False
[ "def", "_is_match", "(", "self", ",", "response", ",", "answer", ")", ":", "# TODO: this varies depending on question type", "if", "self", ".", "_only_generic_right_feedback", "(", ")", ":", "return", "str", "(", "answer", ".", "genus_type", ")", "==", "str", "(", "RIGHT_ANSWER_GENUS", ")", "elif", "self", ".", "_is_multiple_choice", "(", ")", ":", "return", "MultiChoiceItemRecord", ".", "_is_match", "(", "self", ",", "response", ",", "answer", ")", "elif", "self", ".", "_is_image_sequence", "(", ")", "or", "self", ".", "_is_mw_sentence", "(", ")", ":", "return", "OrderedChoiceItemRecord", ".", "_is_match", "(", "self", ",", "response", ",", "answer", ")", "elif", "self", ".", "_is_numeric_response", "(", ")", ":", "return", "CalculationInteractionItemRecord", ".", "_is_match", "(", "self", ",", "response", ",", "answer", ")", "elif", "self", ".", "_is_fitb", "(", ")", ":", "return", "MagicRandomizedInlineChoiceItemRecord", ".", "_is_match", "(", "self", ",", "response", ",", "answer", ")", "return", "False" ]
56.642857
19.5
def parsemail(raw_message): """Parse message headers, then remove BCC header.""" message = email.parser.Parser().parsestr(raw_message) # Detect encoding detected = chardet.detect(bytearray(raw_message, "utf-8")) encoding = detected["encoding"] print(">>> encoding {}".format(encoding)) for part in message.walk(): if part.get_content_maintype() == 'multipart': continue part.set_charset(encoding) # Extract recipients addrs = email.utils.getaddresses(message.get_all("TO", [])) + \ email.utils.getaddresses(message.get_all("CC", [])) + \ email.utils.getaddresses(message.get_all("BCC", [])) recipients = [x[1] for x in addrs] message.__delitem__("bcc") message.__setitem__('Date', email.utils.formatdate()) sender = message["from"] return (message, sender, recipients)
[ "def", "parsemail", "(", "raw_message", ")", ":", "message", "=", "email", ".", "parser", ".", "Parser", "(", ")", ".", "parsestr", "(", "raw_message", ")", "# Detect encoding", "detected", "=", "chardet", ".", "detect", "(", "bytearray", "(", "raw_message", ",", "\"utf-8\"", ")", ")", "encoding", "=", "detected", "[", "\"encoding\"", "]", "print", "(", "\">>> encoding {}\"", ".", "format", "(", "encoding", ")", ")", "for", "part", "in", "message", ".", "walk", "(", ")", ":", "if", "part", ".", "get_content_maintype", "(", ")", "==", "'multipart'", ":", "continue", "part", ".", "set_charset", "(", "encoding", ")", "# Extract recipients", "addrs", "=", "email", ".", "utils", ".", "getaddresses", "(", "message", ".", "get_all", "(", "\"TO\"", ",", "[", "]", ")", ")", "+", "email", ".", "utils", ".", "getaddresses", "(", "message", ".", "get_all", "(", "\"CC\"", ",", "[", "]", ")", ")", "+", "email", ".", "utils", ".", "getaddresses", "(", "message", ".", "get_all", "(", "\"BCC\"", ",", "[", "]", ")", ")", "recipients", "=", "[", "x", "[", "1", "]", "for", "x", "in", "addrs", "]", "message", ".", "__delitem__", "(", "\"bcc\"", ")", "message", ".", "__setitem__", "(", "'Date'", ",", "email", ".", "utils", ".", "formatdate", "(", ")", ")", "sender", "=", "message", "[", "\"from\"", "]", "return", "(", "message", ",", "sender", ",", "recipients", ")" ]
36.913043
16.391304
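A runnable slice of the recipient-extraction and BCC-stripping steps on a made-up message; the chardet-based encoding detection is omitted.

import email.parser
import email.utils

raw = ("From: a@example.com\nTo: b@example.com\nBCC: c@example.com\n"
       "Subject: hi\n\nbody\n")
message = email.parser.Parser().parsestr(raw)
addrs = (email.utils.getaddresses(message.get_all("TO", [])) +
         email.utils.getaddresses(message.get_all("BCC", [])))
recipients = [x[1] for x in addrs]
del message["bcc"]             # same effect as __delitem__("bcc")
print(recipients)              # ['b@example.com', 'c@example.com']
print(message.get_all("BCC"))  # None -- header removed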
def add_source_root(self, path, langs=tuple(), category=SourceRootCategories.UNKNOWN): """Add the specified fixed source root, which must be relative to the buildroot. Useful in a limited set of circumstances, e.g., when unpacking sources from a jar with unknown structure. Tests should prefer to use dirs that match our source root patterns instead of explicitly setting source roots here. """ self._trie.add_fixed(path, langs, category)
[ "def", "add_source_root", "(", "self", ",", "path", ",", "langs", "=", "tuple", "(", ")", ",", "category", "=", "SourceRootCategories", ".", "UNKNOWN", ")", ":", "self", ".", "_trie", ".", "add_fixed", "(", "path", ",", "langs", ",", "category", ")" ]
57.125
25.75
def gist(cls, gistid): """gistid must be in a username/id form""" url = "https://gist.github.com/{}.js".format(gistid) script = "<script src=\"{}\"></script>".format(url) return script
[ "def", "gist", "(", "cls", ",", "gistid", ")", ":", "url", "=", "\"https://gist.github.com/{}.js\"", ".", "format", "(", "gistid", ")", "script", "=", "\"<script src=\\\"{}\\\"></script>\"", ".", "format", "(", "url", ")", "return", "script" ]
42.4
15.2
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): """Helper to create a parse action for converting parsed datetime string to Python datetime.datetime Params: - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) Example:: dt_expr = pyparsing_common.iso8601_datetime.copy() dt_expr.setParseAction(pyparsing_common.convertToDatetime()) print(dt_expr.parseString("1999-12-31T23:59:59.999")) prints:: [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] """ def cvt_fn(s,l,t): try: return datetime.strptime(t[0], fmt) except ValueError as ve: raise ParseException(s, l, str(ve)) return cvt_fn
[ "def", "convertToDatetime", "(", "fmt", "=", "\"%Y-%m-%dT%H:%M:%S.%f\"", ")", ":", "def", "cvt_fn", "(", "s", ",", "l", ",", "t", ")", ":", "try", ":", "return", "datetime", ".", "strptime", "(", "t", "[", "0", "]", ",", "fmt", ")", "except", "ValueError", "as", "ve", ":", "raise", "ParseException", "(", "s", ",", "l", ",", "str", "(", "ve", ")", ")", "return", "cvt_fn" ]
34.130435
23.217391
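The core of the parse action in isolation: a strptime call with the default format string; the pyparsing wiring is left out.

from datetime import datetime

fmt = "%Y-%m-%dT%H:%M:%S.%f"  # the helper's default format
print(datetime.strptime("1999-12-31T23:59:59.999", fmt))
# 1999-12-31 23:59:59.999000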
def write_new_expr_id(self, search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments = None): """ Creates a new expr_id for the given arguments and returns it. If an entry already exists with these, will just return that id. @search_group: string representing the search group (e.g., cbc) @search: string representing search (e.g., inspiral) @lars_id: string representing lars_id @instruments: the instruments; must be a python set @gps_start_time: string or int representing the gps_start_time of the experiment @gps_end_time: string or int representing the gps_end_time of the experiment """ # check if id already exists check_id = self.get_expr_id( search_group, search, lars_id, instruments, gps_start_time, gps_end_time, comments = comments ) if check_id: return check_id # experiment not found in table row = self.RowType() row.experiment_id = self.get_next_id() row.search_group = search_group row.search = search row.lars_id = lars_id row.instruments = ifos_from_instrument_set(instruments) row.gps_start_time = gps_start_time row.gps_end_time = gps_end_time row.comments = comments self.append(row) # return new ID return row.experiment_id
[ "def", "write_new_expr_id", "(", "self", ",", "search_group", ",", "search", ",", "lars_id", ",", "instruments", ",", "gps_start_time", ",", "gps_end_time", ",", "comments", "=", "None", ")", ":", "# check if id already exists", "check_id", "=", "self", ".", "get_expr_id", "(", "search_group", ",", "search", ",", "lars_id", ",", "instruments", ",", "gps_start_time", ",", "gps_end_time", ",", "comments", "=", "comments", ")", "if", "check_id", ":", "return", "check_id", "# experiment not found in table", "row", "=", "self", ".", "RowType", "(", ")", "row", ".", "experiment_id", "=", "self", ".", "get_next_id", "(", ")", "row", ".", "search_group", "=", "search_group", "row", ".", "search", "=", "search", "row", ".", "lars_id", "=", "lars_id", "row", ".", "instruments", "=", "ifos_from_instrument_set", "(", "instruments", ")", "row", ".", "gps_start_time", "=", "gps_start_time", "row", ".", "gps_end_time", "=", "gps_end_time", "row", ".", "comments", "=", "comments", "self", ".", "append", "(", "row", ")", "# return new ID", "return", "row", ".", "experiment_id" ]
37.625
22.875
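A generic sketch of the check-then-append pattern the method follows: return an existing id when a matching row is already stored, otherwise append a new row and hand back its id. Names here are hypothetical.

rows = []

def get_or_create_id(key):
    # return the existing id when a matching row is already stored
    for row in rows:
        if row["key"] == key:
            return row["id"]
    row = {"id": len(rows), "key": key}
    rows.append(row)
    return row["id"]

print(get_or_create_id("a"), get_or_create_id("b"), get_or_create_id("a"))  # 0 1 0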