Dataset columns: text (string, lengths 89 to 104k); code_tokens (list); avg_line_len (float64, 7.91 to 980); score (float64, 0 to 630)
def main(): """Read configuration and execute test runs.""" parser = argparse.ArgumentParser(description='Stress test applications.') parser.add_argument('config_path', help='Path to configuration file.') args = parser.parse_args() try: configuration = load_configuration(args.config_path) except InvalidConfigurationError: print("\nConfiguration is not valid.") print('Example:\n{}'.format(help_configuration)) return 1 print("Starting up ...") futures = [] with ProcessPoolExecutor(configuration[PROCESSORS]) as executor: for _ in range(configuration[PROCESSES]): futures.append(executor.submit(execute_test, configuration)) print("... finished") test_stats = combine_test_stats([f.result() for f in futures]) show_test_stats(test_stats) return 0
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Stress test applications.'", ")", "parser", ".", "add_argument", "(", "'config_path'", ",", "help", "=", "'Path to configuration file.'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "try", ":", "configuration", "=", "load_configuration", "(", "args", ".", "config_path", ")", "except", "InvalidConfigurationError", ":", "print", "(", "\"\\nConfiguration is not valid.\"", ")", "print", "(", "'Example:\\n{}'", ".", "format", "(", "help_configuration", ")", ")", "return", "1", "print", "(", "\"Starting up ...\"", ")", "futures", "=", "[", "]", "with", "ProcessPoolExecutor", "(", "configuration", "[", "PROCESSORS", "]", ")", "as", "executor", ":", "for", "_", "in", "range", "(", "configuration", "[", "PROCESSES", "]", ")", ":", "futures", ".", "append", "(", "executor", ".", "submit", "(", "execute_test", ",", "configuration", ")", ")", "print", "(", "\"... finished\"", ")", "test_stats", "=", "combine_test_stats", "(", "[", "f", ".", "result", "(", ")", "for", "f", "in", "futures", "]", ")", "show_test_stats", "(", "test_stats", ")", "return", "0" ]
41.65
19.7
def pop(self, key, default=__marker):
    '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
    value.  If key is not found, d is returned if given, otherwise KeyError
    is raised.
    '''
    if key in self:
        result = self[key]
        del self[key]
        return result
    if default is self.__marker:
        raise KeyError(key)
    return default
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "__marker", ")", ":", "if", "key", "in", "self", ":", "result", "=", "self", "[", "key", "]", "del", "self", "[", "key", "]", "return", "result", "if", "default", "is", "self", ".", "__marker", ":", "raise", "KeyError", "(", "key", ")", "return", "default" ]
34.083333
20.75
def read_namespaced_deployment_scale(self, name, namespace, **kwargs):
    """
    read scale of the specified Deployment
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.read_namespaced_deployment_scale(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Scale (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1Scale
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.read_namespaced_deployment_scale_with_http_info(name, namespace, **kwargs)
        return data
[ "def", "read_namespaced_deployment_scale", "(", "self", ",", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "read_namespaced_deployment_scale_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "read_namespaced_deployment_scale_with_http_info", "(", "name", ",", "namespace", ",", "*", "*", "kwargs", ")", "return", "data" ]
49.545455
23.363636
def _potential_wins(self):
    '''Generates all the combinations of board positions that need to
    be checked for a win.'''
    yield from self.board
    yield from zip(*self.board)
    yield self.board[0][0], self.board[1][1], self.board[2][2]
    yield self.board[0][2], self.board[1][1], self.board[2][0]
[ "def", "_potential_wins", "(", "self", ")", ":", "yield", "from", "self", ".", "board", "yield", "from", "zip", "(", "*", "self", ".", "board", ")", "yield", "self", ".", "board", "[", "0", "]", "[", "0", "]", ",", "self", ".", "board", "[", "1", "]", "[", "1", "]", ",", "self", ".", "board", "[", "2", "]", "[", "2", "]", "yield", "self", ".", "board", "[", "0", "]", "[", "2", "]", ",", "self", ".", "board", "[", "1", "]", "[", "1", "]", ",", "self", ".", "board", "[", "2", "]", "[", "0", "]" ]
46.714286
16.714286
def clear_cache(self, items=None, topic=EVENT_TOPIC):
    """
    expects event object to be in the format of a session-stop or
    session-expire event, whose results attribute is a
    namedtuple(identifiers, session_key)
    """
    try:
        for realm in self.realms:
            identifier = items.identifiers.from_source(realm.name)
            if identifier:
                realm.clear_cached_authc_info(identifier)
    except AttributeError:
        msg = ('Could not clear authc_info from cache after event. '
               'items: ' + str(items))
        logger.warn(msg)
[ "def", "clear_cache", "(", "self", ",", "items", "=", "None", ",", "topic", "=", "EVENT_TOPIC", ")", ":", "try", ":", "for", "realm", "in", "self", ".", "realms", ":", "identifier", "=", "items", ".", "identifiers", ".", "from_source", "(", "realm", ".", "name", ")", "if", "identifier", ":", "realm", ".", "clear_cached_authc_info", "(", "identifier", ")", "except", "AttributeError", ":", "msg", "=", "(", "'Could not clear authc_info from cache after event. '", "'items: '", "+", "str", "(", "items", ")", ")", "logger", ".", "warn", "(", "msg", ")" ]
41.866667
14.133333
def set_attribute(self, attribute, attribute_state):
    """Set an attribute on the session.

    :param attribute:
    :return: attribute value, status code
    :rtype: object, constants.StatusCode
    """
    # Check that the attribute exists.
    try:
        attr = attributes.AttributesByID[attribute]
    except KeyError:
        return constants.StatusCode.error_nonsupported_attribute
    # Check that the attribute is valid for this session type.
    if not attr.in_resource(self.session_type):
        return constants.StatusCode.error_nonsupported_attribute
    # Check that the attribute is writable.
    if not attr.write:
        return constants.StatusCode.error_attribute_read_only
    try:
        self.attrs[attribute] = attribute_state
    except ValueError:
        return constants.StatusCode.error_nonsupported_attribute_state
    return constants.StatusCode.success
[ "def", "set_attribute", "(", "self", ",", "attribute", ",", "attribute_state", ")", ":", "# Check that the attribute exists.", "try", ":", "attr", "=", "attributes", ".", "AttributesByID", "[", "attribute", "]", "except", "KeyError", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute", "# Check that the attribute is valid for this session type.", "if", "not", "attr", ".", "in_resource", "(", "self", ".", "session_type", ")", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute", "# Check that the attribute is writable.", "if", "not", "attr", ".", "write", ":", "return", "constants", ".", "StatusCode", ".", "error_attribute_read_only", "try", ":", "self", ".", "attrs", "[", "attribute", "]", "=", "attribute_state", "except", "ValueError", ":", "return", "constants", ".", "StatusCode", ".", "error_nonsupported_attribute_state", "return", "constants", ".", "StatusCode", ".", "success" ]
34
20.214286
def import_module_from_fpath(module_fpath):
    r"""
    imports module from a file path

    Args:
        module_fpath (str):

    Returns:
        module: module

    CommandLine:
        python -m utool.util_import --test-import_module_from_fpath

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_import import *  # NOQA
        >>> import utool
        >>> module_fpath = utool.__file__
        >>> module = import_module_from_fpath(module_fpath)
        >>> result = ('module = %s' % (str(module),))
        >>> print(result)

    Ignore:
        import shutil
        import ubelt as ub
        test_root = ub.ensure_app_cache_dir('test_fpath_import')
        # Clear the directory
        shutil.rmtree(test_root)
        test_root = ub.ensure_app_cache_dir('test_fpath_import')

        # -----
        # Define two temporary modules with the same name that are not in sys.path
        import sys, os, os.path
        from os.path import join

        # Even though they have the same name they have different values
        mod1_fpath = ub.ensuredir((test_root, 'path1', 'testmod'))
        ub.writeto(join(mod1_fpath, '__init__.py'), 'version = 1\nfrom . import sibling\na1 = 1')
        ub.writeto(join(mod1_fpath, 'sibling.py'), 'spam = \"ham\"\nb1 = 2')

        # Even though they have the same name they have different values
        mod2_fpath = ub.ensuredir((test_root, 'path2', 'testmod'))
        ub.writeto(join(mod2_fpath, '__init__.py'), 'version = 2\nfrom . import sibling\na2 = 3')
        ub.writeto(join(mod2_fpath, 'sibling.py'), 'spam = \"jam\"\nb2 = 4')

        # -----
        # Neither module should be importable through the normal mechanism
        try:
            import testmod
            assert False, 'should fail'
        except ImportError as ex:
            pass

        mod1 = ut.import_module_from_fpath(mod1_fpath)
        print('mod1.version = {!r}'.format(mod1.version))
        print('mod1.version = {!r}'.format(mod1.version))
        print(mod1.version == 1, 'mod1 version is 1')
        print('mod1.a1 = {!r}'.format(mod1.a1))

        mod2 = ut.import_module_from_fpath(mod2_fpath)
        print('mod2.version = {!r}'.format(mod2.version))
        print(mod2.version == 2, 'mod2 version is 2')
        print('mod2.a2 = {!r}'.format(mod1.a2))

        # BUT Notice how mod1 is mod2
        print(mod1 is mod2)
        # mod1 has attributes from mod1 and mod2
        print('mod1.a1 = {!r}'.format(mod1.a1))
        print('mod1.a2 = {!r}'.format(mod1.a2))
        print('mod2.a1 = {!r}'.format(mod2.a1))
        print('mod2.a2 = {!r}'.format(mod2.a2))
        # Both are version 2
        print('mod1.version = {!r}'.format(mod1.version))
        print('mod2.version = {!r}'.format(mod2.version))
        # However sibling always remains at version1 (ham)
        print('mod2.sibling.spam = {!r}'.format(mod2.sibling.spam))

        # now importing testmod works because it reads from sys.modules
        import testmod

        # reloading mod1 overwrites attrs again
        mod1 = ut.import_module_from_fpath(mod1_fpath)

        # Removing both from sys.modules
        del sys.modules['testmod']
        del sys.modules['testmod.sibling']
        mod2 = ut.import_module_from_fpath(mod2_fpath)
        print(not hasattr(mod2, 'a1'), 'mod2 no longer has a1 and it reloads itself correctly')

        # -------
        del sys.modules['testmod']
        del sys.modules['testmod.sibling']
        mod1 = ut.import_module_from_fpath(mod1_fpath)

        # third test
        mod3_fpath = ub.ensuredir((test_root, 'path3', 'testmod'))
        ub.writeto(join(mod3_fpath, '__init__.py'), 'version = 3')
        module_fpath = mod3_fpath
        modname = 'testmod'

        # third test
        mod4_fpath = ub.ensuredir((test_root, 'path3', 'novelmod'))
        ub.writeto(join(mod4_fpath, '__init__.py'), 'version = 4')
    """
    from os.path import basename, splitext, isdir, join, exists, dirname, split
    import platform
    if isdir(module_fpath):
        module_fpath = join(module_fpath, '__init__.py')
        print('module_fpath = {!r}'.format(module_fpath))
    if not exists(module_fpath):
        raise ImportError('module_fpath={!r} does not exist'.format(
            module_fpath))
    python_version = platform.python_version()
    modname = splitext(basename(module_fpath))[0]
    if modname == '__init__':
        modname = split(dirname(module_fpath))[1]
    if util_inject.PRINT_INJECT_ORDER:
        if modname not in sys.argv:
            util_inject.noinject(modname, N=2, via='ut.import_module_from_fpath')
    if python_version.startswith('2.7'):
        import imp
        module = imp.load_source(modname, module_fpath)
    elif python_version.startswith('3'):
        import importlib.machinery
        loader = importlib.machinery.SourceFileLoader(modname, module_fpath)
        module = loader.load_module()
        # module = loader.exec_module(modname)
    else:
        raise AssertionError('invalid python version={!r}'.format(
            python_version))
    return module
[ "def", "import_module_from_fpath", "(", "module_fpath", ")", ":", "from", "os", ".", "path", "import", "basename", ",", "splitext", ",", "isdir", ",", "join", ",", "exists", ",", "dirname", ",", "split", "import", "platform", "if", "isdir", "(", "module_fpath", ")", ":", "module_fpath", "=", "join", "(", "module_fpath", ",", "'__init__.py'", ")", "print", "(", "'module_fpath = {!r}'", ".", "format", "(", "module_fpath", ")", ")", "if", "not", "exists", "(", "module_fpath", ")", ":", "raise", "ImportError", "(", "'module_fpath={!r} does not exist'", ".", "format", "(", "module_fpath", ")", ")", "python_version", "=", "platform", ".", "python_version", "(", ")", "modname", "=", "splitext", "(", "basename", "(", "module_fpath", ")", ")", "[", "0", "]", "if", "modname", "==", "'__init__'", ":", "modname", "=", "split", "(", "dirname", "(", "module_fpath", ")", ")", "[", "1", "]", "if", "util_inject", ".", "PRINT_INJECT_ORDER", ":", "if", "modname", "not", "in", "sys", ".", "argv", ":", "util_inject", ".", "noinject", "(", "modname", ",", "N", "=", "2", ",", "via", "=", "'ut.import_module_from_fpath'", ")", "if", "python_version", ".", "startswith", "(", "'2.7'", ")", ":", "import", "imp", "module", "=", "imp", ".", "load_source", "(", "modname", ",", "module_fpath", ")", "elif", "python_version", ".", "startswith", "(", "'3'", ")", ":", "import", "importlib", ".", "machinery", "loader", "=", "importlib", ".", "machinery", ".", "SourceFileLoader", "(", "modname", ",", "module_fpath", ")", "module", "=", "loader", ".", "load_module", "(", ")", "# module = loader.exec_module(modname)", "else", ":", "raise", "AssertionError", "(", "'invalid python version={!r}'", ".", "format", "(", "python_version", ")", ")", "return", "module" ]
35.191489
21.524823
def get_affiliation_details(self, value, affiliation_id, institute_literal):
    """
    This method is used to map the Affiliation between an author and Institution.

    Parameters
    ----------
    value - The author name
    affiliation_id - Primary key of the affiliation table
    institute_literal

    Returns
    -------
    Affiliation details(JSON fixture) which can be written to a file
    """
    tokens = tuple([t.upper().strip() for t in value.split(',')])
    if len(tokens) == 1:
        tokens = value.split()
    if len(tokens) > 0:
        if len(tokens) > 1:
            aulast, auinit = tokens[0:2]
        else:
            aulast = tokens[0]
            auinit = ''
    else:
        aulast, auinit = tokens[0], ''
    aulast = _strip_punctuation(aulast).upper()
    auinit = _strip_punctuation(auinit).upper()
    author_key = auinit + aulast
    affiliation_row = {
        "model": "django-tethne.affiliation",
        "pk": affiliation_id,
        "fields": {
            "author": self.authorIdMap[author_key],
            "institution": self.instituteIdMap[institute_literal]
        }
    }
    return affiliation_row
[ "def", "get_affiliation_details", "(", "self", ",", "value", ",", "affiliation_id", ",", "institute_literal", ")", ":", "tokens", "=", "tuple", "(", "[", "t", ".", "upper", "(", ")", ".", "strip", "(", ")", "for", "t", "in", "value", ".", "split", "(", "','", ")", "]", ")", "if", "len", "(", "tokens", ")", "==", "1", ":", "tokens", "=", "value", ".", "split", "(", ")", "if", "len", "(", "tokens", ")", ">", "0", ":", "if", "len", "(", "tokens", ")", ">", "1", ":", "aulast", ",", "auinit", "=", "tokens", "[", "0", ":", "2", "]", "else", ":", "aulast", "=", "tokens", "[", "0", "]", "auinit", "=", "''", "else", ":", "aulast", ",", "auinit", "=", "tokens", "[", "0", "]", ",", "''", "aulast", "=", "_strip_punctuation", "(", "aulast", ")", ".", "upper", "(", ")", "auinit", "=", "_strip_punctuation", "(", "auinit", ")", ".", "upper", "(", ")", "author_key", "=", "auinit", "+", "aulast", "affiliation_row", "=", "{", "\"model\"", ":", "\"django-tethne.affiliation\"", ",", "\"pk\"", ":", "affiliation_id", ",", "\"fields\"", ":", "{", "\"author\"", ":", "self", ".", "authorIdMap", "[", "author_key", "]", ",", "\"institution\"", ":", "self", ".", "instituteIdMap", "[", "institute_literal", "]", "}", "}", "return", "affiliation_row" ]
32
19.025641
def __random_density_bures(N, rank=None, seed=None):
    """
    Generate a random density matrix from the Bures metric.

    Args:
        N (int): the length of the density matrix.
        rank (int or None): the rank of the density matrix. The default
            value is full-rank.
        seed (int): Optional. To set a random seed.
    Returns:
        ndarray: rho (N,N) a density matrix.
    """
    P = np.eye(N) + random_unitary(N).data
    G = P.dot(__ginibre_matrix(N, rank, seed))
    G = G.dot(G.conj().T)
    return G / np.trace(G)
[ "def", "__random_density_bures", "(", "N", ",", "rank", "=", "None", ",", "seed", "=", "None", ")", ":", "P", "=", "np", ".", "eye", "(", "N", ")", "+", "random_unitary", "(", "N", ")", ".", "data", "G", "=", "P", ".", "dot", "(", "__ginibre_matrix", "(", "N", ",", "rank", ",", "seed", ")", ")", "G", "=", "G", ".", "dot", "(", "G", ".", "conj", "(", ")", ".", "T", ")", "return", "G", "/", "np", ".", "trace", "(", "G", ")" ]
33.25
14.5
def _computeforce(self, R, z, phi=0, t=0):
    """
    NAME:
        _computeforce
    PURPOSE:
        Evaluate the first derivative of Phi with respect to R, z and phi
    INPUT:
        R - Cylindrical Galactocentric radius
        z - vertical height
        phi - azimuth
        t - time
    OUTPUT:
        dPhi/dr, dPhi/dtheta, dPhi/dphi
    HISTORY:
        2016-06-07 - Written - Aladdin
    """
    Acos, Asin = self._Acos, self._Asin
    N, L, M = Acos.shape
    r, theta, phi = bovy_coords.cyl_to_spher(R, z, phi)
    new_hash = hashlib.md5(nu.array([R, z, phi])).hexdigest()
    if new_hash == self._force_hash:
        dPhi_dr = self._cached_dPhi_dr
        dPhi_dtheta = self._cached_dPhi_dtheta
        dPhi_dphi = self._cached_dPhi_dphi
    else:
        PP, dPP = lpmn(M - 1, L - 1, nu.cos(theta))  ##Get the Legendre polynomials
        PP = PP.T[None, :, :]
        dPP = dPP.T[None, :, :]
        phi_tilde = self._phiTilde(r, N, L)[:, :, nu.newaxis]
        dphi_tilde = self._dphiTilde(r, N, L)[:, :, nu.newaxis]
        m = nu.arange(0, M)[nu.newaxis, nu.newaxis, :]
        mcos = nu.cos(m * phi)
        msin = nu.sin(m * phi)
        dPhi_dr = -nu.sum((Acos * mcos + Asin * msin) * PP * dphi_tilde)
        dPhi_dtheta = -nu.sum((Acos * mcos + Asin * msin) * phi_tilde * dPP * (-nu.sin(theta)))
        dPhi_dphi = -nu.sum(m * (Asin * mcos - Acos * msin) * phi_tilde * PP)
        self._force_hash = new_hash
        self._cached_dPhi_dr = dPhi_dr
        self._cached_dPhi_dtheta = dPhi_dtheta
        self._cached_dPhi_dphi = dPhi_dphi
    return dPhi_dr, dPhi_dtheta, dPhi_dphi
[ "def", "_computeforce", "(", "self", ",", "R", ",", "z", ",", "phi", "=", "0", ",", "t", "=", "0", ")", ":", "Acos", ",", "Asin", "=", "self", ".", "_Acos", ",", "self", ".", "_Asin", "N", ",", "L", ",", "M", "=", "Acos", ".", "shape", "r", ",", "theta", ",", "phi", "=", "bovy_coords", ".", "cyl_to_spher", "(", "R", ",", "z", ",", "phi", ")", "new_hash", "=", "hashlib", ".", "md5", "(", "nu", ".", "array", "(", "[", "R", ",", "z", ",", "phi", "]", ")", ")", ".", "hexdigest", "(", ")", "if", "new_hash", "==", "self", ".", "_force_hash", ":", "dPhi_dr", "=", "self", ".", "_cached_dPhi_dr", "dPhi_dtheta", "=", "self", ".", "_cached_dPhi_dtheta", "dPhi_dphi", "=", "self", ".", "_cached_dPhi_dphi", "else", ":", "PP", ",", "dPP", "=", "lpmn", "(", "M", "-", "1", ",", "L", "-", "1", ",", "nu", ".", "cos", "(", "theta", ")", ")", "##Get the Legendre polynomials", "PP", "=", "PP", ".", "T", "[", "None", ",", ":", ",", ":", "]", "dPP", "=", "dPP", ".", "T", "[", "None", ",", ":", ",", ":", "]", "phi_tilde", "=", "self", ".", "_phiTilde", "(", "r", ",", "N", ",", "L", ")", "[", ":", ",", ":", ",", "nu", ".", "newaxis", "]", "dphi_tilde", "=", "self", ".", "_dphiTilde", "(", "r", ",", "N", ",", "L", ")", "[", ":", ",", ":", ",", "nu", ".", "newaxis", "]", "m", "=", "nu", ".", "arange", "(", "0", ",", "M", ")", "[", "nu", ".", "newaxis", ",", "nu", ".", "newaxis", ",", ":", "]", "mcos", "=", "nu", ".", "cos", "(", "m", "*", "phi", ")", "msin", "=", "nu", ".", "sin", "(", "m", "*", "phi", ")", "dPhi_dr", "=", "-", "nu", ".", "sum", "(", "(", "Acos", "*", "mcos", "+", "Asin", "*", "msin", ")", "*", "PP", "*", "dphi_tilde", ")", "dPhi_dtheta", "=", "-", "nu", ".", "sum", "(", "(", "Acos", "*", "mcos", "+", "Asin", "*", "msin", ")", "*", "phi_tilde", "*", "dPP", "*", "(", "-", "nu", ".", "sin", "(", "theta", ")", ")", ")", "dPhi_dphi", "=", "-", "nu", ".", "sum", "(", "m", "*", "(", "Asin", "*", "mcos", "-", "Acos", "*", "msin", ")", "*", "phi_tilde", "*", "PP", ")", "self", ".", "_force_hash", "=", "new_hash", "self", ".", "_cached_dPhi_dr", "=", "dPhi_dr", "self", ".", "_cached_dPhi_dtheta", "=", "dPhi_dtheta", "self", ".", "_cached_dPhi_dphi", "=", "dPhi_dphi", "return", "dPhi_dr", ",", "dPhi_dtheta", ",", "dPhi_dphi" ]
38.244444
15.844444
def bna_config_cmd_status_input_session_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    bna_config_cmd_status = ET.Element("bna_config_cmd_status")
    config = bna_config_cmd_status
    input = ET.SubElement(bna_config_cmd_status, "input")
    session_id = ET.SubElement(input, "session-id")
    session_id.text = kwargs.pop('session_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "bna_config_cmd_status_input_session_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "bna_config_cmd_status", "=", "ET", ".", "Element", "(", "\"bna_config_cmd_status\"", ")", "config", "=", "bna_config_cmd_status", "input", "=", "ET", ".", "SubElement", "(", "bna_config_cmd_status", ",", "\"input\"", ")", "session_id", "=", "ET", ".", "SubElement", "(", "input", ",", "\"session-id\"", ")", "session_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'session_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
41.333333
13.583333
def add_to_subset(self, id, s):
    """
    Adds a node to a subset
    """
    n = self.node(id)
    if 'meta' not in n:
        n['meta'] = {}
    meta = n['meta']
    if 'subsets' not in meta:
        meta['subsets'] = []
    meta['subsets'].append(s)
[ "def", "add_to_subset", "(", "self", ",", "id", ",", "s", ")", ":", "n", "=", "self", ".", "node", "(", "id", ")", "if", "'meta'", "not", "in", "n", ":", "n", "[", "'meta'", "]", "=", "{", "}", "meta", "=", "n", "[", "'meta'", "]", "if", "'subsets'", "not", "in", "meta", ":", "meta", "[", "'subsets'", "]", "=", "[", "]", "meta", "[", "'subsets'", "]", ".", "append", "(", "s", ")" ]
25.818182
8.909091
def verify(self, authenticators):
    """Verify this OMAPI message.

    >>> a1 = OmapiHMACMD5Authenticator(b"egg", b"spam")
    >>> a2 = OmapiHMACMD5Authenticator(b"egg", b"tomatoes")
    >>> a1.authid = a2.authid = 5
    >>> m = OmapiMessage.open(b"host")
    >>> m.verify({a1.authid: a1})
    False
    >>> m.sign(a1)
    >>> m.verify({a1.authid: a1})
    True
    >>> m.sign(a2)
    >>> m.verify({a1.authid: a1})
    False

    @type authenticators: {int: OmapiAuthenticatorBase}
    @rtype: bool
    """
    try:
        return authenticators[self.authid].sign(
            self.as_string(forsigning=True)) == self.signature
    except KeyError:
        return False
[ "def", "verify", "(", "self", ",", "authenticators", ")", ":", "try", ":", "return", "authenticators", "[", "self", ".", "authid", "]", ".", "sign", "(", "self", ".", "as_string", "(", "forsigning", "=", "True", ")", ")", "==", "self", ".", "signature", "except", "KeyError", ":", "return", "False" ]
25.695652
20.826087
def verify(self, connection_type=None):
    """
    Verifies and updates the remote system settings.

    :param connection_type: same as the one in `create` method.
    """
    req_body = self._cli.make_body(connectionType=connection_type)
    resp = self.action('verify', **req_body)
    resp.raise_if_err()
    return resp
[ "def", "verify", "(", "self", ",", "connection_type", "=", "None", ")", ":", "req_body", "=", "self", ".", "_cli", ".", "make_body", "(", "connectionType", "=", "connection_type", ")", "resp", "=", "self", ".", "action", "(", "'verify'", ",", "*", "*", "req_body", ")", "resp", ".", "raise_if_err", "(", ")", "return", "resp" ]
31.545455
17.727273
def tie_weights(self):
    """
    Run this to be sure output and input (adaptive) softmax weights are tied
    """
    # sampled softmax
    if self.sample_softmax > 0:
        if self.config.tie_weight:
            self.out_layer.weight = self.transformer.word_emb.weight
    # adaptive softmax (including standard softmax)
    else:
        if self.config.tie_weight:
            for i in range(len(self.crit.out_layers)):
                self.crit.out_layers[i].weight = self.transformer.word_emb.emb_layers[i].weight
        if self.config.tie_projs:
            for i, tie_proj in enumerate(self.config.tie_projs):
                if tie_proj and self.config.div_val == 1 and self.config.d_model != self.config.d_embed:
                    self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[0]
                elif tie_proj and self.config.div_val != 1:
                    self.crit.out_projs[i] = self.transformer.word_emb.emb_projs[i]
[ "def", "tie_weights", "(", "self", ")", ":", "# sampled softmax", "if", "self", ".", "sample_softmax", ">", "0", ":", "if", "self", ".", "config", ".", "tie_weight", ":", "self", ".", "out_layer", ".", "weight", "=", "self", ".", "transformer", ".", "word_emb", ".", "weight", "# adaptive softmax (including standard softmax)", "else", ":", "if", "self", ".", "config", ".", "tie_weight", ":", "for", "i", "in", "range", "(", "len", "(", "self", ".", "crit", ".", "out_layers", ")", ")", ":", "self", ".", "crit", ".", "out_layers", "[", "i", "]", ".", "weight", "=", "self", ".", "transformer", ".", "word_emb", ".", "emb_layers", "[", "i", "]", ".", "weight", "if", "self", ".", "config", ".", "tie_projs", ":", "for", "i", ",", "tie_proj", "in", "enumerate", "(", "self", ".", "config", ".", "tie_projs", ")", ":", "if", "tie_proj", "and", "self", ".", "config", ".", "div_val", "==", "1", "and", "self", ".", "config", ".", "d_model", "!=", "self", ".", "config", ".", "d_embed", ":", "self", ".", "crit", ".", "out_projs", "[", "i", "]", "=", "self", ".", "transformer", ".", "word_emb", ".", "emb_projs", "[", "0", "]", "elif", "tie_proj", "and", "self", ".", "config", ".", "div_val", "!=", "1", ":", "self", ".", "crit", ".", "out_projs", "[", "i", "]", "=", "self", ".", "transformer", ".", "word_emb", ".", "emb_projs", "[", "i", "]" ]
58.411765
24.058824
def getaddresses(fieldvalues):
    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
    all = COMMASPACE.join(fieldvalues)
    a = _AddressList(all)
    return a.addresslist
[ "def", "getaddresses", "(", "fieldvalues", ")", ":", "all", "=", "COMMASPACE", ".", "join", "(", "fieldvalues", ")", "a", "=", "_AddressList", "(", "all", ")", "return", "a", ".", "addresslist" ]
36.4
8.6
def authenticate(self, req, resp, resource):
    """
    Extract basic auth token from request `authorization` header, decode the
    token, verify the username/password and return either a ``user`` object
    if successful else raise a ``falcon.HTTPUnauthorized`` exception
    """
    username, password = self._extract_credentials(req)
    user = self.user_loader(username, password)
    if not user:
        raise falcon.HTTPUnauthorized(
            description='Invalid Username/Password')

    return user
[ "def", "authenticate", "(", "self", ",", "req", ",", "resp", ",", "resource", ")", ":", "username", ",", "password", "=", "self", ".", "_extract_credentials", "(", "req", ")", "user", "=", "self", ".", "user_loader", "(", "username", ",", "password", ")", "if", "not", "user", ":", "raise", "falcon", ".", "HTTPUnauthorized", "(", "description", "=", "'Invalid Username/Password'", ")", "return", "user" ]
42
18.923077
def resolve_parameters(value, parameters, date_time=datetime.datetime.now(), macros=False):
    """ Resolve a format modifier with the corresponding value.

    Args:
      value: The string (path, table, or any other artifact in a cell_body) which may have
          format modifiers. E.g. a table name could be <project-id>.<dataset-id>.logs_%(_ds)s
      parameters: The user-specified list of parameters in the cell-body.
      date_time: The timestamp at which the parameters need to be evaluated. E.g. when the table
          is <project-id>.<dataset-id>.logs_%(_ds)s, the '_ds' evaluates to the current date-time.
      macros: When true, the format modifiers in the value are replaced with the corresponding
          airflow macro equivalents (like '{{ ds }}'). When false, the actual values are used
          (like '2015-12-12').
    Returns:
      The resolved value, i.e. the value with the format modifiers replaced with the corresponding
      parameter-values. E.g. if value is <project-id>.<dataset-id>.logs_%(_ds)s, the returned
      value is something like <project-id>.<dataset-id>.logs_2017-12-21
    """
    merged_parameters = Query.merge_parameters(parameters, date_time=date_time, macros=macros,
                                               types_and_values=False)
    return Query._resolve_parameters(value, merged_parameters)
[ "def", "resolve_parameters", "(", "value", ",", "parameters", ",", "date_time", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ",", "macros", "=", "False", ")", ":", "merged_parameters", "=", "Query", ".", "merge_parameters", "(", "parameters", ",", "date_time", "=", "date_time", ",", "macros", "=", "macros", ",", "types_and_values", "=", "False", ")", "return", "Query", ".", "_resolve_parameters", "(", "value", ",", "merged_parameters", ")" ]
63.904762
39.238095
def scale_dataset(self, dsid, variable, info):
    """Scale the data set, applying the attributes from the netCDF file"""
    variable = remove_empties(variable)
    scale = variable.attrs.get('scale_factor', np.array(1))
    offset = variable.attrs.get('add_offset', np.array(0))
    if np.issubdtype((scale + offset).dtype, np.floating) or np.issubdtype(variable.dtype, np.floating):
        if '_FillValue' in variable.attrs:
            variable = variable.where(
                variable != variable.attrs['_FillValue'])
            variable.attrs['_FillValue'] = np.nan
        if 'valid_range' in variable.attrs:
            variable = variable.where(
                variable <= variable.attrs['valid_range'][1])
            variable = variable.where(
                variable >= variable.attrs['valid_range'][0])
        if 'valid_max' in variable.attrs:
            variable = variable.where(
                variable <= variable.attrs['valid_max'])
        if 'valid_min' in variable.attrs:
            variable = variable.where(
                variable >= variable.attrs['valid_min'])
    attrs = variable.attrs
    variable = variable * scale + offset
    variable.attrs = attrs

    variable.attrs.update({'platform_name': self.platform_name,
                           'sensor': self.sensor})
    variable.attrs.setdefault('units', '1')

    ancillary_names = variable.attrs.get('ancillary_variables', '')
    try:
        variable.attrs['ancillary_variables'] = ancillary_names.split()
    except AttributeError:
        pass

    if 'palette_meanings' in variable.attrs:
        variable.attrs['palette_meanings'] = [int(val)
                                              for val in variable.attrs['palette_meanings'].split()]
        if variable.attrs['palette_meanings'][0] == 1:
            variable.attrs['palette_meanings'] = [0] + variable.attrs['palette_meanings']
            variable = xr.DataArray(da.vstack((np.array(variable.attrs['fill_value_color']), variable.data)),
                                    coords=variable.coords, dims=variable.dims, attrs=variable.attrs)

        val, idx = np.unique(variable.attrs['palette_meanings'], return_index=True)
        variable.attrs['palette_meanings'] = val
        variable = variable[idx]

    if 'standard_name' in info:
        variable.attrs.setdefault('standard_name', info['standard_name'])
    if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti':
        # pps 2014 valid range and palette don't match
        variable.attrs['valid_range'] = (0., 9000.)
    if self.sw_version == 'NWC/PPS version v2014' and dsid.name == 'ctth_alti_pal':
        # pps 2014 palette has the nodata color (black) first
        variable = variable[1:, :]
    return variable
[ "def", "scale_dataset", "(", "self", ",", "dsid", ",", "variable", ",", "info", ")", ":", "variable", "=", "remove_empties", "(", "variable", ")", "scale", "=", "variable", ".", "attrs", ".", "get", "(", "'scale_factor'", ",", "np", ".", "array", "(", "1", ")", ")", "offset", "=", "variable", ".", "attrs", ".", "get", "(", "'add_offset'", ",", "np", ".", "array", "(", "0", ")", ")", "if", "np", ".", "issubdtype", "(", "(", "scale", "+", "offset", ")", ".", "dtype", ",", "np", ".", "floating", ")", "or", "np", ".", "issubdtype", "(", "variable", ".", "dtype", ",", "np", ".", "floating", ")", ":", "if", "'_FillValue'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", "!=", "variable", ".", "attrs", "[", "'_FillValue'", "]", ")", "variable", ".", "attrs", "[", "'_FillValue'", "]", "=", "np", ".", "nan", "if", "'valid_range'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", "<=", "variable", ".", "attrs", "[", "'valid_range'", "]", "[", "1", "]", ")", "variable", "=", "variable", ".", "where", "(", "variable", ">=", "variable", ".", "attrs", "[", "'valid_range'", "]", "[", "0", "]", ")", "if", "'valid_max'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", "<=", "variable", ".", "attrs", "[", "'valid_max'", "]", ")", "if", "'valid_min'", "in", "variable", ".", "attrs", ":", "variable", "=", "variable", ".", "where", "(", "variable", ">=", "variable", ".", "attrs", "[", "'valid_min'", "]", ")", "attrs", "=", "variable", ".", "attrs", "variable", "=", "variable", "*", "scale", "+", "offset", "variable", ".", "attrs", "=", "attrs", "variable", ".", "attrs", ".", "update", "(", "{", "'platform_name'", ":", "self", ".", "platform_name", ",", "'sensor'", ":", "self", ".", "sensor", "}", ")", "variable", ".", "attrs", ".", "setdefault", "(", "'units'", ",", "'1'", ")", "ancillary_names", "=", "variable", ".", "attrs", ".", "get", "(", "'ancillary_variables'", ",", "''", ")", "try", ":", "variable", ".", "attrs", "[", "'ancillary_variables'", "]", "=", "ancillary_names", ".", "split", "(", ")", "except", "AttributeError", ":", "pass", "if", "'palette_meanings'", "in", "variable", ".", "attrs", ":", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "=", "[", "int", "(", "val", ")", "for", "val", "in", "variable", ".", "attrs", "[", "'palette_meanings'", "]", ".", "split", "(", ")", "]", "if", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "[", "0", "]", "==", "1", ":", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "=", "[", "0", "]", "+", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "variable", "=", "xr", ".", "DataArray", "(", "da", ".", "vstack", "(", "(", "np", ".", "array", "(", "variable", ".", "attrs", "[", "'fill_value_color'", "]", ")", ",", "variable", ".", "data", ")", ")", ",", "coords", "=", "variable", ".", "coords", ",", "dims", "=", "variable", ".", "dims", ",", "attrs", "=", "variable", ".", "attrs", ")", "val", ",", "idx", "=", "np", ".", "unique", "(", "variable", ".", "attrs", "[", "'palette_meanings'", "]", ",", "return_index", "=", "True", ")", "variable", ".", "attrs", "[", "'palette_meanings'", "]", "=", "val", "variable", "=", "variable", "[", "idx", "]", "if", "'standard_name'", "in", "info", ":", "variable", ".", "attrs", ".", "setdefault", "(", "'standard_name'", ",", "info", "[", "'standard_name'", "]", ")", "if", "self", ".", "sw_version", "==", "'NWC/PPS version v2014'", "and", "dsid", 
".", "name", "==", "'ctth_alti'", ":", "# pps 2014 valid range and palette don't match", "variable", ".", "attrs", "[", "'valid_range'", "]", "=", "(", "0.", ",", "9000.", ")", "if", "self", ".", "sw_version", "==", "'NWC/PPS version v2014'", "and", "dsid", ".", "name", "==", "'ctth_alti_pal'", ":", "# pps 2014 palette has the nodata color (black) first", "variable", "=", "variable", "[", "1", ":", ",", ":", "]", "return", "variable" ]
49.288136
23.220339
def _append_hdu_info(self, ext):
    """
    internal routine

    append info for indicated extension
    """
    # raised IOError if not found
    hdu_type = self._FITS.movabs_hdu(ext + 1)

    if hdu_type == IMAGE_HDU:
        hdu = ImageHDU(self._FITS, ext, **self.keys)
    elif hdu_type == BINARY_TBL:
        hdu = TableHDU(self._FITS, ext, **self.keys)
    elif hdu_type == ASCII_TBL:
        hdu = AsciiTableHDU(self._FITS, ext, **self.keys)
    else:
        mess = ("extension %s is of unknown type %s "
                "this is probably a bug")
        mess = mess % (ext, hdu_type)
        raise IOError(mess)

    self.hdu_list.append(hdu)
    self.hdu_map[ext] = hdu

    extname = hdu.get_extname()
    if not self.case_sensitive:
        extname = extname.lower()
    if extname != '':
        # this will guarantee we default to *first* version,
        # if version is not requested, using __getitem__
        if extname not in self.hdu_map:
            self.hdu_map[extname] = hdu

        ver = hdu.get_extver()
        if ver > 0:
            key = '%s-%s' % (extname, ver)
            self.hdu_map[key] = hdu
[ "def", "_append_hdu_info", "(", "self", ",", "ext", ")", ":", "# raised IOError if not found", "hdu_type", "=", "self", ".", "_FITS", ".", "movabs_hdu", "(", "ext", "+", "1", ")", "if", "hdu_type", "==", "IMAGE_HDU", ":", "hdu", "=", "ImageHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "elif", "hdu_type", "==", "BINARY_TBL", ":", "hdu", "=", "TableHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "elif", "hdu_type", "==", "ASCII_TBL", ":", "hdu", "=", "AsciiTableHDU", "(", "self", ".", "_FITS", ",", "ext", ",", "*", "*", "self", ".", "keys", ")", "else", ":", "mess", "=", "(", "\"extension %s is of unknown type %s \"", "\"this is probably a bug\"", ")", "mess", "=", "mess", "%", "(", "ext", ",", "hdu_type", ")", "raise", "IOError", "(", "mess", ")", "self", ".", "hdu_list", ".", "append", "(", "hdu", ")", "self", ".", "hdu_map", "[", "ext", "]", "=", "hdu", "extname", "=", "hdu", ".", "get_extname", "(", ")", "if", "not", "self", ".", "case_sensitive", ":", "extname", "=", "extname", ".", "lower", "(", ")", "if", "extname", "!=", "''", ":", "# this will guarantee we default to *first* version,", "# if version is not requested, using __getitem__", "if", "extname", "not", "in", "self", ".", "hdu_map", ":", "self", ".", "hdu_map", "[", "extname", "]", "=", "hdu", "ver", "=", "hdu", ".", "get_extver", "(", ")", "if", "ver", ">", "0", ":", "key", "=", "'%s-%s'", "%", "(", "extname", ",", "ver", ")", "self", ".", "hdu_map", "[", "key", "]", "=", "hdu" ]
32.052632
13.947368
def get_dimension_type(self, dim):
    """Get the type of the requested dimension.

    Type is determined by Dimension.type attribute or common
    type of the dimension values, otherwise None.

    Args:
        dimension: Dimension to look up by name or by index

    Returns:
        Declared type of values along the dimension
    """
    dim = self.get_dimension(dim)
    if dim is None:
        return None
    elif dim.type is not None:
        return dim.type
    elif dim in self.vdims:
        return np.float64
    return self.interface.dimension_type(self, dim)
[ "def", "get_dimension_type", "(", "self", ",", "dim", ")", ":", "dim", "=", "self", ".", "get_dimension", "(", "dim", ")", "if", "dim", "is", "None", ":", "return", "None", "elif", "dim", ".", "type", "is", "not", "None", ":", "return", "dim", ".", "type", "elif", "dim", "in", "self", ".", "vdims", ":", "return", "np", ".", "float64", "return", "self", ".", "interface", ".", "dimension_type", "(", "self", ",", "dim", ")" ]
30.95
17.15
def where(self, column_or_label, value_or_predicate=None, other=None):
    """
    Return a new ``Table`` containing rows where ``value_or_predicate``
    returns True for values in ``column_or_label``.

    Args:
        ``column_or_label``: A column of the ``Table`` either as a label
            (``str``) or an index (``int``). Can also be an array of booleans;
            only the rows where the array value is ``True`` are kept.

        ``value_or_predicate``: If a function, it is applied to every value
            in ``column_or_label``. Only the rows where ``value_or_predicate``
            returns True are kept. If a single value, only the rows where the
            values in ``column_or_label`` are equal to ``value_or_predicate``
            are kept.

        ``other``: Optional additional column label for ``value_or_predicate``
            to make pairwise comparisons. See the examples below for usage.
            When ``other`` is supplied, ``value_or_predicate`` must be a
            callable function.

    Returns:
        If ``value_or_predicate`` is a function, returns a new ``Table``
        containing only the rows where ``value_or_predicate(val)`` is True
        for the ``val``s in ``column_or_label``.

        If ``value_or_predicate`` is a value, returns a new ``Table``
        containing only the rows where the values in ``column_or_label``
        are equal to ``value_or_predicate``.

        If ``column_or_label`` is an array of booleans, returns a new
        ``Table`` containing only the rows where ``column_or_label`` is
        ``True``.

    >>> marbles = Table().with_columns(
    ...    "Color", make_array("Red", "Green", "Blue",
    ...                        "Red", "Green", "Green"),
    ...    "Shape", make_array("Round", "Rectangular", "Rectangular",
    ...                        "Round", "Rectangular", "Round"),
    ...    "Amount", make_array(4, 6, 12, 7, 9, 2),
    ...    "Price", make_array(1.30, 1.20, 2.00, 1.75, 0, 3.00))

    >>> marbles
    Color | Shape       | Amount | Price
    Red   | Round       | 4      | 1.3
    Green | Rectangular | 6      | 1.2
    Blue  | Rectangular | 12     | 2
    Red   | Round       | 7      | 1.75
    Green | Rectangular | 9      | 0
    Green | Round       | 2      | 3

    Use a value to select matching rows

    >>> marbles.where("Price", 1.3)
    Color | Shape | Amount | Price
    Red   | Round | 4      | 1.3

    In general, a higher order predicate function such as the functions in
    ``datascience.predicates.are`` can be used.

    >>> from datascience.predicates import are
    >>> # equivalent to previous example
    >>> marbles.where("Price", are.equal_to(1.3))
    Color | Shape | Amount | Price
    Red   | Round | 4      | 1.3

    >>> marbles.where("Price", are.above(1.5))
    Color | Shape       | Amount | Price
    Blue  | Rectangular | 12     | 2
    Red   | Round       | 7      | 1.75
    Green | Round       | 2      | 3

    Use the optional argument ``other`` to apply predicates to compare
    columns.

    >>> marbles.where("Price", are.above, "Amount")
    Color | Shape | Amount | Price
    Green | Round | 2      | 3

    >>> marbles.where("Price", are.equal_to, "Amount") # empty table
    Color | Shape | Amount | Price
    """
    column = self._get_column(column_or_label)
    if other is not None:
        assert callable(value_or_predicate), "Predicate required for 3-arg where"
        predicate = value_or_predicate
        other = self._get_column(other)
        column = [predicate(y)(x) for x, y in zip(column, other)]
    elif value_or_predicate is not None:
        if not callable(value_or_predicate):
            predicate = _predicates.are.equal_to(value_or_predicate)
        else:
            predicate = value_or_predicate
        column = [predicate(x) for x in column]
    return self.take(np.nonzero(column)[0])
[ "def", "where", "(", "self", ",", "column_or_label", ",", "value_or_predicate", "=", "None", ",", "other", "=", "None", ")", ":", "column", "=", "self", ".", "_get_column", "(", "column_or_label", ")", "if", "other", "is", "not", "None", ":", "assert", "callable", "(", "value_or_predicate", ")", ",", "\"Predicate required for 3-arg where\"", "predicate", "=", "value_or_predicate", "other", "=", "self", ".", "_get_column", "(", "other", ")", "column", "=", "[", "predicate", "(", "y", ")", "(", "x", ")", "for", "x", ",", "y", "in", "zip", "(", "column", ",", "other", ")", "]", "elif", "value_or_predicate", "is", "not", "None", ":", "if", "not", "callable", "(", "value_or_predicate", ")", ":", "predicate", "=", "_predicates", ".", "are", ".", "equal_to", "(", "value_or_predicate", ")", "else", ":", "predicate", "=", "value_or_predicate", "column", "=", "[", "predicate", "(", "x", ")", "for", "x", "in", "column", "]", "return", "self", ".", "take", "(", "np", ".", "nonzero", "(", "column", ")", "[", "0", "]", ")" ]
42.789474
20.894737
def equal_to_be(self, be_record):
    # type: (PathTableRecord) -> bool
    '''
    A method to compare a little-endian path table record to its
    big-endian counterpart.  This is used to ensure that the ISO is sane.

    Parameters:
     be_record - The big-endian object to compare with the little-endian
                 object.
    Returns:
     True if this record is equal to the big-endian record passed in,
     False otherwise.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('This Path Table Record is not yet initialized')

    if be_record.len_di != self.len_di or \
       be_record.xattr_length != self.xattr_length or \
       utils.swab_32bit(be_record.extent_location) != self.extent_location or \
       utils.swab_16bit(be_record.parent_directory_num) != self.parent_directory_num or \
       be_record.directory_identifier != self.directory_identifier:
        return False
    return True
[ "def", "equal_to_be", "(", "self", ",", "be_record", ")", ":", "# type: (PathTableRecord) -> bool", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'This Path Table Record is not yet initialized'", ")", "if", "be_record", ".", "len_di", "!=", "self", ".", "len_di", "or", "be_record", ".", "xattr_length", "!=", "self", ".", "xattr_length", "or", "utils", ".", "swab_32bit", "(", "be_record", ".", "extent_location", ")", "!=", "self", ".", "extent_location", "or", "utils", ".", "swab_16bit", "(", "be_record", ".", "parent_directory_num", ")", "!=", "self", ".", "parent_directory_num", "or", "be_record", ".", "directory_identifier", "!=", "self", ".", "directory_identifier", ":", "return", "False", "return", "True" ]
43.956522
26.652174
def upload_video(self, media, media_type, media_category=None, size=None, check_progress=False):
    """Uploads video file to Twitter servers in chunks. The file will be
    available to be attached to a status for 60 minutes.

    To attach to an update, pass a list of returned media ids to the
    :meth:`update_status` method using the ``media_ids`` param.

    Upload happens in 3 stages:
    - INIT call with size of media to be uploaded (in bytes). If this is
      more than 15mb, twitter will return error.
    - APPEND calls each with media chunk. This returns a 204 (No Content)
      if chunk is received.
    - FINALIZE call to complete media upload. This returns media_id to be
      used with status update.

    Twitter media upload api expects each chunk to be not more than 5mb.
    We are sending chunk of 1mb each.

    Docs:
    https://developer.twitter.com/en/docs/media/upload-media/uploading-media/chunked-media-upload
    """
    upload_url = 'https://upload.twitter.com/1.1/media/upload.json'
    if not size:
        media.seek(0, os.SEEK_END)
        size = media.tell()
        media.seek(0)

    # Stage 1: INIT call
    params = {
        'command': 'INIT',
        'media_type': media_type,
        'total_bytes': size,
        'media_category': media_category
    }
    response_init = self.post(upload_url, params=params)
    media_id = response_init['media_id']

    # Stage 2: APPEND calls with 1mb chunks
    segment_index = 0
    while True:
        data = media.read(1*1024*1024)
        if not data:
            break
        media_chunk = BytesIO()
        media_chunk.write(data)
        media_chunk.seek(0)

        params = {
            'command': 'APPEND',
            'media_id': media_id,
            'segment_index': segment_index,
            'media': media_chunk,
        }
        self.post(upload_url, params=params)
        segment_index += 1

    # Stage 3: FINALIZE call to complete upload
    params = {
        'command': 'FINALIZE',
        'media_id': media_id
    }
    response = self.post(upload_url, params=params)

    # Only get the status if explicity asked to
    # Default to False
    if check_progress:

        # Stage 4: STATUS call if still processing
        params = {
            'command': 'STATUS',
            'media_id': media_id
        }

        # added code to handle if media_category is NOT set and check_progress=True
        # the API will return a NoneType object in this case
        try:
            processing_state = response.get('processing_info').get('state')
        except AttributeError:
            return response

        if processing_state:
            while (processing_state == 'pending' or processing_state == 'in_progress'):
                # get the secs to wait
                check_after_secs = response.get('processing_info').get('check_after_secs')

                if check_after_secs:
                    sleep(check_after_secs)
                    response = self.get(upload_url, params=params)

                    # get new state after waiting
                    processing_state = response.get('processing_info').get('state')

    return response
[ "def", "upload_video", "(", "self", ",", "media", ",", "media_type", ",", "media_category", "=", "None", ",", "size", "=", "None", ",", "check_progress", "=", "False", ")", ":", "upload_url", "=", "'https://upload.twitter.com/1.1/media/upload.json'", "if", "not", "size", ":", "media", ".", "seek", "(", "0", ",", "os", ".", "SEEK_END", ")", "size", "=", "media", ".", "tell", "(", ")", "media", ".", "seek", "(", "0", ")", "# Stage 1: INIT call", "params", "=", "{", "'command'", ":", "'INIT'", ",", "'media_type'", ":", "media_type", ",", "'total_bytes'", ":", "size", ",", "'media_category'", ":", "media_category", "}", "response_init", "=", "self", ".", "post", "(", "upload_url", ",", "params", "=", "params", ")", "media_id", "=", "response_init", "[", "'media_id'", "]", "# Stage 2: APPEND calls with 1mb chunks", "segment_index", "=", "0", "while", "True", ":", "data", "=", "media", ".", "read", "(", "1", "*", "1024", "*", "1024", ")", "if", "not", "data", ":", "break", "media_chunk", "=", "BytesIO", "(", ")", "media_chunk", ".", "write", "(", "data", ")", "media_chunk", ".", "seek", "(", "0", ")", "params", "=", "{", "'command'", ":", "'APPEND'", ",", "'media_id'", ":", "media_id", ",", "'segment_index'", ":", "segment_index", ",", "'media'", ":", "media_chunk", ",", "}", "self", ".", "post", "(", "upload_url", ",", "params", "=", "params", ")", "segment_index", "+=", "1", "# Stage 3: FINALIZE call to complete upload", "params", "=", "{", "'command'", ":", "'FINALIZE'", ",", "'media_id'", ":", "media_id", "}", "response", "=", "self", ".", "post", "(", "upload_url", ",", "params", "=", "params", ")", "# Only get the status if explicity asked to", "# Default to False", "if", "check_progress", ":", "# Stage 4: STATUS call if still processing", "params", "=", "{", "'command'", ":", "'STATUS'", ",", "'media_id'", ":", "media_id", "}", "# added code to handle if media_category is NOT set and check_progress=True", "# the API will return a NoneType object in this case", "try", ":", "processing_state", "=", "response", ".", "get", "(", "'processing_info'", ")", ".", "get", "(", "'state'", ")", "except", "AttributeError", ":", "return", "response", "if", "processing_state", ":", "while", "(", "processing_state", "==", "'pending'", "or", "processing_state", "==", "'in_progress'", ")", ":", "# get the secs to wait", "check_after_secs", "=", "response", ".", "get", "(", "'processing_info'", ")", ".", "get", "(", "'check_after_secs'", ")", "if", "check_after_secs", ":", "sleep", "(", "check_after_secs", ")", "response", "=", "self", ".", "get", "(", "upload_url", ",", "params", "=", "params", ")", "# get new state after waiting", "processing_state", "=", "response", ".", "get", "(", "'processing_info'", ")", ".", "get", "(", "'state'", ")", "return", "response" ]
37.965909
23.306818
def centerdc_2_twosided(data):
    """Convert a center-dc PSD to a twosided PSD"""
    N = len(data)
    newpsd = np.concatenate((data[N//2:], (cshift(data[0:N//2], -1))))
    return newpsd
[ "def", "centerdc_2_twosided", "(", "data", ")", ":", "N", "=", "len", "(", "data", ")", "newpsd", "=", "np", ".", "concatenate", "(", "(", "data", "[", "N", "//", "2", ":", "]", ",", "(", "cshift", "(", "data", "[", "0", ":", "N", "//", "2", "]", ",", "-", "1", ")", ")", ")", ")", "return", "newpsd" ]
37
17.2
def measurements_to_bf(measurements: np.ndarray) -> float:
    """
    Convert measurements into gradient binary fraction.

    :param measurements: Output measurements of gradient program.
    :return: Binary fraction representation of gradient estimate.
    """
    try:
        measurements.sum(axis=0)
    except AttributeError:
        measurements = np.asarray(measurements)
    finally:
        stats = measurements.sum(axis=0) / len(measurements)

    stats_str = [str(int(i)) for i in np.round(stats[::-1][1:])]
    bf_str = '0.' + ''.join(stats_str)
    bf = float(bf_str)

    return bf
[ "def", "measurements_to_bf", "(", "measurements", ":", "np", ".", "ndarray", ")", "->", "float", ":", "try", ":", "measurements", ".", "sum", "(", "axis", "=", "0", ")", "except", "AttributeError", ":", "measurements", "=", "np", ".", "asarray", "(", "measurements", ")", "finally", ":", "stats", "=", "measurements", ".", "sum", "(", "axis", "=", "0", ")", "/", "len", "(", "measurements", ")", "stats_str", "=", "[", "str", "(", "int", "(", "i", ")", ")", "for", "i", "in", "np", ".", "round", "(", "stats", "[", ":", ":", "-", "1", "]", "[", "1", ":", "]", ")", "]", "bf_str", "=", "'0.'", "+", "''", ".", "join", "(", "stats_str", ")", "bf", "=", "float", "(", "bf_str", ")", "return", "bf" ]
30.473684
20.157895
def save(self, filename, clobber=True, **kwargs):
    """Save the `Spectrum1D` object to the specified filename.

    :param filename:
        The filename to save the Spectrum1D object to.

    :type filename: str

    :param clobber: [optional]
        Whether to overwrite the `filename` if it already exists.

    :type clobber: bool

    :raises IOError:
        If the filename exists and we were not asked to clobber it.
    """
    if os.path.exists(filename) and not clobber:
        raise IOError("filename '{0}' exists and we have been asked not to"\
            " clobber it".format(filename))

    if not filename.endswith('fits'):
        # ASCII
        data = np.hstack([
            self.disp.reshape(-1, 1),
            self.flux.reshape(-1, 1),
            self.variance.reshape(-1, 1)
            ])
        return np.savetxt(filename, data, **kwargs)

    else:
        # Create a tabular FITS format
        disp = fits.Column(name='disp', format='1D', array=self.disp)
        flux = fits.Column(name='flux', format='1D', array=self.flux)
        var = fits.Column(name='variance', format='1D', array=self.variance)
        table_hdu = fits.new_table([disp, flux, var])

        # Create Primary HDU
        hdu = fits.PrimaryHDU()

        # Update primary HDU with headers
        for key, value in self.headers.iteritems():
            if len(key) > 8:
                # To deal with ESO compatibility
                hdu.header.update('HIERARCH {}'.format(key), value)
            try:
                hdu.header.update(key, value)
            except ValueError:
                logger.warn("Could not save header key/value combination: "\
                    "{0} = {1}".format(key, value))

        # Create HDU list with our tables
        hdulist = fits.HDUList([hdu, table_hdu])
        return hdulist.writeto(filename, clobber=clobber, **kwargs)
[ "def", "save", "(", "self", ",", "filename", ",", "clobber", "=", "True", ",", "*", "*", "kwargs", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", "and", "not", "clobber", ":", "raise", "IOError", "(", "\"filename '{0}' exists and we have been asked not to\"", "\" clobber it\"", ".", "format", "(", "filename", ")", ")", "if", "not", "filename", ".", "endswith", "(", "'fits'", ")", ":", "# ASCII", "data", "=", "np", ".", "hstack", "(", "[", "self", ".", "disp", ".", "reshape", "(", "-", "1", ",", "1", ")", ",", "self", ".", "flux", ".", "reshape", "(", "-", "1", ",", "1", ")", ",", "self", ".", "variance", ".", "reshape", "(", "-", "1", ",", "1", ")", "]", ")", "return", "np", ".", "savetxt", "(", "filename", ",", "data", ",", "*", "*", "kwargs", ")", "else", ":", "# Create a tabular FITS format", "disp", "=", "fits", ".", "Column", "(", "name", "=", "'disp'", ",", "format", "=", "'1D'", ",", "array", "=", "self", ".", "disp", ")", "flux", "=", "fits", ".", "Column", "(", "name", "=", "'flux'", ",", "format", "=", "'1D'", ",", "array", "=", "self", ".", "flux", ")", "var", "=", "fits", ".", "Column", "(", "name", "=", "'variance'", ",", "format", "=", "'1D'", ",", "array", "=", "self", ".", "variance", ")", "table_hdu", "=", "fits", ".", "new_table", "(", "[", "disp", ",", "flux", ",", "var", "]", ")", "# Create Primary HDU", "hdu", "=", "fits", ".", "PrimaryHDU", "(", ")", "# Update primary HDU with headers", "for", "key", ",", "value", "in", "self", ".", "headers", ".", "iteritems", "(", ")", ":", "if", "len", "(", "key", ")", ">", "8", ":", "# To deal with ESO compatibility", "hdu", ".", "header", ".", "update", "(", "'HIERARCH {}'", ".", "format", "(", "key", ")", ",", "value", ")", "try", ":", "hdu", ".", "header", ".", "update", "(", "key", ",", "value", ")", "except", "ValueError", ":", "logger", ".", "warn", "(", "\"Could not save header key/value combination: \"", "\"{0} = {1}\"", ".", "format", "(", "key", ",", "value", ")", ")", "# Create HDU list with our tables", "hdulist", "=", "fits", ".", "HDUList", "(", "[", "hdu", ",", "table_hdu", "]", ")", "return", "hdulist", ".", "writeto", "(", "filename", ",", "clobber", "=", "clobber", ",", "*", "*", "kwargs", ")" ]
35.982456
20.684211
def getclusters(self, count):
    """
    Generates *count* clusters.

    :param count: The amount of clusters that should be generated.  count
        must be greater than ``1``.
    :raises ClusteringError: if *count* is out of bounds.
    """
    # only proceed if we got sensible input
    if count <= 1:
        raise ClusteringError("When clustering, you need to ask for at "
                              "least two clusters! "
                              "You asked for %d" % count)

    # return the data straight away if there is nothing to cluster
    if (self.__data == [] or len(self.__data) == 1 or
            count == self.__initial_length):
        return self.__data

    # It makes no sense to ask for more clusters than data-items available
    if count > self.__initial_length:
        raise ClusteringError(
            "Unable to generate more clusters than "
            "items available. You supplied %d items, and asked for "
            "%d clusters." % (self.__initial_length, count))

    self.initialise_clusters(self.__data, count)

    items_moved = True  # tells us if any item moved between the clusters,
                        # as we initialised the clusters, we assume that
                        # is the case

    while items_moved is True:
        items_moved = False
        for cluster in self.__clusters:
            for item in cluster:
                res = self.assign_item(item, cluster)
                if items_moved is False:
                    items_moved = res
    return self.__clusters
[ "def", "getclusters", "(", "self", ",", "count", ")", ":", "# only proceed if we got sensible input", "if", "count", "<=", "1", ":", "raise", "ClusteringError", "(", "\"When clustering, you need to ask for at \"", "\"least two clusters! \"", "\"You asked for %d\"", "%", "count", ")", "# return the data straight away if there is nothing to cluster", "if", "(", "self", ".", "__data", "==", "[", "]", "or", "len", "(", "self", ".", "__data", ")", "==", "1", "or", "count", "==", "self", ".", "__initial_length", ")", ":", "return", "self", ".", "__data", "# It makes no sense to ask for more clusters than data-items available", "if", "count", ">", "self", ".", "__initial_length", ":", "raise", "ClusteringError", "(", "\"Unable to generate more clusters than \"", "\"items available. You supplied %d items, and asked for \"", "\"%d clusters.\"", "%", "(", "self", ".", "__initial_length", ",", "count", ")", ")", "self", ".", "initialise_clusters", "(", "self", ".", "__data", ",", "count", ")", "items_moved", "=", "True", "# tells us if any item moved between the clusters,", "# as we initialised the clusters, we assume that", "# is the case", "while", "items_moved", "is", "True", ":", "items_moved", "=", "False", "for", "cluster", "in", "self", ".", "__clusters", ":", "for", "item", "in", "cluster", ":", "res", "=", "self", ".", "assign_item", "(", "item", ",", "cluster", ")", "if", "items_moved", "is", "False", ":", "items_moved", "=", "res", "return", "self", ".", "__clusters" ]
39.95122
18.926829
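The convergence loop in getclusters is the generic "repeat until no item moves" pattern. A minimal standalone sketch of that pattern, with assign_item passed in as a callable (illustrative only; the library's Cluster objects and the semantics of assign_item are assumed, not shown here):

def converge(clusters, assign_item):
    # Keep sweeping until a full pass moves nothing.
    items_moved = True
    while items_moved:
        items_moved = False
        for cluster in clusters:
            # Iterate over a copy, since assign_item may mutate the cluster.
            for item in list(cluster):
                if assign_item(item, cluster):
                    items_moved = True
    return clusters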
def warning(lineno, msg):
    """ Generic warning routine: prints the message with file and line
    information and increments the global warning counter.
    """
    msg = "%s:%i: warning: %s" % (global_.FILENAME, lineno, msg)
    msg_output(msg)
    global_.has_warnings += 1
[ "def", "warning", "(", "lineno", ",", "msg", ")", ":", "msg", "=", "\"%s:%i: warning: %s\"", "%", "(", "global_", ".", "FILENAME", ",", "lineno", ",", "msg", ")", "msg_output", "(", "msg", ")", "global_", ".", "has_warnings", "+=", "1" ]
30.166667
11.833333
def flatten(x):
    """flatten(sequence) -> generator

    Yields, one at a time, every element retrieved from the sequence and
    from all recursively contained sub-sequences (iterables). Strings and
    bytes are treated as atoms. Wrap the call in ``list()`` to materialise
    the result.

    Examples:
    >>> list(flatten([1, 2, [3,4], (5,6)]))
    [1, 2, 3, 4, 5, 6]
    >>> list(flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)]))
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
    """
    for el in x:
        if hasattr(el, "__iter__") and not isinstance(el, (binary, unicode)):
            for els in flatten(el):
                yield els
        else:
            yield el
[ "def", "flatten", "(", "x", ")", ":", "for", "el", "in", "x", ":", "if", "hasattr", "(", "el", ",", "\"__iter__\"", ")", "and", "not", "isinstance", "(", "el", ",", "(", "binary", ",", "unicode", ")", ")", ":", "for", "els", "in", "flatten", "(", "el", ")", ":", "yield", "els", "else", ":", "yield", "el" ]
28.7
22.05
def ratio(value, decimal_places=0, failure_string='N/A'):
    """
    Converts a floating point value to a X:1 ratio.

    Number of decimal places is set by the `decimal_places` kwarg.
    Default is zero.
    """
    try:
        f = float(value)
    except ValueError:
        return failure_string
    return _saferound(f, decimal_places) + ':1'
[ "def", "ratio", "(", "value", ",", "decimal_places", "=", "0", ",", "failure_string", "=", "'N/A'", ")", ":", "try", ":", "f", "=", "float", "(", "value", ")", "except", "ValueError", ":", "return", "failure_string", "return", "_saferound", "(", "f", ",", "decimal_places", ")", "+", "':1'" ]
29.727273
16.272727
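Expected behaviour, sketched with a stand-in for the private _saferound helper (assumed here to round to the given places and return a string; the real helper may handle edge cases differently):

def _saferound(value, decimal_places):
    # Stand-in for the real helper: round, then format with fixed places.
    return '{:.{p}f}'.format(round(value, decimal_places), p=decimal_places)

def ratio(value, decimal_places=0, failure_string='N/A'):
    try:
        f = float(value)
    except ValueError:
        return failure_string
    return _saferound(f, decimal_places) + ':1'

print(ratio(3.7))       # -> '4:1'
print(ratio(3.75, 1))   # -> '3.8:1'
print(ratio('oops'))    # -> 'N/A'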
def _generateFirstOrder0():
  """ Generate the initial, first order, and second order transition
  probabilities for 'probability0'. For this model, we generate the following
  set of sequences:

          .1         .75
      0------1-----------2
       \      \
        \      \   .25
         \      \--------3
          \
           \ .9         .5
            \--- 4-----------2
                  \
                   \    .5
                    \---------3


  Returns:
  ----------------------------------------------------------------------
  retval: (initProb, firstOrder, secondOrder, seqLen, categoryList)
            initProb: Initial probability for each category. This is a vector
                        of length len(categoryList).
            firstOrder: A dictionary of the 1st order probabilities. The key
                        is the 1st element of the sequence, the value is
                        the probability of each 2nd element given the first.
            secondOrder: A dictionary of the 2nd order probabilities. The key
                        is the first 2 elements of the sequence, the value is
                        the probability of each possible 3rd element given the
                        first two.
            seqLen: Desired length of each sequence. The 1st element will
                      be generated using the initProb, the 2nd element by the
                      firstOrder table, and the 3rd and all successive elements
                      by the secondOrder table.
            categoryList: list of category names to use


  Here is an example of some return values when there are 3 categories
  initProb:         [0.7, 0.2, 0.1]

  firstOrder:       {'[0]': [0.3, 0.3, 0.4],
                     '[1]': [0.3, 0.3, 0.4],
                     '[2]': [0.3, 0.3, 0.4]}

  secondOrder:      {'[0,0]': [0.3, 0.3, 0.4],
                     '[0,1]': [0.3, 0.3, 0.4],
                     '[0,2]': [0.3, 0.3, 0.4],
                     '[1,0]': [0.3, 0.3, 0.4],
                     '[1,1]': [0.3, 0.3, 0.4],
                     '[1,2]': [0.3, 0.3, 0.4],
                     '[2,0]': [0.3, 0.3, 0.4],
                     '[2,1]': [0.3, 0.3, 0.4],
                     '[2,2]': [0.3, 0.3, 0.4]}
  """

  # --------------------------------------------------------------------
  # Initial probabilities: the model always starts in category 0
  numCategories = 5
  initProb = numpy.zeros(numCategories)
  initProb[0] = 1.0

  # --------------------------------------------------------------------
  # 1st order transitions
  firstOrder = dict()
  firstOrder['0'] = numpy.array([0, 0.1, 0, 0, 0.9])
  firstOrder['1'] = numpy.array([0, 0, 0.75, 0.25, 0])
  firstOrder['2'] = numpy.array([1.0, 0, 0, 0, 0])
  firstOrder['3'] = numpy.array([1.0, 0, 0, 0, 0])
  firstOrder['4'] = numpy.array([0, 0, 0.5, 0.5, 0])

  # --------------------------------------------------------------------
  # 2nd order transitions don't apply
  secondOrder = None

  # Generate the category list
  categoryList = ['%d' % x for x in range(5)]
  return (initProb, firstOrder, secondOrder, 3, categoryList)
[ "def", "_generateFirstOrder0", "(", ")", ":", "# --------------------------------------------------------------------", "# Initial probabilities, 'a' and 'e' equally likely", "numCategories", "=", "5", "initProb", "=", "numpy", ".", "zeros", "(", "numCategories", ")", "initProb", "[", "0", "]", "=", "1.0", "# --------------------------------------------------------------------", "# 1st order transitions", "firstOrder", "=", "dict", "(", ")", "firstOrder", "[", "'0'", "]", "=", "numpy", ".", "array", "(", "[", "0", ",", "0.1", ",", "0", ",", "0", ",", "0.9", "]", ")", "firstOrder", "[", "'1'", "]", "=", "numpy", ".", "array", "(", "[", "0", ",", "0", ",", "0.75", ",", "0.25", ",", "0", "]", ")", "firstOrder", "[", "'2'", "]", "=", "numpy", ".", "array", "(", "[", "1.0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ")", "firstOrder", "[", "'3'", "]", "=", "numpy", ".", "array", "(", "[", "1.0", ",", "0", ",", "0", ",", "0", ",", "0", "]", ")", "firstOrder", "[", "'4'", "]", "=", "numpy", ".", "array", "(", "[", "0", ",", "0", ",", "0.5", ",", "0.5", ",", "0", "]", ")", "# --------------------------------------------------------------------", "# 2nd order transitions don't apply", "secondOrder", "=", "None", "# Generate the category list", "categoryList", "=", "[", "'%d'", "%", "x", "for", "x", "in", "range", "(", "5", ")", "]", "return", "(", "initProb", ",", "firstOrder", ",", "secondOrder", ",", "3", ",", "categoryList", ")" ]
37.876543
22.209877
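Drawing a sequence from the returned tables is a two-step lookup: the first element comes from initProb, each later element from the firstOrder row of its predecessor. An illustrative sampler (sample_sequence is a name introduced here, not part of the module):

import numpy

def sample_sequence(initProb, firstOrder, seqLen=3):
    categories = sorted(firstOrder.keys())
    # First element from the initial distribution ...
    seq = [numpy.random.choice(categories, p=initProb)]
    # ... every later element from the row of its predecessor.
    while len(seq) < seqLen:
        seq.append(numpy.random.choice(categories, p=firstOrder[seq[-1]]))
    return seq

# With the tables above this always starts at '0', e.g. ['0', '4', '2'].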
def main(): """Simply merge all trajectories in the working directory""" folder = os.getcwd() print('Merging all files') merge_all_in_folder(folder, delete_other_files=True, # We will only keep one trajectory dynamic_imports=FunctionParameter, backup=False) print('Done')
[ "def", "main", "(", ")", ":", "folder", "=", "os", ".", "getcwd", "(", ")", "print", "(", "'Merging all files'", ")", "merge_all_in_folder", "(", "folder", ",", "delete_other_files", "=", "True", ",", "# We will only keep one trajectory", "dynamic_imports", "=", "FunctionParameter", ",", "backup", "=", "False", ")", "print", "(", "'Done'", ")" ]
39.555556
16.888889
def _citation_sort_key(t: EdgeTuple) -> str:
    """Make a confusing 4-tuple sortable by citation."""
    return '"{}", "{}"'.format(t[3][CITATION][CITATION_TYPE], t[3][CITATION][CITATION_REFERENCE])
[ "def", "_citation_sort_key", "(", "t", ":", "EdgeTuple", ")", "->", "str", ":", "return", "'\"{}\", \"{}\"'", ".", "format", "(", "t", "[", "3", "]", "[", "CITATION", "]", "[", "CITATION_TYPE", "]", ",", "t", "[", "3", "]", "[", "CITATION", "]", "[", "CITATION_REFERENCE", "]", ")" ]
65.666667
20.333333
def get_tau_at_quantile(mean, stddev, quantile): """ Returns the value of tau at a given quantile in the form of a dictionary organised by intensity measure """ tau_model = {} for imt in mean: tau_model[imt] = {} for key in mean[imt]: if quantile is None: tau_model[imt][key] = mean[imt][key] else: tau_model[imt][key] = _at_percentile(mean[imt][key], stddev[imt][key], quantile) return tau_model
[ "def", "get_tau_at_quantile", "(", "mean", ",", "stddev", ",", "quantile", ")", ":", "tau_model", "=", "{", "}", "for", "imt", "in", "mean", ":", "tau_model", "[", "imt", "]", "=", "{", "}", "for", "key", "in", "mean", "[", "imt", "]", ":", "if", "quantile", "is", "None", ":", "tau_model", "[", "imt", "]", "[", "key", "]", "=", "mean", "[", "imt", "]", "[", "key", "]", "else", ":", "tau_model", "[", "imt", "]", "[", "key", "]", "=", "_at_percentile", "(", "mean", "[", "imt", "]", "[", "key", "]", ",", "stddev", "[", "imt", "]", "[", "key", "]", ",", "quantile", ")", "return", "tau_model" ]
36.6875
16.1875
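A sketch of the call shape, using get_tau_at_quantile as defined above and a stand-in _at_percentile that reads the quantile off a normal distribution (an assumption about the real helper, made only so the example runs):

from scipy.stats import norm

def _at_percentile(mean, stddev, quantile):
    # Assumed behaviour: quantile of N(mean, stddev).
    return norm.ppf(quantile, loc=mean, scale=stddev)

mean = {'PGA': {'global': 0.35}}
stddev = {'PGA': {'global': 0.10}}
# quantile=None returns the means unchanged; 0.84 shifts roughly +1 sigma.
print(get_tau_at_quantile(mean, stddev, None))  # {'PGA': {'global': 0.35}}
print(get_tau_at_quantile(mean, stddev, 0.84))  # {'PGA': {'global': ~0.449}}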
def split(x, split_dim, num_or_size_splits, name=None): """Like tf.split. Args: x: a Tensor split_dim: a Dimension in x.shape.dims num_or_size_splits: either an integer dividing split_dim.size or a list of integers adding up to split_dim.size name: an optional string Returns: a list of Tensors. """ return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs
[ "def", "split", "(", "x", ",", "split_dim", ",", "num_or_size_splits", ",", "name", "=", "None", ")", ":", "return", "SplitOperation", "(", "x", ",", "split_dim", ",", "num_or_size_splits", ",", "name", "=", "name", ")", ".", "outputs" ]
30.769231
19.384615
def p_mp_createClass(p): """mp_createClass : classDeclaration """ # pylint: disable=too-many-branches,too-many-statements,too-many-locals ns = p.parser.handle.default_namespace cc = p[1] try: fixedNS = fixedRefs = fixedSuper = False while not fixedNS or not fixedRefs or not fixedSuper: try: if p.parser.verbose: p.parser.log( _format("Creating class {0!A}:{1!A}", ns, cc.classname)) p.parser.handle.CreateClass(cc) if p.parser.verbose: p.parser.log( _format("Created class {0!A}:{1!A}", ns, cc.classname)) p.parser.classnames[ns].append(cc.classname.lower()) break except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) errcode = ce.status_code if errcode == CIM_ERR_INVALID_NAMESPACE: if fixedNS: raise if p.parser.verbose: p.parser.log( _format("Creating namespace {0!A}", ns)) p.parser.server.create_namespace(ns) fixedNS = True continue if not p.parser.search_paths: raise if errcode == CIM_ERR_INVALID_SUPERCLASS: if fixedSuper: raise moffile = p.parser.mofcomp.find_mof(cc.superclass) if not moffile: raise p.parser.mofcomp.compile_file(moffile, ns) fixedSuper = True elif errcode in [CIM_ERR_INVALID_PARAMETER, CIM_ERR_NOT_FOUND, CIM_ERR_FAILED]: if fixedRefs: raise if not p.parser.qualcache[ns]: for fname in ['qualifiers', 'qualifiers_optional']: qualfile = p.parser.mofcomp.find_mof(fname) if qualfile: p.parser.mofcomp.compile_file(qualfile, ns) if not p.parser.qualcache[ns]: # can't find qualifiers raise objects = list(cc.properties.values()) for meth in cc.methods.values(): objects += list(meth.parameters.values()) dep_classes = NocaseDict() # dict dep_class, ce for obj in objects: if obj.type not in ['reference', 'string']: continue if obj.type == 'reference': if obj.reference_class not in dep_classes: dep_classes[obj.reference_class] = ce elif obj.type == 'string': try: embedded_inst = \ obj.qualifiers['embeddedinstance'] except KeyError: continue if embedded_inst.value not in dep_classes: dep_classes[embedded_inst.value] = ce continue for cln, err in dep_classes.items(): if cln in p.parser.classnames[ns]: continue try: # don't limit it with LocalOnly=True, # PropertyList, IncludeQualifiers=False, ... # because of caching in case we're using the # special WBEMConnection subclass used for # removing schema elements p.parser.handle.GetClass(cln, LocalOnly=False, IncludeQualifiers=True) p.parser.classnames[ns].append(cln) except CIMError: moffile = p.parser.mofcomp.find_mof(cln) if not moffile: raise err try: if p.parser.verbose: p.parser.log( _format("Class {0!A} namespace {1!A} " "depends on class {2!A} which " "is not in repository.", cc.classname, ns, cln)) p.parser.mofcomp.compile_file(moffile, ns) except CIMError as ce: if ce.status_code == CIM_ERR_NOT_FOUND: raise err raise p.parser.classnames[ns].append(cln) fixedRefs = True else: raise except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) if ce.status_code != CIM_ERR_ALREADY_EXISTS: raise if p.parser.verbose: p.parser.log( _format("Class {0!A} already exist. Modifying...", cc.classname)) try: p.parser.handle.ModifyClass(cc, ns) except CIMError as ce: p.parser.log( _format("Error modifying class {0!A}: {1}, {2}", cc.classname, ce.status_code, ce.status_description))
[ "def", "p_mp_createClass", "(", "p", ")", ":", "# pylint: disable=too-many-branches,too-many-statements,too-many-locals", "ns", "=", "p", ".", "parser", ".", "handle", ".", "default_namespace", "cc", "=", "p", "[", "1", "]", "try", ":", "fixedNS", "=", "fixedRefs", "=", "fixedSuper", "=", "False", "while", "not", "fixedNS", "or", "not", "fixedRefs", "or", "not", "fixedSuper", ":", "try", ":", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Creating class {0!A}:{1!A}\"", ",", "ns", ",", "cc", ".", "classname", ")", ")", "p", ".", "parser", ".", "handle", ".", "CreateClass", "(", "cc", ")", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Created class {0!A}:{1!A}\"", ",", "ns", ",", "cc", ".", "classname", ")", ")", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ".", "append", "(", "cc", ".", "classname", ".", "lower", "(", ")", ")", "break", "except", "CIMError", "as", "ce", ":", "ce", ".", "file_line", "=", "(", "p", ".", "parser", ".", "file", ",", "p", ".", "lexer", ".", "lineno", ")", "errcode", "=", "ce", ".", "status_code", "if", "errcode", "==", "CIM_ERR_INVALID_NAMESPACE", ":", "if", "fixedNS", ":", "raise", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Creating namespace {0!A}\"", ",", "ns", ")", ")", "p", ".", "parser", ".", "server", ".", "create_namespace", "(", "ns", ")", "fixedNS", "=", "True", "continue", "if", "not", "p", ".", "parser", ".", "search_paths", ":", "raise", "if", "errcode", "==", "CIM_ERR_INVALID_SUPERCLASS", ":", "if", "fixedSuper", ":", "raise", "moffile", "=", "p", ".", "parser", ".", "mofcomp", ".", "find_mof", "(", "cc", ".", "superclass", ")", "if", "not", "moffile", ":", "raise", "p", ".", "parser", ".", "mofcomp", ".", "compile_file", "(", "moffile", ",", "ns", ")", "fixedSuper", "=", "True", "elif", "errcode", "in", "[", "CIM_ERR_INVALID_PARAMETER", ",", "CIM_ERR_NOT_FOUND", ",", "CIM_ERR_FAILED", "]", ":", "if", "fixedRefs", ":", "raise", "if", "not", "p", ".", "parser", ".", "qualcache", "[", "ns", "]", ":", "for", "fname", "in", "[", "'qualifiers'", ",", "'qualifiers_optional'", "]", ":", "qualfile", "=", "p", ".", "parser", ".", "mofcomp", ".", "find_mof", "(", "fname", ")", "if", "qualfile", ":", "p", ".", "parser", ".", "mofcomp", ".", "compile_file", "(", "qualfile", ",", "ns", ")", "if", "not", "p", ".", "parser", ".", "qualcache", "[", "ns", "]", ":", "# can't find qualifiers", "raise", "objects", "=", "list", "(", "cc", ".", "properties", ".", "values", "(", ")", ")", "for", "meth", "in", "cc", ".", "methods", ".", "values", "(", ")", ":", "objects", "+=", "list", "(", "meth", ".", "parameters", ".", "values", "(", ")", ")", "dep_classes", "=", "NocaseDict", "(", ")", "# dict dep_class, ce", "for", "obj", "in", "objects", ":", "if", "obj", ".", "type", "not", "in", "[", "'reference'", ",", "'string'", "]", ":", "continue", "if", "obj", ".", "type", "==", "'reference'", ":", "if", "obj", ".", "reference_class", "not", "in", "dep_classes", ":", "dep_classes", "[", "obj", ".", "reference_class", "]", "=", "ce", "elif", "obj", ".", "type", "==", "'string'", ":", "try", ":", "embedded_inst", "=", "obj", ".", "qualifiers", "[", "'embeddedinstance'", "]", "except", "KeyError", ":", "continue", "if", "embedded_inst", ".", "value", "not", "in", "dep_classes", ":", "dep_classes", "[", "embedded_inst", ".", "value", "]", "=", "ce", "continue", "for", "cln", ",", "err", "in", 
"dep_classes", ".", "items", "(", ")", ":", "if", "cln", "in", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ":", "continue", "try", ":", "# don't limit it with LocalOnly=True,", "# PropertyList, IncludeQualifiers=False, ...", "# because of caching in case we're using the", "# special WBEMConnection subclass used for", "# removing schema elements", "p", ".", "parser", ".", "handle", ".", "GetClass", "(", "cln", ",", "LocalOnly", "=", "False", ",", "IncludeQualifiers", "=", "True", ")", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ".", "append", "(", "cln", ")", "except", "CIMError", ":", "moffile", "=", "p", ".", "parser", ".", "mofcomp", ".", "find_mof", "(", "cln", ")", "if", "not", "moffile", ":", "raise", "err", "try", ":", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Class {0!A} namespace {1!A} \"", "\"depends on class {2!A} which \"", "\"is not in repository.\"", ",", "cc", ".", "classname", ",", "ns", ",", "cln", ")", ")", "p", ".", "parser", ".", "mofcomp", ".", "compile_file", "(", "moffile", ",", "ns", ")", "except", "CIMError", "as", "ce", ":", "if", "ce", ".", "status_code", "==", "CIM_ERR_NOT_FOUND", ":", "raise", "err", "raise", "p", ".", "parser", ".", "classnames", "[", "ns", "]", ".", "append", "(", "cln", ")", "fixedRefs", "=", "True", "else", ":", "raise", "except", "CIMError", "as", "ce", ":", "ce", ".", "file_line", "=", "(", "p", ".", "parser", ".", "file", ",", "p", ".", "lexer", ".", "lineno", ")", "if", "ce", ".", "status_code", "!=", "CIM_ERR_ALREADY_EXISTS", ":", "raise", "if", "p", ".", "parser", ".", "verbose", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Class {0!A} already exist. Modifying...\"", ",", "cc", ".", "classname", ")", ")", "try", ":", "p", ".", "parser", ".", "handle", ".", "ModifyClass", "(", "cc", ",", "ns", ")", "except", "CIMError", "as", "ce", ":", "p", ".", "parser", ".", "log", "(", "_format", "(", "\"Error modifying class {0!A}: {1}, {2}\"", ",", "cc", ".", "classname", ",", "ce", ".", "status_code", ",", "ce", ".", "status_description", ")", ")" ]
46.4
17.208
def check_distance_funciton_input(distance_func_name, netinfo):
    """
    Function checks distance_func_name. If it is specified as 'default',
    a default distance function is selected given the type of the network.

    Parameters
    ----------

    distance_func_name : str
        distance function name.

    netinfo : dict
        the output of utils.process_input

    Returns
    -------

    distance_func_name : str
        distance function name.
    """
    if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
        print('Default distance funciton specified. As network is binary, using Hamming')
        distance_func_name = 'hamming'
    elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
        distance_func_name = 'euclidean'
        print(
            'Default distance funciton specified. '
            'As network is weighted, using Euclidean')

    return distance_func_name
[ "def", "check_distance_funciton_input", "(", "distance_func_name", ",", "netinfo", ")", ":", "if", "distance_func_name", "==", "'default'", "and", "netinfo", "[", "'nettype'", "]", "[", "0", "]", "==", "'b'", ":", "print", "(", "'Default distance funciton specified. As network is binary, using Hamming'", ")", "distance_func_name", "=", "'hamming'", "elif", "distance_func_name", "==", "'default'", "and", "netinfo", "[", "'nettype'", "]", "[", "0", "]", "==", "'w'", ":", "distance_func_name", "=", "'euclidean'", "print", "(", "'Default distance funciton specified. '", "'As network is weighted, using Euclidean'", ")", "return", "distance_func_name" ]
30.3
25.366667
def _find_any(self, task_spec): """ Returns any descendants that have the given task spec assigned. :type task_spec: TaskSpec :param task_spec: The wanted task spec. :rtype: list(Task) :returns: The tasks objects that are attached to the given task spec. """ tasks = [] if self.task_spec == task_spec: tasks.append(self) for child in self: if child.task_spec != task_spec: continue tasks.append(child) return tasks
[ "def", "_find_any", "(", "self", ",", "task_spec", ")", ":", "tasks", "=", "[", "]", "if", "self", ".", "task_spec", "==", "task_spec", ":", "tasks", ".", "append", "(", "self", ")", "for", "child", "in", "self", ":", "if", "child", ".", "task_spec", "!=", "task_spec", ":", "continue", "tasks", ".", "append", "(", "child", ")", "return", "tasks" ]
31.823529
14.058824
def generate_map(map, name='url_map'):
    """
    Generates a JavaScript function containing the rules defined in
    this map, to be used with a MapAdapter's generate_javascript
    method.  If you don't pass a name the returned JavaScript code is
    an expression that returns a function.  Otherwise it's a standalone
    script that assigns the function with that name.  Dotted names are
    resolved (so you can use a name like 'obj.url_for').

    In order to use JavaScript generation, simplejson must be installed.

    Note that using this feature will expose the rules
    defined in your map to users. If your rules contain sensitive
    information, don't use JavaScript generation!
    """
    from warnings import warn
    warn(DeprecationWarning('This module is deprecated'))
    map.update()
    rules = []
    converters = []
    for rule in map.iter_rules():
        trace = [{
            'is_dynamic': is_dynamic,
            'data': data
        } for is_dynamic, data in rule._trace]
        rule_converters = {}
        for key, converter in iteritems(rule._converters):
            js_func = js_to_url_function(converter)
            try:
                index = converters.index(js_func)
            except ValueError:
                converters.append(js_func)
                index = len(converters) - 1
            rule_converters[key] = index
        rules.append({
            u'endpoint': rule.endpoint,
            u'arguments': list(rule.arguments),
            u'converters': rule_converters,
            u'trace': trace,
            u'defaults': rule.defaults
        })
    return render_template(name_parts=name and name.split('.') or [],
                           rules=dumps(rules),
                           converters=converters)
[ "def", "generate_map", "(", "map", ",", "name", "=", "'url_map'", ")", ":", "from", "warnings", "import", "warn", "warn", "(", "DeprecationWarning", "(", "'This module is deprecated'", ")", ")", "map", ".", "update", "(", ")", "rules", "=", "[", "]", "converters", "=", "[", "]", "for", "rule", "in", "map", ".", "iter_rules", "(", ")", ":", "trace", "=", "[", "{", "'is_dynamic'", ":", "is_dynamic", ",", "'data'", ":", "data", "}", "for", "is_dynamic", ",", "data", "in", "rule", ".", "_trace", "]", "rule_converters", "=", "{", "}", "for", "key", ",", "converter", "in", "iteritems", "(", "rule", ".", "_converters", ")", ":", "js_func", "=", "js_to_url_function", "(", "converter", ")", "try", ":", "index", "=", "converters", ".", "index", "(", "js_func", ")", "except", "ValueError", ":", "converters", ".", "append", "(", "js_func", ")", "index", "=", "len", "(", "converters", ")", "-", "1", "rule_converters", "[", "key", "]", "=", "index", "rules", ".", "append", "(", "{", "u'endpoint'", ":", "rule", ".", "endpoint", ",", "u'arguments'", ":", "list", "(", "rule", ".", "arguments", ")", ",", "u'converters'", ":", "rule_converters", ",", "u'trace'", ":", "trace", ",", "u'defaults'", ":", "rule", ".", "defaults", "}", ")", "return", "render_template", "(", "name_parts", "=", "name", "and", "name", ".", "split", "(", "'.'", ")", "or", "[", "]", ",", "rules", "=", "dumps", "(", "rules", ")", ",", "converters", "=", "converters", ")" ]
38.955556
15.622222
def get_section_by_sis_id(self, sis_section_id, params={}): """ Return section resource for given sis id. """ return self.get_section( self._sis_id(sis_section_id, sis_field="section"), params)
[ "def", "get_section_by_sis_id", "(", "self", ",", "sis_section_id", ",", "params", "=", "{", "}", ")", ":", "return", "self", ".", "get_section", "(", "self", ".", "_sis_id", "(", "sis_section_id", ",", "sis_field", "=", "\"section\"", ")", ",", "params", ")" ]
38.666667
11
def _summary_sim(sim, pars, probs): """Summarize chains together and separately REF: rstan/rstan/R/misc.R Parameters are unraveled in *column-major order*. Parameters ---------- sim : dict dict from from a stanfit fit object, i.e., fit['sim'] pars : Iterable of str parameter names probs : Iterable of probs desired quantiles Returns ------- summaries : OrderedDict of array This dictionary contains the following arrays indexed by the keys given below: - 'msd' : array of shape (num_params, 2) with mean and sd - 'sem' : array of length num_params with standard error for the mean - 'c_msd' : array of shape (num_params, 2, num_chains) - 'quan' : array of shape (num_params, num_quan) - 'c_quan' : array of shape (num_params, num_quan, num_chains) - 'ess' : array of shape (num_params, 1) - 'rhat' : array of shape (num_params, 1) Note ---- `_summary_sim` has the parameters in *column-major* order whereas `_summary` gives them in *row-major* order. (This follows RStan.) """ # NOTE: this follows RStan rather closely. Some of the calculations here probs_len = len(probs) n_chains = len(sim['samples']) # tidx is a dict with keys that are parameters and values that are their # indices using column-major ordering tidx = _pars_total_indexes(sim['pars_oi'], sim['dims_oi'], sim['fnames_oi'], pars) tidx_colm = [tidx[par] for par in pars] tidx_colm = list(itertools.chain(*tidx_colm)) # like R's unlist() tidx_rowm = [tidx[par+'_rowmajor'] for par in pars] tidx_rowm = list(itertools.chain(*tidx_rowm)) tidx_len = len(tidx_colm) lmsdq = [_get_par_summary(sim, i, probs) for i in tidx_colm] msd = np.row_stack([x['msd'] for x in lmsdq]) quan = np.row_stack([x['quan'] for x in lmsdq]) probs_str = tuple(["{:g}%".format(100*p) for p in probs]) msd.shape = (tidx_len, 2) quan.shape = (tidx_len, probs_len) c_msd = np.row_stack([x['c_msd'] for x in lmsdq]) c_quan = np.row_stack([x['c_quan'] for x in lmsdq]) c_msd.shape = (tidx_len, 2, n_chains) c_quan.shape = (tidx_len, probs_len, n_chains) sim_attr_args = sim.get('args', None) if sim_attr_args is None: cids = list(range(n_chains)) else: cids = [x['chain_id'] for x in sim_attr_args] c_msd_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm], stats=("mean", "sd"), chains=tuple("chain:{}".format(cid) for cid in cids)) c_quan_names = dict(parameters=np.asarray(sim['fnames_oi'])[tidx_colm], stats=probs_str, chains=tuple("chain:{}".format(cid) for cid in cids)) ess_and_rhat = np.array([pystan.chains.ess_and_splitrhat(sim, n) for n in tidx_colm]) ess, rhat = [arr.ravel() for arr in np.hsplit(ess_and_rhat, 2)] return dict(msd=msd, c_msd=c_msd, c_msd_names=c_msd_names, quan=quan, c_quan=c_quan, c_quan_names=c_quan_names, sem=msd[:, 1] / np.sqrt(ess), ess=ess, rhat=rhat, row_major_idx=tidx_rowm, col_major_idx=tidx_colm)
[ "def", "_summary_sim", "(", "sim", ",", "pars", ",", "probs", ")", ":", "# NOTE: this follows RStan rather closely. Some of the calculations here", "probs_len", "=", "len", "(", "probs", ")", "n_chains", "=", "len", "(", "sim", "[", "'samples'", "]", ")", "# tidx is a dict with keys that are parameters and values that are their", "# indices using column-major ordering", "tidx", "=", "_pars_total_indexes", "(", "sim", "[", "'pars_oi'", "]", ",", "sim", "[", "'dims_oi'", "]", ",", "sim", "[", "'fnames_oi'", "]", ",", "pars", ")", "tidx_colm", "=", "[", "tidx", "[", "par", "]", "for", "par", "in", "pars", "]", "tidx_colm", "=", "list", "(", "itertools", ".", "chain", "(", "*", "tidx_colm", ")", ")", "# like R's unlist()", "tidx_rowm", "=", "[", "tidx", "[", "par", "+", "'_rowmajor'", "]", "for", "par", "in", "pars", "]", "tidx_rowm", "=", "list", "(", "itertools", ".", "chain", "(", "*", "tidx_rowm", ")", ")", "tidx_len", "=", "len", "(", "tidx_colm", ")", "lmsdq", "=", "[", "_get_par_summary", "(", "sim", ",", "i", ",", "probs", ")", "for", "i", "in", "tidx_colm", "]", "msd", "=", "np", ".", "row_stack", "(", "[", "x", "[", "'msd'", "]", "for", "x", "in", "lmsdq", "]", ")", "quan", "=", "np", ".", "row_stack", "(", "[", "x", "[", "'quan'", "]", "for", "x", "in", "lmsdq", "]", ")", "probs_str", "=", "tuple", "(", "[", "\"{:g}%\"", ".", "format", "(", "100", "*", "p", ")", "for", "p", "in", "probs", "]", ")", "msd", ".", "shape", "=", "(", "tidx_len", ",", "2", ")", "quan", ".", "shape", "=", "(", "tidx_len", ",", "probs_len", ")", "c_msd", "=", "np", ".", "row_stack", "(", "[", "x", "[", "'c_msd'", "]", "for", "x", "in", "lmsdq", "]", ")", "c_quan", "=", "np", ".", "row_stack", "(", "[", "x", "[", "'c_quan'", "]", "for", "x", "in", "lmsdq", "]", ")", "c_msd", ".", "shape", "=", "(", "tidx_len", ",", "2", ",", "n_chains", ")", "c_quan", ".", "shape", "=", "(", "tidx_len", ",", "probs_len", ",", "n_chains", ")", "sim_attr_args", "=", "sim", ".", "get", "(", "'args'", ",", "None", ")", "if", "sim_attr_args", "is", "None", ":", "cids", "=", "list", "(", "range", "(", "n_chains", ")", ")", "else", ":", "cids", "=", "[", "x", "[", "'chain_id'", "]", "for", "x", "in", "sim_attr_args", "]", "c_msd_names", "=", "dict", "(", "parameters", "=", "np", ".", "asarray", "(", "sim", "[", "'fnames_oi'", "]", ")", "[", "tidx_colm", "]", ",", "stats", "=", "(", "\"mean\"", ",", "\"sd\"", ")", ",", "chains", "=", "tuple", "(", "\"chain:{}\"", ".", "format", "(", "cid", ")", "for", "cid", "in", "cids", ")", ")", "c_quan_names", "=", "dict", "(", "parameters", "=", "np", ".", "asarray", "(", "sim", "[", "'fnames_oi'", "]", ")", "[", "tidx_colm", "]", ",", "stats", "=", "probs_str", ",", "chains", "=", "tuple", "(", "\"chain:{}\"", ".", "format", "(", "cid", ")", "for", "cid", "in", "cids", ")", ")", "ess_and_rhat", "=", "np", ".", "array", "(", "[", "pystan", ".", "chains", ".", "ess_and_splitrhat", "(", "sim", ",", "n", ")", "for", "n", "in", "tidx_colm", "]", ")", "ess", ",", "rhat", "=", "[", "arr", ".", "ravel", "(", ")", "for", "arr", "in", "np", ".", "hsplit", "(", "ess_and_rhat", ",", "2", ")", "]", "return", "dict", "(", "msd", "=", "msd", ",", "c_msd", "=", "c_msd", ",", "c_msd_names", "=", "c_msd_names", ",", "quan", "=", "quan", ",", "c_quan", "=", "c_quan", ",", "c_quan_names", "=", "c_quan_names", ",", "sem", "=", "msd", "[", ":", ",", "1", "]", "/", "np", ".", "sqrt", "(", "ess", ")", ",", "ess", "=", "ess", ",", "rhat", "=", "rhat", ",", "row_major_idx", "=", "tidx_rowm", ",", "col_major_idx", 
"=", "tidx_colm", ")" ]
42.635135
21.013514
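The column-major vs row-major distinction in the note is ordinary Fortran- vs C-order flattening of a parameter's dims; a quick numpy illustration:

import numpy as np

idx = np.arange(6).reshape(2, 3)   # a parameter with dims (2, 3)
print(idx.ravel(order='F'))        # column-major (RStan-style): [0 3 1 4 2 5]
print(idx.ravel(order='C'))        # row-major: [0 1 2 3 4 5]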
def get_series_first_release(self, series_id): """ Get first-release data for a Fred series id. This ignores any revision to the data series. For instance, The US GDP for Q1 2014 was first released to be 17149.6, and then later revised to 17101.3, and 17016.0. This will ignore revisions after the first release. Parameters ---------- series_id : str Fred series id such as 'GDP' Returns ------- data : Series a Series where each index is the observation date and the value is the data for the Fred series """ df = self.get_series_all_releases(series_id) first_release = df.groupby('date').head(1) data = first_release.set_index('date')['value'] return data
[ "def", "get_series_first_release", "(", "self", ",", "series_id", ")", ":", "df", "=", "self", ".", "get_series_all_releases", "(", "series_id", ")", "first_release", "=", "df", ".", "groupby", "(", "'date'", ")", ".", "head", "(", "1", ")", "data", "=", "first_release", ".", "set_index", "(", "'date'", ")", "[", "'value'", "]", "return", "data" ]
39.2
25.2
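This method reads like part of the fredapi client; if so, usage is along these lines (the API key is a placeholder):

from fredapi import Fred

fred = Fred(api_key='YOUR_API_KEY')           # placeholder key
gdp = fred.get_series_first_release('GDP')    # Series indexed by observation date
print(gdp.loc['2014-01-01'])                  # 17149.6 for Q1 2014, per the docstring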
def auth_tls(self, mount_point='cert', use_token=True):
        """POST /auth/<mount point>/login

        :param mount_point: The path the TLS certificate auth backend was
            mounted at, e.g. 'cert'.
        :type mount_point: str
        :param use_token: If True, store the token returned by the login on
            the client for use in subsequent requests.
        :type use_token: bool
        :return: The response of the login request.
        :rtype: dict
        """
        return self.login('/v1/auth/{0}/login'.format(mount_point), use_token=use_token)
[ "def", "auth_tls", "(", "self", ",", "mount_point", "=", "'cert'", ",", "use_token", "=", "True", ")", ":", "return", "self", ".", "login", "(", "'/v1/auth/{0}/login'", ".", "format", "(", "mount_point", ")", ",", "use_token", "=", "use_token", ")" ]
29.818182
19.090909
def function_exists(FunctionName, region=None, key=None, keyid=None, profile=None): ''' Given a function name, check to see if the given function name exists. Returns True if the given function exists and returns False if the given function does not exist. CLI Example: .. code-block:: bash salt myminion boto_lambda.function_exists myfunction ''' try: func = _find_function(FunctionName, region=region, key=key, keyid=keyid, profile=profile) return {'exists': bool(func)} except ClientError as e: return {'error': __utils__['boto3.get_error'](e)}
[ "def", "function_exists", "(", "FunctionName", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "try", ":", "func", "=", "_find_function", "(", "FunctionName", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "return", "{", "'exists'", ":", "bool", "(", "func", ")", "}", "except", "ClientError", "as", "e", ":", "return", "{", "'error'", ":", "__utils__", "[", "'boto3.get_error'", "]", "(", "e", ")", "}" ]
29.545455
26.363636
def three_cornered_hat_phase(phasedata_ab, phasedata_bc,
                             phasedata_ca, rate, taus, function):
    """
    Three Cornered Hat Method

    Given three clocks A, B, C, we seek to find their variances
    :math:`\\sigma^2_A`, :math:`\\sigma^2_B`, :math:`\\sigma^2_C`.
    We measure three phase differences, assuming no correlation between
    the clocks, the measurements have variances:

    .. math::

        \\sigma^2_{AB} = \\sigma^2_{A} + \\sigma^2_{B}

        \\sigma^2_{BC} = \\sigma^2_{B} + \\sigma^2_{C}

        \\sigma^2_{CA} = \\sigma^2_{C} + \\sigma^2_{A}

    Which allows solving for the variance of one clock as:

    .. math::

        \\sigma^2_{A}  = {1 \\over 2} ( \\sigma^2_{AB} +
        \\sigma^2_{CA} - \\sigma^2_{BC} )

    and similarly cyclic permutations for :math:`\\sigma^2_B` and
    :math:`\\sigma^2_C`

    Parameters
    ----------
    phasedata_ab: np.array
        phase measurements between clock A and B, in seconds
    phasedata_bc: np.array
        phase measurements between clock B and C, in seconds
    phasedata_ca: np.array
        phase measurements between clock C and A, in seconds
    rate: float
        The sampling rate for phase, in Hz
    taus: np.array
        The tau values for deviations, in seconds
    function: allantools deviation function
        The type of statistic to compute, e.g. allantools.oadev

    Returns
    -------
    tau_ab: np.array
        Tau values corresponding to output deviations
    dev_a: np.array
        List of computed values for clock A
    err_a: np.array
        Estimated errors for the deviations of clock A, computed as
        dev_a / sqrt(n) with n the number of samples at each tau
    ns_ab: np.array
        Number of samples at each tau, from the A-B measurement

    References
    ----------
    http://www.wriley.com/3-CornHat.htm
    """
    (tau_ab, dev_ab, err_ab, ns_ab) = function(phasedata_ab,
                                               data_type='phase',
                                               rate=rate, taus=taus)
    (tau_bc, dev_bc, err_bc, ns_bc) = function(phasedata_bc,
                                               data_type='phase',
                                               rate=rate, taus=taus)
    (tau_ca, dev_ca, err_ca, ns_ca) = function(phasedata_ca,
                                               data_type='phase',
                                               rate=rate, taus=taus)

    var_ab = dev_ab * dev_ab
    var_bc = dev_bc * dev_bc
    var_ca = dev_ca * dev_ca
    assert len(var_ab) == len(var_bc) == len(var_ca)
    var_a = 0.5 * (var_ab + var_ca - var_bc)

    var_a[var_a < 0] = 0  # don't return imaginary deviations (?)
    dev_a = np.sqrt(var_a)
    err_a = [d/np.sqrt(nn) for (d, nn) in zip(dev_a, ns_ab)]

    return tau_ab, dev_a, err_a, ns_ab
[ "def", "three_cornered_hat_phase", "(", "phasedata_ab", ",", "phasedata_bc", ",", "phasedata_ca", ",", "rate", ",", "taus", ",", "function", ")", ":", "(", "tau_ab", ",", "dev_ab", ",", "err_ab", ",", "ns_ab", ")", "=", "function", "(", "phasedata_ab", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_bc", ",", "dev_bc", ",", "err_bc", ",", "ns_bc", ")", "=", "function", "(", "phasedata_bc", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "(", "tau_ca", ",", "dev_ca", ",", "err_ca", ",", "ns_ca", ")", "=", "function", "(", "phasedata_ca", ",", "data_type", "=", "'phase'", ",", "rate", "=", "rate", ",", "taus", "=", "taus", ")", "var_ab", "=", "dev_ab", "*", "dev_ab", "var_bc", "=", "dev_bc", "*", "dev_bc", "var_ca", "=", "dev_ca", "*", "dev_ca", "assert", "len", "(", "var_ab", ")", "==", "len", "(", "var_bc", ")", "==", "len", "(", "var_ca", ")", "var_a", "=", "0.5", "*", "(", "var_ab", "+", "var_ca", "-", "var_bc", ")", "var_a", "[", "var_a", "<", "0", "]", "=", "0", "# don't return imaginary deviations (?)", "dev_a", "=", "np", ".", "sqrt", "(", "var_a", ")", "err_a", "=", "[", "d", "/", "np", ".", "sqrt", "(", "nn", ")", "for", "(", "d", ",", "nn", ")", "in", "zip", "(", "dev_a", ",", "ns_ab", ")", "]", "return", "tau_ab", ",", "dev_a", ",", "err_a", ",", "ns_ab" ]
33.289474
22.236842
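This is the three-cornered-hat entry point in allantools; a hedged end-to-end sketch with synthetic random-walk phase data:

import numpy as np
import allantools

n = 10000
# Synthetic, uncorrelated random-walk phase for three clocks (seconds).
a, b, c = (np.cumsum(np.random.randn(n)) * 1e-9 for _ in range(3))

taus_out, dev_a, err_a, ns = allantools.three_cornered_hat_phase(
    a - b, b - c, c - a,
    rate=1.0, taus=np.array([1.0, 10.0, 100.0]),
    function=allantools.oadev)
print(dev_a)   # estimated deviation of clock A alone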
def get_aspect(self, xspan, yspan): """ Computes the aspect ratio of the plot """ if self.data_aspect: return (yspan/xspan)*self.data_aspect elif self.aspect == 'equal': return xspan/yspan elif self.aspect == 'square': return 1 elif self.aspect is not None: return self.aspect elif self.width is not None and self.height is not None: return self.width/self.height else: return 1
[ "def", "get_aspect", "(", "self", ",", "xspan", ",", "yspan", ")", ":", "if", "self", ".", "data_aspect", ":", "return", "(", "yspan", "/", "xspan", ")", "*", "self", ".", "data_aspect", "elif", "self", ".", "aspect", "==", "'equal'", ":", "return", "xspan", "/", "yspan", "elif", "self", ".", "aspect", "==", "'square'", ":", "return", "1", "elif", "self", ".", "aspect", "is", "not", "None", ":", "return", "self", ".", "aspect", "elif", "self", ".", "width", "is", "not", "None", "and", "self", ".", "height", "is", "not", "None", ":", "return", "self", ".", "width", "/", "self", ".", "height", "else", ":", "return", "1" ]
31.6875
9.5625
def formfield(self, *args, **kwargs):
        """
        Returns proper formfield, according to empty_values setting
        (only for ``forms.CharField`` subclasses).

        There are 3 different formfields:
        - CharField that stores all empty values as empty strings;
        - NullCharField that stores all empty values as None (Null);
        - NullableField that can store both None and empty string.

        By default, if no empty_values was specified in model's translation
        options, NullCharField would be used if the original field is
        nullable, CharField otherwise.

        This can be overridden by setting empty_values to '' or None.

        Setting 'both' will result in NullableField being used.
        Textual widgets (subclassing ``TextInput`` or ``Textarea``) used for
        nullable fields are enriched with a clear checkbox, allowing ``None``
        values to be preserved rather than saved as empty strings.

        The somewhat surprising behaviour of ``forms.CharField`` is
        documented as a "won't fix": https://code.djangoproject.com/ticket/9590.
        """
        formfield = super(TranslationField, self).formfield(*args, **kwargs)
        if isinstance(formfield, forms.CharField):
            if self.empty_value is None:
                from modeltranslation.forms import NullCharField
                form_class = formfield.__class__
                kwargs['form_class'] = type(
                    'Null%s' % form_class.__name__, (NullCharField, form_class), {})
                formfield = super(TranslationField, self).formfield(*args, **kwargs)
            elif self.empty_value == 'both':
                from modeltranslation.forms import NullableField
                form_class = formfield.__class__
                kwargs['form_class'] = type(
                    'Nullable%s' % form_class.__name__, (NullableField, form_class), {})
                formfield = super(TranslationField, self).formfield(*args, **kwargs)
                if isinstance(formfield.widget, (forms.TextInput, forms.Textarea)):
                    formfield.widget = ClearableWidgetWrapper(formfield.widget)
        return formfield
[ "def", "formfield", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "formfield", "=", "super", "(", "TranslationField", ",", "self", ")", ".", "formfield", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "formfield", ",", "forms", ".", "CharField", ")", ":", "if", "self", ".", "empty_value", "is", "None", ":", "from", "modeltranslation", ".", "forms", "import", "NullCharField", "form_class", "=", "formfield", ".", "__class__", "kwargs", "[", "'form_class'", "]", "=", "type", "(", "'Null%s'", "%", "form_class", ".", "__name__", ",", "(", "NullCharField", ",", "form_class", ")", ",", "{", "}", ")", "formfield", "=", "super", "(", "TranslationField", ",", "self", ")", ".", "formfield", "(", "*", "args", ",", "*", "*", "kwargs", ")", "elif", "self", ".", "empty_value", "==", "'both'", ":", "from", "modeltranslation", ".", "forms", "import", "NullableField", "form_class", "=", "formfield", ".", "__class__", "kwargs", "[", "'form_class'", "]", "=", "type", "(", "'Nullable%s'", "%", "form_class", ".", "__name__", ",", "(", "NullableField", ",", "form_class", ")", ",", "{", "}", ")", "formfield", "=", "super", "(", "TranslationField", ",", "self", ")", ".", "formfield", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "formfield", ".", "widget", ",", "(", "forms", ".", "TextInput", ",", "forms", ".", "Textarea", ")", ")", ":", "formfield", ".", "widget", "=", "ClearableWidgetWrapper", "(", "formfield", ".", "widget", ")", "return", "formfield" ]
53.375
25.775
def get_schema_input_format(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_schema = ET.Element("get_schema") config = get_schema input = ET.SubElement(get_schema, "input") format = ET.SubElement(input, "format") format.text = kwargs.pop('format') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_schema_input_format", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_schema", "=", "ET", ".", "Element", "(", "\"get_schema\"", ")", "config", "=", "get_schema", "input", "=", "ET", ".", "SubElement", "(", "get_schema", ",", "\"input\"", ")", "format", "=", "ET", ".", "SubElement", "(", "input", ",", "\"format\"", ")", "format", ".", "text", "=", "kwargs", ".", "pop", "(", "'format'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
35.083333
9.166667
def makeCertHokTokenLoginMethod(stsUrl, stsCert=None): '''Return a function that will call the vim.SessionManager.LoginByToken() after obtaining a HoK SAML token from the STS. The result of this function can be passed as the "loginMethod" to a SessionOrientedStub constructor. @param stsUrl: URL of the SAML Token issuing service. (i.e. SSO server). @param stsCert: public key of the STS service. ''' assert(stsUrl) def _doLogin(soapStub): from . import sso cert = soapStub.schemeArgs['cert_file'] key = soapStub.schemeArgs['key_file'] authenticator = sso.SsoAuthenticator(sts_url=stsUrl, sts_cert=stsCert) samlAssertion = authenticator.get_hok_saml_assertion(cert,key) def _requestModifier(request): return sso.add_saml_context(request, samlAssertion, key) si = vim.ServiceInstance("ServiceInstance", soapStub) sm = si.content.sessionManager if not sm.currentSession: with soapStub.requestModifier(_requestModifier): try: soapStub.samlToken = samlAssertion si.content.sessionManager.LoginByToken() finally: soapStub.samlToken = None return _doLogin
[ "def", "makeCertHokTokenLoginMethod", "(", "stsUrl", ",", "stsCert", "=", "None", ")", ":", "assert", "(", "stsUrl", ")", "def", "_doLogin", "(", "soapStub", ")", ":", "from", ".", "import", "sso", "cert", "=", "soapStub", ".", "schemeArgs", "[", "'cert_file'", "]", "key", "=", "soapStub", ".", "schemeArgs", "[", "'key_file'", "]", "authenticator", "=", "sso", ".", "SsoAuthenticator", "(", "sts_url", "=", "stsUrl", ",", "sts_cert", "=", "stsCert", ")", "samlAssertion", "=", "authenticator", ".", "get_hok_saml_assertion", "(", "cert", ",", "key", ")", "def", "_requestModifier", "(", "request", ")", ":", "return", "sso", ".", "add_saml_context", "(", "request", ",", "samlAssertion", ",", "key", ")", "si", "=", "vim", ".", "ServiceInstance", "(", "\"ServiceInstance\"", ",", "soapStub", ")", "sm", "=", "si", ".", "content", ".", "sessionManager", "if", "not", "sm", ".", "currentSession", ":", "with", "soapStub", ".", "requestModifier", "(", "_requestModifier", ")", ":", "try", ":", "soapStub", ".", "samlToken", "=", "samlAssertion", "si", ".", "content", ".", "sessionManager", ".", "LoginByToken", "(", ")", "finally", ":", "soapStub", ".", "samlToken", "=", "None", "return", "_doLogin" ]
38.617647
23.382353
def drop_retention_policy(self, name, database=None): """Drop an existing retention policy for a database. :param name: the name of the retention policy to drop :type name: str :param database: the database for which the retention policy is dropped. Defaults to current client's database :type database: str """ query_string = ( "DROP RETENTION POLICY {0} ON {1}" ).format(quote_ident(name), quote_ident(database or self._database)) self.query(query_string, method="POST")
[ "def", "drop_retention_policy", "(", "self", ",", "name", ",", "database", "=", "None", ")", ":", "query_string", "=", "(", "\"DROP RETENTION POLICY {0} ON {1}\"", ")", ".", "format", "(", "quote_ident", "(", "name", ")", ",", "quote_ident", "(", "database", "or", "self", ".", "_database", ")", ")", "self", ".", "query", "(", "query_string", ",", "method", "=", "\"POST\"", ")" ]
42.846154
16.769231
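This matches the influxdb-python client; a usage sketch (host and database names are placeholders):

from influxdb import InfluxDBClient

client = InfluxDBClient(host='localhost', port=8086, database='example')
client.drop_retention_policy('one_week')             # on the current database
client.drop_retention_policy('one_week', 'metrics')  # on an explicit database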
def message(blockers): """Create a sequence of key messages based on what is blocking.""" if not blockers: encoding = getattr(sys.stdout, 'encoding', '') if encoding: encoding = encoding.lower() if encoding == 'utf-8': # party hat flair = "\U0001F389 " else: flair = '' return [flair + 'You have 0 projects blocking you from using Python 3!'] flattened_blockers = set() for blocker_reasons in blockers: for blocker in blocker_reasons: flattened_blockers.add(blocker) need = 'You need {0} project{1} to transition to Python 3.' formatted_need = need.format(len(flattened_blockers), 's' if len(flattened_blockers) != 1 else '') can_port = ('Of {0} {1} project{2}, {3} {4} no direct dependencies ' 'blocking {5} transition:') formatted_can_port = can_port.format( 'those' if len(flattened_blockers) != 1 else 'that', len(flattened_blockers), 's' if len(flattened_blockers) != 1 else '', len(blockers), 'have' if len(blockers) != 1 else 'has', 'their' if len(blockers) != 1 else 'its') return formatted_need, formatted_can_port
[ "def", "message", "(", "blockers", ")", ":", "if", "not", "blockers", ":", "encoding", "=", "getattr", "(", "sys", ".", "stdout", ",", "'encoding'", ",", "''", ")", "if", "encoding", ":", "encoding", "=", "encoding", ".", "lower", "(", ")", "if", "encoding", "==", "'utf-8'", ":", "# party hat", "flair", "=", "\"\\U0001F389 \"", "else", ":", "flair", "=", "''", "return", "[", "flair", "+", "'You have 0 projects blocking you from using Python 3!'", "]", "flattened_blockers", "=", "set", "(", ")", "for", "blocker_reasons", "in", "blockers", ":", "for", "blocker", "in", "blocker_reasons", ":", "flattened_blockers", ".", "add", "(", "blocker", ")", "need", "=", "'You need {0} project{1} to transition to Python 3.'", "formatted_need", "=", "need", ".", "format", "(", "len", "(", "flattened_blockers", ")", ",", "'s'", "if", "len", "(", "flattened_blockers", ")", "!=", "1", "else", "''", ")", "can_port", "=", "(", "'Of {0} {1} project{2}, {3} {4} no direct dependencies '", "'blocking {5} transition:'", ")", "formatted_can_port", "=", "can_port", ".", "format", "(", "'those'", "if", "len", "(", "flattened_blockers", ")", "!=", "1", "else", "'that'", ",", "len", "(", "flattened_blockers", ")", ",", "'s'", "if", "len", "(", "flattened_blockers", ")", "!=", "1", "else", "''", ",", "len", "(", "blockers", ")", ",", "'have'", "if", "len", "(", "blockers", ")", "!=", "1", "else", "'has'", ",", "'their'", "if", "len", "(", "blockers", ")", "!=", "1", "else", "'its'", ")", "return", "formatted_need", ",", "formatted_can_port" ]
42.166667
13.566667
async def set_presence(self, status: str = "online", ignore_cache: bool = False): """ Set the online status of the user. See also: `API reference`_ Args: status: The online status of the user. Allowed values: "online", "offline", "unavailable". ignore_cache: Whether or not to set presence even if the cache says the presence is already set to that value. .. _API reference: https://matrix.org/docs/spec/client_server/r0.3.0.html#put-matrix-client-r0-presence-userid-status """ await self.ensure_registered() if not ignore_cache and self.state_store.has_presence(self.mxid, status): return content = { "presence": status } resp = await self.client.request("PUT", f"/presence/{self.mxid}/status", content) self.state_store.set_presence(self.mxid, status)
[ "async", "def", "set_presence", "(", "self", ",", "status", ":", "str", "=", "\"online\"", ",", "ignore_cache", ":", "bool", "=", "False", ")", ":", "await", "self", ".", "ensure_registered", "(", ")", "if", "not", "ignore_cache", "and", "self", ".", "state_store", ".", "has_presence", "(", "self", ".", "mxid", ",", "status", ")", ":", "return", "content", "=", "{", "\"presence\"", ":", "status", "}", "resp", "=", "await", "self", ".", "client", ".", "request", "(", "\"PUT\"", ",", "f\"/presence/{self.mxid}/status\"", ",", "content", ")", "self", ".", "state_store", ".", "set_presence", "(", "self", ".", "mxid", ",", "status", ")" ]
42.809524
29.095238
def calcMzFromMass(mass, charge): """Calculate the mz value of a peptide from its mass and charge. :param mass: float, exact non protonated mass :param charge: int, charge state :returns: mass to charge ratio of the specified charge state """ mz = (mass + (maspy.constants.atomicMassProton * charge)) / charge return mz
[ "def", "calcMzFromMass", "(", "mass", ",", "charge", ")", ":", "mz", "=", "(", "mass", "+", "(", "maspy", ".", "constants", ".", "atomicMassProton", "*", "charge", ")", ")", "/", "charge", "return", "mz" ]
34
18.1
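A worked example of the formula, assuming maspy.constants.atomicMassProton holds the proton mass of about 1.00728 Da:

# m/z = (M + z * m_proton) / z
# For a peptide of monoisotopic mass 1000.0 Da at charge state 2:
#   (1000.0 + 2 * 1.00728) / 2 = 501.00728
print(calcMzFromMass(1000.0, 2))  # -> ~501.00728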
def _partialParseDateStd(self, s, sourceTime):
        """
        test if the given C{s} matches CRE_DATE, used by L{parse()}

        @type  s:          string
        @param s:          date/time text to evaluate
        @type  sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base

        @rtype:  tuple
        @return: tuple of remaining date/time text, datetime object and
                 a boolean value describing whether it matched or not
        """
        parseStr = None
        chunk1 = chunk2 = ''

        # Standard date format
        m = self.ptc.CRE_DATE.search(s)
        if m is not None:
            if (m.group('date') != s):
                # capture remaining string
                parseStr = m.group('date')
                chunk1 = s[:m.start('date')]
                chunk2 = s[m.end('date'):]
                s = '%s %s' % (chunk1, chunk2)
            else:
                parseStr = s
                s = ''

        if parseStr:
            debug and log.debug(
                'found (date) [%s][%s][%s]', parseStr, chunk1, chunk2)
            sourceTime = self._evalDateStd(parseStr, sourceTime)

        return s, sourceTime, bool(parseStr)
[ "def", "_partialParseDateStd", "(", "self", ",", "s", ",", "sourceTime", ")", ":", "parseStr", "=", "None", "chunk1", "=", "chunk2", "=", "''", "# Standard date format", "m", "=", "self", ".", "ptc", ".", "CRE_DATE", ".", "search", "(", "s", ")", "if", "m", "is", "not", "None", ":", "if", "(", "m", ".", "group", "(", "'date'", ")", "!=", "s", ")", ":", "# capture remaining string", "parseStr", "=", "m", ".", "group", "(", "'date'", ")", "chunk1", "=", "s", "[", ":", "m", ".", "start", "(", "'date'", ")", "]", "chunk2", "=", "s", "[", "m", ".", "end", "(", "'date'", ")", ":", "]", "s", "=", "'%s %s'", "%", "(", "chunk1", ",", "chunk2", ")", "else", ":", "parseStr", "=", "s", "s", "=", "''", "if", "parseStr", ":", "debug", "and", "log", ".", "debug", "(", "'found (date) [%s][%s][%s]'", ",", "parseStr", ",", "chunk1", ",", "chunk2", ")", "sourceTime", "=", "self", ".", "_evalDateStd", "(", "parseStr", ",", "sourceTime", ")", "return", "s", ",", "sourceTime", ",", "bool", "(", "parseStr", ")" ]
31.675676
17.297297
def search_databases(self, search_term, location=None, markets_only=False, databases_to_search=None, allow_internal=False):
        """
        Search external databases linked to your lcopt model.

        To restrict the search to particular databases (e.g. technosphere or biosphere only) use a list of database names in the ``databases_to_search`` variable
        """
        dict_list = []

        if allow_internal:
            internal_dict = {}
            for k, v in self.database['items'].items():
                if v.get('lcopt_type') == 'intermediate':
                    internal_dict[k] = v
            dict_list.append(internal_dict)

        if databases_to_search is None:
            #Search all of the databases available
            #data = Dictionaries(self.database['items'], *[x['items'] for x in self.external_databases])
            dict_list += [x['items'] for x in self.external_databases]
        else:
            #data = Dictionaries(self.database['items'], *[x['items'] for x in self.external_databases if x['name'] in databases_to_search])
            dict_list += [x['items'] for x in self.external_databases if x['name'] in databases_to_search]

        data = Dictionaries(*dict_list)
        #data = Dictionaries(self.database['items'], *[x['items'] for x in self.external_databases if x['name'] in databases_to_search])

        query = Query()

        if markets_only:
            market_filter = Filter("name", "has", "market for")
            query.add(market_filter)

        if location is not None:
            location_filter = Filter("location", "is", location)
            query.add(location_filter)

        query.add(Filter("name", "ihas", search_term))

        result = query(data)

        return result
[ "def", "search_databases", "(", "self", ",", "search_term", ",", "location", "=", "None", ",", "markets_only", "=", "False", ",", "databases_to_search", "=", "None", ",", "allow_internal", "=", "False", ")", ":", "dict_list", "=", "[", "]", "if", "allow_internal", ":", "internal_dict", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "database", "[", "'items'", "]", ".", "items", "(", ")", ":", "if", "v", ".", "get", "(", "'lcopt_type'", ")", "==", "'intermediate'", ":", "internal_dict", "[", "k", "]", "=", "v", "dict_list", ".", "append", "(", "internal_dict", ")", "if", "databases_to_search", "is", "None", ":", "#Search all of the databases available", "#data = Dictionaries(self.database['items'], *[x['items'] for x in self.external_databases])", "dict_list", "+=", "[", "x", "[", "'items'", "]", "for", "x", "in", "self", ".", "external_databases", "]", "else", ":", "#data = Dictionaries(self.database['items'], *[x['items'] for x in self.external_databases if x['name'] in databases_to_search])", "dict_list", "+=", "[", "x", "[", "'items'", "]", "for", "x", "in", "self", ".", "external_databases", "if", "x", "[", "'name'", "]", "in", "databases_to_search", "]", "data", "=", "Dictionaries", "(", "*", "dict_list", ")", "#data = Dictionaries(self.database['items'], *[x['items'] for x in self.external_databases if x['name'] in databases_to_search])", "query", "=", "Query", "(", ")", "if", "markets_only", ":", "market_filter", "=", "Filter", "(", "\"name\"", ",", "\"has\"", ",", "\"market for\"", ")", "query", ".", "add", "(", "market_filter", ")", "if", "location", "is", "not", "None", ":", "location_filter", "=", "Filter", "(", "\"location\"", ",", "\"is\"", ",", "location", ")", "query", ".", "add", "(", "location_filter", ")", "query", ".", "add", "(", "Filter", "(", "\"name\"", ",", "\"ihas\"", ",", "search_term", ")", ")", "result", "=", "query", "(", "data", ")", "return", "result" ]
40.068182
29.795455
def pass_q_v1(self): """Update the outlet link sequence.""" flu = self.sequences.fluxes.fastaccess out = self.sequences.outlets.fastaccess out.q[0] += flu.qa
[ "def", "pass_q_v1", "(", "self", ")", ":", "flu", "=", "self", ".", "sequences", ".", "fluxes", ".", "fastaccess", "out", "=", "self", ".", "sequences", ".", "outlets", ".", "fastaccess", "out", ".", "q", "[", "0", "]", "+=", "flu", ".", "qa" ]
33.8
8.6
def lookup_package(self, definition_name): """Determines the package name for any definition. Determine the package that any definition name belongs to. May check parent for package name and will resolve missing descriptors if provided descriptor loader. Args: definition_name: Definition name to find package for. """ while True: descriptor = self.lookup_descriptor(definition_name) if isinstance(descriptor, FileDescriptor): return descriptor.package else: index = definition_name.rfind('.') if index < 0: return None definition_name = definition_name[:index]
[ "def", "lookup_package", "(", "self", ",", "definition_name", ")", ":", "while", "True", ":", "descriptor", "=", "self", ".", "lookup_descriptor", "(", "definition_name", ")", "if", "isinstance", "(", "descriptor", ",", "FileDescriptor", ")", ":", "return", "descriptor", ".", "package", "else", ":", "index", "=", "definition_name", ".", "rfind", "(", "'.'", ")", "if", "index", "<", "0", ":", "return", "None", "definition_name", "=", "definition_name", "[", ":", "index", "]" ]
36.55
18.2
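The dotted-name walk in isolation: trim trailing components until a lookup succeeds. A standalone sketch with a stand-in lookup table (names introduced here are illustrative only):

def walk_to_package(name, file_packages):
    # file_packages: maps definition names of files to their package name.
    while name:
        if name in file_packages:
            return file_packages[name]
        index = name.rfind('.')
        name = name[:index] if index >= 0 else ''
    return None

print(walk_to_package('my.pkg.Message.Nested', {'my.pkg': 'my.pkg'}))
# -> 'my.pkg'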
def auto_cleaned_path_stripped_uuid4(instance, filename: str) -> str:
    """
    Gets upload path in this format: {MODEL_NAME}/{UUID4}{SUFFIX}.
    Same as `upload_path_uuid4` but strips the user's original file name.
    :param instance: Instance of model or model class.
    :param filename: Uploaded file name.
    :return: Target upload path.
    """
    _, suffix = parse_filename(filename)
    base_dir = get_base_dir_from_object(instance)
    rand_uuid = uuid.uuid4()
    return os.path.join(base_dir, "{rand_uuid}{suffix}".format(rand_uuid=rand_uuid, suffix=suffix))
[ "def", "auto_cleaned_path_stripped_uuid4", "(", "instance", ",", "filename", ":", "str", ")", "->", "str", ":", "_", ",", "suffix", "=", "parse_filename", "(", "filename", ")", "base_dir", "=", "get_base_dir_from_object", "(", "instance", ")", "rand_uuid", "=", "uuid", ".", "uuid4", "(", ")", "return", "os", ".", "path", ".", "join", "(", "base_dir", ",", "\"{rand_uuid}{suffix}\"", ".", "format", "(", "rand_uuid", "=", "rand_uuid", ",", "suffix", "=", "suffix", ")", ")" ]
42.266667
20
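Since the signature is (instance, filename) -> str, the helper plugs straight into a FileField's upload_to; a hypothetical Django model:

from django.db import models

class Photo(models.Model):  # hypothetical example model
    image = models.FileField(upload_to=auto_cleaned_path_stripped_uuid4)
    # Files land at e.g. "photo/<uuid4>.jpg"; the uploader's original
    # file name is discarded, only its suffix is kept.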
def get_item_attribute(self, item, name): """ Method called by item when an attribute is not found. """ if name in self.__item_attributes: return self.__item_attributes[name](item) elif self.section: return self.section.get_item_attribute(item, name) else: raise AttributeError(name)
[ "def", "get_item_attribute", "(", "self", ",", "item", ",", "name", ")", ":", "if", "name", "in", "self", ".", "__item_attributes", ":", "return", "self", ".", "__item_attributes", "[", "name", "]", "(", "item", ")", "elif", "self", ".", "section", ":", "return", "self", ".", "section", ".", "get_item_attribute", "(", "item", ",", "name", ")", "else", ":", "raise", "AttributeError", "(", "name", ")" ]
35.8
10.2
def create_vector_input(self, name='vector_observation'):
    """
    Creates ops for vector observation input; the observation size is
    taken from `self.vec_obs_size`.
    :param name: Name of the placeholder op.
    :return: Placeholder for the (optionally normalized) vector observation.
    """
    self.vector_in = tf.placeholder(shape=[None, self.vec_obs_size], dtype=tf.float32,
                                    name=name)
    if self.normalize:
        self.running_mean = tf.get_variable("running_mean", [self.vec_obs_size],
                                            trainable=False, dtype=tf.float32,
                                            initializer=tf.zeros_initializer())
        self.running_variance = tf.get_variable("running_variance", [self.vec_obs_size],
                                                trainable=False, dtype=tf.float32,
                                                initializer=tf.ones_initializer())
        self.update_mean, self.update_variance = self.create_normalizer_update(self.vector_in)
        self.normalized_state = tf.clip_by_value(
            (self.vector_in - self.running_mean) / tf.sqrt(
                self.running_variance / (tf.cast(self.global_step, tf.float32) + 1)),
            -5, 5, name="normalized_state")
        return self.normalized_state
    else:
        return self.vector_in
[ "def", "create_vector_input", "(", "self", ",", "name", "=", "'vector_observation'", ")", ":", "self", ".", "vector_in", "=", "tf", ".", "placeholder", "(", "shape", "=", "[", "None", ",", "self", ".", "vec_obs_size", "]", ",", "dtype", "=", "tf", ".", "float32", ",", "name", "=", "name", ")", "if", "self", ".", "normalize", ":", "self", ".", "running_mean", "=", "tf", ".", "get_variable", "(", "\"running_mean\"", ",", "[", "self", ".", "vec_obs_size", "]", ",", "trainable", "=", "False", ",", "dtype", "=", "tf", ".", "float32", ",", "initializer", "=", "tf", ".", "zeros_initializer", "(", ")", ")", "self", ".", "running_variance", "=", "tf", ".", "get_variable", "(", "\"running_variance\"", ",", "[", "self", ".", "vec_obs_size", "]", ",", "trainable", "=", "False", ",", "dtype", "=", "tf", ".", "float32", ",", "initializer", "=", "tf", ".", "ones_initializer", "(", ")", ")", "self", ".", "update_mean", ",", "self", ".", "update_variance", "=", "self", ".", "create_normalizer_update", "(", "self", ".", "vector_in", ")", "self", ".", "normalized_state", "=", "tf", ".", "clip_by_value", "(", "(", "self", ".", "vector_in", "-", "self", ".", "running_mean", ")", "/", "tf", ".", "sqrt", "(", "self", ".", "running_variance", "/", "(", "tf", ".", "cast", "(", "self", ".", "global_step", ",", "tf", ".", "float32", ")", "+", "1", ")", ")", ",", "-", "5", ",", "5", ",", "name", "=", "\"normalized_state\"", ")", "return", "self", ".", "normalized_state", "else", ":", "return", "self", ".", "vector_in" ]
57.56
28.84
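The TF graph above clips a running z-score whose variance accumulator is divided by the step count; a minimal numpy sketch of just that formula (an illustration, not the ml-agents API):

import numpy as np

def normalize_obs(x, running_mean, running_variance, step):
    # z-score with step-averaged variance, clipped to [-5, 5] as in the graph
    z = (x - running_mean) / np.sqrt(running_variance / (step + 1))
    return np.clip(z, -5, 5)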
def _emiss_ep(self, Eph): """ Electron-proton bremsstrahlung emissivity per unit photon energy """ if self.weight_ep == 0.0: return np.zeros_like(Eph) gam = np.vstack(self._gam) eps = (Eph / mec2).decompose().value # compute integral with electron distribution emiss = c.cgs * trapz_loglog( np.vstack(self._nelec) * self._sigma_ep(gam, eps), self._gam, axis=0, ).to(u.cm ** 2 / Eph.unit) return emiss
[ "def", "_emiss_ep", "(", "self", ",", "Eph", ")", ":", "if", "self", ".", "weight_ep", "==", "0.0", ":", "return", "np", ".", "zeros_like", "(", "Eph", ")", "gam", "=", "np", ".", "vstack", "(", "self", ".", "_gam", ")", "eps", "=", "(", "Eph", "/", "mec2", ")", ".", "decompose", "(", ")", ".", "value", "# compute integral with electron distribution", "emiss", "=", "c", ".", "cgs", "*", "trapz_loglog", "(", "np", ".", "vstack", "(", "self", ".", "_nelec", ")", "*", "self", ".", "_sigma_ep", "(", "gam", ",", "eps", ")", ",", "self", ".", "_gam", ",", "axis", "=", "0", ",", ")", ".", "to", "(", "u", ".", "cm", "**", "2", "/", "Eph", ".", "unit", ")", "return", "emiss" ]
32.125
13.125
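`trapz_loglog` is the package's own helper; a rough standalone sketch of log-log trapezoidal integration, assuming a piecewise power law between samples (the b == -1 singular segment is deliberately not handled):

import numpy as np

def trapz_powerlaw(y, x):
    # Treat y(x) as y = a * x**b on each interval and integrate analytically.
    b = np.log(y[1:] / y[:-1]) / np.log(x[1:] / x[:-1])
    segments = y[:-1] * x[:-1] / (b + 1.0) * ((x[1:] / x[:-1]) ** (b + 1.0) - 1.0)
    return segments.sum()

x = np.logspace(0, 2, 50)
print(trapz_powerlaw(x ** -2.0, x))  # 0.99, the exact integral of x**-2 over [1, 100]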
def XKX(self):
    """
    compute self covariance for rest
    """
    cov_beta = np.zeros((self.dof, self.dof))
    start_row = 0
    # This is trivially parallelizable:
    for term1 in range(self.len):
        stop_row = start_row + self.A[term1].shape[0] * self.F[term1].shape[1]
        start_col = start_row
        # This is trivially parallelizable:
        for term2 in range(term1, self.len):
            stop_col = start_col + self.A[term2].shape[0] * self.F[term2].shape[1]
            cov_beta[start_row:stop_row, start_col:stop_col] = compute_X1KX2(
                Y=self.Ystar(), D=self.D, X1=self.Fstar[term1], X2=self.Fstar[term2],
                A1=self.Astar[term1], A2=self.Astar[term2])
            if term1 != term2:
                # mirror the symmetric block into the lower triangle
                cov_beta[start_col:stop_col, start_row:stop_row] = \
                    cov_beta[start_row:stop_row, start_col:stop_col].T
            start_col = stop_col
        start_row = stop_row
    return cov_beta
[ "def", "XKX", "(", "self", ")", ":", "cov_beta", "=", "np", ".", "zeros", "(", "(", "self", ".", "dof", ",", "self", ".", "dof", ")", ")", "start_row", "=", "0", "#This is trivially parallelizable:", "for", "term1", "in", "range", "(", "self", ".", "len", ")", ":", "stop_row", "=", "start_row", "+", "self", ".", "A", "[", "term1", "]", ".", "shape", "[", "0", "]", "*", "self", ".", "F", "[", "term1", "]", ".", "shape", "[", "1", "]", "start_col", "=", "start_row", "#This is trivially parallelizable:", "for", "term2", "in", "range", "(", "term1", ",", "self", ".", "len", ")", ":", "stop_col", "=", "start_col", "+", "self", ".", "A", "[", "term2", "]", ".", "shape", "[", "0", "]", "*", "self", ".", "F", "[", "term2", "]", ".", "shape", "[", "1", "]", "cov_beta", "[", "start_row", ":", "stop_row", ",", "start_col", ":", "stop_col", "]", "=", "compute_X1KX2", "(", "Y", "=", "self", ".", "Ystar", "(", ")", ",", "D", "=", "self", ".", "D", ",", "X1", "=", "self", ".", "Fstar", "[", "term1", "]", ",", "X2", "=", "self", ".", "Fstar", "[", "term2", "]", ",", "A1", "=", "self", ".", "Astar", "[", "term1", "]", ",", "A2", "=", "self", ".", "Astar", "[", "term2", "]", ")", "if", "term1", "!=", "term2", ":", "cov_beta", "[", "start_col", ":", "stop_col", ",", "start_row", ":", "stop_row", "]", "=", "cov_beta", "[", "n_weights1", ":", "stop_row", ",", "n_weights2", ":", "stop_col", "]", ".", "T", "start_col", "=", "stop_col", "start_row", "=", "stop_row", "return", "cov_beta" ]
50.421053
23.157895
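The mirrored assignment in `XKX` is the usual symmetric block-assembly pattern; a small self-contained sketch with a hypothetical dict of upper-triangle blocks keyed by block indices:

import numpy as np

def fill_symmetric_blocks(blocks, sizes):
    # blocks[(i, j)] given for j >= i; mirror them into the lower triangle.
    offsets = np.concatenate([[0], np.cumsum(sizes)])
    out = np.zeros((offsets[-1], offsets[-1]))
    for (i, j), b in blocks.items():
        out[offsets[i]:offsets[i + 1], offsets[j]:offsets[j + 1]] = b
        if i != j:
            out[offsets[j]:offsets[j + 1], offsets[i]:offsets[i + 1]] = b.T
    return out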
def phymem_usage(): """Return the amount of total, used and free physical memory on the system in bytes plus the percentage usage. Deprecated by psutil.virtual_memory(). """ mem = virtual_memory() return _nt_sysmeminfo(mem.total, mem.used, mem.free, mem.percent)
[ "def", "phymem_usage", "(", ")", ":", "mem", "=", "virtual_memory", "(", ")", "return", "_nt_sysmeminfo", "(", "mem", ".", "total", ",", "mem", ".", "used", ",", "mem", ".", "free", ",", "mem", ".", "percent", ")" ]
40
11.285714
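A minimal usage sketch of the replacement named in the docstring, assuming psutil is installed:

import psutil

vm = psutil.virtual_memory()
print(vm.total, vm.used, vm.free, vm.percent)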
def process(self, request, response, environ): """ Create a new access token. :param request: The incoming :class:`oauth2.web.Request`. :param response: The :class:`oauth2.web.Response` that will be returned to the client. :param environ: A ``dict`` containing data of the environment. :return: :class:`oauth2.web.Response` """ token_data = self.token_generator.create_access_token_data(self.refresh_grant_type) expires_at = int(time.time()) + token_data["expires_in"] access_token = AccessToken(client_id=self.client.identifier, token=token_data["access_token"], grant_type=self.refresh_grant_type, data=self.data, expires_at=expires_at, scopes=self.scope_handler.scopes, user_id=self.user_id) if self.reissue_refresh_tokens: self.access_token_store.delete_refresh_token(self.refresh_token) access_token.refresh_token = token_data["refresh_token"] refresh_expires_in = self.token_generator.refresh_expires_in refresh_expires_at = int(time.time()) + refresh_expires_in access_token.refresh_expires_at = refresh_expires_at else: del token_data["refresh_token"] self.access_token_store.save_token(access_token) json_success_response(data=token_data, response=response) return response
[ "def", "process", "(", "self", ",", "request", ",", "response", ",", "environ", ")", ":", "token_data", "=", "self", ".", "token_generator", ".", "create_access_token_data", "(", "self", ".", "refresh_grant_type", ")", "expires_at", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "token_data", "[", "\"expires_in\"", "]", "access_token", "=", "AccessToken", "(", "client_id", "=", "self", ".", "client", ".", "identifier", ",", "token", "=", "token_data", "[", "\"access_token\"", "]", ",", "grant_type", "=", "self", ".", "refresh_grant_type", ",", "data", "=", "self", ".", "data", ",", "expires_at", "=", "expires_at", ",", "scopes", "=", "self", ".", "scope_handler", ".", "scopes", ",", "user_id", "=", "self", ".", "user_id", ")", "if", "self", ".", "reissue_refresh_tokens", ":", "self", ".", "access_token_store", ".", "delete_refresh_token", "(", "self", ".", "refresh_token", ")", "access_token", ".", "refresh_token", "=", "token_data", "[", "\"refresh_token\"", "]", "refresh_expires_in", "=", "self", ".", "token_generator", ".", "refresh_expires_in", "refresh_expires_at", "=", "int", "(", "time", ".", "time", "(", ")", ")", "+", "refresh_expires_in", "access_token", ".", "refresh_expires_at", "=", "refresh_expires_at", "else", ":", "del", "token_data", "[", "\"refresh_token\"", "]", "self", ".", "access_token_store", ".", "save_token", "(", "access_token", ")", "json_success_response", "(", "data", "=", "token_data", ",", "response", "=", "response", ")", "return", "response" ]
42.972222
25.25
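The token bookkeeping above hinges on turning the relative `expires_in` lifetime into an absolute timestamp; a tiny illustrative sketch of that convention:

import time

expires_in = 3600                           # lifetime reported to the client, in seconds
expires_at = int(time.time()) + expires_in  # absolute timestamp stored server-side

def is_expired(expires_at):
    return int(time.time()) >= expires_at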
def get_devices_from_response_dict(response_dict, device_type): """ :rtype: list of WinkDevice """ items = response_dict.get('data') devices = [] api_interface = WinkApiInterface() check_list = isinstance(device_type, (list,)) for item in items: if (check_list and get_object_type(item) in device_type) or \ (not check_list and get_object_type(item) == device_type): _devices = build_device(item, api_interface) for device in _devices: devices.append(device) return devices
[ "def", "get_devices_from_response_dict", "(", "response_dict", ",", "device_type", ")", ":", "items", "=", "response_dict", ".", "get", "(", "'data'", ")", "devices", "=", "[", "]", "api_interface", "=", "WinkApiInterface", "(", ")", "check_list", "=", "isinstance", "(", "device_type", ",", "(", "list", ",", ")", ")", "for", "item", "in", "items", ":", "if", "(", "check_list", "and", "get_object_type", "(", "item", ")", "in", "device_type", ")", "or", "(", "not", "check_list", "and", "get_object_type", "(", "item", ")", "==", "device_type", ")", ":", "_devices", "=", "build_device", "(", "item", ",", "api_interface", ")", "for", "device", "in", "_devices", ":", "devices", ".", "append", "(", "device", ")", "return", "devices" ]
29.421053
18.789474
def _request(self, method, path, params=None):
    """Make the actual request and return the parsed response."""
    url = self._base_url + path
    try:
        if method == 'GET':
            response = requests.get(url, timeout=TIMEOUT)
        elif method == "POST":
            response = requests.post(url, params, timeout=TIMEOUT)
        elif method == "PUT":
            response = requests.put(url, params, timeout=TIMEOUT)
        elif method == "DELETE":
            response = requests.delete(url, timeout=TIMEOUT)
        if response:
            return response.json()
        else:
            return {'status': 'error'}
    except requests.exceptions.HTTPError:
        return {'status': 'error'}
    except requests.exceptions.Timeout:
        return {'status': 'offline'}
    except requests.exceptions.RequestException:
        return {'status': 'offline'}
[ "def", "_request", "(", "self", ",", "method", ",", "path", ",", "params", "=", "None", ")", ":", "url", "=", "self", ".", "_base_url", "+", "path", "try", ":", "if", "method", "==", "'GET'", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "timeout", "=", "TIMEOUT", ")", "elif", "method", "==", "\"POST\"", ":", "response", "=", "requests", ".", "post", "(", "url", ",", "params", ",", "timeout", "=", "TIMEOUT", ")", "elif", "method", "==", "\"PUT\"", ":", "response", "=", "requests", ".", "put", "(", "url", ",", "params", ",", "timeout", "=", "TIMEOUT", ")", "elif", "method", "==", "\"DELETE\"", ":", "response", "=", "requests", ".", "delete", "(", "url", ",", "timeout", "=", "TIMEOUT", ")", "if", "response", ":", "return", "response", ".", "json", "(", ")", "else", ":", "return", "{", "'status'", ":", "'error'", "}", "except", "requests", ".", "exceptions", ".", "HTTPError", ":", "return", "{", "'status'", ":", "'error'", "}", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "return", "{", "'status'", ":", "'offline'", "}", "except", "requests", ".", "exceptions", ".", "RequestException", ":", "return", "{", "'status'", ":", "'offline'", "}" ]
39.166667
13.083333
def present(name,
            running=None,
            source=None,
            profiles=None,
            config=None,
            devices=None,
            architecture='x86_64',
            ephemeral=False,
            restart_on_change=False,
            remote_addr=None,
            cert=None,
            key=None,
            verify_cert=True):
    '''
    Create the named container if it does not exist

    name
        The name of the container to be created

    running : None
        * If ``True``, ensure that the container is running
        * If ``False``, ensure that the container is stopped
        * If ``None``, do nothing with regards to the running state of the
          container

    source : None
        Can be either a string containing an image alias:
             "xenial/amd64"
        or a dict with type "image" with alias:
            {"type": "image",
             "alias": "xenial/amd64"}
        or image with "fingerprint":
            {"type": "image",
             "fingerprint": "SHA-256"}
        or image with "properties":
            {"type": "image",
             "properties": {
                "os": "ubuntu",
                "release": "14.04",
                "architecture": "x86_64"
             }}
        or none:
            {"type": "none"}
        or copy:
            {"type": "copy",
             "source": "my-old-container"}

    profiles : ['default']
        List of profiles to apply on this container

    config :
        A config dict or None (None = unset).

        Can also be a list:
            [{'key': 'boot.autostart', 'value': 1},
             {'key': 'security.privileged', 'value': '1'}]

    devices :
        A device dict or None (None = unset).

    architecture : 'x86_64'
        Can be one of the following:
            * unknown
            * i686
            * x86_64
            * armv7l
            * aarch64
            * ppc
            * ppc64
            * ppc64le
            * s390x

    ephemeral : False
        Destroy this container after stop?

    restart_on_change : False
        Restart the container when we detect changes on the config or
        its devices?

    remote_addr :
        A URL to a remote server; you also have to give cert and key if
        you provide remote_addr!

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM formatted SSL certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM formatted SSL key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Whether to verify the cert; this is True by default, but in most
        cases you want to set it to False as LXD normally uses self-signed
        certificates.
    '''
    if profiles is None:
        profiles = ['default']

    if source is None:
        source = {}

    ret = {
        'name': name,
        'running': running,
        'profiles': profiles,
        'source': source,
        'config': config,
        'devices': devices,
        'architecture': architecture,
        'ephemeral': ephemeral,
        'restart_on_change': restart_on_change,
        'remote_addr': remote_addr,
        'cert': cert,
        'key': key,
        'verify_cert': verify_cert,
        'changes': {}
    }

    container = None
    try:
        container = __salt__['lxd.container_get'](
            name, remote_addr, cert, key, verify_cert, _raw=True
        )
    except CommandExecutionError as e:
        return _error(ret, six.text_type(e))
    except SaltInvocationError as e:
        # Profile not found
        pass

    if container is None:
        if __opts__['test']:
            # Test is on, just return that we would create the container
            msg = 'Would create the container "{0}"'.format(name)
            ret['changes'] = {
                'created': msg
            }
            if running is True:
                msg = msg + ' and start it.'
                ret['changes']['started'] = (
                    'Would start the container "{0}"'.format(name)
                )
            return _unchanged(ret, msg)

        # create the container
        try:
            __salt__['lxd.container_create'](
                name,
                source,
                profiles,
                config,
                devices,
                architecture,
                ephemeral,
                True,  # Wait
                remote_addr,
                cert,
                key,
                verify_cert
            )
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

        msg = 'Created the container "{0}"'.format(name)
        ret['changes'] = {
            'created': msg
        }

        if running is True:
            try:
                __salt__['lxd.container_start'](
                    name,
                    remote_addr,
                    cert,
                    key,
                    verify_cert
                )
            except CommandExecutionError as e:
                return _error(ret, six.text_type(e))

            msg = msg + ' and started it.'
            ret['changes'] = {
                'started': 'Started the container "{0}"'.format(name)
            }

        return _success(ret, msg)

    # Container exists, let's check for differences
    new_profiles = set(map(six.text_type, profiles))
    old_profiles = set(map(six.text_type, container.profiles))

    container_changed = False

    profile_changes = []
    # Removed profiles
    for k in old_profiles.difference(new_profiles):
        if not __opts__['test']:
            profile_changes.append('Removed profile "{0}"'.format(k))
            old_profiles.discard(k)
        else:
            profile_changes.append('Would remove profile "{0}"'.format(k))

    # Added profiles
    for k in new_profiles.difference(old_profiles):
        if not __opts__['test']:
            profile_changes.append('Added profile "{0}"'.format(k))
            old_profiles.add(k)
        else:
            profile_changes.append('Would add profile "{0}"'.format(k))

    if profile_changes:
        container_changed = True
        ret['changes']['profiles'] = profile_changes
        container.profiles = list(old_profiles)

    # Config and devices changes
    config, devices = __salt__['lxd.normalize_input_values'](
        config,
        devices
    )
    changes = __salt__['lxd.sync_config_devices'](
        container, config, devices, __opts__['test']
    )
    if changes:
        container_changed = True
        ret['changes'].update(changes)

    is_running = \
        container.status_code == CONTAINER_STATUS_RUNNING

    if not __opts__['test']:
        try:
            __salt__['lxd.pylxd_save_object'](container)
        except CommandExecutionError as e:
            return _error(ret, six.text_type(e))

    if running != is_running:
        if running is True:
            if __opts__['test']:
                changes['running'] = 'Would start the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and started.').format(name)
                )
            else:
                container.start(wait=True)
                changes['running'] = 'Started the container'

        elif running is False:
            if __opts__['test']:
                changes['stopped'] = 'Would stop the container'
                return _unchanged(
                    ret,
                    ('Container "{0}" would get changed '
                     'and stopped.').format(name)
                )
            else:
                container.stop(wait=True)
                changes['stopped'] = 'Stopped the container'

    if ((running is True or running is None) and
            is_running and
            restart_on_change and
            container_changed):
        if __opts__['test']:
            changes['restarted'] = 'Would restart the container'
            return _unchanged(
                ret,
                'Would restart the container "{0}"'.format(name)
            )
        else:
            container.restart(wait=True)
            changes['restarted'] = (
                'Container "{0}" has been restarted'.format(name)
            )
            return _success(
                ret,
                'Container "{0}" has been restarted'.format(name)
            )

    if not container_changed:
        return _success(ret, 'No changes')

    if __opts__['test']:
        return _unchanged(
            ret,
            'Container "{0}" would get changed.'.format(name)
        )

    return _success(ret, '{0} changes'.format(len(ret['changes'].keys())))
[ "def", "present", "(", "name", ",", "running", "=", "None", ",", "source", "=", "None", ",", "profiles", "=", "None", ",", "config", "=", "None", ",", "devices", "=", "None", ",", "architecture", "=", "'x86_64'", ",", "ephemeral", "=", "False", ",", "restart_on_change", "=", "False", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "if", "profiles", "is", "None", ":", "profiles", "=", "[", "'default'", "]", "if", "source", "is", "None", ":", "source", "=", "{", "}", "ret", "=", "{", "'name'", ":", "name", ",", "'running'", ":", "running", ",", "'profiles'", ":", "profiles", ",", "'source'", ":", "source", ",", "'config'", ":", "config", ",", "'devices'", ":", "devices", ",", "'architecture'", ":", "architecture", ",", "'ephemeral'", ":", "ephemeral", ",", "'restart_on_change'", ":", "restart_on_change", ",", "'remote_addr'", ":", "remote_addr", ",", "'cert'", ":", "cert", ",", "'key'", ":", "key", ",", "'verify_cert'", ":", "verify_cert", ",", "'changes'", ":", "{", "}", "}", "container", "=", "None", "try", ":", "container", "=", "__salt__", "[", "'lxd.container_get'", "]", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ",", "_raw", "=", "True", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "except", "SaltInvocationError", "as", "e", ":", "# Profile not found", "pass", "if", "container", "is", "None", ":", "if", "__opts__", "[", "'test'", "]", ":", "# Test is on, just return that we would create the container", "msg", "=", "'Would create the container \"{0}\"'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'created'", ":", "msg", "}", "if", "running", "is", "True", ":", "msg", "=", "msg", "+", "' and start it.'", "ret", "[", "'changes'", "]", "[", "'started'", "]", "=", "(", "'Would start the container \"{0}\"'", ".", "format", "(", "name", ")", ")", "ret", "[", "'changes'", "]", "=", "{", "'created'", ":", "msg", "}", "return", "_unchanged", "(", "ret", ",", "msg", ")", "# create the container", "try", ":", "__salt__", "[", "'lxd.container_create'", "]", "(", "name", ",", "source", ",", "profiles", ",", "config", ",", "devices", ",", "architecture", ",", "ephemeral", ",", "True", ",", "# Wait", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "msg", "=", "'Created the container \"{0}\"'", ".", "format", "(", "name", ")", "ret", "[", "'changes'", "]", "=", "{", "'created'", ":", "msg", "}", "if", "running", "is", "True", ":", "try", ":", "__salt__", "[", "'lxd.container_start'", "]", "(", "name", ",", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "msg", "=", "msg", "+", "' and started it.'", "ret", "[", "'changes'", "]", "=", "{", "'started'", ":", "'Started the container \"{0}\"'", ".", "format", "(", "name", ")", "}", "return", "_success", "(", "ret", ",", "msg", ")", "# Container exists, lets check for differences", "new_profiles", "=", "set", "(", "map", "(", "six", ".", "text_type", ",", "profiles", ")", ")", "old_profiles", "=", "set", "(", "map", "(", "six", ".", "text_type", ",", "container", ".", "profiles", ")", ")", "container_changed", "=", "False", 
"profile_changes", "=", "[", "]", "# Removed profiles", "for", "k", "in", "old_profiles", ".", "difference", "(", "new_profiles", ")", ":", "if", "not", "__opts__", "[", "'test'", "]", ":", "profile_changes", ".", "append", "(", "'Removed profile \"{0}\"'", ".", "format", "(", "k", ")", ")", "old_profiles", ".", "discard", "(", "k", ")", "else", ":", "profile_changes", ".", "append", "(", "'Would remove profile \"{0}\"'", ".", "format", "(", "k", ")", ")", "# Added profiles", "for", "k", "in", "new_profiles", ".", "difference", "(", "old_profiles", ")", ":", "if", "not", "__opts__", "[", "'test'", "]", ":", "profile_changes", ".", "append", "(", "'Added profile \"{0}\"'", ".", "format", "(", "k", ")", ")", "old_profiles", ".", "add", "(", "k", ")", "else", ":", "profile_changes", ".", "append", "(", "'Would add profile \"{0}\"'", ".", "format", "(", "k", ")", ")", "if", "profile_changes", ":", "container_changed", "=", "True", "ret", "[", "'changes'", "]", "[", "'profiles'", "]", "=", "profile_changes", "container", ".", "profiles", "=", "list", "(", "old_profiles", ")", "# Config and devices changes", "config", ",", "devices", "=", "__salt__", "[", "'lxd.normalize_input_values'", "]", "(", "config", ",", "devices", ")", "changes", "=", "__salt__", "[", "'lxd.sync_config_devices'", "]", "(", "container", ",", "config", ",", "devices", ",", "__opts__", "[", "'test'", "]", ")", "if", "changes", ":", "container_changed", "=", "True", "ret", "[", "'changes'", "]", ".", "update", "(", "changes", ")", "is_running", "=", "container", ".", "status_code", "==", "CONTAINER_STATUS_RUNNING", "if", "not", "__opts__", "[", "'test'", "]", ":", "try", ":", "__salt__", "[", "'lxd.pylxd_save_object'", "]", "(", "container", ")", "except", "CommandExecutionError", "as", "e", ":", "return", "_error", "(", "ret", ",", "six", ".", "text_type", "(", "e", ")", ")", "if", "running", "!=", "is_running", ":", "if", "running", "is", "True", ":", "if", "__opts__", "[", "'test'", "]", ":", "changes", "[", "'running'", "]", "=", "'Would start the container'", "return", "_unchanged", "(", "ret", ",", "(", "'Container \"{0}\" would get changed '", "'and started.'", ")", ".", "format", "(", "name", ")", ")", "else", ":", "container", ".", "start", "(", "wait", "=", "True", ")", "changes", "[", "'running'", "]", "=", "'Started the container'", "elif", "running", "is", "False", ":", "if", "__opts__", "[", "'test'", "]", ":", "changes", "[", "'stopped'", "]", "=", "'Would stopped the container'", "return", "_unchanged", "(", "ret", ",", "(", "'Container \"{0}\" would get changed '", "'and stopped.'", ")", ".", "format", "(", "name", ")", ")", "else", ":", "container", ".", "stop", "(", "wait", "=", "True", ")", "changes", "[", "'stopped'", "]", "=", "'Stopped the container'", "if", "(", "(", "running", "is", "True", "or", "running", "is", "None", ")", "and", "is_running", "and", "restart_on_change", "and", "container_changed", ")", ":", "if", "__opts__", "[", "'test'", "]", ":", "changes", "[", "'restarted'", "]", "=", "'Would restart the container'", "return", "_unchanged", "(", "ret", ",", "'Would restart the container \"{0}\"'", ".", "format", "(", "name", ")", ")", "else", ":", "container", ".", "restart", "(", "wait", "=", "True", ")", "changes", "[", "'restarted'", "]", "=", "(", "'Container \"{0}\" has been restarted'", ".", "format", "(", "name", ")", ")", "return", "_success", "(", "ret", ",", "'Container \"{0}\" has been restarted'", ".", "format", "(", "name", ")", ")", "if", "not", "container_changed", ":", 
"return", "_success", "(", "ret", ",", "'No changes'", ")", "if", "__opts__", "[", "'test'", "]", ":", "return", "_unchanged", "(", "ret", ",", "'Container \"{0}\" would get changed.'", ".", "format", "(", "name", ")", ")", "return", "_success", "(", "ret", ",", "'{0} changes'", ".", "format", "(", "len", "(", "ret", "[", "'changes'", "]", ".", "keys", "(", ")", ")", ")", ")" ]
28.072368
19.230263
def parse_args(self): """Parse CLI args.""" Args(self.tcex.parser) self.args = self.tcex.args
[ "def", "parse_args", "(", "self", ")", ":", "Args", "(", "self", ".", "tcex", ".", "parser", ")", "self", ".", "args", "=", "self", ".", "tcex", ".", "args" ]
28.5
8.75
def keras_dropout(layer, rate): '''keras dropout layer. ''' from keras import layers input_dim = len(layer.input.shape) if input_dim == 2: return layers.SpatialDropout1D(rate) elif input_dim == 3: return layers.SpatialDropout2D(rate) elif input_dim == 4: return layers.SpatialDropout3D(rate) else: return layers.Dropout(rate)
[ "def", "keras_dropout", "(", "layer", ",", "rate", ")", ":", "from", "keras", "import", "layers", "input_dim", "=", "len", "(", "layer", ".", "input", ".", "shape", ")", "if", "input_dim", "==", "2", ":", "return", "layers", ".", "SpatialDropout1D", "(", "rate", ")", "elif", "input_dim", "==", "3", ":", "return", "layers", ".", "SpatialDropout2D", "(", "rate", ")", "elif", "input_dim", "==", "4", ":", "return", "layers", ".", "SpatialDropout3D", "(", "rate", ")", "else", ":", "return", "layers", ".", "Dropout", "(", "rate", ")" ]
25.133333
16.466667
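A usage sketch of the spatial-dropout variants selected above, assuming a standalone Keras install; SpatialDropout2D expects the 4D output of a Conv2D:

from keras import layers, models

model = models.Sequential([
    layers.Conv2D(16, 3, activation='relu', input_shape=(32, 32, 3)),
    layers.SpatialDropout2D(0.3),   # drops entire feature maps rather than single units
    layers.Flatten(),
    layers.Dense(10, activation='softmax'),
])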
def pca(
        data: Union[AnnData, np.ndarray, spmatrix],
        n_comps: int = N_PCS,
        zero_center: Optional[bool] = True,
        svd_solver: str = 'auto',
        random_state: int = 0,
        return_info: bool = False,
        use_highly_variable: Optional[bool] = None,
        dtype: str = 'float32',
        copy: bool = False,
        chunked: bool = False,
        chunk_size: Optional[int] = None,
) -> Union[AnnData, np.ndarray, spmatrix]:
    """Principal component analysis [Pedregosa11]_.

    Computes PCA coordinates, loadings and variance decomposition. Uses the
    implementation of *scikit-learn* [Pedregosa11]_.

    Parameters
    ----------
    data
        The (annotated) data matrix of shape ``n_obs`` × ``n_vars``.
        Rows correspond to cells and columns to genes.
    n_comps
        Number of principal components to compute.
    zero_center
        If `True`, compute standard PCA from covariance matrix.
        If ``False``, omit zero-centering variables
        (uses :class:`~sklearn.decomposition.TruncatedSVD`),
        which allows handling sparse input efficiently.
        Passing ``None`` decides automatically based on sparseness of the data.
    svd_solver
        SVD solver to use:
        ``'arpack'`` for the ARPACK wrapper in SciPy
        (:func:`~scipy.sparse.linalg.svds`),
        ``'randomized'`` for the randomized algorithm due to Halko (2009),
        ``'auto'`` (the default) chooses automatically depending on the size
        of the problem.
    random_state
        Change to use different initial states for the optimization.
    return_info
        Only relevant when not passing an :class:`~anndata.AnnData`:
        see “**Returns**”.
    use_highly_variable
        Whether to use highly variable genes only, stored in
        ``.var['highly_variable']``.
        By default uses them if they have been determined beforehand.
    dtype
        Numpy data type string to which to convert the result.
    copy
        If an :class:`~anndata.AnnData` is passed, determines whether a copy
        is returned. Is ignored otherwise.
    chunked
        If ``True``, perform an incremental PCA on segments of ``chunk_size``.
        The incremental PCA automatically zero centers and ignores settings of
        ``random_state`` and ``svd_solver``. If ``False``, perform a full PCA.
    chunk_size
        Number of observations to include in each chunk.
        Required if ``chunked=True`` was passed.

    Returns
    -------
    X_pca : :class:`scipy.sparse.spmatrix` or :class:`numpy.ndarray`
        If `data` is array-like and ``return_info=False`` was passed,
        this function only returns `X_pca`…
    adata : anndata.AnnData
        …otherwise if ``copy=True`` it returns or else adds fields to ``adata``:

        ``.obsm['X_pca']``
             PCA representation of data.
        ``.varm['PCs']``
             The principal components containing the loadings.
        ``.uns['pca']['variance_ratio']``
             Ratio of explained variance.
        ``.uns['pca']['variance']``
             Explained variance, equivalent to the eigenvalues of the
             covariance matrix.
    """
    # chunked calculation is not randomized, anyways
    if svd_solver in {'auto', 'randomized'} and not chunked:
        logg.info(
            'Note that scikit-learn\'s randomized PCA might not be exactly '
            'reproducible across different computational platforms. For exact '
            'reproducibility, choose `svd_solver=\'arpack\'.` This will likely '
            'become the Scanpy default in the future.')

    data_is_AnnData = isinstance(data, AnnData)
    if data_is_AnnData:
        adata = data.copy() if copy else data
    else:
        adata = AnnData(data)

    logg.info('computing PCA with n_comps =', n_comps, r=True)

    if adata.n_vars < n_comps:
        n_comps = adata.n_vars - 1
        logg.msg('reducing number of computed PCs to',
                 n_comps, 'as dim of data is only', adata.n_vars, v=4)

    if use_highly_variable is True and 'highly_variable' not in adata.var.keys():
        raise ValueError('Did not find adata.var[\'highly_variable\']. '
                         'Either your data already only consists of highly-variable genes '
                         'or consider running `pp.filter_genes_dispersion` first.')
    if use_highly_variable is None:
        use_highly_variable = True if 'highly_variable' in adata.var.keys() else False
    if use_highly_variable:
        logg.info('computing PCA on highly variable genes')
    adata_comp = adata[:, adata.var['highly_variable']] if use_highly_variable else adata

    if chunked:
        if not zero_center or random_state or svd_solver != 'auto':
            logg.msg('Ignoring zero_center, random_state, svd_solver', v=4)

        from sklearn.decomposition import IncrementalPCA

        X_pca = np.zeros((adata_comp.X.shape[0], n_comps), adata_comp.X.dtype)

        pca_ = IncrementalPCA(n_components=n_comps)

        for chunk, _, _ in adata_comp.chunked_X(chunk_size):
            chunk = chunk.toarray() if issparse(chunk) else chunk
            pca_.partial_fit(chunk)

        for chunk, start, end in adata_comp.chunked_X(chunk_size):
            chunk = chunk.toarray() if issparse(chunk) else chunk
            X_pca[start:end] = pca_.transform(chunk)
    else:
        if zero_center is None:
            zero_center = not issparse(adata_comp.X)
        if zero_center:
            from sklearn.decomposition import PCA
            if issparse(adata_comp.X):
                logg.msg('    as `zero_center=True`, '
                         'sparse input is densified and may '
                         'lead to huge memory consumption', v=4)
                X = adata_comp.X.toarray()  # Copying the whole adata_comp.X here, could cause memory problems
            else:
                X = adata_comp.X
            pca_ = PCA(n_components=n_comps, svd_solver=svd_solver, random_state=random_state)
        else:
            from sklearn.decomposition import TruncatedSVD
            logg.msg('    without zero-centering: \n'
                     '    the explained variance does not correspond to the exact statistical definition\n'
                     '    the first component, e.g., might be heavily influenced by different means\n'
                     '    the following components often resemble the exact PCA very closely', v=4)
            pca_ = TruncatedSVD(n_components=n_comps, random_state=random_state)
            X = adata_comp.X
        X_pca = pca_.fit_transform(X)

    if X_pca.dtype.descr != np.dtype(dtype).descr:
        X_pca = X_pca.astype(dtype)

    if data_is_AnnData:
        adata.obsm['X_pca'] = X_pca
        if use_highly_variable:
            adata.varm['PCs'] = np.zeros(shape=(adata.n_vars, n_comps))
            adata.varm['PCs'][adata.var['highly_variable']] = pca_.components_.T
        else:
            adata.varm['PCs'] = pca_.components_.T
        adata.uns['pca'] = {}
        adata.uns['pca']['variance'] = pca_.explained_variance_
        adata.uns['pca']['variance_ratio'] = pca_.explained_variance_ratio_
        logg.info('    finished', t=True)
        logg.msg('and added\n'
                 '    \'X_pca\', the PCA coordinates (adata.obs)\n'
                 '    \'PC1\', \'PC2\', ..., the loadings (adata.var)\n'
                 '    \'pca_variance\', the variance / eigenvalues (adata.uns)\n'
                 '    \'pca_variance_ratio\', the variance ratio (adata.uns)', v=4)
        return adata if copy else None
    else:
        logg.info('    finished', t=True)
        if return_info:
            return X_pca, pca_.components_, pca_.explained_variance_ratio_, pca_.explained_variance_
        else:
            return X_pca
[ "def", "pca", "(", "data", ":", "Union", "[", "AnnData", ",", "np", ".", "ndarray", ",", "spmatrix", "]", ",", "n_comps", ":", "int", "=", "N_PCS", ",", "zero_center", ":", "Optional", "[", "bool", "]", "=", "True", ",", "svd_solver", ":", "str", "=", "'auto'", ",", "random_state", ":", "int", "=", "0", ",", "return_info", ":", "bool", "=", "False", ",", "use_highly_variable", ":", "Optional", "[", "bool", "]", "=", "None", ",", "dtype", ":", "str", "=", "'float32'", ",", "copy", ":", "bool", "=", "False", ",", "chunked", ":", "bool", "=", "False", ",", "chunk_size", ":", "Optional", "[", "int", "]", "=", "None", ",", ")", "->", "Union", "[", "AnnData", ",", "np", ".", "ndarray", ",", "spmatrix", "]", ":", "# chunked calculation is not randomized, anyways", "if", "svd_solver", "in", "{", "'auto'", ",", "'randomized'", "}", "and", "not", "chunked", ":", "logg", ".", "info", "(", "'Note that scikit-learn\\'s randomized PCA might not be exactly '", "'reproducible across different computational platforms. For exact '", "'reproducibility, choose `svd_solver=\\'arpack\\'.` This will likely '", "'become the Scanpy default in the future.'", ")", "data_is_AnnData", "=", "isinstance", "(", "data", ",", "AnnData", ")", "if", "data_is_AnnData", ":", "adata", "=", "data", ".", "copy", "(", ")", "if", "copy", "else", "data", "else", ":", "adata", "=", "AnnData", "(", "data", ")", "logg", ".", "info", "(", "'computing PCA with n_comps ='", ",", "n_comps", ",", "r", "=", "True", ")", "if", "adata", ".", "n_vars", "<", "n_comps", ":", "n_comps", "=", "adata", ".", "n_vars", "-", "1", "logg", ".", "msg", "(", "'reducing number of computed PCs to'", ",", "n_comps", ",", "'as dim of data is only'", ",", "adata", ".", "n_vars", ",", "v", "=", "4", ")", "if", "use_highly_variable", "is", "True", "and", "'highly_variable'", "not", "in", "adata", ".", "var", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'Did not find adata.var[\\'highly_variable\\']. 
'", "'Either your data already only consists of highly-variable genes '", "'or consider running `pp.filter_genes_dispersion` first.'", ")", "if", "use_highly_variable", "is", "None", ":", "use_highly_variable", "=", "True", "if", "'highly_variable'", "in", "adata", ".", "var", ".", "keys", "(", ")", "else", "False", "if", "use_highly_variable", ":", "logg", ".", "info", "(", "'computing PCA on highly variable genes'", ")", "adata_comp", "=", "adata", "[", ":", ",", "adata", ".", "var", "[", "'highly_variable'", "]", "]", "if", "use_highly_variable", "else", "adata", "if", "chunked", ":", "if", "not", "zero_center", "or", "random_state", "or", "svd_solver", "!=", "'auto'", ":", "logg", ".", "msg", "(", "'Ignoring zero_center, random_state, svd_solver'", ",", "v", "=", "4", ")", "from", "sklearn", ".", "decomposition", "import", "IncrementalPCA", "X_pca", "=", "np", ".", "zeros", "(", "(", "adata_comp", ".", "X", ".", "shape", "[", "0", "]", ",", "n_comps", ")", ",", "adata_comp", ".", "X", ".", "dtype", ")", "pca_", "=", "IncrementalPCA", "(", "n_components", "=", "n_comps", ")", "for", "chunk", ",", "_", ",", "_", "in", "adata_comp", ".", "chunked_X", "(", "chunk_size", ")", ":", "chunk", "=", "chunk", ".", "toarray", "(", ")", "if", "issparse", "(", "chunk", ")", "else", "chunk", "pca_", ".", "partial_fit", "(", "chunk", ")", "for", "chunk", ",", "start", ",", "end", "in", "adata_comp", ".", "chunked_X", "(", "chunk_size", ")", ":", "chunk", "=", "chunk", ".", "toarray", "(", ")", "if", "issparse", "(", "chunk", ")", "else", "chunk", "X_pca", "[", "start", ":", "end", "]", "=", "pca_", ".", "transform", "(", "chunk", ")", "else", ":", "if", "zero_center", "is", "None", ":", "zero_center", "=", "not", "issparse", "(", "adata_comp", ".", "X", ")", "if", "zero_center", ":", "from", "sklearn", ".", "decomposition", "import", "PCA", "if", "issparse", "(", "adata_comp", ".", "X", ")", ":", "logg", ".", "msg", "(", "' as `zero_center=True`, '", "'sparse input is densified and may '", "'lead to huge memory consumption'", ",", "v", "=", "4", ")", "X", "=", "adata_comp", ".", "X", ".", "toarray", "(", ")", "# Copying the whole adata_comp.X here, could cause memory problems", "else", ":", "X", "=", "adata_comp", ".", "X", "pca_", "=", "PCA", "(", "n_components", "=", "n_comps", ",", "svd_solver", "=", "svd_solver", ",", "random_state", "=", "random_state", ")", "else", ":", "from", "sklearn", ".", "decomposition", "import", "TruncatedSVD", "logg", ".", "msg", "(", "' without zero-centering: \\n'", "' the explained variance does not correspond to the exact statistical defintion\\n'", "' the first component, e.g., might be heavily influenced by different means\\n'", "' the following components often resemble the exact PCA very closely'", ",", "v", "=", "4", ")", "pca_", "=", "TruncatedSVD", "(", "n_components", "=", "n_comps", ",", "random_state", "=", "random_state", ")", "X", "=", "adata_comp", ".", "X", "X_pca", "=", "pca_", ".", "fit_transform", "(", "X", ")", "if", "X_pca", ".", "dtype", ".", "descr", "!=", "np", ".", "dtype", "(", "dtype", ")", ".", "descr", ":", "X_pca", "=", "X_pca", ".", "astype", "(", "dtype", ")", "if", "data_is_AnnData", ":", "adata", ".", "obsm", "[", "'X_pca'", "]", "=", "X_pca", "if", "use_highly_variable", ":", "adata", ".", "varm", "[", "'PCs'", "]", "=", "np", ".", "zeros", "(", "shape", "=", "(", "adata", ".", "n_vars", ",", "n_comps", ")", ")", "adata", ".", "varm", "[", "'PCs'", "]", "[", "adata", ".", "var", "[", "'highly_variable'", "]", "]", "=", "pca_", ".", 
"components_", ".", "T", "else", ":", "adata", ".", "varm", "[", "'PCs'", "]", "=", "pca_", ".", "components_", ".", "T", "adata", ".", "uns", "[", "'pca'", "]", "=", "{", "}", "adata", ".", "uns", "[", "'pca'", "]", "[", "'variance'", "]", "=", "pca_", ".", "explained_variance_", "adata", ".", "uns", "[", "'pca'", "]", "[", "'variance_ratio'", "]", "=", "pca_", ".", "explained_variance_ratio_", "logg", ".", "info", "(", "' finished'", ",", "t", "=", "True", ")", "logg", ".", "msg", "(", "'and added\\n'", "' \\'X_pca\\', the PCA coordinates (adata.obs)\\n'", "' \\'PC1\\', \\'PC2\\', ..., the loadings (adata.var)\\n'", "' \\'pca_variance\\', the variance / eigenvalues (adata.uns)\\n'", "' \\'pca_variance_ratio\\', the variance ratio (adata.uns)'", ",", "v", "=", "4", ")", "return", "adata", "if", "copy", "else", "None", "else", ":", "logg", ".", "info", "(", "' finished'", ",", "t", "=", "True", ")", "if", "return_info", ":", "return", "X_pca", ",", "pca_", ".", "components_", ",", "pca_", ".", "explained_variance_ratio_", ",", "pca_", ".", "explained_variance_", "else", ":", "return", "X_pca" ]
41.773481
24.314917
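The zero_center branch above maps onto two scikit-learn estimators; a minimal sketch of that contrast on synthetic sparse data (not scanpy's API, just the underlying calls):

import numpy as np
from scipy import sparse
from sklearn.decomposition import PCA, TruncatedSVD

X = sparse.random(100, 50, density=0.1, format='csr', random_state=0)

# zero_center=False path: TruncatedSVD accepts sparse input directly
Xt_svd = TruncatedSVD(n_components=10, random_state=0).fit_transform(X)

# zero_center=True path: standard PCA needs a dense matrix to center
Xt_pca = PCA(n_components=10, svd_solver='arpack').fit_transform(X.toarray())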
def procrustes(source, target, scaling=True, reflection=True, reduction=False,
               oblique=False, oblique_rcond=-1, format_data=True):
    """
    Function to project from one space to another using Procrustean
    transformation (shift + scaling + rotation + reflection).

    The implementation of this function was based on the ProcrusteanMapper
    in pyMVPA: https://github.com/PyMVPA/PyMVPA

    See also: http://en.wikipedia.org/wiki/Procrustes_transformation

    Parameters
    ----------
    source : Numpy array
        Array to be aligned to target's coordinate system.

    target: Numpy array
        Source is aligned to this target space

    scaling : bool
        Estimate a global scaling factor for the transformation
        (no longer rigid body)

    reflection : bool
        Allow for the data to be reflected (so it might not be a rotation).
        Effective only for non-oblique transformations.

    reduction : bool
        If true, it is allowed to map into lower-dimensional space. Forward
        transformation might be suboptimal then and reverse transformation
        might not recover all original variance.

    oblique : bool
        Whether to allow a non-orthogonal transformation -- might heavily
        overfit the data if there are fewer samples than dimensions. Use
        `oblique_rcond`.

    oblique_rcond : float
        Cutoff for 'small' singular values to regularize the inverse. See
        :class:`~numpy.linalg.lstsq` for more information.

    Returns
    ----------
    aligned_source : Numpy array
        The array source is aligned to target and returned
    """
    def fit(source, target):
        datas = (source, target)
        sn, sm = source.shape
        tn, tm = target.shape

        # Check the sizes
        if sn != tn:
            raise ValueError("Data for both spaces should have the same "
                             "number of samples. Got %d in template and %d in target space"
                             % (sn, tn))

        # Sums of squares
        ssqs = [np.sum(d**2, axis=0) for d in datas]

        # XXX check for being invariant?
        #     needs to be tuned up properly and not raise but handle
        for i in range(2):
            if np.all(ssqs[i] <= np.abs((np.finfo(datas[i].dtype).eps * sn)**2)):
                raise ValueError("For now do not handle invariant in time datasets")

        norms = [np.sqrt(np.sum(ssq)) for ssq in ssqs]
        normed = [data / norm for (data, norm) in zip(datas, norms)]

        # add new blank dimensions to template space if needed
        if sm < tm:
            normed[0] = np.hstack((normed[0], np.zeros((sn, tm - sm))))

        if sm > tm:
            if reduction:
                normed[1] = np.hstack((normed[1], np.zeros((sn, sm - tm))))
            else:
                raise ValueError("reduction=False, so mapping from "
                                 "higher dimensionality "
                                 "template space is not supported. template space had %d "
                                 "while target %d dimensions (features)" % (sm, tm))

        source, target = normed
        if oblique:
            # Just do silly linear system of equations ;) or naive
            # inverse problem
            if sn == sm and tm == 1:
                T = np.linalg.solve(source, target)
            else:
                T = np.linalg.lstsq(source, target, rcond=oblique_rcond)[0]
            ss = 1.0
        else:
            # Orthogonal transformation
            # figure out optimal rotation
            U, s, Vh = np.linalg.svd(np.dot(target.T, source),
                                     full_matrices=False)
            T = np.dot(Vh.T, U.T)

            if not reflection:
                # then we need to assure that it is only rotation
                # "recipe" from
                # http://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem
                # for more info and original references, see
                # http://dx.doi.org/10.1007%2FBF02289451
                nsv = len(s)
                s[:-1] = 1
                s[-1] = np.linalg.det(T)
                T = np.dot(U[:, :nsv] * s, Vh)

        # figure out scale and final translation
        # XXX with reflection False -- not sure if here or there or anywhere...
ss = sum(s) # if we were to collect standardized distance # std_d = 1 - sD**2 # select out only relevant dimensions if sm != tm: T = T[:sm, :tm] # Assign projection if scaling: scale = ss * norms[1] / norms[0] proj = scale * T else: proj = T return proj def transform(data, proj): if proj is None: raise RuntimeError("Mapper needs to be trained before use.") d = np.asmatrix(data) # Do projection res = (d * proj).A return res if format_data: source, target = formatter([source, target]) # fit and transform proj = fit(source, target) return transform(source, proj)
[ "def", "procrustes", "(", "source", ",", "target", ",", "scaling", "=", "True", ",", "reflection", "=", "True", ",", "reduction", "=", "False", ",", "oblique", "=", "False", ",", "oblique_rcond", "=", "-", "1", ",", "format_data", "=", "True", ")", ":", "def", "fit", "(", "source", ",", "target", ")", ":", "datas", "=", "(", "source", ",", "target", ")", "sn", ",", "sm", "=", "source", ".", "shape", "tn", ",", "tm", "=", "target", ".", "shape", "# Check the sizes", "if", "sn", "!=", "tn", ":", "raise", "ValueError", "(", "\"Data for both spaces should have the same \"", "\"number of samples. Got %d in template and %d in target space\"", "%", "(", "sn", ",", "tn", ")", ")", "# Sums of squares", "ssqs", "=", "[", "np", ".", "sum", "(", "d", "**", "2", ",", "axis", "=", "0", ")", "for", "d", "in", "datas", "]", "# XXX check for being invariant?", "# needs to be tuned up properly and not raise but handle", "for", "i", "in", "range", "(", "2", ")", ":", "if", "np", ".", "all", "(", "ssqs", "[", "i", "]", "<=", "np", ".", "abs", "(", "(", "np", ".", "finfo", "(", "datas", "[", "i", "]", ".", "dtype", ")", ".", "eps", "*", "sn", ")", "**", "2", ")", ")", ":", "raise", "ValueError", "(", "\"For now do not handle invariant in time datasets\"", ")", "norms", "=", "[", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "ssq", ")", ")", "for", "ssq", "in", "ssqs", "]", "normed", "=", "[", "data", "/", "norm", "for", "(", "data", ",", "norm", ")", "in", "zip", "(", "datas", ",", "norms", ")", "]", "# add new blank dimensions to template space if needed", "if", "sm", "<", "tm", ":", "normed", "[", "0", "]", "=", "np", ".", "hstack", "(", "(", "normed", "[", "0", "]", ",", "np", ".", "zeros", "(", "(", "sn", ",", "tm", "-", "sm", ")", ")", ")", ")", "if", "sm", ">", "tm", ":", "if", "reduction", ":", "normed", "[", "1", "]", "=", "np", ".", "hstack", "(", "(", "normed", "[", "1", "]", ",", "np", ".", "zeros", "(", "(", "sn", ",", "sm", "-", "tm", ")", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"reduction=False, so mapping from \"", "\"higher dimensionality \"", "\"template space is not supported. 
template space had %d \"", "\"while target %d dimensions (features)\"", "%", "(", "sm", ",", "tm", ")", ")", "source", ",", "target", "=", "normed", "if", "oblique", ":", "# Just do silly linear system of equations ;) or naive", "# inverse problem", "if", "sn", "==", "sm", "and", "tm", "==", "1", ":", "T", "=", "np", ".", "linalg", ".", "solve", "(", "source", ",", "target", ")", "else", ":", "T", "=", "np", ".", "linalg", ".", "lstsq", "(", "source", ",", "target", ",", "rcond", "=", "oblique_rcond", ")", "[", "0", "]", "ss", "=", "1.0", "else", ":", "# Orthogonal transformation", "# figure out optimal rotation", "U", ",", "s", ",", "Vh", "=", "np", ".", "linalg", ".", "svd", "(", "np", ".", "dot", "(", "target", ".", "T", ",", "source", ")", ",", "full_matrices", "=", "False", ")", "T", "=", "np", ".", "dot", "(", "Vh", ".", "T", ",", "U", ".", "T", ")", "if", "not", "reflection", ":", "# then we need to assure that it is only rotation", "# \"recipe\" from", "# http://en.wikipedia.org/wiki/Orthogonal_Procrustes_problem", "# for more and info and original references, see", "# http://dx.doi.org/10.1007%2FBF02289451", "nsv", "=", "len", "(", "s", ")", "s", "[", ":", "-", "1", "]", "=", "1", "s", "[", "-", "1", "]", "=", "np", ".", "linalg", ".", "det", "(", "T", ")", "T", "=", "np", ".", "dot", "(", "U", "[", ":", ",", ":", "nsv", "]", "*", "s", ",", "Vh", ")", "# figure out scale and final translation", "# XXX with reflection False -- not sure if here or there or anywhere...", "ss", "=", "sum", "(", "s", ")", "# if we were to collect standardized distance", "# std_d = 1 - sD**2", "# select out only relevant dimensions", "if", "sm", "!=", "tm", ":", "T", "=", "T", "[", ":", "sm", ",", ":", "tm", "]", "# Assign projection", "if", "scaling", ":", "scale", "=", "ss", "*", "norms", "[", "1", "]", "/", "norms", "[", "0", "]", "proj", "=", "scale", "*", "T", "else", ":", "proj", "=", "T", "return", "proj", "def", "transform", "(", "data", ",", "proj", ")", ":", "if", "proj", "is", "None", ":", "raise", "RuntimeError", "(", "\"Mapper needs to be trained before use.\"", ")", "d", "=", "np", ".", "asmatrix", "(", "data", ")", "# Do projection", "res", "=", "(", "d", "*", "proj", ")", ".", "A", "return", "res", "if", "format_data", ":", "source", ",", "target", "=", "formatter", "(", "[", "source", ",", "target", "]", ")", "# fit and transform", "proj", "=", "fit", "(", "source", ",", "target", ")", "return", "transform", "(", "source", ",", "proj", ")" ]
32.966887
22.874172
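The orthogonal branch above is the classical SVD solution to the Procrustes problem; a self-contained numpy sketch that recovers a known rotation:

import numpy as np

def orthogonal_procrustes(source, target):
    # T minimizes ||source @ T - target||_F over orthogonal T
    U, s, Vh = np.linalg.svd(target.T @ source, full_matrices=False)
    return Vh.T @ U.T

rng = np.random.default_rng(0)
src = rng.normal(size=(20, 3))
rot, _ = np.linalg.qr(rng.normal(size=(3, 3)))  # a random orthogonal matrix
tgt = src @ rot
T = orthogonal_procrustes(src, tgt)
print(np.allclose(src @ T, tgt))  # True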
def fetch(weeks, force): """Fetch newest PageViews and Downloads.""" weeks = get_last_weeks(weeks) print(weeks) recommender = RecordRecommender(config) recommender.fetch_weeks(weeks, overwrite=force)
[ "def", "fetch", "(", "weeks", ",", "force", ")", ":", "weeks", "=", "get_last_weeks", "(", "weeks", ")", "print", "(", "weeks", ")", "recommender", "=", "RecordRecommender", "(", "config", ")", "recommender", ".", "fetch_weeks", "(", "weeks", ",", "overwrite", "=", "force", ")" ]
35.666667
10.166667
def create_constants(self, rdbms):
    """
    Factory for creating Constants objects (i.e. objects for creating
    constants based on column widths, and auto increment columns and labels).

    :param str rdbms: The target RDBMS (i.e. mysql, mssql or pgsql).

    :rtype: pystratum.Constants.Constants
    """
    # Note: We load modules and classes dynamically such that on the end user's system only the required modules
    #       and other dependencies for the targeted RDBMS must be installed (and required modules and other
    #       dependencies for the other RDBMSs are not required).
    if rdbms == 'mysql':
        module = locate('pystratum_mysql.MySqlConstants')
        return module.MySqlConstants(self.output)

    if rdbms == 'mssql':
        module = locate('pystratum_mssql.MsSqlConstants')
        return module.MsSqlConstants(self.output)

    if rdbms == 'pgsql':
        module = locate('pystratum_pgsql.PgSqlConstants')
        return module.PgSqlConstants(self.output)

    raise Exception("Unknown RDBMS '{0!s}'.".format(rdbms))
[ "def", "create_constants", "(", "self", ",", "rdbms", ")", ":", "# Note: We load modules and classes dynamically such that on the end user's system only the required modules", "# and other dependencies for the targeted RDBMS must be installed (and required modules and other", "# dependencies for the other RDBMSs are not required).", "if", "rdbms", "==", "'mysql'", ":", "module", "=", "locate", "(", "'pystratum_mysql.MySqlConstants'", ")", "return", "module", ".", "MySqlConstants", "(", "self", ".", "output", ")", "if", "rdbms", "==", "'mssql'", ":", "module", "=", "locate", "(", "'pystratum_mssql.MsSqlConstants'", ")", "return", "module", ".", "MsSqlConstants", "(", "self", ".", "output", ")", "if", "rdbms", "==", "'pgsql'", ":", "module", "=", "locate", "(", "'pystratum_pgsql.PgSqlConstants'", ")", "return", "module", ".", "PgSqlConstants", "(", "self", ".", "output", ")", "raise", "Exception", "(", "\"Unknown RDBMS '{0!s}'.\"", ".", "format", "(", "rdbms", ")", ")" ]
42.807692
26.884615
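`locate` here is the standard-library `pydoc.locate`, which resolves a dotted path and imports lazily; a minimal demonstration with a class from the standard library:

from pydoc import locate

cls = locate('collections.OrderedDict')
print(cls(a=1))  # an OrderedDict with one entry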
def load_from_module(self, module): '''Load all benchmarks from a given module''' benchmarks = [] for name in dir(module): obj = getattr(module, name) if (inspect.isclass(obj) and issubclass(obj, Benchmark) and obj != Benchmark): benchmarks.append(obj) return benchmarks
[ "def", "load_from_module", "(", "self", ",", "module", ")", ":", "benchmarks", "=", "[", "]", "for", "name", "in", "dir", "(", "module", ")", ":", "obj", "=", "getattr", "(", "module", ",", "name", ")", "if", "(", "inspect", ".", "isclass", "(", "obj", ")", "and", "issubclass", "(", "obj", ",", "Benchmark", ")", "and", "obj", "!=", "Benchmark", ")", ":", "benchmarks", ".", "append", "(", "obj", ")", "return", "benchmarks" ]
39.333333
10
def evaluate_all(ctx, model): """Evaluate POS taggers on WSJ and GENIA.""" click.echo('chemdataextractor.pos.evaluate_all') click.echo('Model: %s' % model) ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_wsj_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_wsj.pickle' % model, corpus='genia', clusters=True) ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_genia_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_genia.pickle' % model, corpus='genia', clusters=True) ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='wsj', clusters=False) ctx.invoke(evaluate, model='%s_wsj_genia_nocluster.pickle' % model, corpus='genia', clusters=False) ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='wsj', clusters=True) ctx.invoke(evaluate, model='%s_wsj_genia.pickle' % model, corpus='genia', clusters=True)
[ "def", "evaluate_all", "(", "ctx", ",", "model", ")", ":", "click", ".", "echo", "(", "'chemdataextractor.pos.evaluate_all'", ")", "click", ".", "echo", "(", "'Model: %s'", "%", "model", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj_nocluster.pickle'", "%", "model", ",", "corpus", "=", "'wsj'", ",", "clusters", "=", "False", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj_nocluster.pickle'", "%", "model", ",", "corpus", "=", "'genia'", ",", "clusters", "=", "False", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj.pickle'", "%", "model", ",", "corpus", "=", "'wsj'", ",", "clusters", "=", "True", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj.pickle'", "%", "model", ",", "corpus", "=", "'genia'", ",", "clusters", "=", "True", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_genia_nocluster.pickle'", "%", "model", ",", "corpus", "=", "'wsj'", ",", "clusters", "=", "False", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_genia_nocluster.pickle'", "%", "model", ",", "corpus", "=", "'genia'", ",", "clusters", "=", "False", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_genia.pickle'", "%", "model", ",", "corpus", "=", "'wsj'", ",", "clusters", "=", "True", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_genia.pickle'", "%", "model", ",", "corpus", "=", "'genia'", ",", "clusters", "=", "True", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj_genia_nocluster.pickle'", "%", "model", ",", "corpus", "=", "'wsj'", ",", "clusters", "=", "False", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj_genia_nocluster.pickle'", "%", "model", ",", "corpus", "=", "'genia'", ",", "clusters", "=", "False", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj_genia.pickle'", "%", "model", ",", "corpus", "=", "'wsj'", ",", "clusters", "=", "True", ")", "ctx", ".", "invoke", "(", "evaluate", ",", "model", "=", "'%s_wsj_genia.pickle'", "%", "model", ",", "corpus", "=", "'genia'", ",", "clusters", "=", "True", ")" ]
80.125
41.625
def enable(self, timeout=0): """ Enable the plugin. Args: timeout (int): Timeout in seconds. Default: 0 Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ self.client.api.enable_plugin(self.name, timeout) self.reload()
[ "def", "enable", "(", "self", ",", "timeout", "=", "0", ")", ":", "self", ".", "client", ".", "api", ".", "enable_plugin", "(", "self", ".", "name", ",", "timeout", ")", "self", ".", "reload", "(", ")" ]
27.384615
17.230769
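A usage sketch against the Docker SDK, assuming a reachable daemon and an already-installed plugin (the plugin name is only an example):

import docker

client = docker.from_env()
plugin = client.plugins.get('vieux/sshfs')
plugin.enable(timeout=30)
print(plugin.enabled)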
def _property_table(): """Download the PDB -> resolution table directly from the RCSB PDB REST service. See the other fields that you can get here: http://www.rcsb.org/pdb/results/reportField.do Returns: Pandas DataFrame: table of structureId as the index, resolution and experimentalTechnique as the columns """ url = 'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv' r = requests.get(url) p = pd.read_csv(StringIO(r.text)).set_index('structureId') return p
[ "def", "_property_table", "(", ")", ":", "url", "=", "'http://www.rcsb.org/pdb/rest/customReport.csv?pdbids=*&customReportColumns=structureId,resolution,experimentalTechnique,releaseDate&service=wsfile&format=csv'", "r", "=", "requests", ".", "get", "(", "url", ")", "p", "=", "pd", ".", "read_csv", "(", "StringIO", "(", "r", ".", "text", ")", ")", ".", "set_index", "(", "'structureId'", ")", "return", "p" ]
46.076923
37.384615
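The legacy RCSB endpoint may no longer respond, but the parsing pattern is plain pandas; a sketch with an inline stand-in for the CSV response:

from io import StringIO
import pandas as pd

csv_text = "structureId,resolution,experimentalTechnique\n1ABC,2.0,X-RAY DIFFRACTION\n"
table = pd.read_csv(StringIO(csv_text)).set_index('structureId')
print(table.loc['1ABC', 'resolution'])  # 2.0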
def apply(self, styles=None, verbose=False): """ Applies the specified style to the selected views and returns the SUIDs of the affected views. :param styles (string): Name of Style to be applied to the selected views. = ['Directed', 'BioPAX_SIF', 'Bridging Reads Histogram:unique_0', 'PSIMI 25 Style', 'Coverage Histogram:best&unique', 'Minimal', 'Bridging Reads Histogram:best&unique_0', 'Coverage Histogram_0', 'Big Labels', 'No Histogram:best&unique_0', 'Bridging Reads Histogram:best', 'No Histogram_0', 'No Histogram:best&unique', 'Bridging Reads Histogram_0', 'Ripple', 'Coverage Histogram:unique_0', 'Nested Network Style', 'Coverage Histogram:best', 'Coverage Histogram:best&unique_0', 'default black', 'No Histogram:best_0', 'No Histogram:unique', 'No Histogram:unique_0', 'Solid', 'Bridging Reads Histogram:unique', 'No Histogram:best', 'Coverage Histogram', 'BioPAX', 'Bridging Reads Histogram', 'Coverage Histogram:best_0', 'Sample1', 'Universe', 'Bridging Reads Histogram:best_0', 'Coverage Histogram:unique', 'Bridging Reads Histogram:best&unique', 'No Histogram', 'default'] :param verbose: print more :returns: SUIDs of the affected views """ PARAMS=set_param(["styles"],[styles]) response=api(url=self.__url+"/apply", PARAMS=PARAMS, method="POST", verbose=verbose) return response
[ "def", "apply", "(", "self", ",", "styles", "=", "None", ",", "verbose", "=", "False", ")", ":", "PARAMS", "=", "set_param", "(", "[", "\"styles\"", "]", ",", "[", "styles", "]", ")", "response", "=", "api", "(", "url", "=", "self", ".", "__url", "+", "\"/apply\"", ",", "PARAMS", "=", "PARAMS", ",", "method", "=", "\"POST\"", ",", "verbose", "=", "verbose", ")", "return", "response" ]
55.962963
29.148148
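An illustrative call to apply(), assuming an instance of the wrapper class connected to a running Cytoscape REST endpoint; the variable name styles_api is made up:

# styles_api is a hypothetical instance of the class that owns apply()
response = styles_api.apply(styles='default black', verbose=True)
print(response)  # SUIDs of the affected views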
def replace(s, pattern, replacement):
    """Replaces occurrences of a match string in a given string and returns
    the new string. The match string can be a regular expression.

    Args:
        s (str): the string to modify
        pattern (str): the search expression
        replacement (str): the string to replace each match with
    """
    # the replacement string may contain invalid backreferences (like \1 or \g)
    # which will cause python's regex to blow up. Since this should emulate
    # the jam version exactly and the jam version didn't support
    # backreferences, this version shouldn't either. re.sub
    # allows replacement to be a callable; this is being used
    # to simply return the replacement string and avoid the hassle
    # of worrying about backreferences within the string.
    def _replacement(matchobj):
        return replacement
    return re.sub(pattern, _replacement, s)
[ "def", "replace", "(", "s", ",", "pattern", ",", "replacement", ")", ":", "# the replacement string may contain invalid backreferences (like \\1 or \\g)", "# which will cause python's regex to blow up. Since this should emulate", "# the jam version exactly and the jam version didn't support", "# backreferences, this version shouldn't either. re.sub", "# allows replacement to be a callable; this is being used", "# to simply return the replacement string and avoid the hassle", "# of worrying about backreferences within the string.", "def", "_replacement", "(", "matchobj", ")", ":", "return", "replacement", "return", "re", ".", "sub", "(", "pattern", ",", "_replacement", ",", "s", ")" ]
45.7
17.25
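A short demonstration of why replace() wraps the replacement in a callable: a plain re.sub would try to interpret backreference-like sequences in the replacement string, while this version passes them through literally. Runnable as-is given the function above:

import re

print(replace('hello world', 'o', '0'))   # hell0 w0rld
print(replace('a.b', r'\.', r'\g'))       # 'a\gb' -- the '\g' survives literally
# re.sub(r'\.', r'\g', 'a.b') would instead raise re.error ("missing <")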
def clear(self, rows=None): """Reset episodes in the memory. Internally, this only sets their lengths to zero. The memory entries will be overridden by future calls to append() or replace(). Args: rows: Episodes to clear, defaults to all. Returns: Operation. """ rows = tf.range(self._capacity) if rows is None else rows assert rows.shape.ndims == 1 return tf.scatter_update(self._length, rows, tf.zeros_like(rows))
[ "def", "clear", "(", "self", ",", "rows", "=", "None", ")", ":", "rows", "=", "tf", ".", "range", "(", "self", ".", "_capacity", ")", "if", "rows", "is", "None", "else", "rows", "assert", "rows", ".", "shape", ".", "ndims", "==", "1", "return", "tf", ".", "scatter_update", "(", "self", ".", "_length", ",", "rows", ",", "tf", ".", "zeros_like", "(", "rows", ")", ")" ]
30.133333
22.466667
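A runnable TF1-style sketch of what clear() does to the backing length variable; the episode lengths and row indices are arbitrary:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

length = tf.Variable([3, 5, 2], dtype=tf.int32)  # per-episode lengths
rows = tf.constant([0, 2])                       # episodes to clear
clear_op = tf.scatter_update(length, rows, tf.zeros_like(rows))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(clear_op)
    print(sess.run(length))  # [0 5 0] -- cleared rows will be overwritten later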
def _create_decoration(self, selection_start, selection_end):
    """ Creates the text occurrences decoration """
    deco = TextDecoration(self.editor.document(), selection_start, selection_end)
    deco.set_background(QtGui.QBrush(self.background))
    deco.set_outline(self._outline)
    deco.set_foreground(QtCore.Qt.black)
    deco.draw_order = 1
    return deco
[ "def", "_create_decoration", "(", "self", ",", "selection_start", ",", "selection_end", ")", ":", "deco", "=", "TextDecoration", "(", "self", ".", "editor", ".", "document", "(", ")", ",", "selection_start", ",", "selection_end", ")", "deco", ".", "set_background", "(", "QtGui", ".", "QBrush", "(", "self", ".", "background", ")", ")", "deco", ".", "set_outline", "(", "self", ".", "_outline", ")", "deco", ".", "set_foreground", "(", "QtCore", ".", "Qt", ".", "black", ")", "deco", ".", "draw_order", "=", "1", "return", "deco" ]
46.222222
12.444444
def get_rev_id(localRepoPath):
    """Return the current full git revision ID of the specified local
    repository.

    Expected method of execution: python subroutine call

    Parameters
    ----------
    localRepoPath: string
        Local repository path.

    Returns
    -------
    Full git revision ID of the specified repository if everything ran OK,
    and "FAILURE" if something went wrong.
    """
    start_path = os.getcwd()
    try:
        os.chdir(localRepoPath)
        instream = os.popen("git --no-pager log --max-count=1 | head -1")
        for streamline in instream.readlines():
            streamline = streamline.strip()
            if streamline.startswith("commit "):
                rv = streamline.replace("commit ", "")
            else:
                raise ValueError("unexpected git log output")
    except Exception:
        rv = "FAILURE: git revision info not found"
    finally:
        os.chdir(start_path)
    return rv
[ "def", "get_rev_id", "(", "localRepoPath", ")", ":", "start_path", "=", "os", ".", "getcwd", "(", ")", "try", ":", "os", ".", "chdir", "(", "localRepoPath", ")", "instream", "=", "os", ".", "popen", "(", "\"git --no-pager log --max-count=1 | head -1\"", ")", "for", "streamline", "in", "instream", ".", "readlines", "(", ")", ":", "streamline", "=", "streamline", ".", "strip", "(", ")", "if", "streamline", ".", "startswith", "(", "\"commit \"", ")", ":", "rv", "=", "streamline", ".", "replace", "(", "\"commit \"", ",", "\"\"", ")", "else", ":", "raise", "except", ":", "rv", "=", "\"FAILURE: git revision info not found\"", "finally", ":", "os", ".", "chdir", "(", "start_path", ")", "return", "(", "rv", ")" ]
28.419355
22.16129
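For comparison, a subprocess-based sketch that avoids the shell pipeline and the chdir dance entirely; `git rev-parse HEAD` is the standard way to read the current revision:

import subprocess

def get_rev_id_subprocess(repo_path):
    """Return the full git revision ID of repo_path, or a FAILURE message."""
    try:
        out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_path)
        return out.decode('ascii').strip()
    except (subprocess.CalledProcessError, OSError):
        return 'FAILURE: git revision info not found'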
def filter(self, func, axis=(0,)): """ Filter array along an axis. Applies a function which should evaluate to boolean, along a single axis or multiple axes. Array will be aligned so that the desired set of axes are in the keys, which may require a transpose/reshape. Parameters ---------- func : function Function to apply, should return boolean axis : tuple or int, optional, default=(0,) Axis or multiple axes to filter along. Returns ------- BoltArrayLocal """ axes = sorted(tupleize(axis)) reshaped = self._align(axes) filtered = asarray(list(filter(func, reshaped))) return self._constructor(filtered)
[ "def", "filter", "(", "self", ",", "func", ",", "axis", "=", "(", "0", ",", ")", ")", ":", "axes", "=", "sorted", "(", "tupleize", "(", "axis", ")", ")", "reshaped", "=", "self", ".", "_align", "(", "axes", ")", "filtered", "=", "asarray", "(", "list", "(", "filter", "(", "func", ",", "reshaped", ")", ")", ")", "return", "self", ".", "_constructor", "(", "filtered", ")" ]
27.962963
18.777778
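The core of filter() is plain Python filtering over the leading axis; _align/tupleize only handle the transpose bookkeeping for other axes. A numpy-only sketch of the axis=(0,) case, with made-up data:

from numpy import arange, asarray

a = arange(12).reshape(4, 3)
kept = asarray(list(filter(lambda row: row.sum() > 10, a)))
print(kept.shape)  # (3, 3) -- only rows whose sum exceeds 10 survive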
def _delLocalOwnerRole(self, username): """Remove local owner role from parent object """ parent = self.getParent() if parent.portal_type == "Client": parent.manage_delLocalRoles([username]) # reindex object security self._recursive_reindex_object_security(parent)
[ "def", "_delLocalOwnerRole", "(", "self", ",", "username", ")", ":", "parent", "=", "self", ".", "getParent", "(", ")", "if", "parent", ".", "portal_type", "==", "\"Client\"", ":", "parent", ".", "manage_delLocalRoles", "(", "[", "username", "]", ")", "# reindex object security", "self", ".", "_recursive_reindex_object_security", "(", "parent", ")" ]
40.625
5.375
def helper_parallel_lines(start0, end0, start1, end1, filename): """Image for :func:`.parallel_lines_parameters` docstring.""" if NO_IMAGES: return figure = plt.figure() ax = figure.gca() points = stack1d(start0, end0, start1, end1) ax.plot(points[0, :2], points[1, :2], marker="o") ax.plot(points[0, 2:], points[1, 2:], marker="o") ax.axis("scaled") _plot_helpers.add_plot_boundary(ax) save_image(figure, filename)
[ "def", "helper_parallel_lines", "(", "start0", ",", "end0", ",", "start1", ",", "end1", ",", "filename", ")", ":", "if", "NO_IMAGES", ":", "return", "figure", "=", "plt", ".", "figure", "(", ")", "ax", "=", "figure", ".", "gca", "(", ")", "points", "=", "stack1d", "(", "start0", ",", "end0", ",", "start1", ",", "end1", ")", "ax", ".", "plot", "(", "points", "[", "0", ",", ":", "2", "]", ",", "points", "[", "1", ",", ":", "2", "]", ",", "marker", "=", "\"o\"", ")", "ax", ".", "plot", "(", "points", "[", "0", ",", "2", ":", "]", ",", "points", "[", "1", ",", "2", ":", "]", ",", "marker", "=", "\"o\"", ")", "ax", ".", "axis", "(", "\"scaled\"", ")", "_plot_helpers", ".", "add_plot_boundary", "(", "ax", ")", "save_image", "(", "figure", ",", "filename", ")" ]
34.769231
16.076923
async def metrics(self, offs, size=None): ''' Yield metrics rows starting at offset. Args: offs (int): The index offset. size (int): The maximum number of records to yield. Yields: ((int, dict)): An index offset, info tuple for metrics. ''' for i, (indx, item) in enumerate(self._metrics.iter(offs)): if size is not None and i >= size: return yield indx, item
[ "async", "def", "metrics", "(", "self", ",", "offs", ",", "size", "=", "None", ")", ":", "for", "i", ",", "(", "indx", ",", "item", ")", "in", "enumerate", "(", "self", ".", "_metrics", ".", "iter", "(", "offs", ")", ")", ":", "if", "size", "is", "not", "None", "and", "i", ">=", "size", ":", "return", "yield", "indx", ",", "item" ]
27.705882
23
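A consumption sketch for the async generator above; layr is a hypothetical object exposing metrics():

import asyncio

async def dump_metrics(layr):
    # Iterate (offset, info) pairs starting at offset 0, at most 10 rows
    async for offs, info in layr.metrics(0, size=10):
        print(offs, info)

# asyncio.run(dump_metrics(layr))  # given a real object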
def finished(self): """ Mark the activity as finished """ self.data_service.update_activity(self.id, self.name, self.desc, started_on=self.started, ended_on=self._current_timestamp_str())
[ "def", "finished", "(", "self", ")", ":", "self", ".", "data_service", ".", "update_activity", "(", "self", ".", "id", ",", "self", ".", "name", ",", "self", ".", "desc", ",", "started_on", "=", "self", ".", "started", ",", "ended_on", "=", "self", ".", "_current_timestamp_str", "(", ")", ")" ]
42.428571
17.571429
def print_http_nfc_lease_info(info): """ Prints information about the lease, such as the entity covered by the lease, and HTTP URLs for up/downloading file backings. :param info: :type info: vim.HttpNfcLease.Info :return: """ print 'Lease timeout: {0.leaseTimeout}\n' \ 'Disk Capacity KB: {0.totalDiskCapacityInKB}'.format(info) device_number = 1 if info.deviceUrl: for device_url in info.deviceUrl: print 'HttpNfcLeaseDeviceUrl: {1}\n' \ 'Device URL Import Key: {0.importKey}\n' \ 'Device URL Key: {0.key}\n' \ 'Device URL: {0.url}\n' \ 'Device URL Size: {0.fileSize}\n' \ 'SSL Thumbprint: {0.sslThumbprint}\n'.format(device_url, device_number) if not device_url.targetId: print "No targetId found for this device" print "Device is not eligible for export. This could be a mounted iso or img of some sort" print "It will NOT be downloaded\n" device_number += 1 else: print 'No devices were found.'
[ "def", "print_http_nfc_lease_info", "(", "info", ")", ":", "print", "'Lease timeout: {0.leaseTimeout}\\n'", "'Disk Capacity KB: {0.totalDiskCapacityInKB}'", ".", "format", "(", "info", ")", "device_number", "=", "1", "if", "info", ".", "deviceUrl", ":", "for", "device_url", "in", "info", ".", "deviceUrl", ":", "print", "'HttpNfcLeaseDeviceUrl: {1}\\n'", "'Device URL Import Key: {0.importKey}\\n'", "'Device URL Key: {0.key}\\n'", "'Device URL: {0.url}\\n'", "'Device URL Size: {0.fileSize}\\n'", "'SSL Thumbprint: {0.sslThumbprint}\\n'", ".", "format", "(", "device_url", ",", "device_number", ")", "if", "not", "device_url", ".", "targetId", ":", "print", "\"No targetId found for this device\"", "print", "\"Device is not eligible for export. This could be a mounted iso or img of some sort\"", "print", "\"It will NOT be downloaded\\n\"", "device_number", "+=", "1", "else", ":", "print", "'No devices were found.'" ]
42.107143
16.035714
def vcpu_pin(vm_, vcpu, cpus): ''' Set which CPUs a VCPU can use. CLI Example: .. code-block:: bash salt 'foo' virt.vcpu_pin domU-id 2 1 salt 'foo' virt.vcpu_pin domU-id 2 2-6 ''' with _get_xapi_session() as xapi: vm_uuid = _get_label_uuid(xapi, 'VM', vm_) if vm_uuid is False: return False # from xm's main def cpu_make_map(cpulist): cpus = [] for c in cpulist.split(','): if c == '': continue if '-' in c: (x, y) = c.split('-') for i in range(int(x), int(y) + 1): cpus.append(int(i)) else: # remove this element from the list if c[0] == '^': cpus = [x for x in cpus if x != int(c[1:])] else: cpus.append(int(c)) cpus.sort() return ','.join(map(str, cpus)) if cpus == 'all': cpumap = cpu_make_map('0-63') else: cpumap = cpu_make_map('{0}'.format(cpus)) try: xapi.VM.add_to_VCPUs_params_live(vm_uuid, 'cpumap{0}'.format(vcpu), cpumap) return True # VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has # a bug which makes the client call fail. # That code is accurate for all others XenAPI implementations, but # for that particular one, fallback to xm / xl instead. except Exception: return __salt__['cmd.run']( '{0} vcpu-pin {1} {2} {3}'.format(_get_xtool(), vm_, vcpu, cpus), python_shell=False)
[ "def", "vcpu_pin", "(", "vm_", ",", "vcpu", ",", "cpus", ")", ":", "with", "_get_xapi_session", "(", ")", "as", "xapi", ":", "vm_uuid", "=", "_get_label_uuid", "(", "xapi", ",", "'VM'", ",", "vm_", ")", "if", "vm_uuid", "is", "False", ":", "return", "False", "# from xm's main", "def", "cpu_make_map", "(", "cpulist", ")", ":", "cpus", "=", "[", "]", "for", "c", "in", "cpulist", ".", "split", "(", "','", ")", ":", "if", "c", "==", "''", ":", "continue", "if", "'-'", "in", "c", ":", "(", "x", ",", "y", ")", "=", "c", ".", "split", "(", "'-'", ")", "for", "i", "in", "range", "(", "int", "(", "x", ")", ",", "int", "(", "y", ")", "+", "1", ")", ":", "cpus", ".", "append", "(", "int", "(", "i", ")", ")", "else", ":", "# remove this element from the list", "if", "c", "[", "0", "]", "==", "'^'", ":", "cpus", "=", "[", "x", "for", "x", "in", "cpus", "if", "x", "!=", "int", "(", "c", "[", "1", ":", "]", ")", "]", "else", ":", "cpus", ".", "append", "(", "int", "(", "c", ")", ")", "cpus", ".", "sort", "(", ")", "return", "','", ".", "join", "(", "map", "(", "str", ",", "cpus", ")", ")", "if", "cpus", "==", "'all'", ":", "cpumap", "=", "cpu_make_map", "(", "'0-63'", ")", "else", ":", "cpumap", "=", "cpu_make_map", "(", "'{0}'", ".", "format", "(", "cpus", ")", ")", "try", ":", "xapi", ".", "VM", ".", "add_to_VCPUs_params_live", "(", "vm_uuid", ",", "'cpumap{0}'", ".", "format", "(", "vcpu", ")", ",", "cpumap", ")", "return", "True", "# VM.add_to_VCPUs_params_live() implementation in xend 4.1+ has", "# a bug which makes the client call fail.", "# That code is accurate for all others XenAPI implementations, but", "# for that particular one, fallback to xm / xl instead.", "except", "Exception", ":", "return", "__salt__", "[", "'cmd.run'", "]", "(", "'{0} vcpu-pin {1} {2} {3}'", ".", "format", "(", "_get_xtool", "(", ")", ",", "vm_", ",", "vcpu", ",", "cpus", ")", ",", "python_shell", "=", "False", ")" ]
32.886792
18.207547
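The inner cpu_make_map() helper is self-contained and easy to sanity-check outside salt; copied here verbatim with a small driver:

def cpu_make_map(cpulist):
    cpus = []
    for c in cpulist.split(','):
        if c == '':
            continue
        if '-' in c:
            (x, y) = c.split('-')
            for i in range(int(x), int(y) + 1):
                cpus.append(int(i))
        else:
            # remove this element from the list
            if c[0] == '^':
                cpus = [x for x in cpus if x != int(c[1:])]
            else:
                cpus.append(int(c))
    cpus.sort()
    return ','.join(map(str, cpus))

print(cpu_make_map('0-3,^2'))  # 0,1,3
print(cpu_make_map('1,5,7'))   # 1,5,7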
def to_tuple(self): """Cast to tuple. Returns ------- tuple The confusion table as a 4-tuple (tp, tn, fp, fn) Example ------- >>> ct = ConfusionTable(120, 60, 20, 30) >>> ct.to_tuple() (120, 60, 20, 30) """ return self._tp, self._tn, self._fp, self._fn
[ "def", "to_tuple", "(", "self", ")", ":", "return", "self", ".", "_tp", ",", "self", ".", "_tn", ",", "self", ".", "_fp", ",", "self", ".", "_fn" ]
21.25
21.25
def setPrefilter(self, edfsignal, prefilter):
    """
    Sets the prefilter of signal edfsignal ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.)

    :param edfsignal: int
    :param prefilter: str

    Notes
    -----
    This function is optional for every signal and can be called only after
    opening a file in writemode and before the first sample write action.
    """
    if edfsignal < 0 or edfsignal >= self.n_channels:
        raise ChannelDoesNotExist(edfsignal)
    self.channels[edfsignal]['prefilter'] = prefilter
    self.update_header()
[ "def", "setPrefilter", "(", "self", ",", "edfsignal", ",", "prefilter", ")", ":", "if", "edfsignal", "<", "0", "or", "edfsignal", ">", "self", ".", "n_channels", ":", "raise", "ChannelDoesNotExist", "(", "edfsignal", ")", "self", ".", "channels", "[", "edfsignal", "]", "[", "'prefilter'", "]", "=", "prefilter", "self", ".", "update_header", "(", ")" ]
38.133333
24.4
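A hedged pyedflib-style sketch showing the bounds check; the file name and filter strings are arbitrary, and a real writer also needs channel headers and sample data before closing. Note the >= comparison above is what makes the out-of-range index raise ChannelDoesNotExist instead of a raw IndexError:

import pyedflib

w = pyedflib.EdfWriter('demo.edf', 1)   # hypothetical one-channel file
w.setPrefilter(0, 'HP:0.1Hz LP:75Hz')   # OK: channel 0 exists
try:
    w.setPrefilter(1, 'N:50Hz')         # out of range for a one-channel file
except Exception as exc:
    print(exc)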
def getAdditionalImages(self): ''' The same as calling ``client.getAdditionalImages(build.setID)``. :returns: A list of URL strings. :rtype: list ''' self._additionalImages = self._client.getAdditionalImages(self.setID) return self._additionalImages
[ "def", "getAdditionalImages", "(", "self", ")", ":", "self", ".", "_additionalImages", "=", "self", ".", "_client", ".", "getAdditionalImages", "(", "self", ".", "setID", ")", "return", "self", ".", "_additionalImages" ]
29.8
24
def wnfltd(small, window): """ Filter (remove) small intervals from a double precision window. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnfltd_c.html :param small: Limiting measure of small intervals. :type small: float :param window: Window to be filtered. :type window: spiceypy.utils.support_types.SpiceCell :return: Filtered Window. :rtype: spiceypy.utils.support_types.SpiceCell """ assert isinstance(window, stypes.SpiceCell) assert window.dtype == 1 small = ctypes.c_double(small) libspice.wnfltd_c(small, ctypes.byref(window)) return window
[ "def", "wnfltd", "(", "small", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "small", "=", "ctypes", ".", "c_double", "(", "small", ")", "libspice", ".", "wnfltd_c", "(", "small", ",", "ctypes", ".", "byref", "(", "window", ")", ")", "return", "window" ]
33.888889
15.777778
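A sketch following the example in the NAIF documentation linked in the docstring; it assumes spiceypy is installed:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

win = stypes.SPICEDOUBLE_CELL(8)
for left, right in [(1.0, 3.0), (7.0, 11.0), (23.0, 27.0)]:
    spice.wninsd(left, right, win)
win = spice.wnfltd(3.0, win)      # drops [1, 3]: its measure 2 is below 3
print(spice.wncard(win))          # 2 intervals remain
print(spice.wnfetd(win, 0))       # (7.0, 11.0)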
def _check_file(parameters): """Return list of errors.""" (filename, args) = parameters if filename == '-': contents = sys.stdin.read() else: with contextlib.closing( docutils.io.FileInput(source_path=filename) ) as input_file: contents = input_file.read() args = load_configuration_from_file( os.path.dirname(os.path.realpath(filename)), args) ignore_directives_and_roles(args.ignore_directives, args.ignore_roles) for substitution in args.ignore_substitutions: contents = contents.replace('|{}|'.format(substitution), 'None') ignore = { 'languages': args.ignore_language, 'messages': args.ignore_messages, } all_errors = [] for error in check(contents, filename=filename, report_level=args.report, ignore=ignore, debug=args.debug): all_errors.append(error) return (filename, all_errors)
[ "def", "_check_file", "(", "parameters", ")", ":", "(", "filename", ",", "args", ")", "=", "parameters", "if", "filename", "==", "'-'", ":", "contents", "=", "sys", ".", "stdin", ".", "read", "(", ")", "else", ":", "with", "contextlib", ".", "closing", "(", "docutils", ".", "io", ".", "FileInput", "(", "source_path", "=", "filename", ")", ")", "as", "input_file", ":", "contents", "=", "input_file", ".", "read", "(", ")", "args", "=", "load_configuration_from_file", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "filename", ")", ")", ",", "args", ")", "ignore_directives_and_roles", "(", "args", ".", "ignore_directives", ",", "args", ".", "ignore_roles", ")", "for", "substitution", "in", "args", ".", "ignore_substitutions", ":", "contents", "=", "contents", ".", "replace", "(", "'|{}|'", ".", "format", "(", "substitution", ")", ",", "'None'", ")", "ignore", "=", "{", "'languages'", ":", "args", ".", "ignore_language", ",", "'messages'", ":", "args", ".", "ignore_messages", ",", "}", "all_errors", "=", "[", "]", "for", "error", "in", "check", "(", "contents", ",", "filename", "=", "filename", ",", "report_level", "=", "args", ".", "report", ",", "ignore", "=", "ignore", ",", "debug", "=", "args", ".", "debug", ")", ":", "all_errors", ".", "append", "(", "error", ")", "return", "(", "filename", ",", "all_errors", ")" ]
30.9375
16.4375
async def _handle_bad_notification(self, message): """ Adjusts the current state to be correct based on the received bad message notification whenever possible: bad_msg_notification#a7eff811 bad_msg_id:long bad_msg_seqno:int error_code:int = BadMsgNotification; """ bad_msg = message.obj states = self._pop_states(bad_msg.bad_msg_id) self._log.debug('Handling bad msg %s', bad_msg) if bad_msg.error_code in (16, 17): # Sent msg_id too low or too high (respectively). # Use the current msg_id to determine the right time offset. to = self._state.update_time_offset( correct_msg_id=message.msg_id) self._log.info('System clock is wrong, set time offset to %ds', to) elif bad_msg.error_code == 32: # msg_seqno too low, so just pump it up by some "large" amount # TODO A better fix would be to start with a new fresh session ID self._state._sequence += 64 elif bad_msg.error_code == 33: # msg_seqno too high never seems to happen but just in case self._state._sequence -= 16 else: for state in states: state.future.set_exception( BadMessageError(state.request, bad_msg.error_code)) return # Messages are to be re-sent once we've corrected the issue self._send_queue.extend(states) self._log.debug('%d messages will be resent due to bad msg', len(states))
[ "async", "def", "_handle_bad_notification", "(", "self", ",", "message", ")", ":", "bad_msg", "=", "message", ".", "obj", "states", "=", "self", ".", "_pop_states", "(", "bad_msg", ".", "bad_msg_id", ")", "self", ".", "_log", ".", "debug", "(", "'Handling bad msg %s'", ",", "bad_msg", ")", "if", "bad_msg", ".", "error_code", "in", "(", "16", ",", "17", ")", ":", "# Sent msg_id too low or too high (respectively).", "# Use the current msg_id to determine the right time offset.", "to", "=", "self", ".", "_state", ".", "update_time_offset", "(", "correct_msg_id", "=", "message", ".", "msg_id", ")", "self", ".", "_log", ".", "info", "(", "'System clock is wrong, set time offset to %ds'", ",", "to", ")", "elif", "bad_msg", ".", "error_code", "==", "32", ":", "# msg_seqno too low, so just pump it up by some \"large\" amount", "# TODO A better fix would be to start with a new fresh session ID", "self", ".", "_state", ".", "_sequence", "+=", "64", "elif", "bad_msg", ".", "error_code", "==", "33", ":", "# msg_seqno too high never seems to happen but just in case", "self", ".", "_state", ".", "_sequence", "-=", "16", "else", ":", "for", "state", "in", "states", ":", "state", ".", "future", ".", "set_exception", "(", "BadMessageError", "(", "state", ".", "request", ",", "bad_msg", ".", "error_code", ")", ")", "return", "# Messages are to be re-sent once we've corrected the issue", "self", ".", "_send_queue", ".", "extend", "(", "states", ")", "self", ".", "_log", ".", "debug", "(", "'%d messages will be resent due to bad msg'", ",", "len", "(", "states", ")", ")" ]
44.657143
17.685714
def Colebrook(Re, eD, tol=None):
    r'''Calculates Darcy friction factor using the Colebrook equation
    originally published in [1]_. Normally, this function uses an exact
    solution to the Colebrook equation, derived with a CAS. A numerical
    solution can also be used.

    .. math::
        \frac{1}{\sqrt{f}}=-2\log_{10}\left(\frac{\epsilon/D}{3.7}
        +\frac{2.51}{\text{Re}\sqrt{f}}\right)

    Parameters
    ----------
    Re : float
        Reynolds number, [-]
    eD : float
        Relative roughness, [-]
    tol : float, optional
        None for analytical solution (default); user specified value to use
        the numerical solution; 0 to use `mpmath` and provide a bit-correct
        exact solution to the maximum fidelity of the system's `float`;
        -1 to apply the Clamond solution where appropriate for greater speed
        (Re > 10), [-]

    Returns
    -------
    fd : float
        Darcy friction factor [-]

    Notes
    -----
    The solution is as follows:

    .. math::
        f_d = \frac{\ln(10)^2\cdot {3.7}^2\cdot{2.51}^2}
        {\left(\log(10)\epsilon/D\cdot\text{Re} - 2\cdot 2.51\cdot 3.7\cdot
        \text{lambertW}\left[\log(\sqrt{10})\sqrt{
        10^{\left(\frac{\epsilon \text{Re}}{2.51\cdot 3.7D}\right)}
        \cdot \text{Re}^2/{2.51}^2}\right]\right)^2}

    Some effort to optimize this function has been made. The `lambertw`
    function from scipy is used, and is defined to solve the specific function:

    .. math::
        y = x\exp(x)

        \text{lambertW}(y) = x

    This is relatively slow despite its explicit form as it uses the
    mathematical function `lambertw` which is expensive to compute.

    For high relative roughness and Reynolds numbers, an OverflowError can be
    encountered in the solution of this equation. The numerical solution is
    then used.

    The numerical solution provides values which are generally within an
    rtol of 1E-12 to the analytical solution; however, due to the different
    rounding order, it is possible for them to be as different as rtol 1E-5
    or higher. The 1E-5 accuracy regime has been tested and confirmed
    numerically for hundreds of thousands of points within the region
    1E-12 < Re < 1E12 and 0 < eD < 0.1.

    The numerical solution attempts the secant method using `scipy`'s
    `newton` solver, and in the event of nonconvergence, attempts the
    `fsolve` solver as well. An initial guess is provided via the `Clamond`
    function.

    The numerical and analytical solution take similar amounts of time; the
    `mpmath` solution used when `tol=0` is approximately 45 times slower.
    This function takes approximately 8 us normally.

    Examples
    --------
    >>> Colebrook(1E5, 1E-4)
    0.018513866077471648

    References
    ----------
    .. [1] Colebrook, C F."Turbulent Flow in Pipes, with Particular Reference
       to the Transition Region Between the Smooth and Rough Pipe Laws."
       Journal of the ICE 11, no. 4 (February 1, 1939): 133-156.
       doi:10.1680/ijoti.1939.13150.
''' if tol == -1: if Re > 10.0: return Clamond(Re, eD) else: tol = None elif tol == 0: # from sympy import LambertW, Rational, log, sqrt # Re = Rational(Re) # eD_Re = Rational(eD)*Re # sub = 1/Rational('6.3001')*10**(1/Rational('9.287')*eD_Re)*Re*Re # lambert_term = LambertW(log(sqrt(10))*sqrt(sub)) # den = log(10)*eD_Re - 18.574*lambert_term # return float(log(10)**2*Rational('3.7')**2*Rational('2.51')**2/(den*den)) try: from mpmath import mpf, log, sqrt, mp from mpmath import lambertw as mp_lambertw except: raise ImportError('For exact solutions, the `mpmath` library is ' 'required') mp.dps = 50 Re = mpf(Re) eD_Re = mpf(eD)*Re sub = 1/mpf('6.3001')*10**(1/mpf('9.287')*eD_Re)*Re*Re lambert_term = mp_lambertw(log(sqrt(10))*sqrt(sub)) den = log(10)*eD_Re - 18.574*lambert_term return float(log(10)**2*mpf('3.7')**2*mpf('2.51')**2/(den*den)) if tol is None: try: eD_Re = eD*Re # 9.287 = 2.51*3.7; 6.3001 = 2.51**2 # xn = 1/6.3001 = 0.15872763924382155 # 1/9.287 = 0.10767739851405189 sub = 0.15872763924382155*10.0**(0.10767739851405189*eD_Re)*Re*Re if isinf(sub): # Can't continue, need numerical approach raise OverflowError # 1.15129... = log(sqrt(10)) lambert_term = float(lambertw(1.151292546497022950546806896454654633998870849609375*sub**0.5).real) # log(10) = 2.302585...; 2*2.51*3.7 = 18.574 # 457.28... = log(10)**2*3.7**2*2.51**2 den = 2.30258509299404590109361379290930926799774169921875*eD_Re - 18.574*lambert_term return 457.28006463294371997108100913465023040771484375/(den*den) except OverflowError: pass # Either user-specified tolerance, or an error in the analytical solution if tol is None: tol = 1e-12 try: fd_guess = Clamond(Re, eD) except ValueError: fd_guess = Blasius(Re) def err(x): # Convert the newton search domain to always positive f_12_inv = abs(x)**-0.5 # 0.27027027027027023 = 1/3.7 return f_12_inv + 2.0*log10(eD*0.27027027027027023 + 2.51/Re*f_12_inv) try: fd = abs(newton(err, fd_guess, tol=tol)) if fd > 1E10: raise ValueError return fd except: from scipy.optimize import fsolve return abs(float(fsolve(err, fd_guess, xtol=tol)))
[ "def", "Colebrook", "(", "Re", ",", "eD", ",", "tol", "=", "None", ")", ":", "if", "tol", "==", "-", "1", ":", "if", "Re", ">", "10.0", ":", "return", "Clamond", "(", "Re", ",", "eD", ")", "else", ":", "tol", "=", "None", "elif", "tol", "==", "0", ":", "# from sympy import LambertW, Rational, log, sqrt", "# Re = Rational(Re)", "# eD_Re = Rational(eD)*Re", "# sub = 1/Rational('6.3001')*10**(1/Rational('9.287')*eD_Re)*Re*Re", "# lambert_term = LambertW(log(sqrt(10))*sqrt(sub))", "# den = log(10)*eD_Re - 18.574*lambert_term", "# return float(log(10)**2*Rational('3.7')**2*Rational('2.51')**2/(den*den))", "try", ":", "from", "mpmath", "import", "mpf", ",", "log", ",", "sqrt", ",", "mp", "from", "mpmath", "import", "lambertw", "as", "mp_lambertw", "except", ":", "raise", "ImportError", "(", "'For exact solutions, the `mpmath` library is '", "'required'", ")", "mp", ".", "dps", "=", "50", "Re", "=", "mpf", "(", "Re", ")", "eD_Re", "=", "mpf", "(", "eD", ")", "*", "Re", "sub", "=", "1", "/", "mpf", "(", "'6.3001'", ")", "*", "10", "**", "(", "1", "/", "mpf", "(", "'9.287'", ")", "*", "eD_Re", ")", "*", "Re", "*", "Re", "lambert_term", "=", "mp_lambertw", "(", "log", "(", "sqrt", "(", "10", ")", ")", "*", "sqrt", "(", "sub", ")", ")", "den", "=", "log", "(", "10", ")", "*", "eD_Re", "-", "18.574", "*", "lambert_term", "return", "float", "(", "log", "(", "10", ")", "**", "2", "*", "mpf", "(", "'3.7'", ")", "**", "2", "*", "mpf", "(", "'2.51'", ")", "**", "2", "/", "(", "den", "*", "den", ")", ")", "if", "tol", "is", "None", ":", "try", ":", "eD_Re", "=", "eD", "*", "Re", "# 9.287 = 2.51*3.7; 6.3001 = 2.51**2", "# xn = 1/6.3001 = 0.15872763924382155", "# 1/9.287 = 0.10767739851405189", "sub", "=", "0.15872763924382155", "*", "10.0", "**", "(", "0.10767739851405189", "*", "eD_Re", ")", "*", "Re", "*", "Re", "if", "isinf", "(", "sub", ")", ":", "# Can't continue, need numerical approach", "raise", "OverflowError", "# 1.15129... = log(sqrt(10))", "lambert_term", "=", "float", "(", "lambertw", "(", "1.151292546497022950546806896454654633998870849609375", "*", "sub", "**", "0.5", ")", ".", "real", ")", "# log(10) = 2.302585...; 2*2.51*3.7 = 18.574", "# 457.28... = log(10)**2*3.7**2*2.51**2", "den", "=", "2.30258509299404590109361379290930926799774169921875", "*", "eD_Re", "-", "18.574", "*", "lambert_term", "return", "457.28006463294371997108100913465023040771484375", "/", "(", "den", "*", "den", ")", "except", "OverflowError", ":", "pass", "# Either user-specified tolerance, or an error in the analytical solution", "if", "tol", "is", "None", ":", "tol", "=", "1e-12", "try", ":", "fd_guess", "=", "Clamond", "(", "Re", ",", "eD", ")", "except", "ValueError", ":", "fd_guess", "=", "Blasius", "(", "Re", ")", "def", "err", "(", "x", ")", ":", "# Convert the newton search domain to always positive", "f_12_inv", "=", "abs", "(", "x", ")", "**", "-", "0.5", "# 0.27027027027027023 = 1/3.7", "return", "f_12_inv", "+", "2.0", "*", "log10", "(", "eD", "*", "0.27027027027027023", "+", "2.51", "/", "Re", "*", "f_12_inv", ")", "try", ":", "fd", "=", "abs", "(", "newton", "(", "err", ",", "fd_guess", ",", "tol", "=", "tol", ")", ")", "if", "fd", ">", "1E10", ":", "raise", "ValueError", "return", "fd", "except", ":", "from", "scipy", ".", "optimize", "import", "fsolve", "return", "abs", "(", "float", "(", "fsolve", "(", "err", ",", "fd_guess", ",", "xtol", "=", "tol", ")", ")", ")" ]
38.724138
24.42069
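A standalone transcription of the analytical branch, using scipy's lambertw directly in place of the inlined float constants; it reproduces the docstring example to the shown precision:

from math import log, sqrt
from scipy.special import lambertw

def colebrook_exact(Re, eD):
    # Closed-form Colebrook solution via the Lambert W function
    eD_Re = eD * Re
    sub = 10.0 ** (eD_Re / 9.287) * Re * Re / 6.3001   # 9.287 = 2.51*3.7; 6.3001 = 2.51**2
    lw = float(lambertw(log(sqrt(10.0)) * sqrt(sub)).real)
    den = log(10.0) * eD_Re - 18.574 * lw              # 18.574 = 2*2.51*3.7
    return log(10.0) ** 2 * 3.7 ** 2 * 2.51 ** 2 / (den * den)

print(colebrook_exact(1E5, 1E-4))  # ~0.018513866077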
def set_hash_value(self, key, field, value, pipeline=False):
    """Set the value of field in a hash stored at key.

    Args:
        key (str): key (name) of the hash
        field (str): Field within the hash to set
        value: Value to set
        pipeline (bool): If True, queue the command on the current pipeline
            (transaction block) instead of executing it immediately.
            Default: False.
    """
    # FIXME(BMo): new name for this function -> save_dict_value ?
    if pipeline:
        self._pipeline.hset(key, field, str(value))
    else:
        self._db.hset(key, field, str(value))
[ "def", "set_hash_value", "(", "self", ",", "key", ",", "field", ",", "value", ",", "pipeline", "=", "False", ")", ":", "# FIXME(BMo): new name for this function -> save_dict_value ?", "if", "pipeline", ":", "self", ".", "_pipeline", ".", "hset", "(", "key", ",", "field", ",", "str", "(", "value", ")", ")", "else", ":", "self", ".", "_db", ".", "hset", "(", "key", ",", "field", ",", "str", "(", "value", ")", ")" ]
36.866667
19.333333
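A redis-py sketch of the underlying call; the key and field names are made up:

import redis

db = redis.Redis()
db.hset('exec:state', 'status', 'running')  # what set_hash_value does without a pipeline
print(db.hget('exec:state', 'status'))      # b'running'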
def receive(host, timeout):
    """
    Print all messages in queue.

    Args:
        host (str): Specified --host.
        timeout (int): How long the script should wait for a message.
    """
    parameters = settings.get_amqp_settings()[host]

    queues = parameters["queues"]
    queues = dict(map(lambda (x, y): (y, x), queues.items()))  # reverse items
    queue = queues[parameters["out_key"]]

    channel = _get_channel(host, timeout)

    for method_frame, properties, body in channel.consume(queue):
        print json.dumps({
            "method_frame": str(method_frame),
            "properties": str(properties),
            "body": body
        })
        print "-" * 79
        print
        channel.basic_ack(method_frame.delivery_tag)
[ "def", "receive", "(", "host", ",", "timeout", ")", ":", "parameters", "=", "settings", ".", "get_amqp_settings", "(", ")", "[", "host", "]", "queues", "=", "parameters", "[", "\"queues\"", "]", "queues", "=", "dict", "(", "map", "(", "lambda", "(", "x", ",", "y", ")", ":", "(", "y", ",", "x", ")", ",", "queues", ".", "items", "(", ")", ")", ")", "# reverse items", "queue", "=", "queues", "[", "parameters", "[", "\"out_key\"", "]", "]", "channel", "=", "_get_channel", "(", "host", ",", "timeout", ")", "for", "method_frame", ",", "properties", ",", "body", "in", "channel", ".", "consume", "(", "queue", ")", ":", "print", "json", ".", "dumps", "(", "{", "\"method_frame\"", ":", "str", "(", "method_frame", ")", ",", "\"properties\"", ":", "str", "(", "properties", ")", ",", "\"body\"", ":", "body", "}", ")", "print", "\"-\"", "*", "79", "print", "channel", ".", "basic_ack", "(", "method_frame", ".", "delivery_tag", ")" ]
29
17.8
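A Python 3 pika sketch of the same consume loop, with an inactivity timeout instead of an open-ended wait; the connection details and queue name are placeholders:

import json
import pika

conn = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = conn.channel()
for method_frame, properties, body in channel.consume('out_queue', inactivity_timeout=5):
    if method_frame is None:
        break  # nothing arrived within the timeout
    print(json.dumps({"body": body.decode('utf-8')}))
    print("-" * 79)
    channel.basic_ack(method_frame.delivery_tag)
conn.close()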
def readMyEC2Tag(tagName, connection=None):
    """
    Load an EC2 tag for the running instance and return its value.

    :param str tagName: Name of the tag to read
    :param connection: Optional boto connection
    """
    assert isinstance(tagName, basestring), ("tagName must be a string but is %r" % tagName)

    # Load metadata. if == {} we are on localhost
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html
    if not connection:
        # Assume AWS credentials are in the environment or the instance is using an IAM role
        connection = boto.ec2.connect_to_region(myRegion())

    return readInstanceTag(connection=connection,
                           instanceID=myInstanceID(),
                           tagName=tagName)
[ "def", "readMyEC2Tag", "(", "tagName", ",", "connection", "=", "None", ")", ":", "assert", "isinstance", "(", "tagName", ",", "basestring", ")", ",", "(", "\"tagName must be a string but is %r\"", "%", "tagName", ")", "# Load metadata. if == {} we are on localhost", "# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html", "if", "not", "connection", ":", "# Assume AWS credentials are in the environment or the instance is using an IAM role", "connection", "=", "boto", ".", "ec2", ".", "connect_to_region", "(", "myRegion", "(", ")", ")", "return", "readInstanceTag", "(", "connection", "=", "connection", ",", "instanceID", "=", "myInstanceID", "(", ")", ",", "tagName", "=", "tagName", ")" ]
38
20.631579
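Typical calls, assuming the code runs on an EC2 instance whose IAM role permits ec2:DescribeTags; the tag name and region are illustrative:

import boto.ec2

# Default path: region and credentials are discovered automatically
print(readMyEC2Tag('Name'))

# Or with an explicit boto2 connection
conn = boto.ec2.connect_to_region('us-east-1')  # region is a placeholder
print(readMyEC2Tag('Name', connection=conn))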