Dataset columns (with value-length statistics from the viewer):

repository_name            stringlengths   5 to 67
func_path_in_repository    stringlengths   4 to 234
func_name                  stringlengths   0 to 314
whole_func_string          stringlengths   52 to 3.87M
language                   stringclasses   6 values
func_code_string           stringlengths   39 to 1.84M
func_code_tokens           listlengths     15 to 672k
func_documentation_string  stringlengths   1 to 47.2k
func_documentation_tokens  listlengths     1 to 3.92k
split_name                 stringclasses   1 value
func_code_url              stringlengths   85 to 339
ultrabug/uhashring
uhashring/ring.py
HashRing.range
def range(self, key, size=None, unique=True):
    """Returns a generator of nodes' configuration available
    in the continuum/ring.

    :param key: the key to look for.
    :param size: limit the list to at most this number of nodes.
    :param unique: a node may only appear once in the list (default True).
    """
    all_nodes = set()
    if unique:
        size = size or len(self.runtime._nodes)
    else:
        all_nodes = []

    pos = self._get_pos(key)
    for key in self.runtime._keys[pos:]:
        nodename = self.runtime._ring[key]
        if unique:
            if nodename in all_nodes:
                continue
            all_nodes.add(nodename)
        else:
            all_nodes.append(nodename)
        yield self.runtime._nodes[nodename]
        if len(all_nodes) == size:
            break
    else:
        for i, key in enumerate(self.runtime._keys):
            if i < pos:
                nodename = self.runtime._ring[key]
                if unique:
                    if nodename in all_nodes:
                        continue
                    all_nodes.add(nodename)
                else:
                    all_nodes.append(nodename)
                yield self.runtime._nodes[nodename]
                if len(all_nodes) == size:
                    break
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring.py#L272-L310
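A minimal usage sketch for the entry above, assuming HashRing is importable from uhashring and accepts a list of node names (the patch_memcache entry below shows the dict form); the key and node names are illustrative:

# Walk the ring clockwise from the key's position, yielding each
# distinct node's configuration at most once (unique=True).
from uhashring import HashRing

ring = HashRing(nodes=['node1', 'node2', 'node3'])
for conf in ring.range('my-cache-key', size=2):
    print(conf)  # a node configuration dict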
ultrabug/uhashring
uhashring/monkey.py
patch_memcache
def patch_memcache():
    """Monkey patch python-memcached to implement our consistent hashring
    in its node selection and operations.
    """
    def _init(self, servers, *k, **kw):
        self._old_init(servers, *k, **kw)
        nodes = {}
        for server in self.servers:
            conf = {
                'hostname': server.ip,
                'instance': server,
                'port': server.port,
                'weight': server.weight
            }
            nodes[server.ip] = conf
        self.uhashring = HashRing(nodes)

    def _get_server(self, key):
        if isinstance(key, tuple):
            return self._old_get_server(key)
        for i in range(self._SERVER_RETRIES):
            for node in self.uhashring.range(key):
                if node['instance'].connect():
                    return node['instance'], key
        return None, None

    memcache = __import__('memcache')
    memcache.Client._old_get_server = memcache.Client._get_server
    memcache.Client._old_init = memcache.Client.__init__
    memcache.Client.__init__ = _init
    memcache.Client._get_server = _get_server
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/monkey.py#L8-L42
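A hedged sketch of how the patch would be applied; it assumes the python-memcached package is installed and that patch_memcache is exposed by uhashring.monkey, as the file path above indicates. The server addresses are illustrative:

# Apply the monkey patch before instantiating any client.
from uhashring.monkey import patch_memcache

patch_memcache()

import memcache

# From here on, Client.__init__ builds a HashRing from the server list
# and _get_server resolves keys through consistent hashing.
mc = memcache.Client(['127.0.0.1:11211', '127.0.0.2:11211'])
mc.set('some_key', 'some value')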
ultrabug/uhashring
uhashring/ring_meta.py
MetaRing._create_ring
def _create_ring(self, nodes):
    """Generate a ketama compatible continuum/ring.
    """
    for node_name, node_conf in nodes:
        for w in range(0, node_conf['vnodes'] * node_conf['weight']):
            self._distribution[node_name] += 1
            self._ring[self.hashi('%s-%s' % (node_name, w))] = node_name
    self._keys = sorted(self._ring.keys())
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_meta.py#L30-L37
ultrabug/uhashring
uhashring/ring_meta.py
MetaRing._remove_node
def _remove_node(self, node_name):
    """Remove the given node from the continuum/ring.

    :param node_name: the node name.
    """
    try:
        node_conf = self._nodes.pop(node_name)
    except Exception:
        raise KeyError('node \'{}\' not found, available nodes: {}'.format(
            node_name, self._nodes.keys()))
    else:
        self._distribution.pop(node_name)
        for w in range(0, node_conf['vnodes'] * node_conf['weight']):
            del self._ring[self.hashi('%s-%s' % (node_name, w))]
        self._keys = sorted(self._ring.keys())
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_meta.py#L39-L53
ultrabug/uhashring
uhashring/ring_ketama.py
KetamaRing.hashi
def hashi(self, key, replica=0):
    """Returns a ketama compatible hash from the given key.
    """
    dh = self._listbytes(md5(str(key).encode('utf-8')).digest())
    rd = replica * 4
    return (
        (dh[3 + rd] << 24) | (dh[2 + rd] << 16) |
        (dh[1 + rd] << 8) | dh[0 + rd])
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_ketama.py#L24-L31
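The bit arithmetic above reads four bytes of the MD5 digest in little-endian order, offset by the replica index. A standalone Python 3 sketch (without the _listbytes compatibility shim) that reproduces the same value:

from hashlib import md5

def ketama_hash(key, replica=0):
    """Little-endian 32-bit slice of the MD5 digest, one per replica."""
    dh = md5(str(key).encode('utf-8')).digest()
    rd = replica * 4
    # Equivalent to int.from_bytes(dh[rd:rd + 4], 'little')
    return (dh[3 + rd] << 24) | (dh[2 + rd] << 16) | (dh[1 + rd] << 8) | dh[0 + rd]

print(ketama_hash('node1-0', replica=0))  # one point on the continuum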
ultrabug/uhashring
uhashring/ring_ketama.py
KetamaRing._hashi_weight_generator
def _hashi_weight_generator(self, node_name, node_conf):
    """Calculate the weight factor of the given node and
    yield its hash key for every configured replica.

    :param node_name: the node name.
    """
    ks = (node_conf['vnodes'] * len(self._nodes) *
          node_conf['weight']) // self._weight_sum
    for w in range(0, ks):
        w_node_name = '%s-%s' % (node_name, w)
        for i in range(0, self._replicas):
            yield self.hashi(w_node_name, replica=i)
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_ketama.py#L33-L44
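A worked example of the weight factor, assuming the common ketama defaults of vnodes=40 and 4 replicas per point (neither value is stated in this dump): with three nodes of weight 1, _weight_sum is 3, so ks = (40 * 3 * 1) // 3 = 40 points per node, and each point yields 4 hashes, i.e. 160 continuum keys per node.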
ultrabug/uhashring
uhashring/ring_ketama.py
KetamaRing._create_ring
def _create_ring(self, nodes):
    """Generate a ketama compatible continuum/ring.
    """
    _weight_sum = 0
    for node_conf in self._nodes.values():
        _weight_sum += node_conf['weight']
    self._weight_sum = _weight_sum

    _distribution = Counter()
    _keys = []
    _ring = {}
    for node_name, node_conf in self._nodes.items():
        for h in self._hashi_weight_generator(node_name, node_conf):
            _ring[h] = node_name
            insort(_keys, h)
            _distribution[node_name] += 1
    self._distribution = _distribution
    self._keys = _keys
    self._ring = _ring
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_ketama.py#L54-L72
ultrabug/uhashring
uhashring/ring_ketama.py
KetamaRing._remove_node
def _remove_node(self, node_name):
    """Remove the given node from the continuum/ring.

    :param node_name: the node name.
    """
    try:
        self._nodes.pop(node_name)
    except Exception:
        raise KeyError('node \'{}\' not found, available nodes: {}'.format(
            node_name, self._nodes.keys()))
    else:
        self._create_ring(self._nodes)
python
train
https://github.com/ultrabug/uhashring/blob/2297471a392e28ed913b3276c2f48d0c01523375/uhashring/ring_ketama.py#L74-L85
gatagat/lap
setup.py
get_numpy_status
def get_numpy_status():
    """
    Returns a dictionary containing a boolean specifying whether NumPy
    is up-to-date, along with the version string (empty string if
    not installed).
    """
    numpy_status = {}
    try:
        import numpy
        numpy_version = numpy.__version__
        numpy_status['up_to_date'] = parse_version(
            numpy_version) >= parse_version(NUMPY_MIN_VERSION)
        numpy_status['version'] = numpy_version
    except ImportError:
        traceback.print_exc()
        numpy_status['up_to_date'] = False
        numpy_status['version'] = ""
    return numpy_status
python
train
https://github.com/gatagat/lap/blob/c2b6309ba246d18205a71228cdaea67210e1a039/setup.py#L119-L136
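A sketch of how such a helper is typically used in a setup script; parse_version (from pkg_resources) and NUMPY_MIN_VERSION are module-level names the function above assumes, and the version pin below is illustrative, not taken from the source:

# Hypothetical guard built on get_numpy_status().
NUMPY_MIN_VERSION = '1.10.1'  # illustrative minimum, not from the source

numpy_status = get_numpy_status()
if not numpy_status['up_to_date']:
    raise ImportError(
        'lap requires NumPy >= {} (found: {!r})'.format(
            NUMPY_MIN_VERSION, numpy_status['version']))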
gatagat/lap
lap/lapmod.py
lapmod
def lapmod(n, cc, ii, kk, fast=True, return_cost=True,
           fp_version=FP_DYNAMIC):
    """Solve sparse linear assignment problem using Jonker-Volgenant
    algorithm.

    n: number of rows of the assignment cost matrix
    cc: 1D array of all finite elements of the assignment cost matrix
    ii: 1D array of indices of the row starts in cc. The following must
        hold: ii[0] = 0 and ii[n] = len(cc).
    kk: 1D array of the column indices so that:
        cost[i, kk[ii[i] + k]] == cc[ii[i] + k]. Indices within one row
        must be sorted.
    return_cost: whether or not to return the assignment cost

    Returns (opt, x, y) where:
      opt: cost of the assignment
      x: vector of columns assigned to rows
      y: vector of rows assigned to columns
    or (x, y) if return_cost is not True.
    """
    # log = logging.getLogger('lapmod')
    check_cost(n, cc, ii, kk)
    if fast is True:
        # log.debug('[----CR & RT & ARR & augmentation ----]')
        x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version)
    else:
        cc = np.ascontiguousarray(cc, dtype=np.float64)
        ii = np.ascontiguousarray(ii, dtype=np.int32)
        kk = np.ascontiguousarray(kk, dtype=np.int32)
        x = np.empty((n,), dtype=np.int32)
        y = np.empty((n,), dtype=np.int32)
        v = np.empty((n,), dtype=np.float64)
        free_rows = np.empty((n,), dtype=np.int32)
        # log.debug('[----Column reduction & reduction transfer----]')
        n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v)
        # log.debug(
        #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
        if n_free_rows == 0:
            # log.info('Reduction solved it.')
            if return_cost is True:
                return get_cost(n, cc, ii, kk, x), x, y
            else:
                return x, y
        for it in range(2):
            # log.debug('[---Augmenting row reduction (iteration: %d)---]', it)
            n_free_rows = _pyarr(
                n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
            # log.debug(
            #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
            if n_free_rows == 0:
                # log.info('Augmenting row reduction solved it.')
                if return_cost is True:
                    return get_cost(n, cc, ii, kk, x), x, y
                else:
                    return x, y
        # log.info('[----Augmentation----]')
        _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
        # log.debug('x, y, v: %s %s %s', x, y, v)
    if return_cost is True:
        return get_cost(n, cc, ii, kk, x), x, y
    else:
        return x, y
python
train
https://github.com/gatagat/lap/blob/c2b6309ba246d18205a71228cdaea67210e1a039/lap/lapmod.py#L273-L341
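A hedged usage sketch: the sparse input is a CSR-style triple (cc, ii, kk). For a dense 3x3 cost matrix, ii holds the row start offsets and kk the column index of every stored cost. fast=False is used here because that path (shown above) explicitly casts the inputs; the matrix values are illustrative:

import numpy as np
from lap import lapmod

# Dense 3x3 cost matrix stored in the sparse (cc, ii, kk) layout.
cost = np.array([[4., 1., 3.],
                 [2., 0., 5.],
                 [3., 2., 2.]])
cc = cost.ravel()                       # all finite costs, row by row
ii = np.array([0, 3, 6, 9])             # row start offsets, ii[n] == len(cc)
kk = np.tile(np.arange(3), 3)           # column index of each stored cost

opt, x, y = lapmod(3, cc, ii, kk, fast=False)
print(opt, x, y)  # minimal total cost and the row/column assignments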
simonvh/genomepy
genomepy/provider.py
ProviderBase.register_provider
def register_provider(cls, provider):
    """Register method to keep list of providers."""
    def decorator(subclass):
        """Register as decorator function."""
        cls._providers[provider] = subclass
        subclass.name = provider
        return subclass
    return decorator
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/provider.py#L73-L80
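Presumably used as a class decorator on provider subclasses; a minimal sketch in which the provider name and subclass are purely illustrative:

# Hypothetical provider registration (names are illustrative).
@ProviderBase.register_provider('myprovider')
class MyProvider(ProviderBase):
    """Would then be returned by ProviderBase.create('myprovider')."""
    pass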
simonvh/genomepy
genomepy/provider.py
ProviderBase.tar_to_bigfile
def tar_to_bigfile(self, fname, outfile):
    """Convert tar of multiple FASTAs to one file."""
    fnames = []
    tmpdir = mkdtemp()

    # Extract files to temporary directory
    with tarfile.open(fname) as tar:
        tar.extractall(path=tmpdir)
    for root, _, files in os.walk(tmpdir):
        fnames += [os.path.join(root, fname) for fname in files]

    # Concatenate
    with open(outfile, "w") as out:
        for infile in fnames:
            for line in open(infile):
                out.write(line)
            os.unlink(infile)

    # Remove temp dir
    shutil.rmtree(tmpdir)
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/provider.py#L90-L109
simonvh/genomepy
genomepy/provider.py
ProviderBase.download_genome
def download_genome(self, name, genome_dir, localname=None, mask="soft",
                    regex=None, invert_match=False, version=None):
    """
    Download a (gzipped) genome file to a specific directory

    Parameters
    ----------
    name : str
        Genome / species name
    genome_dir : str
        Directory to install genome
    localname : str , optional
        Custom name for this genome.
    mask : str , optional
        Masking, soft, hard or none (all other strings)
    regex : str , optional
        Regular expression to select specific chromosome / scaffold names.
    invert_match : bool , optional
        Set to True to select all chromosomes that don't match the regex.
    version : str , optional
        Version (only for Ensembl)
    """
    genome_dir = os.path.expanduser(genome_dir)
    if not os.path.exists(genome_dir):
        os.makedirs(genome_dir)

    dbname, link = self.get_genome_download_link(name, mask=mask, version=version)
    myname = dbname
    if localname:
        myname = localname
    myname = myname.replace(" ", "_")

    gzipped = False
    if link.endswith(".gz"):
        gzipped = True

    if not os.path.exists(os.path.join(genome_dir, myname)):
        os.makedirs(os.path.join(genome_dir, myname))

    urlcleanup()
    response = urlopen(link)
    sys.stderr.write("downloading from {}...\n".format(link))
    down_dir = genome_dir
    fname = os.path.join(genome_dir, myname, myname + ".fa")
    if regex:
        down_dir = mkdtemp()
        os.mkdir(os.path.join(down_dir, myname))
        fname = os.path.join(down_dir, myname, myname + ".fa")
    with open(fname, "wb") as f_out:
        if gzipped:
            # Supports both Python 2.7 as well as 3
            with gzip.GzipFile(fileobj=io.BytesIO(response.read())) as f_in:
                shutil.copyfileobj(f_in, f_out)
        else:
            f_out.write(response.read())
    sys.stderr.write("done...\n")

    if link.endswith("tar.gz"):
        self.tar_to_bigfile(fname, fname)

    if hasattr(self, '_post_process_download'):
        self._post_process_download(name, down_dir, mask)

    if regex:
        infa = fname
        outfa = os.path.join(genome_dir, myname, myname + ".fa")
        filter_fasta(
            infa,
            outfa,
            regex=regex,
            v=invert_match,
            force=True
        )

        not_included = [k for k in Fasta(infa).keys()
                        if k not in Fasta(outfa).keys()]
        shutil.rmtree(down_dir)
        fname = outfa

    sys.stderr.write("name: {}\n".format(dbname))
    sys.stderr.write("local name: {}\n".format(myname))
    sys.stderr.write("fasta: {}\n".format(fname))

    # Create readme with information
    readme = os.path.join(genome_dir, myname, "README.txt")
    with open(readme, "w") as f:
        f.write("name: {}\n".format(myname))
        f.write("original name: {}\n".format(dbname))
        f.write("original filename: {}\n".format(os.path.split(link)[-1]))
        f.write("url: {}\n".format(link))
        f.write("mask: {}\n".format(mask))
        f.write("date: {}\n".format(time.strftime("%Y-%m-%d %H:%M:%S")))
        if regex:
            if invert_match:
                f.write("regex: {} (inverted match)\n".format(regex))
            else:
                f.write("regex: {}\n".format(regex))
            f.write("sequences that were excluded:\n")
            for seq in not_included:
                f.write("\t{}\n".format(seq))
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/provider.py#L111-L204
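A usage sketch, under the assumption that ProviderBase.create (seen in the functions.py entries below) returns a provider instance exposing this method; the genome name, directory, and regex are illustrative:

# Hypothetical download of a soft-masked genome (names illustrative).
p = ProviderBase.create('UCSC')
p.download_genome('sacCer3', '/data/genomes', mask='soft',
                  regex='chr[IVX]+', invert_match=False)
# Writes /data/genomes/sacCer3/sacCer3.fa plus a README.txt with metadata.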
simonvh/genomepy
genomepy/plugin.py
find_plugins
def find_plugins():
    """Locate and initialize all available plugins.
    """
    plugin_dir = os.path.dirname(os.path.realpath(__file__))
    plugin_dir = os.path.join(plugin_dir, "plugins")
    plugin_files = [x[:-3] for x in os.listdir(plugin_dir) if x.endswith(".py")]
    sys.path.insert(0, plugin_dir)
    for plugin in plugin_files:
        __import__(plugin)
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L30-L38
simonvh/genomepy
genomepy/plugin.py
convert
def convert(name):
    """Convert CamelCase to underscore

    Parameters
    ----------
    name : str
        Camelcase string

    Returns
    -------
    name : str
        Converted name
    """
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L40-L54
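These two substitutions are the standard CamelCase-to-snake_case recipe: the first splits before a capitalized word, the second before any remaining upper-case letter, and runs of capitals such as acronyms are kept together. For example:

print(convert('CamelCase'))   # camel_case
print(convert('HTTPServer'))  # http_server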
simonvh/genomepy
genomepy/plugin.py
init_plugins
def init_plugins():
    """Return dictionary of available plugins

    Returns
    -------
    plugins : dictionary
        key is plugin name, value Plugin object
    """
    find_plugins()
    d = {}
    for c in Plugin.__subclasses__():
        ins = c()
        if ins.name() in config.get("plugin", []):
            ins.activate()
        d[ins.name()] = ins
    return d
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L56-L74
simonvh/genomepy
genomepy/plugin.py
activate
def activate(name):
    """Activate plugin.

    Parameters
    ----------
    name : str
        Plugin name.
    """
    if name in plugins:
        plugins[name].activate()
    else:
        raise Exception("plugin {} not found".format(name))
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L76-L87
simonvh/genomepy
genomepy/plugin.py
deactivate
def deactivate(name):
    """Deactivate plugin.

    Parameters
    ----------
    name : str
        Plugin name.
    """
    if name in plugins:
        plugins[name].deactivate()
    else:
        raise Exception("plugin {} not found".format(name))
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/plugin.py#L89-L100
simonvh/genomepy
genomepy/functions.py
manage_config
def manage_config(cmd, *args):
    """Manage genomepy config file."""
    if cmd == "file":
        print(config.config_file)
    elif cmd == "show":
        with open(config.config_file) as f:
            print(f.read())
    elif cmd == "generate":
        fname = os.path.join(
            user_config_dir("genomepy"), "{}.yaml".format("genomepy")
        )
        if not os.path.exists(user_config_dir("genomepy")):
            os.makedirs(user_config_dir("genomepy"))
        with open(fname, "w") as fout:
            with open(config.config_file) as fin:
                fout.write(fin.read())
        print("Created config file {}".format(fname))
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L26-L44
simonvh/genomepy
genomepy/functions.py
list_available_genomes
def list_available_genomes(provider=None):
    """
    List all available genomes.

    Parameters
    ----------
    provider : str, optional
        List genomes from specific provider. Genomes from all
        providers will be returned if not specified.

    Returns
    -------
    list with genome names
    """
    if provider:
        providers = [ProviderBase.create(provider)]
    else:
        # if provider is not specified search all providers
        providers = [ProviderBase.create(p) for
                     p in ProviderBase.list_providers()]

    for p in providers:
        for row in p.list_available_genomes():
            yield [p.name] + list(row)
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L46-L69
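The function is a generator that prefixes each row with the provider name; a small consumption sketch (the per-provider column layout is not specified in this dump, and the provider name is illustrative):

# Iterate over every genome a provider reports as available.
for row in list_available_genomes(provider='UCSC'):
    provider_name = row[0]
    print(provider_name, *row[1:])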
simonvh/genomepy
genomepy/functions.py
list_installed_genomes
def list_installed_genomes(genome_dir=None):
    """
    List all installed genomes.

    Parameters
    ----------
    genome_dir : str
        Directory with installed genomes.

    Returns
    -------
    list with genome names
    """
    if not genome_dir:
        genome_dir = config.get("genome_dir", None)
    if not genome_dir:
        raise norns.exceptions.ConfigError("Please provide or configure a genome_dir")

    return [f for f in os.listdir(genome_dir) if
            _is_genome_dir(genome_dir + "/" + f)]
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L96-L115
simonvh/genomepy
genomepy/functions.py
search
def search(term, provider=None):
    """
    Search for a genome.

    If provider is specified, search only that specific provider, else
    search all providers. Both the name and description are used for the
    search. The search term is case-insensitive.

    Parameters
    ----------
    term : str
        Search term, case-insensitive.
    provider : str , optional
        Provider name

    Yields
    ------
    tuple
        genome information (name/identifier and description)
    """
    if provider:
        providers = [ProviderBase.create(provider)]
    else:
        # if provider is not specified search all providers
        providers = [ProviderBase.create(p) for
                     p in ProviderBase.list_providers()]

    for p in providers:
        for row in p.search(term):
            yield [x.encode('latin-1') for x in [p.name] + list(row)]
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L117-L147
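Note that results are yielded as latin-1 encoded bytes; a consumption sketch that decodes them for display (the search term is illustrative):

# Search all providers; each field arrives latin-1 encoded.
for row in search('xenopus'):
    print('\t'.join(field.decode('latin-1') for field in row))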
simonvh/genomepy
genomepy/functions.py
install_genome
def install_genome(name, provider, version=None, genome_dir=None,
                   localname=None, mask="soft", regex=None,
                   invert_match=False, annotation=False):
    """
    Install a genome.

    Parameters
    ----------
    name : str
        Genome name
    provider : str
        Provider name
    version : str
        Version (only for Ensembl)
    genome_dir : str , optional
        Where to store the fasta files
    localname : str , optional
        Custom name for this genome.
    mask : str , optional
        Default is 'soft', specify 'hard' for hard masking.
    regex : str , optional
        Regular expression to select specific chromosome / scaffold names.
    invert_match : bool , optional
        Set to True to select all chromosomes that don't match the regex.
    annotation : bool , optional
        If set to True, download gene annotation in BED and GTF format.
    """
    if not genome_dir:
        genome_dir = config.get("genome_dir", None)
    if not genome_dir:
        raise norns.exceptions.ConfigError("Please provide or configure a genome_dir")
    genome_dir = os.path.expanduser(genome_dir)
    localname = get_localname(name, localname)

    # Download genome from provider
    p = ProviderBase.create(provider)
    p.download_genome(
        name,
        genome_dir,
        version=version,
        mask=mask,
        localname=localname,
        regex=regex,
        invert_match=invert_match)

    if annotation:
        # Download annotation from provider
        p.download_annotation(name, genome_dir, localname=localname, version=version)

    g = Genome(localname, genome_dir=genome_dir)
    for plugin in get_active_plugins():
        plugin.after_genome_download(g)

    generate_env()
python
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L149-L209
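A minimal usage sketch; the genome name, provider, and directory are illustrative:

# Download hg38 from UCSC, hard-masked, plus gene annotation.
install_genome('hg38', 'UCSC', genome_dir='~/genomes',
               mask='hard', annotation=True)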
simonvh/genomepy
genomepy/functions.py
_weighted_selection
def _weighted_selection(l, n): """ Selects n random elements from a list of (weight, item) tuples. Based on code snippet by Nick Johnson """ cuml = [] items = [] total_weight = 0.0 for weight, item in l: total_weight += weight cuml.append(total_weight) items.append(item) return [items[bisect.bisect(cuml, random.random()*total_weight)] for _ in range(n)]
python
def _weighted_selection(l, n): cuml = [] items = [] total_weight = 0.0 for weight, item in l: total_weight += weight cuml.append(total_weight) items.append(item) return [items[bisect.bisect(cuml, random.random()*total_weight)] for _ in range(n)]
[ "def", "_weighted_selection", "(", "l", ",", "n", ")", ":", "cuml", "=", "[", "]", "items", "=", "[", "]", "total_weight", "=", "0.0", "for", "weight", ",", "item", "in", "l", ":", "total_weight", "+=", "weight", "cuml", ".", "append", "(", "total_weight", ")", "items", ".", "append", "(", "item", ")", "return", "[", "items", "[", "bisect", ".", "bisect", "(", "cuml", ",", "random", ".", "random", "(", ")", "*", "total_weight", ")", "]", "for", "_", "in", "range", "(", "n", ")", "]" ]
Selects n random elements from a list of (weight, item) tuples. Based on code snippet by Nick Johnson
[ "Selects", "n", "random", "elements", "from", "a", "list", "of", "(", "weight", "item", ")", "tuples", ".", "Based", "on", "code", "snippet", "by", "Nick", "Johnson" ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L223-L236
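The helper above calls bisect and random without showing the module-level imports; here is a self-contained sketch of the same cumulative-weight technique, runnable on its own (the function name and sample weights are illustrative).
import bisect
import random

def weighted_selection(pairs, n):
    # pairs: list of (weight, item) tuples; draws n items with
    # replacement, each with probability proportional to its weight.
    cuml, items, total = [], [], 0.0
    for weight, item in pairs:
        total += weight
        cuml.append(total)       # running cumulative weight
        items.append(item)
    return [items[bisect.bisect(cuml, random.random() * total)]
            for _ in range(n)]

# chr1 should dominate the sample roughly 9:1.
print(weighted_selection([(9.0, "chr1"), (1.0, "chrM")], 10))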
simonvh/genomepy
genomepy/functions.py
generate_exports
def generate_exports():
    """Generate export commands for setting environment variables.
    """
    env = []
    for name in list_installed_genomes():
        try:
            g = Genome(name)
            env_name = re.sub(r'[^\w]+', "_", name).upper()
            env.append("export {}={}".format(env_name, g.filename))
        except:
            pass
    return env
python
def generate_exports(): env = [] for name in list_installed_genomes(): try: g = Genome(name) env_name = re.sub(r'[^\w]+', "_", name).upper() env.append("export {}={}".format(env_name, g.filename)) except: pass return env
[ "def", "generate_exports", "(", ")", ":", "env", "=", "[", "]", "for", "name", "in", "list_installed_genomes", "(", ")", ":", "try", ":", "g", "=", "Genome", "(", "name", ")", "env_name", "=", "re", ".", "sub", "(", "r'[^\\w]+'", ",", "\"_\"", ",", "name", ")", ".", "upper", "(", ")", "env", ".", "append", "(", "\"export {}={}\"", ".", "format", "(", "env_name", ",", "g", ".", "filename", ")", ")", "except", ":", "pass", "return", "env" ]
Generate export commands for setting environment variables.
[ "Print", "export", "commands", "for", "setting", "environment", "variables", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L238-L249
simonvh/genomepy
genomepy/functions.py
generate_env
def generate_env(fname=None):
    """Generate file with exports.

    By default this is in .config/genomepy/exports.txt.

    Parameters
    ----------
    fname : str, optional
        Name of the output file.
    """
    config_dir = user_config_dir("genomepy")
    if os.path.exists(config_dir):
        fname = os.path.join(config_dir, "exports.txt")
    with open(fname, "w") as fout:
        for env in generate_exports():
            fout.write("{}\n".format(env))
python
def generate_env(fname=None): config_dir = user_config_dir("genomepy") if os.path.exists(config_dir): fname = os.path.join(config_dir, "exports.txt") with open(fname, "w") as fout: for env in generate_exports(): fout.write("{}\n".format(env))
[ "def", "generate_env", "(", "fname", "=", "None", ")", ":", "config_dir", "=", "user_config_dir", "(", "\"genomepy\"", ")", "if", "os", ".", "path", ".", "exists", "(", "config_dir", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "config_dir", ",", "\"exports.txt\"", ")", "with", "open", "(", "fname", ",", "\"w\"", ")", "as", "fout", ":", "for", "env", "in", "generate_exports", "(", ")", ":", "fout", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "env", ")", ")" ]
Generate file with exports.

By default this is in .config/genomepy/exports.txt.

Parameters
----------
fname : str, optional
    Name of the output file.
[ "Generate", "file", "with", "exports", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L251-L266
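One detail worth noting in generate_env above: whenever the genomepy config directory exists, the fname argument is silently replaced, and when it does not exist, open(None) would fail. A hedged variant that honors an explicit fname is sketched below; the function name is hypothetical, user_config_dir is assumed to come from the appdirs package (not shown in this record), and the sample export line is made up.
import os
from appdirs import user_config_dir  # assumed origin of user_config_dir

def generate_env_sketch(exports, fname=None):
    # Only fall back to the config directory when no fname was given.
    if fname is None:
        config_dir = user_config_dir("genomepy")
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        fname = os.path.join(config_dir, "exports.txt")
    with open(fname, "w") as fout:
        for env in exports:
            fout.write("{}\n".format(env))

# Illustrative export line; real values come from generate_exports().
generate_env_sketch(["export HG38=/data/hg38/hg38.fa"], fname="exports.txt")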
simonvh/genomepy
genomepy/functions.py
manage_plugins
def manage_plugins(command, plugin_names=None): """Enable or disable plugins. """ if plugin_names is None: plugin_names = [] active_plugins = config.get("plugin", []) plugins = init_plugins() if command == "enable": for name in plugin_names: if name not in plugins: raise ValueError("Unknown plugin: {}".format(name)) if name not in active_plugins: active_plugins.append(name) elif command == "disable": for name in plugin_names: if name in active_plugins: active_plugins.remove(name) elif command == "list": print("{:20}{}".format("plugin", "enabled")) for plugin in sorted(plugins): print("{:20}{}".format(plugin, {False:"", True:"*"}[plugin in active_plugins])) else: raise ValueError("Invalid plugin command") config["plugin"] = active_plugins config.save() if command in ["enable", "disable"]: print("Enabled plugins: {}".format(", ".join(sorted(active_plugins))))
python
def manage_plugins(command, plugin_names=None): if plugin_names is None: plugin_names = [] active_plugins = config.get("plugin", []) plugins = init_plugins() if command == "enable": for name in plugin_names: if name not in plugins: raise ValueError("Unknown plugin: {}".format(name)) if name not in active_plugins: active_plugins.append(name) elif command == "disable": for name in plugin_names: if name in active_plugins: active_plugins.remove(name) elif command == "list": print("{:20}{}".format("plugin", "enabled")) for plugin in sorted(plugins): print("{:20}{}".format(plugin, {False:"", True:"*"}[plugin in active_plugins])) else: raise ValueError("Invalid plugin command") config["plugin"] = active_plugins config.save() if command in ["enable", "disable"]: print("Enabled plugins: {}".format(", ".join(sorted(active_plugins))))
[ "def", "manage_plugins", "(", "command", ",", "plugin_names", "=", "None", ")", ":", "if", "plugin_names", "is", "None", ":", "plugin_names", "=", "[", "]", "active_plugins", "=", "config", ".", "get", "(", "\"plugin\"", ",", "[", "]", ")", "plugins", "=", "init_plugins", "(", ")", "if", "command", "==", "\"enable\"", ":", "for", "name", "in", "plugin_names", ":", "if", "name", "not", "in", "plugins", ":", "raise", "ValueError", "(", "\"Unknown plugin: {}\"", ".", "format", "(", "name", ")", ")", "if", "name", "not", "in", "active_plugins", ":", "active_plugins", ".", "append", "(", "name", ")", "elif", "command", "==", "\"disable\"", ":", "for", "name", "in", "plugin_names", ":", "if", "name", "in", "active_plugins", ":", "active_plugins", ".", "remove", "(", "name", ")", "elif", "command", "==", "\"list\"", ":", "print", "(", "\"{:20}{}\"", ".", "format", "(", "\"plugin\"", ",", "\"enabled\"", ")", ")", "for", "plugin", "in", "sorted", "(", "plugins", ")", ":", "print", "(", "\"{:20}{}\"", ".", "format", "(", "plugin", ",", "{", "False", ":", "\"\"", ",", "True", ":", "\"*\"", "}", "[", "plugin", "in", "active_plugins", "]", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Invalid plugin command\"", ")", "config", "[", "\"plugin\"", "]", "=", "active_plugins", "config", ".", "save", "(", ")", "if", "command", "in", "[", "\"enable\"", ",", "\"disable\"", "]", ":", "print", "(", "\"Enabled plugins: {}\"", ".", "format", "(", "\", \"", ".", "join", "(", "sorted", "(", "active_plugins", ")", ")", ")", ")" ]
Enable or disable plugins.
[ "Enable", "or", "disable", "plugins", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L514-L541
simonvh/genomepy
genomepy/functions.py
Genome.gap_sizes
def gap_sizes(self):
    """Return gap sizes per chromosome.

    Returns
    -------
    gap_sizes : dict
        a dictionary with chromosomes as keys and the total number of Ns as values
    """
    if not self._gap_sizes:
        gap_file = self.props["gaps"]["gaps"]

        self._gap_sizes = {}
        with open(gap_file) as f:
            for line in f:
                chrom, start, end = line.strip().split("\t")
                start, end = int(start), int(end)
                self._gap_sizes[chrom] = self._gap_sizes.get(chrom, 0) + end - start
    return self._gap_sizes
python
def gap_sizes(self): if not self._gap_sizes: gap_file = self.props["gaps"]["gaps"] self._gap_sizes = {} with open(gap_file) as f: for line in f: chrom, start, end = line.strip().split("\t") start, end = int(start), int(end) self._gap_sizes[chrom] = self._gap_sizes.get(chrom, 0) + end - start return self._gap_sizes
[ "def", "gap_sizes", "(", "self", ")", ":", "if", "not", "self", ".", "_gap_sizes", ":", "gap_file", "=", "self", ".", "props", "[", "\"gaps\"", "]", "[", "\"gaps\"", "]", "self", ".", "_gap_sizes", "=", "{", "}", "with", "open", "(", "gap_file", ")", "as", "f", ":", "for", "line", "in", "f", ":", "chrom", ",", "start", ",", "end", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "start", ",", "end", "=", "int", "(", "start", ")", ",", "int", "(", "end", ")", "self", ".", "_gap_sizes", "[", "chrom", "]", "=", "self", ".", "_gap_sizes", ".", "get", "(", "chrom", ",", "0", ")", "+", "end", "-", "start", "return", "self", ".", "_gap_sizes" ]
Return gap sizes per chromosome.

Returns
-------
gap_sizes : dict
    a dictionary with chromosomes as keys and the total number of Ns as values
[ "Return", "gap", "sizes", "per", "chromosome", ".", "Returns", "-------", "gap_sizes", ":", "dict", "a", "dictionary", "with", "chromosomes", "as", "key", "and", "the", "total", "number", "of", "Ns", "as", "values" ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L436-L453
simonvh/genomepy
genomepy/functions.py
Genome.get_random_sequences
def get_random_sequences(self, n=10, length=200, chroms=None, max_n=0.1): """Return random genomic sequences. Parameters ---------- n : int , optional Number of sequences to return. length : int , optional Length of sequences to return. chroms : list , optional Return sequences only from these chromosomes. max_n : float , optional Maximum fraction of Ns. Returns ------- coords : list List with [chrom, start, end] genomic coordinates. """ retries = 100 cutoff = length * max_n if not chroms: chroms = self.keys() try: gap_sizes = self.gap_sizes() except: gap_sizes = {} sizes = dict([(chrom, len(self[chrom]) - gap_sizes.get(chrom, 0)) for chrom in chroms]) l = [(sizes[x], x) for x in chroms if sizes[x] / len(self[x]) > 0.1 and sizes[x] > 10 * length] chroms = _weighted_selection(l, n) coords = [] count = {} for chrom in chroms: if chrom in count: count[chrom] += 1 else: count[chrom] = 1 for chrom in chroms: for i in range(retries): start = int(random.random() * (sizes[chrom] - length)) end = start + length count_n = self[chrom][start:end].seq.upper().count("N") if count_n <= cutoff: break if count_n > cutoff: raise ValueError("Failed to find suitable non-N sequence for {}".format(chrom)) coords.append([chrom, start, end]) return coords
python
def get_random_sequences(self, n=10, length=200, chroms=None, max_n=0.1): retries = 100 cutoff = length * max_n if not chroms: chroms = self.keys() try: gap_sizes = self.gap_sizes() except: gap_sizes = {} sizes = dict([(chrom, len(self[chrom]) - gap_sizes.get(chrom, 0)) for chrom in chroms]) l = [(sizes[x], x) for x in chroms if sizes[x] / len(self[x]) > 0.1 and sizes[x] > 10 * length] chroms = _weighted_selection(l, n) coords = [] count = {} for chrom in chroms: if chrom in count: count[chrom] += 1 else: count[chrom] = 1 for chrom in chroms: for i in range(retries): start = int(random.random() * (sizes[chrom] - length)) end = start + length count_n = self[chrom][start:end].seq.upper().count("N") if count_n <= cutoff: break if count_n > cutoff: raise ValueError("Failed to find suitable non-N sequence for {}".format(chrom)) coords.append([chrom, start, end]) return coords
[ "def", "get_random_sequences", "(", "self", ",", "n", "=", "10", ",", "length", "=", "200", ",", "chroms", "=", "None", ",", "max_n", "=", "0.1", ")", ":", "retries", "=", "100", "cutoff", "=", "length", "*", "max_n", "if", "not", "chroms", ":", "chroms", "=", "self", ".", "keys", "(", ")", "try", ":", "gap_sizes", "=", "self", ".", "gap_sizes", "(", ")", "except", ":", "gap_sizes", "=", "{", "}", "sizes", "=", "dict", "(", "[", "(", "chrom", ",", "len", "(", "self", "[", "chrom", "]", ")", "-", "gap_sizes", ".", "get", "(", "chrom", ",", "0", ")", ")", "for", "chrom", "in", "chroms", "]", ")", "l", "=", "[", "(", "sizes", "[", "x", "]", ",", "x", ")", "for", "x", "in", "chroms", "if", "sizes", "[", "x", "]", "/", "len", "(", "self", "[", "x", "]", ")", ">", "0.1", "and", "sizes", "[", "x", "]", ">", "10", "*", "length", "]", "chroms", "=", "_weighted_selection", "(", "l", ",", "n", ")", "coords", "=", "[", "]", "count", "=", "{", "}", "for", "chrom", "in", "chroms", ":", "if", "chrom", "in", "count", ":", "count", "[", "chrom", "]", "+=", "1", "else", ":", "count", "[", "chrom", "]", "=", "1", "for", "chrom", "in", "chroms", ":", "for", "i", "in", "range", "(", "retries", ")", ":", "start", "=", "int", "(", "random", ".", "random", "(", ")", "*", "(", "sizes", "[", "chrom", "]", "-", "length", ")", ")", "end", "=", "start", "+", "length", "count_n", "=", "self", "[", "chrom", "]", "[", "start", ":", "end", "]", ".", "seq", ".", "upper", "(", ")", ".", "count", "(", "\"N\"", ")", "if", "count_n", "<=", "cutoff", ":", "break", "if", "count_n", ">", "cutoff", ":", "raise", "ValueError", "(", "\"Failed to find suitable non-N sequence for {}\"", ".", "format", "(", "chrom", ")", ")", "coords", ".", "append", "(", "[", "chrom", ",", "start", ",", "end", "]", ")", "return", "coords" ]
Return random genomic sequences. Parameters ---------- n : int , optional Number of sequences to return. length : int , optional Length of sequences to return. chroms : list , optional Return sequences only from these chromosomes. max_n : float , optional Maximum fraction of Ns. Returns ------- coords : list List with [chrom, start, end] genomic coordinates.
[ "Return", "random", "genomic", "sequences", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/functions.py#L455-L512
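The chromosome filter in get_random_sequences above, sizes[x] / len(self[x]) > 0.1, behaves differently under Python 2, where / on two ints truncates. A stand-alone demonstration of the pitfall (the lengths are made-up example values):
# Effective (non-gap) length vs. total length of an assumed chromosome.
effective, total = 3000, 20000

print(effective / total > 0.1)         # True on Python 3; False on Python 2,
                                       # where 3000 / 20000 == 0
print(float(effective) / total > 0.1)  # True on both interpreter lines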
simonvh/genomepy
genomepy/cli.py
search
def search(term, provider=None): """Search for genomes that contain TERM in their name or description.""" for row in genomepy.search(term, provider): print("\t".join([x.decode('utf-8', 'ignore') for x in row]))
python
def search(term, provider=None): for row in genomepy.search(term, provider): print("\t".join([x.decode('utf-8', 'ignore') for x in row]))
[ "def", "search", "(", "term", ",", "provider", "=", "None", ")", ":", "for", "row", "in", "genomepy", ".", "search", "(", "term", ",", "provider", ")", ":", "print", "(", "\"\\t\"", ".", "join", "(", "[", "x", ".", "decode", "(", "'utf-8'", ",", "'ignore'", ")", "for", "x", "in", "row", "]", ")", ")" ]
Search for genomes that contain TERM in their name or description.
[ "Search", "for", "genomes", "that", "contain", "TERM", "in", "their", "name", "or", "description", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/cli.py#L20-L23
simonvh/genomepy
genomepy/cli.py
install
def install(name, provider, genome_dir, localname, mask, regex, match, annotation): """Install genome NAME from provider PROVIDER in directory GENOME_DIR.""" genomepy.install_genome( name, provider, genome_dir=genome_dir, localname=localname, mask=mask, regex=regex, invert_match=not(match), annotation=annotation)
python
def install(name, provider, genome_dir, localname, mask, regex, match, annotation): genomepy.install_genome( name, provider, genome_dir=genome_dir, localname=localname, mask=mask, regex=regex, invert_match=not(match), annotation=annotation)
[ "def", "install", "(", "name", ",", "provider", ",", "genome_dir", ",", "localname", ",", "mask", ",", "regex", ",", "match", ",", "annotation", ")", ":", "genomepy", ".", "install_genome", "(", "name", ",", "provider", ",", "genome_dir", "=", "genome_dir", ",", "localname", "=", "localname", ",", "mask", "=", "mask", ",", "regex", "=", "regex", ",", "invert_match", "=", "not", "(", "match", ")", ",", "annotation", "=", "annotation", ")" ]
Install genome NAME from provider PROVIDER in directory GENOME_DIR.
[ "Install", "genome", "NAME", "from", "provider", "PROVIDER", "in", "directory", "GENOME_DIR", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/cli.py#L34-L38
simonvh/genomepy
genomepy/utils.py
generate_gap_bed
def generate_gap_bed(fname, outname): """ Generate a BED file with gap locations. Parameters ---------- fname : str Filename of input FASTA file. outname : str Filename of output BED file. """ f = Fasta(fname) with open(outname, "w") as bed: for chrom in f.keys(): for m in re.finditer(r'N+', f[chrom][:].seq): bed.write("{}\t{}\t{}\n".format(chrom, m.start(0), m.end(0)))
python
def generate_gap_bed(fname, outname): f = Fasta(fname) with open(outname, "w") as bed: for chrom in f.keys(): for m in re.finditer(r'N+', f[chrom][:].seq): bed.write("{}\t{}\t{}\n".format(chrom, m.start(0), m.end(0)))
[ "def", "generate_gap_bed", "(", "fname", ",", "outname", ")", ":", "f", "=", "Fasta", "(", "fname", ")", "with", "open", "(", "outname", ",", "\"w\"", ")", "as", "bed", ":", "for", "chrom", "in", "f", ".", "keys", "(", ")", ":", "for", "m", "in", "re", ".", "finditer", "(", "r'N+'", ",", "f", "[", "chrom", "]", "[", ":", "]", ".", "seq", ")", ":", "bed", ".", "write", "(", "\"{}\\t{}\\t{}\\n\"", ".", "format", "(", "chrom", ",", "m", ".", "start", "(", "0", ")", ",", "m", ".", "end", "(", "0", ")", ")", ")" ]
Generate a BED file with gap locations. Parameters ---------- fname : str Filename of input FASTA file. outname : str Filename of output BED file.
[ "Generate", "a", "BED", "file", "with", "gap", "locations", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/utils.py#L10-L25
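A stand-alone demonstration of the gap-detection idea in generate_gap_bed above: each maximal run of Ns becomes one BED interval with 0-based, half-open coordinates (the sequence and chromosome name are made up).
import re

seq = "ACGTNNNNACGTNACGT"
for m in re.finditer(r'N+', seq):
    # BED uses 0-based start, exclusive end -- exactly what match spans give.
    print("chr_demo\t{}\t{}".format(m.start(0), m.end(0)))
# chr_demo    4    8
# chr_demo    12   13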
simonvh/genomepy
genomepy/utils.py
generate_sizes
def generate_sizes(name, genome_dir): """Generate a sizes file with length of sequences in FASTA file.""" fa = os.path.join(genome_dir, name, "{}.fa".format(name)) sizes = fa + ".sizes" g = Fasta(fa) with open(sizes, "w") as f: for seqname in g.keys(): f.write("{}\t{}\n".format(seqname, len(g[seqname])))
python
def generate_sizes(name, genome_dir): fa = os.path.join(genome_dir, name, "{}.fa".format(name)) sizes = fa + ".sizes" g = Fasta(fa) with open(sizes, "w") as f: for seqname in g.keys(): f.write("{}\t{}\n".format(seqname, len(g[seqname])))
[ "def", "generate_sizes", "(", "name", ",", "genome_dir", ")", ":", "fa", "=", "os", ".", "path", ".", "join", "(", "genome_dir", ",", "name", ",", "\"{}.fa\"", ".", "format", "(", "name", ")", ")", "sizes", "=", "fa", "+", "\".sizes\"", "g", "=", "Fasta", "(", "fa", ")", "with", "open", "(", "sizes", ",", "\"w\"", ")", "as", "f", ":", "for", "seqname", "in", "g", ".", "keys", "(", ")", ":", "f", ".", "write", "(", "\"{}\\t{}\\n\"", ".", "format", "(", "seqname", ",", "len", "(", "g", "[", "seqname", "]", ")", ")", ")" ]
Generate a sizes file with length of sequences in FASTA file.
[ "Generate", "a", "sizes", "file", "with", "length", "of", "sequences", "in", "FASTA", "file", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/utils.py#L27-L34
simonvh/genomepy
genomepy/utils.py
filter_fasta
def filter_fasta(infa, outfa, regex=".*", v=False, force=False):
    """Filter fasta file based on regex.

    Parameters
    ----------
    infa : str
        Filename of input fasta file.

    outfa : str
        Filename of output fasta file. Cannot be the same as infa.

    regex : str, optional
        Regular expression used for selecting sequences.

    v : bool, optional
        If set to True, select all sequences *not* matching regex.

    force : bool, optional
        If set to True, overwrite outfa if it already exists.

    Returns
    -------
        fasta : Fasta instance
            pyfaidx Fasta instance of newly created file
    """
    if infa == outfa:
        raise ValueError("Input and output FASTA are the same file.")

    if os.path.exists(outfa):
        if force:
            os.unlink(outfa)
            if os.path.exists(outfa + ".fai"):
                os.unlink(outfa + ".fai")
        else:
            raise ValueError(
                "{} already exists, set force to True to overwrite".format(outfa))

    filt_function = re.compile(regex).search
    fa = Fasta(infa, filt_function=filt_function)
    seqs = fa.keys()
    if v:
        original_fa = Fasta(infa)
        seqs = [s for s in original_fa.keys() if s not in seqs]
        fa = original_fa

    if len(seqs) == 0:
        raise ValueError("No sequences left after filtering!")

    with open(outfa, "w") as out:
        for chrom in seqs:
            out.write(">{}\n".format(fa[chrom].name))
            out.write("{}\n".format(fa[chrom][:].seq))

    return Fasta(outfa)
python
def filter_fasta(infa, outfa, regex=".*", v=False, force=False): if infa == outfa: raise ValueError("Input and output FASTA are the same file.") if os.path.exists(outfa): if force: os.unlink(outfa) if os.path.exists(outfa + ".fai"): os.unlink(outfa + ".fai") else: raise ValueError( "{} already exists, set force to True to overwrite".format(outfa)) filt_function = re.compile(regex).search fa = Fasta(infa, filt_function=filt_function) seqs = fa.keys() if v: original_fa = Fasta(infa) seqs = [s for s in original_fa.keys() if s not in seqs] fa = original_fa if len(seqs) == 0: raise ValueError("No sequences left after filtering!") with open(outfa, "w") as out: for chrom in seqs: out.write(">{}\n".format(fa[chrom].name)) out.write("{}\n".format(fa[chrom][:].seq)) return Fasta(outfa)
[ "def", "filter_fasta", "(", "infa", ",", "outfa", ",", "regex", "=", "\".*\"", ",", "v", "=", "False", ",", "force", "=", "False", ")", ":", "if", "infa", "==", "outfa", ":", "raise", "ValueError", "(", "\"Input and output FASTA are the same file.\"", ")", "if", "os", ".", "path", ".", "exists", "(", "outfa", ")", ":", "if", "force", ":", "os", ".", "unlink", "(", "outfa", ")", "if", "os", ".", "path", ".", "exists", "(", "outfa", "+", "\".fai\"", ")", ":", "os", ".", "unlink", "(", "outfa", "+", "\".fai\"", ")", "else", ":", "raise", "ValueError", "(", "\"{} already exists, set force to True to overwrite\"", ".", "format", "(", "outfa", ")", ")", "filt_function", "=", "re", ".", "compile", "(", "regex", ")", ".", "search", "fa", "=", "Fasta", "(", "infa", ",", "filt_function", "=", "filt_function", ")", "seqs", "=", "fa", ".", "keys", "(", ")", "if", "v", ":", "original_fa", "=", "Fasta", "(", "infa", ")", "seqs", "=", "[", "s", "for", "s", "in", "original_fa", ".", "keys", "(", ")", "if", "s", "not", "in", "seqs", "]", "fa", "=", "original_fa", "if", "len", "(", "seqs", ")", "==", "0", ":", "raise", "ValueError", "(", "\"No sequences left after filtering!\"", ")", "with", "open", "(", "outfa", ",", "\"w\"", ")", "as", "out", ":", "for", "chrom", "in", "seqs", ":", "out", ".", "write", "(", "\">{}\\n\"", ".", "format", "(", "fa", "[", "chrom", "]", ".", "name", ")", ")", "out", ".", "write", "(", "\"{}\\n\"", ".", "format", "(", "fa", "[", "chrom", "]", "[", ":", "]", ".", "seq", ")", ")", "return", "Fasta", "(", "outfa", ")" ]
Filter fasta file based on regex.

Parameters
----------
infa : str
    Filename of input fasta file.

outfa : str
    Filename of output fasta file. Cannot be the same as infa.

regex : str, optional
    Regular expression used for selecting sequences.

v : bool, optional
    If set to True, select all sequences *not* matching regex.

force : bool, optional
    If set to True, overwrite outfa if it already exists.

Returns
-------
    fasta : Fasta instance
        pyfaidx Fasta instance of newly created file
[ "Filter", "fasta", "file", "based", "on", "regex", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/utils.py#L36-L89
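A usage sketch for filter_fasta above; the file names and pattern are illustrative, and pyfaidx must be able to index the input file.
from genomepy.utils import filter_fasta

# Keep only sequences whose names contain "chr"; force=True replaces any
# previous output (and its .fai index).
kept = filter_fasta("genome.fa", "chromosomes.fa", regex="chr", force=True)
print(list(kept.keys()))

# v=True inverts the selection: everything *not* matching the regex.
filter_fasta("genome.fa", "scaffolds.fa", regex="chr", v=True, force=True)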
simonvh/genomepy
genomepy/utils.py
cmd_ok
def cmd_ok(cmd): """Returns True if cmd can be run. """ try: sp.check_call(cmd, stderr=sp.PIPE, stdout=sp.PIPE) except sp.CalledProcessError: # bwa gives return code of 1 with no argument pass except: sys.stderr.write("{} not found, skipping\n".format(cmd)) return False return True
python
def cmd_ok(cmd): try: sp.check_call(cmd, stderr=sp.PIPE, stdout=sp.PIPE) except sp.CalledProcessError: pass except: sys.stderr.write("{} not found, skipping\n".format(cmd)) return False return True
[ "def", "cmd_ok", "(", "cmd", ")", ":", "try", ":", "sp", ".", "check_call", "(", "cmd", ",", "stderr", "=", "sp", ".", "PIPE", ",", "stdout", "=", "sp", ".", "PIPE", ")", "except", "sp", ".", "CalledProcessError", ":", "# bwa gives return code of 1 with no argument", "pass", "except", ":", "sys", ".", "stderr", ".", "write", "(", "\"{} not found, skipping\\n\"", ".", "format", "(", "cmd", ")", ")", "return", "False", "return", "True" ]
Returns True if cmd can be run.
[ "Returns", "True", "if", "cmd", "can", "be", "run", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/utils.py#L101-L112
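The CalledProcessError branch in cmd_ok above exists because some tools (the code's comment names bwa) exit non-zero when run without arguments; only a missing executable should count as failure. An illustrative check follows; which tools exist depends entirely on the host system.
from genomepy.utils import cmd_ok

if cmd_ok("bwa"):                  # True even though bare `bwa` exits 1
    print("bwa found, building index")
if not cmd_ok("no-such-indexer"):  # lookup fails: warning printed, False
    print("skipping this aligner")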
simonvh/genomepy
genomepy/utils.py
run_index_cmd
def run_index_cmd(name, cmd): """Run command, show errors if the returncode is non-zero.""" sys.stderr.write("Creating {} index...\n".format(name)) # Create index p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: sys.stderr.write("Index for {} failed\n".format(name)) sys.stderr.write(stdout) sys.stderr.write(stderr)
python
def run_index_cmd(name, cmd): sys.stderr.write("Creating {} index...\n".format(name)) p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, stderr = p.communicate() if p.returncode != 0: sys.stderr.write("Index for {} failed\n".format(name)) sys.stderr.write(stdout) sys.stderr.write(stderr)
[ "def", "run_index_cmd", "(", "name", ",", "cmd", ")", ":", "sys", ".", "stderr", ".", "write", "(", "\"Creating {} index...\\n\"", ".", "format", "(", "name", ")", ")", "# Create index", "p", "=", "sp", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "sp", ".", "PIPE", ",", "stderr", "=", "sp", ".", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "if", "p", ".", "returncode", "!=", "0", ":", "sys", ".", "stderr", ".", "write", "(", "\"Index for {} failed\\n\"", ".", "format", "(", "name", ")", ")", "sys", ".", "stderr", ".", "write", "(", "stdout", ")", "sys", ".", "stderr", ".", "write", "(", "stderr", ")" ]
Run command, show errors if the returncode is non-zero.
[ "Run", "command", "show", "errors", "if", "the", "returncode", "is", "non", "-", "zero", "." ]
train
https://github.com/simonvh/genomepy/blob/abace2366511dbe855fe1430b1f7d9ec4cbf6d29/genomepy/utils.py#L114-L123
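On Python 3, p.communicate() in run_index_cmd above returns bytes, so sys.stderr.write(stdout) raises TypeError on failure. A hedged sketch of one fix, decoding before writing (the command is illustrative and assumes a POSIX shell):
import subprocess as sp
import sys

p = sp.Popen("exit 1", shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
    # Decode defensively; index tools may emit non-UTF-8 bytes.
    sys.stderr.write(stdout.decode("utf-8", errors="replace"))
    sys.stderr.write(stderr.decode("utf-8", errors="replace"))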
peo3/cgroup-utils
cgutils/cgroup.py
scan_cgroups
def scan_cgroups(subsys_name, filters=list()):
    """
    It returns a control group hierarchy which belongs to the subsys_name.
    When collecting cgroups, filters are applied to the cgroups. See pydoc
    of apply_filters method of CGroup for more information about the filters.
    """
    status = SubsystemStatus()
    if subsys_name not in status.get_all():
        raise NoSuchSubsystemError("No such subsystem found: " + subsys_name)

    if subsys_name not in status.get_available():
        raise EnvironmentError("Disabled in the kernel: " + subsys_name)

    if subsys_name not in status.get_enabled():
        raise EnvironmentError("Not enabled in the system: " + subsys_name)

    subsystem = _get_subsystem(subsys_name)
    mount_point = status.get_path(subsys_name)
    return _scan_cgroups_recursive(subsystem, mount_point, mount_point, filters)
python
def scan_cgroups(subsys_name, filters=list()): status = SubsystemStatus() if subsys_name not in status.get_all(): raise NoSuchSubsystemError("No such subsystem found: " + subsys_name) if subsys_name not in status.get_available(): raise EnvironmentError("Disabled in the kernel: " + subsys_name) if subsys_name not in status.get_enabled(): raise EnvironmentError("Not enabled in the system: " + subsys_name) subsystem = _get_subsystem(subsys_name) mount_point = status.get_path(subsys_name) return _scan_cgroups_recursive(subsystem, mount_point, mount_point, filters)
[ "def", "scan_cgroups", "(", "subsys_name", ",", "filters", "=", "list", "(", ")", ")", ":", "status", "=", "SubsystemStatus", "(", ")", "if", "subsys_name", "not", "in", "status", ".", "get_all", "(", ")", ":", "raise", "NoSuchSubsystemError", "(", "\"No such subsystem found: \"", "+", "subsys_name", ")", "if", "subsys_name", "not", "in", "status", ".", "get_available", "(", ")", ":", "raise", "EnvironmentError", "(", "\"Disabled in the kernel: \"", "+", "subsys_name", ")", "if", "subsys_name", "not", "in", "status", ".", "get_enabled", "(", ")", ":", "raise", "EnvironmentError", "(", "\"Not enabled in the system: \"", "+", "subsys_name", ")", "subsystem", "=", "_get_subsystem", "(", "subsys_name", ")", "mount_point", "=", "status", ".", "get_path", "(", "subsys_name", ")", "return", "_scan_cgroups_recursive", "(", "subsystem", ",", "mount_point", ",", "mount_point", ",", "filters", ")" ]
It returns a control group hierarchy which belongs to the subsys_name.
When collecting cgroups, filters are applied to the cgroups. See pydoc
of apply_filters method of CGroup for more information about the filters.
[ "It", "returns", "a", "control", "group", "hierarchy", "which", "belong", "to", "the", "subsys_name", ".", "When", "collecting", "cgroups", "filters", "are", "applied", "to", "the", "cgroups", ".", "See", "pydoc", "of", "apply_filters", "method", "of", "CGroup", "for", "more", "information", "about", "the", "filters", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L917-L935
peo3/cgroup-utils
cgutils/cgroup.py
walk_cgroups
def walk_cgroups(cgroup, action, opaque): """ The function applies the action function with the opaque object to each control group under the cgroup recursively. """ action(cgroup, opaque) for child in cgroup.childs: walk_cgroups(child, action, opaque)
python
def walk_cgroups(cgroup, action, opaque): action(cgroup, opaque) for child in cgroup.childs: walk_cgroups(child, action, opaque)
[ "def", "walk_cgroups", "(", "cgroup", ",", "action", ",", "opaque", ")", ":", "action", "(", "cgroup", ",", "opaque", ")", "for", "child", "in", "cgroup", ".", "childs", ":", "walk_cgroups", "(", "child", ",", "action", ",", "opaque", ")" ]
The function applies the action function with the opaque object to each control group under the cgroup recursively.
[ "The", "function", "applies", "the", "action", "function", "with", "the", "opaque", "object", "to", "each", "control", "group", "under", "the", "cgroup", "recursively", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L938-L945
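An illustrative combination of scan_cgroups and walk_cgroups above. It requires a Linux host with the named subsystem mounted, and the fullpath attribute is an assumption inferred from the CGroup constructor used later in this file, so treat this as a sketch.
from cgutils.cgroup import scan_cgroups, walk_cgroups

def collect(cgroup, results):
    results.append(cgroup.fullpath)  # attribute name assumed, see above

found = []
root = scan_cgroups("cpu")   # raises if the cpu subsystem is unavailable
walk_cgroups(root, collect, found)
print("\n".join(found))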
peo3/cgroup-utils
cgutils/cgroup.py
get_cgroup
def get_cgroup(fullpath):
    """
    It returns a CGroup object which is pointed to by the fullpath.
    """
    # Canonicalize symbolic links
    fullpath = os.path.realpath(fullpath)

    status = SubsystemStatus()
    name = None
    for name, path in status.paths.items():
        if path in fullpath:
            break
    else:
        raise Exception('Invalid path: ' + fullpath)
    subsys = _get_subsystem(name)

    return CGroup(subsys, fullpath)
python
def get_cgroup(fullpath): fullpath = os.path.realpath(fullpath) status = SubsystemStatus() name = None for name, path in status.paths.items(): if path in fullpath: break else: raise Exception('Invalid path: ' + fullpath) subsys = _get_subsystem(name) return CGroup(subsys, fullpath)
[ "def", "get_cgroup", "(", "fullpath", ")", ":", "# Canonicalize symbolic links", "fullpath", "=", "os", ".", "path", ".", "realpath", "(", "fullpath", ")", "status", "=", "SubsystemStatus", "(", ")", "name", "=", "None", "for", "name", ",", "path", "in", "status", ".", "paths", ".", "items", "(", ")", ":", "if", "path", "in", "fullpath", ":", "break", "else", ":", "raise", "Exception", "(", "'Invalid path: '", "+", "fullpath", ")", "subsys", "=", "_get_subsystem", "(", "name", ")", "return", "CGroup", "(", "subsys", ",", "fullpath", ")" ]
It returns a CGroup object which is pointed to by the fullpath.
[ "It", "returns", "a", "CGroup", "object", "which", "is", "pointed", "by", "the", "fullpath", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L948-L964
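The lookup in get_cgroup above uses `path in fullpath`, a plain substring test, so a mount point such as /cgroup/cpu also matches paths under /cgroup/cpuacct. A hedged sketch of a component-boundary comparison (function name and paths are illustrative):
import os

def owns(mount_point, fullpath):
    # Match only at a path-component boundary, not mid-name.
    mount_point = mount_point.rstrip(os.sep)
    return fullpath == mount_point or fullpath.startswith(mount_point + os.sep)

print(owns("/cgroup/cpu", "/cgroup/cpuacct/foo"))  # False
print(owns("/cgroup/cpu", "/cgroup/cpu/foo"))      # True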
peo3/cgroup-utils
cgutils/cgroup.py
SubsystemStatus._parse_proc_cgroups
def _parse_proc_cgroups(self): """Parse /proc/cgroups""" """ #subsys_name hierarchy num_cgroups enabled cpuset 0 1 1 ns 0 1 1 cpu 1 10 1 cpuacct 0 1 1 memory 0 1 1 devices 0 1 1 freezer 0 1 1 net_cls 0 1 1 """ for line in fileops.readlines('/proc/cgroups'): m = self._RE_CGROUPS.match(line) if m is None: continue name = m.group('name') hierarchy = int(m.group('hier')) n_cgroups = int(m.group('n')) if m.group('enabled') == '1': enabled = True else: enabled = False if name not in self: self[name] = {} self[name]['name'] = name self[name]['hierarchy'] = hierarchy self[name]['num_cgroups'] = n_cgroups self[name]['enabled'] = enabled
python
def _parse_proc_cgroups(self): for line in fileops.readlines('/proc/cgroups'): m = self._RE_CGROUPS.match(line) if m is None: continue name = m.group('name') hierarchy = int(m.group('hier')) n_cgroups = int(m.group('n')) if m.group('enabled') == '1': enabled = True else: enabled = False if name not in self: self[name] = {} self[name]['name'] = name self[name]['hierarchy'] = hierarchy self[name]['num_cgroups'] = n_cgroups self[name]['enabled'] = enabled
[ "def", "_parse_proc_cgroups", "(", "self", ")", ":", "\"\"\"\n #subsys_name\thierarchy\tnum_cgroups\tenabled\n cpuset\t0\t1\t1\n ns\t0\t1\t1\n cpu\t1\t10\t1\n cpuacct\t0\t1\t1\n memory\t0\t1\t1\n devices\t0\t1\t1\n freezer\t0\t1\t1\n net_cls\t0\t1\t1\n \"\"\"", "for", "line", "in", "fileops", ".", "readlines", "(", "'/proc/cgroups'", ")", ":", "m", "=", "self", ".", "_RE_CGROUPS", ".", "match", "(", "line", ")", "if", "m", "is", "None", ":", "continue", "name", "=", "m", ".", "group", "(", "'name'", ")", "hierarchy", "=", "int", "(", "m", ".", "group", "(", "'hier'", ")", ")", "n_cgroups", "=", "int", "(", "m", ".", "group", "(", "'n'", ")", ")", "if", "m", ".", "group", "(", "'enabled'", ")", "==", "'1'", ":", "enabled", "=", "True", "else", ":", "enabled", "=", "False", "if", "name", "not", "in", "self", ":", "self", "[", "name", "]", "=", "{", "}", "self", "[", "name", "]", "[", "'name'", "]", "=", "name", "self", "[", "name", "]", "[", "'hierarchy'", "]", "=", "hierarchy", "self", "[", "name", "]", "[", "'num_cgroups'", "]", "=", "n_cgroups", "self", "[", "name", "]", "[", "'enabled'", "]", "=", "enabled" ]
Parse /proc/cgroups
[ "Parse", "/", "proc", "/", "cgroups" ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L43-L74
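_parse_proc_cgroups above matches each line with self._RE_CGROUPS, a class constant this record does not include. A pattern of the following shape would satisfy the group names used there; treat it as an assumption, not the project's actual definition. The header line of /proc/cgroups starts with '#' and simply fails to match, which the `if m is None: continue` guard handles.
import re

_RE_CGROUPS = re.compile(
    r'^(?P<name>\w+)\s+(?P<hier>\d+)\s+(?P<n>\d+)\s+(?P<enabled>[01])')

m = _RE_CGROUPS.match("cpu\t1\t10\t1")
print(m.group('name'), m.group('hier'), m.group('n'), m.group('enabled'))
# cpu 1 10 1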
peo3/cgroup-utils
cgutils/cgroup.py
SubsystemStatus._parse_proc_mount
def _parse_proc_mount(self): """Parse /proc/mounts""" """ cgroup /cgroup/cpu cgroup rw,relatime,cpuacct,cpu,release_agent=/sbin/cgroup_clean 0 0 cgroup /cgroup/memory cgroup rw,relatime,memory 0 0 cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0 cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0 """ for line in fileops.readlines('/proc/mounts'): if 'cgroup' not in line: continue items = line.split(' ') path = items[1] opts = items[3].split(',') name = None for opt in opts: if opt in self: name = opt self.paths[name] = path if 'name=' in opt: # We treat name=XXX as its name name = opt self.paths[name] = path self[name] = {} self[name]['name'] = name self[name]['enabled'] = True self[name]['hierarchy'] = 0 self[name]['num_cgroups'] = 0 # release_agent= may appear before name= for opt in opts: if 'release_agent=' in opt: self[name]['release_agent'] = opt.replace('release_agent=', '')
python
def _parse_proc_mount(self): for line in fileops.readlines('/proc/mounts'): if 'cgroup' not in line: continue items = line.split(' ') path = items[1] opts = items[3].split(',') name = None for opt in opts: if opt in self: name = opt self.paths[name] = path if 'name=' in opt: name = opt self.paths[name] = path self[name] = {} self[name]['name'] = name self[name]['enabled'] = True self[name]['hierarchy'] = 0 self[name]['num_cgroups'] = 0 for opt in opts: if 'release_agent=' in opt: self[name]['release_agent'] = opt.replace('release_agent=', '')
[ "def", "_parse_proc_mount", "(", "self", ")", ":", "\"\"\"\n cgroup /cgroup/cpu cgroup rw,relatime,cpuacct,cpu,release_agent=/sbin/cgroup_clean 0 0\n cgroup /cgroup/memory cgroup rw,relatime,memory 0 0\n cgroup /cgroup/blkio cgroup rw,relatime,blkio 0 0\n cgroup /cgroup/freezer cgroup rw,relatime,freezer 0 0\n \"\"\"", "for", "line", "in", "fileops", ".", "readlines", "(", "'/proc/mounts'", ")", ":", "if", "'cgroup'", "not", "in", "line", ":", "continue", "items", "=", "line", ".", "split", "(", "' '", ")", "path", "=", "items", "[", "1", "]", "opts", "=", "items", "[", "3", "]", ".", "split", "(", "','", ")", "name", "=", "None", "for", "opt", "in", "opts", ":", "if", "opt", "in", "self", ":", "name", "=", "opt", "self", ".", "paths", "[", "name", "]", "=", "path", "if", "'name='", "in", "opt", ":", "# We treat name=XXX as its name", "name", "=", "opt", "self", ".", "paths", "[", "name", "]", "=", "path", "self", "[", "name", "]", "=", "{", "}", "self", "[", "name", "]", "[", "'name'", "]", "=", "name", "self", "[", "name", "]", "[", "'enabled'", "]", "=", "True", "self", "[", "name", "]", "[", "'hierarchy'", "]", "=", "0", "self", "[", "name", "]", "[", "'num_cgroups'", "]", "=", "0", "# release_agent= may appear before name=", "for", "opt", "in", "opts", ":", "if", "'release_agent='", "in", "opt", ":", "self", "[", "name", "]", "[", "'release_agent'", "]", "=", "opt", ".", "replace", "(", "'release_agent='", ",", "''", ")" ]
Parse /proc/mounts
[ "Parse", "/", "proc", "/", "mounts" ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L76-L111
peo3/cgroup-utils
cgutils/cgroup.py
RdmaStat.parse
def parse(content):
    """
    Parse rdma.current and rdma.max

    Example contents:
        mlx4_0 hca_handle=2 hca_object=2000
        ocrdma1 hca_handle=3 hca_object=max

    >>> RdmaStat.parse("mlx4_0 hca_handle=2 hca_object=2000\\nocrdma1 hca_handle=3 hca_object=max")
    {'mlx4_0': {'hca_handle': 2, 'hca_object': 2000}, 'ocrdma1': {'hca_handle': 3, 'hca_object': 'max'}}
    """
    ret = {}
    lines = content.split('\n')
    for line in lines:
        m = RdmaStat._RE.match(line)
        if m is None:
            continue
        name = m.group('name')
        hca_handle = long(m.group('hca_handle'))
        hca_object = m.group('hca_object')
        if hca_object != "max":
            hca_object = long(hca_object)
        ret[name] = {"hca_handle": hca_handle, "hca_object": hca_object}
    return ret
python
def parse(content): ret = {} lines = content.split('\n') for line in lines: m = RdmaStat._RE.match(line) if m is None: continue name = m.group('name') hca_handle = long(m.group('hca_handle')) hca_object = m.group('hca_object') if hca_object != "max": hca_object = long(hca_object) ret[name] = {"hca_handle": hca_handle, "hca_object": hca_object} return ret
[ "def", "parse", "(", "content", ")", ":", "ret", "=", "{", "}", "lines", "=", "content", ".", "split", "(", "'\\n'", ")", "for", "line", "in", "lines", ":", "m", "=", "RdmaStat", ".", "_RE", ".", "match", "(", "line", ")", "if", "m", "is", "None", ":", "continue", "name", "=", "m", ".", "group", "(", "'name'", ")", "hca_handle", "=", "long", "(", "m", ".", "group", "(", "'hca_handle'", ")", ")", "hca_object", "=", "m", ".", "group", "(", "'hca_object'", ")", "if", "hca_object", "!=", "\"max\"", ":", "hca_object", "=", "long", "(", "hca_object", ")", "ret", "[", "name", "]", "=", "{", "\"hca_handle\"", ":", "hca_handle", ",", "\"hca_object\"", ":", "hca_object", "}", "return", "ret" ]
Parse rdma.current and rdma.max

Example contents:
    mlx4_0 hca_handle=2 hca_object=2000
    ocrdma1 hca_handle=3 hca_object=max

>>> RdmaStat.parse("mlx4_0 hca_handle=2 hca_object=2000\\nocrdma1 hca_handle=3 hca_object=max")
{'mlx4_0': {'hca_handle': 2, 'hca_object': 2000}, 'ocrdma1': {'hca_handle': 3, 'hca_object': 'max'}}
[ "Parse", "rdma", ".", "curren", "and", "rdma", ".", "max" ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L313-L335
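RdmaStat.parse above calls long(), which exists only on Python 2; on Python 3 the same value handling works with int. A minimal port of just that conversion (the helper name is hypothetical):
def to_count(value):
    # rdma.max reports either an integer or the literal string "max".
    return value if value == "max" else int(value)

print(to_count("2000"))  # 2000
print(to_count("max"))   # 'max'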
peo3/cgroup-utils
cgutils/cgroup.py
CGroup.apply_filters
def apply_filters(self, filters):
    """
    It applies the specified filters. The filters are used to reduce the
    control groups which are accessed by get_configs, get_stats, and
    get_defaults methods.
    """
    _configs = self.configs
    _stats = self.stats
    self.configs = {}
    self.stats = {}
    for f in filters:
        if f in _configs:
            self.configs[f] = _configs[f]
        elif f in _stats:
            self.stats[f] = _stats[f]
        else:
            raise NoSuchControlFileError("%s for %s" % (f, self.subsystem.name))
python
def apply_filters(self, filters): _configs = self.configs _stats = self.stats self.configs = {} self.stats = {} for f in filters: if f in _configs: self.configs[f] = _configs[f] elif f in _stats: self.stats[f] = _stats[f] else: raise NoSuchControlFileError("%s for %s" % (f, self.subsystem.name))
[ "def", "apply_filters", "(", "self", ",", "filters", ")", ":", "_configs", "=", "self", ".", "configs", "_stats", "=", "self", ".", "stats", "self", ".", "configs", "=", "{", "}", "self", ".", "stats", "=", "{", "}", "for", "f", "in", "filters", ":", "if", "f", "in", "_configs", ":", "self", ".", "configs", "[", "f", "]", "=", "_configs", "[", "f", "]", "elif", "f", "in", "_stats", ":", "self", ".", "stats", "[", "f", "]", "=", "_stats", "[", "f", "]", "else", ":", "raise", "NoSuchControlFileError", "(", "\"%s for %s\"", "%", "(", "f", ",", "self", ".", "subsystem", ".", "name", ")", ")" ]
It applies the specified filters. The filters are used to reduce the
control groups which are accessed by get_configs, get_stats, and
get_defaults methods.
[ "It", "applies", "a", "specified", "filters", ".", "The", "filters", "are", "used", "to", "reduce", "the", "control", "groups", "which", "are", "accessed", "by", "get_confgs", "get_stats", "and", "get_defaults", "methods", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L722-L737
peo3/cgroup-utils
cgutils/cgroup.py
CGroup.get_configs
def get_configs(self):
    """
    It returns name and current value pairs of control files
    which are categorised in the configs group.
    """
    configs = {}
    for name, default in self.configs.items():
        cls = default.__class__
        path = self.paths[name]
        if os.path.exists(path):
            try:
                configs[name] = self._PARSERS[cls](fileops.read(path))
            except IOError as e:
                if e.errno == errno.EOPNOTSUPP:
                    # Since 3.5 memory.memsw.* are always created even if disabled.
                    # If disabled we will get EOPNOTSUPP when read or write them.
                    # See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel.
                    pass
                else:
                    raise
    return configs
python
def get_configs(self): configs = {} for name, default in self.configs.items(): cls = default.__class__ path = self.paths[name] if os.path.exists(path): try: configs[name] = self._PARSERS[cls](fileops.read(path)) except IOError as e: if e.errno == errno.EOPNOTSUPP: pass else: raise return configs
[ "def", "get_configs", "(", "self", ")", ":", "configs", "=", "{", "}", "for", "name", ",", "default", "in", "self", ".", "configs", ".", "items", "(", ")", ":", "cls", "=", "default", ".", "__class__", "path", "=", "self", ".", "paths", "[", "name", "]", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "try", ":", "configs", "[", "name", "]", "=", "self", ".", "_PARSERS", "[", "cls", "]", "(", "fileops", ".", "read", "(", "path", ")", ")", "except", "IOError", "as", "e", ":", "if", "e", ".", "errno", "==", "errno", ".", "EOPNOTSUPP", ":", "# Since 3.5 memory.memsw.* are always created even if disabled.", "# If disabled we will get EOPNOTSUPP when read or write them.", "# See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel.", "pass", "else", ":", "raise", "return", "configs" ]
It returns name and current value pairs of control files
which are categorised in the configs group.
[ "It", "returns", "a", "name", "and", "a", "current", "value", "pairs", "of", "control", "files", "which", "are", "categorised", "in", "the", "configs", "group", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L739-L759
peo3/cgroup-utils
cgutils/cgroup.py
CGroup.get_stats
def get_stats(self):
    """
    It returns name and value pairs of control files
    which are categorised in the stats group.
    """
    stats = {}
    for name, cls in self.stats.items():
        path = self.paths[name]
        if os.path.exists(path):
            try:
                stats[name] = self._PARSERS[cls](fileops.read(path))
            except IOError as e:
                # XXX: we have to distinguish unexpected errors from the expected ones
                if e.errno == errno.EOPNOTSUPP:
                    # Since 3.5 memory.memsw.* are always created even if disabled.
                    # If disabled we will get EOPNOTSUPP when read or write them.
                    # See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel.
                    pass
                if e.errno == errno.EIO:
                    # memory.kmem.slabinfo throws EIO until limit_in_bytes is set.
                    pass
                else:
                    raise
    return stats
python
def get_stats(self): stats = {} for name, cls in self.stats.items(): path = self.paths[name] if os.path.exists(path): try: stats[name] = self._PARSERS[cls](fileops.read(path)) except IOError as e: if e.errno == errno.EOPNOTSUPP: pass if e.errno == errno.EIO: pass else: raise return stats
[ "def", "get_stats", "(", "self", ")", ":", "stats", "=", "{", "}", "for", "name", ",", "cls", "in", "self", ".", "stats", ".", "items", "(", ")", ":", "path", "=", "self", ".", "paths", "[", "name", "]", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "try", ":", "stats", "[", "name", "]", "=", "self", ".", "_PARSERS", "[", "cls", "]", "(", "fileops", ".", "read", "(", "path", ")", ")", "except", "IOError", "as", "e", ":", "# XXX: we have to distinguish unexpected errors from the expected ones", "if", "e", ".", "errno", "==", "errno", ".", "EOPNOTSUPP", ":", "# Since 3.5 memory.memsw.* are always created even if disabled.", "# If disabled we will get EOPNOTSUPP when read or write them.", "# See commit af36f906c0f4c2ffa0482ecdf856a33dc88ae8c5 of the kernel.", "pass", "if", "e", ".", "errno", "==", "errno", ".", "EIO", ":", "# memory.kmem.slabinfo throws EIO until limit_in_bytes is set.", "pass", "else", ":", "raise", "return", "stats" ]
It returns name and value pairs of control files
which are categorised in the stats group.
[ "It", "returns", "a", "name", "and", "a", "value", "pairs", "of", "control", "files", "which", "are", "categorised", "in", "the", "stats", "group", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L768-L791
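A close reading of the error handling in get_stats above: because the EIO test is a separate `if`, an EOPNOTSUPP error first hits `pass`, then falls into the EIO check's `else` branch and is re-raised after all. Chaining the tests with elif would express the apparent intent of swallowing both errors; a runnable sketch of that reading (the helper name is hypothetical):
import errno

def ignorable(err):
    # Both errors the code above appears to intend to swallow.
    return err.errno in (errno.EOPNOTSUPP, errno.EIO)

try:
    raise IOError(errno.EOPNOTSUPP, "operation not supported")
except IOError as e:
    if not ignorable(e):
        raise
    print("ignored:", e)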
peo3/cgroup-utils
cgutils/cgroup.py
CGroup.update
def update(self): """It updates process information of the cgroup.""" pids = fileops.readlines(self.paths['cgroup.procs']) self.pids = [int(pid) for pid in pids if pid != ''] self.n_procs = len(pids)
python
def update(self): pids = fileops.readlines(self.paths['cgroup.procs']) self.pids = [int(pid) for pid in pids if pid != ''] self.n_procs = len(pids)
[ "def", "update", "(", "self", ")", ":", "pids", "=", "fileops", ".", "readlines", "(", "self", ".", "paths", "[", "'cgroup.procs'", "]", ")", "self", ".", "pids", "=", "[", "int", "(", "pid", ")", "for", "pid", "in", "pids", "if", "pid", "!=", "''", "]", "self", ".", "n_procs", "=", "len", "(", "pids", ")" ]
It updates process information of the cgroup.
[ "It", "updates", "process", "information", "of", "the", "cgroup", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L793-L797
peo3/cgroup-utils
cgutils/cgroup.py
EventListener.register
def register(self, arguments=list()):
    """
    Register a target file with arguments (if required) to an
    event_control file from which we want to be notified of events.
    """
    target_name = self.target_name
    if target_name in ['memory.usage_in_bytes', 'memory.memsw.usage_in_bytes']:
        threshold = arguments[0]
        line = "%d %d %d\0" % (self.event_fd, self.target_fd, long(threshold))
    elif target_name in ['memory.pressure_level']:
        threshold = arguments[0]
        line = "%d %d %s\0" % (self.event_fd, self.target_fd, threshold)
    else:
        line = "%d %d\0" % (self.event_fd, self.target_fd)
    os.write(self.ec_fd, line)
python
def register(self, arguments=list()): target_name = self.target_name if target_name in ['memory.usage_in_bytes', 'memory.memsw.usage_in_bytes']: threshold = arguments[0] line = "%d %d %d\0" % (self.event_fd, self.target_fd, long(threshold)) elif target_name in ['memory.pressure_level']: threshold = arguments[0] line = "%d %d %s\0" % (self.event_fd, self.target_fd, threshold) else: line = "%d %d\0" % (self.event_fd, self.target_fd) os.write(self.ec_fd, line)
[ "def", "register", "(", "self", ",", "arguments", "=", "list", "(", ")", ")", ":", "target_name", "=", "self", ".", "target_name", "if", "target_name", "in", "[", "'memory.usage_in_bytes'", ",", "'memory.memsw.usage_in_bytes'", "]", ":", "threshold", "=", "arguments", "[", "0", "]", "line", "=", "\"%d %d %d\\0\"", "%", "(", "self", ".", "event_fd", ",", "self", ".", "target_fd", ",", "long", "(", "threshold", ")", ")", "elif", "target_name", "in", "[", "'memory.pressure_level'", "]", ":", "threshold", "=", "arguments", "[", "0", "]", "line", "=", "\"%d %d %s\\0\"", "%", "(", "self", ".", "event_fd", ",", "self", ".", "target_fd", ",", "threshold", ")", "else", ":", "line", "=", "\"%d %d\\0\"", "%", "(", "self", ".", "event_fd", ",", "self", ".", "target_fd", ")", "os", ".", "write", "(", "self", ".", "ec_fd", ",", "line", ")" ]
Register a target file with arguments (if required) to an
event_control file from which we want to be notified of events.
[ "Register", "a", "target", "file", "with", "arguments", "(", "if", "required", ")", "to", "a", "event_control", "file", "which", "we", "want", "to", "be", "notified", "events", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L871-L885
peo3/cgroup-utils
cgutils/cgroup.py
EventListener.wait
def wait(self): """ It returns when an event which we have configured by set_threshold happens. Note that it blocks until then. """ ret = os.read(self.event_fd, 64 / 8) return struct.unpack('Q', ret)
python
def wait(self): ret = os.read(self.event_fd, 64 / 8) return struct.unpack('Q', ret)
[ "def", "wait", "(", "self", ")", ":", "ret", "=", "os", ".", "read", "(", "self", ".", "event_fd", ",", "64", "/", "8", ")", "return", "struct", ".", "unpack", "(", "'Q'", ",", "ret", ")" ]
It returns when an event which we have configured by set_threshold happens. Note that it blocks until then.
[ "It", "returns", "when", "an", "event", "which", "we", "have", "configured", "by", "set_threshold", "happens", ".", "Note", "that", "it", "blocks", "until", "then", "." ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/cgroup.py#L887-L893
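In wait above, os.read(self.event_fd, 64 / 8) passes a float on Python 3, which os.read rejects; an eventfd counter is exactly 8 bytes, so integer division (or the literal 8) is needed. A stand-alone illustration of the size arithmetic and the struct round-trip:
import struct

COUNTER_SIZE = 64 // 8             # 8 bytes; integer division keeps it an int
payload = struct.pack('Q', 3)      # what a triggered eventfd would deliver
assert len(payload) == COUNTER_SIZE
print(struct.unpack('Q', payload))  # (3,)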
peo3/cgroup-utils
cgutils/commands/tree.py
Command._build_process_container_tree
def _build_process_container_tree(self, pids): """ tops = [1,2,3] childs = {1: [4,5], 2: [6,7], 3: [], 4: []} """ containers = [] procs = [] ppids = [] childs = {} for pid in pids: proc = process.Process(pid) procs.append(proc) ppids.append(proc.ppid) if proc.ppid not in childs: childs[proc.ppid] = [] childs[proc.ppid].append(proc) ppids = set(ppids) tops = [proc for proc in procs if proc.ppid not in pids] if len(tops) == 0: tops = procs def build_tree(proc_list): _containers = [] for proc in proc_list: if not self.args.show_kthread and proc.is_kthread(): continue cont = TreeContainer(proc) if proc.pid in childs: cont.childs = build_tree(childs[proc.pid]) _containers.append(cont) return _containers for top_proc in tops: if not self.args.show_kthread and top_proc.is_kthread(): continue cont = TreeContainer(top_proc) if top_proc.pid in childs: cont.childs = build_tree(childs[top_proc.pid]) containers.append(cont) return containers
python
def _build_process_container_tree(self, pids): containers = [] procs = [] ppids = [] childs = {} for pid in pids: proc = process.Process(pid) procs.append(proc) ppids.append(proc.ppid) if proc.ppid not in childs: childs[proc.ppid] = [] childs[proc.ppid].append(proc) ppids = set(ppids) tops = [proc for proc in procs if proc.ppid not in pids] if len(tops) == 0: tops = procs def build_tree(proc_list): _containers = [] for proc in proc_list: if not self.args.show_kthread and proc.is_kthread(): continue cont = TreeContainer(proc) if proc.pid in childs: cont.childs = build_tree(childs[proc.pid]) _containers.append(cont) return _containers for top_proc in tops: if not self.args.show_kthread and top_proc.is_kthread(): continue cont = TreeContainer(top_proc) if top_proc.pid in childs: cont.childs = build_tree(childs[top_proc.pid]) containers.append(cont) return containers
[ "def", "_build_process_container_tree", "(", "self", ",", "pids", ")", ":", "containers", "=", "[", "]", "procs", "=", "[", "]", "ppids", "=", "[", "]", "childs", "=", "{", "}", "for", "pid", "in", "pids", ":", "proc", "=", "process", ".", "Process", "(", "pid", ")", "procs", ".", "append", "(", "proc", ")", "ppids", ".", "append", "(", "proc", ".", "ppid", ")", "if", "proc", ".", "ppid", "not", "in", "childs", ":", "childs", "[", "proc", ".", "ppid", "]", "=", "[", "]", "childs", "[", "proc", ".", "ppid", "]", ".", "append", "(", "proc", ")", "ppids", "=", "set", "(", "ppids", ")", "tops", "=", "[", "proc", "for", "proc", "in", "procs", "if", "proc", ".", "ppid", "not", "in", "pids", "]", "if", "len", "(", "tops", ")", "==", "0", ":", "tops", "=", "procs", "def", "build_tree", "(", "proc_list", ")", ":", "_containers", "=", "[", "]", "for", "proc", "in", "proc_list", ":", "if", "not", "self", ".", "args", ".", "show_kthread", "and", "proc", ".", "is_kthread", "(", ")", ":", "continue", "cont", "=", "TreeContainer", "(", "proc", ")", "if", "proc", ".", "pid", "in", "childs", ":", "cont", ".", "childs", "=", "build_tree", "(", "childs", "[", "proc", ".", "pid", "]", ")", "_containers", ".", "append", "(", "cont", ")", "return", "_containers", "for", "top_proc", "in", "tops", ":", "if", "not", "self", ".", "args", ".", "show_kthread", "and", "top_proc", ".", "is_kthread", "(", ")", ":", "continue", "cont", "=", "TreeContainer", "(", "top_proc", ")", "if", "top_proc", ".", "pid", "in", "childs", ":", "cont", ".", "childs", "=", "build_tree", "(", "childs", "[", "top_proc", ".", "pid", "]", ")", "containers", ".", "append", "(", "cont", ")", "return", "containers" ]
tops = [1,2,3] childs = {1: [4,5], 2: [6,7], 3: [], 4: []}
[ "tops", "=", "[", "1", "2", "3", "]", "childs", "=", "{", "1", ":", "[", "4", "5", "]", "2", ":", "[", "6", "7", "]", "3", ":", "[]", "4", ":", "[]", "}" ]
train
https://github.com/peo3/cgroup-utils/blob/fd7e99f438ce334bac5669fba0d08a6502fd7a82/cgutils/commands/tree.py#L154-L197
peakwinter/python-nginx
nginx.py
loads
def loads(data, conf=True): """ Load an nginx configuration from a provided string. :param str data: nginx configuration :param bool conf: Load object(s) into a Conf object? """ f = Conf() if conf else [] lopen = [] index = 0 while True: m = re.compile(r'^\s*events\s*{', re.S).search(data[index:]) if m: e = Events() lopen.insert(0, e) index += m.end() continue m = re.compile(r'^\s*http\s*{', re.S).search(data[index:]) if m: h = Http() lopen.insert(0, h) index += m.end() continue m = re.compile(r'^\s*stream\s*{', re.S).search(data[index:]) if m: s = Stream() lopen.insert(0, s) index += m.end() continue m = re.compile(r'^\s*server\s*{', re.S).search(data[index:]) if m: s = Server() lopen.insert(0, s) index += m.end() continue m = re.compile(r'^\s*location\s*([^;]*?)\s*{', re.S).search(data[index:]) if m: l = Location(m.group(1)) lopen.insert(0, l) index += m.end() continue m = re.compile(r'^\s*if\s*([^;]*?)\s*{', re.S).search(data[index:]) if m: ifs = If(m.group(1)) lopen.insert(0, ifs) index += m.end() continue m = re.compile(r'^\s*upstream\s*([^;]*?)\s*{', re.S).search(data[index:]) if m: u = Upstream(m.group(1)) lopen.insert(0, u) index += m.end() continue m = re.compile(r'^\s*geo\s*([^;]*?)\s*{', re.S).search(data[index:]) if m: g = Geo(m.group(1)) lopen.insert(0, g) index += m.end() continue m = re.compile(r'^\s*map\s*([^;]*?)\s*{', re.S).search(data[index:]) if m: g = Map(m.group(1)) lopen.insert(0, g) index += m.end() continue m = re.compile(r'^\s*limit_except\s*([^;]*?)\s*{', re.S).search(data[index:]) if m: l = LimitExcept(m.group(1)) lopen.insert(0, l) index += m.end() continue m = re.compile(r'^\s*types\s*{', re.S).search(data[index:]) if m: l = Types() lopen.insert(0, l) index += m.end() continue m = re.compile(r'^(\s*)#[ \r\t\f]*(.*?)\n', re.S).search(data[index:]) if m: c = Comment(m.group(2), inline='\n' not in m.group(1)) if lopen and isinstance(lopen[0], Container): lopen[0].add(c) else: f.add(c) if conf else f.append(c) index += m.end() - 1 continue m = re.compile(r'^\s*}', re.S).search(data[index:]) if m: if isinstance(lopen[0], Container): c = lopen[0] lopen.pop(0) if lopen and isinstance(lopen[0], Container): lopen[0].add(c) else: f.add(c) if conf else f.append(c) index += m.end() continue double = r'\s*"[^"]*"' single = r'\s*\'[^\']*\'' normal = r'\s*[^;\s]*' s1 = r'{}|{}|{}'.format(double, single, normal) s = r'^\s*({})\s*((?:{})+);'.format(s1, s1) m = re.compile(s, re.S).search(data[index:]) if m: k = Key(m.group(1), m.group(2)) if lopen and isinstance(lopen[0], (Container, Server)): lopen[0].add(k) else: f.add(k) if conf else f.append(k) index += m.end() continue m = re.compile(r'^\s*(\S+);', re.S).search(data[index:]) if m: k = Key(m.group(1), '') if lopen and isinstance(lopen[0], (Container, Server)): lopen[0].add(k) else: f.add(k) if conf else f.append(k) index += m.end() continue break return f
python
def loads(data, conf=True):
    f = Conf() if conf else []
    lopen = []
    index = 0

    while True:
        m = re.compile(r'^\s*events\s*{', re.S).search(data[index:])
        if m:
            e = Events()
            lopen.insert(0, e)
            index += m.end()
            continue

        m = re.compile(r'^\s*http\s*{', re.S).search(data[index:])
        if m:
            h = Http()
            lopen.insert(0, h)
            index += m.end()
            continue

        m = re.compile(r'^\s*stream\s*{', re.S).search(data[index:])
        if m:
            s = Stream()
            lopen.insert(0, s)
            index += m.end()
            continue

        m = re.compile(r'^\s*server\s*{', re.S).search(data[index:])
        if m:
            s = Server()
            lopen.insert(0, s)
            index += m.end()
            continue

        m = re.compile(r'^\s*location\s*([^;]*?)\s*{', re.S).search(data[index:])
        if m:
            l = Location(m.group(1))
            lopen.insert(0, l)
            index += m.end()
            continue

        m = re.compile(r'^\s*if\s*([^;]*?)\s*{', re.S).search(data[index:])
        if m:
            ifs = If(m.group(1))
            lopen.insert(0, ifs)
            index += m.end()
            continue

        m = re.compile(r'^\s*upstream\s*([^;]*?)\s*{', re.S).search(data[index:])
        if m:
            u = Upstream(m.group(1))
            lopen.insert(0, u)
            index += m.end()
            continue

        m = re.compile(r'^\s*geo\s*([^;]*?)\s*{', re.S).search(data[index:])
        if m:
            g = Geo(m.group(1))
            lopen.insert(0, g)
            index += m.end()
            continue

        m = re.compile(r'^\s*map\s*([^;]*?)\s*{', re.S).search(data[index:])
        if m:
            g = Map(m.group(1))
            lopen.insert(0, g)
            index += m.end()
            continue

        m = re.compile(r'^\s*limit_except\s*([^;]*?)\s*{', re.S).search(data[index:])
        if m:
            l = LimitExcept(m.group(1))
            lopen.insert(0, l)
            index += m.end()
            continue

        m = re.compile(r'^\s*types\s*{', re.S).search(data[index:])
        if m:
            l = Types()
            lopen.insert(0, l)
            index += m.end()
            continue

        m = re.compile(r'^(\s*)#[ \r\t\f]*(.*?)\n', re.S).search(data[index:])
        if m:
            c = Comment(m.group(2), inline='\n' not in m.group(1))
            if lopen and isinstance(lopen[0], Container):
                lopen[0].add(c)
            else:
                f.add(c) if conf else f.append(c)
            index += m.end() - 1
            continue

        m = re.compile(r'^\s*}', re.S).search(data[index:])
        if m:
            if isinstance(lopen[0], Container):
                c = lopen[0]
                lopen.pop(0)
                if lopen and isinstance(lopen[0], Container):
                    lopen[0].add(c)
                else:
                    f.add(c) if conf else f.append(c)
            index += m.end()
            continue

        double = r'\s*"[^"]*"'
        single = r'\s*\'[^\']*\''
        normal = r'\s*[^;\s]*'
        s1 = r'{}|{}|{}'.format(double, single, normal)
        s = r'^\s*({})\s*((?:{})+);'.format(s1, s1)
        m = re.compile(s, re.S).search(data[index:])
        if m:
            k = Key(m.group(1), m.group(2))
            if lopen and isinstance(lopen[0], (Container, Server)):
                lopen[0].add(k)
            else:
                f.add(k) if conf else f.append(k)
            index += m.end()
            continue

        m = re.compile(r'^\s*(\S+);', re.S).search(data[index:])
        if m:
            k = Key(m.group(1), '')
            if lopen and isinstance(lopen[0], (Container, Server)):
                lopen[0].add(k)
            else:
                f.add(k) if conf else f.append(k)
            index += m.end()
            continue

        break
    return f
[ "def", "loads", "(", "data", ",", "conf", "=", "True", ")", ":", "f", "=", "Conf", "(", ")", "if", "conf", "else", "[", "]", "lopen", "=", "[", "]", "index", "=", "0", "while", "True", ":", "m", "=", "re", ".", "compile", "(", "r'^\\s*events\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "e", "=", "Events", "(", ")", "lopen", ".", "insert", "(", "0", ",", "e", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*http\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "h", "=", "Http", "(", ")", "lopen", ".", "insert", "(", "0", ",", "h", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*stream\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "s", "=", "Stream", "(", ")", "lopen", ".", "insert", "(", "0", ",", "s", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*server\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "s", "=", "Server", "(", ")", "lopen", ".", "insert", "(", "0", ",", "s", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*location\\s*([^;]*?)\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "l", "=", "Location", "(", "m", ".", "group", "(", "1", ")", ")", "lopen", ".", "insert", "(", "0", ",", "l", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*if\\s*([^;]*?)\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "ifs", "=", "If", "(", "m", ".", "group", "(", "1", ")", ")", "lopen", ".", "insert", "(", "0", ",", "ifs", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*upstream\\s*([^;]*?)\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "u", "=", "Upstream", "(", "m", ".", "group", "(", "1", ")", ")", "lopen", ".", "insert", "(", "0", ",", "u", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*geo\\s*([^;]*?)\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "g", "=", "Geo", "(", "m", ".", "group", "(", "1", ")", ")", "lopen", ".", "insert", "(", "0", ",", "g", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*map\\s*([^;]*?)\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "g", "=", "Map", "(", "m", ".", "group", "(", "1", ")", ")", "lopen", ".", "insert", "(", "0", ",", "g", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*limit_except\\s*([^;]*?)\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "l", "=", "LimitExcept", "(", "m", ".", "group", "(", "1", ")", ")", "lopen", ".", "insert", "(", "0", ",", "l", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*types\\s*{'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "l", "=", 
"Types", "(", ")", "lopen", ".", "insert", "(", "0", ",", "l", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^(\\s*)#[ \\r\\t\\f]*(.*?)\\n'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "c", "=", "Comment", "(", "m", ".", "group", "(", "2", ")", ",", "inline", "=", "'\\n'", "not", "in", "m", ".", "group", "(", "1", ")", ")", "if", "lopen", "and", "isinstance", "(", "lopen", "[", "0", "]", ",", "Container", ")", ":", "lopen", "[", "0", "]", ".", "add", "(", "c", ")", "else", ":", "f", ".", "add", "(", "c", ")", "if", "conf", "else", "f", ".", "append", "(", "c", ")", "index", "+=", "m", ".", "end", "(", ")", "-", "1", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*}'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "if", "isinstance", "(", "lopen", "[", "0", "]", ",", "Container", ")", ":", "c", "=", "lopen", "[", "0", "]", "lopen", ".", "pop", "(", "0", ")", "if", "lopen", "and", "isinstance", "(", "lopen", "[", "0", "]", ",", "Container", ")", ":", "lopen", "[", "0", "]", ".", "add", "(", "c", ")", "else", ":", "f", ".", "add", "(", "c", ")", "if", "conf", "else", "f", ".", "append", "(", "c", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "double", "=", "r'\\s*\"[^\"]*\"'", "single", "=", "r'\\s*\\'[^\\']*\\''", "normal", "=", "r'\\s*[^;\\s]*'", "s1", "=", "r'{}|{}|{}'", ".", "format", "(", "double", ",", "single", ",", "normal", ")", "s", "=", "r'^\\s*({})\\s*((?:{})+);'", ".", "format", "(", "s1", ",", "s1", ")", "m", "=", "re", ".", "compile", "(", "s", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "k", "=", "Key", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ")", "if", "lopen", "and", "isinstance", "(", "lopen", "[", "0", "]", ",", "(", "Container", ",", "Server", ")", ")", ":", "lopen", "[", "0", "]", ".", "add", "(", "k", ")", "else", ":", "f", ".", "add", "(", "k", ")", "if", "conf", "else", "f", ".", "append", "(", "k", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "m", "=", "re", ".", "compile", "(", "r'^\\s*(\\S+);'", ",", "re", ".", "S", ")", ".", "search", "(", "data", "[", "index", ":", "]", ")", "if", "m", ":", "k", "=", "Key", "(", "m", ".", "group", "(", "1", ")", ",", "''", ")", "if", "lopen", "and", "isinstance", "(", "lopen", "[", "0", "]", ",", "(", "Container", ",", "Server", ")", ")", ":", "lopen", "[", "0", "]", ".", "add", "(", "k", ")", "else", ":", "f", ".", "add", "(", "k", ")", "if", "conf", "else", "f", ".", "append", "(", "k", ")", "index", "+=", "m", ".", "end", "(", ")", "continue", "break", "return", "f" ]
Load an nginx configuration from a provided string. :param str data: nginx configuration :param bool conf: Load object(s) into a Conf object?
[ "Load", "an", "nginx", "configuration", "from", "a", "provided", "string", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L399-L537
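A quick orientation for the `loads` record above — this usage sketch is not part of the source; it assumes the package imports as `nginx` and that the usual `dumps` counterpart exists alongside `loads`:

    import nginx

    # parse a config string into a Conf tree of Containers and Keys
    conf = nginx.loads(
        'server {\n'
        '    listen 80;\n'
        '    location / {\n'
        '        return 200;\n'
        '    }\n'
        '}\n'
    )
    servers = conf.filter('Server')   # direct children whose class name is 'Server'
    text = nginx.dumps(conf)          # assumed inverse: serialize back to a string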
peakwinter/python-nginx
nginx.py
dumpf
def dumpf(obj, path): """ Write an nginx configuration to file. :param obj obj: nginx object (Conf, Server, Container) :param str path: path to nginx configuration on disk :returns: path the configuration was written to """ with open(path, 'w') as f: dump(obj, f) return path
python
def dumpf(obj, path): with open(path, 'w') as f: dump(obj, f) return path
[ "def", "dumpf", "(", "obj", ",", "path", ")", ":", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "dump", "(", "obj", ",", "f", ")", "return", "path" ]
Write an nginx configuration to file. :param obj obj: nginx object (Conf, Server, Container) :param str path: path to nginx configuration on disk :returns: path the configuration was written to
[ "Write", "an", "nginx", "configuration", "to", "file", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L581-L591
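A minimal file round-trip sketch for `dumpf` (assumptions: `nginx.Conf`, `nginx.Key`, `Conf.add`, and a `loadf` reader mirroring `dumpf` all live in the same module):

    import nginx

    conf = nginx.Conf()
    conf.add(nginx.Key('user', 'www-data'))
    path = nginx.dumpf(conf, '/tmp/example.conf')   # returns the path it wrote
    again = nginx.loadf(path)                       # loadf assumed as the inverse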
peakwinter/python-nginx
nginx.py
Conf.remove
def remove(self, *args): """ Remove object(s) from the Conf. :param *args: Any objects to remove from the Conf. :returns: full list of Conf's child objects """ for x in args: self.children.remove(x) return self.children
python
def remove(self, *args): for x in args: self.children.remove(x) return self.children
[ "def", "remove", "(", "self", ",", "*", "args", ")", ":", "for", "x", "in", "args", ":", "self", ".", "children", ".", "remove", "(", "x", ")", "return", "self", ".", "children" ]
Remove object(s) from the Conf. :param *args: Any objects to remove from the Conf. :returns: full list of Conf's child objects
[ "Remove", "object", "(", "s", ")", "from", "the", "Conf", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L47-L56
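Worth noting: `remove` both mutates `children` in place and returns it, and because it delegates to `list.remove`, passing an object that is not a child raises `ValueError`. A tiny sketch (again assuming `Conf.add` and `Key` from the same module):

    conf = nginx.Conf()
    key = nginx.Key('worker_processes', '4')
    conf.add(key)
    conf.remove(key)   # -> [] once the only child is gone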
peakwinter/python-nginx
nginx.py
Conf.filter
def filter(self, btype='', name=''): """ Return child object(s) of this Conf that satisfy certain criteria. :param str btype: Type of object to filter by (e.g. 'Key') :param str name: Name of key OR container value to filter by :returns: full list of matching child objects """ filtered = [] for x in self.children: if name and isinstance(x, Key) and x.name == name: filtered.append(x) elif isinstance(x, Container) and x.__class__.__name__ == btype\ and x.value == name: filtered.append(x) elif not name and btype and x.__class__.__name__ == btype: filtered.append(x) return filtered
python
def filter(self, btype='', name=''): filtered = [] for x in self.children: if name and isinstance(x, Key) and x.name == name: filtered.append(x) elif isinstance(x, Container) and x.__class__.__name__ == btype\ and x.value == name: filtered.append(x) elif not name and btype and x.__class__.__name__ == btype: filtered.append(x) return filtered
[ "def", "filter", "(", "self", ",", "btype", "=", "''", ",", "name", "=", "''", ")", ":", "filtered", "=", "[", "]", "for", "x", "in", "self", ".", "children", ":", "if", "name", "and", "isinstance", "(", "x", ",", "Key", ")", "and", "x", ".", "name", "==", "name", ":", "filtered", ".", "append", "(", "x", ")", "elif", "isinstance", "(", "x", ",", "Container", ")", "and", "x", ".", "__class__", ".", "__name__", "==", "btype", "and", "x", ".", "value", "==", "name", ":", "filtered", ".", "append", "(", "x", ")", "elif", "not", "name", "and", "btype", "and", "x", ".", "__class__", ".", "__name__", "==", "btype", ":", "filtered", ".", "append", "(", "x", ")", "return", "filtered" ]
Return child object(s) of this Conf that satisfy certain criteria. :param str btype: Type of object to filter by (e.g. 'Key') :param str name: Name of key OR container value to filter by :returns: full list of matching child objects
[ "Return", "child", "object", "(", "s", ")", "of", "this", "Conf", "that", "satisfy", "certain", "criteria", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L58-L75
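Two details of `filter` that follow directly from the code: `btype` is matched against the child's class name, and only direct children are inspected — it does not recurse into nested containers. An illustrative sketch:

    conf = nginx.loads('user www; http { server { listen 80; } }')
    conf.filter(name='user')      # Key children matched by name
    conf.filter(btype='Http')     # Container children matched by class name
    conf.filter(btype='Server')   # [] -- the Server sits inside Http, not at top level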
peakwinter/python-nginx
nginx.py
Conf.as_strings
def as_strings(self): """Return the entire Conf as nginx config strings.""" ret = [] for x in self.children: if isinstance(x, (Key, Comment)): ret.append(x.as_strings) else: for y in x.as_strings: ret.append(y) if ret: ret[-1] = re.sub('}\n+$', '}\n', ret[-1]) return ret
python
def as_strings(self): ret = [] for x in self.children: if isinstance(x, (Key, Comment)): ret.append(x.as_strings) else: for y in x.as_strings: ret.append(y) if ret: ret[-1] = re.sub('}\n+$', '}\n', ret[-1]) return ret
[ "def", "as_strings", "(", "self", ")", ":", "ret", "=", "[", "]", "for", "x", "in", "self", ".", "children", ":", "if", "isinstance", "(", "x", ",", "(", "Key", ",", "Comment", ")", ")", ":", "ret", ".", "append", "(", "x", ".", "as_strings", ")", "else", ":", "for", "y", "in", "x", ".", "as_strings", ":", "ret", ".", "append", "(", "y", ")", "if", "ret", ":", "ret", "[", "-", "1", "]", "=", "re", ".", "sub", "(", "'}\\n+$'", ",", "'}\\n'", ",", "ret", "[", "-", "1", "]", ")", "return", "ret" ]
Return the entire Conf as nginx config strings.
[ "Return", "the", "entire", "Conf", "as", "nginx", "config", "strings", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L98-L109
peakwinter/python-nginx
nginx.py
Container.add
def add(self, *args): """ Add object(s) to the Container. :param *args: Any objects to add to the Container. :returns: full list of Container's child objects """ self.children.extend(args) bump_child_depth(self, self._depth) return self.children
python
def add(self, *args): self.children.extend(args) bump_child_depth(self, self._depth) return self.children
[ "def", "add", "(", "self", ",", "*", "args", ")", ":", "self", ".", "children", ".", "extend", "(", "args", ")", "bump_child_depth", "(", "self", ",", "self", ".", "_depth", ")", "return", "self", ".", "children" ]
Add object(s) to the Container. :param *args: Any objects to add to the Container. :returns: full list of Container's child objects
[ "Add", "object", "(", "s", ")", "to", "the", "Container", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L133-L142
peakwinter/python-nginx
nginx.py
Container.as_list
def as_list(self): """Return all child objects in nested lists of strings.""" return [self.name, self.value, [x.as_list for x in self.children]]
python
def as_list(self): return [self.name, self.value, [x.as_list for x in self.children]]
[ "def", "as_list", "(", "self", ")", ":", "return", "[", "self", ".", "name", ",", "self", ".", "value", ",", "[", "x", ".", "as_list", "for", "x", "in", "self", ".", "children", "]", "]" ]
Return all child objects in nested lists of strings.
[ "Return", "all", "child", "objects", "in", "nested", "lists", "of", "strings", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L190-L192
peakwinter/python-nginx
nginx.py
Container.as_dict
def as_dict(self): """Return all child objects in nested dict.""" dicts = [x.as_dict for x in self.children] return {'{0} {1}'.format(self.name, self.value): dicts}
python
def as_dict(self): dicts = [x.as_dict for x in self.children] return {'{0} {1}'.format(self.name, self.value): dicts}
[ "def", "as_dict", "(", "self", ")", ":", "dicts", "=", "[", "x", ".", "as_dict", "for", "x", "in", "self", ".", "children", "]", "return", "{", "'{0} {1}'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "value", ")", ":", "dicts", "}" ]
Return all child objects in nested dict.
[ "Return", "all", "child", "objects", "in", "nested", "dict", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L195-L198
peakwinter/python-nginx
nginx.py
Container.as_strings
def as_strings(self): """Return the entire Container as nginx config strings.""" ret = [] container_title = (INDENT * self._depth) container_title += '{0}{1} {{\n'.format( self.name, (' {0}'.format(self.value) if self.value else '') ) ret.append(container_title) for x in self.children: if isinstance(x, Key): ret.append(INDENT + x.as_strings) elif isinstance(x, Comment): if x.inline and len(ret) >= 1: ret[-1] = ret[-1].rstrip('\n') + ' ' + x.as_strings else: ret.append(INDENT + x.as_strings) elif isinstance(x, Container): y = x.as_strings ret.append('\n' + y[0]) for z in y[1:]: ret.append(INDENT + z) else: y = x.as_strings ret.append(INDENT + y) ret[-1] = re.sub('}\n+$', '}\n', ret[-1]) ret.append('}\n\n') return ret
python
def as_strings(self): ret = [] container_title = (INDENT * self._depth) container_title += '{0}{1} {{\n'.format( self.name, (' {0}'.format(self.value) if self.value else '') ) ret.append(container_title) for x in self.children: if isinstance(x, Key): ret.append(INDENT + x.as_strings) elif isinstance(x, Comment): if x.inline and len(ret) >= 1: ret[-1] = ret[-1].rstrip('\n') + ' ' + x.as_strings else: ret.append(INDENT + x.as_strings) elif isinstance(x, Container): y = x.as_strings ret.append('\n' + y[0]) for z in y[1:]: ret.append(INDENT + z) else: y = x.as_strings ret.append(INDENT + y) ret[-1] = re.sub('}\n+$', '}\n', ret[-1]) ret.append('}\n\n') return ret
[ "def", "as_strings", "(", "self", ")", ":", "ret", "=", "[", "]", "container_title", "=", "(", "INDENT", "*", "self", ".", "_depth", ")", "container_title", "+=", "'{0}{1} {{\\n'", ".", "format", "(", "self", ".", "name", ",", "(", "' {0}'", ".", "format", "(", "self", ".", "value", ")", "if", "self", ".", "value", "else", "''", ")", ")", "ret", ".", "append", "(", "container_title", ")", "for", "x", "in", "self", ".", "children", ":", "if", "isinstance", "(", "x", ",", "Key", ")", ":", "ret", ".", "append", "(", "INDENT", "+", "x", ".", "as_strings", ")", "elif", "isinstance", "(", "x", ",", "Comment", ")", ":", "if", "x", ".", "inline", "and", "len", "(", "ret", ")", ">=", "1", ":", "ret", "[", "-", "1", "]", "=", "ret", "[", "-", "1", "]", ".", "rstrip", "(", "'\\n'", ")", "+", "' '", "+", "x", ".", "as_strings", "else", ":", "ret", ".", "append", "(", "INDENT", "+", "x", ".", "as_strings", ")", "elif", "isinstance", "(", "x", ",", "Container", ")", ":", "y", "=", "x", ".", "as_strings", "ret", ".", "append", "(", "'\\n'", "+", "y", "[", "0", "]", ")", "for", "z", "in", "y", "[", "1", ":", "]", ":", "ret", ".", "append", "(", "INDENT", "+", "z", ")", "else", ":", "y", "=", "x", ".", "as_strings", "ret", ".", "append", "(", "INDENT", "+", "y", ")", "ret", "[", "-", "1", "]", "=", "re", ".", "sub", "(", "'}\\n+$'", ",", "'}\\n'", ",", "ret", "[", "-", "1", "]", ")", "ret", ".", "append", "(", "'}\\n\\n'", ")", "return", "ret" ]
Return the entire Container as nginx config strings.
[ "Return", "the", "entire", "Container", "as", "nginx", "config", "strings", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L201-L227
peakwinter/python-nginx
nginx.py
Key.as_strings
def as_strings(self): """Return key as nginx config string.""" if self.value == '' or self.value is None: return '{0};\n'.format(self.name) if '"' not in self.value and (';' in self.value or '#' in self.value): return '{0} "{1}";\n'.format(self.name, self.value) return '{0} {1};\n'.format(self.name, self.value)
python
def as_strings(self):
    if self.value == '' or self.value is None:
        return '{0};\n'.format(self.name)
    if '"' not in self.value and (';' in self.value or '#' in self.value):
        return '{0} "{1}";\n'.format(self.name, self.value)
    return '{0} {1};\n'.format(self.name, self.value)
[ "def", "as_strings", "(", "self", ")", ":", "if", "self", ".", "value", "==", "''", "or", "self", ".", "value", "is", "None", ":", "return", "'{0};\\n'", ".", "format", "(", "self", ".", "name", ")", "if", "'\"'", "not", "in", "self", ".", "value", "and", "(", "';'", "in", "self", ".", "value", "or", "'#'", "in", "self", ".", "value", ")", ":", "return", "'{0} \"{1}\";\\n'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "value", ")", "return", "'{0} {1};\\n'", ".", "format", "(", "self", ".", "name", ",", "self", ".", "value", ")" ]
Return key as nginx config string.
[ "Return", "key", "as", "nginx", "config", "string", "." ]
train
https://github.com/peakwinter/python-nginx/blob/4ecd1cd2e1f11ffb633d188a578a004712eaae16/nginx.py#L390-L396
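The quoting rule in `Key.as_strings` is easy to miss: an empty value renders the bare name, and a value containing ';' or '#' (but no '"') is wrapped in double quotes so it survives re-parsing. Illustrative outputs (`as_strings` is accessed as a property elsewhere in this file, hence no call parentheses):

    nginx.Key('daemon', '').as_strings       # 'daemon;\n'
    nginx.Key('sendfile', 'on').as_strings   # 'sendfile on;\n'
    nginx.Key('add_header', 'X-Robots-Tag none;').as_strings
    # 'add_header "X-Robots-Tag none;";\n'  -- quoted because the value holds a ';'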
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
convert_aws_args
def convert_aws_args(aws_args): """Convert old style options into arguments to boto3.session.Session.""" if not isinstance(aws_args, dict): raise errors.InvalidConfiguration( 'Elastic DocManager config option "aws" must be a dict' ) old_session_kwargs = dict( region="region_name", access_id="aws_access_key_id", secret_key="aws_secret_access_key", ) new_kwargs = {} for arg in aws_args: if arg in old_session_kwargs: new_kwargs[old_session_kwargs[arg]] = aws_args[arg] else: new_kwargs[arg] = aws_args[arg] return new_kwargs
python
def convert_aws_args(aws_args): if not isinstance(aws_args, dict): raise errors.InvalidConfiguration( 'Elastic DocManager config option "aws" must be a dict' ) old_session_kwargs = dict( region="region_name", access_id="aws_access_key_id", secret_key="aws_secret_access_key", ) new_kwargs = {} for arg in aws_args: if arg in old_session_kwargs: new_kwargs[old_session_kwargs[arg]] = aws_args[arg] else: new_kwargs[arg] = aws_args[arg] return new_kwargs
[ "def", "convert_aws_args", "(", "aws_args", ")", ":", "if", "not", "isinstance", "(", "aws_args", ",", "dict", ")", ":", "raise", "errors", ".", "InvalidConfiguration", "(", "'Elastic DocManager config option \"aws\" must be a dict'", ")", "old_session_kwargs", "=", "dict", "(", "region", "=", "\"region_name\"", ",", "access_id", "=", "\"aws_access_key_id\"", ",", "secret_key", "=", "\"aws_secret_access_key\"", ",", ")", "new_kwargs", "=", "{", "}", "for", "arg", "in", "aws_args", ":", "if", "arg", "in", "old_session_kwargs", ":", "new_kwargs", "[", "old_session_kwargs", "[", "arg", "]", "]", "=", "aws_args", "[", "arg", "]", "else", ":", "new_kwargs", "[", "arg", "]", "=", "aws_args", "[", "arg", "]", "return", "new_kwargs" ]
Convert old style options into arguments to boto3.session.Session.
[ "Convert", "old", "style", "options", "into", "arguments", "to", "boto3", ".", "session", ".", "Session", "." ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L82-L99
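A worked example of the key translation (import path taken from the record's func_path_in_repository; the input values are placeholders):

    from mongo_connector.doc_managers.elastic2_doc_manager import convert_aws_args

    convert_aws_args({
        'region': 'us-east-1',         # legacy key   -> region_name
        'access_id': 'AKIA_EXAMPLE',   # legacy key   -> aws_access_key_id
        'profile_name': 'dev',         # unrecognized -> passed through as-is
    })
    # {'region_name': 'us-east-1', 'aws_access_key_id': 'AKIA_EXAMPLE',
    #  'profile_name': 'dev'}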
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
AutoCommiter.run
def run(self): """Periodically sends buffered operations and/or commit. """ if not self._should_auto_commit and not self._should_auto_send: return last_send, last_commit = 0, 0 while not self._stopped: if self._should_auto_commit: if last_commit > self._commit_interval: self._docman.commit() # commit also sends so reset both last_send, last_commit = 0, 0 # Give a chance to exit the loop if self._stopped: break if self._should_auto_send: if last_send > self._send_interval: self._docman.send_buffered_operations() last_send = 0 time.sleep(self._sleep_interval) last_send += self._sleep_interval last_commit += self._sleep_interval
python
def run(self): if not self._should_auto_commit and not self._should_auto_send: return last_send, last_commit = 0, 0 while not self._stopped: if self._should_auto_commit: if last_commit > self._commit_interval: self._docman.commit() last_send, last_commit = 0, 0 if self._stopped: break if self._should_auto_send: if last_send > self._send_interval: self._docman.send_buffered_operations() last_send = 0 time.sleep(self._sleep_interval) last_send += self._sleep_interval last_commit += self._sleep_interval
[ "def", "run", "(", "self", ")", ":", "if", "not", "self", ".", "_should_auto_commit", "and", "not", "self", ".", "_should_auto_send", ":", "return", "last_send", ",", "last_commit", "=", "0", ",", "0", "while", "not", "self", ".", "_stopped", ":", "if", "self", ".", "_should_auto_commit", ":", "if", "last_commit", ">", "self", ".", "_commit_interval", ":", "self", ".", "_docman", ".", "commit", "(", ")", "# commit also sends so reset both", "last_send", ",", "last_commit", "=", "0", ",", "0", "# Give a chance to exit the loop", "if", "self", ".", "_stopped", ":", "break", "if", "self", ".", "_should_auto_send", ":", "if", "last_send", ">", "self", ".", "_send_interval", ":", "self", ".", "_docman", ".", "send_buffered_operations", "(", ")", "last_send", "=", "0", "time", ".", "sleep", "(", "self", ".", "_sleep_interval", ")", "last_send", "+=", "self", ".", "_sleep_interval", "last_commit", "+=", "self", ".", "_sleep_interval" ]
Periodically sends buffered operations and/or commit.
[ "Periodically", "sends", "buffered", "operations", "and", "/", "or", "commit", "." ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L144-L166
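The loop above is a coarse two-timer pattern: counters accumulate in `_sleep_interval` steps and an action fires on the first tick strictly after its interval, so worst-case latency is roughly interval + sleep_interval; a commit resets both counters because committing also flushes the send buffer. A detached restatement of the same loop, with illustrative names:

    import time

    def ticker(send_interval, commit_interval, sleep_interval,
               send, commit, stopped):
        last_send = last_commit = 0
        while not stopped():
            if last_commit > commit_interval:
                commit()                       # commit also flushes sends,
                last_send = last_commit = 0    # so both counters reset
            if stopped():                      # chance to exit between actions
                break
            if last_send > send_interval:
                send()
                last_send = 0
            time.sleep(sleep_interval)
            last_send += sleep_interval
            last_commit += sleep_interval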
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
DocManager._index_and_mapping
def _index_and_mapping(self, namespace): """Helper method for getting the index and type from a namespace.""" index, doc_type = namespace.split(".", 1) return index.lower(), doc_type
python
def _index_and_mapping(self, namespace): index, doc_type = namespace.split(".", 1) return index.lower(), doc_type
[ "def", "_index_and_mapping", "(", "self", ",", "namespace", ")", ":", "index", ",", "doc_type", "=", "namespace", ".", "split", "(", "\".\"", ",", "1", ")", "return", "index", ".", "lower", "(", ")", ",", "doc_type" ]
Helper method for getting the index and type from a namespace.
[ "Helper", "method", "for", "getting", "the", "index", "and", "type", "from", "a", "namespace", "." ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L226-L229
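Two easy-to-miss details here: `split(".", 1)` means only the first dot separates index from doc type, and only the index is lowercased (Elasticsearch requires lowercase index names). For example:

    'MyDB.users.archive'.split('.', 1)   # ['MyDB', 'users.archive']
    # so _index_and_mapping('MyDB.users.archive') == ('mydb', 'users.archive')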
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
DocManager._stream_search
def _stream_search(self, *args, **kwargs): """Helper method for iterating over ES search results.""" for hit in scan( self.elastic, query=kwargs.pop("body", None), scroll="10m", **kwargs ): hit["_source"]["_id"] = hit["_id"] yield hit["_source"]
python
def _stream_search(self, *args, **kwargs): for hit in scan( self.elastic, query=kwargs.pop("body", None), scroll="10m", **kwargs ): hit["_source"]["_id"] = hit["_id"] yield hit["_source"]
[ "def", "_stream_search", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "hit", "in", "scan", "(", "self", ".", "elastic", ",", "query", "=", "kwargs", ".", "pop", "(", "\"body\"", ",", "None", ")", ",", "scroll", "=", "\"10m\"", ",", "*", "*", "kwargs", ")", ":", "hit", "[", "\"_source\"", "]", "[", "\"_id\"", "]", "=", "hit", "[", "\"_id\"", "]", "yield", "hit", "[", "\"_source\"", "]" ]
Helper method for iterating over ES search results.
[ "Helper", "method", "for", "iterating", "over", "ES", "search", "results", "." ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L458-L464
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
DocManager.search
def search(self, start_ts, end_ts): """Query Elasticsearch for documents in a time range. This method is used to find documents that may be in conflict during a rollback event in MongoDB. """ return self._stream_search( index=self.meta_index_name, body={"query": {"range": {"_ts": {"gte": start_ts, "lte": end_ts}}}}, )
python
def search(self, start_ts, end_ts): return self._stream_search( index=self.meta_index_name, body={"query": {"range": {"_ts": {"gte": start_ts, "lte": end_ts}}}}, )
[ "def", "search", "(", "self", ",", "start_ts", ",", "end_ts", ")", ":", "return", "self", ".", "_stream_search", "(", "index", "=", "self", ".", "meta_index_name", ",", "body", "=", "{", "\"query\"", ":", "{", "\"range\"", ":", "{", "\"_ts\"", ":", "{", "\"gte\"", ":", "start_ts", ",", "\"lte\"", ":", "end_ts", "}", "}", "}", "}", ",", ")" ]
Query Elasticsearch for documents in a time range. This method is used to find documents that may be in conflict during a rollback event in MongoDB.
[ "Query", "Elasticsearch", "for", "documents", "in", "a", "time", "range", "." ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L466-L475
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
DocManager.commit
def commit(self): """Send buffered requests and refresh all indexes.""" self.send_buffered_operations() retry_until_ok(self.elastic.indices.refresh, index="")
python
def commit(self): self.send_buffered_operations() retry_until_ok(self.elastic.indices.refresh, index="")
[ "def", "commit", "(", "self", ")", ":", "self", ".", "send_buffered_operations", "(", ")", "retry_until_ok", "(", "self", ".", "elastic", ".", "indices", ".", "refresh", ",", "index", "=", "\"\"", ")" ]
Send buffered requests and refresh all indexes.
[ "Send", "buffered", "requests", "and", "refresh", "all", "indexes", "." ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L507-L510
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.add_upsert
def add_upsert(self, action, meta_action, doc_source, update_spec): """ Function which stores sources for "insert" actions and decide if for "update" action has to add docs to get source buffer """ # Whenever update_spec is provided to this method # it means that doc source needs to be retrieved # from Elasticsearch. It means also that source # is not stored in local buffer if update_spec: self.bulk_index(action, meta_action) # -1 -> to get latest index number # -1 -> to get action instead of meta_action # Update document based on source retrieved from ES self.add_doc_to_update(action, update_spec, len(self.action_buffer) - 2) else: # Insert and update operations provide source # Store it in local buffer and use for comming updates # inside same buffer # add_to_sources will not be called for delete operation # as it does not provide doc_source if doc_source: self.add_to_sources(action, doc_source) self.bulk_index(action, meta_action)
python
def add_upsert(self, action, meta_action, doc_source, update_spec): if update_spec: self.bulk_index(action, meta_action) self.add_doc_to_update(action, update_spec, len(self.action_buffer) - 2) else: if doc_source: self.add_to_sources(action, doc_source) self.bulk_index(action, meta_action)
[ "def", "add_upsert", "(", "self", ",", "action", ",", "meta_action", ",", "doc_source", ",", "update_spec", ")", ":", "# Whenever update_spec is provided to this method", "# it means that doc source needs to be retrieved", "# from Elasticsearch. It means also that source", "# is not stored in local buffer", "if", "update_spec", ":", "self", ".", "bulk_index", "(", "action", ",", "meta_action", ")", "# -1 -> to get latest index number", "# -1 -> to get action instead of meta_action", "# Update document based on source retrieved from ES", "self", ".", "add_doc_to_update", "(", "action", ",", "update_spec", ",", "len", "(", "self", ".", "action_buffer", ")", "-", "2", ")", "else", ":", "# Insert and update operations provide source", "# Store it in local buffer and use for comming updates", "# inside same buffer", "# add_to_sources will not be called for delete operation", "# as it does not provide doc_source", "if", "doc_source", ":", "self", ".", "add_to_sources", "(", "action", ",", "doc_source", ")", "self", ".", "bulk_index", "(", "action", ",", "meta_action", ")" ]
Function which stores sources for "insert" actions and decide if for "update" action has to add docs to get source buffer
[ "Function", "which", "stores", "sources", "for", "insert", "actions", "and", "decide", "if", "for", "update", "action", "has", "to", "add", "docs", "to", "get", "source", "buffer" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L559-L585
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.add_doc_to_update
def add_doc_to_update(self, action, update_spec, action_buffer_index): """ Prepare document for update based on Elasticsearch response. Set flag if document needs to be retrieved from Elasticsearch """ doc = { "_index": action["_index"], "_type": action["_type"], "_id": action["_id"], } # If get_from_ES == True -> get document's source from Elasticsearch get_from_ES = self.should_get_id(action) self.doc_to_update.append((doc, update_spec, action_buffer_index, get_from_ES))
python
def add_doc_to_update(self, action, update_spec, action_buffer_index): doc = { "_index": action["_index"], "_type": action["_type"], "_id": action["_id"], } get_from_ES = self.should_get_id(action) self.doc_to_update.append((doc, update_spec, action_buffer_index, get_from_ES))
[ "def", "add_doc_to_update", "(", "self", ",", "action", ",", "update_spec", ",", "action_buffer_index", ")", ":", "doc", "=", "{", "\"_index\"", ":", "action", "[", "\"_index\"", "]", ",", "\"_type\"", ":", "action", "[", "\"_type\"", "]", ",", "\"_id\"", ":", "action", "[", "\"_id\"", "]", ",", "}", "# If get_from_ES == True -> get document's source from Elasticsearch", "get_from_ES", "=", "self", ".", "should_get_id", "(", "action", ")", "self", ".", "doc_to_update", ".", "append", "(", "(", "doc", ",", "update_spec", ",", "action_buffer_index", ",", "get_from_ES", ")", ")" ]
Prepare document for update based on Elasticsearch response. Set flag if document needs to be retrieved from Elasticsearch
[ "Prepare", "document", "for", "update", "based", "on", "Elasticsearch", "response", ".", "Set", "flag", "if", "document", "needs", "to", "be", "retrieved", "from", "Elasticsearch" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L587-L601
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.should_get_id
def should_get_id(self, action): """ Mark document to retrieve its source from Elasticsearch. Returns: True - if marking document for the first time in this bulk False - if document has been already marked """ mapping_ids = self.doc_to_get.setdefault(action["_index"], {}).setdefault( action["_type"], set() ) if action["_id"] in mapping_ids: # There is an update on this id already return False else: mapping_ids.add(action["_id"]) return True
python
def should_get_id(self, action): mapping_ids = self.doc_to_get.setdefault(action["_index"], {}).setdefault( action["_type"], set() ) if action["_id"] in mapping_ids: return False else: mapping_ids.add(action["_id"]) return True
[ "def", "should_get_id", "(", "self", ",", "action", ")", ":", "mapping_ids", "=", "self", ".", "doc_to_get", ".", "setdefault", "(", "action", "[", "\"_index\"", "]", ",", "{", "}", ")", ".", "setdefault", "(", "action", "[", "\"_type\"", "]", ",", "set", "(", ")", ")", "if", "action", "[", "\"_id\"", "]", "in", "mapping_ids", ":", "# There is an update on this id already", "return", "False", "else", ":", "mapping_ids", ".", "add", "(", "action", "[", "\"_id\"", "]", ")", "return", "True" ]
Mark document to retrieve its source from Elasticsearch. Returns: True - if marking document for the first time in this bulk False - if document has been already marked
[ "Mark", "document", "to", "retrieve", "its", "source", "from", "Elasticsearch", ".", "Returns", ":", "True", "-", "if", "marking", "document", "for", "the", "first", "time", "in", "this", "bulk", "False", "-", "if", "document", "has", "been", "already", "marked" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L603-L618
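The method is a first-seen test over a nested `{index: {type: set(ids)}}` map built with chained `setdefault`. A standalone sketch of the same pattern:

    doc_to_get = {}

    def first_seen(action):
        ids = doc_to_get.setdefault(action['_index'], {}) \
                        .setdefault(action['_type'], set())
        if action['_id'] in ids:
            return False      # already marked in this bulk
        ids.add(action['_id'])
        return True           # marked for the first time

    first_seen({'_index': 'i', '_type': 't', '_id': 1})   # True
    first_seen({'_index': 'i', '_type': 't', '_id': 1})   # False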
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.get_docs_sources_from_ES
def get_docs_sources_from_ES(self): """Get document sources using MGET elasticsearch API""" docs = [doc for doc, _, _, get_from_ES in self.doc_to_update if get_from_ES] if docs: documents = self.docman.elastic.mget(body={"docs": docs}, realtime=True) return iter(documents["docs"]) else: return iter([])
python
def get_docs_sources_from_ES(self): docs = [doc for doc, _, _, get_from_ES in self.doc_to_update if get_from_ES] if docs: documents = self.docman.elastic.mget(body={"docs": docs}, realtime=True) return iter(documents["docs"]) else: return iter([])
[ "def", "get_docs_sources_from_ES", "(", "self", ")", ":", "docs", "=", "[", "doc", "for", "doc", ",", "_", ",", "_", ",", "get_from_ES", "in", "self", ".", "doc_to_update", "if", "get_from_ES", "]", "if", "docs", ":", "documents", "=", "self", ".", "docman", ".", "elastic", ".", "mget", "(", "body", "=", "{", "\"docs\"", ":", "docs", "}", ",", "realtime", "=", "True", ")", "return", "iter", "(", "documents", "[", "\"docs\"", "]", ")", "else", ":", "return", "iter", "(", "[", "]", ")" ]
Get document sources using MGET elasticsearch API
[ "Get", "document", "sources", "using", "MGET", "elasticsearch", "API" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L620-L627
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.update_sources
def update_sources(self): """Update local sources based on response from Elasticsearch""" ES_documents = self.get_docs_sources_from_ES() for doc, update_spec, action_buffer_index, get_from_ES in self.doc_to_update: if get_from_ES: # Update source based on response from ES ES_doc = next(ES_documents) if ES_doc["found"]: source = ES_doc["_source"] else: # Document not found in elasticsearch, # Seems like something went wrong during replication LOG.error( "mGET: Document id: %s has not been found " "in Elasticsearch. Due to that " "following update failed: %s", doc["_id"], update_spec, ) self.reset_action(action_buffer_index) continue else: # Get source stored locally before applying update # as it is up-to-date source = self.get_from_sources(doc["_index"], doc["_type"], doc["_id"]) if not source: LOG.error( "mGET: Document id: %s has not been found " "in local sources. Due to that following " "update failed: %s", doc["_id"], update_spec, ) self.reset_action(action_buffer_index) continue updated = self.docman.apply_update(source, update_spec) # Remove _id field from source if "_id" in updated: del updated["_id"] # Everytime update locally stored sources to keep them up-to-date self.add_to_sources(doc, updated) self.action_buffer[action_buffer_index][ "_source" ] = self.docman._formatter.format_document(updated) # Remove empty actions if there were errors self.action_buffer = [ each_action for each_action in self.action_buffer if each_action ]
python
def update_sources(self): ES_documents = self.get_docs_sources_from_ES() for doc, update_spec, action_buffer_index, get_from_ES in self.doc_to_update: if get_from_ES: ES_doc = next(ES_documents) if ES_doc["found"]: source = ES_doc["_source"] else: LOG.error( "mGET: Document id: %s has not been found " "in Elasticsearch. Due to that " "following update failed: %s", doc["_id"], update_spec, ) self.reset_action(action_buffer_index) continue else: source = self.get_from_sources(doc["_index"], doc["_type"], doc["_id"]) if not source: LOG.error( "mGET: Document id: %s has not been found " "in local sources. Due to that following " "update failed: %s", doc["_id"], update_spec, ) self.reset_action(action_buffer_index) continue updated = self.docman.apply_update(source, update_spec) if "_id" in updated: del updated["_id"] self.add_to_sources(doc, updated) self.action_buffer[action_buffer_index][ "_source" ] = self.docman._formatter.format_document(updated) self.action_buffer = [ each_action for each_action in self.action_buffer if each_action ]
[ "def", "update_sources", "(", "self", ")", ":", "ES_documents", "=", "self", ".", "get_docs_sources_from_ES", "(", ")", "for", "doc", ",", "update_spec", ",", "action_buffer_index", ",", "get_from_ES", "in", "self", ".", "doc_to_update", ":", "if", "get_from_ES", ":", "# Update source based on response from ES", "ES_doc", "=", "next", "(", "ES_documents", ")", "if", "ES_doc", "[", "\"found\"", "]", ":", "source", "=", "ES_doc", "[", "\"_source\"", "]", "else", ":", "# Document not found in elasticsearch,", "# Seems like something went wrong during replication", "LOG", ".", "error", "(", "\"mGET: Document id: %s has not been found \"", "\"in Elasticsearch. Due to that \"", "\"following update failed: %s\"", ",", "doc", "[", "\"_id\"", "]", ",", "update_spec", ",", ")", "self", ".", "reset_action", "(", "action_buffer_index", ")", "continue", "else", ":", "# Get source stored locally before applying update", "# as it is up-to-date", "source", "=", "self", ".", "get_from_sources", "(", "doc", "[", "\"_index\"", "]", ",", "doc", "[", "\"_type\"", "]", ",", "doc", "[", "\"_id\"", "]", ")", "if", "not", "source", ":", "LOG", ".", "error", "(", "\"mGET: Document id: %s has not been found \"", "\"in local sources. Due to that following \"", "\"update failed: %s\"", ",", "doc", "[", "\"_id\"", "]", ",", "update_spec", ",", ")", "self", ".", "reset_action", "(", "action_buffer_index", ")", "continue", "updated", "=", "self", ".", "docman", ".", "apply_update", "(", "source", ",", "update_spec", ")", "# Remove _id field from source", "if", "\"_id\"", "in", "updated", ":", "del", "updated", "[", "\"_id\"", "]", "# Everytime update locally stored sources to keep them up-to-date", "self", ".", "add_to_sources", "(", "doc", ",", "updated", ")", "self", ".", "action_buffer", "[", "action_buffer_index", "]", "[", "\"_source\"", "]", "=", "self", ".", "docman", ".", "_formatter", ".", "format_document", "(", "updated", ")", "# Remove empty actions if there were errors", "self", ".", "action_buffer", "=", "[", "each_action", "for", "each_action", "in", "self", ".", "action_buffer", "if", "each_action", "]" ]
Update local sources based on response from Elasticsearch
[ "Update", "local", "sources", "based", "on", "response", "from", "Elasticsearch" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L630-L683
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.add_to_sources
def add_to_sources(self, action, doc_source): """Store sources locally""" mapping = self.sources.setdefault(action["_index"], {}).setdefault( action["_type"], {} ) mapping[action["_id"]] = doc_source
python
def add_to_sources(self, action, doc_source): mapping = self.sources.setdefault(action["_index"], {}).setdefault( action["_type"], {} ) mapping[action["_id"]] = doc_source
[ "def", "add_to_sources", "(", "self", ",", "action", ",", "doc_source", ")", ":", "mapping", "=", "self", ".", "sources", ".", "setdefault", "(", "action", "[", "\"_index\"", "]", ",", "{", "}", ")", ".", "setdefault", "(", "action", "[", "\"_type\"", "]", ",", "{", "}", ")", "mapping", "[", "action", "[", "\"_id\"", "]", "]", "=", "doc_source" ]
Store sources locally
[ "Store", "sources", "locally" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L690-L695
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.get_from_sources
def get_from_sources(self, index, doc_type, document_id): """Get source stored locally""" return self.sources.get(index, {}).get(doc_type, {}).get(document_id, {})
python
def get_from_sources(self, index, doc_type, document_id): return self.sources.get(index, {}).get(doc_type, {}).get(document_id, {})
[ "def", "get_from_sources", "(", "self", ",", "index", ",", "doc_type", ",", "document_id", ")", ":", "return", "self", ".", "sources", ".", "get", "(", "index", ",", "{", "}", ")", ".", "get", "(", "doc_type", ",", "{", "}", ")", ".", "get", "(", "document_id", ",", "{", "}", ")" ]
Get source stored locally
[ "Get", "source", "stored", "locally" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L697-L699
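Together, `add_to_sources` and `get_from_sources` maintain a three-level cache, index -> doc type -> id -> source, and a miss returns `{}` — a falsy value, which is exactly what `update_sources` tests before logging an error. A sketch of the same access pattern:

    sources = {}
    action = {'_index': 'db', '_type': 'users', '_id': '42'}

    # write path: nested setdefault, then assignment
    sources.setdefault(action['_index'], {}) \
           .setdefault(action['_type'], {})[action['_id']] = {'name': 'a'}

    # read path: chained .get with {} fallbacks
    sources.get('db', {}).get('users', {}).get('42', {})   # {'name': 'a'}
    sources.get('db', {}).get('users', {}).get('43', {})   # miss -> {} (falsy)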
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.clean_up
def clean_up(self): """Do clean-up before returning buffer""" self.action_buffer = [] self.sources = {} self.doc_to_get = {} self.doc_to_update = []
python
def clean_up(self): self.action_buffer = [] self.sources = {} self.doc_to_get = {} self.doc_to_update = []
[ "def", "clean_up", "(", "self", ")", ":", "self", ".", "action_buffer", "=", "[", "]", "self", ".", "sources", "=", "{", "}", "self", ".", "doc_to_get", "=", "{", "}", "self", ".", "doc_to_update", "=", "[", "]" ]
Do clean-up before returning buffer
[ "Do", "clean", "-", "up", "before", "returning", "buffer" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L705-L710
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
BulkBuffer.get_buffer
def get_buffer(self): """Get buffer which needs to be bulked to elasticsearch""" # Get sources for documents which are in Elasticsearch # and they are not in local buffer if self.doc_to_update: self.update_sources() ES_buffer = self.action_buffer self.clean_up() return ES_buffer
python
def get_buffer(self): if self.doc_to_update: self.update_sources() ES_buffer = self.action_buffer self.clean_up() return ES_buffer
[ "def", "get_buffer", "(", "self", ")", ":", "# Get sources for documents which are in Elasticsearch", "# and they are not in local buffer", "if", "self", ".", "doc_to_update", ":", "self", ".", "update_sources", "(", ")", "ES_buffer", "=", "self", ".", "action_buffer", "self", ".", "clean_up", "(", ")", "return", "ES_buffer" ]
Get buffer which needs to be bulked to elasticsearch
[ "Get", "buffer", "which", "needs", "to", "be", "bulked", "to", "elasticsearch" ]
train
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L712-L722
citruz/beacontools
beacontools/scanner.py
Monitor.run
def run(self): """Continously scan for BLE advertisements.""" self.socket = self.bluez.hci_open_dev(self.bt_device_id) filtr = self.bluez.hci_filter_new() self.bluez.hci_filter_all_events(filtr) self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT) self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr) self.set_scan_parameters() self.toggle_scan(True) while self.keep_going: pkt = self.socket.recv(255) event = to_int(pkt[1]) subevent = to_int(pkt[3]) if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT: # we have an BLE advertisement self.process_packet(pkt) self.socket.close()
python
def run(self): self.socket = self.bluez.hci_open_dev(self.bt_device_id) filtr = self.bluez.hci_filter_new() self.bluez.hci_filter_all_events(filtr) self.bluez.hci_filter_set_ptype(filtr, self.bluez.HCI_EVENT_PKT) self.socket.setsockopt(self.bluez.SOL_HCI, self.bluez.HCI_FILTER, filtr) self.set_scan_parameters() self.toggle_scan(True) while self.keep_going: pkt = self.socket.recv(255) event = to_int(pkt[1]) subevent = to_int(pkt[3]) if event == LE_META_EVENT and subevent == EVT_LE_ADVERTISING_REPORT: self.process_packet(pkt) self.socket.close()
[ "def", "run", "(", "self", ")", ":", "self", ".", "socket", "=", "self", ".", "bluez", ".", "hci_open_dev", "(", "self", ".", "bt_device_id", ")", "filtr", "=", "self", ".", "bluez", ".", "hci_filter_new", "(", ")", "self", ".", "bluez", ".", "hci_filter_all_events", "(", "filtr", ")", "self", ".", "bluez", ".", "hci_filter_set_ptype", "(", "filtr", ",", "self", ".", "bluez", ".", "HCI_EVENT_PKT", ")", "self", ".", "socket", ".", "setsockopt", "(", "self", ".", "bluez", ".", "SOL_HCI", ",", "self", ".", "bluez", ".", "HCI_FILTER", ",", "filtr", ")", "self", ".", "set_scan_parameters", "(", ")", "self", ".", "toggle_scan", "(", "True", ")", "while", "self", ".", "keep_going", ":", "pkt", "=", "self", ".", "socket", ".", "recv", "(", "255", ")", "event", "=", "to_int", "(", "pkt", "[", "1", "]", ")", "subevent", "=", "to_int", "(", "pkt", "[", "3", "]", ")", "if", "event", "==", "LE_META_EVENT", "and", "subevent", "==", "EVT_LE_ADVERTISING_REPORT", ":", "# we have an BLE advertisement", "self", ".", "process_packet", "(", "pkt", ")", "self", ".", "socket", ".", "close", "(", ")" ]
Continously scan for BLE advertisements.
[ "Continously", "scan", "for", "BLE", "advertisements", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L89-L108
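The two index reads in the loop line up with the raw HCI event frame that a BlueZ HCI socket delivers; the byte values below are the Bluetooth-spec ones and are an assumption about this module's constant definitions:

    # pkt[0]  packet type indicator (0x04 = HCI event packet)
    # pkt[1]  event code            (0x3e = LE meta event)
    # pkt[2]  parameter total length
    # pkt[3]  LE meta sub-event     (0x02 = LE advertising report)
    # so the loop forwards only LE advertising reports to process_packet()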
citruz/beacontools
beacontools/scanner.py
Monitor.set_scan_parameters
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10, address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL): """"sets the le scan parameters Args: scan_type: ScanType.(PASSIVE|ACTIVE) interval: ms (as float) between scans (valid range 2.5ms - 10240ms) ..note:: when interval and window are equal, the scan runs continuos window: ms (as float) scan duration (valid range 2.5ms - 10240ms) address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM) * PUBLIC = use device MAC address * RANDOM = generate a random MAC address and use that filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will return all fetched bluetooth packets (WHITELIST_ONLY is not supported, because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented) Raises: ValueError: A value had an unexpected format or was not in range """ interval_fractions = interval_ms / MS_FRACTION_DIVIDER if interval_fractions < 0x0004 or interval_fractions > 0x4000: raise ValueError( "Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format( interval_fractions)) window_fractions = window_ms / MS_FRACTION_DIVIDER if window_fractions < 0x0004 or window_fractions > 0x4000: raise ValueError( "Invalid window given {}, must be in range of 2.5ms to 10240ms!".format( window_fractions)) interval_fractions, window_fractions = int(interval_fractions), int(window_fractions) scan_parameter_pkg = struct.pack( ">BHHBB", scan_type, interval_fractions, window_fractions, address_type, filter_type) self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS, scan_parameter_pkg)
python
def set_scan_parameters(self, scan_type=ScanType.ACTIVE, interval_ms=10, window_ms=10, address_type=BluetoothAddressType.RANDOM, filter_type=ScanFilter.ALL): interval_fractions = interval_ms / MS_FRACTION_DIVIDER if interval_fractions < 0x0004 or interval_fractions > 0x4000: raise ValueError( "Invalid interval given {}, must be in range of 2.5ms to 10240ms!".format( interval_fractions)) window_fractions = window_ms / MS_FRACTION_DIVIDER if window_fractions < 0x0004 or window_fractions > 0x4000: raise ValueError( "Invalid window given {}, must be in range of 2.5ms to 10240ms!".format( window_fractions)) interval_fractions, window_fractions = int(interval_fractions), int(window_fractions) scan_parameter_pkg = struct.pack( ">BHHBB", scan_type, interval_fractions, window_fractions, address_type, filter_type) self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_PARAMETERS, scan_parameter_pkg)
[ "def", "set_scan_parameters", "(", "self", ",", "scan_type", "=", "ScanType", ".", "ACTIVE", ",", "interval_ms", "=", "10", ",", "window_ms", "=", "10", ",", "address_type", "=", "BluetoothAddressType", ".", "RANDOM", ",", "filter_type", "=", "ScanFilter", ".", "ALL", ")", ":", "interval_fractions", "=", "interval_ms", "/", "MS_FRACTION_DIVIDER", "if", "interval_fractions", "<", "0x0004", "or", "interval_fractions", ">", "0x4000", ":", "raise", "ValueError", "(", "\"Invalid interval given {}, must be in range of 2.5ms to 10240ms!\"", ".", "format", "(", "interval_fractions", ")", ")", "window_fractions", "=", "window_ms", "/", "MS_FRACTION_DIVIDER", "if", "window_fractions", "<", "0x0004", "or", "window_fractions", ">", "0x4000", ":", "raise", "ValueError", "(", "\"Invalid window given {}, must be in range of 2.5ms to 10240ms!\"", ".", "format", "(", "window_fractions", ")", ")", "interval_fractions", ",", "window_fractions", "=", "int", "(", "interval_fractions", ")", ",", "int", "(", "window_fractions", ")", "scan_parameter_pkg", "=", "struct", ".", "pack", "(", "\">BHHBB\"", ",", "scan_type", ",", "interval_fractions", ",", "window_fractions", ",", "address_type", ",", "filter_type", ")", "self", ".", "bluez", ".", "hci_send_cmd", "(", "self", ".", "socket", ",", "OGF_LE_CTL", ",", "OCF_LE_SET_SCAN_PARAMETERS", ",", "scan_parameter_pkg", ")" ]
sets the le scan parameters Args: scan_type: ScanType.(PASSIVE|ACTIVE) interval: ms (as float) between scans (valid range 2.5ms - 10240ms) ..note:: when interval and window are equal, the scan runs continuos window: ms (as float) scan duration (valid range 2.5ms - 10240ms) address_type: Bluetooth address type BluetoothAddressType.(PUBLIC|RANDOM) * PUBLIC = use device MAC address * RANDOM = generate a random MAC address and use that filter: ScanFilter.(ALL|WHITELIST_ONLY) only ALL is supported, which will return all fetched bluetooth packets (WHITELIST_ONLY is not supported, because OCF_LE_ADD_DEVICE_TO_WHITE_LIST command is not implemented) Raises: ValueError: A value had an unexpected format or was not in range
[ "sets", "the", "le", "scan", "parameters" ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L110-L151
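A worked pass through the millisecond conversion and the final `struct.pack`. `MS_FRACTION_DIVIDER` is presumably 0.625, the BLE spec's scan-parameter unit — that value is consistent with the documented range checks (0x0004 * 0.625 = 2.5 ms, 0x4000 * 0.625 = 10240 ms); the three one-byte field values are likewise the spec's and assumed to match the module's enums:

    import struct

    MS_FRACTION_DIVIDER = 0.625                  # assumed BLE unit, see above
    fractions = int(10 / MS_FRACTION_DIVIDER)    # 10 ms -> 16 units (0x0010)

    pkg = struct.pack(">BHHBB", 0x01, fractions, fractions, 0x01, 0x00)
    # fields: scan_type=ACTIVE(0x01), interval, window,
    #         address_type=RANDOM(0x01), filter=ALL(0x00)
    assert pkg == b'\x01\x00\x10\x00\x10\x01\x00'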
citruz/beacontools
beacontools/scanner.py
Monitor.toggle_scan
def toggle_scan(self, enable, filter_duplicates=False): """Enables or disables BLE scanning Args: enable: boolean value to enable (True) or disable (False) scanner filter_duplicates: boolean value to enable/disable filter, that omits duplicated packets""" command = struct.pack(">BB", enable, filter_duplicates) self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
python
def toggle_scan(self, enable, filter_duplicates=False): command = struct.pack(">BB", enable, filter_duplicates) self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
[ "def", "toggle_scan", "(", "self", ",", "enable", ",", "filter_duplicates", "=", "False", ")", ":", "command", "=", "struct", ".", "pack", "(", "\">BB\"", ",", "enable", ",", "filter_duplicates", ")", "self", ".", "bluez", ".", "hci_send_cmd", "(", "self", ".", "socket", ",", "OGF_LE_CTL", ",", "OCF_LE_SET_SCAN_ENABLE", ",", "command", ")" ]
Enables or disables BLE scanning Args: enable: boolean value to enable (True) or disable (False) scanner filter_duplicates: boolean value to enable/disable filter, that omits duplicated packets
[ "Enables", "or", "disables", "BLE", "scanning" ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L153-L161
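The packed payload for `toggle_scan` is just two flag bytes; for example, enabling scanning while keeping duplicate advertisements:

    import struct
    struct.pack(">BB", True, False)   # b'\x01\x00'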
citruz/beacontools
beacontools/scanner.py
Monitor.process_packet
def process_packet(self, pkt): """Parse the packet and call callback if one of the filters matches.""" # check if this could be a valid packet before parsing # this reduces the CPU load significantly if not ( \ ((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \ ((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \ ((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))): return bt_addr = bt_addr_to_string(pkt[7:13]) rssi = bin_to_int(pkt[-1]) # strip bluetooth address and parse packet packet = parse_packet(pkt[14:-1]) # return if packet was not an beacon advertisement if not packet: return # we need to remeber which eddystone beacon has which bt address # because the TLM and URL frames do not contain the namespace and instance self.save_bt_addr(packet, bt_addr) # properties holds the identifying information for a beacon # e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon properties = self.get_properties(packet, bt_addr) if self.device_filter is None and self.packet_filter is None: # no filters selected self.callback(bt_addr, rssi, packet, properties) elif self.device_filter is None: # filter by packet type if is_one_of(packet, self.packet_filter): self.callback(bt_addr, rssi, packet, properties) else: # filter by device and packet type if self.packet_filter and not is_one_of(packet, self.packet_filter): # return if packet filter does not match return # iterate over filters and call .matches() on each for filtr in self.device_filter: if isinstance(filtr, BtAddrFilter): if filtr.matches({'bt_addr':bt_addr}): self.callback(bt_addr, rssi, packet, properties) return elif filtr.matches(properties): self.callback(bt_addr, rssi, packet, properties) return
python
def process_packet(self, pkt): if not ( \ ((self.mode & ScannerMode.MODE_IBEACON) and (pkt[19:23] == b"\x4c\x00\x02\x15")) or \ ((self.mode & ScannerMode.MODE_EDDYSTONE) and (pkt[19:21] == b"\xaa\xfe")) or \ ((self.mode & ScannerMode.MODE_ESTIMOTE) and (pkt[19:21] == b"\x9a\xfe"))): return bt_addr = bt_addr_to_string(pkt[7:13]) rssi = bin_to_int(pkt[-1]) packet = parse_packet(pkt[14:-1]) if not packet: return self.save_bt_addr(packet, bt_addr) properties = self.get_properties(packet, bt_addr) if self.device_filter is None and self.packet_filter is None: self.callback(bt_addr, rssi, packet, properties) elif self.device_filter is None: if is_one_of(packet, self.packet_filter): self.callback(bt_addr, rssi, packet, properties) else: if self.packet_filter and not is_one_of(packet, self.packet_filter): return for filtr in self.device_filter: if isinstance(filtr, BtAddrFilter): if filtr.matches({'bt_addr':bt_addr}): self.callback(bt_addr, rssi, packet, properties) return elif filtr.matches(properties): self.callback(bt_addr, rssi, packet, properties) return
[ "def", "process_packet", "(", "self", ",", "pkt", ")", ":", "# check if this could be a valid packet before parsing", "# this reduces the CPU load significantly", "if", "not", "(", "(", "(", "self", ".", "mode", "&", "ScannerMode", ".", "MODE_IBEACON", ")", "and", "(", "pkt", "[", "19", ":", "23", "]", "==", "b\"\\x4c\\x00\\x02\\x15\"", ")", ")", "or", "(", "(", "self", ".", "mode", "&", "ScannerMode", ".", "MODE_EDDYSTONE", ")", "and", "(", "pkt", "[", "19", ":", "21", "]", "==", "b\"\\xaa\\xfe\"", ")", ")", "or", "(", "(", "self", ".", "mode", "&", "ScannerMode", ".", "MODE_ESTIMOTE", ")", "and", "(", "pkt", "[", "19", ":", "21", "]", "==", "b\"\\x9a\\xfe\"", ")", ")", ")", ":", "return", "bt_addr", "=", "bt_addr_to_string", "(", "pkt", "[", "7", ":", "13", "]", ")", "rssi", "=", "bin_to_int", "(", "pkt", "[", "-", "1", "]", ")", "# strip bluetooth address and parse packet", "packet", "=", "parse_packet", "(", "pkt", "[", "14", ":", "-", "1", "]", ")", "# return if packet was not a beacon advertisement", "if", "not", "packet", ":", "return", "# we need to remember which eddystone beacon has which bt address", "# because the TLM and URL frames do not contain the namespace and instance", "self", ".", "save_bt_addr", "(", "packet", ",", "bt_addr", ")", "# properties holds the identifying information for a beacon", "# e.g. instance and namespace for eddystone; uuid, major, minor for iBeacon", "properties", "=", "self", ".", "get_properties", "(", "packet", ",", "bt_addr", ")", "if", "self", ".", "device_filter", "is", "None", "and", "self", ".", "packet_filter", "is", "None", ":", "# no filters selected", "self", ".", "callback", "(", "bt_addr", ",", "rssi", ",", "packet", ",", "properties", ")", "elif", "self", ".", "device_filter", "is", "None", ":", "# filter by packet type", "if", "is_one_of", "(", "packet", ",", "self", ".", "packet_filter", ")", ":", "self", ".", "callback", "(", "bt_addr", ",", "rssi", ",", "packet", ",", "properties", ")", "else", ":", "# filter by device and packet type", "if", "self", ".", "packet_filter", "and", "not", "is_one_of", "(", "packet", ",", "self", ".", "packet_filter", ")", ":", "# return if packet filter does not match", "return", "# iterate over filters and call .matches() on each", "for", "filtr", "in", "self", ".", "device_filter", ":", "if", "isinstance", "(", "filtr", ",", "BtAddrFilter", ")", ":", "if", "filtr", ".", "matches", "(", "{", "'bt_addr'", ":", "bt_addr", "}", ")", ":", "self", ".", "callback", "(", "bt_addr", ",", "rssi", ",", "packet", ",", "properties", ")", "return", "elif", "filtr", ".", "matches", "(", "properties", ")", ":", "self", ".", "callback", "(", "bt_addr", ",", "rssi", ",", "packet", ",", "properties", ")", "return" ]
Parse the packet and call callback if one of the filters matches.
[ "Parse", "the", "packet", "and", "call", "callback", "if", "one", "of", "the", "filters", "matches", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L163-L213
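The pre-filter at the top of process_packet avoids full parsing by comparing fixed byte offsets of the raw HCI event against known identifiers: the Apple company ID plus iBeacon type (4c 00 02 15), and the 16-bit service UUIDs 0xFEAA (Eddystone) and 0xFE9A (Estimote), both little-endian on the wire. A self-contained sketch of the same idea, assuming the offsets used in the code above:

def looks_like_beacon(pkt: bytes) -> bool:
    # cheap prefix checks before any real parsing happens
    return (pkt[19:23] == b"\x4c\x00\x02\x15"   # iBeacon: Apple ID + type
            or pkt[19:21] == b"\xaa\xfe"        # Eddystone, UUID 0xFEAA little-endian
            or pkt[19:21] == b"\x9a\xfe")       # Estimote, UUID 0xFE9A little-endian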
citruz/beacontools
beacontools/scanner.py
Monitor.save_bt_addr
def save_bt_addr(self, packet, bt_addr): """Add to the list of mappings.""" if isinstance(packet, EddystoneUIDFrame): # remove old mapping new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr] new_mappings.append((bt_addr, packet.properties)) self.eddystone_mappings = new_mappings
python
def save_bt_addr(self, packet, bt_addr): if isinstance(packet, EddystoneUIDFrame): new_mappings = [m for m in self.eddystone_mappings if m[0] != bt_addr] new_mappings.append((bt_addr, packet.properties)) self.eddystone_mappings = new_mappings
[ "def", "save_bt_addr", "(", "self", ",", "packet", ",", "bt_addr", ")", ":", "if", "isinstance", "(", "packet", ",", "EddystoneUIDFrame", ")", ":", "# remove old mapping", "new_mappings", "=", "[", "m", "for", "m", "in", "self", ".", "eddystone_mappings", "if", "m", "[", "0", "]", "!=", "bt_addr", "]", "new_mappings", ".", "append", "(", "(", "bt_addr", ",", "packet", ".", "properties", ")", ")", "self", ".", "eddystone_mappings", "=", "new_mappings" ]
Add to the list of mappings.
[ "Add", "to", "the", "list", "of", "mappings", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L215-L221
citruz/beacontools
beacontools/scanner.py
Monitor.get_properties
def get_properties(self, packet, bt_addr): """Get properties of beacon depending on type.""" if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \ EddystoneEncryptedTLMFrame, EddystoneEIDFrame]): # here we retrieve the namespace and instance which correspond to the # eddystone beacon with this bt address return self.properties_from_mapping(bt_addr) else: return packet.properties
python
def get_properties(self, packet, bt_addr): if is_one_of(packet, [EddystoneTLMFrame, EddystoneURLFrame, \ EddystoneEncryptedTLMFrame, EddystoneEIDFrame]): return self.properties_from_mapping(bt_addr) else: return packet.properties
[ "def", "get_properties", "(", "self", ",", "packet", ",", "bt_addr", ")", ":", "if", "is_one_of", "(", "packet", ",", "[", "EddystoneTLMFrame", ",", "EddystoneURLFrame", ",", "EddystoneEncryptedTLMFrame", ",", "EddystoneEIDFrame", "]", ")", ":", "# here we retrieve the namespace and instance which correspond to the", "# eddystone beacon with this bt address", "return", "self", ".", "properties_from_mapping", "(", "bt_addr", ")", "else", ":", "return", "packet", ".", "properties" ]
Get properties of beacon depending on type.
[ "Get", "properties", "of", "beacon", "depending", "on", "type", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L223-L231
citruz/beacontools
beacontools/scanner.py
Monitor.properties_from_mapping
def properties_from_mapping(self, bt_addr): """Retrieve properties (namespace, instance) for the specified bt address.""" for addr, properties in self.eddystone_mappings: if addr == bt_addr: return properties return None
python
def properties_from_mapping(self, bt_addr): for addr, properties in self.eddystone_mappings: if addr == bt_addr: return properties return None
[ "def", "properties_from_mapping", "(", "self", ",", "bt_addr", ")", ":", "for", "addr", ",", "properties", "in", "self", ".", "eddystone_mappings", ":", "if", "addr", "==", "bt_addr", ":", "return", "properties", "return", "None" ]
Retrieve properties (namespace, instance) for the specified bt address.
[ "Retrieve", "properties", "(", "namespace", "instance", ")", "for", "the", "specified", "bt", "address", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L233-L238
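Together, save_bt_addr and properties_from_mapping form a small address-keyed cache: UID frames populate it, and TLM/URL/EID frames later read the beacon identity back out of it. A hedged round-trip sketch using plain tuples and dicts in place of real frame objects:

mappings = []  # list of (bt_addr, properties) tuples; at most one entry per address

def save(bt_addr, properties):
    global mappings
    mappings = [m for m in mappings if m[0] != bt_addr]  # drop any stale entry
    mappings.append((bt_addr, properties))

save("aa:bb:cc:dd:ee:ff", {"namespace": "0" * 20, "instance": "0" * 12})
props = next((p for a, p in mappings if a == "aa:bb:cc:dd:ee:ff"), None)
assert props["instance"] == "0" * 12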
citruz/beacontools
beacontools/scanner.py
Monitor.terminate
def terminate(self): """Signal runner to stop and join thread.""" self.toggle_scan(False) self.keep_going = False self.join()
python
def terminate(self): self.toggle_scan(False) self.keep_going = False self.join()
[ "def", "terminate", "(", "self", ")", ":", "self", ".", "toggle_scan", "(", "False", ")", "self", ".", "keep_going", "=", "False", "self", ".", "join", "(", ")" ]
Signal runner to stop and join thread.
[ "Signal", "runner", "to", "stop", "and", "join", "thread", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/scanner.py#L240-L244
citruz/beacontools
beacontools/utils.py
data_to_uuid
def data_to_uuid(data): """Convert an array of binary data to the iBeacon uuid format.""" string = data_to_hexstring(data) return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32]
python
def data_to_uuid(data): string = data_to_hexstring(data) return string[0:8]+'-'+string[8:12]+'-'+string[12:16]+'-'+string[16:20]+'-'+string[20:32]
[ "def", "data_to_uuid", "(", "data", ")", ":", "string", "=", "data_to_hexstring", "(", "data", ")", "return", "string", "[", "0", ":", "8", "]", "+", "'-'", "+", "string", "[", "8", ":", "12", "]", "+", "'-'", "+", "string", "[", "12", ":", "16", "]", "+", "'-'", "+", "string", "[", "16", ":", "20", "]", "+", "'-'", "+", "string", "[", "20", ":", "32", "]" ]
Convert an array of binary data to the iBeacon uuid format.
[ "Convert", "an", "array", "of", "binary", "data", "to", "the", "iBeacon", "uuid", "format", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L24-L27
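The slicing in data_to_uuid follows the canonical 8-4-4-4-12 UUID layout. A worked example, assuming data_to_hexstring simply hex-encodes the 16 input bytes:

hexstring = "112233445566778899aabbccddeeff00"  # 16 bytes, hex-encoded
uuid = "-".join([hexstring[0:8], hexstring[8:12], hexstring[12:16],
                 hexstring[16:20], hexstring[20:32]])
assert uuid == "11223344-5566-7788-99aa-bbccddeeff00"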
citruz/beacontools
beacontools/utils.py
bt_addr_to_string
def bt_addr_to_string(addr): """Convert a binary string to the hex representation.""" addr_str = array.array('B', addr) addr_str.reverse() hex_str = hexlify(addr_str.tostring()).decode('ascii') # insert ":" separator between the bytes return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
python
def bt_addr_to_string(addr): addr_str = array.array('B', addr) addr_str.reverse() hex_str = hexlify(addr_str.tostring()).decode('ascii') return ':'.join(a+b for a, b in zip(hex_str[::2], hex_str[1::2]))
[ "def", "bt_addr_to_string", "(", "addr", ")", ":", "addr_str", "=", "array", ".", "array", "(", "'B'", ",", "addr", ")", "addr_str", ".", "reverse", "(", ")", "hex_str", "=", "hexlify", "(", "addr_str", ".", "tostring", "(", ")", ")", ".", "decode", "(", "'ascii'", ")", "# insert \":\" separator between the bytes", "return", "':'", ".", "join", "(", "a", "+", "b", "for", "a", ",", "b", "in", "zip", "(", "hex_str", "[", ":", ":", "2", "]", ",", "hex_str", "[", "1", ":", ":", "2", "]", ")", ")" ]
Convert a binary string to the hex representation.
[ "Convert", "a", "binary", "string", "to", "the", "hex", "representation", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L35-L41
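Bluetooth addresses arrive least-significant byte first, hence the reverse before hex-encoding. A worked sketch of the same transformation; note that array.tostring() was removed in Python 3.9, so a modern rewrite would use tobytes() or plain bytes():

from binascii import hexlify

addr = b"\x12\x34\x56\x78\x9a\xbc"        # raw address, little-endian byte order
hex_str = hexlify(bytes(reversed(addr))).decode("ascii")
print(":".join(a + b for a, b in zip(hex_str[::2], hex_str[1::2])))
# -> bc:9a:78:56:34:12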
citruz/beacontools
beacontools/utils.py
is_one_of
def is_one_of(obj, types): """Return true iff obj is an instance of one of the types.""" for type_ in types: if isinstance(obj, type_): return True return False
python
def is_one_of(obj, types): for type_ in types: if isinstance(obj, type_): return True return False
[ "def", "is_one_of", "(", "obj", ",", "types", ")", ":", "for", "type_", "in", "types", ":", "if", "isinstance", "(", "obj", ",", "type_", ")", ":", "return", "True", "return", "False" ]
Return true iff obj is an instance of one of the types.
[ "Return", "true", "iff", "obj", "is", "an", "instance", "of", "one", "of", "the", "types", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L44-L49
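For what it's worth, the standard library covers this case directly: isinstance accepts a tuple of types, so the loop above is equivalent to a one-liner:

def is_one_of(obj, types):
    """Return True iff obj is an instance of one of the types."""
    return isinstance(obj, tuple(types))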
citruz/beacontools
beacontools/utils.py
is_packet_type
def is_packet_type(cls): """Check if class is one of the packet types.""" from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \ EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \ EddystoneEIDFrame, IBeaconAdvertisement, \ EstimoteTelemetryFrameA, EstimoteTelemetryFrameB return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \ EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \ EstimoteTelemetryFrameA, EstimoteTelemetryFrameB])
python
def is_packet_type(cls): from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \ EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \ EddystoneEIDFrame, IBeaconAdvertisement, \ EstimoteTelemetryFrameA, EstimoteTelemetryFrameB return (cls in [EddystoneURLFrame, EddystoneUIDFrame, EddystoneEncryptedTLMFrame, \ EddystoneTLMFrame, EddystoneEIDFrame, IBeaconAdvertisement, \ EstimoteTelemetryFrameA, EstimoteTelemetryFrameB])
[ "def", "is_packet_type", "(", "cls", ")", ":", "from", ".", "packet_types", "import", "EddystoneUIDFrame", ",", "EddystoneURLFrame", ",", "EddystoneEncryptedTLMFrame", ",", "EddystoneTLMFrame", ",", "EddystoneEIDFrame", ",", "IBeaconAdvertisement", ",", "EstimoteTelemetryFrameA", ",", "EstimoteTelemetryFrameB", "return", "(", "cls", "in", "[", "EddystoneURLFrame", ",", "EddystoneUIDFrame", ",", "EddystoneEncryptedTLMFrame", ",", "EddystoneTLMFrame", ",", "EddystoneEIDFrame", ",", "IBeaconAdvertisement", ",", "EstimoteTelemetryFrameA", ",", "EstimoteTelemetryFrameB", "]", ")" ]
Check if class is one of the packet types.
[ "Check", "if", "class", "is", "one", "of", "the", "packet", "types", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L52-L60
citruz/beacontools
beacontools/utils.py
bin_to_int
def bin_to_int(string): """Convert a one element byte string to signed int for python 2 support.""" if isinstance(string, str): return struct.unpack("b", string)[0] else: return struct.unpack("b", bytes([string]))[0]
python
def bin_to_int(string): if isinstance(string, str): return struct.unpack("b", string)[0] else: return struct.unpack("b", bytes([string]))[0]
[ "def", "bin_to_int", "(", "string", ")", ":", "if", "isinstance", "(", "string", ",", "str", ")", ":", "return", "struct", ".", "unpack", "(", "\"b\"", ",", "string", ")", "[", "0", "]", "else", ":", "return", "struct", ".", "unpack", "(", "\"b\"", ",", "bytes", "(", "[", "string", "]", ")", ")", "[", "0", "]" ]
Convert a one element byte string to signed int for python 2 support.
[ "Convert", "a", "one", "element", "byte", "string", "to", "signed", "int", "for", "python", "2", "support", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L71-L76
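The "b" struct format reads one byte as a signed 8-bit integer, which is what an RSSI value needs (BLE signal strength is reported as a negative dBm figure). Worked examples:

import struct

assert struct.unpack("b", b"\xc4")[0] == -60       # 0xC4 -> 196 - 256 = -60 dBm
assert struct.unpack("b", bytes([196]))[0] == -60  # Python 3: indexing bytes yields int
assert struct.unpack("b", b"\x7f")[0] == 127       # positive range tops out at 127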
citruz/beacontools
beacontools/utils.py
get_mode
def get_mode(device_filter): """Determine which beacons the scanner should look for.""" from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter if device_filter is None or len(device_filter) == 0: return ScannerMode.MODE_ALL mode = ScannerMode.MODE_NONE for filtr in device_filter: if isinstance(filtr, IBeaconFilter): mode |= ScannerMode.MODE_IBEACON elif isinstance(filtr, EddystoneFilter): mode |= ScannerMode.MODE_EDDYSTONE elif isinstance(filtr, EstimoteFilter): mode |= ScannerMode.MODE_ESTIMOTE elif isinstance(filtr, BtAddrFilter): mode |= ScannerMode.MODE_ALL break return mode
python
def get_mode(device_filter): from .device_filters import IBeaconFilter, EddystoneFilter, BtAddrFilter, EstimoteFilter if device_filter is None or len(device_filter) == 0: return ScannerMode.MODE_ALL mode = ScannerMode.MODE_NONE for filtr in device_filter: if isinstance(filtr, IBeaconFilter): mode |= ScannerMode.MODE_IBEACON elif isinstance(filtr, EddystoneFilter): mode |= ScannerMode.MODE_EDDYSTONE elif isinstance(filtr, EstimoteFilter): mode |= ScannerMode.MODE_ESTIMOTE elif isinstance(filtr, BtAddrFilter): mode |= ScannerMode.MODE_ALL break return mode
[ "def", "get_mode", "(", "device_filter", ")", ":", "from", ".", "device_filters", "import", "IBeaconFilter", ",", "EddystoneFilter", ",", "BtAddrFilter", ",", "EstimoteFilter", "if", "device_filter", "is", "None", "or", "len", "(", "device_filter", ")", "==", "0", ":", "return", "ScannerMode", ".", "MODE_ALL", "mode", "=", "ScannerMode", ".", "MODE_NONE", "for", "filtr", "in", "device_filter", ":", "if", "isinstance", "(", "filtr", ",", "IBeaconFilter", ")", ":", "mode", "|=", "ScannerMode", ".", "MODE_IBEACON", "elif", "isinstance", "(", "filtr", ",", "EddystoneFilter", ")", ":", "mode", "|=", "ScannerMode", ".", "MODE_EDDYSTONE", "elif", "isinstance", "(", "filtr", ",", "EstimoteFilter", ")", ":", "mode", "|=", "ScannerMode", ".", "MODE_ESTIMOTE", "elif", "isinstance", "(", "filtr", ",", "BtAddrFilter", ")", ":", "mode", "|=", "ScannerMode", ".", "MODE_ALL", "break", "return", "mode" ]
Determine which beacons the scanner should look for.
[ "Determine", "which", "beacons", "the", "scanner", "should", "look", "for", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/utils.py#L79-L97
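The scanner modes are bit flags combined with |=, so a mixed filter list makes the scanner watch several beacon families at once; a BtAddrFilter forces MODE_ALL because an address alone says nothing about the packet format. A sketch of the flag arithmetic with assumed flag values (the real ScannerMode constants are not shown in this record):

MODE_NONE, MODE_IBEACON, MODE_EDDYSTONE, MODE_ESTIMOTE = 0, 1, 2, 4

mode = MODE_NONE
mode |= MODE_IBEACON     # an IBeaconFilter was in the list
mode |= MODE_EDDYSTONE   # an EddystoneFilter was in the list
assert mode & MODE_IBEACON and mode & MODE_EDDYSTONE
assert not mode & MODE_ESTIMOTE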
citruz/beacontools
beacontools/device_filters.py
DeviceFilter.matches
def matches(self, filter_props): """Check if the filter matches the supplied properties.""" if filter_props is None: return False found_one = False for key, value in filter_props.items(): if key in self.properties and value != self.properties[key]: return False elif key in self.properties and value == self.properties[key]: found_one = True return found_one
python
def matches(self, filter_props): if filter_props is None: return False found_one = False for key, value in filter_props.items(): if key in self.properties and value != self.properties[key]: return False elif key in self.properties and value == self.properties[key]: found_one = True return found_one
[ "def", "matches", "(", "self", ",", "filter_props", ")", ":", "if", "filter_props", "is", "None", ":", "return", "False", "found_one", "=", "False", "for", "key", ",", "value", "in", "filter_props", ".", "items", "(", ")", ":", "if", "key", "in", "self", ".", "properties", "and", "value", "!=", "self", ".", "properties", "[", "key", "]", ":", "return", "False", "elif", "key", "in", "self", ".", "properties", "and", "value", "==", "self", ".", "properties", "[", "key", "]", ":", "found_one", "=", "True", "return", "found_one" ]
Check if the filter matches the supplied properties.
[ "Check", "if", "the", "filter", "matches", "the", "supplied", "properties", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/device_filters.py#L13-L25
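The matching rule is: at least one supplied property must equal a filter property, and none may contradict one; keys the filter does not know about are ignored. A hedged illustration with a minimal stand-in class (not the real DeviceFilter constructor):

class StubFilter:
    def __init__(self, **properties):
        self.properties = properties

    def matches(self, filter_props):
        if filter_props is None:
            return False
        found_one = False
        for key, value in filter_props.items():
            if key in self.properties and value != self.properties[key]:
                return False
            elif key in self.properties and value == self.properties[key]:
                found_one = True
        return found_one

f = StubFilter(namespace="abc", instance="001")
assert f.matches({"namespace": "abc", "rssi": -60})   # one match, no conflict
assert not f.matches({"namespace": "xyz"})            # conflicting value
assert not f.matches({"rssi": -60})                   # no overlapping key at all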
citruz/beacontools
beacontools/parser.py
parse_packet
def parse_packet(packet): """Parse a beacon advertisement packet.""" frame = parse_ltv_packet(packet) if frame is None: frame = parse_ibeacon_packet(packet) return frame
python
def parse_packet(packet): frame = parse_ltv_packet(packet) if frame is None: frame = parse_ibeacon_packet(packet) return frame
[ "def", "parse_packet", "(", "packet", ")", ":", "frame", "=", "parse_ltv_packet", "(", "packet", ")", "if", "frame", "is", "None", ":", "frame", "=", "parse_ibeacon_packet", "(", "packet", ")", "return", "frame" ]
Parse a beacon advertisement packet.
[ "Parse", "a", "beacon", "advertisement", "packet", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L14-L19
citruz/beacontools
beacontools/parser.py
parse_ltv_packet
def parse_ltv_packet(packet): """Parse a tag-length-value style beacon packet.""" try: frame = LTVFrame.parse(packet) for ltv in frame: if ltv['type'] == SERVICE_DATA_TYPE: data = ltv['value'] if data["service_identifier"] == EDDYSTONE_UUID: return parse_eddystone_service_data(data) elif data["service_identifier"] == ESTIMOTE_UUID: return parse_estimote_service_data(data) except ConstructError: return None return None
python
def parse_ltv_packet(packet): try: frame = LTVFrame.parse(packet) for ltv in frame: if ltv['type'] == SERVICE_DATA_TYPE: data = ltv['value'] if data["service_identifier"] == EDDYSTONE_UUID: return parse_eddystone_service_data(data) elif data["service_identifier"] == ESTIMOTE_UUID: return parse_estimote_service_data(data) except ConstructError: return None return None
[ "def", "parse_ltv_packet", "(", "packet", ")", ":", "try", ":", "frame", "=", "LTVFrame", ".", "parse", "(", "packet", ")", "for", "ltv", "in", "frame", ":", "if", "ltv", "[", "'type'", "]", "==", "SERVICE_DATA_TYPE", ":", "data", "=", "ltv", "[", "'value'", "]", "if", "data", "[", "\"service_identifier\"", "]", "==", "EDDYSTONE_UUID", ":", "return", "parse_eddystone_service_data", "(", "data", ")", "elif", "data", "[", "\"service_identifier\"", "]", "==", "ESTIMOTE_UUID", ":", "return", "parse_estimote_service_data", "(", "data", ")", "except", "ConstructError", ":", "return", "None", "return", "None" ]
Parse a tag-length-value style beacon packet.
[ "Parse", "a", "tag", "-", "length", "-", "value", "style", "beacon", "packet", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L21-L38
citruz/beacontools
beacontools/parser.py
parse_eddystone_service_data
def parse_eddystone_service_data(data): """Parse Eddystone service data.""" if data['frame_type'] == EDDYSTONE_UID_FRAME: return EddystoneUIDFrame(data['frame']) elif data['frame_type'] == EDDYSTONE_TLM_FRAME: if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED: return EddystoneEncryptedTLMFrame(data['frame']['data']) elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED: return EddystoneTLMFrame(data['frame']['data']) elif data['frame_type'] == EDDYSTONE_URL_FRAME: return EddystoneURLFrame(data['frame']) elif data['frame_type'] == EDDYSTONE_EID_FRAME: return EddystoneEIDFrame(data['frame']) else: return None
python
def parse_eddystone_service_data(data): if data['frame_type'] == EDDYSTONE_UID_FRAME: return EddystoneUIDFrame(data['frame']) elif data['frame_type'] == EDDYSTONE_TLM_FRAME: if data['frame']['tlm_version'] == EDDYSTONE_TLM_ENCRYPTED: return EddystoneEncryptedTLMFrame(data['frame']['data']) elif data['frame']['tlm_version'] == EDDYSTONE_TLM_UNENCRYPTED: return EddystoneTLMFrame(data['frame']['data']) elif data['frame_type'] == EDDYSTONE_URL_FRAME: return EddystoneURLFrame(data['frame']) elif data['frame_type'] == EDDYSTONE_EID_FRAME: return EddystoneEIDFrame(data['frame']) else: return None
[ "def", "parse_eddystone_service_data", "(", "data", ")", ":", "if", "data", "[", "'frame_type'", "]", "==", "EDDYSTONE_UID_FRAME", ":", "return", "EddystoneUIDFrame", "(", "data", "[", "'frame'", "]", ")", "elif", "data", "[", "'frame_type'", "]", "==", "EDDYSTONE_TLM_FRAME", ":", "if", "data", "[", "'frame'", "]", "[", "'tlm_version'", "]", "==", "EDDYSTONE_TLM_ENCRYPTED", ":", "return", "EddystoneEncryptedTLMFrame", "(", "data", "[", "'frame'", "]", "[", "'data'", "]", ")", "elif", "data", "[", "'frame'", "]", "[", "'tlm_version'", "]", "==", "EDDYSTONE_TLM_UNENCRYPTED", ":", "return", "EddystoneTLMFrame", "(", "data", "[", "'frame'", "]", "[", "'data'", "]", ")", "elif", "data", "[", "'frame_type'", "]", "==", "EDDYSTONE_URL_FRAME", ":", "return", "EddystoneURLFrame", "(", "data", "[", "'frame'", "]", ")", "elif", "data", "[", "'frame_type'", "]", "==", "EDDYSTONE_EID_FRAME", ":", "return", "EddystoneEIDFrame", "(", "data", "[", "'frame'", "]", ")", "else", ":", "return", "None" ]
Parse Eddystone service data.
[ "Parse", "Eddystone", "service", "data", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L40-L57
citruz/beacontools
beacontools/parser.py
parse_estimote_service_data
def parse_estimote_service_data(data): """Parse Estimote service data.""" if data['frame_type'] & 0xF == ESTIMOTE_TELEMETRY_FRAME: protocol_version = (data['frame_type'] & 0xF0) >> 4 if data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A: return EstimoteTelemetryFrameA(data['frame'], protocol_version) elif data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B: return EstimoteTelemetryFrameB(data['frame'], protocol_version) return None
python
def parse_estimote_service_data(data): if data['frame_type'] & 0xF == ESTIMOTE_TELEMETRY_FRAME: protocol_version = (data['frame_type'] & 0xF0) >> 4 if data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_A: return EstimoteTelemetryFrameA(data['frame'], protocol_version) elif data['frame']['subframe_type'] == ESTIMOTE_TELEMETRY_SUBFRAME_B: return EstimoteTelemetryFrameB(data['frame'], protocol_version) return None
[ "def", "parse_estimote_service_data", "(", "data", ")", ":", "if", "data", "[", "'frame_type'", "]", "&", "0xF", "==", "ESTIMOTE_TELEMETRY_FRAME", ":", "protocol_version", "=", "(", "data", "[", "'frame_type'", "]", "&", "0xF0", ")", ">>", "4", "if", "data", "[", "'frame'", "]", "[", "'subframe_type'", "]", "==", "ESTIMOTE_TELEMETRY_SUBFRAME_A", ":", "return", "EstimoteTelemetryFrameA", "(", "data", "[", "'frame'", "]", ",", "protocol_version", ")", "elif", "data", "[", "'frame'", "]", "[", "'subframe_type'", "]", "==", "ESTIMOTE_TELEMETRY_SUBFRAME_B", ":", "return", "EstimoteTelemetryFrameB", "(", "data", "[", "'frame'", "]", ",", "protocol_version", ")", "return", "None" ]
Parse Estimote service data.
[ "Parse", "Estimote", "service", "data", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L59-L67
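The frame_type byte packs two fields into one octet: the low nibble carries the frame type and the high nibble the protocol version. A worked example with a hypothetical byte value, assuming the telemetry frame type constant is 2:

frame_type = 0x22                     # hypothetical service-data byte
assert frame_type & 0x0F == 2         # low nibble: telemetry frame
assert (frame_type & 0xF0) >> 4 == 2  # high nibble: protocol version 2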
citruz/beacontools
beacontools/packet_types/estimote.py
EstimoteTelemetryFrameA.parse_motion_state
def parse_motion_state(val): """Convert motion state byte to seconds.""" number = val & 0b00111111 unit = (val & 0b11000000) >> 6 if unit == 1: number *= 60 # minutes elif unit == 2: number *= 60 * 60 # hours elif unit == 3 and number < 32: number *= 60 * 60 * 24 # days elif unit == 3: number -= 32 number *= 60 * 60 * 24 * 7 # weeks return number
python
def parse_motion_state(val): number = val & 0b00111111 unit = (val & 0b11000000) >> 6 if unit == 1: number *= 60 elif unit == 2: number *= 60 * 60 elif unit == 3 and number < 32: number *= 60 * 60 * 24 elif unit == 3: number -= 32 number *= 60 * 60 * 24 * 7 return number
[ "def", "parse_motion_state", "(", "val", ")", ":", "number", "=", "val", "&", "0b00111111", "unit", "=", "(", "val", "&", "0b11000000", ")", ">>", "6", "if", "unit", "==", "1", ":", "number", "*=", "60", "# minutes", "elif", "unit", "==", "2", ":", "number", "*=", "60", "*", "60", "# hours", "elif", "unit", "==", "3", "and", "number", "<", "32", ":", "number", "*=", "60", "*", "60", "*", "24", "# days", "elif", "unit", "==", "3", ":", "number", "-=", "32", "number", "*=", "60", "*", "60", "*", "24", "*", "7", "# weeks", "return", "number" ]
Convert motion state byte to seconds.
[ "Convert", "motion", "state", "byte", "to", "seconds", "." ]
train
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/packet_types/estimote.py#L46-L59
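The motion-state byte packs a 6-bit count with a 2-bit unit in the top bits (0 = seconds, 1 = minutes, 2 = hours, 3 = days, or weeks once the count reaches 32). Worked checks, assuming the function above is callable directly:

assert parse_motion_state(0b01000010) == 120               # 2 minutes
assert parse_motion_state(0b11000101) == 5 * 60 * 60 * 24  # 5 days (count < 32)
assert parse_motion_state(0b11100001) == 60 * 60 * 24 * 7  # count 33 -> 1 week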
Polyconseil/zbarlight
src/zbarlight/__init__.py
scan_codes
def scan_codes(code_types, image): """ Get *code_type* codes from a PIL Image. *code_type* can be any of the zbar supported code types [#zbar_symbologies]_: - **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`) - **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`), DataBar (`databar`) and DataBar Expanded (`databar-exp`) - **2D**: QR Code (`qrcode`) - **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417` .. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html Args: code_types (list(str)): Code type(s) to search (see ``zbarlight.Symbologies`` for supported values). image (PIL.Image.Image): Image to scan Returns: A list of *code_type* code values or None """ if isinstance(code_types, str): code_types = [code_types] warnings.warn( 'Using a str for code_types is deprecated, please use a list of str instead', DeprecationWarning, ) # Translate symbologies symbologies = [ Symbologies.get(code_type.upper()) for code_type in set(code_types) ] # Check that all symbologies are known if None in symbologies: bad_code_types = [code_type for code_type in code_types if code_type.upper() not in Symbologies] raise UnknownSymbologieError('Unknown Symbologies: %s' % bad_code_types) # Convert the image to be used by c-extension if not Image.isImageType(image): raise RuntimeError('Bad or unknown image format') converted_image = image.convert('L') # Convert image to gray scale (8 bits per pixel). raw = converted_image.tobytes() # Get image data. width, height = converted_image.size # Get image size. return zbar_code_scanner(symbologies, raw, width, height)
python
def scan_codes(code_types, image): if isinstance(code_types, str): code_types = [code_types] warnings.warn( 'Using a str for code_types is deprecated, please use a list of str instead', DeprecationWarning, ) symbologies = [ Symbologies.get(code_type.upper()) for code_type in set(code_types) ] if None in symbologies: bad_code_types = [code_type for code_type in code_types if code_type.upper() not in Symbologies] raise UnknownSymbologieError('Unknown Symbologies: %s' % bad_code_types) if not Image.isImageType(image): raise RuntimeError('Bad or unknown image format') converted_image = image.convert('L') raw = converted_image.tobytes() width, height = converted_image.size return zbar_code_scanner(symbologies, raw, width, height)
[ "def", "scan_codes", "(", "code_types", ",", "image", ")", ":", "if", "isinstance", "(", "code_types", ",", "str", ")", ":", "code_types", "=", "[", "code_types", "]", "warnings", ".", "warn", "(", "'Using a str for code_types is deprecated, please use a list of str instead'", ",", "DeprecationWarning", ",", ")", "# Translate symbologies", "symbologies", "=", "[", "Symbologies", ".", "get", "(", "code_type", ".", "upper", "(", ")", ")", "for", "code_type", "in", "set", "(", "code_types", ")", "]", "# Check that all symbologies are known", "if", "None", "in", "symbologies", ":", "bad_code_types", "=", "[", "code_type", "for", "code_type", "in", "code_types", "if", "code_type", ".", "upper", "(", ")", "not", "in", "Symbologies", "]", "raise", "UnknownSymbologieError", "(", "'Unknown Symbologies: %s'", "%", "bad_code_types", ")", "# Convert the image to be used by c-extension", "if", "not", "Image", ".", "isImageType", "(", "image", ")", ":", "raise", "RuntimeError", "(", "'Bad or unknown image format'", ")", "converted_image", "=", "image", ".", "convert", "(", "'L'", ")", "# Convert image to gray scale (8 bits per pixel).", "raw", "=", "converted_image", ".", "tobytes", "(", ")", "# Get image data.", "width", ",", "height", "=", "converted_image", ".", "size", "# Get image size.", "return", "zbar_code_scanner", "(", "symbologies", ",", "raw", ",", "width", ",", "height", ")" ]
Get *code_type* codes from a PIL Image. *code_type* can be any of the zbar supported code types [#zbar_symbologies]_: - **EAN/UPC**: EAN-13 (`ean13`), UPC-A (`upca`), EAN-8 (`ean8`) and UPC-E (`upce`) - **Linear barcode**: Code 128 (`code128`), Code 93 (`code93`), Code 39 (`code39`), Interleaved 2 of 5 (`i25`), DataBar (`databar`) and DataBar Expanded (`databar-exp`) - **2D**: QR Code (`qrcode`) - **Undocumented**: `ean5`, `ean2`, `composite`, `isbn13`, `isbn10`, `codabar`, `pdf417` .. [#zbar_symbologies] http://zbar.sourceforge.net/iphone/userguide/symbologies.html Args: code_types (list(str)): Code type(s) to search (see ``zbarlight.Symbologies`` for supported values). image (PIL.Image.Image): Image to scan Returns: A list of *code_type* code values or None
[ "Get", "*", "code_type", "*", "codes", "from", "a", "PIL", "Image", "." ]
train
https://github.com/Polyconseil/zbarlight/blob/97f46696516683af863d87935074e772e89b4292/src/zbarlight/__init__.py#L30-L77
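Typical usage pairs scan_codes with Pillow: open the image, load it, and ask for one or more symbologies; the result is a list of byte strings, or None when nothing was found. A sketch with a hypothetical file path:

from PIL import Image
import zbarlight

with open("qrcode.png", "rb") as fp:   # hypothetical path
    image = Image.open(fp)
    image.load()

codes = zbarlight.scan_codes(["qrcode"], image)
if codes:
    print(codes[0].decode())           # payloads come back as bytes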
Polyconseil/zbarlight
src/zbarlight/__init__.py
copy_image_on_background
def copy_image_on_background(image, color=WHITE): """ Create a new image by copying the image on a *color* background. Args: image (PIL.Image.Image): Image to copy color (tuple): Background color usually WHITE or BLACK Returns: PIL.Image.Image """ background = Image.new("RGB", image.size, color) background.paste(image, mask=image.split()[3]) return background
python
def copy_image_on_background(image, color=WHITE): background = Image.new("RGB", image.size, color) background.paste(image, mask=image.split()[3]) return background
[ "def", "copy_image_on_background", "(", "image", ",", "color", "=", "WHITE", ")", ":", "background", "=", "Image", ".", "new", "(", "\"RGB\"", ",", "image", ".", "size", ",", "color", ")", "background", ".", "paste", "(", "image", ",", "mask", "=", "image", ".", "split", "(", ")", "[", "3", "]", ")", "return", "background" ]
Create a new image by copying the image on a *color* background. Args: image (PIL.Image.Image): Image to copy color (tuple): Background color usually WHITE or BLACK Returns: PIL.Image.Image
[ "Create", "a", "new", "image", "by", "copying", "the", "image", "on", "a", "*", "color", "*", "background", "." ]
train
https://github.com/Polyconseil/zbarlight/blob/97f46696516683af863d87935074e772e89b4292/src/zbarlight/__init__.py#L80-L94
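image.split()[3] is the alpha band of an RGBA image, used here as the paste mask, so fully transparent pixels keep the background color; the function therefore assumes a 4-band input. A small sketch flattening a transparent image onto white, as one might do before scanning:

from PIL import Image

WHITE = (255, 255, 255)
rgba = Image.new("RGBA", (64, 64), (255, 0, 0, 0))  # fully transparent red
background = Image.new("RGB", rgba.size, WHITE)
background.paste(rgba, mask=rgba.split()[3])        # band 3 = alpha channel
assert background.getpixel((0, 0)) == WHITE         # transparency became white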
Polyconseil/zbarlight
docs/conf.py
_Zbarlight.monkey_patch
def monkey_patch(cls): """Monkey patch zbarlight C extension on Read The Docs""" on_read_the_docs = os.environ.get('READTHEDOCS', False) if on_read_the_docs: sys.modules['zbarlight._zbarlight'] = cls
python
def monkey_patch(cls): on_read_the_docs = os.environ.get('READTHEDOCS', False) if on_read_the_docs: sys.modules['zbarlight._zbarlight'] = cls
[ "def", "monkey_patch", "(", "cls", ")", ":", "on_read_the_docs", "=", "os", ".", "environ", ".", "get", "(", "'READTHEDOCS'", ",", "False", ")", "if", "on_read_the_docs", ":", "sys", ".", "modules", "[", "'zbarlight._zbarlight'", "]", "=", "cls" ]
Monkey patch zbarlight C extension on Read The Docs
[ "Monkey", "patch", "zbarlight", "C", "extension", "on", "Read", "The", "Docs" ]
train
https://github.com/Polyconseil/zbarlight/blob/97f46696516683af863d87935074e772e89b4292/docs/conf.py#L23-L27
adafruit/Adafruit_CircuitPython_framebuf
adafruit_framebuf.py
MHMSBFormat.fill
def fill(framebuf, color): """completely fill/clear the buffer with a color""" if color: fill = 0xFF else: fill = 0x00 for i in range(len(framebuf.buf)): framebuf.buf[i] = fill
python
def fill(framebuf, color): if color: fill = 0xFF else: fill = 0x00 for i in range(len(framebuf.buf)): framebuf.buf[i] = fill
[ "def", "fill", "(", "framebuf", ",", "color", ")", ":", "if", "color", ":", "fill", "=", "0xFF", "else", ":", "fill", "=", "0x00", "for", "i", "in", "range", "(", "len", "(", "framebuf", ".", "buf", ")", ")", ":", "framebuf", ".", "buf", "[", "i", "]", "=", "fill" ]
completely fill/clear the buffer with a color
[ "completely", "fill", "/", "clear", "the", "buffer", "with", "a", "color" ]
train
https://github.com/adafruit/Adafruit_CircuitPython_framebuf/blob/b9f62c4b71efa963150f9c5a0284b61c7add9d02/adafruit_framebuf.py#L72-L79
adafruit/Adafruit_CircuitPython_framebuf
adafruit_framebuf.py
MHMSBFormat.fill_rect
def fill_rect(framebuf, x, y, width, height, color): """Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws both the outline and interior.""" # pylint: disable=too-many-arguments for _x in range(x, x+width): offset = 7 - _x & 0x07 for _y in range(y, y+height): index = (_y * framebuf.stride + _x) // 8 framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) \ | ((color != 0) << offset)
python
def fill_rect(framebuf, x, y, width, height, color): for _x in range(x, x+width): offset = 7 - _x & 0x07 for _y in range(y, y+height): index = (_y * framebuf.stride + _x) // 8 framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) \ | ((color != 0) << offset)
[ "def", "fill_rect", "(", "framebuf", ",", "x", ",", "y", ",", "width", ",", "height", ",", "color", ")", ":", "# pylint: disable=too-many-arguments", "for", "_x", "in", "range", "(", "x", ",", "x", "+", "width", ")", ":", "offset", "=", "7", "-", "_x", "&", "0x07", "for", "_y", "in", "range", "(", "y", ",", "y", "+", "height", ")", ":", "index", "=", "(", "_y", "*", "framebuf", ".", "stride", "+", "_x", ")", "//", "8", "framebuf", ".", "buf", "[", "index", "]", "=", "(", "framebuf", ".", "buf", "[", "index", "]", "&", "~", "(", "0x01", "<<", "offset", ")", ")", "|", "(", "(", "color", "!=", "0", ")", "<<", "offset", ")" ]
Draw a rectangle at the given location, size and color. The ``fill_rect`` method draws both the outline and interior.
[ "Draw", "a", "rectangle", "at", "the", "given", "location", "size", "and", "color", ".", "The", "fill_rect", "method", "draws", "both", "the", "outline", "and", "interior", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_framebuf/blob/b9f62c4b71efa963150f9c5a0284b61c7add9d02/adafruit_framebuf.py#L82-L91
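In MHMSB layout the leftmost pixel of each byte lives in the most significant bit, so the bit offset for column _x is 7 - (_x % 8). Note that 7 - _x & 0x07 parses as (7 - _x) & 0x07 in Python (& binds looser than -), which yields the same value thanks to two's-complement masking. A quick check:

for x in range(64):
    assert (7 - x) & 0x07 == 7 - (x % 8)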
adafruit/Adafruit_CircuitPython_framebuf
adafruit_framebuf.py
MVLSBFormat.set_pixel
def set_pixel(framebuf, x, y, color): """Set a given pixel to a color.""" index = (y >> 3) * framebuf.stride + x offset = y & 0x07 framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | ((color != 0) << offset)
python
def set_pixel(framebuf, x, y, color): index = (y >> 3) * framebuf.stride + x offset = y & 0x07 framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | ((color != 0) << offset)
[ "def", "set_pixel", "(", "framebuf", ",", "x", ",", "y", ",", "color", ")", ":", "index", "=", "(", "y", ">>", "3", ")", "*", "framebuf", ".", "stride", "+", "x", "offset", "=", "y", "&", "0x07", "framebuf", ".", "buf", "[", "index", "]", "=", "(", "framebuf", ".", "buf", "[", "index", "]", "&", "~", "(", "0x01", "<<", "offset", ")", ")", "|", "(", "(", "color", "!=", "0", ")", "<<", "offset", ")" ]
Set a given pixel to a color.
[ "Set", "a", "given", "pixel", "to", "a", "color", "." ]
train
https://github.com/adafruit/Adafruit_CircuitPython_framebuf/blob/b9f62c4b71efa963150f9c5a0284b61c7add9d02/adafruit_framebuf.py#L96-L100
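In MVLSB layout each byte holds a vertical strip of eight pixels: y >> 3 selects the page (byte row), x the column, and y & 0x07 the bit inside the byte. A worked example for pixel (5, 10) on an assumed 16-pixel-wide, 16-pixel-tall buffer:

stride = 16                    # pixels per row (assumed width)
buf = bytearray(16 * 16 // 8)  # 32 bytes for a 16x16 mono display
x, y = 5, 10
index = (y >> 3) * stride + x  # page 1, column 5 -> byte 21
offset = y & 0x07              # bit 2 within that byte
buf[index] |= 0x01 << offset   # switch the pixel on
assert buf[21] == 0b00000100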