id: int32 (0 – 252k)
repo: string (7 – 55 chars)
path: string (4 – 127 chars)
func_name: string (1 – 88 chars)
original_string: string (75 – 19.8k chars)
language: string (1 class)
code: string (75 – 19.8k chars)
code_tokens: sequence
docstring: string (3 – 17.3k chars)
docstring_tokens: sequence
sha: string (40 chars)
url: string (87 – 242 chars)
247,000
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/__init__.py
Runner._copy_module
def _copy_module(self, conn, tmp, module_name, module_args, inject):
    ''' transfer a module over SFTP, does not run it '''

    if module_name.startswith("/"):
        raise errors.AnsibleFileNotFound("%s is not a module" % module_name)

    # Search module path(s) for named module.
    in_path = utils.plugins.module_finder.find_plugin(module_name)
    if in_path is None:
        raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths()))

    out_path = os.path.join(tmp, module_name)

    module_data = ""
    is_new_style = False

    with open(in_path) as f:
        module_data = f.read()
        if module_common.REPLACER in module_data:
            is_new_style = True
        module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
        encoded_args = "\"\"\"%s\"\"\"" % module_args.replace("\"", "\\\"")
        module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)
        encoded_lang = "\"\"\"%s\"\"\"" % C.DEFAULT_MODULE_LANG
        module_data = module_data.replace(module_common.REPLACER_LANG, encoded_lang)

        if is_new_style:
            facility = C.DEFAULT_SYSLOG_FACILITY
            if 'ansible_syslog_facility' in inject:
                facility = inject['ansible_syslog_facility']
            module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)

    lines = module_data.split("\n")
    shebang = None
    if lines[0].startswith("#!"):
        shebang = lines[0]
        args = shlex.split(str(shebang[2:]))
        interpreter = args[0]
        interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)

        if interpreter_config in inject:
            lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
            module_data = "\n".join(lines)

    self._transfer_str(conn, tmp, module_name, module_data)
    return (out_path, is_new_style, shebang)
python
[ "def", "_copy_module", "(", "self", ",", "conn", ",", "tmp", ",", "module_name", ",", "module_args", ",", "inject", ")", ":", "if", "module_name", ".", "startswith", "(", "\"/\"", ")", ":", "raise", "errors", ".", "AnsibleFileNotFound", "(", "\"%s is not a module\"", "%", "module_name", ")", "# Search module path(s) for named module.", "in_path", "=", "utils", ".", "plugins", ".", "module_finder", ".", "find_plugin", "(", "module_name", ")", "if", "in_path", "is", "None", ":", "raise", "errors", ".", "AnsibleFileNotFound", "(", "\"module %s not found in %s\"", "%", "(", "module_name", ",", "utils", ".", "plugins", ".", "module_finder", ".", "print_paths", "(", ")", ")", ")", "out_path", "=", "os", ".", "path", ".", "join", "(", "tmp", ",", "module_name", ")", "module_data", "=", "\"\"", "is_new_style", "=", "False", "with", "open", "(", "in_path", ")", "as", "f", ":", "module_data", "=", "f", ".", "read", "(", ")", "if", "module_common", ".", "REPLACER", "in", "module_data", ":", "is_new_style", "=", "True", "module_data", "=", "module_data", ".", "replace", "(", "module_common", ".", "REPLACER", ",", "module_common", ".", "MODULE_COMMON", ")", "encoded_args", "=", "\"\\\"\\\"\\\"%s\\\"\\\"\\\"\"", "%", "module_args", ".", "replace", "(", "\"\\\"\"", ",", "\"\\\\\\\"\"", ")", "module_data", "=", "module_data", ".", "replace", "(", "module_common", ".", "REPLACER_ARGS", ",", "encoded_args", ")", "encoded_lang", "=", "\"\\\"\\\"\\\"%s\\\"\\\"\\\"\"", "%", "C", ".", "DEFAULT_MODULE_LANG", "module_data", "=", "module_data", ".", "replace", "(", "module_common", ".", "REPLACER_LANG", ",", "encoded_lang", ")", "if", "is_new_style", ":", "facility", "=", "C", ".", "DEFAULT_SYSLOG_FACILITY", "if", "'ansible_syslog_facility'", "in", "inject", ":", "facility", "=", "inject", "[", "'ansible_syslog_facility'", "]", "module_data", "=", "module_data", ".", "replace", "(", "'syslog.LOG_USER'", ",", "\"syslog.%s\"", "%", "facility", ")", "lines", "=", "module_data", ".", "split", "(", "\"\\n\"", ")", "shebang", "=", "None", "if", "lines", "[", "0", "]", ".", "startswith", "(", "\"#!\"", ")", ":", "shebang", "=", "lines", "[", "0", "]", "args", "=", "shlex", ".", "split", "(", "str", "(", "shebang", "[", "2", ":", "]", ")", ")", "interpreter", "=", "args", "[", "0", "]", "interpreter_config", "=", "'ansible_%s_interpreter'", "%", "os", ".", "path", ".", "basename", "(", "interpreter", ")", "if", "interpreter_config", "in", "inject", ":", "lines", "[", "0", "]", "=", "shebang", "=", "\"#!%s %s\"", "%", "(", "inject", "[", "interpreter_config", "]", ",", "\" \"", ".", "join", "(", "args", "[", "1", ":", "]", ")", ")", "module_data", "=", "\"\\n\"", ".", "join", "(", "lines", ")", "self", ".", "_transfer_str", "(", "conn", ",", "tmp", ",", "module_name", ",", "module_data", ")", "return", "(", "out_path", ",", "is_new_style", ",", "shebang", ")" ]
transfer a module over SFTP, does not run it
[ "transfer", "a", "module", "over", "SFTP", "does", "not", "run", "it" ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L524-L569
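The shebang-rewriting step at the end of _copy_module is easy to miss in the flattened record. Below is a minimal standalone sketch of just that step; the helper name is hypothetical, and the inject dict stands in for Ansible's inventory variables.

import os
import shlex

def rewrite_shebang(module_data, inject):
    # Split off the first line; if it is a shebang and the inventory
    # provides an 'ansible_<interpreter>_interpreter' variable, swap
    # the interpreter while keeping any extra shebang arguments.
    lines = module_data.split("\n")
    if lines[0].startswith("#!"):
        args = shlex.split(lines[0][2:])
        key = 'ansible_%s_interpreter' % os.path.basename(args[0])
        if key in inject:
            lines[0] = "#!%s %s" % (inject[key], " ".join(args[1:]))
    return "\n".join(lines)

# Example: force a specific interpreter for a module shipped with a
# generic shebang.
print(rewrite_shebang("#!/usr/bin/python -u\nprint('hi')",
                      {'ansible_python_interpreter': '/opt/py27/bin/python'}))
# -> "#!/opt/py27/bin/python -u\nprint('hi')"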
247,001
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/__init__.py
Runner._parallel_exec
def _parallel_exec(self, hosts):
    ''' handles multiprocessing when more than 1 fork is required '''

    if not hosts:
        return

    p = multiprocessing.Pool(self.forks)
    results = []
    # results = p.map(multiprocessing_runner, hosts) # can't handle keyboard interrupt
    results = p.map_async(multiprocessing_runner, hosts).get(9999999)
    p.close()
    p.join()
    return results
python
[ "def", "_parallel_exec", "(", "self", ",", "hosts", ")", ":", "if", "not", "hosts", ":", "return", "p", "=", "multiprocessing", ".", "Pool", "(", "self", ".", "forks", ")", "results", "=", "[", "]", "#results = p.map(multiprocessing_runner, hosts) # can't handle keyboard interrupt", "results", "=", "p", ".", "map_async", "(", "multiprocessing_runner", ",", "hosts", ")", ".", "get", "(", "9999999", ")", "p", ".", "close", "(", ")", "p", ".", "join", "(", ")", "return", "results" ]
handles multiprocessing when more than 1 fork is required
[ "handles", "mulitprocessing", "when", "more", "than", "1", "fork", "is", "required" ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L621-L631
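The commented-out p.map line above points at a known CPython quirk: a bare Pool.map blocks in a way that can swallow KeyboardInterrupt, while map_async(...).get(<huge timeout>) keeps Ctrl+C deliverable to the parent process. A small self-contained sketch of the same idiom:

import multiprocessing

def square(n):
    # stand-in for multiprocessing_runner in the record above
    return n * n

if __name__ == '__main__':
    p = multiprocessing.Pool(4)
    # .get() with a very large timeout instead of a plain .map() call,
    # so the main process can still be interrupted from the keyboard
    results = p.map_async(square, range(10)).get(9999999)
    p.close()
    p.join()
    print(results)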
247,002
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/__init__.py
Runner._partition_results
def _partition_results(self, results):
    ''' separate results by ones we contacted & ones we didn't '''

    if results is None:
        return None
    results2 = dict(contacted={}, dark={})

    for result in results:
        host = result.host
        if host is None:
            raise Exception("internal error, host not set")
        if result.communicated_ok():
            results2["contacted"][host] = result.result
        else:
            results2["dark"][host] = result.result

    # hosts which were contacted but never got a chance to return
    for host in self.inventory.list_hosts(self.pattern):
        if not (host in results2['dark'] or host in results2['contacted']):
            results2["dark"][host] = {}

    return results2
python
[ "def", "_partition_results", "(", "self", ",", "results", ")", ":", "if", "results", "is", "None", ":", "return", "None", "results2", "=", "dict", "(", "contacted", "=", "{", "}", ",", "dark", "=", "{", "}", ")", "for", "result", "in", "results", ":", "host", "=", "result", ".", "host", "if", "host", "is", "None", ":", "raise", "Exception", "(", "\"internal error, host not set\"", ")", "if", "result", ".", "communicated_ok", "(", ")", ":", "results2", "[", "\"contacted\"", "]", "[", "host", "]", "=", "result", ".", "result", "else", ":", "results2", "[", "\"dark\"", "]", "[", "host", "]", "=", "result", ".", "result", "# hosts which were contacted but never got a chance to return", "for", "host", "in", "self", ".", "inventory", ".", "list_hosts", "(", "self", ".", "pattern", ")", ":", "if", "not", "(", "host", "in", "results2", "[", "'dark'", "]", "or", "host", "in", "results2", "[", "'contacted'", "]", ")", ":", "results2", "[", "\"dark\"", "]", "[", "host", "]", "=", "{", "}", "return", "results2" ]
separate results by ones we contacted & ones we didn't
[ "seperate", "results", "by", "ones", "we", "contacted", "&", "ones", "we", "didn", "t" ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L635-L655
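A toy run of the partitioning contract above, with hypothetical host names and result payloads: reachable hosts land under 'contacted', failures under 'dark', and any matched host that never produced a result gets an empty 'dark' entry.

results2 = dict(contacted={}, dark={})
results2['contacted']['web1'] = {'rc': 0}
results2['dark']['db1'] = {'failed': True, 'msg': 'timed out'}

# hosts matching the play's pattern, including one that never returned
for host in ['web1', 'db1', 'web2']:
    if not (host in results2['dark'] or host in results2['contacted']):
        results2['dark'][host] = {}

print(results2['dark'])  # {'db1': {'failed': True, 'msg': 'timed out'}, 'web2': {}}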
247,003
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/__init__.py
Runner.run
def run(self):
    ''' xfer & run module on all matched hosts '''

    # find hosts that match the pattern
    hosts = self.inventory.list_hosts(self.pattern)
    if len(hosts) == 0:
        self.callbacks.on_no_hosts()
        return dict(contacted={}, dark={})

    global multiprocessing_runner
    multiprocessing_runner = self
    results = None

    # Check if this is an action plugin. Some of them are designed
    # to be run once per group of hosts. Example module: pause,
    # run once per hostgroup, rather than pausing once per each
    # host.
    p = utils.plugins.action_loader.get(self.module_name, self)
    if p and getattr(p, 'BYPASS_HOST_LOOP', None):
        # Expose the current hostgroup to the bypassing plugins
        self.host_set = hosts
        # We aren't iterating over all the hosts in this
        # group. So, just pick the first host in our group to
        # construct the conn object with.
        result_data = self._executor(hosts[0]).result
        # Create a ResultData item for each host in this group
        # using the returned result. If we didn't do this we would
        # get false reports of dark hosts.
        results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
                    for h in hosts ]
        del self.host_set
    elif self.forks > 1:
        try:
            results = self._parallel_exec(hosts)
        except IOError, ie:
            print ie.errno
            if ie.errno == 32:
                # broken pipe from Ctrl+C
                raise errors.AnsibleError("interrupted")
            raise
    else:
        results = [ self._executor(h) for h in hosts ]

    return self._partition_results(results)
python
[ "def", "run", "(", "self", ")", ":", "# find hosts that match the pattern", "hosts", "=", "self", ".", "inventory", ".", "list_hosts", "(", "self", ".", "pattern", ")", "if", "len", "(", "hosts", ")", "==", "0", ":", "self", ".", "callbacks", ".", "on_no_hosts", "(", ")", "return", "dict", "(", "contacted", "=", "{", "}", ",", "dark", "=", "{", "}", ")", "global", "multiprocessing_runner", "multiprocessing_runner", "=", "self", "results", "=", "None", "# Check if this is an action plugin. Some of them are designed", "# to be ran once per group of hosts. Example module: pause,", "# run once per hostgroup, rather than pausing once per each", "# host.", "p", "=", "utils", ".", "plugins", ".", "action_loader", ".", "get", "(", "self", ".", "module_name", ",", "self", ")", "if", "p", "and", "getattr", "(", "p", ",", "'BYPASS_HOST_LOOP'", ",", "None", ")", ":", "# Expose the current hostgroup to the bypassing plugins", "self", ".", "host_set", "=", "hosts", "# We aren't iterating over all the hosts in this", "# group. So, just pick the first host in our group to", "# construct the conn object with.", "result_data", "=", "self", ".", "_executor", "(", "hosts", "[", "0", "]", ")", ".", "result", "# Create a ResultData item for each host in this group", "# using the returned result. If we didn't do this we would", "# get false reports of dark hosts.", "results", "=", "[", "ReturnData", "(", "host", "=", "h", ",", "result", "=", "result_data", ",", "comm_ok", "=", "True", ")", "for", "h", "in", "hosts", "]", "del", "self", ".", "host_set", "elif", "self", ".", "forks", ">", "1", ":", "try", ":", "results", "=", "self", ".", "_parallel_exec", "(", "hosts", ")", "except", "IOError", ",", "ie", ":", "print", "ie", ".", "errno", "if", "ie", ".", "errno", "==", "32", ":", "# broken pipe from Ctrl+C", "raise", "errors", ".", "AnsibleError", "(", "\"interupted\"", ")", "raise", "else", ":", "results", "=", "[", "self", ".", "_executor", "(", "h", ")", "for", "h", "in", "hosts", "]", "return", "self", ".", "_partition_results", "(", "results", ")" ]
xfer & run module on all matched hosts
[ "xfer", "&", "run", "module", "on", "all", "matched", "hosts" ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L659-L701
247,004
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/__init__.py
Runner.run_async
def run_async(self, time_limit):
    ''' Run this module asynchronously and return a poller. '''

    self.background = time_limit
    results = self.run()

    return results, poller.AsyncPoller(results, self)
python
[ "def", "run_async", "(", "self", ",", "time_limit", ")", ":", "self", ".", "background", "=", "time_limit", "results", "=", "self", ".", "run", "(", ")", "return", "results", ",", "poller", ".", "AsyncPoller", "(", "results", ",", "self", ")" ]
Run this module asynchronously and return a poller.
[ "Run", "this", "module", "asynchronously", "and", "return", "a", "poller", "." ]
977409929dd81322d886425cdced10608117d5d7
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L705-L710
247,005
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.image_by_id
def image_by_id(self, id):
    """ Return image with given Id """
    if not id:
        return None

    return next((image for image in self.images()
                 if image['Id'] == id), None)
python
[ "def", "image_by_id", "(", "self", ",", "id", ")", ":", "if", "not", "id", ":", "return", "None", "return", "next", "(", "(", "image", "for", "image", "in", "self", ".", "images", "(", ")", "if", "image", "[", "'Id'", "]", "==", "id", ")", ",", "None", ")" ]
Return image with given Id
[ "Return", "image", "with", "given", "Id" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L21-L28
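The lookup in image_by_id relies on next() over a generator expression with a default value, which returns the first match (or None) without materialising the whole list. The same idiom in isolation, with made-up image dicts:

images = [{'Id': 'abc123', 'RepoTags': ['demo:latest']},
          {'Id': 'def456', 'RepoTags': ['other:1.0']}]

print(next((img for img in images if img['Id'] == 'def456'), None))
# -> {'Id': 'def456', 'RepoTags': ['other:1.0']}
print(next((img for img in images if img['Id'] == 'zzz'), None))
# -> None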
247,006
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.image_by_tag
def image_by_tag(self, tag):
    """ Return image with given tag """
    if not tag:
        return None

    return next((image for image in self.images()
                 if tag in image['RepoTags']), None)
python
[ "def", "image_by_tag", "(", "self", ",", "tag", ")", ":", "if", "not", "tag", ":", "return", "None", "return", "next", "(", "(", "image", "for", "image", "in", "self", ".", "images", "(", ")", "if", "tag", "in", "image", "[", "'RepoTags'", "]", ")", ",", "None", ")" ]
Return image with given tag
[ "Return", "image", "with", "given", "tag" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L30-L38
247,007
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.image_exists
def image_exists(self, id=None, tag=None):
    """ Check if specified image exists """
    exists = False
    if id and self.image_by_id(id):
        exists = True
    elif tag and self.image_by_tag(tag):
        exists = True

    return exists
python
[ "def", "image_exists", "(", "self", ",", "id", "=", "None", ",", "tag", "=", "None", ")", ":", "exists", "=", "False", "if", "id", "and", "self", ".", "image_by_id", "(", "id", ")", ":", "exists", "=", "True", "elif", "tag", "and", "self", ".", "image_by_tag", "(", "tag", ")", ":", "exists", "=", "True", "return", "exists" ]
Check if specified image exists
[ "Check", "if", "specified", "image", "exists" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L40-L50
247,008
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.container_by_id
def container_by_id(self, id):
    """ Returns container with given id """
    if not id:
        return None

    return next((container for container in self.containers(all=True)
                 if container['Id'] == id), None)
python
[ "def", "container_by_id", "(", "self", ",", "id", ")", ":", "if", "not", "id", ":", "return", "None", "return", "next", "(", "(", "container", "for", "container", "in", "self", ".", "containers", "(", "all", "=", "True", ")", "if", "container", "[", "'Id'", "]", "==", "id", ")", ",", "None", ")" ]
Returns container with given id
[ "Returns", "container", "with", "given", "id" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L52-L59
247,009
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.container_by_name
def container_by_name(self, name):
    """ Returns container with given name """
    if not name:
        return None
    # docker prepends a '/' to container names in the container dict
    name = '/' + name

    return next((container for container in self.containers(all=True)
                 if name in container['Names']), None)
python
[ "def", "container_by_name", "(", "self", ",", "name", ")", ":", "if", "not", "name", ":", "return", "None", "# docker prepends a '/' to container names in the container dict", "name", "=", "'/'", "+", "name", "return", "next", "(", "(", "container", "for", "container", "in", "self", ".", "containers", "(", "all", "=", "True", ")", "if", "name", "in", "container", "[", "'Names'", "]", ")", ",", "None", ")" ]
Returns container with given name
[ "Returns", "container", "with", "given", "name" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L61-L71
247,010
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.container_exists
def container_exists(self, id=None, name=None):
    """ Checks if container exists already """
    exists = False
    if id and self.container_by_id(id):
        exists = True
    elif name and self.container_by_name(name):
        exists = True

    return exists
python
[ "def", "container_exists", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ")", ":", "exists", "=", "False", "if", "id", "and", "self", ".", "container_by_id", "(", "id", ")", ":", "exists", "=", "True", "elif", "name", "and", "self", ".", "container_by_name", "(", "name", ")", ":", "exists", "=", "True", "return", "exists" ]
Checks if container exists already
[ "Checks", "if", "container", "exists", "already" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L73-L83
247,011
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.container_running
def container_running(self, id=None, name=None):
    """ Checks if container is running """
    running = False
    if id:
        running = self.inspect_container(id)['State']['Running']
    elif name:
        running = self.inspect_container(name)['State']['Running']

    return running
python
[ "def", "container_running", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ")", ":", "running", "=", "False", "if", "id", ":", "running", "=", "self", ".", "inspect_container", "(", "id", ")", "[", "'State'", "]", "[", "'Running'", "]", "elif", "name", ":", "running", "=", "self", ".", "inspect_container", "(", "name", ")", "[", "'State'", "]", "[", "'Running'", "]", "return", "running" ]
Checks if container is running
[ "Checks", "if", "container", "is", "running" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L85-L94
247,012
kshlm/gant
gant/utils/docker_helper.py
DockerHelper.get_container_ip
def get_container_ip(self, container):
    """ Returns the internal ip of the container if available """
    info = self.inspect_container(container)
    if not info:
        return None

    netInfo = info['NetworkSettings']
    if not netInfo:
        return None

    ip = netInfo['IPAddress']
    if not ip:
        return None

    return ip
python
[ "def", "get_container_ip", "(", "self", ",", "container", ")", ":", "info", "=", "self", ".", "inspect_container", "(", "container", ")", "if", "not", "info", ":", "return", "None", "netInfo", "=", "info", "[", "'NetworkSettings'", "]", "if", "not", "netInfo", ":", "return", "None", "ip", "=", "netInfo", "[", "'IPAddress'", "]", "if", "not", "ip", ":", "return", "None", "return", "ip" ]
Returns the internal ip of the container if available
[ "Returns", "the", "internal", "ip", "of", "the", "container", "if", "available" ]
eabaa17ebfd31b1654ee1f27e7026f6d7b370609
https://github.com/kshlm/gant/blob/eabaa17ebfd31b1654ee1f27e7026f6d7b370609/gant/utils/docker_helper.py#L96-L112
247,013
polysquare/python-parse-shebang
parseshebang/__init__.py
_parse
def _parse(fileobj):
    """Parse fileobj for a shebang."""
    fileobj.seek(0)
    try:
        part = fileobj.read(2)
    except UnicodeDecodeError:
        part = ""

    if part == "#!":
        shebang = shlex.split(fileobj.readline().strip())
        if (platform.system() == "Windows" and
                len(shebang) and
                os.path.basename(shebang[0]) == "env"):
            return shebang[1:]

        return shebang

    return []
python
[ "def", "_parse", "(", "fileobj", ")", ":", "fileobj", ".", "seek", "(", "0", ")", "try", ":", "part", "=", "fileobj", ".", "read", "(", "2", ")", "except", "UnicodeDecodeError", ":", "part", "=", "\"\"", "if", "part", "==", "\"#!\"", ":", "shebang", "=", "shlex", ".", "split", "(", "fileobj", ".", "readline", "(", ")", ".", "strip", "(", ")", ")", "if", "(", "platform", ".", "system", "(", ")", "==", "\"Windows\"", "and", "len", "(", "shebang", ")", "and", "os", ".", "path", ".", "basename", "(", "shebang", "[", "0", "]", ")", "==", "\"env\"", ")", ":", "return", "shebang", "[", "1", ":", "]", "return", "shebang", "return", "[", "]" ]
Parse fileobj for a shebang.
[ "Parse", "fileobj", "for", "a", "shebang", "." ]
18fddc6d987268edb031a2903c66820f5ad52902
https://github.com/polysquare/python-parse-shebang/blob/18fddc6d987268edb031a2903c66820f5ad52902/parseshebang/__init__.py#L20-L37
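Exercising _parse above with an in-memory file object: it reads two characters, and if they are '#!' it shlex-splits the rest of the first line; on Windows a leading env launcher is dropped.

import io

script = io.StringIO("#!/usr/bin/env python -u\nprint('hello')\n")
print(_parse(script))
# -> ['/usr/bin/env', 'python', '-u']   (['python', '-u'] on Windows)

print(_parse(io.StringIO("print('no shebang here')\n")))
# -> []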
247,014
tomokinakamaru/mapletree
mapletree/mapletree.py
MapleTree.run
def run(self, port=5000, background=False):
    """ Runs this application with builtin server for testing.
    This is only for test usage, do not use in production stage.

    :param port: Port number
    :param background: Flag to run in background
    :type port: int
    :type background: bool
    """
    target = os.path.dirname(os.path.abspath(sys.argv[0]))
    driver = Driver(self, port, target, 1)
    if background:
        driver.run_background()
    else:
        driver.run()
python
[ "def", "run", "(", "self", ",", "port", "=", "5000", ",", "background", "=", "False", ")", ":", "target", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "sys", ".", "argv", "[", "0", "]", ")", ")", "driver", "=", "Driver", "(", "self", ",", "port", ",", "target", ",", "1", ")", "if", "background", ":", "driver", ".", "run_background", "(", ")", "else", ":", "driver", ".", "run", "(", ")" ]
Runs this application with builtin server for testing. This is only for test usage, do not use in production stage. :param port: Port number :param background: Flag to run in background :type port: int :type background: bool
[ "Runs", "this", "application", "with", "builtin", "server", "for", "testing", ".", "This", "is", "only", "for", "test", "usage", "do", "not", "use", "in", "production", "stage", "." ]
19ec68769ef2c1cd2e4164ed8623e0c4280279bb
https://github.com/tomokinakamaru/mapletree/blob/19ec68769ef2c1cd2e4164ed8623e0c4280279bb/mapletree/mapletree.py#L74-L89
247,015
stuaxo/mnd
mnd/dispatch.py
Dispatcher.unbind
def unbind(self, handler, argspec):
    """
    handler will no longer be called if args match argspec

    :param argspec: instance of ArgSpec - args to be matched
    """
    self.handlers[argspec.key].remove((handler, argspec))
    if not len(self.handlers[argspec.key]):
        del self.handlers[argspec.key]
python
[ "def", "unbind", "(", "self", ",", "handler", ",", "argspec", ")", ":", "self", ".", "handlers", "[", "argspec", ".", "key", "]", ".", "remove", "(", "(", "handler", ",", "argspec", ")", ")", "if", "not", "len", "(", "self", ".", "handlers", "[", "argspec", ".", "key", "]", ")", ":", "del", "self", ".", "handlers", "[", "argspec", ".", "key", "]" ]
handler will no longer be called if args match argspec :param argspec: instance of ArgSpec - args to be matched
[ "handler", "will", "no", "longer", "be", "called", "if", "args", "match", "argspec" ]
0eb30155d310fa1e550cb9efd6486816b9231d27
https://github.com/stuaxo/mnd/blob/0eb30155d310fa1e550cb9efd6486816b9231d27/mnd/dispatch.py#L72-L80
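unbind assumes self.handlers maps argspec keys to lists of (handler, argspec) pairs and prunes a key once its list empties. The same bookkeeping in isolation, with toy values in place of real handlers and ArgSpec objects:

from collections import defaultdict

handlers = defaultdict(list)
handlers['key1'].append(('handler1', 'argspec1'))

# remove the pair, then drop the bucket so empty keys don't linger
handlers['key1'].remove(('handler1', 'argspec1'))
if not len(handlers['key1']):
    del handlers['key1']

print(dict(handlers))  # {}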
247,016
stuaxo/mnd
mnd/dispatch.py
Dispatcher.dispatch
def dispatch(self, *args, **kwargs):
    """
    Call handlers that match args or kwargs

    :return: set of handlers called
    """
    called_handlers = set()
    for handler_list in self.handlers.values():
        for handler, argspec in handler_list:
            accept_args, accept_kwargs = argspec.accepts
            # note: the 'and False' disables this dedup guard, so a
            # handler may be called once per matching argspec
            if handler in called_handlers and False:
                continue
            else:
                if args_match(accept_args, accept_kwargs, self.default, *args, **kwargs):
                    called_handlers.add(handler)
                    handler(*args, **kwargs)
    return called_handlers
python
[ "def", "dispatch", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "called_handlers", "=", "set", "(", ")", "for", "handler_list", "in", "self", ".", "handlers", ".", "values", "(", ")", ":", "for", "handler", ",", "argspec", "in", "handler_list", ":", "accept_args", ",", "accept_kwargs", "=", "argspec", ".", "accepts", "if", "handler", "in", "called_handlers", "and", "False", ":", "continue", "else", ":", "if", "args_match", "(", "accept_args", ",", "accept_kwargs", ",", "self", ".", "default", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "called_handlers", ".", "add", "(", "handler", ")", "handler", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "called_handlers" ]
Call handlers that match args or kwargs :return: set of handlers called
[ "Call", "handlers", "that", "match", "args", "or", "kwargs" ]
0eb30155d310fa1e550cb9efd6486816b9231d27
https://github.com/stuaxo/mnd/blob/0eb30155d310fa1e550cb9efd6486816b9231d27/mnd/dispatch.py#L82-L99
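A stripped-down model of the dispatch loop's contract; args_match and the handler registry here are simplified stand-ins, not the real mnd implementations. Every registered pair whose spec matches the call is invoked, and the set of invoked handlers is returned.

def args_match(accept_kwargs, **kwargs):
    # simplified stand-in: every expected kwarg must be present and equal
    return all(kwargs.get(k) == v for k, v in accept_kwargs.items())

def on_left_click(**kwargs):
    print("left click:", kwargs)

handlers = {'click': [(on_left_click, {'button': 'left'})]}

called = set()
for handler_list in handlers.values():
    for handler, accept_kwargs in handler_list:
        if args_match(accept_kwargs, button='left', x=10):
            called.add(handler)
            handler(button='left', x=10)

print(called)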
247,017
pbrisk/unicum
unicum/encode_json.py
UnicumJSONEncoder.iterencode
def iterencode(self, o, _one_shot=False):
    """Encode the given object and yield each string
    representation as available.

    For example::

        for chunk in JSONEncoder().iterencode(bigobject):
            mysocket.write(chunk)

    """
    if self.check_circular:
        markers = {}
    else:
        markers = None
    if self.ensure_ascii:
        _encoder = encode_basestring_ascii
    else:
        _encoder = encode_basestring
    if self.encoding != 'utf-8':
        def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
            if isinstance(o, str):
                o = o.decode(_encoding)
            return _orig_encoder(o)

    def floatstr(o, allow_nan=self.allow_nan,
                 _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
        # Check for specials. Note that this type of test is processor
        # and/or platform-specific, so do tests which don't depend on the
        # internals.
        if o != o:
            text = 'NaN'
        elif o == _inf:
            text = 'Infinity'
        elif o == _neginf:
            text = '-Infinity'
        else:
            return _repr(o)

        if not allow_nan:
            raise ValueError(
                "Out of range float values are not JSON compliant: " +
                repr(o))

        return text

    _iterencode = _make_iterencode(
        markers, self.default, _encoder, self.indent, floatstr,
        self.key_separator, self.item_separator, self.sort_keys,
        self.skipkeys, _one_shot, self._order)
    return _iterencode(o, 0)
python
[ "def", "iterencode", "(", "self", ",", "o", ",", "_one_shot", "=", "False", ")", ":", "if", "self", ".", "check_circular", ":", "markers", "=", "{", "}", "else", ":", "markers", "=", "None", "if", "self", ".", "ensure_ascii", ":", "_encoder", "=", "encode_basestring_ascii", "else", ":", "_encoder", "=", "encode_basestring", "if", "self", ".", "encoding", "!=", "'utf-8'", ":", "def", "_encoder", "(", "o", ",", "_orig_encoder", "=", "_encoder", ",", "_encoding", "=", "self", ".", "encoding", ")", ":", "if", "isinstance", "(", "o", ",", "str", ")", ":", "o", "=", "o", ".", "decode", "(", "_encoding", ")", "return", "_orig_encoder", "(", "o", ")", "def", "floatstr", "(", "o", ",", "allow_nan", "=", "self", ".", "allow_nan", ",", "_repr", "=", "FLOAT_REPR", ",", "_inf", "=", "INFINITY", ",", "_neginf", "=", "-", "INFINITY", ")", ":", "# Check for specials. Note that this type of test is processor", "# and/or platform-specific, so do tests which don't depend on the", "# internals.", "if", "o", "!=", "o", ":", "text", "=", "'NaN'", "elif", "o", "==", "_inf", ":", "text", "=", "'Infinity'", "elif", "o", "==", "_neginf", ":", "text", "=", "'-Infinity'", "else", ":", "return", "_repr", "(", "o", ")", "if", "not", "allow_nan", ":", "raise", "ValueError", "(", "\"Out of range float values are not JSON compliant: \"", "+", "repr", "(", "o", ")", ")", "return", "text", "_iterencode", "=", "_make_iterencode", "(", "markers", ",", "self", ".", "default", ",", "_encoder", ",", "self", ".", "indent", ",", "floatstr", ",", "self", ".", "key_separator", ",", "self", ".", "item_separator", ",", "self", ".", "sort_keys", ",", "self", ".", "skipkeys", ",", "_one_shot", ",", "self", ".", "_order", ")", "return", "_iterencode", "(", "o", ",", "0", ")" ]
Encode the given object and yield each string representation as available. For example:: for chunk in JSONEncoder().iterencode(bigobject): mysocket.write(chunk)
[ "Encode", "the", "given", "object", "and", "yield", "each", "string", "representation", "as", "available", "." ]
24bfa7355f36847a06646c58e9fd75bd3b689bfe
https://github.com/pbrisk/unicum/blob/24bfa7355f36847a06646c58e9fd75bd3b689bfe/unicum/encode_json.py#L18-L69
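The floatstr closure above leans on IEEE-754 semantics: NaN is the only float unequal to itself, so `o != o` detects it without importing math. The three special cases in isolation:

for o in [float('nan'), float('inf'), float('-inf'), 1.5]:
    if o != o:
        print('NaN')
    elif o == float('inf'):
        print('Infinity')
    elif o == float('-inf'):
        print('-Infinity')
    else:
        print(repr(o))
# -> NaN, Infinity, -Infinity, '1.5'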
247,018
EventTeam/beliefs
src/beliefs/cells/lazy.py
LazyCell.update
def update(self):
    """ Updates intension and then adds or includes extension """
    # updates intension
    self.update_intension()
    self._size_known_intension = len(self.members)
    self._update_members = False
python
[ "def", "update", "(", "self", ")", ":", "# updates intension", "self", ".", "update_intension", "(", ")", "self", ".", "_size_known_intension", "=", "len", "(", "self", ".", "members", ")", "self", ".", "_update_members", "=", "False" ]
Updates intension and then adds or includes extension
[ "Updates", "intension", "and", "then", "adds", "or", "includes", "extension" ]
c07d22b61bebeede74a72800030dde770bf64208
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/lazy.py#L54-L61
247,019
EventTeam/beliefs
src/beliefs/cells/lazy.py
LazyCell.is_entailed_by
def is_entailed_by(self, other):
    """  Means merging other with self does not produce any new information. """
    if not set(self.include.keys()).issubset(set(other.include.keys())):
        return False
    if not self.exclude.issuperset(other.exclude):
        return False
    if not self.prototype.is_entailed_by(other.prototype):
        return False
    return True
python
[ "def", "is_entailed_by", "(", "self", ",", "other", ")", ":", "if", "not", "set", "(", "self", ".", "include", ".", "keys", "(", ")", ")", ".", "issubset", "(", "set", "(", "other", ".", "include", ".", "keys", "(", ")", ")", ")", ":", "return", "False", "if", "not", "self", ".", "exclude", ".", "isuperset", "(", "other", ".", "exclude", ")", ":", "return", "False", "if", "not", "self", ".", "prototype", ".", "is_entailed_by", "(", "other", ".", "prototype", ")", ":", "return", "False", "return", "True" ]
Means merging other with self does not produce any new information.
[ "Means", "merging", "other", "with", "self", "does", "not", "produce", "any", "new", "information", "." ]
c07d22b61bebeede74a72800030dde770bf64208
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/lazy.py#L74-L84
247,020
EventTeam/beliefs
src/beliefs/cells/lazy.py
LazyCell.get_instances
def get_instances(self):
    """ Returns the members of the LazyDict """
    if self._update_members:
        self.update()
    return iter(sorted(self.members.iteritems()))
python
[ "def", "get_instances", "(", "self", ")", ":", "if", "self", ".", "_update_members", ":", "self", ".", "update", "(", ")", "return", "iter", "(", "sorted", "(", "self", ".", "members", ".", "iteritems", "(", ")", ")", ")" ]
Returns the members of the LazyDict
[ "Returns", "the", "members", "of", "the", "LazyDict" ]
c07d22b61bebeede74a72800030dde770bf64208
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/lazy.py#L107-L112
247,021
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.bots_list
def bots_list(self):
    """
    List all user's bots

    :rtype: list of Bot
    :return: user's bots
    """
    data = self.client.bots()
    return [Bot(item) for item in data]
python
[ "def", "bots_list", "(", "self", ")", ":", "data", "=", "self", ".", "client", ".", "bots", "(", ")", "return", "[", "Bot", "(", "item", ")", "for", "item", "in", "data", "]" ]
List all user's bots :rtype: list of Bot :return: user's bots
[ "List", "all", "user", "s", "bots" ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L19-L27
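A hedged usage sketch for the wrapper above; the constructor call is an assumption inferred from the self.client and self.token references in these records, not a confirmed chatfirst signature.

from chatfirst.client import Chatfirst

client = Chatfirst("<user-token>")  # assumed constructor, see note above
for bot in client.bots_list():      # GET /bots, wrapped into Bot objects
    print(bot)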
247,022
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.bots_create
def bots_create(self, bot):
    """
    Save new bot

    :param bot: bot object to save
    :type bot: Bot
    """
    self.client.bots(_method="POST", _json=bot.to_json(),
                     _params=dict(userToken=self.token))
python
[ "def", "bots_create", "(", "self", ",", "bot", ")", ":", "self", ".", "client", ".", "bots", "(", "_method", "=", "\"POST\"", ",", "_json", "=", "bot", ".", "to_json", "(", ")", ",", "_params", "=", "dict", "(", "userToken", "=", "self", ".", "token", ")", ")" ]
Save new bot :param bot: bot object to save :type bot: Bot
[ "Save", "new", "bot" ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L29-L36
247,023
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.bots_get
def bots_get(self, bot):
    """
    Fetch and fill Bot object

    :param bot: empty bot object with name to search
    :type bot: Bot
    :rtype: Bot
    :return: filled bot object
    """
    data = self.client.bots.__getattr__(bot.name).__call__()
    return Bot(data)
python
[ "def", "bots_get", "(", "self", ",", "bot", ")", ":", "data", "=", "self", ".", "client", ".", "bots", ".", "__getattr__", "(", "bot", ".", "name", ")", ".", "__call__", "(", ")", "return", "Bot", "(", "data", ")" ]
Fetch and fill Bot object :param bot: empty bot object with name to search :type bot: Bot :rtype: Bot :return: filled bot object
[ "Fetch", "and", "fill", "Bot", "object" ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L38-L48
247,024
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.bots_delete
def bots_delete(self, bot):
    """
    Delete existing bot

    :param bot: bot to delete
    :type bot: Bot
    """
    self.client.bots.__getattr__(bot.name).__call__(_method="DELETE",
                                                    _params=dict(botName=bot.name))
python
[ "def", "bots_delete", "(", "self", ",", "bot", ")", ":", "self", ".", "client", ".", "bots", ".", "__getattr__", "(", "bot", ".", "name", ")", ".", "__call__", "(", "_method", "=", "\"DELETE\"", ",", "_params", "=", "dict", "(", "botName", "=", "bot", ".", "name", ")", ")" ]
Delete existing bot :param bot: bot to delete :type bot: Bot
[ "Delete", "existing", "bot" ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L59-L66
247,025
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.talk
def talk(self, bot, message):
    """
    Talk to bot and get its response.
    You can use this method to integrate the platform with your own channels.

    :param bot: bot to talk to
    :type bot: Bot
    :param message: message to send to bot
    :type message: Message
    :rtype: ActionResponse
    :return: response object
    """
    data = self.client.talk(_method="POST", _params=dict(botName=bot.name),
                            _json=message.to_json())
    return ActionResponse(data)
python
[ "def", "talk", "(", "self", ",", "bot", ",", "message", ")", ":", "data", "=", "self", ".", "client", ".", "talk", "(", "_method", "=", "\"POST\"", ",", "_params", "=", "dict", "(", "botName", "=", "bot", ".", "name", ")", ",", "_json", "=", "message", ".", "to_json", "(", ")", ")", "return", "ActionResponse", "(", "data", ")" ]
Talk to bot and get its response. You can use this method to integrate the platform with your own channels :param bot: bot to talk to :type bot: Bot :param message: message to send to bot :type message: Message :rtype: ActionResponse :return: response object
[ "Talk", "to", "bot", "and", "get", "response", "based", "You", "can", "use", "this", "method", "to", "integrate", "the", "platform", "with", "your", "own", "channels" ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L88-L101
247,026
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.push
def push(self, bot, channel_type, ar, user_id):
    """
    Use this method to push a message to a user of the bot.
    The message should be packed into an ActionResponse object.
    This allows pushing text messages, buttons, images.
    This also allows forcing the current state of the user.

    :param bot: bot that will push the user
    :type bot: Bot
    :param channel_type: one of [telegram, facebook, slack]
    :type channel_type: str
    :param ar: message packed in response object
    :type ar: ActionResponse
    :param user_id: user id in used channel
    :type user_id: str
    """
    self.client.push.__getattr__(bot.name).__call__(_method="POST",
                                                    _params=dict(id=user_id, channel=channel_type),
                                                    _json=ar.to_json())
python
[ "def", "push", "(", "self", ",", "bot", ",", "channel_type", ",", "ar", ",", "user_id", ")", ":", "self", ".", "client", ".", "push", ".", "__getattr__", "(", "bot", ".", "name", ")", ".", "__call__", "(", "_method", "=", "\"POST\"", ",", "_params", "=", "dict", "(", "id", "=", "user_id", ",", "channel", "=", "channel_type", ")", ",", "_json", "=", "ar", ".", "to_json", "(", ")", ")" ]
Use this method to push a message to a user of the bot. The message should be packed into an ActionResponse object. This allows pushing text messages, buttons, images. This also allows forcing the current state of the user. :param bot: bot that will push the user :type bot: Bot :param channel_type: one of [telegram, facebook, slack] :type channel_type: str :param ar: message packed in response object :type ar: ActionResponse :param user_id: user id in used channel :type user_id: str
[ "Use", "this", "method", "to", "push", "message", "to", "user", "of", "bot", ".", "The", "message", "should", "be", "packed", "into", "ActionResponse", "object", ".", "This", "allows", "to", "push", "text", "messages", "buttons", "images", ".", "This", "also", "allows", "to", "force", "current", "state", "of", "user", "." ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L103-L121
247,027
chatfirst/chatfirst
chatfirst/client.py
Chatfirst.broadcast
def broadcast(self, bot, channel_type, text):
    """
    Use this method to broadcast text message to all users of bot.

    :param bot: bot that will push user
    :type bot: Bot
    :param channel_type: one of [telegram, facebook, slack]
    :type channel_type: str
    :param text: text message
    :type text: str
    """
    self.client.broadcast.__getattr__(bot.name).__call__(_method="POST",
                                                         _params=dict(channel=channel_type),
                                                         _json=dict(message=text))
python
[ "def", "broadcast", "(", "self", ",", "bot", ",", "channel_type", ",", "text", ")", ":", "self", ".", "client", ".", "broadcast", ".", "__getattr__", "(", "bot", ".", "name", ")", ".", "__call__", "(", "_method", "=", "\"POST\"", ",", "_params", "=", "dict", "(", "channel", "=", "channel_type", ")", ",", "_json", "=", "dict", "(", "message", "=", "text", ")", ")" ]
Use this method to broadcast text message to all users of bot. :param bot: bot that will push user :type bot: Bot :param channel_type: one of [telegram, facebook, slack] :type channel_type: str :param text: text message :type text: str
[ "Use", "this", "method", "to", "broadcast", "text", "message", "to", "all", "users", "of", "bot", "." ]
11e023fc372e034dfd3417b61b67759ef8c37ad6
https://github.com/chatfirst/chatfirst/blob/11e023fc372e034dfd3417b61b67759ef8c37ad6/chatfirst/client.py#L123-L136
247,028
luiscberrocal/pyjavaprops
pyjavaprops/javaproperties.py
JavaProperties.__parse
def __parse(self, lines):
    """ Parse a list of lines and create
    an internal property dictionary """

    # Every line in the file must consist of either a comment
    # or a key-value pair. A key-value pair is a line consisting
    # of a key which is a combination of non-white space characters
    # The separator character between key-value pairs is a '=',
    # ':' or a whitespace character not including the newline.
    # If the '=' or ':' characters are found, in the line, even
    # keys containing whitespace chars are allowed.
    # A line with only a key according to the rules above is also
    # fine. In such case, the value is considered as the empty string.
    # In order to include characters '=' or ':' in a key or value,
    # they have to be properly escaped using the backslash character.

    # Some examples of valid key-value pairs:
    #
    # key     value
    # key=value
    # key:value
    # key     value1,value2,value3
    # key     value1,value2,value3 \
    #         value4, value5
    # key
    # This key= this value
    # key = value1 value2 value3

    # Any line that starts with a '#' or '!' is considered a comment
    # and skipped. Also any trailing or preceding whitespaces
    # are removed from the key/value.

    # This is a line parser. It parses the
    # contents line by line.
    lineno = 0
    i = iter(lines)
    for line in i:
        lineno += 1
        line = line.strip()
        # Skip null lines
        if not line:
            continue
        # Skip lines which are comments
        if line[0] in ('#', '!'):
            continue
        # Some flags
        escaped = False
        # Position of first separation char
        sepidx = -1
        # A flag for performing wspace re check
        flag = 0
        # Check for valid space separation
        # First obtain the max index to which we
        # can search.
        m = self.othercharre.search(line)
        if m:
            first, last = m.span()
            start, end = 0, first
            flag = 1
            wspacere = re.compile(r'(?<![\\\=\:])(\s)')
        else:
            if self.othercharre2.search(line):
                # Check if either '=' or ':' is present
                # in the line. If they are then it means
                # they are preceded by a backslash.
                # This means, we need to modify the
                # wspacere a bit, not to look for
                # : or = characters.
                wspacere = re.compile(r'(?<![\\])(\s)')
            start, end = 0, len(line)

        m2 = wspacere.search(line, start, end)
        if m2:
            # print 'Space match=>',line
            # Means we need to split by space.
            first, last = m2.span()
            sepidx = first
        elif m:
            # print 'Other match=>',line
            # No matching wspace char found, need
            # to split by either '=' or ':'
            first, last = m.span()
            sepidx = last - 1
            # print line[sepidx]

        # If the last character is a backslash
        # it has to be preceded by a space in which
        # case the next line is read as part of the
        # same property
        while line[-1] == '\\':
            # Read next line
            nextline = next(i)
            nextline = nextline.strip()
            lineno += 1
            # This line will become part of the value
            line = line[:-1] + nextline

        # Now split to key,value according to separation char
        if sepidx != -1:
            key, value = line[:sepidx], line[sepidx+1:]
        else:
            key, value = line, ''

        self._keyorder.append(key)
        self.process_pair(key, value)
python
[ "def", "__parse", "(", "self", ",", "lines", ")", ":", "# Every line in the file must consist of either a comment", "# or a key-value pair. A key-value pair is a line consisting", "# of a key which is a combination of non-white space characters", "# The separator character between key-value pairs is a '=',", "# ':' or a whitespace character not including the newline.", "# If the '=' or ':' characters are found, in the line, even", "# keys containing whitespace chars are allowed.", "# A line with only a key according to the rules above is also", "# fine. In such case, the value is considered as the empty string.", "# In order to include characters '=' or ':' in a key or value,", "# they have to be properly escaped using the backslash character.", "# Some examples of valid key-value pairs:", "#", "# key value", "# key=value", "# key:value", "# key value1,value2,value3", "# key value1,value2,value3 \\", "# value4, value5", "# key", "# This key= this value", "# key = value1 value2 value3", "# Any line that starts with a '#' or '!' is considerered a comment", "# and skipped. Also any trailing or preceding whitespaces", "# are removed from the key/value.", "# This is a line parser. It parses the", "# contents like by line.", "lineno", "=", "0", "i", "=", "iter", "(", "lines", ")", "for", "line", "in", "i", ":", "lineno", "+=", "1", "line", "=", "line", ".", "strip", "(", ")", "# Skip null lines", "if", "not", "line", ":", "continue", "# Skip lines which are comments", "if", "line", "[", "0", "]", "in", "(", "'#'", ",", "'!'", ")", ":", "continue", "# Some flags", "escaped", "=", "False", "# Position of first separation char", "sepidx", "=", "-", "1", "# A flag for performing wspace re check", "flag", "=", "0", "# Check for valid space separation", "# First obtain the max index to which we", "# can search.", "m", "=", "self", ".", "othercharre", ".", "search", "(", "line", ")", "if", "m", ":", "first", ",", "last", "=", "m", ".", "span", "(", ")", "start", ",", "end", "=", "0", ",", "first", "flag", "=", "1", "wspacere", "=", "re", ".", "compile", "(", "r'(?<![\\\\\\=\\:])(\\s)'", ")", "else", ":", "if", "self", ".", "othercharre2", ".", "search", "(", "line", ")", ":", "# Check if either '=' or ':' is present", "# in the line. 
If they are then it means", "# they are preceded by a backslash.", "# This means, we need to modify the", "# wspacere a bit, not to look for", "# : or = characters.", "wspacere", "=", "re", ".", "compile", "(", "r'(?<![\\\\])(\\s)'", ")", "start", ",", "end", "=", "0", ",", "len", "(", "line", ")", "m2", "=", "wspacere", ".", "search", "(", "line", ",", "start", ",", "end", ")", "if", "m2", ":", "# print 'Space match=>',line", "# Means we need to split by space.", "first", ",", "last", "=", "m2", ".", "span", "(", ")", "sepidx", "=", "first", "elif", "m", ":", "# print 'Other match=>',line", "# No matching wspace char found, need", "# to split by either '=' or ':'", "first", ",", "last", "=", "m", ".", "span", "(", ")", "sepidx", "=", "last", "-", "1", "# print line[sepidx]", "# If the last character is a backslash", "# it has to be preceded by a space in which", "# case the next line is read as part of the", "# same property", "while", "line", "[", "-", "1", "]", "==", "'\\\\'", ":", "# Read next line", "nextline", "=", "next", "(", "i", ")", "nextline", "=", "nextline", ".", "strip", "(", ")", "lineno", "+=", "1", "# This line will become part of the value", "line", "=", "line", "[", ":", "-", "1", "]", "+", "nextline", "# Now split to key,value according to separation char", "if", "sepidx", "!=", "-", "1", ":", "key", ",", "value", "=", "line", "[", ":", "sepidx", "]", ",", "line", "[", "sepidx", "+", "1", ":", "]", "else", ":", "key", ",", "value", "=", "line", ",", "''", "self", ".", "_keyorder", ".", "append", "(", "key", ")", "self", ".", "process_pair", "(", "key", ",", "value", ")" ]
Parse a list of lines and create an internal property dictionary
[ "Parse", "a", "list", "of", "lines", "and", "create", "an", "internal", "property", "dictionary" ]
7d0327b1758b3d907af657e0df3b0618776ac46d
https://github.com/luiscberrocal/pyjavaprops/blob/7d0327b1758b3d907af657e0df3b0618776ac46d/pyjavaprops/javaproperties.py#L68-L174
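A quick illustration of the separator rules documented in __parse above -- a minimal sketch, assuming the class is importable as pyjavaprops.javaproperties.JavaProperties and that load() (shown a few records below) accepts any ordinary file object opened in 'r' mode:

import tempfile

from pyjavaprops.javaproperties import JavaProperties

SAMPLE = """\
! comment lines start with '!' or '#'
key1 value
key2=value
key3:value
key4 value1,value2,value3 \\
     value4, value5
key5
"""

with tempfile.NamedTemporaryFile('w', suffix='.properties', delete=False) as tmp:
    tmp.write(SAMPLE)
    path = tmp.name

jp = JavaProperties()
with open(path, 'r') as stream:
    jp.load(stream)
jp.list()   # key1..key3 split on ' ', '=' and ':'; key4 spans two lines; key5 maps to ''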
247,029
luiscberrocal/pyjavaprops
pyjavaprops/javaproperties.py
JavaProperties.load
def load(self, stream): """ Load properties from an open file stream """ # For the time being only accept file input streams if not _is_file(stream): raise TypeError('Argument should be a file object!') # Check for the opened mode if stream.mode != 'r': raise ValueError('Stream should be opened in read-only mode!') try: lines = stream.readlines() self.__parse(lines) except IOError: raise
python
def load(self, stream): """ Load properties from an open file stream """ # For the time being only accept file input streams if not _is_file(stream): raise TypeError('Argument should be a file object!') # Check for the opened mode if stream.mode != 'r': raise ValueError('Stream should be opened in read-only mode!') try: lines = stream.readlines() self.__parse(lines) except IOError: raise
[ "def", "load", "(", "self", ",", "stream", ")", ":", "# For the time being only accept file input streams", "if", "not", "_is_file", "(", "stream", ")", ":", "raise", "TypeError", "(", "'Argument should be a file object!'", ")", "# Check for the opened mode", "if", "stream", ".", "mode", "!=", "'r'", ":", "raise", "ValueError", "(", "'Stream should be opened in read-only mode!'", ")", "try", ":", "lines", "=", "stream", ".", "readlines", "(", ")", "self", ".", "__parse", "(", "lines", ")", "except", "IOError", ":", "raise" ]
Load properties from an open file stream
[ "Load", "properties", "from", "an", "open", "file", "stream" ]
7d0327b1758b3d907af657e0df3b0618776ac46d
https://github.com/luiscberrocal/pyjavaprops/blob/7d0327b1758b3d907af657e0df3b0618776ac46d/pyjavaprops/javaproperties.py#L250-L264
247,030
luiscberrocal/pyjavaprops
pyjavaprops/javaproperties.py
JavaProperties.set_property
def set_property(self, key, value): """ Set the property for the given key """ if type(key) is str and type(value) is str: self.process_pair(key, value) else: raise TypeError('Both key and value should be strings!')
python
def set_property(self, key, value): """ Set the property for the given key """ if type(key) is str and type(value) is str: self.process_pair(key, value) else: raise TypeError('Both key and value should be strings!')
[ "def", "set_property", "(", "self", ",", "key", ",", "value", ")", ":", "if", "type", "(", "key", ")", "is", "str", "and", "type", "(", "value", ")", "is", "str", ":", "self", ".", "process_pair", "(", "key", ",", "value", ")", "else", ":", "raise", "TypeError", "(", "'Both key and value should be strings!'", ")" ]
Set the property for the given key
[ "Set", "the", "property", "for", "the", "given", "key" ]
7d0327b1758b3d907af657e0df3b0618776ac46d
https://github.com/luiscberrocal/pyjavaprops/blob/7d0327b1758b3d907af657e0df3b0618776ac46d/pyjavaprops/javaproperties.py#L271-L277
247,031
luiscberrocal/pyjavaprops
pyjavaprops/javaproperties.py
JavaProperties.list
def list(self, out=sys.stdout): """ Prints a listing of the properties to the stream 'out' which defaults to the standard output """ out.write('-- listing properties --\n') for key,value in self._properties.items(): out.write(''.join((key,'=',value,'\n')))
python
def list(self, out=sys.stdout): """ Prints a listing of the properties to the stream 'out' which defaults to the standard output """ out.write('-- listing properties --\n') for key,value in self._properties.items(): out.write(''.join((key,'=',value,'\n')))
[ "def", "list", "(", "self", ",", "out", "=", "sys", ".", "stdout", ")", ":", "out", ".", "write", "(", "'-- listing properties --\\n'", ")", "for", "key", ",", "value", "in", "self", ".", "_properties", ".", "items", "(", ")", ":", "out", ".", "write", "(", "''", ".", "join", "(", "(", "key", ",", "'='", ",", "value", ",", "'\\n'", ")", ")", ")" ]
Prints a listing of the properties to the stream 'out' which defaults to the standard output
[ "Prints", "a", "listing", "of", "the", "properties", "to", "the", "stream", "out", "which", "defaults", "to", "the", "standard", "output" ]
7d0327b1758b3d907af657e0df3b0618776ac46d
https://github.com/luiscberrocal/pyjavaprops/blob/7d0327b1758b3d907af657e0df3b0618776ac46d/pyjavaprops/javaproperties.py#L285-L291
247,032
luiscberrocal/pyjavaprops
pyjavaprops/javaproperties.py
JavaProperties.store
def store(self, out, header=""): """ Write the properties list to the stream 'out' along with the optional 'header' """ if out.mode[0] != 'w': raise ValueError('Steam should be opened in write mode!') try: out.write(''.join(('#',header,'\n'))) # Write timestamp tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime()) out.write(''.join(('#',tstamp,'\n'))) # Write properties from the pristine dictionary for prop in self._keyorder: if prop in self._origprops: val = self._origprops[prop] out.write(''.join((prop,'=',self.escape(val),'\n'))) out.close() except IOError: raise
python
def store(self, out, header=""): """ Write the properties list to the stream 'out' along with the optional 'header' """ if out.mode[0] != 'w': raise ValueError('Steam should be opened in write mode!') try: out.write(''.join(('#',header,'\n'))) # Write timestamp tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime()) out.write(''.join(('#',tstamp,'\n'))) # Write properties from the pristine dictionary for prop in self._keyorder: if prop in self._origprops: val = self._origprops[prop] out.write(''.join((prop,'=',self.escape(val),'\n'))) out.close() except IOError: raise
[ "def", "store", "(", "self", ",", "out", ",", "header", "=", "\"\"", ")", ":", "if", "out", ".", "mode", "[", "0", "]", "!=", "'w'", ":", "raise", "ValueError", "(", "'Steam should be opened in write mode!'", ")", "try", ":", "out", ".", "write", "(", "''", ".", "join", "(", "(", "'#'", ",", "header", ",", "'\\n'", ")", ")", ")", "# Write timestamp", "tstamp", "=", "time", ".", "strftime", "(", "'%a %b %d %H:%M:%S %Z %Y'", ",", "time", ".", "localtime", "(", ")", ")", "out", ".", "write", "(", "''", ".", "join", "(", "(", "'#'", ",", "tstamp", ",", "'\\n'", ")", ")", ")", "# Write properties from the pristine dictionary", "for", "prop", "in", "self", ".", "_keyorder", ":", "if", "prop", "in", "self", ".", "_origprops", ":", "val", "=", "self", ".", "_origprops", "[", "prop", "]", "out", ".", "write", "(", "''", ".", "join", "(", "(", "prop", ",", "'='", ",", "self", ".", "escape", "(", "val", ")", ",", "'\\n'", ")", ")", ")", "out", ".", "close", "(", ")", "except", "IOError", ":", "raise" ]
Write the properties list to the stream 'out' along with the optional 'header'
[ "Write", "the", "properties", "list", "to", "the", "stream", "out", "along", "with", "the", "optional", "header" ]
7d0327b1758b3d907af657e0df3b0618776ac46d
https://github.com/luiscberrocal/pyjavaprops/blob/7d0327b1758b3d907af657e0df3b0618776ac46d/pyjavaprops/javaproperties.py#L293-L313
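And the matching write path, sketched under the assumption that process_pair() (not shown in these records) also records values in the "pristine" _origprops dictionary that store() reads from:

from pyjavaprops.javaproperties import JavaProperties

jp = JavaProperties()
jp.set_property('greeting', 'hello world')
jp.set_property('data.dir', '/tmp/data')

with open('/tmp/demo.properties', 'w') as out:
    jp.store(out, header='demo settings')   # note: store() closes the stream itself
# The file now starts with '#demo settings', then a '#<timestamp>' line,
# then one 'key=value' line per property in insertion order, values escaped.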
247,033
swans-one/django-kittens
django_kittens/models.py
KittenManager.get_random
def get_random(self): """Get a kitten, either from the db, or a new one. """ num_kittens = self.count() new_cutoff = (num_kittens / (num_kittens + constants.KITTEN_FRESHNESS)) if random.random() < new_cutoff: return self._rand_inst() else: return self.create_new()
python
def get_random(self): """Get a kitten, either from the db, or a new one. """ num_kittens = self.count() new_cutoff = (num_kittens / (num_kittens + constants.KITTEN_FRESHNESS)) if random.random() < new_cutoff: return self._rand_inst() else: return self.create_new()
[ "def", "get_random", "(", "self", ")", ":", "num_kittens", "=", "self", ".", "count", "(", ")", "new_cutoff", "=", "(", "num_kittens", "/", "(", "num_kittens", "+", "constants", ".", "KITTEN_FRESHNESS", ")", ")", "if", "random", ".", "random", "(", ")", "<", "new_cutoff", ":", "return", "self", ".", "_rand_inst", "(", ")", "else", ":", "return", "self", ".", "create_new", "(", ")" ]
Get a kitten, either from the db, or a new one.
[ "Get", "a", "kitten", "either", "from", "the", "db", "or", "a", "new", "one", "." ]
31e1ff54737c8ba3e99880dbff285a730ddac851
https://github.com/swans-one/django-kittens/blob/31e1ff54737c8ba3e99880dbff285a730ddac851/django_kittens/models.py#L10-L18
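The cutoff expression above is just num / (num + K): the more kittens already stored, the likelier a reuse. A worked sketch (K = 10 is an invented value for constants.KITTEN_FRESHNESS):

for num_kittens in (0, 10, 90):
    cutoff = num_kittens / (num_kittens + 10.0)
    print(num_kittens, cutoff)   # 0 -> 0.0, 10 -> 0.5, 90 -> 0.9
# Caveat: on Python 2 without 'from __future__ import division' the original
# expression divides two ints, so the cutoff is always 0 and every call would
# take the create_new() branch.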
247,034
zaturox/glin
glin/__main__.py
boot
def boot(): """Read configuration files, initialize glin and run main loop""" argparser = argparse.ArgumentParser( description="Controller for LED strips (WS2801, WS2811 and similar)") argparser.add_argument("-c", "--config", metavar="CONFIGFILE", dest="configfiles", action='append', help='Configuration File. May be repeated multiple times. Later configuration files override previous ones.') argparser.add_argument("-d", "--debug", dest="log_debug", action='store_const', const=True, help='Set log level to debug. Overrides -i/--info') argparser.add_argument("-i", "--info", dest="log_info", action='store_const', const=True, help='Set log level to info.') args = argparser.parse_args() if args.log_debug: logging.basicConfig(level=logging.DEBUG) elif args.log_info: logging.basicConfig(level=logging.INFO) cfg = configparser.ConfigParser() cfgpath = os.path.join(os.path.dirname(__file__), "default.conf") cfg.read(cfgpath) if args.configfiles is not None: cfg.read(args.configfiles) if "core" not in cfg: logging.critical("No [core] section found in configuration files") sys.exit() if "leds" not in cfg["core"]: logging.critical("No leds value found in [core] section in configuration files") sys.exit() led_count = int(cfg["core"]["leds"]) if "hwbackend" not in cfg["core"]: logging.critical("No hwbackend value found in [core] section in configuration files") sys.exit() backend_name = cfg["core"]["hwbackend"] hwbackends = list(iter_entry_points(group='glin.hwbackend', name=backend_name)) if len(hwbackends) != 1: logging.critical("Found multiple hwbackends with same name. Can't decide upon one. Quitting.") sys.exit() backend_class = hwbackends[0].load() backend_configuration = dict(cfg[backend_name]) if backend_name in cfg else {} backend = backend_class(led_count=led_count, config=backend_configuration) app = glin.app.GlinApp(led_count, hw_backend=backend) for entry_point in iter_entry_points(group='glin.animation', name=None): animation_class = entry_point.load() try: if issubclass(animation_class, glin.animations.AbstractAnimation): app.register_animation(animation_class) else: logging.error("This is not a valid animation class. Has to be subclass of glin.animations:AbstractAnimation. Ignoring.: {ep}" .format(ep=entry_point)) except TypeError: logging.error("This is not a Class. Ignoring.: {ep}".format(ep=entry_point)) app.execute()
python
def boot(): """Read configuration files, initialize glin and run main loop""" argparser = argparse.ArgumentParser( description="Controller for LED strips (WS2801, WS2811 and similar)") argparser.add_argument("-c", "--config", metavar="CONFIGFILE", dest="configfiles", action='append', help='Configuration File. May be repeated multiple times. Later configuration files override previous ones.') argparser.add_argument("-d", "--debug", dest="log_debug", action='store_const', const=True, help='Set log level to debug. Overrides -i/--info') argparser.add_argument("-i", "--info", dest="log_info", action='store_const', const=True, help='Set log level to info.') args = argparser.parse_args() if args.log_debug: logging.basicConfig(level=logging.DEBUG) elif args.log_info: logging.basicConfig(level=logging.INFO) cfg = configparser.ConfigParser() cfgpath = os.path.join(os.path.dirname(__file__), "default.conf") cfg.read(cfgpath) if args.configfiles is not None: cfg.read(args.configfiles) if "core" not in cfg: logging.critical("No [core] section found in configuration files") sys.exit() if "leds" not in cfg["core"]: logging.critical("No leds value found in [core] section in configuration files") sys.exit() led_count = int(cfg["core"]["leds"]) if "hwbackend" not in cfg["core"]: logging.critical("No hwbackend value found in [core] section in configuration files") sys.exit() backend_name = cfg["core"]["hwbackend"] hwbackends = list(iter_entry_points(group='glin.hwbackend', name=backend_name)) if len(hwbackends) != 1: logging.critical("Found multiple hwbackends with same name. Can't decide upon one. Quitting.") sys.exit() backend_class = hwbackends[0].load() backend_configuration = dict(cfg[backend_name]) if backend_name in cfg else {} backend = backend_class(led_count=led_count, config=backend_configuration) app = glin.app.GlinApp(led_count, hw_backend=backend) for entry_point in iter_entry_points(group='glin.animation', name=None): animation_class = entry_point.load() try: if issubclass(animation_class, glin.animations.AbstractAnimation): app.register_animation(animation_class) else: logging.error("This is not a valid animation class. Has to be subclass of glin.animations:AbstractAnimation. Ignoring.: {ep}" .format(ep=entry_point)) except TypeError: logging.error("This is not a Class. Ignoring.: {ep}".format(ep=entry_point)) app.execute()
[ "def", "boot", "(", ")", ":", "argparser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Controller for LED stripes (WS2801, WS2811 an similar)\"", ")", "argparser", ".", "add_argument", "(", "\"-c\"", ",", "\"--config\"", ",", "metavar", "=", "\"CONFIGFILE\"", ",", "dest", "=", "\"configfiles\"", ",", "action", "=", "'append'", ",", "help", "=", "'Configuration File. May be repeated multiple times. Later configuration files override previous ones.'", ")", "argparser", ".", "add_argument", "(", "\"-d\"", ",", "\"--debug\"", ",", "dest", "=", "\"log_debug\"", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "help", "=", "'Set log level to debug. Overrides -i/--info'", ")", "argparser", ".", "add_argument", "(", "\"-i\"", ",", "\"--info\"", ",", "dest", "=", "\"log_info\"", ",", "action", "=", "'store_const'", ",", "const", "=", "True", ",", "help", "=", "'Set log level to info.'", ")", "args", "=", "argparser", ".", "parse_args", "(", ")", "if", "args", ".", "log_debug", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "elif", "args", ".", "log_info", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "cfg", "=", "configparser", ".", "ConfigParser", "(", ")", "cfgpath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ",", "\"default.conf\"", ")", "cfg", ".", "read", "(", "cfgpath", ")", "if", "args", ".", "configfiles", "is", "not", "None", ":", "cfg", ".", "read", "(", "args", ".", "configfiles", ")", "if", "\"core\"", "not", "in", "cfg", ":", "logging", ".", "critical", "(", "\"No [core] section found in configurations files\"", ")", "sys", ".", "exit", "(", ")", "if", "\"leds\"", "not", "in", "cfg", "[", "\"core\"", "]", ":", "logging", ".", "critical", "(", "\"No leds value found in [core] section in configurations files\"", ")", "sys", ".", "exit", "(", ")", "led_count", "=", "int", "(", "cfg", "[", "\"core\"", "]", "[", "\"leds\"", "]", ")", "if", "\"hwbackend\"", "not", "in", "cfg", "[", "\"core\"", "]", ":", "logging", ".", "critical", "(", "\"No hwbackend value found in [core] section in configurations files\"", ")", "sys", ".", "exit", "(", ")", "backend_name", "=", "cfg", "[", "\"core\"", "]", "[", "\"hwbackend\"", "]", "hwbackends", "=", "list", "(", "iter_entry_points", "(", "group", "=", "'glin.hwbackend'", ",", "name", "=", "backend_name", ")", ")", "if", "len", "(", "hwbackends", ")", "!=", "1", ":", "logging", ".", "critical", "(", "\"Found multiple hwbackend with same name. Cant decide upon one. 
Quitting.\"", ")", "sys", ".", "exit", "(", ")", "backend_class", "=", "hwbackends", "[", "0", "]", ".", "load", "(", ")", "backend_configuration", "=", "dict", "(", "cfg", "[", "backend_name", "]", ")", "if", "backend_name", "in", "cfg", "else", "{", "}", "backend", "=", "backend_class", "(", "led_count", "=", "led_count", ",", "config", "=", "backend_configuration", ")", "app", "=", "glin", ".", "app", ".", "GlinApp", "(", "led_count", ",", "hw_backend", "=", "backend", ")", "for", "entry_point", "in", "iter_entry_points", "(", "group", "=", "'glin.animation'", ",", "name", "=", "None", ")", ":", "animation_class", "=", "entry_point", ".", "load", "(", ")", "try", ":", "if", "issubclass", "(", "animation_class", ",", "glin", ".", "animations", ".", "AbstractAnimation", ")", ":", "app", ".", "register_animation", "(", "animation_class", ")", "else", ":", "logging", ".", "error", "(", "\"This is not a valid animation class. Has to be subclass of glin.animations:AbstraktAnimation. Ignoring.: {ep}\"", ".", "format", "(", "ep", "=", "entry_point", ")", ")", "except", "TypeError", ":", "logging", ".", "error", "(", "\"This is not a Class. Ignoring.: {ep}\"", ".", "format", "(", "ep", "=", "entry_point", ")", ")", "app", ".", "execute", "(", ")" ]
Read configuration files, initialize glin and run main loop
[ "Read", "configuration", "files", "initialize", "glin", "and", "run", "main", "loop" ]
55214a579c4e4b4d74765f3f6aa2eb815bac1c3b
https://github.com/zaturox/glin/blob/55214a579c4e4b4d74765f3f6aa2eb815bac1c3b/glin/__main__.py#L14-L69
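For reference, a minimal configuration that would get boot() past its checks -- the [core] section and its leds/hwbackend keys come straight from the code above, while the backend name 'ws2801' and its option are invented for illustration:

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[core]
leds = 60
hwbackend = ws2801

[ws2801]
# this section is optional; its pairs are passed to the backend class as a dict
spi_device = /dev/spidev0.0
""")
print(int(cfg["core"]["leds"]), cfg["core"]["hwbackend"])   # 60 ws2801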
247,035
xtrementl/focus
focus/plugin/modules/sounds.py
PlaySound._play_sound
def _play_sound(self, filename): """ Shells player with the provided filename. `filename` Filename for sound file. """ command = self._get_external_player() if not command: return # no player found if common.IS_MACOSX: command += ' "{0}"'.format(filename) else: # append quiet flag and filename is_play = (command == 'play') command += ' -q "{0}"'.format(filename) # HACK: play can default to using pulseaudio. here, we # check if pulse command exists and delegate to alsa if # not if is_play and not common.which('pulseaudio'): command += ' -t alsa' # play sound file, ignore if it fails common.shell_process(command, background=True)
python
def _play_sound(self, filename): """ Shells player with the provided filename. `filename` Filename for sound file. """ command = self._get_external_player() if not command: return # no player found if common.IS_MACOSX: command += ' "{0}"'.format(filename) else: # append quiet flag and filename is_play = (command == 'play') command += ' -q "{0}"'.format(filename) # HACK: play can default to using pulseaudio. here, we # check if pulse command exists and delegate to alsa if # not if is_play and not common.which('pulseaudio'): command += ' -t alsa' # play sound file, ignore if it fails common.shell_process(command, background=True)
[ "def", "_play_sound", "(", "self", ",", "filename", ")", ":", "command", "=", "self", ".", "_get_external_player", "(", ")", "if", "not", "command", ":", "return", "# no player found", "if", "common", ".", "IS_MACOSX", ":", "command", "+=", "' \"{0}\"'", ".", "format", "(", "filename", ")", "else", ":", "# append quiet flag and filename", "is_play", "=", "(", "command", "==", "'play'", ")", "command", "+=", "' -q \"{0}\"'", ".", "format", "(", "filename", ")", "# HACK: play can default to using pulseaudio. here, we", "# check if pulse command exists and delegate to alsa if", "# not", "if", "is_play", "and", "not", "common", ".", "which", "(", "'pulseaudio'", ")", ":", "command", "+=", "' -t alsa'", "# play sound file, ignore if it fails", "common", ".", "shell_process", "(", "command", ",", "background", "=", "True", ")" ]
Shells player with the provided filename. `filename` Filename for sound file.
[ "Shells", "player", "with", "the", "provided", "filename", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/sounds.py#L66-L91
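Pulled out of the class for clarity, the command assembly above reduces to the following (the player names 'afplay' and 'play' are assumptions about what the unshown _get_external_player() helper returns):

def build_command(player, filename, is_macosx, have_pulseaudio):
    if is_macosx:
        return '{0} "{1}"'.format(player, filename)
    command = '{0} -q "{1}"'.format(player, filename)
    if player == 'play' and not have_pulseaudio:
        command += ' -t alsa'   # make sox talk to ALSA directly
    return command

print(build_command('afplay', 'alarm.wav', True, False))    # afplay "alarm.wav"
print(build_command('play', 'alarm.wav', False, False))     # play -q "alarm.wav" -t alsa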
247,036
xtrementl/focus
focus/plugin/modules/sounds.py
PlaySound.parse_option
def parse_option(self, option, block_name, *values): """ Parse options for play, end_play, and timer_play. """ if len(values) != 1: raise TypeError value = os.path.realpath(os.path.expanduser(values[0])) if not os.path.isfile(value) and not os.path.islink(value): raise ValueError(u'Sound file "{0}" does not exist' .format(value)) # special extension check for aplay player ext = os.path.splitext(value)[1].lower() if ext != '.wav' and self._get_external_player() == 'aplay': raise ValueError(u"Only WAV sound files " "are supported for 'aplay'") if option == 'play': option = 'start_' + option key = option.split('_', 1)[0] self.files[key] = value
python
def parse_option(self, option, block_name, *values): """ Parse options for play, end_play, and timer_play. """ if len(values) != 1: raise TypeError value = os.path.realpath(os.path.expanduser(values[0])) if not os.path.isfile(value) and not os.path.islink(value): raise ValueError(u'Sound file "{0}" does not exist' .format(value)) # special extension check for aplay player ext = os.path.splitext(value)[1].lower() if ext != '.wav' and self._get_external_player() == 'aplay': raise ValueError(u"Only WAV sound files " "are supported for 'aplay'") if option == 'play': option = 'start_' + option key = option.split('_', 1)[0] self.files[key] = value
[ "def", "parse_option", "(", "self", ",", "option", ",", "block_name", ",", "*", "values", ")", ":", "if", "len", "(", "values", ")", "!=", "1", ":", "raise", "TypeError", "value", "=", "os", ".", "path", ".", "realpath", "(", "os", ".", "path", ".", "expanduser", "(", "values", "[", "0", "]", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "value", ")", "and", "not", "os", ".", "path", ".", "islink", "(", "value", ")", ":", "raise", "ValueError", "(", "u'Sound file \"{0}\" does not exist'", ".", "format", "(", "value", ")", ")", "# special extension check for aplay player", "ext", "=", "os", ".", "path", ".", "splitext", "(", "value", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "ext", "!=", "'wav'", "and", "self", ".", "_get_external_player", "(", ")", "==", "'aplay'", ":", "raise", "ValueError", "(", "u\"Only WAV sound file \"", "\"supported for 'aplay'\"", ")", "if", "option", "==", "'play'", ":", "option", "=", "'start_'", "+", "option", "key", "=", "option", ".", "split", "(", "'_'", ",", "1", ")", "[", "0", "]", "self", ".", "files", "[", "key", "]", "=", "value" ]
Parse options for play, end_play, and timer_play.
[ "Parse", "options", "for", "play", "end_play", "and", "timer_play", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/sounds.py#L93-L115
247,037
xtrementl/focus
focus/plugin/modules/sounds.py
PlaySound.on_taskend
def on_taskend(self, task): """ Play sounds at task end. """ key = 'timer' if task.elapsed else 'end' filename = self.files.get(key) if filename: self._play_sound(filename)
python
def on_taskend(self, task): """ Play sounds at task end. """ key = 'timer' if task.elapsed else 'end' filename = self.files.get(key) if filename: self._play_sound(filename)
[ "def", "on_taskend", "(", "self", ",", "task", ")", ":", "key", "=", "'timer'", "if", "task", ".", "elapsed", "else", "'end'", "filename", "=", "self", ".", "files", ".", "get", "(", "key", ")", "if", "filename", ":", "self", ".", "_play_sound", "(", "filename", ")" ]
Play sounds at task end.
[ "Play", "sounds", "at", "task", "end", "." ]
cbbbc0b49a7409f9e0dc899de5b7e057f50838e4
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/sounds.py#L123-L130
247,038
brbsix/python-batchpath
batchpath.py
_fixpath
def _fixpath(root, base): """Return absolute, normalized, joined paths""" return os.path.abspath(os.path.normpath(os.path.join(root, base)))
python
def _fixpath(root, base): """Return absolute, normalized, joined paths""" return os.path.abspath(os.path.normpath(os.path.join(root, base)))
[ "def", "_fixpath", "(", "root", ",", "base", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "root", ",", "base", ")", ")", ")" ]
Return absolute, normalized, joined paths
[ "Return", "absolute", "normalized", "joined", "paths" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L137-L139
247,039
brbsix/python-batchpath
batchpath.py
_sorter
def _sorter(generated): """Return a list of paths sorted by dirname & basename.""" pairs = [(os.path.dirname(f), os.path.basename(f)) for f in set(list(generated))] pairs.sort() return [os.path.join(pair[0], pair[1]) for pair in pairs]
python
def _sorter(generated): """Return a list of paths sorted by dirname & basename.""" pairs = [(os.path.dirname(f), os.path.basename(f)) for f in set(list(generated))] pairs.sort() return [os.path.join(pair[0], pair[1]) for pair in pairs]
[ "def", "_sorter", "(", "generated", ")", ":", "pairs", "=", "[", "(", "os", ".", "path", ".", "dirname", "(", "f", ")", ",", "os", ".", "path", ".", "basename", "(", "f", ")", ")", "for", "f", "in", "set", "(", "list", "(", "generated", ")", ")", "]", "pairs", ".", "sort", "(", ")", "return", "[", "os", ".", "path", ".", "join", "(", "pair", "[", "0", "]", ",", "pair", "[", "1", "]", ")", "for", "pair", "in", "pairs", "]" ]
Return a list of paths sorted by dirname & basename.
[ "Return", "a", "list", "of", "paths", "sorted", "by", "dirname", "&", "basename", "." ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L142-L149
247,040
brbsix/python-batchpath
batchpath.py
_walk
def _walk(recursion): """Returns a recursive or non-recursive directory walker""" try: from scandir import walk as walk_function except ImportError: from os import walk as walk_function if recursion: walk = partial(walk_function) else: def walk(path): # pylint: disable=C0111 try: yield next(walk_function(path)) except NameError: yield walk_function(path) return walk
python
def _walk(recursion): """Returns a recursive or non-recursive directory walker""" try: from scandir import walk as walk_function except ImportError: from os import walk as walk_function if recursion: walk = partial(walk_function) else: def walk(path): # pylint: disable=C0111 try: yield next(walk_function(path)) except NameError: yield walk_function(path) return walk
[ "def", "_walk", "(", "recursion", ")", ":", "try", ":", "from", "scandir", "import", "walk", "as", "walk_function", "except", "ImportError", ":", "from", "os", "import", "walk", "as", "walk_function", "if", "recursion", ":", "walk", "=", "partial", "(", "walk_function", ")", "else", ":", "def", "walk", "(", "path", ")", ":", "# pylint: disable=C0111", "try", ":", "yield", "next", "(", "walk_function", "(", "path", ")", ")", "except", "NameError", ":", "yield", "walk_function", "(", "path", ")", "return", "walk" ]
Returns a recursive or non-recursive directory walker
[ "Returns", "a", "recursive", "or", "non", "-", "recursive", "directory", "walker" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L152-L167
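Usage sketch: both walkers yield (root, dirnames, filenames) tuples, but the non-recursive one stops after the top directory. Importing the private helper directly is for illustration only:

from batchpath import _walk

walk_once = _walk(recursion=False)
for root, dirs, files in walk_once('.'):
    print(root, len(dirs), len(files))    # exactly one tuple: the top level

walk_all = _walk(recursion=True)
print(sum(len(files) for _, _, files in walk_all('.')))   # file count for the whole tree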
247,041
brbsix/python-batchpath
batchpath.py
isvalid
def isvalid(path, access=None, extensions=None, filetype=None, minsize=None): """Check whether file meets access, extension, size, and type criteria.""" return ((access is None or os.access(path, access)) and (extensions is None or checkext(path, extensions)) and (((filetype == 'all' and os.path.exists(path)) or (filetype == 'dir' and os.path.isdir(path)) or (filetype == 'file' and os.path.isfile(path))) or filetype is None) and (minsize is None or (not os.path.isfile(path) or os.path.getsize(path) > minsize)))
python
def isvalid(path, access=None, extensions=None, filetype=None, minsize=None): """Check whether file meets access, extension, size, and type criteria.""" return ((access is None or os.access(path, access)) and (extensions is None or checkext(path, extensions)) and (((filetype == 'all' and os.path.exists(path)) or (filetype == 'dir' and os.path.isdir(path)) or (filetype == 'file' and os.path.isfile(path))) or filetype is None) and (minsize is None or (not os.path.isfile(path) or os.path.getsize(path) > minsize)))
[ "def", "isvalid", "(", "path", ",", "access", "=", "None", ",", "extensions", "=", "None", ",", "filetype", "=", "None", ",", "minsize", "=", "None", ")", ":", "return", "(", "(", "access", "is", "None", "or", "os", ".", "access", "(", "path", ",", "access", ")", ")", "and", "(", "extensions", "is", "None", "or", "checkext", "(", "path", ",", "extensions", ")", ")", "and", "(", "(", "(", "filetype", "==", "'all'", "and", "os", ".", "path", ".", "exists", "(", "path", ")", ")", "or", "(", "filetype", "==", "'dir'", "and", "os", ".", "path", ".", "isdir", "(", "path", ")", ")", "or", "(", "filetype", "==", "'file'", "and", "os", ".", "path", ".", "isfile", "(", "path", ")", ")", ")", "or", "filetype", "is", "None", ")", "and", "(", "minsize", "is", "None", "or", "(", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", "or", "os", ".", "path", ".", "getsize", "(", "path", ")", ">", "minsize", ")", ")", ")" ]
Check whether file meets access, extension, size, and type criteria.
[ "Check", "whether", "file", "meets", "access", "extension", "size", "and", "type", "criteria", "." ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L175-L184
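Every criterion left at None is skipped, so isvalid() composes cleanly. A small sketch, assuming batchpath is importable:

import os
from batchpath import isvalid

print(isvalid('/etc/passwd', access=os.R_OK, filetype='file'))   # True on most systems
print(isvalid('/etc', filetype='dir'))                           # True
print(isvalid('/etc/passwd', filetype='file', minsize=10**9))    # False: below minsize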
247,042
brbsix/python-batchpath
batchpath.py
GeneratePaths._generator_file
def _generator_file(self): """Generator for `self.filetype` of 'file'""" for path in self.paths: if os.path.isfile(path): if isvalid(path, self.access, self.extensions, minsize=self.minsize): yield os.path.abspath(path) elif os.path.isdir(path): for root, _, fnames in self._walker(path): yield from self._generator_rebase(fnames, root)
python
def _generator_file(self): """Generator for `self.filetype` of 'file'""" for path in self.paths: if os.path.isfile(path): if isvalid(path, self.access, self.extensions, minsize=self.minsize): yield os.path.abspath(path) elif os.path.isdir(path): for root, _, fnames in self._walker(path): yield from self._generator_rebase(fnames, root)
[ "def", "_generator_file", "(", "self", ")", ":", "for", "path", "in", "self", ".", "paths", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "if", "isvalid", "(", "path", ",", "self", ".", "access", ",", "self", ".", "extensions", ",", "minsize", "=", "self", ".", "minsize", ")", ":", "yield", "os", ".", "path", ".", "abspath", "(", "path", ")", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "root", ",", "_", ",", "fnames", "in", "self", ".", "_walker", "(", "path", ")", ":", "yield", "from", "self", ".", "_generator_rebase", "(", "fnames", ",", "root", ")" ]
Generator for `self.filetype` of 'file'
[ "Generator", "for", "self", ".", "filetype", "of", "file" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L33-L42
247,043
brbsix/python-batchpath
batchpath.py
GeneratePaths._generator_other
def _generator_other(self): """Generator for `self.filetype` other than file""" for path in self.paths: for root, dnames, fnames in self._walker(path): yield from self._generator_rebase(dnames, root) yield from self._generator_rebase(fnames, root)
python
def _generator_other(self): """Generator for `self.filetype` other than file""" for path in self.paths: for root, dnames, fnames in self._walker(path): yield from self._generator_rebase(dnames, root) yield from self._generator_rebase(fnames, root)
[ "def", "_generator_other", "(", "self", ")", ":", "for", "path", "in", "self", ".", "paths", ":", "for", "root", ",", "dnames", ",", "fnames", "in", "self", ".", "_walker", "(", "path", ")", ":", "yield", "from", "self", ".", "_generator_rebase", "(", "dnames", ",", "root", ")", "yield", "from", "self", ".", "_generator_rebase", "(", "fnames", ",", "root", ")" ]
Generator for `self.filetype` other than file
[ "Generator", "for", "self", ".", "filetype", "other", "than", "file" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L44-L49
247,044
brbsix/python-batchpath
batchpath.py
VerifyPaths.all
def all(self, paths, access=None): """Verify list of paths""" self.failures = [path for path in paths if not isvalid(path, access, filetype='all')] return not self.failures
python
def all(self, paths, access=None): """Verify list of paths""" self.failures = [path for path in paths if not isvalid(path, access, filetype='all')] return not self.failures
[ "def", "all", "(", "self", ",", "paths", ",", "access", "=", "None", ")", ":", "self", ".", "failures", "=", "[", "path", "for", "path", "in", "paths", "if", "not", "isvalid", "(", "path", ",", "access", ",", "filetype", "=", "'all'", ")", "]", "return", "not", "self", ".", "failures" ]
Verify list of paths
[ "Verify", "list", "of", "paths" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L115-L120
247,045
brbsix/python-batchpath
batchpath.py
VerifyPaths.dirs
def dirs(self, paths, access=None): """Verify list of directories""" self.failures = [path for path in paths if not isvalid(path, access, filetype='dir')] return not self.failures
python
def dirs(self, paths, access=None): """Verify list of directories""" self.failures = [path for path in paths if not isvalid(path, access, filetype='dir')] return not self.failures
[ "def", "dirs", "(", "self", ",", "paths", ",", "access", "=", "None", ")", ":", "self", ".", "failures", "=", "[", "path", "for", "path", "in", "paths", "if", "not", "isvalid", "(", "path", ",", "access", ",", "filetype", "=", "'dir'", ")", "]", "return", "not", "self", ".", "failures" ]
Verify list of directories
[ "Verify", "list", "of", "directories" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L122-L127
247,046
brbsix/python-batchpath
batchpath.py
VerifyPaths.files
def files(self, paths, access=None, extensions=None, minsize=None): """Verify list of files""" self.failures = [path for path in paths if not isvalid(path, access, extensions, 'file', minsize)] return not self.failures
python
def files(self, paths, access=None, extensions=None, minsize=None): """Verify list of files""" self.failures = [path for path in paths if not isvalid(path, access, extensions, 'file', minsize)] return not self.failures
[ "def", "files", "(", "self", ",", "paths", ",", "access", "=", "None", ",", "extensions", "=", "None", ",", "minsize", "=", "None", ")", ":", "self", ".", "failures", "=", "[", "path", "for", "path", "in", "paths", "if", "not", "isvalid", "(", "path", ",", "access", ",", "extensions", ",", "'file'", ",", "minsize", ")", "]", "return", "not", "self", ".", "failures" ]
Verify list of files
[ "Verify", "list", "of", "files" ]
e4426c7946189aa41f0c99d37bf843799fb00c33
https://github.com/brbsix/python-batchpath/blob/e4426c7946189aa41f0c99d37bf843799fb00c33/batchpath.py#L129-L134
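All three verifiers follow one pattern: collect the offenders on self.failures and return True only when that list is empty. A usage sketch, assuming the class takes no constructor arguments:

import os
from batchpath import VerifyPaths

vp = VerifyPaths()
if not vp.files(['/etc/passwd', '/no/such/file'], access=os.R_OK):
    print('rejected:', vp.failures)    # ['/no/such/file']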
247,047
dr4ke616/pinky
pinky/core/manhole.py
get_manhole_factory
def get_manhole_factory(namespace, **passwords): """Get a Manhole Factory """ realm = manhole_ssh.TerminalRealm() realm.chainedProtocolFactory.protocolFactory = ( lambda _: EnhancedColoredManhole(namespace) ) p = portal.Portal(realm) p.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(**passwords) ) return manhole_ssh.ConchFactory(p)
python
def get_manhole_factory(namespace, **passwords): """Get a Manhole Factory """ realm = manhole_ssh.TerminalRealm() realm.chainedProtocolFactory.protocolFactory = ( lambda _: EnhancedColoredManhole(namespace) ) p = portal.Portal(realm) p.registerChecker( checkers.InMemoryUsernamePasswordDatabaseDontUse(**passwords) ) return manhole_ssh.ConchFactory(p)
[ "def", "get_manhole_factory", "(", "namespace", ",", "*", "*", "passwords", ")", ":", "realm", "=", "manhole_ssh", ".", "TerminalRealm", "(", ")", "realm", ".", "chainedProtocolFactory", ".", "protocolFactory", "=", "(", "lambda", "_", ":", "EnhancedColoredManhole", "(", "namespace", ")", ")", "p", "=", "portal", ".", "Portal", "(", "realm", ")", "p", ".", "registerChecker", "(", "checkers", ".", "InMemoryUsernamePasswordDatabaseDontUse", "(", "*", "*", "passwords", ")", ")", "return", "manhole_ssh", ".", "ConchFactory", "(", "p", ")" ]
Get a Manhole Factory
[ "Get", "a", "Manhole", "Factory" ]
35c165f5a1d410be467621f3152df1dbf458622a
https://github.com/dr4ke616/pinky/blob/35c165f5a1d410be467621f3152df1dbf458622a/pinky/core/manhole.py#L86-L99
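Serving the factory over SSH with Twisted might look like the sketch below; the port and credentials are invented, and depending on the Twisted version you may also need to attach SSH host keys to the ConchFactory before listening:

from twisted.internet import reactor

from pinky.core.manhole import get_manhole_factory

namespace = {'answer': 42}          # objects exposed inside the manhole session
factory = get_manhole_factory(namespace, admin='s3cret')
reactor.listenTCP(2222, factory)    # then: ssh -p 2222 admin@localhost
reactor.run()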
247,048
chromy/essence
src/essence/world.py
World.create_entity
def create_entity(self): """Create a new entity. The entity will have a higher UID than any previously associated with this world. :return: the new entity :rtype: :class:`essence.Entity`""" self._highest_id_seen += 1 entity = Entity(self._highest_id_seen, self) self._entities.append(entity) return entity
python
def create_entity(self): """Create a new entity. The entity will have a higher UID than any previously associated with this world. :return: the new entity :rtype: :class:`essence.Entity`""" self._highest_id_seen += 1 entity = Entity(self._highest_id_seen, self) self._entities.append(entity) return entity
[ "def", "create_entity", "(", "self", ")", ":", "self", ".", "_highest_id_seen", "+=", "1", "entity", "=", "Entity", "(", "self", ".", "_highest_id_seen", ",", "self", ")", "self", ".", "_entities", ".", "append", "(", "entity", ")", "return", "entity" ]
Create a new entity. The entity will have a higher UID than any previously associated with this world. :return: the new entity :rtype: :class:`essence.Entity`
[ "Create", "a", "new", "entity", "." ]
6cd18821ec91edf022619d9f0c0878f38c22a763
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L32-L43
247,049
chromy/essence
src/essence/world.py
World.destroy_entitiy
def destroy_entitiy(self, entity): """Remove the entity and all connected components from the world. Long-hand for :func:`essence.Entity.destroy`. """ for relation in self._database.values(): relation.pop(entity, None) for l in self._entities_by_component.values(): l.discard(entity) self._entities.remove(entity)
python
def destroy_entitiy(self, entity): """Remove the entity and all connected components from the world. Long-hand for :func:`essence.Entity.destroy`. """ for relation in self._database.values(): relation.pop(entity, None) for l in self._entities_by_component.values(): l.discard(entity) self._entities.remove(entity)
[ "def", "destroy_entitiy", "(", "self", ",", "entity", ")", ":", "for", "relation", "in", "self", ".", "_database", ".", "values", "(", ")", ":", "relation", ".", "pop", "(", "entity", ",", "None", ")", "for", "l", "in", "self", ".", "_entities_by_component", ".", "values", "(", ")", ":", "l", ".", "discard", "(", "entity", ")", "self", ".", "_entities", ".", "remove", "(", "entity", ")" ]
Remove the entity and all connected components from the world. Long-hand for :func:`essence.Entity.destroy`.
[ "Remove", "the", "entity", "and", "all", "connected", "components", "from", "the", "world", "." ]
6cd18821ec91edf022619d9f0c0878f38c22a763
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L45-L54
247,050
chromy/essence
src/essence/world.py
World.add_component
def add_component(self, entity, component): """Add component to entity. Long-hand for :func:`essence.Entity.add`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component: component to add to the entity :type component: :class:`essence.Component`""" component_type = type(component) relation = self._get_relation(component_type) if entity in relation: # PYTHON2.6: Numbers required in format string. msg = "Component {0} can't be added to entity {1} since it already has a component of type {2}.".format(component, entity, component_type) raise DuplicateComponentError(msg) relation[entity] = component self._entities_with(component_type).add(entity)
python
def add_component(self, entity, component): """Add component to entity. Long-hand for :func:`essence.Entity.add`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component: component to add to the entity :type component: :class:`essence.Component`""" component_type = type(component) relation = self._get_relation(component_type) if entity in relation: # PYTHON2.6: Numbers required in format string. msg = "Component {0} can't be added to entity {1} since it already has a component of type {2}.".format(component, entity, component_type) raise DuplicateComponentError(msg) relation[entity] = component self._entities_with(component_type).add(entity)
[ "def", "add_component", "(", "self", ",", "entity", ",", "component", ")", ":", "component_type", "=", "type", "(", "component", ")", "relation", "=", "self", ".", "_get_relation", "(", "component_type", ")", "if", "entity", "in", "relation", ":", "# PYTHON2.6: Numbers required in format string.", "msg", "=", "\"Component {0} can't be added to entity {1} since it already has a component of type {2}.\"", ".", "format", "(", "component", ",", "entity", ",", "component_type", ")", "raise", "DuplicateComponentError", "(", "msg", ")", "relation", "[", "entity", "]", "=", "component", "self", ".", "_entities_with", "(", "component_type", ")", ".", "add", "(", "entity", ")" ]
Add component to entity. Long-hand for :func:`essence.Entity.add`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component: component to add to the entity :type component: :class:`essence.Component`
[ "Add", "component", "to", "entity", "." ]
6cd18821ec91edf022619d9f0c0878f38c22a763
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L56-L72
247,051
chromy/essence
src/essence/world.py
World.get_component
def get_component(self, entity, component_type, missing=MISSING): """Get the component of type component_type associated with entity. Long-hand for :func:`essence.Entity.get`. :param entity: entity to query :type entity: :class:`essence.Entity` :param component_type: type of component to retrieve :type component_type: The :class:`type` of a :class:`Component` subclass :param missing: value to return if the entity has no component of the given type :type missing: :class:`essence.Component` :raises: :class:`NoSuchComponentError` if no component is found and no missing value is given""" relation = self._get_relation(component_type) if entity not in relation: if missing is MISSING: raise NoSuchComponentError() else: return missing return relation[entity]
python
def get_component(self, entity, component_type, missing=MISSING): """Get the component of type component_type associated with entity. Long-hand for :func:`essence.Entity.get`. :param entity: entity to query :type entity: :class:`essence.Entity` :param component_type: type of component to retrieve :type component_type: The :class:`type` of a :class:`Component` subclass :param missing: value to return if the entity has no component of the given type :type missing: :class:`essence.Component` :raises: :class:`NoSuchComponentError` if no component is found and no missing value is given""" relation = self._get_relation(component_type) if entity not in relation: if missing is MISSING: raise NoSuchComponentError() else: return missing return relation[entity]
[ "def", "get_component", "(", "self", ",", "entity", ",", "component_type", ",", "missing", "=", "MISSING", ")", ":", "relation", "=", "self", ".", "_get_relation", "(", "component_type", ")", "if", "entity", "not", "in", "relation", ":", "if", "missing", "is", "MISSING", ":", "raise", "NoSuchComponentError", "(", ")", "else", ":", "return", "missing", "return", "relation", "[", "entity", "]" ]
Get the component of type component_type associated with entity. Long-hand for :func:`essence.Entity.get`. :param entity: entity to query :type entity: :class:`essence.Entity` :param component_type: type of component to retrieve :type component_type: The :class:`type` of a :class:`Component` subclass :param missing: value to return if the entity has no component of the given type :type missing: :class:`essence.Component` :raises: :class:`NoSuchComponentError` if no component is found and no missing value is given
[ "Get", "the", "component", "of", "type", "component_type", "associated", "with", "entity", "." ]
6cd18821ec91edf022619d9f0c0878f38c22a763
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L74-L92
247,052
chromy/essence
src/essence/world.py
World.remove_component
def remove_component(self, entity, component_type): """Remove the component of component_type from entity. Long-hand for :func:`essence.Entity.remove`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component_type: Type of component :type component_type: The :class:`type` of a :class:`Component` subclass""" relation = self._get_relation(component_type) del relation[entity] self._entities_with(component_type).remove(entity)
python
def remove_component(self, entity, component_type): """Remove the component of component_type from entity. Long-hand for :func:`essence.Entity.remove`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component_type: Type of component :type component_type: The :class:`type` of a :class:`Component` subclass""" relation = self._get_relation(component_type) del relation[entity] self._entities_with(component_type).remove(entity)
[ "def", "remove_component", "(", "self", ",", "entity", ",", "component_type", ")", ":", "relation", "=", "self", ".", "_get_relation", "(", "component_type", ")", "del", "relation", "[", "entity", "]", "self", ".", "_entities_with", "(", "component_type", ")", ".", "remove", "(", "entity", ")" ]
Remove the component of component_type from entity. Long-hand for :func:`essence.Entity.remove`. :param entity: entity to associate :type entity: :class:`essence.Entity` :param component_type: Type of component :type component_type: The :class:`type` of a :class:`Component` subclass
[ "Remove", "the", "component", "of", "component_type", "from", "entity", "." ]
6cd18821ec91edf022619d9f0c0878f38c22a763
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L94-L105
247,053
chromy/essence
src/essence/world.py
World.update
def update(self, *args, **kwargs): """Calls update on each of the systems in self.systems.""" for system in self.systems: system.update(self, *args, **kwargs)
python
def update(self, *args, **kwargs): """Calls update on each of the systems in self.systems.""" for system in self.systems: system.update(self, *args, **kwargs)
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "system", "in", "self", ".", "systems", ":", "system", ".", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Calls update on each of the systems in self.systems.
[ "Calls", "update", "on", "each", "of", "the", "systems", "self", ".", "systems", "." ]
6cd18821ec91edf022619d9f0c0878f38c22a763
https://github.com/chromy/essence/blob/6cd18821ec91edf022619d9f0c0878f38c22a763/src/essence/world.py#L137-L140
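Putting the World records together, a compact end-to-end sketch. The import path, the no-argument constructor, the plain-list systems attribute, and iterating the private _entities list are all assumptions, since these records show only the methods themselves:

from essence.world import World

class Position(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class Mover(object):
    # World.update() only needs objects exposing update(world, ...)
    def update(self, world, dt):
        for entity in list(world._entities):
            pos = world.get_component(entity, Position, missing=None)
            if pos is not None:
                pos.x += dt

world = World()
world.systems = [Mover()]
e = world.create_entity()
world.add_component(e, Position(0.0, 0.0))
world.update(1.0)                            # forwarded as Mover.update(world, 1.0)
print(world.get_component(e, Position).x)    # 1.0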
247,054
djangomini/djangomini
djangomini/urls.py
auto_discover
def auto_discover(): """ Auto-map urls from controllers directory. Files and classes that start with an underscore are ignored. """ urls = [ url(r'^admin/', admin.site.urls), ] # TODO: we can create python package to have a lot of controllers # in separate files def get_controllers(module_name): """Return list of controllers in a module.""" module = import_module('app.controllers.{}'.format(module_name)) controllers = [] for obj_name in dir(module): # we ignore import of Controller and hidden names if obj_name.startswith('_') or obj_name == 'Controller': continue obj = getattr(module, obj_name) # include only controllers if issubclass(obj, Controller): controllers.append(obj) return controllers def controller_to_path(controller): """ Convert controller's name to a valid path. Make url in lower case by replace capital letters to small and adding underscore between words. """ words = re.findall('[A-Z][a-z]*', controller.__name__) if words[-1] == 'Controller': del words[-1] # transform words to a url address url_path = '_'.join(words).lower() # main controller is a root handler # TODO: root address inside the file should always come last if url_path == 'main': url_path = '' return url_path # load all controllers (excluding main controllers) for file_name in listdir('controllers', get_files=True, hide_ignored=True): # remove .py extension from file name app_name = file_name.split('.', 1)[0] # we will include main controller at the end if app_name == 'main': continue # add url for each controller for controller in get_controllers(app_name): url_path = controller_to_path(controller) urls.append(url( r'^{}/{}$'.format(app_name, url_path), controller.as_view() )) # add urls for main controllers for controller in get_controllers('main'): url_path = controller_to_path(controller) # map urls to a root path urls.append(url( r'^{}$'.format(url_path), controller.as_view() )) return urls
python
def auto_discover(): """ Auto-map urls from controllers directory. Files and classes that start with an underscore are ignored. """ urls = [ url(r'^admin/', admin.site.urls), ] # TODO: we can create python package to have a lot of controllers # in separate files def get_controllers(module_name): """Return list of controllers in a module.""" module = import_module('app.controllers.{}'.format(module_name)) controllers = [] for obj_name in dir(module): # we ignore import of Controller and hidden names if obj_name.startswith('_') or obj_name == 'Controller': continue obj = getattr(module, obj_name) # include only controllers if issubclass(obj, Controller): controllers.append(obj) return controllers def controller_to_path(controller): """ Convert controller's name to a valid path. Make url in lower case by replace capital letters to small and adding underscore between words. """ words = re.findall('[A-Z][a-z]*', controller.__name__) if words[-1] == 'Controller': del words[-1] # transform words to a url address url_path = '_'.join(words).lower() # main controller is a root handler # TODO: root address inside the file should always come last if url_path == 'main': url_path = '' return url_path # load all controllers (excluding main controllers) for file_name in listdir('controllers', get_files=True, hide_ignored=True): # remove .py extension from file name app_name = file_name.split('.', 1)[0] # we will include main controller at the end if app_name == 'main': continue # add url for each controller for controller in get_controllers(app_name): url_path = controller_to_path(controller) urls.append(url( r'^{}/{}$'.format(app_name, url_path), controller.as_view() )) # add urls for main controllers for controller in get_controllers('main'): url_path = controller_to_path(controller) # map urls to a root path urls.append(url( r'^{}$'.format(url_path), controller.as_view() )) return urls
[ "def", "auto_discover", "(", ")", ":", "urls", "=", "[", "url", "(", "r'^admin/'", ",", "admin", ".", "site", ".", "urls", ")", ",", "]", "# TODO: we can create python package to have a lot of controllers", "# in separate files", "def", "get_controllers", "(", "module_name", ")", ":", "\"\"\"Return list of controllers in a module.\"\"\"", "module", "=", "import_module", "(", "'app.controllers.{}'", ".", "format", "(", "module_name", ")", ")", "controllers", "=", "[", "]", "for", "obj_name", "in", "dir", "(", "module", ")", ":", "# we ignore import of Controller and hidden names", "if", "obj_name", ".", "startswith", "(", "'_'", ")", "or", "obj_name", "==", "'Controller'", ":", "continue", "obj", "=", "getattr", "(", "module", ",", "obj_name", ")", "# include only controllers", "if", "issubclass", "(", "obj", ",", "Controller", ")", ":", "controllers", ".", "append", "(", "obj", ")", "return", "controllers", "def", "controller_to_path", "(", "controller", ")", ":", "\"\"\"\n Convert controller's name to a valid path.\n\n Make url in lower case by replace capital letters to small\n and adding underscore between words.\n \"\"\"", "words", "=", "re", ".", "findall", "(", "'[A-Z][a-z]*'", ",", "controller", ".", "__name__", ")", "if", "words", "[", "-", "1", "]", "==", "'Controller'", ":", "del", "words", "[", "-", "1", "]", "# transform words to a url address", "url_path", "=", "'_'", ".", "join", "(", "words", ")", ".", "lower", "(", ")", "# main controller is a root handler", "# TODO: root address inside the file should always come last", "if", "url_path", "==", "'main'", ":", "url_path", "=", "''", "return", "url_path", "# load all controllers (excluding main controllers)", "for", "file_name", "in", "listdir", "(", "'controllers'", ",", "get_files", "=", "True", ",", "hide_ignored", "=", "True", ")", ":", "# remove .py extension from file name", "app_name", "=", "file_name", ".", "split", "(", "'.'", ",", "1", ")", "[", "0", "]", "# we will include main controller at the end", "if", "app_name", "==", "'main'", ":", "continue", "# add url for each controller", "for", "controller", "in", "get_controllers", "(", "app_name", ")", ":", "url_path", "=", "controller_to_path", "(", "controller", ")", "urls", ".", "append", "(", "url", "(", "r'^{}/{}$'", ".", "format", "(", "app_name", ",", "url_path", ")", ",", "controller", ".", "as_view", "(", ")", ")", ")", "# add urls for main controllers", "for", "controller", "in", "get_controllers", "(", "'main'", ")", ":", "url_path", "=", "controller_to_path", "(", "controller", ")", "# map urls to a root path", "urls", ".", "append", "(", "url", "(", "r'^{}$'", ".", "format", "(", "url_path", ")", ",", "controller", ".", "as_view", "(", ")", ")", ")", "return", "urls" ]
Auto-map urls from the controllers directory. Files and classes whose names start with an underscore are ignored.
[ "Auto", "-", "map", "urls", "from", "controllers", "directory", "." ]
cfbe2d59acf0e89e5fd442df8952f9a117a63875
https://github.com/djangomini/djangomini/blob/cfbe2d59acf0e89e5fd442df8952f9a117a63875/djangomini/urls.py#L12-L80
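A minimal usage sketch for auto_discover in a Django project's root urls.py; the import path follows this repo's layout and the controller module names in the comment are illustrative:

from djangomini.urls import auto_discover

# auto_discover() returns a ready-to-use pattern list: the /admin/ route,
# one route per controller found under app/controllers/<file>.py (for
# example, a BlogController in app/controllers/blog.py maps to ^blog/blog$),
# and root-level routes for controllers defined in main.py.
urlpatterns = auto_discover()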
247,055
heikomuller/sco-client
scocli/scoserv.py
download_file
def download_file(url, suffix=''): """Download attached file as temporary file. Parameters ---------- url : string SCO-API download Url suffix : string, optional If suffix is specified, the name of the downloaded file will end with that suffix, otherwise there will be no suffix. Returns ------- string, string Path to downloaded file and file suffix """ r = urllib2.urlopen(url) # Save attached file in temp file and return path to temp file fd, f_path = tempfile.mkstemp(suffix=suffix) os.write(fd, r.read()) os.close(fd) return f_path, suffix
python
def download_file(url, suffix=''): """Download attached file as temporary file. Parameters ---------- url : string SCO-API download Url suffix : string, optional If suffix is specified, the name of the downloaded file will end with that suffix, otherwise there will be no suffix. Returns ------- string, string Path to downloaded file and file suffix """ r = urllib2.urlopen(url) # Save attached file in temp file and return path to temp file fd, f_path = tempfile.mkstemp(suffix=suffix) os.write(fd, r.read()) os.close(fd) return f_path, suffix
[ "def", "download_file", "(", "url", ",", "suffix", "=", "''", ")", ":", "r", "=", "urllib2", ".", "urlopen", "(", "url", ")", "# Save attached file in temp file and return path to temp file", "fd", ",", "f_path", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "suffix", ")", "os", ".", "write", "(", "fd", ",", "r", ".", "read", "(", ")", ")", "os", ".", "close", "(", "fd", ")", "return", "f_path", ",", "suffix" ]
Download attached file as temporary file. Parameters ---------- url : string SCO-API download Url suffix : string, optional If suffix is specified, the name of the downloaded file will end with that suffix, otherwise there will be no suffix. Returns ------- string, string Path to downloaded file and file suffix
[ "Download", "attached", "file", "as", "temporary", "file", "." ]
c4afab71297f73003379bba4c1679be9dcf7cef8
https://github.com/heikomuller/sco-client/blob/c4afab71297f73003379bba4c1679be9dcf7cef8/scocli/scoserv.py#L194-L215
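A minimal usage sketch; the download Url below is hypothetical, and the caller is responsible for removing the temporary file:

from scocli.scoserv import download_file

# Hypothetical SCO-API download Url; suffix controls the temp file's extension.
f_path, suffix = download_file('http://sco.example.org/files/f123', suffix='.tar.gz')
print(f_path)  # e.g. /tmp/tmpXXXXXX.tar.gz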
247,056
heikomuller/sco-client
scocli/scoserv.py
get_resource_listing
def get_resource_listing(url, offset, limit, properties):
    """Generic method to retrieve a resource listing from a SCO-API. Takes
    the resource-specific API listing Url as argument.

    Parameters
    ----------
    url : string
        Resource listing Url for a SCO-API
    offset : int, optional
        Starting offset for returned list items
    limit : int, optional
        Limit the number of items in the result
    properties : List(string)
        List of additional object properties to be included for items in
        the result

    Returns
    -------
    List(ResourceHandle)
        List of resource handles (one per subject in the object listing)
    """
    # Create listing query based on given arguments
    query = [
        QPARA_OFFSET + '=' + str(offset),
        QPARA_LIMIT + '=' + str(limit)
    ]
    # Add properties argument if property list is not None and not empty
    if not properties is None:
        if len(properties) > 0:
            query.append(QPARA_ATTRIBUTES + '=' + ','.join(properties))
    # Add query to Url.
    url = url + '?' + '&'.join(query)
    # Get subject listing Url for given SCO-API and decorate it with
    # given listing arguments. Then retrieve listing from SCO-API.
    json_obj = JsonResource(url).json
    # Convert result into a list of resource handles and return the result
    resources = []
    for element in json_obj['items']:
        resource = ResourceHandle(element)
        # Add additional properties to resource if list is given
        if not properties is None:
            resource.properties = {}
            for prop in properties:
                if prop in element:
                    resource.properties[prop] = element[prop]
        resources.append(resource)
    return resources
python
def get_resource_listing(url, offset, limit, properties):
    """Generic method to retrieve a resource listing from a SCO-API. Takes
    the resource-specific API listing Url as argument.

    Parameters
    ----------
    url : string
        Resource listing Url for a SCO-API
    offset : int, optional
        Starting offset for returned list items
    limit : int, optional
        Limit the number of items in the result
    properties : List(string)
        List of additional object properties to be included for items in
        the result

    Returns
    -------
    List(ResourceHandle)
        List of resource handles (one per subject in the object listing)
    """
    # Create listing query based on given arguments
    query = [
        QPARA_OFFSET + '=' + str(offset),
        QPARA_LIMIT + '=' + str(limit)
    ]
    # Add properties argument if property list is not None and not empty
    if not properties is None:
        if len(properties) > 0:
            query.append(QPARA_ATTRIBUTES + '=' + ','.join(properties))
    # Add query to Url.
    url = url + '?' + '&'.join(query)
    # Get subject listing Url for given SCO-API and decorate it with
    # given listing arguments. Then retrieve listing from SCO-API.
    json_obj = JsonResource(url).json
    # Convert result into a list of resource handles and return the result
    resources = []
    for element in json_obj['items']:
        resource = ResourceHandle(element)
        # Add additional properties to resource if list is given
        if not properties is None:
            resource.properties = {}
            for prop in properties:
                if prop in element:
                    resource.properties[prop] = element[prop]
        resources.append(resource)
    return resources
[ "def", "get_resource_listing", "(", "url", ",", "offset", ",", "limit", ",", "properties", ")", ":", "# Create listing query based on given arguments", "query", "=", "[", "QPARA_OFFSET", "+", "'='", "+", "str", "(", "offset", ")", ",", "QPARA_LIMIT", "+", "'='", "+", "str", "(", "limit", ")", "]", "# Add properties argument if property list is not None and not empty", "if", "not", "properties", "is", "None", ":", "if", "len", "(", "properties", ")", ">", "0", ":", "query", ".", "append", "(", "QPARA_ATTRIBUTES", "+", "'='", "+", "','", ".", "join", "(", "properties", ")", ")", "# Add query to Url.", "url", "=", "url", "+", "'?'", "+", "'&'", ".", "join", "(", "query", ")", "# Get subject listing Url for given SCO-API and decorate it with", "# given listing arguments. Then retrieve listing from SCO-API.", "json_obj", "=", "JsonResource", "(", "url", ")", ".", "json", "# Convert result into a list of resource handles and return the result", "resources", "=", "[", "]", "for", "element", "in", "json_obj", "[", "'items'", "]", ":", "resource", "=", "ResourceHandle", "(", "element", ")", "# Add additional properties to resource if list is given", "if", "not", "properties", "is", "None", ":", "resource", ".", "properties", "=", "{", "}", "for", "prop", "in", "properties", ":", "if", "prop", "in", "element", ":", "resource", ".", "properties", "[", "prop", "]", "=", "element", "[", "prop", "]", "resources", ".", "append", "(", "resource", ")", "return", "resources" ]
Generic method to retrieve a resource listing from a SCO-API. Takes the resource-specific API listing Url as argument.

Parameters
----------
url : string
    Resource listing Url for a SCO-API
offset : int, optional
    Starting offset for returned list items
limit : int, optional
    Limit the number of items in the result
properties : List(string)
    List of additional object properties to be included for items in
    the result

Returns
-------
List(ResourceHandle)
    List of resource handles (one per subject in the object listing)
[ "Gneric", "method", "to", "retrieve", "a", "resource", "listing", "from", "a", "SCO", "-", "API", ".", "Takes", "the", "resource", "-", "specific", "API", "listing", "Url", "as", "argument", "." ]
c4afab71297f73003379bba4c1679be9dcf7cef8
https://github.com/heikomuller/sco-client/blob/c4afab71297f73003379bba4c1679be9dcf7cef8/scocli/scoserv.py#L218-L264
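A usage sketch, assuming a hypothetical listing endpoint; note that only properties actually present on an item are copied into its handle:

from scocli.scoserv import get_resource_listing

subjects = get_resource_listing(
    'http://sco.example.org/api/subjects',   # hypothetical listing Url
    offset=10,                               # skip the first 10 items
    limit=50,                                # return at most 50 items
    properties=['filename', 'filesize']      # extra attributes to request
)
for handle in subjects:
    print(handle.properties)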
247,057
heikomuller/sco-client
scocli/scoserv.py
to_local_time
def to_local_time(timestamp):
    """Convert a datetime object from UTC time to local time.

    Adopted from:
    http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime

    Parameters
    ----------
    timestamp : string
        Default string representation of timestamps expected to be in
        UTC time zone

    Returns
    -------
    datetime
        Datetime object in local time zone
    """
    utc = dt.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
    # Get UTC and local time zone
    from_zone = tz.gettz('UTC')
    to_zone = tz.tzlocal()
    # Tell the utc object that it is in UTC time zone
    utc = utc.replace(tzinfo=from_zone)
    # Convert time zone
    return utc.astimezone(to_zone)
python
def to_local_time(timestamp):
    """Convert a datetime object from UTC time to local time.

    Adopted from:
    http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime

    Parameters
    ----------
    timestamp : string
        Default string representation of timestamps expected to be in
        UTC time zone

    Returns
    -------
    datetime
        Datetime object in local time zone
    """
    utc = dt.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
    # Get UTC and local time zone
    from_zone = tz.gettz('UTC')
    to_zone = tz.tzlocal()
    # Tell the utc object that it is in UTC time zone
    utc = utc.replace(tzinfo=from_zone)
    # Convert time zone
    return utc.astimezone(to_zone)
[ "def", "to_local_time", "(", "timestamp", ")", ":", "utc", "=", "dt", ".", "datetime", ".", "strptime", "(", "timestamp", ",", "'%Y-%m-%dT%H:%M:%S.%f'", ")", "# Get UTC and local time zone", "from_zone", "=", "tz", ".", "gettz", "(", "'UTC'", ")", "to_zone", "=", "tz", ".", "tzlocal", "(", ")", "# Tell the utc object that it is in UTC time zone", "utc", "=", "utc", ".", "replace", "(", "tzinfo", "=", "from_zone", ")", "# Convert time zone", "return", "utc", ".", "astimezone", "(", "to_zone", ")" ]
Convert a datetime object from UTC time to local time.

Adopted from:
http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime

Parameters
----------
timestamp : string
    Default string representation of timestamps expected to be in
    UTC time zone

Returns
-------
datetime
    Datetime object in local time zone
[ "Convert", "a", "datatime", "object", "from", "UTC", "time", "to", "local", "time", "." ]
c4afab71297f73003379bba4c1679be9dcf7cef8
https://github.com/heikomuller/sco-client/blob/c4afab71297f73003379bba4c1679be9dcf7cef8/scocli/scoserv.py#L303-L329
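A worked example; the printed value assumes the host's local zone is UTC+2, so the same call prints differently elsewhere:

from scocli.scoserv import to_local_time

# Input must be a UTC timestamp matching '%Y-%m-%dT%H:%M:%S.%f'.
local = to_local_time('2016-03-01T12:30:00.000000')
print(local)  # on a UTC+2 host: 2016-03-01 14:30:00+02:00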
247,058
Clarify/clarify_brightcove_sync
clarify_brightcove_sync/brightcove_api_client.py
BrightcoveAPIClient._load_secret
def _load_secret(self, creds_file): '''read the oauth secrets and account ID from a credentials configuration file''' try: with open(creds_file) as fp: creds = json.load(fp) return creds except Exception as e: sys.stderr.write("Error loading oauth secret from local file called '{0}'\n".format(creds_file)) sys.stderr.write("\tThere should be a local OAuth credentials file \n") sys.stderr.write("\twhich has contents like this:\n") sys.stderr.write(""" { "account_id": "1234567890001", "client_id": "30ff0909-0909-33d3-ae88-c9887777a7b7", "client_secret": "mzKKjZZyeW5YgsdfBD37c5730g397agU35-Dsgeox6-73giehbt0996nQ" } """) sys.stderr.write("\n") raise e
python
def _load_secret(self, creds_file): '''read the oauth secrets and account ID from a credentials configuration file''' try: with open(creds_file) as fp: creds = json.load(fp) return creds except Exception as e: sys.stderr.write("Error loading oauth secret from local file called '{0}'\n".format(creds_file)) sys.stderr.write("\tThere should be a local OAuth credentials file \n") sys.stderr.write("\twhich has contents like this:\n") sys.stderr.write(""" { "account_id": "1234567890001", "client_id": "30ff0909-0909-33d3-ae88-c9887777a7b7", "client_secret": "mzKKjZZyeW5YgsdfBD37c5730g397agU35-Dsgeox6-73giehbt0996nQ" } """) sys.stderr.write("\n") raise e
[ "def", "_load_secret", "(", "self", ",", "creds_file", ")", ":", "try", ":", "with", "open", "(", "creds_file", ")", "as", "fp", ":", "creds", "=", "json", ".", "load", "(", "fp", ")", "return", "creds", "except", "Exception", "as", "e", ":", "sys", ".", "stderr", ".", "write", "(", "\"Error loading oauth secret from local file called '{0}'\\n\"", ".", "format", "(", "creds_file", ")", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\tThere should be a local OAuth credentials file \\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\twhich has contents like this:\\n\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"\"\"\n {\n \"account_id\": \"1234567890001\",\n \"client_id\": \"30ff0909-0909-33d3-ae88-c9887777a7b7\",\n \"client_secret\": \"mzKKjZZyeW5YgsdfBD37c5730g397agU35-Dsgeox6-73giehbt0996nQ\"\n }\n\n \"\"\"", ")", "sys", ".", "stderr", ".", "write", "(", "\"\\n\"", ")", "raise", "e" ]
read the oauth secrets and account ID from a credentials configuration file
[ "read", "the", "oauth", "secrets", "and", "account", "ID", "from", "a", "credentials", "configuration", "file" ]
cda4443a40e72f1fb02af3d671d8f3f5f9644d24
https://github.com/Clarify/clarify_brightcove_sync/blob/cda4443a40e72f1fb02af3d671d8f3f5f9644d24/clarify_brightcove_sync/brightcove_api_client.py#L61-L80
247,059
Clarify/clarify_brightcove_sync
clarify_brightcove_sync/brightcove_api_client.py
BrightcoveAPIClient.get_video_count
def get_video_count(self, search_q=None): '''Return the number of videos in the account''' if search_q is not None: params = {'q': search_q} else: params = None url = "/counts/videos" result = self._make_request(self.CMS_Server, 'GET', url, params=params) return result['count']
python
def get_video_count(self, search_q=None): '''Return the number of videos in the account''' if search_q is not None: params = {'q': search_q} else: params = None url = "/counts/videos" result = self._make_request(self.CMS_Server, 'GET', url, params=params) return result['count']
[ "def", "get_video_count", "(", "self", ",", "search_q", "=", "None", ")", ":", "if", "search_q", "is", "not", "None", ":", "params", "=", "{", "'q'", ":", "search_q", "}", "else", ":", "params", "=", "None", "url", "=", "\"/counts/videos\"", "result", "=", "self", ".", "_make_request", "(", "self", ".", "CMS_Server", ",", "'GET'", ",", "url", ",", "params", "=", "params", ")", "return", "result", "[", "'count'", "]" ]
Return the number of videos in the account
[ "Return", "the", "number", "of", "videos", "in", "the", "account" ]
cda4443a40e72f1fb02af3d671d8f3f5f9644d24
https://github.com/Clarify/clarify_brightcove_sync/blob/cda4443a40e72f1fb02af3d671d8f3f5f9644d24/clarify_brightcove_sync/brightcove_api_client.py#L160-L169
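A usage sketch, assuming client is an already-authenticated BrightcoveAPIClient; the search query value is illustrative and follows the CMS API's search syntax:

total = client.get_video_count()
print('account holds {0} videos'.format(total))

# Count only videos matching a CMS API search query.
tagged = client.get_video_count(search_q='+tags:cats')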
247,060
Clarify/clarify_brightcove_sync
clarify_brightcove_sync/brightcove_api_client.py
BrightcoveAPIClient.post_video
def post_video(self, videoUrl, name=None, ingestMedia=True): '''Post and optionally ingest media from the specified URL''' if name is None: name = os.path.basename(videoUrl) url = '/videos' data = {'name': name} new_video = self._make_request(self.CMS_Server, 'POST', url, data=data) if ingestMedia: self.ingest_video(new_video['id'], videoUrl) return new_video
python
def post_video(self, videoUrl, name=None, ingestMedia=True): '''Post and optionally ingest media from the specified URL''' if name is None: name = os.path.basename(videoUrl) url = '/videos' data = {'name': name} new_video = self._make_request(self.CMS_Server, 'POST', url, data=data) if ingestMedia: self.ingest_video(new_video['id'], videoUrl) return new_video
[ "def", "post_video", "(", "self", ",", "videoUrl", ",", "name", "=", "None", ",", "ingestMedia", "=", "True", ")", ":", "if", "name", "is", "None", ":", "name", "=", "os", ".", "path", ".", "basename", "(", "videoUrl", ")", "url", "=", "'/videos'", "data", "=", "{", "'name'", ":", "name", "}", "new_video", "=", "self", ".", "_make_request", "(", "self", ".", "CMS_Server", ",", "'POST'", ",", "url", ",", "data", "=", "data", ")", "if", "ingestMedia", ":", "self", ".", "ingest_video", "(", "new_video", "[", "'id'", "]", ",", "videoUrl", ")", "return", "new_video" ]
Post and optionally ingest media from the specified URL
[ "Post", "and", "optionally", "ingest", "media", "from", "the", "specified", "URL" ]
cda4443a40e72f1fb02af3d671d8f3f5f9644d24
https://github.com/Clarify/clarify_brightcove_sync/blob/cda4443a40e72f1fb02af3d671d8f3f5f9644d24/clarify_brightcove_sync/brightcove_api_client.py#L212-L222
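A usage sketch with a hypothetical media Url, again assuming an authenticated client:

# Create the video object and immediately ingest the media at the Url;
# the name defaults to the file's basename ('intro.mp4' here).
video = client.post_video('http://media.example.org/clips/intro.mp4')
print(video['id'])

# Create the metadata record only; ingest later with ingest_video().
stub = client.post_video('http://media.example.org/clips/intro.mp4',
                         name='Intro clip', ingestMedia=False)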
247,061
cbrand/vpnchooser
src/vpnchooser/connection/client.py
Client.connect
def connect(self): """ Connects the client to the server and returns it. """ key = paramiko.RSAKey(data=base64.b64decode( app.config['SSH_HOST_KEY'] )) client = paramiko.SSHClient() client.get_host_keys().add( app.config['SSH_HOST'], 'ssh-rsa', key ) client.connect( app.config['SSH_HOST'], username=app.config['SSH_USER'], password=app.config['SSH_PASSWORD'], ) return client
python
def connect(self): """ Connects the client to the server and returns it. """ key = paramiko.RSAKey(data=base64.b64decode( app.config['SSH_HOST_KEY'] )) client = paramiko.SSHClient() client.get_host_keys().add( app.config['SSH_HOST'], 'ssh-rsa', key ) client.connect( app.config['SSH_HOST'], username=app.config['SSH_USER'], password=app.config['SSH_PASSWORD'], ) return client
[ "def", "connect", "(", "self", ")", ":", "key", "=", "paramiko", ".", "RSAKey", "(", "data", "=", "base64", ".", "b64decode", "(", "app", ".", "config", "[", "'SSH_HOST_KEY'", "]", ")", ")", "client", "=", "paramiko", ".", "SSHClient", "(", ")", "client", ".", "get_host_keys", "(", ")", ".", "add", "(", "app", ".", "config", "[", "'SSH_HOST'", "]", ",", "'ssh-rsa'", ",", "key", ")", "client", ".", "connect", "(", "app", ".", "config", "[", "'SSH_HOST'", "]", ",", "username", "=", "app", ".", "config", "[", "'SSH_USER'", "]", ",", "password", "=", "app", ".", "config", "[", "'SSH_PASSWORD'", "]", ",", ")", "return", "client" ]
Connects the client to the server and returns it.
[ "Connects", "the", "client", "to", "the", "server", "and", "returns", "it", "." ]
d153e3d05555c23cf5e8e15e507eecad86465923
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L20-L39
247,062
cbrand/vpnchooser
src/vpnchooser/connection/client.py
Client.server_rules
def server_rules(self):
    """
    Reads the server rules from the client and returns them.
    """
    sftp = self.client.open_sftp()
    try:
        rule_path = self.rule_location
        try:
            stat_entry = sftp.stat(rule_path)
            if stat.S_ISDIR(stat_entry.st_mode):
                sftp.rmdir(rule_path)
                return []
        except IOError:
            return []
        with sftp.open(rule_path, 'r') as file_handle:
            data = file_handle.read()
        return self._parse(data)
    finally:
        sftp.close()
python
def server_rules(self):
    """
    Reads the server rules from the client and returns them.
    """
    sftp = self.client.open_sftp()
    try:
        rule_path = self.rule_location
        try:
            stat_entry = sftp.stat(rule_path)
            if stat.S_ISDIR(stat_entry.st_mode):
                sftp.rmdir(rule_path)
                return []
        except IOError:
            return []
        with sftp.open(rule_path, 'r') as file_handle:
            data = file_handle.read()
        return self._parse(data)
    finally:
        sftp.close()
[ "def", "server_rules", "(", "self", ")", ":", "sftp", "=", "self", ".", "client", ".", "open_sftp", "(", ")", "try", ":", "rule_path", "=", "self", ".", "rule_location", "try", ":", "stat_entry", "=", "sftp", ".", "stat", "(", "rule_path", ")", "if", "stat", ".", "S_ISDIR", "(", "stat_entry", ".", "st_mode", ")", ":", "sftp", ".", "rmdir", "(", "rule_path", ")", "return", "[", "]", "except", "IOError", ":", "return", "[", "]", "with", "sftp", ".", "open", "(", "rule_path", ",", "'r'", ")", "as", "file_handle", ":", "data", "=", "file_handle", ".", "read", "(", ")", "return", "self", ".", "_parse", "(", "data", ")", "finally", ":", "sftp", ".", "close", "(", ")" ]
Reads the server rules from the client and returns them.
[ "Reads", "the", "server", "rules", "from", "the", "client", "and", "returns", "it", "." ]
d153e3d05555c23cf5e8e15e507eecad86465923
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L60-L79
247,063
cbrand/vpnchooser
src/vpnchooser/connection/client.py
Client._parse
def _parse(data: str) -> list: """ Parses the given data string and returns a list of rule objects. """ if isinstance(data, bytes): data = data.decode('utf-8') lines = ( item for item in (item.strip() for item in data.split('\n')) if len(item) and not item.startswith('#') ) rules = [] for line in lines: rules.append( Rule.parse(line) ) return rules
python
def _parse(data: str) -> list: """ Parses the given data string and returns a list of rule objects. """ if isinstance(data, bytes): data = data.decode('utf-8') lines = ( item for item in (item.strip() for item in data.split('\n')) if len(item) and not item.startswith('#') ) rules = [] for line in lines: rules.append( Rule.parse(line) ) return rules
[ "def", "_parse", "(", "data", ":", "str", ")", "->", "list", ":", "if", "isinstance", "(", "data", ",", "bytes", ")", ":", "data", "=", "data", ".", "decode", "(", "'utf-8'", ")", "lines", "=", "(", "item", "for", "item", "in", "(", "item", ".", "strip", "(", ")", "for", "item", "in", "data", ".", "split", "(", "'\\n'", ")", ")", "if", "len", "(", "item", ")", "and", "not", "item", ".", "startswith", "(", "'#'", ")", ")", "rules", "=", "[", "]", "for", "line", "in", "lines", ":", "rules", ".", "append", "(", "Rule", ".", "parse", "(", "line", ")", ")", "return", "rules" ]
Parses the given data string and returns a list of rule objects.
[ "Parses", "the", "given", "data", "string", "and", "returns", "a", "list", "of", "rule", "objects", "." ]
d153e3d05555c23cf5e8e15e507eecad86465923
https://github.com/cbrand/vpnchooser/blob/d153e3d05555c23cf5e8e15e507eecad86465923/src/vpnchooser/connection/client.py#L82-L99
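A worked input/output sketch; the signature takes no self, so this assumes _parse is exposed as a staticmethod on Client, and the rule text accepted by Rule.parse is project-specific and only illustrative here:

data = b"# comment line\n\n  10.0.0.1 vpn1\nanother rule\n"
# Blank lines and '#' comments are dropped, remaining lines are stripped,
# and each survivor is handed to Rule.parse, so this returns the same as
# [Rule.parse('10.0.0.1 vpn1'), Rule.parse('another rule')].
rules = Client._parse(data)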
247,064
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
jaccardIndex
def jaccardIndex(s1, s2, stranded=False): """ Compute the Jaccard index for two collections of genomic intervals :param s1: the first set of genomic intervals :param s2: the second set of genomic intervals :param stranded: if True, treat regions on different strands as not intersecting each other, even if they occupy the same genomic region. :return: Jaccard index """ def count(s): """ sum the size of regions in s. """ tot = 0 for r in s: tot += len(r) return tot if stranded: raise GenomicIntervalError("Sorry, stranded mode for computing Jaccard " + "index hasn't been implemented yet.") s1 = collapseRegions(s1) s2 = collapseRegions(s2) intersection = regionsIntersection(s1, s2) c_i = count(intersection) return c_i / float(count(s1) + count(s2) - c_i)
python
def jaccardIndex(s1, s2, stranded=False): """ Compute the Jaccard index for two collections of genomic intervals :param s1: the first set of genomic intervals :param s2: the second set of genomic intervals :param stranded: if True, treat regions on different strands as not intersecting each other, even if they occupy the same genomic region. :return: Jaccard index """ def count(s): """ sum the size of regions in s. """ tot = 0 for r in s: tot += len(r) return tot if stranded: raise GenomicIntervalError("Sorry, stranded mode for computing Jaccard " + "index hasn't been implemented yet.") s1 = collapseRegions(s1) s2 = collapseRegions(s2) intersection = regionsIntersection(s1, s2) c_i = count(intersection) return c_i / float(count(s1) + count(s2) - c_i)
[ "def", "jaccardIndex", "(", "s1", ",", "s2", ",", "stranded", "=", "False", ")", ":", "def", "count", "(", "s", ")", ":", "\"\"\" sum the size of regions in s. \"\"\"", "tot", "=", "0", "for", "r", "in", "s", ":", "tot", "+=", "len", "(", "r", ")", "return", "tot", "if", "stranded", ":", "raise", "GenomicIntervalError", "(", "\"Sorry, stranded mode for computing Jaccard \"", "+", "\"index hasn't been implemented yet.\"", ")", "s1", "=", "collapseRegions", "(", "s1", ")", "s2", "=", "collapseRegions", "(", "s2", ")", "intersection", "=", "regionsIntersection", "(", "s1", ",", "s2", ")", "c_i", "=", "count", "(", "intersection", ")", "return", "c_i", "/", "float", "(", "count", "(", "s1", ")", "+", "count", "(", "s2", ")", "-", "c_i", ")" ]
Compute the Jaccard index for two collections of genomic intervals :param s1: the first set of genomic intervals :param s2: the second set of genomic intervals :param stranded: if True, treat regions on different strands as not intersecting each other, even if they occupy the same genomic region. :return: Jaccard index
[ "Compute", "the", "Jaccard", "index", "for", "two", "collections", "of", "genomic", "intervals" ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L57-L84
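A worked numeric example of the formula J = |intersection| / (|s1| + |s2| - |intersection|); it assumes GenomicInterval's name/score/strand arguments are optional, as the full seven-argument form used elsewhere in this module suggests:

from pyokit.datastruct.genomicInterval import GenomicInterval, jaccardIndex

s1 = [GenomicInterval('chr1', 0, 100)]
s2 = [GenomicInterval('chr1', 50, 150)]
# intersection covers 50 bases; union covers 100 + 100 - 50 = 150 bases,
# so the Jaccard index is 50 / 150 = 0.333...
print(jaccardIndex(s1, s2))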
247,065
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
intervalTreesFromList
def intervalTreesFromList(inElements, verbose=False, openEnded=False): """ build a dictionary, indexed by chrom name, of interval trees for each chrom. :param inElements: list of genomic intervals. Members of the list must have chrom, start and end fields; no other restrictions. :param verbose: output progress messages to sys.stderr if True """ elements = {} if verbose: totalLines = len(inElements) pind = ProgressIndicator(totalToDo=totalLines, messagePrefix="completed", messageSuffix="of parsing") for element in inElements: if element.chrom not in elements: elements[element.chrom] = [] elements[element.chrom].append(element) if verbose: pind.done += 1 pind.showProgress() # create an interval tree for each list trees = {} if verbose: totalLines = len(elements) pind = ProgressIndicator(totalToDo=totalLines, messagePrefix="completed", messageSuffix="of making interval trees") for chrom in elements: trees[chrom] = IntervalTree(elements[chrom], openEnded) if verbose: pind.done += 1 pind.showProgress() return trees
python
def intervalTreesFromList(inElements, verbose=False, openEnded=False): """ build a dictionary, indexed by chrom name, of interval trees for each chrom. :param inElements: list of genomic intervals. Members of the list must have chrom, start and end fields; no other restrictions. :param verbose: output progress messages to sys.stderr if True """ elements = {} if verbose: totalLines = len(inElements) pind = ProgressIndicator(totalToDo=totalLines, messagePrefix="completed", messageSuffix="of parsing") for element in inElements: if element.chrom not in elements: elements[element.chrom] = [] elements[element.chrom].append(element) if verbose: pind.done += 1 pind.showProgress() # create an interval tree for each list trees = {} if verbose: totalLines = len(elements) pind = ProgressIndicator(totalToDo=totalLines, messagePrefix="completed", messageSuffix="of making interval trees") for chrom in elements: trees[chrom] = IntervalTree(elements[chrom], openEnded) if verbose: pind.done += 1 pind.showProgress() return trees
[ "def", "intervalTreesFromList", "(", "inElements", ",", "verbose", "=", "False", ",", "openEnded", "=", "False", ")", ":", "elements", "=", "{", "}", "if", "verbose", ":", "totalLines", "=", "len", "(", "inElements", ")", "pind", "=", "ProgressIndicator", "(", "totalToDo", "=", "totalLines", ",", "messagePrefix", "=", "\"completed\"", ",", "messageSuffix", "=", "\"of parsing\"", ")", "for", "element", "in", "inElements", ":", "if", "element", ".", "chrom", "not", "in", "elements", ":", "elements", "[", "element", ".", "chrom", "]", "=", "[", "]", "elements", "[", "element", ".", "chrom", "]", ".", "append", "(", "element", ")", "if", "verbose", ":", "pind", ".", "done", "+=", "1", "pind", ".", "showProgress", "(", ")", "# create an interval tree for each list", "trees", "=", "{", "}", "if", "verbose", ":", "totalLines", "=", "len", "(", "elements", ")", "pind", "=", "ProgressIndicator", "(", "totalToDo", "=", "totalLines", ",", "messagePrefix", "=", "\"completed\"", ",", "messageSuffix", "=", "\"of making interval trees\"", ")", "for", "chrom", "in", "elements", ":", "trees", "[", "chrom", "]", "=", "IntervalTree", "(", "elements", "[", "chrom", "]", ",", "openEnded", ")", "if", "verbose", ":", "pind", ".", "done", "+=", "1", "pind", ".", "showProgress", "(", ")", "return", "trees" ]
build a dictionary, indexed by chrom name, of interval trees for each chrom. :param inElements: list of genomic intervals. Members of the list must have chrom, start and end fields; no other restrictions. :param verbose: output progress messages to sys.stderr if True
[ "build", "a", "dictionary", "indexed", "by", "chrom", "name", "of", "interval", "trees", "for", "each", "chrom", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L87-L123
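A usage sketch; the trees dictionary is keyed by chromosome name, and the query-method name on pyokit's IntervalTree is an assumption here, not confirmed by this record:

from pyokit.datastruct.genomicInterval import GenomicInterval, intervalTreesFromList

trees = intervalTreesFromList([GenomicInterval('chr1', 0, 50),
                               GenomicInterval('chr1', 40, 90),
                               GenomicInterval('chr2', 10, 20)])
# One tree per chromosome; the lookup method below is hypothetical.
hits = trees['chr1'].intersectingInterval(45, 60)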
247,066
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
regionsIntersection
def regionsIntersection(s1, s2, collapse=True): """ given two lists of genomic regions with chromosome, start and end coordinates, return a new list of regions which is the intersection of those two sets. Lists must be sorted by chromosome and start index :return: new list that represents the intersection of the two input lists. output regions will all have name "X", be on strand "+" and have score 0 :param s1: first list of genomic regions :param s2: second list of genomic regions :raise GenomicIntervalError: if the input regions are not sorted correctly (by chromosome and start index) :note: O(n) time, O(n) space; informally, might use up to 3x space of input """ debug = False # we don't need to explicitly check for sorting because sorted order is # a post-condition of the collapsing function s1_c = collapseRegions(s1) s2_c = collapseRegions(s2) if len(s1_c) == 0 or len(s2_c) == 0: return [] res = [] j = 0 for i in range(0, len(s1_c)): if debug: sys.stderr.write("processing from s1_c : " + str(s1_c[i]) + "\n") # find first thing in s2_c with end in or after s1_c[i] if debug: sys.stderr.write("i = " + str(i) + " and j = " + str(j) + "\n") while (j < len(s2_c) and (s2_c[j].chrom < s1_c[i].chrom or (s2_c[j].chrom == s1_c[i].chrom and s2_c[j].end <= s1_c[i].start))): j += 1 # nothing intersects if we hit the end of s2, or the end of the chrom, # or we're still on the same chrom but start after the end of s2_c[i] if j >= len(s2_c) or s2_c[j].chrom > s1_c[i].chrom or \ (s2_c[j].chrom == s1_c[i].chrom and s2_c[j].start >= s1_c[i].end): continue # now everything at or after j in s2_c that starts before # the end of s1_c must overlap with it while s2_c[j].start < s1_c[i].end: s = max(s1_c[i].start, s2_c[j].start) e = min(s1_c[i].end, s2_c[j].end) overlap = GenomicInterval(s1_c[i].chrom, s, e, "X", 0, "+") if debug: sys.stderr.write("\tadding to overlaps: " + str(overlap) + "\n") res.append(overlap) j += 1 if j >= len(s2_c) or s2_c[j].chrom != s1_c[i].chrom: break # it's possible the last intersecting element runs on to the # next element from s1_c, so... j -= 1 if debug: sys.stderr.write("\tmoving s2_c index back to " + str(s2_c[j]) + "\n") return res
python
def regionsIntersection(s1, s2, collapse=True): """ given two lists of genomic regions with chromosome, start and end coordinates, return a new list of regions which is the intersection of those two sets. Lists must be sorted by chromosome and start index :return: new list that represents the intersection of the two input lists. output regions will all have name "X", be on strand "+" and have score 0 :param s1: first list of genomic regions :param s2: second list of genomic regions :raise GenomicIntervalError: if the input regions are not sorted correctly (by chromosome and start index) :note: O(n) time, O(n) space; informally, might use up to 3x space of input """ debug = False # we don't need to explicitly check for sorting because sorted order is # a post-condition of the collapsing function s1_c = collapseRegions(s1) s2_c = collapseRegions(s2) if len(s1_c) == 0 or len(s2_c) == 0: return [] res = [] j = 0 for i in range(0, len(s1_c)): if debug: sys.stderr.write("processing from s1_c : " + str(s1_c[i]) + "\n") # find first thing in s2_c with end in or after s1_c[i] if debug: sys.stderr.write("i = " + str(i) + " and j = " + str(j) + "\n") while (j < len(s2_c) and (s2_c[j].chrom < s1_c[i].chrom or (s2_c[j].chrom == s1_c[i].chrom and s2_c[j].end <= s1_c[i].start))): j += 1 # nothing intersects if we hit the end of s2, or the end of the chrom, # or we're still on the same chrom but start after the end of s2_c[i] if j >= len(s2_c) or s2_c[j].chrom > s1_c[i].chrom or \ (s2_c[j].chrom == s1_c[i].chrom and s2_c[j].start >= s1_c[i].end): continue # now everything at or after j in s2_c that starts before # the end of s1_c must overlap with it while s2_c[j].start < s1_c[i].end: s = max(s1_c[i].start, s2_c[j].start) e = min(s1_c[i].end, s2_c[j].end) overlap = GenomicInterval(s1_c[i].chrom, s, e, "X", 0, "+") if debug: sys.stderr.write("\tadding to overlaps: " + str(overlap) + "\n") res.append(overlap) j += 1 if j >= len(s2_c) or s2_c[j].chrom != s1_c[i].chrom: break # it's possible the last intersecting element runs on to the # next element from s1_c, so... j -= 1 if debug: sys.stderr.write("\tmoving s2_c index back to " + str(s2_c[j]) + "\n") return res
[ "def", "regionsIntersection", "(", "s1", ",", "s2", ",", "collapse", "=", "True", ")", ":", "debug", "=", "False", "# we don't need to explicitly check for sorting because sorted order is", "# a post-condition of the collapsing function", "s1_c", "=", "collapseRegions", "(", "s1", ")", "s2_c", "=", "collapseRegions", "(", "s2", ")", "if", "len", "(", "s1_c", ")", "==", "0", "or", "len", "(", "s2_c", ")", "==", "0", ":", "return", "[", "]", "res", "=", "[", "]", "j", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "s1_c", ")", ")", ":", "if", "debug", ":", "sys", ".", "stderr", ".", "write", "(", "\"processing from s1_c : \"", "+", "str", "(", "s1_c", "[", "i", "]", ")", "+", "\"\\n\"", ")", "# find first thing in s2_c with end in or after s1_c[i]", "if", "debug", ":", "sys", ".", "stderr", ".", "write", "(", "\"i = \"", "+", "str", "(", "i", ")", "+", "\" and j = \"", "+", "str", "(", "j", ")", "+", "\"\\n\"", ")", "while", "(", "j", "<", "len", "(", "s2_c", ")", "and", "(", "s2_c", "[", "j", "]", ".", "chrom", "<", "s1_c", "[", "i", "]", ".", "chrom", "or", "(", "s2_c", "[", "j", "]", ".", "chrom", "==", "s1_c", "[", "i", "]", ".", "chrom", "and", "s2_c", "[", "j", "]", ".", "end", "<=", "s1_c", "[", "i", "]", ".", "start", ")", ")", ")", ":", "j", "+=", "1", "# nothing intersects if we hit the end of s2, or the end of the chrom,", "# or we're still on the same chrom but start after the end of s2_c[i]", "if", "j", ">=", "len", "(", "s2_c", ")", "or", "s2_c", "[", "j", "]", ".", "chrom", ">", "s1_c", "[", "i", "]", ".", "chrom", "or", "(", "s2_c", "[", "j", "]", ".", "chrom", "==", "s1_c", "[", "i", "]", ".", "chrom", "and", "s2_c", "[", "j", "]", ".", "start", ">=", "s1_c", "[", "i", "]", ".", "end", ")", ":", "continue", "# now everything at or after j in s2_c that starts before", "# the end of s1_c must overlap with it", "while", "s2_c", "[", "j", "]", ".", "start", "<", "s1_c", "[", "i", "]", ".", "end", ":", "s", "=", "max", "(", "s1_c", "[", "i", "]", ".", "start", ",", "s2_c", "[", "j", "]", ".", "start", ")", "e", "=", "min", "(", "s1_c", "[", "i", "]", ".", "end", ",", "s2_c", "[", "j", "]", ".", "end", ")", "overlap", "=", "GenomicInterval", "(", "s1_c", "[", "i", "]", ".", "chrom", ",", "s", ",", "e", ",", "\"X\"", ",", "0", ",", "\"+\"", ")", "if", "debug", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\tadding to overlaps: \"", "+", "str", "(", "overlap", ")", "+", "\"\\n\"", ")", "res", ".", "append", "(", "overlap", ")", "j", "+=", "1", "if", "j", ">=", "len", "(", "s2_c", ")", "or", "s2_c", "[", "j", "]", ".", "chrom", "!=", "s1_c", "[", "i", "]", ".", "chrom", ":", "break", "# it's possible the last intersecting element runs on to the", "# next element from s1_c, so...", "j", "-=", "1", "if", "debug", ":", "sys", ".", "stderr", ".", "write", "(", "\"\\tmoving s2_c index back to \"", "+", "str", "(", "s2_c", "[", "j", "]", ")", "+", "\"\\n\"", ")", "return", "res" ]
given two lists of genomic regions with chromosome, start and end coordinates, return a new list of regions which is the intersection of those two sets. Lists must be sorted by chromosome and start index :return: new list that represents the intersection of the two input lists. output regions will all have name "X", be on strand "+" and have score 0 :param s1: first list of genomic regions :param s2: second list of genomic regions :raise GenomicIntervalError: if the input regions are not sorted correctly (by chromosome and start index) :note: O(n) time, O(n) space; informally, might use up to 3x space of input
[ "given", "two", "lists", "of", "genomic", "regions", "with", "chromosome", "start", "and", "end", "coordinates", "return", "a", "new", "list", "of", "regions", "which", "is", "the", "intersection", "of", "those", "two", "sets", ".", "Lists", "must", "be", "sorted", "by", "chromosome", "and", "start", "index" ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L230-L293
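A small worked example; inputs are collapsed internally, and every output region carries name 'X', score 0 and strand '+':

from pyokit.datastruct.genomicInterval import GenomicInterval, regionsIntersection

s1 = [GenomicInterval('chr1', 0, 60), GenomicInterval('chr1', 80, 100)]
s2 = [GenomicInterval('chr1', 50, 90)]
# Overlaps are chr1:50-60 and chr1:80-90.
for r in regionsIntersection(s1, s2):
  print("%s %d %d" % (r.chrom, r.start, r.end))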
247,067
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
bucketIterator
def bucketIterator(elements, buckets):
  """
  For each bucket in buckets, yield it and any elements that overlap it.

  :param elements: the genomic intervals to place into the buckets. Must be
                   sorted by chromosome and start index. This could be a
                   list, or an iterator.
  :param buckets: the buckets into which genomic intervals should be binned.
                  Must be sorted by chromosome and start index. This could
                  be a list, or an iterator
  :return: iterator that will yield a tuple of 1 bucket and 1 list of
           elements in the bucket for each call to __next__().
  """
  def check_sorted(current, previous):
    if (previous is not None) and \
       ((previous.chrom > current.chrom) or
        ((previous.chrom == current.chrom) and
         (previous.start > current.start))):
      raise GenomicIntervalError("elements not sorted. Saw " + str(previous) +
                                 " before " + str(current))

  def updateOpen(openHeap, elementIterator, bucketChrom,
                 bucketStart, bucketEnd):
    """
    Drop elements from heap which start earlier than current bucket.

    Update the open heap so that it contains only elements that end after the
    start of the current bucket. Note that the heap may already contain some
    elements that start after the end of the current bucket, if a previous
    bucket ended after the end of this one and brought them into the set.

    :param openHeap: a min heap of elements; uses the default sorting order
                     for the genomic intervals, which is by end index. This
                     is what we're updating.
    :param elementIterator: an iterator from which we will pull new elements.
                            Elements yielded by this iterator must be sorted
                            by start index. Must be 'peekable'
    :param bucketChrom: the chromosome of the current bucket.
    :param bucketStart: the start index of the current bucket.
    :param bucketEnd: the end index of the current bucket.
    """
    # first, we're going to pop elements from the heap which can no longer
    # overlap this or any future buckets. Buckets are sorted by start, so
    # we'll never see another bucket that starts earlier than this one --
    # hence any elements that end before the start of this bucket will never be
    # used again and can be dropped. Elements in the heap are ordered by end
    # index, so once we reach an element in the heap that does not end before
    # the start of this bucket, we are sure that no others will come after it
    # which do end before the start of this bucket. So we can stop dropping.
    while len(openHeap) > 0 and ((openHeap[0].chrom < bucketChrom) or
                                 ((openHeap[0].chrom == bucketChrom) and
                                  (openHeap[0].end <= bucketStart))):
      heappop(openHeap)

    # now we're going to add new elements from the iterator to the heap. As
    # we know that elements in the iterator are sorted by start index, we know
    # that once we see an element that has a start index greater than the end
    # of this bucket, we can stop -- everything else after it will also start
    # after the end of this bucket.
    while (elementIterator.peek() is not None) and \
          ((elementIterator.peek().chrom < bucketChrom) or
           ((elementIterator.peek().chrom == bucketChrom) and
            (elementIterator.peek().start < bucketEnd))):
      e = elementIterator.__next__()
      # if e falls before this bucket, we can skip it; buckets are sorted by
      # start, so no other buckets start earlier than this one and so it
      # cannot intersect any others.
      if (e.chrom < bucketChrom) or \
         (e.chrom == bucketChrom and e.end <= bucketStart):
        continue
      # now we know e intersects this bucket..
      heappush(openHeap, e)

  openElems = []
  prevBucket = None
  elementIter = AutoApplyIterator(elements, check_sorted)
  for bucket in buckets:
    # make sure the buckets are sorted by start index
    if prevBucket is not None and ((bucket.chrom < prevBucket.chrom) or
                                   (bucket.chrom == prevBucket.chrom and
                                    bucket.start < prevBucket.start)):
      raise GenomicIntervalError("regions-of-interest are not sorted. Saw " +
                                 str(prevBucket) + " before " + str(bucket))
    updateOpen(openElems, elementIter, bucket.chrom, bucket.start, bucket.end)

    # be careful here not to leak a reference to the heap; if the caller
    # decides to mess with that list, it'll screw us up. Anyway, we need a
    # final check here to make sure we trim off any elements that exceed the
    # end of this bucket.
    yield bucket, [x for x in openElems if x.start < bucket.end]
    prevBucket = bucket
python
def bucketIterator(elements, buckets):
  """
  For each bucket in buckets, yield it and any elements that overlap it.

  :param elements: the genomic intervals to place into the buckets. Must be
                   sorted by chromosome and start index. This could be a
                   list, or an iterator.
  :param buckets: the buckets into which genomic intervals should be binned.
                  Must be sorted by chromosome and start index. This could
                  be a list, or an iterator
  :return: iterator that will yield a tuple of 1 bucket and 1 list of
           elements in the bucket for each call to __next__().
  """
  def check_sorted(current, previous):
    if (previous is not None) and \
       ((previous.chrom > current.chrom) or
        ((previous.chrom == current.chrom) and
         (previous.start > current.start))):
      raise GenomicIntervalError("elements not sorted. Saw " + str(previous) +
                                 " before " + str(current))

  def updateOpen(openHeap, elementIterator, bucketChrom,
                 bucketStart, bucketEnd):
    """
    Drop elements from heap which start earlier than current bucket.

    Update the open heap so that it contains only elements that end after the
    start of the current bucket. Note that the heap may already contain some
    elements that start after the end of the current bucket, if a previous
    bucket ended after the end of this one and brought them into the set.

    :param openHeap: a min heap of elements; uses the default sorting order
                     for the genomic intervals, which is by end index. This
                     is what we're updating.
    :param elementIterator: an iterator from which we will pull new elements.
                            Elements yielded by this iterator must be sorted
                            by start index. Must be 'peekable'
    :param bucketChrom: the chromosome of the current bucket.
    :param bucketStart: the start index of the current bucket.
    :param bucketEnd: the end index of the current bucket.
    """
    # first, we're going to pop elements from the heap which can no longer
    # overlap this or any future buckets. Buckets are sorted by start, so
    # we'll never see another bucket that starts earlier than this one --
    # hence any elements that end before the start of this bucket will never be
    # used again and can be dropped. Elements in the heap are ordered by end
    # index, so once we reach an element in the heap that does not end before
    # the start of this bucket, we are sure that no others will come after it
    # which do end before the start of this bucket. So we can stop dropping.
    while len(openHeap) > 0 and ((openHeap[0].chrom < bucketChrom) or
                                 ((openHeap[0].chrom == bucketChrom) and
                                  (openHeap[0].end <= bucketStart))):
      heappop(openHeap)

    # now we're going to add new elements from the iterator to the heap. As
    # we know that elements in the iterator are sorted by start index, we know
    # that once we see an element that has a start index greater than the end
    # of this bucket, we can stop -- everything else after it will also start
    # after the end of this bucket.
    while (elementIterator.peek() is not None) and \
          ((elementIterator.peek().chrom < bucketChrom) or
           ((elementIterator.peek().chrom == bucketChrom) and
            (elementIterator.peek().start < bucketEnd))):
      e = elementIterator.__next__()
      # if e falls before this bucket, we can skip it; buckets are sorted by
      # start, so no other buckets start earlier than this one and so it
      # cannot intersect any others.
      if (e.chrom < bucketChrom) or \
         (e.chrom == bucketChrom and e.end <= bucketStart):
        continue
      # now we know e intersects this bucket..
      heappush(openHeap, e)

  openElems = []
  prevBucket = None
  elementIter = AutoApplyIterator(elements, check_sorted)
  for bucket in buckets:
    # make sure the buckets are sorted by start index
    if prevBucket is not None and ((bucket.chrom < prevBucket.chrom) or
                                   (bucket.chrom == prevBucket.chrom and
                                    bucket.start < prevBucket.start)):
      raise GenomicIntervalError("regions-of-interest are not sorted. Saw " +
                                 str(prevBucket) + " before " + str(bucket))
    updateOpen(openElems, elementIter, bucket.chrom, bucket.start, bucket.end)

    # be careful here not to leak a reference to the heap; if the caller
    # decides to mess with that list, it'll screw us up. Anyway, we need a
    # final check here to make sure we trim off any elements that exceed the
    # end of this bucket.
    yield bucket, [x for x in openElems if x.start < bucket.end]
    prevBucket = bucket
[ "def", "bucketIterator", "(", "elements", ",", "buckets", ")", ":", "def", "check_sorted", "(", "current", ",", "previous", ")", ":", "if", "(", "previous", "is", "not", "None", ")", "and", "(", "(", "previous", ".", "chrom", ">", "current", ".", "chrom", ")", "or", "(", "(", "previous", ".", "chrom", "==", "current", ".", "chrom", ")", "and", "(", "previous", ".", "start", ">", "current", ".", "start", ")", ")", ")", ":", "raise", "GenomicIntervalError", "(", "\"elements not sorted. Saw \"", "+", "str", "(", "previous", ")", "+", "\" before \"", "+", "str", "(", "current", ")", ")", "def", "updateOpen", "(", "openHeap", ",", "elementIterator", ",", "bucketChrom", ",", "bucketStart", ",", "bucketEnd", ")", ":", "\"\"\"\n Drop elements from heap which start earlier than current bucket.\n\n Update the open heap so that it contains only elements that end after the\n start of the current bucket. Note that the heap may already contain some\n elements that start after the end of the current bucket, if a previous\n bucket ended after the end of this one and brought them into the set.\n\n :param openHeap: a min heap of elements; uses the default sorting order\n for the genomic intervals, which is by end index. This\n is what we're updating.\n :param elementIterator: an iterator from which we will pull new elements.\n Elements yielded by this iterator must be sorted\n by start index. Must be 'peakable'\n :param bucketChrom: the chromosome of the current bucket.\n :param bucketStart: the start index of the current bucket.\n :param bucketEnd: the end index of the current bucket.\n \"\"\"", "# first, we're going to pop elements from the heap which can no longer", "# overalp this or any future buckets. Buckets are sorted by start, so", "# we'll never see another bucket that starts earlier than this one --", "# hence any elements that end before the start of this bucket will never be", "# used again and can be dropped. Elements in the heap are ordered by end", "# index, so once we reach an element in the heap that does not end before", "# the start of this bucket, we are sure that no others will come after it", "# which do end before the start of this bucket. So we can stop dropping.", "while", "len", "(", "openHeap", ")", ">", "0", "and", "(", "(", "openHeap", "[", "0", "]", ".", "chrom", "<", "bucketChrom", ")", "or", "(", "(", "openHeap", "[", "0", "]", ".", "chrom", "==", "bucketChrom", ")", "and", "(", "openHeap", "[", "0", "]", ".", "end", "<=", "bucketStart", ")", ")", ")", ":", "heappop", "(", "openHeap", ")", "# now we're going to add new elements from the iterator to the heap. 
As", "# we know that elements in the iterator are sorted by start index, we know", "# that once we see an element that has a start index greater than the end", "# of this bucket, we can stop -- everything else after it will also start", "# after the end of this bucket.", "while", "(", "elementIterator", ".", "peek", "(", ")", "is", "not", "None", ")", "and", "(", "(", "elementIterator", ".", "peek", "(", ")", ".", "chrom", "<", "bucketChrom", ")", "or", "(", "(", "elementIterator", ".", "peek", "(", ")", ".", "chrom", "==", "bucketChrom", ")", "and", "(", "elementIterator", ".", "peek", "(", ")", ".", "start", "<", "bucketEnd", ")", ")", ")", ":", "e", "=", "elementIterator", ".", "__next__", "(", ")", "# if e falls before this bucket, we can skip it; buckets are sorted by", "# start, so no other buckets start earlier than this one and so it", "# cannot intersect any others.", "if", "(", "e", ".", "chrom", "<", "bucketChrom", ")", "or", "(", "e", ".", "chrom", "==", "bucketChrom", "and", "e", ".", "end", "<=", "bucketStart", ")", ":", "continue", "# now we know e intersects this bucket..", "heappush", "(", "openHeap", ",", "e", ")", "openElems", "=", "[", "]", "prevBucket", "=", "None", "elementIter", "=", "AutoApplyIterator", "(", "elements", ",", "check_sorted", ")", "for", "bucket", "in", "buckets", ":", "# make sure the buckets are sorted by start index", "if", "prevBucket", "is", "not", "None", "and", "(", "(", "bucket", ".", "chrom", "<", "prevBucket", ".", "chrom", ")", "or", "(", "bucket", ".", "chrom", "==", "prevBucket", ".", "chrom", "and", "bucket", ".", "start", "<", "prevBucket", ".", "start", ")", ")", ":", "raise", "GenomicIntervalError", "(", "\"regions-of-interest are not sorted. Saw \"", "+", "str", "(", "prevBucket", ")", "+", "\" before \"", "+", "str", "(", "bucket", ")", ")", "updateOpen", "(", "openElems", ",", "elementIter", ",", "bucket", ".", "chrom", ",", "bucket", ".", "start", ",", "bucket", ".", "end", ")", "# be careful here not to leak a reference to the heap; if the caller", "# decides to mess with that list, it'll screw us up. Anyway, we need a", "# final check here to make sure we trim off any elements that exceed the", "# end of this bucket.", "yield", "bucket", ",", "[", "x", "for", "x", "in", "openElems", "if", "x", ".", "start", "<", "bucket", ".", "end", "]", "prevBucket", "=", "bucket" ]
For each bucket in buckets, yield it and any elements that overlap it.

:param elements: the genomic intervals to place into the buckets. Must be
                 sorted by chromosome and start index. This could be a
                 list, or an iterator.
:param buckets: the buckets into which genomic intervals should be binned.
                Must be sorted by chromosome and start index. This could
                be a list, or an iterator
:return: iterator that will yield a tuple of 1 bucket and 1 list of
         elements in the bucket for each call to __next__().
[ "For", "each", "bucket", "in", "buckets", "yield", "it", "and", "any", "elements", "that", "overlap", "it", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L296-L386
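A usage sketch; both inputs must be sorted by chromosome then start index, and an element appears under every bucket it overlaps:

from pyokit.datastruct.genomicInterval import GenomicInterval, bucketIterator

buckets = [GenomicInterval('chr1', 0, 100), GenomicInterval('chr1', 50, 150)]
elems = [GenomicInterval('chr1', 10, 20), GenomicInterval('chr1', 90, 110)]
for bucket, overlapping in bucketIterator(elems, buckets):
  print("%d-%d: %s" % (bucket.start, bucket.end,
                       [(e.start, e.end) for e in overlapping]))
# -> 0-100: [(10, 20), (90, 110)]
# -> 50-150: [(90, 110)]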
247,068
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
parseWigString
def parseWigString(line, scoreType=int):
  """
  Parse a string in simple Wig format and return a GenomicInterval.

  :param line: the string to be parsed
  :param scoreType: treat the score field as having this type.
  :return: GenomicInterval object representing this wig line; the name of
           the interval will be set to 'X', and its strand to the default.
  """
  parts = line.split("\t")
  if (len(parts) < 4):
    raise GenomicIntervalError("failed to parse " + line +
                               " as wig format, too few fields")
  return GenomicInterval(parts[0].strip(), int(parts[1]), int(parts[2]),
                         None, scoreType(parts[3]))
python
def parseWigString(line, scoreType=int):
  """
  Parse a string in simple Wig format and return a GenomicInterval.

  :param line: the string to be parsed
  :param scoreType: treat the score field as having this type.
  :return: GenomicInterval object representing this wig line; the name of
           the interval will be set to 'X', and its strand to the default.
  """
  parts = line.split("\t")
  if (len(parts) < 4):
    raise GenomicIntervalError("failed to parse " + line +
                               " as wig format, too few fields")
  return GenomicInterval(parts[0].strip(), int(parts[1]), int(parts[2]),
                         None, scoreType(parts[3]))
[ "def", "parseWigString", "(", "line", ",", "scoreType", "=", "int", ")", ":", "parts", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "(", "len", "(", "parts", ")", "<", "4", ")", ":", "raise", "GenomicIntervalError", "(", "\"failed to parse \"", "+", "line", "+", "\" as wig format, too few fields\"", ")", "return", "GenomicInterval", "(", "parts", "[", "0", "]", ".", "strip", "(", ")", ",", "int", "(", "parts", "[", "1", "]", ")", ",", "int", "(", "parts", "[", "2", "]", ")", ",", "None", ",", "scoreType", "(", "parts", "[", "3", "]", ")", ")" ]
Parse a string in simple Wig format and return a GenomicInterval.

:param line: the string to be parsed
:param scoreType: treat the score field as having this type.
:return: GenomicInterval object representing this wig line; the name of
         the interval will be set to 'X', and its strand to the default.
[ "Parse", "a", "string", "in", "simple", "Wig", "format", "and", "return", "a", "GenomicInterval", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L393-L407
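A worked example of the four required tab-separated fields (chrom, start, end, score):

from pyokit.datastruct.genomicInterval import parseWigString

iv = parseWigString("chr7\t127471196\t127471200\t0.75", scoreType=float)
print("%s %d %d %s" % (iv.chrom, iv.start, iv.end, iv.score))
# -> chr7 127471196 127471200 0.75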
247,069
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
parseBEDString
def parseBEDString(line, scoreType=int, dropAfter=None):
  """
  Parse a string in BED format and return a GenomicInterval object.

  :param line: the string to be parsed
  :param dropAfter: an int indicating that any fields after and including
                    this field should be ignored as they don't conform to
                    the BED format. By default, None, meaning we use all
                    fields. Index from zero.
  :return: GenomicInterval object built from the BED string representation
  """
  pieces = line.split("\t")
  if dropAfter is not None:
    pieces = pieces[0:dropAfter]
  if len(pieces) < 3:
    raise GenomicIntervalError("BED elements must have at least chrom, " +
                               "start and end; found only " +
                               str(len(pieces)) + " in " + line)
  chrom = pieces[0]
  start = pieces[1]
  end = pieces[2]

  name = None
  score = None
  strand = None

  if len(pieces) >= 4:
    name = pieces[3]
  if len(pieces) >= 5:
    score = pieces[4]
  if len(pieces) >= 6:
    strand = pieces[5]

  return GenomicInterval(chrom, start, end, name, score, strand, scoreType)
python
def parseBEDString(line, scoreType=int, dropAfter=None):
  """
  Parse a string in BED format and return a GenomicInterval object.

  :param line: the string to be parsed
  :param dropAfter: an int indicating that any fields after and including
                    this field should be ignored as they don't conform to
                    the BED format. By default, None, meaning we use all
                    fields. Index from zero.
  :return: GenomicInterval object built from the BED string representation
  """
  pieces = line.split("\t")
  if dropAfter is not None:
    pieces = pieces[0:dropAfter]
  if len(pieces) < 3:
    raise GenomicIntervalError("BED elements must have at least chrom, " +
                               "start and end; found only " +
                               str(len(pieces)) + " in " + line)
  chrom = pieces[0]
  start = pieces[1]
  end = pieces[2]

  name = None
  score = None
  strand = None

  if len(pieces) >= 4:
    name = pieces[3]
  if len(pieces) >= 5:
    score = pieces[4]
  if len(pieces) >= 6:
    strand = pieces[5]

  return GenomicInterval(chrom, start, end, name, score, strand, scoreType)
[ "def", "parseBEDString", "(", "line", ",", "scoreType", "=", "int", ",", "dropAfter", "=", "None", ")", ":", "peices", "=", "line", ".", "split", "(", "\"\\t\"", ")", "if", "dropAfter", "is", "not", "None", ":", "peices", "=", "peices", "[", "0", ":", "dropAfter", "]", "if", "len", "(", "peices", ")", "<", "3", ":", "raise", "GenomicIntervalError", "(", "\"BED elements must have at least chrom, \"", "+", "\"start and end; found only \"", "+", "str", "(", "len", "(", "peices", ")", ")", "+", "\" in \"", "+", "line", ")", "chrom", "=", "peices", "[", "0", "]", "start", "=", "peices", "[", "1", "]", "end", "=", "peices", "[", "2", "]", "name", "=", "None", "score", "=", "None", "strand", "=", "None", "if", "len", "(", "peices", ")", ">=", "4", "is", "not", "None", ":", "name", "=", "peices", "[", "3", "]", "if", "len", "(", "peices", ")", ">=", "5", "is", "not", "None", ":", "score", "=", "peices", "[", "4", "]", "if", "len", "(", "peices", ")", ">=", "6", "is", "not", "None", ":", "strand", "=", "peices", "[", "5", "]", "return", "GenomicInterval", "(", "chrom", ",", "start", ",", "end", ",", "name", ",", "score", ",", "strand", ",", "scoreType", ")" ]
Parse a string in BED format and return a GenomicInterval object. :param line: the string to be parsed :param dropAfter: an int indicating that any fields after and including this field should be ignored as they don't conform to the BED format. By default, None, meaning we use all fields. Index from zero. :return: GenomicInterval object built from the BED string representation
[ "Parse", "a", "string", "in", "BED", "format", "and", "return", "a", "GenomicInterval", "object", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L410-L443
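A minimal usage sketch for the parseBEDString record above. The import path is an assumption taken from the record's path field, and the BED line is a made-up six-field example:

# Hedged sketch; assumes the module import path matches the record's path field.
from pyokit.datastruct.genomicInterval import parseBEDString

# A six-field BED line: chrom, start, end, name, score, strand.
line = "chr1\t100\t200\tregion1\t5\t+"
interval = parseBEDString(line, scoreType=int)

# dropAfter=3 discards everything from the 4th field onward,
# keeping only chrom, start and end.
minimal = parseBEDString(line, dropAfter=3)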
247,070
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.sameRegion
def sameRegion(self, e): """ Check whether self represents the same DNA region as e. :param e: genomic region to compare against :return: True if self and e are for the same region (ignores differences in non-region related fields, such as score -- but does consider name and strand) """ if e is None: return False return (self.chrom == e.chrom and self.start == e.start and self.end == e.end and self.name == e.name and self.strand == e.strand)
python
def sameRegion(self, e): """ Check whether self represents the same DNA region as e. :param e: genomic region to compare against :return: True if self and e are for the same region (ignores differences in non-region related fields, such as score -- but does consider name and strand) """ if e is None: return False return (self.chrom == e.chrom and self.start == e.start and self.end == e.end and self.name == e.name and self.strand == e.strand)
[ "def", "sameRegion", "(", "self", ",", "e", ")", ":", "if", "e", "is", "None", ":", "return", "False", "return", "(", "self", ".", "chrom", "==", "e", ".", "chrom", "and", "self", ".", "start", "==", "e", ".", "start", "and", "self", ".", "end", "==", "e", ".", "end", "and", "self", ".", "name", "==", "e", ".", "name", "and", "self", ".", "strand", "==", "e", ".", "strand", ")" ]
Check whether self represents the same DNA region as e. :param e: genomic region to compare against :return: True if self and e are for the same region (ignores differences in non-region related fields, such as score -- but does consider name and strand)
[ "Check", "whether", "self", "represents", "the", "same", "DNA", "region", "as", "e", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L536-L549
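A hedged sketch of sameRegion's actual behaviour: as the corrected docstring notes, it compares chrom, start, end, name and strand, and ignores only score. The keyword arguments to GenomicInterval are assumptions inferred from the constructor call in parseBEDString:

from pyokit.datastruct.genomicInterval import GenomicInterval

# Assumed constructor keywords (chrom, start, end, name=..., score=...).
a = GenomicInterval("chr1", 100, 200, name="x", score=1)
b = GenomicInterval("chr1", 100, 200, name="x", score=99)
print(a.sameRegion(b))  # True -- the score differs, but every region field matches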
247,071
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.__singleIntersect
def __singleIntersect(self, e): """ Get a list of GenomicRegions produced by subtracting e from self. :return: a list of regions that represent the result of subtracting e from self. The list might be empty if self is totally contained in e, or may contain two elements if e is totally contained in self. Otherwise there'll be one element. """ if e.chrom != self.chrom or e.end < self.start or e.start > self.end: # no intersection return [copy.copy(self)] if e.start <= self.start and e.end >= self.end: # whole self is removed. return [] if e.start > self.start and e.end < self.end: # splits self in two r1 = copy.copy(self) r2 = copy.copy(self) r1.end = e.start r2.start = e.end return [r1, r2] if e.start <= self.start: # cuts off start of self r = copy.copy(self) r.start = e.end return [r] if e.end >= self.end: # cuts off end of self r = copy.copy(self) r.end = e.start return [r] # oops, we screwed up if we got to here.. raise GenomicIntervalError("fatal error - failed BED subtraction of " + str(e) + " from " + str(self))
python
def __singleIntersect(self, e): """ Get a list of GenomicRegions produced by subtracting e from self. :return: a list of regions that represent the result of subtracting e from self. The list might be empty if self is totally contained in e, or may contain two elements if e is totally contained in self. Otherwise there'll be one element. """ if e.chrom != self.chrom or e.end < self.start or e.start > self.end: # no intersection return [copy.copy(self)] if e.start <= self.start and e.end >= self.end: # whole self is removed. return [] if e.start > self.start and e.end < self.end: # splits self in two r1 = copy.copy(self) r2 = copy.copy(self) r1.end = e.start r2.start = e.end return [r1, r2] if e.start <= self.start: # cuts off start of self r = copy.copy(self) r.start = e.end return [r] if e.end >= self.end: # cuts off end of self r = copy.copy(self) r.end = e.start return [r] # oops, we screwed up if we got to here.. raise GenomicIntervalError("fatal error - failed BED subtraction of " + str(e) + " from " + str(self))
[ "def", "__singleIntersect", "(", "self", ",", "e", ")", ":", "if", "e", ".", "chrom", "!=", "self", ".", "chrom", "or", "e", ".", "end", "<", "self", ".", "start", "or", "e", ".", "start", ">", "self", ".", "end", ":", "# no intersection", "return", "[", "copy", ".", "copy", "(", "self", ")", "]", "if", "e", ".", "start", "<=", "self", ".", "start", "and", "e", ".", "end", ">=", "self", ".", "end", ":", "# whole self is removed.", "return", "[", "]", "if", "e", ".", "start", ">", "self", ".", "start", "and", "e", ".", "end", "<", "self", ".", "end", ":", "# splits self in two", "r1", "=", "copy", ".", "copy", "(", "self", ")", "r2", "=", "copy", ".", "copy", "(", "self", ")", "r1", ".", "end", "=", "e", ".", "start", "r2", ".", "start", "=", "e", ".", "end", "return", "[", "r1", ",", "r2", "]", "if", "e", ".", "start", "<=", "self", ".", "start", ":", "# cuts off start of self", "r", "=", "copy", ".", "copy", "(", "self", ")", "r", ".", "start", "=", "e", ".", "end", "return", "[", "r", "]", "if", "e", ".", "end", ">=", "self", ".", "end", ":", "# cuts off end of self", "r", "=", "copy", ".", "copy", "(", "self", ")", "r", ".", "end", "=", "e", ".", "start", "return", "[", "r", "]", "# oops, we screwed up if we got to here..", "raise", "GenomicIntervalError", "(", "\"fatal error - failed BED subtraction of \"", "+", "str", "(", "e", ")", "+", "\" from \"", "+", "str", "(", "self", ")", ")" ]
Get a list of GenomicRegions produced by subtracting e from self. :return: a list of regions that represent the result of subtracting e from self. The list might be empty if self is totally contained in e, or may contain two elements if e is totally contained in self. Otherwise there'll be one element.
[ "Get", "a", "list", "of", "GenomicRegions", "produced", "by", "subtracting", "e", "from", "self", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L614-L648
247,072
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.subtract
def subtract(self, es): """ Subtract the BED elements in es from self. :param es: a list of BED elements (or anything with chrom, start, end) :return: a list of BED elements which represent what is left of self after the subtraction. This might be an empty list. """ workingSet = [self] for e in es: newWorkingSet = [] for w in workingSet: newWorkingSet += w.__singleIntersect(e) workingSet = newWorkingSet return workingSet
python
def subtract(self, es): """ Subtract the BED elements in es from self. :param es: a list of BED elements (or anything with chrom, start, end) :return: a list of BED elements which represent what is left of self after the subtraction. This might be an empty list. """ workingSet = [self] for e in es: newWorkingSet = [] for w in workingSet: newWorkingSet += w.__singleIntersect(e) workingSet = newWorkingSet return workingSet
[ "def", "subtract", "(", "self", ",", "es", ")", ":", "workingSet", "=", "[", "self", "]", "for", "e", "in", "es", ":", "newWorkingSet", "=", "[", "]", "for", "w", "in", "workingSet", ":", "newWorkingSet", "+=", "w", ".", "__singleIntersect", "(", "e", ")", "workingSet", "=", "newWorkingSet", "return", "workingSet" ]
Subtract the BED elements in es from self. :param es: a list of BED elements (or anything with chrom, start, end) :return: a list of BED elements which represent what is left of self after the subtraction. This might be an empty list.
[ "Subtract", "the", "BED", "elements", "in", "es", "from", "self", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L650-L664
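A hedged sketch of subtract() for the record above, assuming GenomicInterval(chrom, start, end) is a valid constructor call:

from pyokit.datastruct.genomicInterval import GenomicInterval

region = GenomicInterval("chr1", 100, 200)
hole = GenomicInterval("chr1", 140, 160)
# Subtracting an interval that sits inside region splits it in two:
# [100, 140) and [160, 200).
for r in region.subtract([hole]):
    print(r.start, r.end)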
247,073
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.sizeOfOverlap
def sizeOfOverlap(self, e): """ Get the size of the overlap between self and e. :return: the number of bases that are shared in common between self and e. """ # no overlap if not self.intersects(e): return 0 # complete inclusion.. if e.start >= self.start and e.end <= self.end: return len(e) if self.start >= e.start and self.end <= e.end: return len(self) # partial overlap if e.start > self.start: return (self.end - e.start) if self.start > e.start: return (e.end - self.start)
python
def sizeOfOverlap(self, e): """ Get the size of the overlap between self and e. :return: the number of bases that are shared in common between self and e. """ # no overlap if not self.intersects(e): return 0 # complete inclusion.. if e.start >= self.start and e.end <= self.end: return len(e) if self.start >= e.start and self.end <= e.end: return len(self) # partial overlap if e.start > self.start: return (self.end - e.start) if self.start > e.start: return (e.end - self.start)
[ "def", "sizeOfOverlap", "(", "self", ",", "e", ")", ":", "# no overlap", "if", "not", "self", ".", "intersects", "(", "e", ")", ":", "return", "0", "# complete inclusion..", "if", "e", ".", "start", ">=", "self", ".", "start", "and", "e", ".", "end", "<=", "self", ".", "end", ":", "return", "len", "(", "e", ")", "if", "self", ".", "start", ">=", "e", ".", "start", "and", "self", ".", "end", "<=", "e", ".", "end", ":", "return", "len", "(", "self", ")", "# partial overlap", "if", "e", ".", "start", ">", "self", ".", "start", ":", "return", "(", "self", ".", "end", "-", "e", ".", "start", ")", "if", "self", ".", "start", ">", "e", ".", "start", ":", "return", "(", "e", ".", "end", "-", "self", ".", "start", ")" ]
Get the size of the overlap between self and e. :return: the number of bases that are shared in common between self and e.
[ "Get", "the", "size", "of", "the", "overlap", "between", "self", "and", "e", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L666-L686
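A worked example for sizeOfOverlap's partial-overlap branches, under the same constructor assumption as above:

from pyokit.datastruct.genomicInterval import GenomicInterval

a = GenomicInterval("chr1", 100, 200)
b = GenomicInterval("chr1", 150, 250)
# b.start > a.start, so the overlap is a.end - b.start = 200 - 150 = 50.
print(a.sizeOfOverlap(b))  # 50
# The symmetric branch (self.start > e.start) gives the same answer.
print(b.sizeOfOverlap(a))  # 50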
247,074
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.intersects
def intersects(self, e): """ Check whether e intersects self. :return: True if this element intersects the element e """ if self.chrom != e.chrom: return False if e.start >= self.start and e.start < self.end: return True if e.end > self.start and e.end <= self.end: return True if e.start < self.start and e.end > self.end: return True return False
python
def intersects(self, e): """ Check whether e intersects self. :return: True if this element intersects the element e """ if self.chrom != e.chrom: return False if e.start >= self.start and e.start < self.end: return True if e.end > self.start and e.end <= self.end: return True if e.start < self.start and e.end > self.end: return True return False
[ "def", "intersects", "(", "self", ",", "e", ")", ":", "if", "self", ".", "chrom", "!=", "e", ".", "chrom", ":", "return", "False", "if", "e", ".", "start", ">=", "self", ".", "start", "and", "e", ".", "start", "<", "self", ".", "end", ":", "return", "True", "if", "e", ".", "end", ">", "self", ".", "start", "and", "e", ".", "end", "<=", "self", ".", "end", ":", "return", "True", "if", "e", ".", "start", "<", "self", ".", "start", "and", "e", ".", "end", ">", "self", ".", "end", ":", "return", "True", "return", "False" ]
Check whether e intersects self. :return: True if this element intersects the element e
[ "Check", "whether", "e", "intersects", "self", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L688-L703
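The comparison operators in intersects() treat intervals as half-open, so intervals that merely touch do not intersect. A hedged sketch under the same constructor assumption as above:

from pyokit.datastruct.genomicInterval import GenomicInterval

a = GenomicInterval("chr1", 100, 200)
b = GenomicInterval("chr1", 200, 300)
print(a.intersects(b))  # False -- b.start == a.end, so no base is shared
c = GenomicInterval("chr1", 199, 300)
print(a.intersects(c))  # True -- base 199 is shared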
247,075
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.isPositiveStrand
def isPositiveStrand(self): """ Check if this genomic region is on the positive strand. :return: True if this element is on the positive strand """ if self.strand is None and self.DEFAULT_STRAND == self.POSITIVE_STRAND: return True return self.strand == self.POSITIVE_STRAND
python
def isPositiveStrand(self): """ Check if this genomic region is on the positive strand. :return: True if this element is on the positive strand """ if self.strand is None and self.DEFAULT_STRAND == self.POSITIVE_STRAND: return True return self.strand == self.POSITIVE_STRAND
[ "def", "isPositiveStrand", "(", "self", ")", ":", "if", "self", ".", "strand", "is", "None", "and", "self", ".", "DEFAULT_STRAND", "==", "self", ".", "POSITIVE_STRAND", ":", "return", "True", "return", "self", ".", "strand", "==", "self", ".", "POSITIVE_STRAND" ]
Check if this genomic region is on the positive strand. :return: True if this element is on the positive strand
[ "Check", "if", "this", "genomic", "region", "is", "on", "the", "positive", "strand", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L705-L713
247,076
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.isNegativeStrand
def isNegativeStrand(self): """ Check if this genomic interval is on the negative strand. :return: True if this element is on the negative strand """ if self.strand is None and self.DEFAULT_STRAND == self.NEGATIVE_STRAND: return True return self.strand == self.NEGATIVE_STRAND
python
def isNegativeStrand(self): """ Check if this genomic interval is on the negative strand. :return: True if this element is on the negative strand """ if self.strand is None and self.DEFAULT_STRAND == self.NEGATIVE_STRAND: return True return self.strand == self.NEGATIVE_STRAND
[ "def", "isNegativeStrand", "(", "self", ")", ":", "if", "self", ".", "strand", "is", "None", "and", "self", ".", "DEFAULT_STRAND", "==", "self", ".", "NEGATIVE_STRAND", ":", "return", "True", "return", "self", ".", "strand", "==", "self", ".", "NEGATIVE_STRAND" ]
Check if this genomic interval is on the negative strand. :return: True if this element is on the negative strand
[ "Check", "if", "this", "genomic", "interval", "is", "on", "the", "negative", "strand", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L715-L723
247,077
pjuren/pyokit
src/pyokit/datastruct/genomicInterval.py
GenomicInterval.transform_center
def transform_center(self, size): """ Transform self so it is centered on the same spot, but has new size. If the region grows, the extra nucleotides will be distributed evenly to the 5' and 3' side of the current region if possible. If the extra is odd, the 3' side will get the extra one. Similarly, if the resize shrinks the interval, bases will be removed from the 5' and 3' sides equally; if the number to remove is odd, the extra one will be removed from the 3' side. :param size: size of the region after transformation. """ if size < 1: raise GenomicIntervalError("Cannot resize genomic interval to " + str(size) + " bases; must be at least 1 " + "base in length") if size == len(self): return elif size > len(self): extra = size - len(self) extra_5_prime = extra // 2 extra_3_prime = extra // 2 if extra % 2 == 0 else (extra // 2) + 1 assert(extra_5_prime + extra_3_prime == extra) self.start = (self.start - extra_5_prime if self.isPositiveStrand() else self.start - extra_3_prime) self.end = (self.end + extra_3_prime if self.isPositiveStrand() else self.end + extra_5_prime) else: less = len(self) - size less_5_prime = less // 2 less_3_prime = less // 2 if less % 2 == 0 else (less // 2) + 1 assert(less_5_prime + less_3_prime == less) self.start = (self.start + less_5_prime if self.isPositiveStrand() else self.start + less_3_prime) self.end = (self.end - less_3_prime if self.isPositiveStrand() else self.end - less_5_prime)
python
def transform_center(self, size): """ Transform self so it is centered on the same spot, but has new size. If the region grows, the extra nucleotides will be distributed evenly to the 5' and 3' side of the current region if possible. If the extra is odd, the 3' side will get the extra one. Similarly, if the resize shrinks the interval, bases will be removed from the 5' and 3' sides equally; if the number to remove is odd, the extra one will be removed from the 3' side. :param size: size of the region after transformation. """ if size < 1: raise GenomicIntervalError("Cannot resize genomic interval to " + str(size) + " bases; must be at least 1 " + "base in length") if size == len(self): return elif size > len(self): extra = size - len(self) extra_5_prime = extra // 2 extra_3_prime = extra // 2 if extra % 2 == 0 else (extra // 2) + 1 assert(extra_5_prime + extra_3_prime == extra) self.start = (self.start - extra_5_prime if self.isPositiveStrand() else self.start - extra_3_prime) self.end = (self.end + extra_3_prime if self.isPositiveStrand() else self.end + extra_5_prime) else: less = len(self) - size less_5_prime = less // 2 less_3_prime = less // 2 if less % 2 == 0 else (less // 2) + 1 assert(less_5_prime + less_3_prime == less) self.start = (self.start + less_5_prime if self.isPositiveStrand() else self.start + less_3_prime) self.end = (self.end - less_3_prime if self.isPositiveStrand() else self.end - less_5_prime)
[ "def", "transform_center", "(", "self", ",", "size", ")", ":", "if", "size", "<", "1", ":", "raise", "GenomicIntervalError", "(", "\"Cannot resize genomic interval to \"", "+", "str", "(", "size", ")", "+", "\" bases; must be at least 1 \"", "+", "\"base in length\"", ")", "if", "size", "==", "len", "(", "self", ")", ":", "return", "elif", "size", ">", "len", "(", "self", ")", ":", "extra", "=", "size", "-", "len", "(", "self", ")", "extra_5_prime", "=", "extra", "/", "2", "extra_3_prime", "=", "extra", "/", "2", "if", "extra", "%", "2", "==", "0", "else", "(", "extra", "/", "2", ")", "+", "1", "assert", "(", "extra_5_prime", "+", "extra_3_prime", "==", "extra", ")", "self", ".", "start", "=", "(", "self", ".", "start", "-", "extra_5_prime", "if", "self", ".", "isPositiveStrand", "(", ")", "else", "self", ".", "start", "-", "extra_3_prime", ")", "self", ".", "end", "=", "(", "self", ".", "end", "+", "extra_3_prime", "if", "self", ".", "isPositiveStrand", "(", ")", "else", "self", ".", "end", "+", "extra_5_prime", ")", "else", ":", "less", "=", "len", "(", "self", ")", "-", "size", "less_5_prime", "=", "less", "/", "2", "less_3_prime", "=", "less", "/", "2", "if", "less", "%", "2", "==", "0", "else", "(", "less", "/", "2", ")", "+", "1", "assert", "(", "less_5_prime", "+", "less_3_prime", "==", "less", ")", "self", ".", "start", "=", "(", "self", ".", "start", "+", "less_5_prime", "if", "self", ".", "isPositiveStrand", "(", ")", "else", "self", ".", "start", "+", "less_3_prime", ")", "self", ".", "end", "=", "(", "self", ".", "end", "-", "less_3_prime", "if", "self", ".", "isPositiveStrand", "(", ")", "else", "self", ".", "end", "-", "less_5_prime", ")" ]
Transform self so it is centered on the same spot, but has new size. If the region grows, the extra nucleotides will be distributed evenly to the 5' and 3' side of the current region if possible. If the extra is odd, the 3' side will get the extra one. Similarly, if the resize shrinks the interval, bases will be removed from the 5' and 3' sides equally; if the number to remove is odd, the extra one will be removed from the 3' side. :param size: size of the region after transformation.
[ "Tranform", "self", "so", "it", "is", "centered", "on", "the", "same", "spot", "but", "has", "new", "size", "." ]
fddae123b5d817daa39496183f19c000d9c3791f
https://github.com/pjuren/pyokit/blob/fddae123b5d817daa39496183f19c000d9c3791f/src/pyokit/datastruct/genomicInterval.py#L725-L760
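A worked example of how transform_center() splits an odd growth between the 5' and 3' sides; the strand keyword is an assumption based on the constructor call in parseBEDString:

from pyokit.datastruct.genomicInterval import GenomicInterval

iv = GenomicInterval("chr1", 100, 110, strand='+')
iv.transform_center(13)
# extra = 3, so extra_5_prime = 1 and extra_3_prime = 2 on the positive strand.
print(iv.start, iv.end)  # 99 112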
247,078
minhhoit/yacms
yacms/accounts/admin.py
UserProfileAdmin.save_model
def save_model(self, request, obj, form, change): """ If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``, send a notification email to the user being saved if their ``active`` status has changed to ``True``. If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``, send a verification email instead. """ must_send_verification_mail_after_save = False if change and settings.ACCOUNTS_APPROVAL_REQUIRED: if obj.is_active and not User.objects.get(id=obj.id).is_active: if settings.ACCOUNTS_VERIFICATION_REQUIRED: # Accounts verification requires an inactive account obj.is_active = False # The token generated by send_verification_mail() # must match the _saved_ User object, # so postpone send_verification_mail() until later must_send_verification_mail_after_save = True else: send_approved_mail(request, obj) super(UserProfileAdmin, self).save_model(request, obj, form, change) if must_send_verification_mail_after_save: user = User.objects.get(id=obj.id) send_verification_mail(request, user, "signup_verify")
python
def save_model(self, request, obj, form, change): """ If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``, send a notification email to the user being saved if their ``active`` status has changed to ``True``. If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``, send a verification email instead. """ must_send_verification_mail_after_save = False if change and settings.ACCOUNTS_APPROVAL_REQUIRED: if obj.is_active and not User.objects.get(id=obj.id).is_active: if settings.ACCOUNTS_VERIFICATION_REQUIRED: # Accounts verification requires an inactive account obj.is_active = False # The token generated by send_verification_mail() # must match the _saved_ User object, # so postpone send_verification_mail() until later must_send_verification_mail_after_save = True else: send_approved_mail(request, obj) super(UserProfileAdmin, self).save_model(request, obj, form, change) if must_send_verification_mail_after_save: user = User.objects.get(id=obj.id) send_verification_mail(request, user, "signup_verify")
[ "def", "save_model", "(", "self", ",", "request", ",", "obj", ",", "form", ",", "change", ")", ":", "must_send_verification_mail_after_save", "=", "False", "if", "change", "and", "settings", ".", "ACCOUNTS_APPROVAL_REQUIRED", ":", "if", "obj", ".", "is_active", "and", "not", "User", ".", "objects", ".", "get", "(", "id", "=", "obj", ".", "id", ")", ".", "is_active", ":", "if", "settings", ".", "ACCOUNTS_VERIFICATION_REQUIRED", ":", "# Accounts verification requires an inactive account", "obj", ".", "is_active", "=", "False", "# The token generated by send_verification_mail()", "# must match the _saved_ User object,", "# so postpone send_verification_mail() until later", "must_send_verification_mail_after_save", "=", "True", "else", ":", "send_approved_mail", "(", "request", ",", "obj", ")", "super", "(", "UserProfileAdmin", ",", "self", ")", ".", "save_model", "(", "request", ",", "obj", ",", "form", ",", "change", ")", "if", "must_send_verification_mail_after_save", ":", "user", "=", "User", ".", "objects", ".", "get", "(", "id", "=", "obj", ".", "id", ")", "send_verification_mail", "(", "request", ",", "user", ",", "\"signup_verify\"", ")" ]
If the ``ACCOUNTS_APPROVAL_REQUIRED`` setting is ``True``, send a notification email to the user being saved if their ``active`` status has changed to ``True``. If the ``ACCOUNTS_VERIFICATION_REQUIRED`` setting is ``True``, send a verification email instead.
[ "If", "the", "ACCOUNTS_APPROVAL_REQUIRED", "setting", "is", "True", "send", "a", "notification", "email", "to", "the", "user", "being", "saved", "if", "their", "active", "status", "has", "changed", "to", "True", ".", "If", "the", "ACCOUNTS_VERIFICATION_REQUIRED", "setting", "is", "True", "send", "a", "verification", "email", "instead", "." ]
2921b706b7107c6e8c5f2bbf790ff11f85a2167f
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/accounts/admin.py#L22-L45
247,079
malthe/pop
src/pop/services/common.py
Service.deploy
def deploy(self, machine): """Deploy service.""" log.debug("machine id: %s." % machine) path = self.path + "/machines" value, metadata = yield self.client.get(path) machines = json.loads(value) machines.append(machine) yield self.client.set(path, json.dumps(machines))
python
def deploy(self, machine): """Deploy service.""" log.debug("machine id: %s." % machine) path = self.path + "/machines" value, metadata = yield self.client.get(path) machines = json.loads(value) machines.append(machine) yield self.client.set(path, json.dumps(machines))
[ "def", "deploy", "(", "self", ",", "machine", ")", ":", "log", ".", "debug", "(", "\"machine id: %s.\"", "%", "machine", ")", "path", "=", "self", ".", "path", "+", "\"/machines\"", "value", ",", "metadata", "=", "yield", "self", ".", "client", ".", "get", "(", "path", ")", "machines", "=", "json", ".", "loads", "(", "value", ")", "machines", ".", "append", "(", "machine", ")", "yield", "self", ".", "client", ".", "set", "(", "path", ",", "json", ".", "dumps", "(", "machines", ")", ")" ]
Deploy service.
[ "Deploy", "service", "." ]
3b58b91b41d8b9bee546eb40dc280a57500b8bed
https://github.com/malthe/pop/blob/3b58b91b41d8b9bee546eb40dc280a57500b8bed/src/pop/services/common.py#L43-L53
247,080
malthe/pop
src/pop/services/common.py
Service.add
def add(self): """Add service definition to hierarchy.""" yield self.client.create(self.path) yield self.client.create(self.path + "/type", self.name) yield self.client.create(self.path + "/state") yield self.client.create(self.path + "/machines", "[]") log.debug("registered service '%s' at %s." % (self.name, self.path))
python
def add(self): """Add service definition to hierarchy.""" yield self.client.create(self.path) yield self.client.create(self.path + "/type", self.name) yield self.client.create(self.path + "/state") yield self.client.create(self.path + "/machines", "[]") log.debug("registered service '%s' at %s." % (self.name, self.path))
[ "def", "add", "(", "self", ")", ":", "yield", "self", ".", "client", ".", "create", "(", "self", ".", "path", ")", "yield", "self", ".", "client", ".", "create", "(", "self", ".", "path", "+", "\"/type\"", ",", "self", ".", "name", ")", "yield", "self", ".", "client", ".", "create", "(", "self", ".", "path", "+", "\"/state\"", ")", "yield", "self", ".", "client", ".", "create", "(", "self", ".", "path", "+", "\"/machines\"", ",", "\"[]\"", ")", "log", ".", "debug", "(", "\"registered service '%s' at %s.\"", "%", "(", "self", ".", "name", ",", "self", ".", "path", ")", ")" ]
Add service definition to hierarchy.
[ "Add", "service", "definition", "to", "hierarchy", "." ]
3b58b91b41d8b9bee546eb40dc280a57500b8bed
https://github.com/malthe/pop/blob/3b58b91b41d8b9bee546eb40dc280a57500b8bed/src/pop/services/common.py#L108-L116
247,081
OpenGov/python_data_wrap
datawrap/basewrap.py
UnorderedCachedDict._insert_cache
def _insert_cache(self, key, val, read): ''' Does an insert into the cache such that the cache will have an updated entry for the key,value,read tuple. Any changes to those values will both update the local cache and queue any required writes to the database. ''' if key is not None and self.stringify_keys: key = str(key) cval = self._cache.get(key) if cval is None: self._cur_size += 1 if cval is None or val != cval: self._cache[key] = val # Force a write if it's a write or if it's # unclear that the item was modified if not self.read_only and (not self._immutable_vals or not read): self._wqueue.add(key)
python
def _insert_cache(self, key, val, read): ''' Does an insert into the cache such that the cache will have an updated entry for the key,value,read tuple. Any changes to those values will both update the local cache and queue any required writes to the database. ''' if key is not None and self.stringify_keys: key = str(key) cval = self._cache.get(key) if cval is None: self._cur_size += 1 if cval is None or val != cval: self._cache[key] = val # Force a write if it's a write or if it's # unclear that the item was modified if not self.read_only and (not self._immutable_vals or not read): self._wqueue.add(key)
[ "def", "_insert_cache", "(", "self", ",", "key", ",", "val", ",", "read", ")", ":", "if", "key", "!=", "None", "and", "self", ".", "stringify_keys", ":", "key", "=", "str", "(", "key", ")", "cval", "=", "self", ".", "_cache", ".", "get", "(", "key", ")", "if", "cval", "==", "None", ":", "self", ".", "_cur_size", "+=", "1", "if", "cval", "==", "None", "or", "val", "!=", "cval", ":", "self", ".", "_cache", "[", "key", "]", "=", "val", "# Force a write if it's a write or if it's\r", "# unclear that the item was modified\r", "if", "not", "self", ".", "read_only", "and", "(", "not", "self", ".", "_immutable_vals", "or", "not", "read", ")", ":", "self", ".", "_wqueue", ".", "add", "(", "key", ")" ]
Does an insert into the cache such that the cache will have an updated entry for the key,value,read tuple. Any changes to those values will both update the local cache and queue any required writes to the database.
[ "Does", "an", "insert", "into", "the", "cache", "such", "that", "the", "cache", "will", "have", "an", "updated", "entry", "for", "the", "key", "value", "read", "tuple", ".", "Any", "changes", "to", "those", "values", "will", "both", "update", "the", "local", "cache", "and", "queue", "any", "required", "writes", "to", "the", "database", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/basewrap.py#L126-L144
247,082
OpenGov/python_data_wrap
datawrap/basewrap.py
UnorderedCachedDict._sync_writes
def _sync_writes(self): ''' Flushes the write queue ''' for key in self._wqueue: val = self._cache[key] self._database[key] = val del self._wqueue self._wqueue = set() self._database.sync()
python
def _sync_writes(self): ''' Flushes the write queue ''' for key in self._wqueue: val = self._cache[key] self._database[key] = val del self._wqueue self._wqueue = set() self._database.sync()
[ "def", "_sync_writes", "(", "self", ")", ":", "for", "key", "in", "self", ".", "_wqueue", ":", "val", "=", "self", ".", "_cache", "[", "key", "]", "self", ".", "_database", "[", "key", "]", "=", "val", "del", "self", ".", "_wqueue", "self", ".", "_wqueue", "=", "set", "(", ")", "self", ".", "_database", ".", "sync", "(", ")" ]
Flushes the write queue
[ "Flushes", "the", "write", "queue" ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/basewrap.py#L205-L214
247,083
OpenGov/python_data_wrap
datawrap/basewrap.py
UnorderedCachedDict.drop_cache
def drop_cache(self): ''' Drops all changes in the cache. ''' del self._cache self._cache = {} del self._wqueue self._wqueue = set() self._cur_size = 0
python
def drop_cache(self): ''' Drops all changes in the cache. ''' del self._cache self._cache = {} del self._wqueue self._wqueue = set() self._cur_size = 0
[ "def", "drop_cache", "(", "self", ")", ":", "del", "self", ".", "_cache", "self", ".", "_cache", "=", "{", "}", "del", "self", ".", "_wqueue", "self", ".", "_wqueue", "=", "set", "(", ")", "self", ".", "_cur_size", "=", "0" ]
Drops all changes in the cache.
[ "Drops", "all", "changes", "in", "the", "cache", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/basewrap.py#L223-L231
247,084
OpenGov/python_data_wrap
datawrap/basewrap.py
UnorderedCachedSet.update
def update(self, *args, **kwargs): ''' Updates the set to include all arguments passed in. If the keyword argument preprocess is passed, then each element is preprocessed before being added. ''' preprocess = kwargs.get('preprocess') for s in args: for e in s: self._dict_set(preprocess(e) if preprocess else e, True)
python
def update(self, *args, **kwargs): ''' Updates the set to include all arguments passed in. If the keyword argument preprocess is passed, then each element is preprocessed before being added. ''' preprocess = kwargs.get('preprocess') for s in args: for e in s: self._dict_set(preprocess(e) if preprocess else e, True)
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "preprocess", "=", "kwargs", ".", "get", "(", "'preprocess'", ")", "for", "s", "in", "args", ":", "for", "e", "in", "s", ":", "self", ".", "_dict_set", "(", "preprocess", "(", "e", ")", "if", "preprocess", "else", "e", ",", "True", ")" ]
Updates the set to include all arguments passed in. If the keyword argument preprocess is passed, then each element is preprocessed before being added.
[ "Updates", "the", "set", "to", "include", "all", "arguments", "passed", "in", ".", "If", "the", "keyword", "argument", "preprocess", "is", "passed", "then", "each", "element", "is", "preprocessed", "before", "being", "added", "." ]
7de38bb30d7a500adc336a4a7999528d753e5600
https://github.com/OpenGov/python_data_wrap/blob/7de38bb30d7a500adc336a4a7999528d753e5600/datawrap/basewrap.py#L287-L296
247,085
darkfeline/mir.anidb
mir/anidb/anime.py
get_episode_number
def get_episode_number(episode: Episode) -> int: """Get the episode number. The episode number is unique for an anime and episode type, but not across episode types for the same anime. """ match = _NUMBER_SUFFIX.search(episode.epno) return int(match.group(1))
python
def get_episode_number(episode: Episode) -> int: """Get the episode number. The episode number is unique for an anime and episode type, but not across episode types for the same anime. """ match = _NUMBER_SUFFIX.search(episode.epno) return int(match.group(1))
[ "def", "get_episode_number", "(", "episode", ":", "Episode", ")", "->", "int", ":", "match", "=", "_NUMBER_SUFFIX", ".", "search", "(", "episode", ".", "epno", ")", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")" ]
Get the episode number. The episode number is unique for an anime and episode type, but not across episode types for the same anime.
[ "Get", "the", "episode", "number", "." ]
a0d25908f85fb1ff4bc595954bfc3f223f1b5acc
https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/anime.py#L77-L84
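_NUMBER_SUFFIX is not defined in the record above. A plausible stand-in, labelled as an assumption, is a pattern that captures the trailing digits of the epno string:

import re

# Hypothetical definition; the real module may use a different pattern.
_NUMBER_SUFFIX = re.compile(r'(\d+)$')

print(int(_NUMBER_SUFFIX.search('S2').group(1)))  # 2 -- special-episode prefix stripped
print(int(_NUMBER_SUFFIX.search('12').group(1)))  # 12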
247,086
darkfeline/mir.anidb
mir/anidb/anime.py
get_episode_title
def get_episode_title(episode: Episode) -> str: """Get the episode title. Japanese title is prioritized. """ for title in episode.titles: if title.lang == 'ja': return title.title else: return episode.titles[0].title
python
def get_episode_title(episode: Episode) -> str: """Get the episode title. Japanese title is prioritized. """ for title in episode.titles: if title.lang == 'ja': return title.title else: return episode.titles[0].title
[ "def", "get_episode_title", "(", "episode", ":", "Episode", ")", "->", "int", ":", "for", "title", "in", "episode", ".", "titles", ":", "if", "title", ".", "lang", "==", "'ja'", ":", "return", "title", ".", "title", "else", ":", "return", "episode", ".", "titles", "[", "0", "]", ".", "title" ]
Get the episode title. Japanese title is prioritized.
[ "Get", "the", "episode", "title", "." ]
a0d25908f85fb1ff4bc595954bfc3f223f1b5acc
https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/anime.py#L87-L96
247,087
darkfeline/mir.anidb
mir/anidb/anime.py
_find_element_text
def _find_element_text(element, match, default=None): """Find a matching subelement and return its text. If default is given, return it if a matching subelement is not found. Otherwise, MissingElementError is raised. """ child = element.find(match) try: return child.text except AttributeError: if default is not None: return default else: raise MissingElementError(element, match)
python
def _find_element_text(element, match, default=None): """Find a matching subelement and return its text. If default is given, return it if a matching subelement is not found. Otherwise, MissingElementError is raised. """ child = element.find(match) try: return child.text except AttributeError: if default is not None: return default else: raise MissingElementError(element, match)
[ "def", "_find_element_text", "(", "element", ",", "match", ",", "default", "=", "None", ")", ":", "child", "=", "element", ".", "find", "(", "match", ")", "try", ":", "return", "child", ".", "text", "except", "AttributeError", ":", "if", "default", "is", "not", "None", ":", "return", "default", "else", ":", "raise", "MissingElementError", "(", "element", ",", "match", ")" ]
Find a matching subelement and return its text. If default is given, return it if a matching subelement is not found. Otherwise, MissingElementError is raised.
[ "Find", "a", "matching", "subelement", "and", "return", "its", "text", "." ]
a0d25908f85fb1ff4bc595954bfc3f223f1b5acc
https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/anime.py#L128-L141
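A self-contained usage sketch for _find_element_text, using a stand-in XML fragment; it assumes the function is in scope (it is private to mir/anidb/anime.py):

import xml.etree.ElementTree as ET

root = ET.fromstring('<anime><type>TV Series</type></anime>')
print(_find_element_text(root, 'type'))                       # 'TV Series'
print(_find_element_text(root, 'episodecount', default='0'))  # '0' (fallback)
# With no default, a missing child raises MissingElementError.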
247,088
darkfeline/mir.anidb
mir/anidb/anime.py
_unpack_episode
def _unpack_episode(element: ET.Element): """Unpack Episode from episode XML element.""" return Episode( epno=element.find('epno').text, type=int(element.find('epno').get('type')), length=int(element.find('length').text), titles=tuple(_unpack_episode_title(title) for title in element.iterfind('title')), )
python
def _unpack_episode(element: ET.Element): """Unpack Episode from episode XML element.""" return Episode( epno=element.find('epno').text, type=int(element.find('epno').get('type')), length=int(element.find('length').text), titles=tuple(_unpack_episode_title(title) for title in element.iterfind('title')), )
[ "def", "_unpack_episode", "(", "element", ":", "ET", ".", "Element", ")", ":", "return", "Episode", "(", "epno", "=", "element", ".", "find", "(", "'epno'", ")", ".", "text", ",", "type", "=", "int", "(", "element", ".", "find", "(", "'epno'", ")", ".", "get", "(", "'type'", ")", ")", ",", "length", "=", "int", "(", "element", ".", "find", "(", "'length'", ")", ".", "text", ")", ",", "titles", "=", "tuple", "(", "_unpack_episode_title", "(", "title", ")", "for", "title", "in", "element", ".", "iterfind", "(", "'title'", ")", ")", ",", ")" ]
Unpack Episode from episode XML element.
[ "Unpack", "Episode", "from", "episode", "XML", "element", "." ]
a0d25908f85fb1ff4bc595954bfc3f223f1b5acc
https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/anime.py#L168-L176
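A hedged sketch feeding _unpack_episode a minimal element; it assumes XML is the braced W3C XML namespace ('{http://www.w3.org/XML/1998/namespace}'), which is what ElementTree produces for xml:lang attributes:

import xml.etree.ElementTree as ET

element = ET.fromstring(
    '<episode>'
    '<epno type="1">2</epno>'
    '<length>25</length>'
    '<title xml:lang="ja">Example title</title>'
    '</episode>'
)
# Assumes _unpack_episode is in scope (it is private to mir/anidb/anime.py).
episode = _unpack_episode(element)
print(episode.epno, episode.type, episode.length)  # 2 1 25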
247,089
darkfeline/mir.anidb
mir/anidb/anime.py
_unpack_episode_title
def _unpack_episode_title(element: ET.Element): """Unpack EpisodeTitle from title XML element.""" return EpisodeTitle(title=element.text, lang=element.get(f'{XML}lang'))
python
def _unpack_episode_title(element: ET.Element): """Unpack EpisodeTitle from title XML element.""" return EpisodeTitle(title=element.text, lang=element.get(f'{XML}lang'))
[ "def", "_unpack_episode_title", "(", "element", ":", "ET", ".", "Element", ")", ":", "return", "EpisodeTitle", "(", "title", "=", "element", ".", "text", ",", "lang", "=", "element", ".", "get", "(", "f'{XML}lang'", ")", ")" ]
Unpack EpisodeTitle from title XML element.
[ "Unpack", "EpisodeTitle", "from", "title", "XML", "element", "." ]
a0d25908f85fb1ff4bc595954bfc3f223f1b5acc
https://github.com/darkfeline/mir.anidb/blob/a0d25908f85fb1ff4bc595954bfc3f223f1b5acc/mir/anidb/anime.py#L179-L182
247,090
openpermissions/perch
perch/model.py
Document.validate
def validate(self): """Validate the resource using its voluptuous schema""" try: # update _resource to have default values from the schema self._resource = self.schema(self._resource) except MultipleInvalid as e: errors = [format_error(err, self.resource_type) for err in e.errors] raise exceptions.ValidationError({'errors': errors}) yield self.check_unique()
python
def validate(self): """Validate the resource using its voluptuous schema""" try: # update _resource to have default values from the schema self._resource = self.schema(self._resource) except MultipleInvalid as e: errors = [format_error(err, self.resource_type) for err in e.errors] raise exceptions.ValidationError({'errors': errors}) yield self.check_unique()
[ "def", "validate", "(", "self", ")", ":", "try", ":", "# update _resource to have default values from the schema", "self", ".", "_resource", "=", "self", ".", "schema", "(", "self", ".", "_resource", ")", "except", "MultipleInvalid", "as", "e", ":", "errors", "=", "[", "format_error", "(", "err", ",", "self", ".", "resource_type", ")", "for", "err", "in", "e", ".", "errors", "]", "raise", "exceptions", ".", "ValidationError", "(", "{", "'errors'", ":", "errors", "}", ")", "yield", "self", ".", "check_unique", "(", ")" ]
Validate the resource using its voluptuous schema
[ "Validate", "the", "resource", "using", "its", "voluptuous", "schema" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L141-L150
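A small sketch of the voluptuous behaviour that validate() relies on: calling the schema returns a copy of the data with schema defaults filled in. The schema below is hypothetical, not perch's actual schema:

from voluptuous import Required, Schema

schema = Schema({
    Required('name'): str,
    Required('state', default='pending'): str,
})
print(schema({'name': 'my-resource'}))
# {'name': 'my-resource', 'state': 'pending'} -- the default was merged in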
247,091
openpermissions/perch
perch/model.py
Document._save
def _save(self): """ Save the resource It's better to use the create, update & delete methods instead of modifying an instance and calling save, because then we call the can_create, can_update & can_delete methods to check whether a user is permitted to make the changes. """ yield self.validate() db = self.db_client() saved = yield db.save_doc(self._resource) # Allow couch to create Document IDs if '_id' not in self._resource: self._resource['_id'] = saved['id']
python
def _save(self): """ Save the resource It's better to use the create, update & delete methods instead of modifying an instance and calling save, because then we call the can_create, can_update & can_delete methods to check whether a user is permitted to make the changes. """ yield self.validate() db = self.db_client() saved = yield db.save_doc(self._resource) # Allow couch to create Document IDs if '_id' not in self._resource: self._resource['_id'] = saved['id']
[ "def", "_save", "(", "self", ")", ":", "yield", "self", ".", "validate", "(", ")", "db", "=", "self", ".", "db_client", "(", ")", "saved", "=", "yield", "db", ".", "save_doc", "(", "self", ".", "_resource", ")", "# Allow couch to create Document IDs", "if", "'_id'", "not", "in", "self", ".", "_resource", ":", "self", ".", "_resource", "[", "'_id'", "]", "=", "saved", "[", "'id'", "]" ]
Save the resource It's better to use the create, update & delete methods instead of modifying an instance and calling save, because then we call the can_create, can_update & can_delete methods to check whether a user is permitted to make the changes.
[ "Save", "the", "resource" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L321-L337
247,092
openpermissions/perch
perch/model.py
Document.save_subresource
def save_subresource(self, subresource): """ Save the sub-resource NOTE: Currently assumes subresources are stored within a dictionary, keyed with the subresource's ID """ data = deepcopy(subresource._resource) data.pop('id', None) data.pop(self.resource_type + '_id', None) subresources = getattr(self, subresource.parent_key, {}) subresources[subresource.id] = data setattr(self, subresource.parent_key, subresources) yield self._save()
python
def save_subresource(self, subresource): """ Save the sub-resource NOTE: Currently assumes subresources are stored within a dictionary, keyed with the subresource's ID """ data = deepcopy(subresource._resource) data.pop('id', None) data.pop(self.resource_type + '_id', None) subresources = getattr(self, subresource.parent_key, {}) subresources[subresource.id] = data setattr(self, subresource.parent_key, subresources) yield self._save()
[ "def", "save_subresource", "(", "self", ",", "subresource", ")", ":", "data", "=", "deepcopy", "(", "subresource", ".", "_resource", ")", "data", ".", "pop", "(", "'id'", ",", "None", ")", "data", ".", "pop", "(", "self", ".", "resource_type", "+", "'_id'", ",", "None", ")", "subresources", "=", "getattr", "(", "self", ",", "subresource", ".", "parent_key", ",", "{", "}", ")", "subresources", "[", "subresource", ".", "id", "]", "=", "data", "setattr", "(", "self", ",", "subresource", ".", "parent_key", ",", "subresources", ")", "yield", "self", ".", "_save", "(", ")" ]
Save the sub-resource NOTE: Currently assumes subresources are stored within a dictionary, keyed with the subresource's ID
[ "Save", "the", "sub", "-", "resource" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L340-L355
247,093
openpermissions/perch
perch/model.py
Document.delete
def delete(self, user): """Delete a resource""" if user: can_delete = yield self.can_delete(user) else: can_delete = False if not can_delete: raise exceptions.Unauthorized('User may not delete the resource') doc = { '_id': self.id, '_deleted': True } try: doc['_rev'] = self._rev except AttributeError: pass db = self.db_client() yield db.save_doc(doc) self._resource = doc
python
def delete(self, user): """Delete a resource""" if user: can_delete = yield self.can_delete(user) else: can_delete = False if not can_delete: raise exceptions.Unauthorized('User may not delete the resource') doc = { '_id': self.id, '_deleted': True } try: doc['_rev'] = self._rev except AttributeError: pass db = self.db_client() yield db.save_doc(doc) self._resource = doc
[ "def", "delete", "(", "self", ",", "user", ")", ":", "if", "user", ":", "can_delete", "=", "yield", "self", ".", "can_delete", "(", "user", ")", "else", ":", "can_delete", "=", "False", "if", "not", "can_delete", ":", "raise", "exceptions", ".", "Unauthorized", "(", "'User may not delete the resource'", ")", "doc", "=", "{", "'_id'", ":", "self", ".", "id", ",", "'_deleted'", ":", "True", "}", "try", ":", "doc", "[", "'_rev'", "]", "=", "self", ".", "_rev", "except", "AttributeError", ":", "pass", "db", "=", "self", ".", "db_client", "(", ")", "yield", "db", ".", "save_doc", "(", "doc", ")", "self", ".", "_resource", "=", "doc" ]
Delete a resource
[ "Delete", "a", "resource" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L358-L381
247,094
openpermissions/perch
perch/model.py
Document.delete_subresource
def delete_subresource(self, subresource): """ Delete the sub-resource NOTE: Currently assumes subresources are stored within a dictionary, keyed with the subresource's ID """ subresources = getattr(self, subresource.parent_key, {}) del subresources[subresource.id] yield self._save()
python
def delete_subresource(self, subresource): """ Delete the sub-resource NOTE: Currently assumes subresources are stored within a dictionary, keyed with the subresource's ID """ subresources = getattr(self, subresource.parent_key, {}) del subresources[subresource.id] yield self._save()
[ "def", "delete_subresource", "(", "self", ",", "subresource", ")", ":", "subresources", "=", "getattr", "(", "self", ",", "subresource", ".", "parent_key", ",", "{", "}", ")", "del", "subresources", "[", "subresource", ".", "id", "]", "yield", "self", ".", "_save", "(", ")" ]
Delete the sub-resource NOTE: Currently assumes subresources are stored within a dictionary, keyed with the subresource's ID
[ "Delete", "the", "sub", "-", "resource" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L384-L393
247,095
openpermissions/perch
perch/model.py
Document.state
def state(self): """Get the Document's state""" state = self._resource.get('state', self.default_state) if state in State: return state else: return getattr(State, state)
python
def state(self): """Get the Document's state""" state = self._resource.get('state', self.default_state) if state in State: return state else: return getattr(State, state)
[ "def", "state", "(", "self", ")", ":", "state", "=", "self", ".", "_resource", ".", "get", "(", "'state'", ",", "self", ".", "default_state", ")", "if", "state", "in", "State", ":", "return", "state", "else", ":", "return", "getattr", "(", "State", ",", "state", ")" ]
Get the Document's state
[ "Get", "the", "Document", "s", "state" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L433-L440
247,096
openpermissions/perch
perch/model.py
SubResource.get_parent
def get_parent(self): """ Get the parent resource from the database The get, create & update methods will populate the parent for you. Use this method in the cases where parent has not been populated. """ if not self._parent: self._parent = yield self.parent_resource.get(self.parent_id) raise Return(self._parent)
python
def get_parent(self): """ Get the parent resource from the database The get, create & update methods will populate the parent for you. Use this method in the cases where parent has not been populated. """ if not self._parent: self._parent = yield self.parent_resource.get(self.parent_id) raise Return(self._parent)
[ "def", "get_parent", "(", "self", ")", ":", "if", "not", "self", ".", "_parent", ":", "self", ".", "_parent", "=", "yield", "self", ".", "parent_resource", ".", "get", "(", "self", ".", "parent_id", ")", "raise", "Return", "(", "self", ".", "_parent", ")" ]
Get the parent resource from the database The get, create & update methods will populate the parent for you. Use this method in the cases where parent has not been populated.
[ "Get", "the", "parent", "resource", "from", "the", "database" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L469-L479
247,097
openpermissions/perch
perch/model.py
SubResource.parent_resources
def parent_resources(cls): """Get a list of parent resources, starting from the Document""" parent = cls.parent_resource parents = [parent] try: while True: parent = parent.parent_resource parents.append(parent) except AttributeError: pass parents.reverse() return parents
python
def parent_resources(cls): """Get a list of parent resources, starting from the Document""" parent = cls.parent_resource parents = [parent] try: while True: parent = parent.parent_resource parents.append(parent) except AttributeError: pass parents.reverse() return parents
[ "def", "parent_resources", "(", "cls", ")", ":", "parent", "=", "cls", ".", "parent_resource", "parents", "=", "[", "parent", "]", "try", ":", "while", "True", ":", "parent", "=", "parent", ".", "parent_resource", "parents", ".", "append", "(", "parent", ")", "except", "AttributeError", ":", "pass", "parents", ".", "reverse", "(", ")", "return", "parents" ]
Get a list of parent resources, starting from the Document
[ "Get", "a", "list", "of", "parent", "resources", "starting", "from", "the", "Document" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L482-L495
247,098
openpermissions/perch
perch/model.py
SubResource.create
def create(cls, user, **kwargs): """If the parent resource is not in an editable state, a child resource cannot be created.""" parent_id = kwargs.get(cls.parent_resource.resource_type + '_id') try: parent = yield cls.parent_resource.get(parent_id) except couch.NotFound: msg = 'Parent {} with id {} not found'.format( cls.parent_resource.resource_type, parent_id) raise exceptions.ValidationError(msg) if not parent.editable: err = 'Cannot create child of {} resource'.format(parent.state.name) raise exceptions.Unauthorized(err) resource = yield super(SubResource, cls).create(user, **kwargs) resource._parent = parent raise Return(resource)
python
def create(cls, user, **kwargs): """If the parent resource is not in an editable state, a child resource cannot be created.""" parent_id = kwargs.get(cls.parent_resource.resource_type + '_id') try: parent = yield cls.parent_resource.get(parent_id) except couch.NotFound: msg = 'Parent {} with id {} not found'.format( cls.parent_resource.resource_type, parent_id) raise exceptions.ValidationError(msg) if not parent.editable: err = 'Cannot create child of {} resource'.format(parent.state.name) raise exceptions.Unauthorized(err) resource = yield super(SubResource, cls).create(user, **kwargs) resource._parent = parent raise Return(resource)
[ "def", "create", "(", "cls", ",", "user", ",", "*", "*", "kwargs", ")", ":", "parent_id", "=", "kwargs", ".", "get", "(", "cls", ".", "parent_resource", ".", "resource_type", "+", "'_id'", ")", "try", ":", "parent", "=", "yield", "cls", ".", "parent_resource", ".", "get", "(", "parent_id", ")", "except", "couch", ".", "NotFound", ":", "msg", "=", "'Parent {} with id {} not found'", ".", "format", "(", "cls", ".", "parent_resource", ".", "resource_type", ",", "parent_id", ")", "raise", "exceptions", ".", "ValidationError", "(", "msg", ")", "if", "not", "parent", ".", "editable", ":", "err", "=", "'Cannot create child of {} resource'", ".", "format", "(", "parent", ".", "state", ".", "name", ")", "raise", "exceptions", ".", "Unauthorized", "(", "err", ")", "resource", "=", "yield", "super", "(", "SubResource", ",", "cls", ")", ".", "create", "(", "user", ",", "*", "*", "kwargs", ")", "resource", ".", "_parent", "=", "parent", "raise", "Return", "(", "resource", ")" ]
If the parent resource is not in an editable state, a child resource cannot be created.
[ "If", "parent", "resource", "is", "not", "an", "editable", "state", "should", "not", "be", "able", "to", "create", "." ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L520-L538
247,099
openpermissions/perch
perch/model.py
SubResource.update
def update(self, user, **kwargs): """If the parent resource is not in an editable state, the child cannot be updated""" yield self.get_parent() if not self.parent.editable: err = 'Cannot update child of {} resource'.format(self.parent.state.name) raise exceptions.Unauthorized(err) yield super(SubResource, self).update(user, **kwargs)
python
def update(self, user, **kwargs): """If the parent resource is not in an editable state, the child cannot be updated""" yield self.get_parent() if not self.parent.editable: err = 'Cannot update child of {} resource'.format(self.parent.state.name) raise exceptions.Unauthorized(err) yield super(SubResource, self).update(user, **kwargs)
[ "def", "update", "(", "self", ",", "user", ",", "*", "*", "kwargs", ")", ":", "yield", "self", ".", "get_parent", "(", ")", "if", "not", "self", ".", "parent", ".", "editable", ":", "err", "=", "'Cannot update child of {} resource'", ".", "format", "(", "self", ".", "parent", ".", "state", ".", "name", ")", "raise", "exceptions", ".", "Unauthorized", "(", "err", ")", "yield", "super", "(", "SubResource", ",", "self", ")", ".", "update", "(", "user", ",", "*", "*", "kwargs", ")" ]
If the parent resource is not in an editable state, the child cannot be updated
[ "If", "parent", "resource", "is", "not", "an", "editable", "state", "should", "not", "be", "able", "to", "update" ]
36d78994133918f3c52c187f19e50132960a0156
https://github.com/openpermissions/perch/blob/36d78994133918f3c52c187f19e50132960a0156/perch/model.py#L541-L549