Dataset schema (one record per Python function):
- repo: string, length 7-55
- path: string, length 4-223
- url: string, length 87-315
- code: string, length 75-104k
- code_tokens: list
- docstring: string, length 1-46.9k
- docstring_tokens: list
- language: string, 1 distinct value
- partition: string, 3 distinct values
- avg_line_len: float64, range 7.91-980
elastic/elasticsearch-py
elasticsearch/client/xpack/rollup.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/rollup.py#L57-L69
def put_job(self, id, body, params=None): """ `<>`_ :arg id: The ID of the job to create :arg body: The job configuration """ for param in (id, body): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "PUT", _make_path("_rollup", "job", id), params=params, body=body )
[ "def", "put_job", "(", "self", ",", "id", ",", "body", ",", "params", "=", "None", ")", ":", "for", "param", "in", "(", "id", ",", "body", ")", ":", "if", "param", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"Empty value passed for a required argument.\"", ")", "return", "self", ".", "transport", ".", "perform_request", "(", "\"PUT\"", ",", "_make_path", "(", "\"_rollup\"", ",", "\"job\"", ",", "id", ")", ",", "params", "=", "params", ",", "body", "=", "body", ")" ]
`<>`_ :arg id: The ID of the job to create :arg body: The job configuration
[ "<", ">", "_" ]
python
train
33.846154
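For context, a minimal usage sketch of the `put_job` call shown in this record. This is illustrative only: the host, job id and rollup configuration are placeholders, and it assumes a 6.x/7.x-era `elasticsearch-py` client where the rollup API lives under `es.xpack.rollup`, plus a cluster with the rollup feature enabled.

```python
from elasticsearch import Elasticsearch

es = Elasticsearch(["http://localhost:9200"])   # placeholder host

job_config = {
    "index_pattern": "sensor-*",           # indices to roll up (illustrative)
    "rollup_index": "sensor_rollup",       # where rolled-up documents go
    "cron": "*/30 * * * * ?",              # how often the job runs
    "page_size": 1000,
    "groups": {
        "date_histogram": {"field": "timestamp", "fixed_interval": "1h"}
    },
}

# PUTs the body to _rollup/job/<id>, as the wrapper above does.
es.xpack.rollup.put_job(id="sensor_hourly", body=job_config)
```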
svenkreiss/pysparkling
pysparkling/streaming/dstream.py
https://github.com/svenkreiss/pysparkling/blob/596d0ef2793100f7115efe228ff9bfc17beaa08d/pysparkling/streaming/dstream.py#L157-L166
def flatMap(self, f, preservesPartitioning=False): """Apply function f and flatten. :param f: mapping function :rtype: DStream """ return self.mapPartitions( lambda p: (e for pp in p for e in f(pp)), preservesPartitioning, )
[ "def", "flatMap", "(", "self", ",", "f", ",", "preservesPartitioning", "=", "False", ")", ":", "return", "self", ".", "mapPartitions", "(", "lambda", "p", ":", "(", "e", "for", "pp", "in", "p", "for", "e", "in", "f", "(", "pp", ")", ")", ",", "preservesPartitioning", ",", ")" ]
Apply function f and flatten. :param f: mapping function :rtype: DStream
[ "Apply", "function", "f", "and", "flatten", "." ]
python
train
28.8
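A minimal sketch of what `flatMap` does on a `DStream`, assuming pysparkling's PySpark-style streaming entry points (`Context`, `StreamingContext`, `queueStream` and `foreachRDD` come from its documented examples, not from the record above):

```python
from pysparkling import Context
from pysparkling.streaming import StreamingContext

ssc = StreamingContext(Context(), 0.1)            # 0.1 s batch interval
(
    ssc.queueStream([[4], [2, 7]])                # two micro-batches
    .flatMap(lambda e: [e, e * 10])               # every element yields two
    .foreachRDD(lambda rdd: print(rdd.collect()))
)
ssc.start()
ssc.awaitTermination(timeout=0.25)                # prints [4, 40] then [2, 20, 7, 70]
```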
natea/django-deployer
django_deployer/utils.py
https://github.com/natea/django-deployer/blob/5ce7d972db2f8500ec53ad89e7eb312d3360d074/django_deployer/utils.py#L26-L52
def get_template_filelist(repo_path, ignore_files=[], ignore_folders=[]): """ input: local repo path output: path list of files which need to be rendered """ default_ignore_files = ['.gitignore'] default_ignore_folders = ['.git'] ignore_files += default_ignore_files ignore_folders += default_ignore_folders filelist = [] for root, folders, files in os.walk(repo_path): for ignore_file in ignore_files: if ignore_file in files: files.remove(ignore_file) for ignore_folder in ignore_folders: if ignore_folder in folders: folders.remove(ignore_folder) for file_name in files: filelist.append( '%s/%s' % (root, file_name)) return filelist
[ "def", "get_template_filelist", "(", "repo_path", ",", "ignore_files", "=", "[", "]", ",", "ignore_folders", "=", "[", "]", ")", ":", "default_ignore_files", "=", "[", "'.gitignore'", "]", "default_ignore_folders", "=", "[", "'.git'", "]", "ignore_files", "+=", "default_ignore_files", "ignore_folders", "+=", "default_ignore_folders", "filelist", "=", "[", "]", "for", "root", ",", "folders", ",", "files", "in", "os", ".", "walk", "(", "repo_path", ")", ":", "for", "ignore_file", "in", "ignore_files", ":", "if", "ignore_file", "in", "files", ":", "files", ".", "remove", "(", "ignore_file", ")", "for", "ignore_folder", "in", "ignore_folders", ":", "if", "ignore_folder", "in", "folders", ":", "folders", ".", "remove", "(", "ignore_folder", ")", "for", "file_name", "in", "files", ":", "filelist", ".", "append", "(", "'%s/%s'", "%", "(", "root", ",", "file_name", ")", ")", "return", "filelist" ]
input: local repo path output: path list of files which need to be rendered
[ "input", ":", "local", "repo", "path", "output", ":", "path", "list", "of", "files", "which", "need", "to", "be", "rendered" ]
python
train
27.851852
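A short sketch of calling the helper above; the repository path and the extra ignore entries are placeholders:

```python
files = get_template_filelist(
    "./my-template-repo",                 # hypothetical local checkout
    ignore_files=["README.md"],
    ignore_folders=["node_modules"],
)
for path in files:
    print(path)                           # e.g. ./my-template-repo/app/settings.py
```

Note that the helper extends its mutable default arguments in place (`ignore_files += default_ignore_files`), so repeated calls that rely on the defaults accumulate entries across calls; callers that pass their own lists, as above, avoid that pitfall, though those lists are mutated too.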
labstreaminglayer/liblsl-Python
pylsl/pylsl.py
https://github.com/labstreaminglayer/liblsl-Python/blob/1ff6fe2794f8dba286b7491d1f7a4c915b8a0605/pylsl/pylsl.py#L430-L455
def push_sample(self, x, timestamp=0.0, pushthrough=True): """Push a sample into the outlet. Each entry in the list corresponds to one channel. Keyword arguments: x -- A list of values to push (one per channel). timestamp -- Optionally the capture time of the sample, in agreement with local_clock(); if omitted, the current time is used. (default 0.0) pushthrough -- Whether to push the sample through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True) """ if len(x) == self.channel_count: if self.channel_format == cf_string: x = [v.encode('utf-8') for v in x] handle_error(self.do_push_sample(self.obj, self.sample_type(*x), c_double(timestamp), c_int(pushthrough))) else: raise ValueError("length of the data must correspond to the " "stream's channel count.")
[ "def", "push_sample", "(", "self", ",", "x", ",", "timestamp", "=", "0.0", ",", "pushthrough", "=", "True", ")", ":", "if", "len", "(", "x", ")", "==", "self", ".", "channel_count", ":", "if", "self", ".", "channel_format", "==", "cf_string", ":", "x", "=", "[", "v", ".", "encode", "(", "'utf-8'", ")", "for", "v", "in", "x", "]", "handle_error", "(", "self", ".", "do_push_sample", "(", "self", ".", "obj", ",", "self", ".", "sample_type", "(", "*", "x", ")", ",", "c_double", "(", "timestamp", ")", ",", "c_int", "(", "pushthrough", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"length of the data must correspond to the \"", "\"stream's channel count.\"", ")" ]
Push a sample into the outlet. Each entry in the list corresponds to one channel. Keyword arguments: x -- A list of values to push (one per channel). timestamp -- Optionally the capture time of the sample, in agreement with local_clock(); if omitted, the current time is used. (default 0.0) pushthrough -- Whether to push the sample through to the receivers instead of buffering it with subsequent samples. Note that the chunk_size, if specified at outlet construction, takes precedence over the pushthrough flag. (default True)
[ "Push", "a", "sample", "into", "the", "outlet", "." ]
python
test
48.538462
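A minimal outbound-stream sketch built from pylsl's documented workflow; the stream name, channel count and sampling rate are placeholders:

```python
import random
import time

from pylsl import StreamInfo, StreamOutlet, local_clock

# Describe an 8-channel float stream at 100 Hz and create its outlet.
info = StreamInfo('ExampleStream', 'EEG', 8, 100, 'float32', 'example-uid-1234')
outlet = StreamOutlet(info)

for _ in range(1000):
    sample = [random.random() for _ in range(8)]      # one value per channel
    outlet.push_sample(sample, timestamp=local_clock())
    time.sleep(0.01)
```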
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/refgraph.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/refgraph.py#L92-L104
def _eliminate_leafs(self, graph): """ Eliminate leaf objects - that are objects not referencing any other objects in the list `graph`. Returns the list of objects without the objects identified as leafs. """ result = [] idset = set([id(x) for x in graph]) for n in graph: refset = set([id(x) for x in get_referents(n)]) if refset.intersection(idset): result.append(n) return result
[ "def", "_eliminate_leafs", "(", "self", ",", "graph", ")", ":", "result", "=", "[", "]", "idset", "=", "set", "(", "[", "id", "(", "x", ")", "for", "x", "in", "graph", "]", ")", "for", "n", "in", "graph", ":", "refset", "=", "set", "(", "[", "id", "(", "x", ")", "for", "x", "in", "get_referents", "(", "n", ")", "]", ")", "if", "refset", ".", "intersection", "(", "idset", ")", ":", "result", ".", "append", "(", "n", ")", "return", "result" ]
Eliminate leaf objects - that are objects not referencing any other objects in the list `graph`. Returns the list of objects without the objects identified as leafs.
[ "Eliminate", "leaf", "objects", "-", "that", "are", "objects", "not", "referencing", "any", "other", "objects", "in", "the", "list", "graph", ".", "Returns", "the", "list", "of", "objects", "without", "the", "objects", "identified", "as", "leafs", "." ]
python
train
37.076923
GaretJax/lancet
lancet/commands/repository.py
https://github.com/GaretJax/lancet/blob/cf438c5c6166b18ee0dc5ffce55220793019bb95/lancet/commands/repository.py#L26-L105
def pull_request(ctx, base_branch, open_pr, stop_timer): """Create a new pull request for this issue.""" lancet = ctx.obj review_status = lancet.config.get("tracker", "review_status") remote_name = lancet.config.get("repository", "remote_name") if not base_branch: base_branch = lancet.config.get("repository", "base_branch") # Get the issue issue = get_issue(lancet) transition = get_transition(ctx, lancet, issue, review_status) # Get the working branch branch = get_branch(lancet, issue, create=False) with taskstatus("Checking pre-requisites") as ts: if not branch: ts.abort("No working branch found") if lancet.tracker.whoami() not in issue.assignees: ts.abort("Issue currently not assigned to you") # TODO: Check mergeability # TODO: Check remote status (PR does not already exist) # Push to remote with taskstatus('Pushing to "{}"', remote_name) as ts: remote = lancet.repo.lookup_remote(remote_name) if not remote: ts.abort('Remote "{}" not found', remote_name) from ..git import CredentialsCallbacks remote.push([branch.name], callbacks=CredentialsCallbacks()) ts.ok('Pushed latest changes to "{}"', remote_name) # Create pull request with taskstatus("Creating pull request") as ts: template_path = lancet.config.get("repository", "pr_template") message = edit_template(template_path, issue=issue) if not message: ts.abort("You didn't provide a title for the pull request") title, body = message.split("\n", 1) title = title.strip() if not title: ts.abort("You didn't provide a title for the pull request") try: pr = lancet.scm_manager.create_pull_request( branch.branch_name, base_branch, title, body.strip("\n") ) except PullRequestAlreadyExists as e: pr = e.pull_request ts.ok("Pull request does already exist at {}", pr.link) else: ts.ok("Pull request created at {}", pr.link) # Update issue set_issue_status(lancet, issue, review_status, transition) # TODO: Post to activity stream on JIRA? # TODO: Post to Slack? # Stop harvest timer if stop_timer: with taskstatus("Pausing harvest timer") as ts: lancet.timer.pause() ts.ok("Harvest timer paused") # Open the pull request page in the browser if requested if open_pr: click.launch(pr.link)
[ "def", "pull_request", "(", "ctx", ",", "base_branch", ",", "open_pr", ",", "stop_timer", ")", ":", "lancet", "=", "ctx", ".", "obj", "review_status", "=", "lancet", ".", "config", ".", "get", "(", "\"tracker\"", ",", "\"review_status\"", ")", "remote_name", "=", "lancet", ".", "config", ".", "get", "(", "\"repository\"", ",", "\"remote_name\"", ")", "if", "not", "base_branch", ":", "base_branch", "=", "lancet", ".", "config", ".", "get", "(", "\"repository\"", ",", "\"base_branch\"", ")", "# Get the issue", "issue", "=", "get_issue", "(", "lancet", ")", "transition", "=", "get_transition", "(", "ctx", ",", "lancet", ",", "issue", ",", "review_status", ")", "# Get the working branch", "branch", "=", "get_branch", "(", "lancet", ",", "issue", ",", "create", "=", "False", ")", "with", "taskstatus", "(", "\"Checking pre-requisites\"", ")", "as", "ts", ":", "if", "not", "branch", ":", "ts", ".", "abort", "(", "\"No working branch found\"", ")", "if", "lancet", ".", "tracker", ".", "whoami", "(", ")", "not", "in", "issue", ".", "assignees", ":", "ts", ".", "abort", "(", "\"Issue currently not assigned to you\"", ")", "# TODO: Check mergeability", "# TODO: Check remote status (PR does not already exist)", "# Push to remote", "with", "taskstatus", "(", "'Pushing to \"{}\"'", ",", "remote_name", ")", "as", "ts", ":", "remote", "=", "lancet", ".", "repo", ".", "lookup_remote", "(", "remote_name", ")", "if", "not", "remote", ":", "ts", ".", "abort", "(", "'Remote \"{}\" not found'", ",", "remote_name", ")", "from", ".", ".", "git", "import", "CredentialsCallbacks", "remote", ".", "push", "(", "[", "branch", ".", "name", "]", ",", "callbacks", "=", "CredentialsCallbacks", "(", ")", ")", "ts", ".", "ok", "(", "'Pushed latest changes to \"{}\"'", ",", "remote_name", ")", "# Create pull request", "with", "taskstatus", "(", "\"Creating pull request\"", ")", "as", "ts", ":", "template_path", "=", "lancet", ".", "config", ".", "get", "(", "\"repository\"", ",", "\"pr_template\"", ")", "message", "=", "edit_template", "(", "template_path", ",", "issue", "=", "issue", ")", "if", "not", "message", ":", "ts", ".", "abort", "(", "\"You didn't provide a title for the pull request\"", ")", "title", ",", "body", "=", "message", ".", "split", "(", "\"\\n\"", ",", "1", ")", "title", "=", "title", ".", "strip", "(", ")", "if", "not", "title", ":", "ts", ".", "abort", "(", "\"You didn't provide a title for the pull request\"", ")", "try", ":", "pr", "=", "lancet", ".", "scm_manager", ".", "create_pull_request", "(", "branch", ".", "branch_name", ",", "base_branch", ",", "title", ",", "body", ".", "strip", "(", "\"\\n\"", ")", ")", "except", "PullRequestAlreadyExists", "as", "e", ":", "pr", "=", "e", ".", "pull_request", "ts", ".", "ok", "(", "\"Pull request does already exist at {}\"", ",", "pr", ".", "link", ")", "else", ":", "ts", ".", "ok", "(", "\"Pull request created at {}\"", ",", "pr", ".", "link", ")", "# Update issue", "set_issue_status", "(", "lancet", ",", "issue", ",", "review_status", ",", "transition", ")", "# TODO: Post to activity stream on JIRA?", "# TODO: Post to Slack?", "# Stop harvest timer", "if", "stop_timer", ":", "with", "taskstatus", "(", "\"Pausing harvest timer\"", ")", "as", "ts", ":", "lancet", ".", "timer", ".", "pause", "(", ")", "ts", ".", "ok", "(", "\"Harvest timer paused\"", ")", "# Open the pull request page in the browser if requested", "if", "open_pr", ":", "click", ".", "launch", "(", "pr", ".", "link", ")" ]
Create a new pull request for this issue.
[ "Create", "a", "new", "pull", "request", "for", "this", "issue", "." ]
python
train
31.4
RPi-Distro/python-gpiozero
gpiozero/tools.py
https://github.com/RPi-Distro/python-gpiozero/blob/7b67374fd0c8c4fde5586d9bad9531f076db9c0c/gpiozero/tools.py#L437-L470
def queued(values, qsize): """ Queues up readings from *values* (the number of readings queued is determined by *qsize*) and begins yielding values only when the queue is full. For example, to "cascade" values along a sequence of LEDs:: from gpiozero import LEDBoard, Button from gpiozero.tools import queued from signal import pause leds = LEDBoard(5, 6, 13, 19, 26) btn = Button(17) for i in range(4): leds[i].source = queued(leds[i + 1], 5) leds[i].source_delay = 0.01 leds[4].source = btn pause() """ values = [_normalize(v) for v in values] if qsize < 1: raise ValueError("qsize must be 1 or larger") q = [] it = iter(values) try: for i in range(qsize): q.append(next(it)) for i in cycle(range(qsize)): yield q[i] q[i] = next(it) except StopIteration: pass
[ "def", "queued", "(", "values", ",", "qsize", ")", ":", "values", "=", "[", "_normalize", "(", "v", ")", "for", "v", "in", "values", "]", "if", "qsize", "<", "1", ":", "raise", "ValueError", "(", "\"qsize must be 1 or larger\"", ")", "q", "=", "[", "]", "it", "=", "iter", "(", "values", ")", "try", ":", "for", "i", "in", "range", "(", "qsize", ")", ":", "q", ".", "append", "(", "next", "(", "it", ")", ")", "for", "i", "in", "cycle", "(", "range", "(", "qsize", ")", ")", ":", "yield", "q", "[", "i", "]", "q", "[", "i", "]", "=", "next", "(", "it", ")", "except", "StopIteration", ":", "pass" ]
Queues up readings from *values* (the number of readings queued is determined by *qsize*) and begins yielding values only when the queue is full. For example, to "cascade" values along a sequence of LEDs:: from gpiozero import LEDBoard, Button from gpiozero.tools import queued from signal import pause leds = LEDBoard(5, 6, 13, 19, 26) btn = Button(17) for i in range(4): leds[i].source = queued(leds[i + 1], 5) leds[i].source_delay = 0.01 leds[4].source = btn pause()
[ "Queues", "up", "readings", "from", "*", "values", "*", "(", "the", "number", "of", "readings", "queued", "is", "determined", "by", "*", "qsize", "*", ")", "and", "begins", "yielding", "values", "only", "when", "the", "queue", "is", "full", ".", "For", "example", "to", "cascade", "values", "along", "a", "sequence", "of", "LEDs", "::" ]
python
train
27.441176
OSSOS/MOP
src/ossos/core/ossos/storage.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/storage.py#L1207-L1218
def vofile(filename, **kwargs): """ Open and return a handle on a VOSpace data connection @param filename: @param kwargs: @return: """ basename = os.path.basename(filename) if os.access(basename, os.R_OK): return open(basename, 'r') kwargs['view'] = kwargs.get('view', 'data') return client.open(filename, **kwargs)
[ "def", "vofile", "(", "filename", ",", "*", "*", "kwargs", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "if", "os", ".", "access", "(", "basename", ",", "os", ".", "R_OK", ")", ":", "return", "open", "(", "basename", ",", "'r'", ")", "kwargs", "[", "'view'", "]", "=", "kwargs", ".", "get", "(", "'view'", ",", "'data'", ")", "return", "client", ".", "open", "(", "filename", ",", "*", "*", "kwargs", ")" ]
Open and return a handle on a VOSpace data connection @param filename: @param kwargs: @return:
[ "Open", "and", "return", "a", "handle", "on", "a", "VOSpace", "data", "connection" ]
python
train
29.333333
oceanprotocol/squid-py
squid_py/keeper/conditions/condition_base.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/keeper/conditions/condition_base.py#L16-L29
def generate_id(self, agreement_id, types, values): """ Generate id for the condition. :param agreement_id: id of the agreement, hex str :param types: list of types :param values: list of values :return: id, str """ values_hash = utils.generate_multi_value_hash(types, values) return utils.generate_multi_value_hash( ['bytes32', 'address', 'bytes32'], [agreement_id, self.address, values_hash] )
[ "def", "generate_id", "(", "self", ",", "agreement_id", ",", "types", ",", "values", ")", ":", "values_hash", "=", "utils", ".", "generate_multi_value_hash", "(", "types", ",", "values", ")", "return", "utils", ".", "generate_multi_value_hash", "(", "[", "'bytes32'", ",", "'address'", ",", "'bytes32'", "]", ",", "[", "agreement_id", ",", "self", ".", "address", ",", "values_hash", "]", ")" ]
Generate id for the condition. :param agreement_id: id of the agreement, hex str :param types: list of types :param values: list of values :return: id, str
[ "Generate", "id", "for", "the", "condition", "." ]
python
train
34.785714
codeinn/vcs
vcs/backends/git/repository.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/git/repository.py#L454-L524
def get_changesets(self, start=None, end=None, start_date=None, end_date=None, branch_name=None, reverse=False): """ Returns iterator of ``GitChangeset`` objects from start to end (both are inclusive), in ascending date order (unless ``reverse`` is set). :param start: changeset ID, as str; first returned changeset :param end: changeset ID, as str; last returned changeset :param start_date: if specified, changesets with commit date less than ``start_date`` would be filtered out from returned set :param end_date: if specified, changesets with commit date greater than ``end_date`` would be filtered out from returned set :param branch_name: if specified, changesets not reachable from given branch would be filtered out from returned set :param reverse: if ``True``, returned generator would be reversed (meaning that returned changesets would have descending date order) :raise BranchDoesNotExistError: If given ``branch_name`` does not exist. :raise ChangesetDoesNotExistError: If changeset for given ``start`` or ``end`` could not be found. """ if branch_name and branch_name not in self.branches: raise BranchDoesNotExistError("Branch '%s' not found" \ % branch_name) # %H at format means (full) commit hash, initial hashes are retrieved # in ascending date order cmd_template = 'log --date-order --reverse --pretty=format:"%H"' cmd_params = {} if start_date: cmd_template += ' --since "$since"' cmd_params['since'] = start_date.strftime('%m/%d/%y %H:%M:%S') if end_date: cmd_template += ' --until "$until"' cmd_params['until'] = end_date.strftime('%m/%d/%y %H:%M:%S') if branch_name: cmd_template += ' $branch_name' cmd_params['branch_name'] = branch_name else: rev_filter = settings.GIT_REV_FILTER cmd_template += ' %s' % (rev_filter) cmd = string.Template(cmd_template).safe_substitute(**cmd_params) revs = self.run_git_command(cmd)[0].splitlines() start_pos = 0 end_pos = len(revs) if start: _start = self._get_revision(start) try: start_pos = revs.index(_start) except ValueError: pass if end is not None: _end = self._get_revision(end) try: end_pos = revs.index(_end) except ValueError: pass if None not in [start, end] and start_pos > end_pos: raise RepositoryError('start cannot be after end') if end_pos is not None: end_pos += 1 revs = revs[start_pos:end_pos] if reverse: revs = reversed(revs) return CollectionGenerator(self, revs)
[ "def", "get_changesets", "(", "self", ",", "start", "=", "None", ",", "end", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "branch_name", "=", "None", ",", "reverse", "=", "False", ")", ":", "if", "branch_name", "and", "branch_name", "not", "in", "self", ".", "branches", ":", "raise", "BranchDoesNotExistError", "(", "\"Branch '%s' not found\"", "%", "branch_name", ")", "# %H at format means (full) commit hash, initial hashes are retrieved", "# in ascending date order", "cmd_template", "=", "'log --date-order --reverse --pretty=format:\"%H\"'", "cmd_params", "=", "{", "}", "if", "start_date", ":", "cmd_template", "+=", "' --since \"$since\"'", "cmd_params", "[", "'since'", "]", "=", "start_date", ".", "strftime", "(", "'%m/%d/%y %H:%M:%S'", ")", "if", "end_date", ":", "cmd_template", "+=", "' --until \"$until\"'", "cmd_params", "[", "'until'", "]", "=", "end_date", ".", "strftime", "(", "'%m/%d/%y %H:%M:%S'", ")", "if", "branch_name", ":", "cmd_template", "+=", "' $branch_name'", "cmd_params", "[", "'branch_name'", "]", "=", "branch_name", "else", ":", "rev_filter", "=", "settings", ".", "GIT_REV_FILTER", "cmd_template", "+=", "' %s'", "%", "(", "rev_filter", ")", "cmd", "=", "string", ".", "Template", "(", "cmd_template", ")", ".", "safe_substitute", "(", "*", "*", "cmd_params", ")", "revs", "=", "self", ".", "run_git_command", "(", "cmd", ")", "[", "0", "]", ".", "splitlines", "(", ")", "start_pos", "=", "0", "end_pos", "=", "len", "(", "revs", ")", "if", "start", ":", "_start", "=", "self", ".", "_get_revision", "(", "start", ")", "try", ":", "start_pos", "=", "revs", ".", "index", "(", "_start", ")", "except", "ValueError", ":", "pass", "if", "end", "is", "not", "None", ":", "_end", "=", "self", ".", "_get_revision", "(", "end", ")", "try", ":", "end_pos", "=", "revs", ".", "index", "(", "_end", ")", "except", "ValueError", ":", "pass", "if", "None", "not", "in", "[", "start", ",", "end", "]", "and", "start_pos", ">", "end_pos", ":", "raise", "RepositoryError", "(", "'start cannot be after end'", ")", "if", "end_pos", "is", "not", "None", ":", "end_pos", "+=", "1", "revs", "=", "revs", "[", "start_pos", ":", "end_pos", "]", "if", "reverse", ":", "revs", "=", "reversed", "(", "revs", ")", "return", "CollectionGenerator", "(", "self", ",", "revs", ")" ]
Returns iterator of ``GitChangeset`` objects from start to end (both are inclusive), in ascending date order (unless ``reverse`` is set). :param start: changeset ID, as str; first returned changeset :param end: changeset ID, as str; last returned changeset :param start_date: if specified, changesets with commit date less than ``start_date`` would be filtered out from returned set :param end_date: if specified, changesets with commit date greater than ``end_date`` would be filtered out from returned set :param branch_name: if specified, changesets not reachable from given branch would be filtered out from returned set :param reverse: if ``True``, returned generator would be reversed (meaning that returned changesets would have descending date order) :raise BranchDoesNotExistError: If given ``branch_name`` does not exist. :raise ChangesetDoesNotExistError: If changeset for given ``start`` or ``end`` could not be found.
[ "Returns", "iterator", "of", "GitChangeset", "objects", "from", "start", "to", "end", "(", "both", "are", "inclusive", ")", "in", "ascending", "date", "order", "(", "unless", "reverse", "is", "set", ")", "." ]
python
train
41.394366
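A usage sketch for the method above, assuming the package's `GitRepository` entry point and a hypothetical local clone; the branch name, date and changeset attributes shown are illustrative:

```python
from datetime import datetime

from vcs.backends.git import GitRepository

repo = GitRepository('/path/to/local/clone')          # placeholder path
for changeset in repo.get_changesets(branch_name='master',
                                     start_date=datetime(2014, 1, 1)):
    print(changeset.raw_id, changeset.date)
```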
waqasbhatti/astrobase
astrobase/astrokep.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/astrokep.py#L1269-L1472
def epd_kepler_lightcurve(lcdict, xccol='mom_centr1', yccol='mom_centr2', timestoignore=None, filterflags=True, writetodict=True, epdsmooth=5): '''This runs EPD on the Kepler light curve. Following Huang et al. 2015, we fit the following EPD function to a smoothed light curve, and then subtract it to obtain EPD corrected magnitudes:: f = c0 + c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) + c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) + c9*bgv + c10*bge By default, this function removes points in the Kepler LC that have ANY quality flags set. Parameters ---------- lcdict : lcdict An `lcdict` produced by `consolidate_kepler_fitslc` or `read_kepler_fitslc`. xcol,ycol : str Indicates the x and y coordinate column names to use from the Kepler LC in the EPD fit. timestoignore : list of tuples This is of the form:: [(time1_start, time1_end), (time2_start, time2_end), ...] and indicates the start and end times to mask out of the final lcdict. Use this to remove anything that wasn't caught by the quality flags. filterflags : bool If True, will remove any measurements that have non-zero quality flags present. This usually indicates an issue with the instrument or spacecraft. writetodict : bool If writetodict is True, adds the following columns to the lcdict:: epd_time = time array epd_sapflux = uncorrected flux before EPD epd_epdsapflux = corrected flux after EPD epd_epdsapcorr = EPD flux corrections epd_bkg = background array epd_bkg_err = background errors array epd_xcc = xcoord array epd_ycc = ycoord array epd_quality = quality flag array and updates the 'columns' list in the lcdict as well. epdsmooth : int Sets the number of light curve points to smooth over when generating the EPD fit function. Returns ------- tuple Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit) ''' times, fluxes, background, background_err = (lcdict['time'], lcdict['sap']['sap_flux'], lcdict['sap']['sap_bkg'], lcdict['sap']['sap_bkg_err']) xcc = lcdict[xccol] ycc = lcdict[yccol] flags = lcdict['sap_quality'] # filter all bad LC points as noted by quality flags if filterflags: nbefore = times.size filterind = flags == 0 times = times[filterind] fluxes = fluxes[filterind] background = background[filterind] background_err = background_err[filterind] xcc = xcc[filterind] ycc = ycc[filterind] flags = flags[filterind] nafter = times.size LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s' % (nbefore, nafter)) # remove nans find = (npisfinite(xcc) & npisfinite(ycc) & npisfinite(times) & npisfinite(fluxes) & npisfinite(background) & npisfinite(background_err)) nbefore = times.size times = times[find] fluxes = fluxes[find] background = background[find] background_err = background_err[find] xcc = xcc[find] ycc = ycc[find] flags = flags[find] nafter = times.size LOGINFO('removed nans, ndet before = %s, ndet after = %s' % (nbefore, nafter)) # exclude all times in timestoignore if (timestoignore and isinstance(timestoignore, list) and len(timestoignore) > 0): exclind = npfull_like(times,True) nbefore = times.size # apply all the masks for ignoretime in timestoignore: time0, time1 = ignoretime[0], ignoretime[1] thismask = (times > time0) & (times < time1) exclind = exclind & thismask # quantities after masks have been applied times = times[exclind] fluxes = fluxes[exclind] background = background[exclind] background_err = background_err[exclind] xcc = xcc[exclind] ycc = ycc[exclind] flags = flags[exclind] nafter = times.size LOGINFO('removed timestoignore, 
ndet before = %s, ndet after = %s' % (nbefore, nafter)) # now that we're all done, we can do EPD # first, smooth the light curve smoothedfluxes = median_filter(fluxes, size=epdsmooth) # initial fit coeffs initcoeffs = npones(11) # fit the the smoothed mags and find better coeffs leastsqfit = leastsq(_epd_residual, initcoeffs, args=(smoothedfluxes, xcc, ycc, background, background_err)) # if the fit succeeds, then get the EPD fluxes if leastsqfit[-1] in (1,2,3,4): fitcoeffs = leastsqfit[0] epdfit = _epd_function(fitcoeffs, fluxes, xcc, ycc, background, background_err) epdfluxes = npmedian(fluxes) + fluxes - epdfit # write these to the dictionary if requested if writetodict: lcdict['epd'] = {} lcdict['epd']['time'] = times lcdict['epd']['sapflux'] = fluxes lcdict['epd']['epdsapflux'] = epdfluxes lcdict['epd']['epdsapcorr'] = epdfit lcdict['epd']['bkg'] = background lcdict['epd']['bkg_err'] = background_err lcdict['epd']['xcc'] = xcc lcdict['epd']['ycc'] = ycc lcdict['epd']['quality'] = flags for newcol in ['epd.time','epd.sapflux', 'epd.epdsapflux','epd.epdsapcorr', 'epd.bkg','epd.bkg.err', 'epd.xcc','epd.ycc', 'epd.quality']: if newcol not in lcdict['columns']: lcdict['columns'].append(newcol) return times, epdfluxes, fitcoeffs, epdfit else: LOGERROR('could not fit EPD function to light curve') return None, None, None, None
[ "def", "epd_kepler_lightcurve", "(", "lcdict", ",", "xccol", "=", "'mom_centr1'", ",", "yccol", "=", "'mom_centr2'", ",", "timestoignore", "=", "None", ",", "filterflags", "=", "True", ",", "writetodict", "=", "True", ",", "epdsmooth", "=", "5", ")", ":", "times", ",", "fluxes", ",", "background", ",", "background_err", "=", "(", "lcdict", "[", "'time'", "]", ",", "lcdict", "[", "'sap'", "]", "[", "'sap_flux'", "]", ",", "lcdict", "[", "'sap'", "]", "[", "'sap_bkg'", "]", ",", "lcdict", "[", "'sap'", "]", "[", "'sap_bkg_err'", "]", ")", "xcc", "=", "lcdict", "[", "xccol", "]", "ycc", "=", "lcdict", "[", "yccol", "]", "flags", "=", "lcdict", "[", "'sap_quality'", "]", "# filter all bad LC points as noted by quality flags", "if", "filterflags", ":", "nbefore", "=", "times", ".", "size", "filterind", "=", "flags", "==", "0", "times", "=", "times", "[", "filterind", "]", "fluxes", "=", "fluxes", "[", "filterind", "]", "background", "=", "background", "[", "filterind", "]", "background_err", "=", "background_err", "[", "filterind", "]", "xcc", "=", "xcc", "[", "filterind", "]", "ycc", "=", "ycc", "[", "filterind", "]", "flags", "=", "flags", "[", "filterind", "]", "nafter", "=", "times", ".", "size", "LOGINFO", "(", "'applied quality flag filter, ndet before = %s, ndet after = %s'", "%", "(", "nbefore", ",", "nafter", ")", ")", "# remove nans", "find", "=", "(", "npisfinite", "(", "xcc", ")", "&", "npisfinite", "(", "ycc", ")", "&", "npisfinite", "(", "times", ")", "&", "npisfinite", "(", "fluxes", ")", "&", "npisfinite", "(", "background", ")", "&", "npisfinite", "(", "background_err", ")", ")", "nbefore", "=", "times", ".", "size", "times", "=", "times", "[", "find", "]", "fluxes", "=", "fluxes", "[", "find", "]", "background", "=", "background", "[", "find", "]", "background_err", "=", "background_err", "[", "find", "]", "xcc", "=", "xcc", "[", "find", "]", "ycc", "=", "ycc", "[", "find", "]", "flags", "=", "flags", "[", "find", "]", "nafter", "=", "times", ".", "size", "LOGINFO", "(", "'removed nans, ndet before = %s, ndet after = %s'", "%", "(", "nbefore", ",", "nafter", ")", ")", "# exclude all times in timestoignore", "if", "(", "timestoignore", "and", "isinstance", "(", "timestoignore", ",", "list", ")", "and", "len", "(", "timestoignore", ")", ">", "0", ")", ":", "exclind", "=", "npfull_like", "(", "times", ",", "True", ")", "nbefore", "=", "times", ".", "size", "# apply all the masks", "for", "ignoretime", "in", "timestoignore", ":", "time0", ",", "time1", "=", "ignoretime", "[", "0", "]", ",", "ignoretime", "[", "1", "]", "thismask", "=", "(", "times", ">", "time0", ")", "&", "(", "times", "<", "time1", ")", "exclind", "=", "exclind", "&", "thismask", "# quantities after masks have been applied", "times", "=", "times", "[", "exclind", "]", "fluxes", "=", "fluxes", "[", "exclind", "]", "background", "=", "background", "[", "exclind", "]", "background_err", "=", "background_err", "[", "exclind", "]", "xcc", "=", "xcc", "[", "exclind", "]", "ycc", "=", "ycc", "[", "exclind", "]", "flags", "=", "flags", "[", "exclind", "]", "nafter", "=", "times", ".", "size", "LOGINFO", "(", "'removed timestoignore, ndet before = %s, ndet after = %s'", "%", "(", "nbefore", ",", "nafter", ")", ")", "# now that we're all done, we can do EPD", "# first, smooth the light curve", "smoothedfluxes", "=", "median_filter", "(", "fluxes", ",", "size", "=", "epdsmooth", ")", "# initial fit coeffs", "initcoeffs", "=", "npones", "(", "11", ")", "# fit the the smoothed mags and find better coeffs", "leastsqfit", 
"=", "leastsq", "(", "_epd_residual", ",", "initcoeffs", ",", "args", "=", "(", "smoothedfluxes", ",", "xcc", ",", "ycc", ",", "background", ",", "background_err", ")", ")", "# if the fit succeeds, then get the EPD fluxes", "if", "leastsqfit", "[", "-", "1", "]", "in", "(", "1", ",", "2", ",", "3", ",", "4", ")", ":", "fitcoeffs", "=", "leastsqfit", "[", "0", "]", "epdfit", "=", "_epd_function", "(", "fitcoeffs", ",", "fluxes", ",", "xcc", ",", "ycc", ",", "background", ",", "background_err", ")", "epdfluxes", "=", "npmedian", "(", "fluxes", ")", "+", "fluxes", "-", "epdfit", "# write these to the dictionary if requested", "if", "writetodict", ":", "lcdict", "[", "'epd'", "]", "=", "{", "}", "lcdict", "[", "'epd'", "]", "[", "'time'", "]", "=", "times", "lcdict", "[", "'epd'", "]", "[", "'sapflux'", "]", "=", "fluxes", "lcdict", "[", "'epd'", "]", "[", "'epdsapflux'", "]", "=", "epdfluxes", "lcdict", "[", "'epd'", "]", "[", "'epdsapcorr'", "]", "=", "epdfit", "lcdict", "[", "'epd'", "]", "[", "'bkg'", "]", "=", "background", "lcdict", "[", "'epd'", "]", "[", "'bkg_err'", "]", "=", "background_err", "lcdict", "[", "'epd'", "]", "[", "'xcc'", "]", "=", "xcc", "lcdict", "[", "'epd'", "]", "[", "'ycc'", "]", "=", "ycc", "lcdict", "[", "'epd'", "]", "[", "'quality'", "]", "=", "flags", "for", "newcol", "in", "[", "'epd.time'", ",", "'epd.sapflux'", ",", "'epd.epdsapflux'", ",", "'epd.epdsapcorr'", ",", "'epd.bkg'", ",", "'epd.bkg.err'", ",", "'epd.xcc'", ",", "'epd.ycc'", ",", "'epd.quality'", "]", ":", "if", "newcol", "not", "in", "lcdict", "[", "'columns'", "]", ":", "lcdict", "[", "'columns'", "]", ".", "append", "(", "newcol", ")", "return", "times", ",", "epdfluxes", ",", "fitcoeffs", ",", "epdfit", "else", ":", "LOGERROR", "(", "'could not fit EPD function to light curve'", ")", "return", "None", ",", "None", ",", "None", ",", "None" ]
This runs EPD on the Kepler light curve. Following Huang et al. 2015, we fit the following EPD function to a smoothed light curve, and then subtract it to obtain EPD corrected magnitudes:: f = c0 + c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) + c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) + c9*bgv + c10*bge By default, this function removes points in the Kepler LC that have ANY quality flags set. Parameters ---------- lcdict : lcdict An `lcdict` produced by `consolidate_kepler_fitslc` or `read_kepler_fitslc`. xcol,ycol : str Indicates the x and y coordinate column names to use from the Kepler LC in the EPD fit. timestoignore : list of tuples This is of the form:: [(time1_start, time1_end), (time2_start, time2_end), ...] and indicates the start and end times to mask out of the final lcdict. Use this to remove anything that wasn't caught by the quality flags. filterflags : bool If True, will remove any measurements that have non-zero quality flags present. This usually indicates an issue with the instrument or spacecraft. writetodict : bool If writetodict is True, adds the following columns to the lcdict:: epd_time = time array epd_sapflux = uncorrected flux before EPD epd_epdsapflux = corrected flux after EPD epd_epdsapcorr = EPD flux corrections epd_bkg = background array epd_bkg_err = background errors array epd_xcc = xcoord array epd_ycc = ycoord array epd_quality = quality flag array and updates the 'columns' list in the lcdict as well. epdsmooth : int Sets the number of light curve points to smooth over when generating the EPD fit function. Returns ------- tuple Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit)
[ "This", "runs", "EPD", "on", "the", "Kepler", "light", "curve", "." ]
python
valid
31.617647
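A call sketch for the function above, assuming an `lcdict` produced by `read_kepler_fitslc` as the docstring describes; the FITS filename, mask window and smoothing width are placeholders:

```python
from astrobase.astrokep import read_kepler_fitslc, epd_kepler_lightcurve

lcdict = read_kepler_fitslc('kplr012345678-2013098041711_llc.fits')  # placeholder file

times, epdfluxes, fitcoeffs, epdfit = epd_kepler_lightcurve(
    lcdict,
    timestoignore=[(55100.0, 55110.0)],   # mask a known-bad time window
    epdsmooth=11,
)

if times is None:
    print('EPD fit failed')
else:
    print('EPD-corrected %d points' % len(times))
```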
pypa/pipenv
pipenv/patched/notpip/_vendor/html5lib/treebuilders/etree_lxml.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/treebuilders/etree_lxml.py#L134-L172
def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] def serializeElement(element): if not hasattr(element, "tag"): if element.docinfo.internalDTD: if element.docinfo.doctype: dtd_str = element.docinfo.doctype else: dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name rv.append(dtd_str) serializeElement(element.getroot()) elif element.tag == comment_type: rv.append("<!--%s-->" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (element.tag,)) else: attr = " ".join(["%s=\"%s\"" % (name, value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("</%s>" % (element.tag,)) if hasattr(element, "tail") and element.tail: rv.append(element.tail) serializeElement(element) return "".join(rv)
[ "def", "tostring", "(", "element", ")", ":", "rv", "=", "[", "]", "def", "serializeElement", "(", "element", ")", ":", "if", "not", "hasattr", "(", "element", ",", "\"tag\"", ")", ":", "if", "element", ".", "docinfo", ".", "internalDTD", ":", "if", "element", ".", "docinfo", ".", "doctype", ":", "dtd_str", "=", "element", ".", "docinfo", ".", "doctype", "else", ":", "dtd_str", "=", "\"<!DOCTYPE %s>\"", "%", "element", ".", "docinfo", ".", "root_name", "rv", ".", "append", "(", "dtd_str", ")", "serializeElement", "(", "element", ".", "getroot", "(", ")", ")", "elif", "element", ".", "tag", "==", "comment_type", ":", "rv", ".", "append", "(", "\"<!--%s-->\"", "%", "(", "element", ".", "text", ",", ")", ")", "else", ":", "# This is assumed to be an ordinary element", "if", "not", "element", ".", "attrib", ":", "rv", ".", "append", "(", "\"<%s>\"", "%", "(", "element", ".", "tag", ",", ")", ")", "else", ":", "attr", "=", "\" \"", ".", "join", "(", "[", "\"%s=\\\"%s\\\"\"", "%", "(", "name", ",", "value", ")", "for", "name", ",", "value", "in", "element", ".", "attrib", ".", "items", "(", ")", "]", ")", "rv", ".", "append", "(", "\"<%s %s>\"", "%", "(", "element", ".", "tag", ",", "attr", ")", ")", "if", "element", ".", "text", ":", "rv", ".", "append", "(", "element", ".", "text", ")", "for", "child", "in", "element", ":", "serializeElement", "(", "child", ")", "rv", ".", "append", "(", "\"</%s>\"", "%", "(", "element", ".", "tag", ",", ")", ")", "if", "hasattr", "(", "element", ",", "\"tail\"", ")", "and", "element", ".", "tail", ":", "rv", ".", "append", "(", "element", ".", "tail", ")", "serializeElement", "(", "element", ")", "return", "\"\"", ".", "join", "(", "rv", ")" ]
Serialize an element and its child nodes to a string
[ "Serialize", "an", "element", "and", "its", "child", "nodes", "to", "a", "string" ]
python
train
32.384615
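A small sketch of what this serializer does, using the upstream html5lib package rather than pipenv's vendored copy, and assuming the lxml treebuilder (and lxml itself) is installed:

```python
import html5lib
from html5lib.treebuilders.etree_lxml import tostring

# html5lib's 'lxml' treebuilder returns a document object without a .tag
# attribute, so tostring() descends to the root and emits the elements,
# attributes and text it finds.
document = html5lib.parse('<p class="x">hello', treebuilder='lxml')
print(tostring(document))
```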
Nukesor/pueue
pueue/daemon/process_handler.py
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/daemon/process_handler.py#L240-L247
def pause_process(self, key): """Pause a specific process.""" if key in self.processes and key not in self.paused: os.killpg(os.getpgid(self.processes[key].pid), signal.SIGSTOP) self.queue[key]['status'] = 'paused' self.paused.append(key) return True return False
[ "def", "pause_process", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "processes", "and", "key", "not", "in", "self", ".", "paused", ":", "os", ".", "killpg", "(", "os", ".", "getpgid", "(", "self", ".", "processes", "[", "key", "]", ".", "pid", ")", ",", "signal", ".", "SIGSTOP", ")", "self", ".", "queue", "[", "key", "]", "[", "'status'", "]", "=", "'paused'", "self", ".", "paused", ".", "append", "(", "key", ")", "return", "True", "return", "False" ]
Pause a specific process.
[ "Pause", "a", "specific", "processes", "." ]
python
train
41.25
spyder-ide/spyder
spyder/plugins/variableexplorer/widgets/dataframeeditor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/variableexplorer/widgets/dataframeeditor.py#L170-L177
def _axis_levels(self, axis): """ Return the number of levels in the labels taking into account the axis. Get the number of levels for the columns (0) or rows (1). """ ax = self._axis(axis) return 1 if not hasattr(ax, 'levels') else len(ax.levels)
[ "def", "_axis_levels", "(", "self", ",", "axis", ")", ":", "ax", "=", "self", ".", "_axis", "(", "axis", ")", "return", "1", "if", "not", "hasattr", "(", "ax", ",", "'levels'", ")", "else", "len", "(", "ax", ".", "levels", ")" ]
Return the number of levels in the labels taking into account the axis. Get the number of levels for the columns (0) or rows (1).
[ "Return", "the", "number", "of", "levels", "in", "the", "labels", "taking", "into", "account", "the", "axis", ".", "Get", "the", "number", "of", "levels", "for", "the", "columns", "(", "0", ")", "or", "rows", "(", "1", ")", "." ]
python
train
37
libyal/dtfabric
dtfabric/reader.py
https://github.com/libyal/dtfabric/blob/0d2b5719fa257f6e5c661a406737ebcf8c8db266/dtfabric/reader.py#L439-L488
def _ReadFixedSizeDataTypeDefinition( self, definitions_registry, definition_values, data_type_definition_class, definition_name, supported_attributes, default_size=definitions.SIZE_NATIVE, default_units='bytes', is_member=False, supported_size_values=None): """Reads a fixed-size data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supported_attributes (set[str]): names of the supported attributes. default_size (Optional[int]): default size. default_units (Optional[str]): default units. is_member (Optional[bool]): True if the data type definition is a member data type definition. supported_size_values (Optional[tuple[int]]): supported size values, or None if not set. Returns: FixedSizeDataTypeDefinition: fixed-size data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect. """ definition_object = self._ReadStorageDataTypeDefinition( definitions_registry, definition_values, data_type_definition_class, definition_name, supported_attributes, is_member=is_member) attributes = definition_values.get('attributes', None) if attributes: size = attributes.get('size', default_size) if size != definitions.SIZE_NATIVE: try: int(size) except ValueError: error_message = 'unuspported size attribute: {0!s}'.format(size) raise errors.DefinitionReaderError(definition_name, error_message) if supported_size_values and size not in supported_size_values: error_message = 'unuspported size value: {0!s}'.format(size) raise errors.DefinitionReaderError(definition_name, error_message) definition_object.size = size definition_object.units = attributes.get('units', default_units) return definition_object
[ "def", "_ReadFixedSizeDataTypeDefinition", "(", "self", ",", "definitions_registry", ",", "definition_values", ",", "data_type_definition_class", ",", "definition_name", ",", "supported_attributes", ",", "default_size", "=", "definitions", ".", "SIZE_NATIVE", ",", "default_units", "=", "'bytes'", ",", "is_member", "=", "False", ",", "supported_size_values", "=", "None", ")", ":", "definition_object", "=", "self", ".", "_ReadStorageDataTypeDefinition", "(", "definitions_registry", ",", "definition_values", ",", "data_type_definition_class", ",", "definition_name", ",", "supported_attributes", ",", "is_member", "=", "is_member", ")", "attributes", "=", "definition_values", ".", "get", "(", "'attributes'", ",", "None", ")", "if", "attributes", ":", "size", "=", "attributes", ".", "get", "(", "'size'", ",", "default_size", ")", "if", "size", "!=", "definitions", ".", "SIZE_NATIVE", ":", "try", ":", "int", "(", "size", ")", "except", "ValueError", ":", "error_message", "=", "'unuspported size attribute: {0!s}'", ".", "format", "(", "size", ")", "raise", "errors", ".", "DefinitionReaderError", "(", "definition_name", ",", "error_message", ")", "if", "supported_size_values", "and", "size", "not", "in", "supported_size_values", ":", "error_message", "=", "'unuspported size value: {0!s}'", ".", "format", "(", "size", ")", "raise", "errors", ".", "DefinitionReaderError", "(", "definition_name", ",", "error_message", ")", "definition_object", ".", "size", "=", "size", "definition_object", ".", "units", "=", "attributes", ".", "get", "(", "'units'", ",", "default_units", ")", "return", "definition_object" ]
Reads a fixed-size data type definition. Args: definitions_registry (DataTypeDefinitionsRegistry): data type definitions registry. definition_values (dict[str, object]): definition values. data_type_definition_class (str): data type definition class. definition_name (str): name of the definition. supported_attributes (set[str]): names of the supported attributes. default_size (Optional[int]): default size. default_units (Optional[str]): default units. is_member (Optional[bool]): True if the data type definition is a member data type definition. supported_size_values (Optional[tuple[int]]): supported size values, or None if not set. Returns: FixedSizeDataTypeDefinition: fixed-size data type definition. Raises: DefinitionReaderError: if the definitions values are missing or if the format is incorrect.
[ "Reads", "a", "fixed", "-", "size", "data", "type", "definition", "." ]
python
train
42.56
QUANTAXIS/QUANTAXIS
QUANTAXIS/QASU/save_tdx_parallelism.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QASU/save_tdx_parallelism.py#L118-L193
def QA_SU_save_stock_day(client=DATABASE, ui_log=None, ui_progress=None): ''' save stock_day 保存日线数据 :param client: :param ui_log: 给GUI qt 界面使用 :param ui_progress: 给GUI qt 界面使用 :param ui_progress_int_value: 给GUI qt 界面使用 ''' stock_list = QA_fetch_get_stock_list().code.unique().tolist() coll_stock_day = client.stock_day coll_stock_day.create_index( [("code", pymongo.ASCENDING), ("date_stamp", pymongo.ASCENDING)] ) err = [] # saveing result def __gen_param(stock_list, coll_stock_day, ip_list=[]): results = [] count = len(ip_list) total = len(stock_list) for item in range(len(stock_list)): try: code = stock_list[item] QA_util_log_info( '##JOB01 Now Saving STOCK_DAY==== {}'.format(str(code)), ui_log ) # 首选查找数据库 是否 有 这个代码的数据 search_cond = {'code': str(code)[0:6]} ref = coll_stock_day.find(search_cond) end_date = str(now_time())[0:10] ref_count = coll_stock_day.count_documents(search_cond) # 当前数据库已经包含了这个代码的数据, 继续增量更新 # 加入这个判断的原因是因为如果股票是刚上市的 数据库会没有数据 所以会有负索引问题出现 if ref_count > 0: # 接着上次获取的日期继续更新 start_date = ref[ref_count - 1]['date'] # print("ref[ref.count() - 1]['date'] {} {}".format(ref.count(), coll_stock_day.count_documents({'code': str(code)[0:6]}))) else: # 当前数据库中没有这个代码的股票数据, 从1990-01-01 开始下载所有的数据 start_date = '1990-01-01' QA_util_log_info( 'UPDATE_STOCK_DAY \n Trying updating {} from {} to {}' .format(code, start_date, end_date), ui_log ) if start_date != end_date: # 更新过的,不更新 results.extend([(code, start_date, end_date, '00', 'day', ip_list[item % count]['ip'], ip_list[item % count]['port'], item, total, ui_log, ui_progress)]) except Exception as error0: print('Exception:{}'.format(error0)) err.append(code) return results ips = get_ip_list_by_multi_process_ping(stock_ip_list, _type='stock')[:cpu_count() * 2 + 1] param = __gen_param(stock_list, coll_stock_day, ips) ps = QA_SU_save_stock_day_parallelism(processes=cpu_count() if len(ips) >= cpu_count() else len(ips), client=client, ui_log=ui_log) ps.add(do_saving_work, param) ps.run() if len(err) < 1: QA_util_log_info('SUCCESS save stock day ^_^', ui_log) else: QA_util_log_info('ERROR CODE \n ', ui_log) QA_util_log_info(err, ui_log)
[ "def", "QA_SU_save_stock_day", "(", "client", "=", "DATABASE", ",", "ui_log", "=", "None", ",", "ui_progress", "=", "None", ")", ":", "stock_list", "=", "QA_fetch_get_stock_list", "(", ")", ".", "code", ".", "unique", "(", ")", ".", "tolist", "(", ")", "coll_stock_day", "=", "client", ".", "stock_day", "coll_stock_day", ".", "create_index", "(", "[", "(", "\"code\"", ",", "pymongo", ".", "ASCENDING", ")", ",", "(", "\"date_stamp\"", ",", "pymongo", ".", "ASCENDING", ")", "]", ")", "err", "=", "[", "]", "# saveing result", "def", "__gen_param", "(", "stock_list", ",", "coll_stock_day", ",", "ip_list", "=", "[", "]", ")", ":", "results", "=", "[", "]", "count", "=", "len", "(", "ip_list", ")", "total", "=", "len", "(", "stock_list", ")", "for", "item", "in", "range", "(", "len", "(", "stock_list", ")", ")", ":", "try", ":", "code", "=", "stock_list", "[", "item", "]", "QA_util_log_info", "(", "'##JOB01 Now Saving STOCK_DAY==== {}'", ".", "format", "(", "str", "(", "code", ")", ")", ",", "ui_log", ")", "# 首选查找数据库 是否 有 这个代码的数据", "search_cond", "=", "{", "'code'", ":", "str", "(", "code", ")", "[", "0", ":", "6", "]", "}", "ref", "=", "coll_stock_day", ".", "find", "(", "search_cond", ")", "end_date", "=", "str", "(", "now_time", "(", ")", ")", "[", "0", ":", "10", "]", "ref_count", "=", "coll_stock_day", ".", "count_documents", "(", "search_cond", ")", "# 当前数据库已经包含了这个代码的数据, 继续增量更新", "# 加入这个判断的原因是因为如果股票是刚上市的 数据库会没有数据 所以会有负索引问题出现", "if", "ref_count", ">", "0", ":", "# 接着上次获取的日期继续更新", "start_date", "=", "ref", "[", "ref_count", "-", "1", "]", "[", "'date'", "]", "# print(\"ref[ref.count() - 1]['date'] {} {}\".format(ref.count(), coll_stock_day.count_documents({'code': str(code)[0:6]})))", "else", ":", "# 当前数据库中没有这个代码的股票数据, 从1990-01-01 开始下载所有的数据", "start_date", "=", "'1990-01-01'", "QA_util_log_info", "(", "'UPDATE_STOCK_DAY \\n Trying updating {} from {} to {}'", ".", "format", "(", "code", ",", "start_date", ",", "end_date", ")", ",", "ui_log", ")", "if", "start_date", "!=", "end_date", ":", "# 更新过的,不更新", "results", ".", "extend", "(", "[", "(", "code", ",", "start_date", ",", "end_date", ",", "'00'", ",", "'day'", ",", "ip_list", "[", "item", "%", "count", "]", "[", "'ip'", "]", ",", "ip_list", "[", "item", "%", "count", "]", "[", "'port'", "]", ",", "item", ",", "total", ",", "ui_log", ",", "ui_progress", ")", "]", ")", "except", "Exception", "as", "error0", ":", "print", "(", "'Exception:{}'", ".", "format", "(", "error0", ")", ")", "err", ".", "append", "(", "code", ")", "return", "results", "ips", "=", "get_ip_list_by_multi_process_ping", "(", "stock_ip_list", ",", "_type", "=", "'stock'", ")", "[", ":", "cpu_count", "(", ")", "*", "2", "+", "1", "]", "param", "=", "__gen_param", "(", "stock_list", ",", "coll_stock_day", ",", "ips", ")", "ps", "=", "QA_SU_save_stock_day_parallelism", "(", "processes", "=", "cpu_count", "(", ")", "if", "len", "(", "ips", ")", ">=", "cpu_count", "(", ")", "else", "len", "(", "ips", ")", ",", "client", "=", "client", ",", "ui_log", "=", "ui_log", ")", "ps", ".", "add", "(", "do_saving_work", ",", "param", ")", "ps", ".", "run", "(", ")", "if", "len", "(", "err", ")", "<", "1", ":", "QA_util_log_info", "(", "'SUCCESS save stock day ^_^'", ",", "ui_log", ")", "else", ":", "QA_util_log_info", "(", "'ERROR CODE \\n '", ",", "ui_log", ")", "QA_util_log_info", "(", "err", ",", "ui_log", ")" ]
save stock_day (save daily bar data) :param client: :param ui_log: used by the GUI (Qt) interface :param ui_progress: used by the GUI (Qt) interface :param ui_progress_int_value: used by the GUI (Qt) interface
[ "save", "stock_day", "保存日线数据", ":", "param", "client", ":", ":", "param", "ui_log", ":", "给GUI", "qt", "界面使用", ":", "param", "ui_progress", ":", "给GUI", "qt", "界面使用", ":", "param", "ui_progress_int_value", ":", "给GUI", "qt", "界面使用" ]
python
train
38.421053
klichukb/django-migrate-sql
migrate_sql/operations.py
https://github.com/klichukb/django-migrate-sql/blob/be48ff2c9283404e3d951128c459c3496d1ba25d/migrate_sql/operations.py#L12-L18
def get_sql_state(self, state): """ Get SQLStateGraph from state. """ if not hasattr(state, 'sql_state'): setattr(state, 'sql_state', SQLStateGraph()) return state.sql_state
[ "def", "get_sql_state", "(", "self", ",", "state", ")", ":", "if", "not", "hasattr", "(", "state", ",", "'sql_state'", ")", ":", "setattr", "(", "state", ",", "'sql_state'", ",", "SQLStateGraph", "(", ")", ")", "return", "state", ".", "sql_state" ]
Get SQLStateGraph from state.
[ "Get", "SQLStateGraph", "from", "state", "." ]
python
train
31.285714
intake/intake
intake/catalog/base.py
https://github.com/intake/intake/blob/277b96bfdee39d8a3048ea5408c6d6716d568336/intake/catalog/base.py#L177-L180
def reload(self): """Reload catalog if sufficient time has passed""" if time.time() - self.updated > self.ttl: self.force_reload()
[ "def", "reload", "(", "self", ")", ":", "if", "time", ".", "time", "(", ")", "-", "self", ".", "updated", ">", "self", ".", "ttl", ":", "self", ".", "force_reload", "(", ")" ]
Reload catalog if sufficient time has passed
[ "Reload", "catalog", "if", "sufficient", "time", "has", "passed" ]
python
train
38.75
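A sketch of the TTL behaviour behind this method, assuming intake's standard `open_catalog` entry point and a placeholder catalog file:

```python
import intake

cat = intake.open_catalog('catalog.yaml', ttl=60)   # placeholder path; 60 s TTL

# ... some time later ...
cat.reload()        # calls force_reload() only if more than 60 s have elapsed
print(list(cat))    # entries reflect the (possibly) reloaded catalog
```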
petl-developers/petl
petl/transform/intervals.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/intervals.py#L575-L632
def intervalleftjoin(left, right, lstart='start', lstop='stop', rstart='start', rstop='stop', lkey=None, rkey=None, include_stop=False, missing=None, lprefix=None, rprefix=None): """ Like :func:`petl.transform.intervals.intervaljoin` but rows from the left table without a match in the right table are also included. E.g.:: >>> import petl as etl >>> left = [['begin', 'end', 'quux'], ... [1, 2, 'a'], ... [2, 4, 'b'], ... [2, 5, 'c'], ... [9, 14, 'd'], ... [1, 1, 'e'], ... [10, 10, 'f']] >>> right = [['start', 'stop', 'value'], ... [1, 4, 'foo'], ... [3, 7, 'bar'], ... [4, 9, 'baz']] >>> table1 = etl.intervalleftjoin(left, right, ... lstart='begin', lstop='end', ... rstart='start', rstop='stop') >>> table1.lookall() +-------+-----+------+-------+------+-------+ | begin | end | quux | start | stop | value | +=======+=====+======+=======+======+=======+ | 1 | 2 | 'a' | 1 | 4 | 'foo' | +-------+-----+------+-------+------+-------+ | 2 | 4 | 'b' | 1 | 4 | 'foo' | +-------+-----+------+-------+------+-------+ | 2 | 4 | 'b' | 3 | 7 | 'bar' | +-------+-----+------+-------+------+-------+ | 2 | 5 | 'c' | 1 | 4 | 'foo' | +-------+-----+------+-------+------+-------+ | 2 | 5 | 'c' | 3 | 7 | 'bar' | +-------+-----+------+-------+------+-------+ | 2 | 5 | 'c' | 4 | 9 | 'baz' | +-------+-----+------+-------+------+-------+ | 9 | 14 | 'd' | None | None | None | +-------+-----+------+-------+------+-------+ | 1 | 1 | 'e' | None | None | None | +-------+-----+------+-------+------+-------+ | 10 | 10 | 'f' | None | None | None | +-------+-----+------+-------+------+-------+ Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps. """ assert (lkey is None) == (rkey is None), \ 'facet key field must be provided for both or neither table' return IntervalLeftJoinView(left, right, lstart=lstart, lstop=lstop, rstart=rstart, rstop=rstop, lkey=lkey, rkey=rkey, include_stop=include_stop, missing=missing, lprefix=lprefix, rprefix=rprefix)
[ "def", "intervalleftjoin", "(", "left", ",", "right", ",", "lstart", "=", "'start'", ",", "lstop", "=", "'stop'", ",", "rstart", "=", "'start'", ",", "rstop", "=", "'stop'", ",", "lkey", "=", "None", ",", "rkey", "=", "None", ",", "include_stop", "=", "False", ",", "missing", "=", "None", ",", "lprefix", "=", "None", ",", "rprefix", "=", "None", ")", ":", "assert", "(", "lkey", "is", "None", ")", "==", "(", "rkey", "is", "None", ")", ",", "'facet key field must be provided for both or neither table'", "return", "IntervalLeftJoinView", "(", "left", ",", "right", ",", "lstart", "=", "lstart", ",", "lstop", "=", "lstop", ",", "rstart", "=", "rstart", ",", "rstop", "=", "rstop", ",", "lkey", "=", "lkey", ",", "rkey", "=", "rkey", ",", "include_stop", "=", "include_stop", ",", "missing", "=", "missing", ",", "lprefix", "=", "lprefix", ",", "rprefix", "=", "rprefix", ")" ]
Like :func:`petl.transform.intervals.intervaljoin` but rows from the left table without a match in the right table are also included. E.g.:: >>> import petl as etl >>> left = [['begin', 'end', 'quux'], ... [1, 2, 'a'], ... [2, 4, 'b'], ... [2, 5, 'c'], ... [9, 14, 'd'], ... [1, 1, 'e'], ... [10, 10, 'f']] >>> right = [['start', 'stop', 'value'], ... [1, 4, 'foo'], ... [3, 7, 'bar'], ... [4, 9, 'baz']] >>> table1 = etl.intervalleftjoin(left, right, ... lstart='begin', lstop='end', ... rstart='start', rstop='stop') >>> table1.lookall() +-------+-----+------+-------+------+-------+ | begin | end | quux | start | stop | value | +=======+=====+======+=======+======+=======+ | 1 | 2 | 'a' | 1 | 4 | 'foo' | +-------+-----+------+-------+------+-------+ | 2 | 4 | 'b' | 1 | 4 | 'foo' | +-------+-----+------+-------+------+-------+ | 2 | 4 | 'b' | 3 | 7 | 'bar' | +-------+-----+------+-------+------+-------+ | 2 | 5 | 'c' | 1 | 4 | 'foo' | +-------+-----+------+-------+------+-------+ | 2 | 5 | 'c' | 3 | 7 | 'bar' | +-------+-----+------+-------+------+-------+ | 2 | 5 | 'c' | 4 | 9 | 'baz' | +-------+-----+------+-------+------+-------+ | 9 | 14 | 'd' | None | None | None | +-------+-----+------+-------+------+-------+ | 1 | 1 | 'e' | None | None | None | +-------+-----+------+-------+------+-------+ | 10 | 10 | 'f' | None | None | None | +-------+-----+------+-------+------+-------+ Note start coordinates are included and stop coordinates are excluded from the interval. Use the `include_stop` keyword argument to include the upper bound of the interval when finding overlaps.
[ "Like", ":", "func", ":", "petl", ".", "transform", ".", "intervals", ".", "intervaljoin", "but", "rows", "from", "the", "left", "table", "without", "a", "match", "in", "the", "right", "table", "are", "also", "included", ".", "E", ".", "g", ".", "::" ]
python
train
48.017241
SBRG/ssbio
ssbio/protein/structure/homology/itasser/itasserprop.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/structure/homology/itasser/itasserprop.py#L153-L196
def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False): """Copy the raw information from I-TASSER modeling to a new folder. Copies all files in the list _attrs_to_copy. Args: copy_to_dir (str): Directory to copy the minimal set of results per sequence. rename_model_to (str): New file name (without extension) force_rerun (bool): If existing models and results should be overwritten. """ # Save path to the structure and copy it if specified if not rename_model_to: rename_model_to = self.model_to_use new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to)) if self.structure_path: if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path): # Clean and save it custom_clean = CleanPDB() my_pdb = StructureIO(self.structure_path) new_model_path = my_pdb.write_pdb(custom_selection=custom_clean, custom_name=rename_model_to, out_dir=copy_to_dir, force_rerun=force_rerun) # Update the structure_path to be the new clean file self.load_structure_path(structure_path=new_model_path, file_type='pdb') # Other modeling results - store in a new folder dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to)) if not op.exists(dest_itasser_dir): os.mkdir(dest_itasser_dir) for attr in self._attrs_to_copy: old_file_path = getattr(self, attr) new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path): shutil.copy2(old_file_path, new_file_path) log.debug('{}: copied from {}'.format(new_file_path, old_file_path)) else: log.debug('{}: file already exists'.format(new_file_path)) setattr(self, attr, new_file_path)
[ "def", "copy_results", "(", "self", ",", "copy_to_dir", ",", "rename_model_to", "=", "None", ",", "force_rerun", "=", "False", ")", ":", "# Save path to the structure and copy it if specified", "if", "not", "rename_model_to", ":", "rename_model_to", "=", "self", ".", "model_to_use", "new_model_path", "=", "op", ".", "join", "(", "copy_to_dir", ",", "'{}.pdb'", ".", "format", "(", "rename_model_to", ")", ")", "if", "self", ".", "structure_path", ":", "if", "ssbio", ".", "utils", ".", "force_rerun", "(", "flag", "=", "force_rerun", ",", "outfile", "=", "new_model_path", ")", ":", "# Clean and save it", "custom_clean", "=", "CleanPDB", "(", ")", "my_pdb", "=", "StructureIO", "(", "self", ".", "structure_path", ")", "new_model_path", "=", "my_pdb", ".", "write_pdb", "(", "custom_selection", "=", "custom_clean", ",", "custom_name", "=", "rename_model_to", ",", "out_dir", "=", "copy_to_dir", ",", "force_rerun", "=", "force_rerun", ")", "# Update the structure_path to be the new clean file", "self", ".", "load_structure_path", "(", "structure_path", "=", "new_model_path", ",", "file_type", "=", "'pdb'", ")", "# Other modeling results - store in a new folder", "dest_itasser_dir", "=", "op", ".", "join", "(", "copy_to_dir", ",", "'{}_itasser'", ".", "format", "(", "rename_model_to", ")", ")", "if", "not", "op", ".", "exists", "(", "dest_itasser_dir", ")", ":", "os", ".", "mkdir", "(", "dest_itasser_dir", ")", "for", "attr", "in", "self", ".", "_attrs_to_copy", ":", "old_file_path", "=", "getattr", "(", "self", ",", "attr", ")", "new_file_path", "=", "op", ".", "join", "(", "dest_itasser_dir", ",", "op", ".", "basename", "(", "old_file_path", ")", ")", "if", "ssbio", ".", "utils", ".", "force_rerun", "(", "flag", "=", "force_rerun", ",", "outfile", "=", "new_file_path", ")", ":", "shutil", ".", "copy2", "(", "old_file_path", ",", "new_file_path", ")", "log", ".", "debug", "(", "'{}: copied from {}'", ".", "format", "(", "new_file_path", ",", "old_file_path", ")", ")", "else", ":", "log", ".", "debug", "(", "'{}: file already exists'", ".", "format", "(", "new_file_path", ")", ")", "setattr", "(", "self", ",", "attr", ",", "new_file_path", ")" ]
Copy the raw information from I-TASSER modeling to a new folder. Copies all files in the list _attrs_to_copy. Args: copy_to_dir (str): Directory to copy the minimal set of results per sequence. rename_model_to (str): New file name (without extension) force_rerun (bool): If existing models and results should be overwritten.
[ "Copy", "the", "raw", "information", "from", "I", "-", "TASSER", "modeling", "to", "a", "new", "folder", "." ]
python
train
49.636364
olitheolix/qtmacs
qtmacs/extensions/qtmacsscintilla_widget.py
https://github.com/olitheolix/qtmacs/blob/36253b082b82590f183fe154b053eb3a1e741be2/qtmacs/extensions/qtmacsscintilla_widget.py#L390-L397
def reverseCommit(self): """ Replace the current widget content with the original text. Note that the original text has styling information available, whereas the new text does not. """ self.baseClass.setText(self.oldText) self.qteWidget.SCISetStylingEx(0, 0, self.style)
[ "def", "reverseCommit", "(", "self", ")", ":", "self", ".", "baseClass", ".", "setText", "(", "self", ".", "oldText", ")", "self", ".", "qteWidget", ".", "SCISetStylingEx", "(", "0", ",", "0", ",", "self", ".", "style", ")" ]
Replace the current widget content with the original text. Note that the original text has styling information available, whereas the new text does not.
[ "Replace", "the", "current", "widget", "content", "with", "the", "original", "text", ".", "Note", "that", "the", "original", "text", "has", "styling", "information", "available", "whereas", "the", "new", "text", "does", "not", "." ]
python
train
40
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L1196-L1201
def simxGetOutMessageInfo(clientID, infoType): ''' Please have a look at the function description/documentation in the V-REP user manual ''' info = ct.c_int() return c_GetOutMessageInfo(clientID, infoType, ct.byref(info)), info.value
[ "def", "simxGetOutMessageInfo", "(", "clientID", ",", "infoType", ")", ":", "info", "=", "ct", ".", "c_int", "(", ")", "return", "c_GetOutMessageInfo", "(", "clientID", ",", "infoType", ",", "ct", ".", "byref", "(", "info", ")", ")", ",", "info", ".", "value" ]
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
41.333333
gtaylor/python-route53
route53/xml_generators/change_resource_record_set.py
https://github.com/gtaylor/python-route53/blob/b9fc7e258a79551c9ed61e4a71668b7f06f9e774/route53/xml_generators/change_resource_record_set.py#L98-L134
def change_resource_record_set_writer(connection, change_set, comment=None): """ Forms an XML string that we'll send to Route53 in order to change record sets. :param Route53Connection connection: The connection instance used to query the API. :param change_set.ChangeSet change_set: The ChangeSet object to create the XML doc from. :keyword str comment: An optional comment to go along with the request. """ e_root = etree.Element( "ChangeResourceRecordSetsRequest", xmlns=connection._xml_namespace ) e_change_batch = etree.SubElement(e_root, "ChangeBatch") if comment: e_comment = etree.SubElement(e_change_batch, "Comment") e_comment.text = comment e_changes = etree.SubElement(e_change_batch, "Changes") # Deletions need to come first in the change sets. for change in change_set.deletions + change_set.creations: e_changes.append(write_change(change)) e_tree = etree.ElementTree(element=e_root) #print(prettyprint_xml(e_root)) fobj = BytesIO() # This writes bytes. e_tree.write(fobj, xml_declaration=True, encoding='utf-8', method="xml") return fobj.getvalue().decode('utf-8')
[ "def", "change_resource_record_set_writer", "(", "connection", ",", "change_set", ",", "comment", "=", "None", ")", ":", "e_root", "=", "etree", ".", "Element", "(", "\"ChangeResourceRecordSetsRequest\"", ",", "xmlns", "=", "connection", ".", "_xml_namespace", ")", "e_change_batch", "=", "etree", ".", "SubElement", "(", "e_root", ",", "\"ChangeBatch\"", ")", "if", "comment", ":", "e_comment", "=", "etree", ".", "SubElement", "(", "e_change_batch", ",", "\"Comment\"", ")", "e_comment", ".", "text", "=", "comment", "e_changes", "=", "etree", ".", "SubElement", "(", "e_change_batch", ",", "\"Changes\"", ")", "# Deletions need to come first in the change sets.", "for", "change", "in", "change_set", ".", "deletions", "+", "change_set", ".", "creations", ":", "e_changes", ".", "append", "(", "write_change", "(", "change", ")", ")", "e_tree", "=", "etree", ".", "ElementTree", "(", "element", "=", "e_root", ")", "#print(prettyprint_xml(e_root))", "fobj", "=", "BytesIO", "(", ")", "# This writes bytes.", "e_tree", ".", "write", "(", "fobj", ",", "xml_declaration", "=", "True", ",", "encoding", "=", "'utf-8'", ",", "method", "=", "\"xml\"", ")", "return", "fobj", ".", "getvalue", "(", ")", ".", "decode", "(", "'utf-8'", ")" ]
Forms an XML string that we'll send to Route53 in order to change record sets. :param Route53Connection connection: The connection instance used to query the API. :param change_set.ChangeSet change_set: The ChangeSet object to create the XML doc from. :keyword str comment: An optional comment to go along with the request.
[ "Forms", "an", "XML", "string", "that", "we", "ll", "send", "to", "Route53", "in", "order", "to", "change", "record", "sets", "." ]
python
test
32.162162
saltstack/salt
salt/serializers/configparser.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/serializers/configparser.py#L24-L57
def deserialize(stream_or_string, **options): ''' Deserialize any string or stream like object into a Python data structure. :param stream_or_string: stream or string to deserialize. :param options: options given to lower configparser module. ''' if six.PY3: cp = configparser.ConfigParser(**options) else: cp = configparser.SafeConfigParser(**options) try: if not isinstance(stream_or_string, (bytes, six.string_types)): if six.PY3: cp.read_file(stream_or_string) else: cp.readfp(stream_or_string) else: if six.PY3: cp.read_file(six.moves.StringIO(stream_or_string)) else: # python2's ConfigParser cannot parse a config from a string cp.readfp(six.moves.StringIO(stream_or_string)) data = {} for section_name in cp.sections(): section = {} for k, v in cp.items(section_name): section[k] = v data[section_name] = section return data except Exception as error: raise DeserializationError(error)
[ "def", "deserialize", "(", "stream_or_string", ",", "*", "*", "options", ")", ":", "if", "six", ".", "PY3", ":", "cp", "=", "configparser", ".", "ConfigParser", "(", "*", "*", "options", ")", "else", ":", "cp", "=", "configparser", ".", "SafeConfigParser", "(", "*", "*", "options", ")", "try", ":", "if", "not", "isinstance", "(", "stream_or_string", ",", "(", "bytes", ",", "six", ".", "string_types", ")", ")", ":", "if", "six", ".", "PY3", ":", "cp", ".", "read_file", "(", "stream_or_string", ")", "else", ":", "cp", ".", "readfp", "(", "stream_or_string", ")", "else", ":", "if", "six", ".", "PY3", ":", "cp", ".", "read_file", "(", "six", ".", "moves", ".", "StringIO", "(", "stream_or_string", ")", ")", "else", ":", "# python2's ConfigParser cannot parse a config from a string", "cp", ".", "readfp", "(", "six", ".", "moves", ".", "StringIO", "(", "stream_or_string", ")", ")", "data", "=", "{", "}", "for", "section_name", "in", "cp", ".", "sections", "(", ")", ":", "section", "=", "{", "}", "for", "k", ",", "v", "in", "cp", ".", "items", "(", "section_name", ")", ":", "section", "[", "k", "]", "=", "v", "data", "[", "section_name", "]", "=", "section", "return", "data", "except", "Exception", "as", "error", ":", "raise", "DeserializationError", "(", "error", ")" ]
Deserialize any string or stream like object into a Python data structure. :param stream_or_string: stream or string to deserialize. :param options: options passed to the underlying configparser module.
[ "Deserialize", "any", "string", "or", "stream", "like", "object", "into", "a", "Python", "data", "structure", "." ]
python
train
33.617647
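A minimal usage sketch for the `deserialize` record above. It assumes `salt.serializers.configparser` is importable in the current environment; the expected output simply mirrors the section-to-dict loop in the function body, with every option value kept as a string.

# Illustrative only: feed an INI-style string to the serializer shown above.
from salt.serializers.configparser import deserialize

ini_text = "[server]\nhost = 127.0.0.1\nport = 8080\n"
data = deserialize(ini_text)
# Each section becomes a dict of its options; values stay as strings:
# {'server': {'host': '127.0.0.1', 'port': '8080'}}
print(data)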
ChaosinaCan/pyvswhere
vswhere/__init__.py
https://github.com/ChaosinaCan/pyvswhere/blob/e1b0acc92b851c7ada23ed6415406d688dc6bfce/vswhere/__init__.py#L208-L220
def _download_vswhere(): """ Download vswhere to DOWNLOAD_PATH. """ print('downloading from', _get_latest_release_url()) try: from urllib.request import urlopen with urlopen(_get_latest_release_url()) as response, open(DOWNLOAD_PATH, 'wb') as outfile: shutil.copyfileobj(response, outfile) except ImportError: # Python 2 import urllib urllib.urlretrieve(_get_latest_release_url(), DOWNLOAD_PATH)
[ "def", "_download_vswhere", "(", ")", ":", "print", "(", "'downloading from'", ",", "_get_latest_release_url", "(", ")", ")", "try", ":", "from", "urllib", ".", "request", "import", "urlopen", "with", "urlopen", "(", "_get_latest_release_url", "(", ")", ")", "as", "response", ",", "open", "(", "DOWNLOAD_PATH", ",", "'wb'", ")", "as", "outfile", ":", "shutil", ".", "copyfileobj", "(", "response", ",", "outfile", ")", "except", "ImportError", ":", "# Python 2", "import", "urllib", "urllib", ".", "urlretrieve", "(", "_get_latest_release_url", "(", ")", ",", "DOWNLOAD_PATH", ")" ]
Download vswhere to DOWNLOAD_PATH.
[ "Download", "vswhere", "to", "DOWNLOAD_PATH", "." ]
python
train
35.307692
sorgerlab/indra
indra/sources/ndex_cx/processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/ndex_cx/processor.py#L166-L173
def get_pmids(self): """Get list of all PMIDs associated with edges in the network.""" pmids = [] for ea in self._edge_attributes.values(): edge_pmids = ea.get('pmids') if edge_pmids: pmids += edge_pmids return list(set(pmids))
[ "def", "get_pmids", "(", "self", ")", ":", "pmids", "=", "[", "]", "for", "ea", "in", "self", ".", "_edge_attributes", ".", "values", "(", ")", ":", "edge_pmids", "=", "ea", ".", "get", "(", "'pmids'", ")", "if", "edge_pmids", ":", "pmids", "+=", "edge_pmids", "return", "list", "(", "set", "(", "pmids", ")", ")" ]
Get list of all PMIDs associated with edges in the network.
[ "Get", "list", "of", "all", "PMIDs", "associated", "with", "edges", "in", "the", "network", "." ]
python
train
36.5
senaite/senaite.core
bika/lims/content/abstractroutineanalysis.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/content/abstractroutineanalysis.py#L455-L469
def getPrioritySortkey(self): """ Returns the key that will be used to sort the current Analysis, from most prioritary to less prioritary. :return: string used for sorting """ analysis_request = self.getRequest() if analysis_request is None: return None ar_sort_key = analysis_request.getPrioritySortkey() ar_id = analysis_request.getId().lower() title = sortable_title(self) if callable(title): title = title() return '{}.{}.{}'.format(ar_sort_key, ar_id, title)
[ "def", "getPrioritySortkey", "(", "self", ")", ":", "analysis_request", "=", "self", ".", "getRequest", "(", ")", "if", "analysis_request", "is", "None", ":", "return", "None", "ar_sort_key", "=", "analysis_request", ".", "getPrioritySortkey", "(", ")", "ar_id", "=", "analysis_request", ".", "getId", "(", ")", ".", "lower", "(", ")", "title", "=", "sortable_title", "(", "self", ")", "if", "callable", "(", "title", ")", ":", "title", "=", "title", "(", ")", "return", "'{}.{}.{}'", ".", "format", "(", "ar_sort_key", ",", "ar_id", ",", "title", ")" ]
Returns the key that will be used to sort the current Analysis, from highest priority to lowest priority. :return: string used for sorting
[ "Returns", "the", "key", "that", "will", "be", "used", "to", "sort", "the", "current", "Analysis", "from", "most", "prioritary", "to", "less", "prioritary", ".", ":", "return", ":", "string", "used", "for", "sorting" ]
python
train
37.933333
trevorstephens/gplearn
gplearn/genetic.py
https://github.com/trevorstephens/gplearn/blob/5c0465f2ecdcd5abcdf3fe520688d24cd59e4a52/gplearn/genetic.py#L1429-L1457
def transform(self, X): """Transform X according to the fitted transformer. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape = [n_samples, n_components] Transformed array. """ if not hasattr(self, '_best_programs'): raise NotFittedError('SymbolicTransformer not fitted.') X = check_array(X) _, n_features = X.shape if self.n_features_ != n_features: raise ValueError('Number of features of the model must match the ' 'input. Model n_features is %s and input ' 'n_features is %s.' % (self.n_features_, n_features)) X_new = np.array([gp.execute(X) for gp in self._best_programs]).T return X_new
[ "def", "transform", "(", "self", ",", "X", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_best_programs'", ")", ":", "raise", "NotFittedError", "(", "'SymbolicTransformer not fitted.'", ")", "X", "=", "check_array", "(", "X", ")", "_", ",", "n_features", "=", "X", ".", "shape", "if", "self", ".", "n_features_", "!=", "n_features", ":", "raise", "ValueError", "(", "'Number of features of the model must match the '", "'input. Model n_features is %s and input '", "'n_features is %s.'", "%", "(", "self", ".", "n_features_", ",", "n_features", ")", ")", "X_new", "=", "np", ".", "array", "(", "[", "gp", ".", "execute", "(", "X", ")", "for", "gp", "in", "self", ".", "_best_programs", "]", ")", ".", "T", "return", "X_new" ]
Transform X according to the fitted transformer. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- X_new : array-like, shape = [n_samples, n_components] Transformed array.
[ "Transform", "X", "according", "to", "the", "fitted", "transformer", "." ]
python
train
34.137931
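A fit-then-transform sketch for the `SymbolicTransformer.transform` record above, under the assumption that the surrounding scikit-learn-style API (`fit`, `n_components`, `random_state`) behaves as in released gplearn versions; the small generation and population sizes are illustrative, not recommended settings.

# Illustrative only: evolve a few features, then project X through them.
import numpy as np
from gplearn.genetic import SymbolicTransformer

rng = np.random.RandomState(0)
X = rng.uniform(size=(100, 5))
y = X[:, 0] * X[:, 1] + X[:, 2]

gp = SymbolicTransformer(generations=5, population_size=200,
                         n_components=3, random_state=0)
gp.fit(X, y)
X_new = gp.transform(X)   # shape (100, 3): one column per evolved feature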
skoczen/will
will/plugins/fun/googlepoem.py
https://github.com/skoczen/will/blob/778a6a78571e3ae4656b307f9e5d4d184b25627d/will/plugins/fun/googlepoem.py#L9-L15
def google_poem(self, message, topic): """make a poem about __: show a google poem about __""" r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20") xmldoc = minidom.parseString(r.text) item_list = xmldoc.getElementsByTagName("suggestion") context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]} self.say(rendered_template("gpoem.html", context), message, html=True)
[ "def", "google_poem", "(", "self", ",", "message", ",", "topic", ")", ":", "r", "=", "requests", ".", "get", "(", "\"http://www.google.com/complete/search?output=toolbar&q=\"", "+", "topic", "+", "\"%20\"", ")", "xmldoc", "=", "minidom", ".", "parseString", "(", "r", ".", "text", ")", "item_list", "=", "xmldoc", ".", "getElementsByTagName", "(", "\"suggestion\"", ")", "context", "=", "{", "\"topic\"", ":", "topic", ",", "\"lines\"", ":", "[", "x", ".", "attributes", "[", "\"data\"", "]", ".", "value", "for", "x", "in", "item_list", "[", ":", "4", "]", "]", "}", "self", ".", "say", "(", "rendered_template", "(", "\"gpoem.html\"", ",", "context", ")", ",", "message", ",", "html", "=", "True", ")" ]
make a poem about __: show a google poem about __
[ "make", "a", "poem", "about", "__", ":", "show", "a", "google", "poem", "about", "__" ]
python
train
68.428571
adamalton/django-csp-reports
cspreports/summary.py
https://github.com/adamalton/django-csp-reports/blob/867992c6f535cf6afbf911f92af7eea4c61e4b73/cspreports/summary.py#L35-L40
def append(self, report): """Append a new CSP report.""" assert report not in self.examples self.count += 1 if len(self.examples) < self.top: self.examples.append(report)
[ "def", "append", "(", "self", ",", "report", ")", ":", "assert", "report", "not", "in", "self", ".", "examples", "self", ".", "count", "+=", "1", "if", "len", "(", "self", ".", "examples", ")", "<", "self", ".", "top", ":", "self", ".", "examples", ".", "append", "(", "report", ")" ]
Append a new CSP report.
[ "Append", "a", "new", "CSP", "report", "." ]
python
train
34.833333
senaite/senaite.core
bika/lims/exportimport/instruments/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/exportimport/instruments/__init__.py#L245-L249
def getExim(exim_id): """Returns the instrument interface for the exim_id passed in """ interfaces = filter(lambda i: i[0]==exim_id, get_instrument_interfaces()) return interfaces and interfaces[0][1] or None
[ "def", "getExim", "(", "exim_id", ")", ":", "interfaces", "=", "filter", "(", "lambda", "i", ":", "i", "[", "0", "]", "==", "exim_id", ",", "get_instrument_interfaces", "(", ")", ")", "return", "interfaces", "and", "interfaces", "[", "0", "]", "[", "1", "]", "or", "None" ]
Returns the instrument interface for the exim_id passed in
[ "Returns", "the", "instrument", "interface", "for", "the", "exim_id", "passed", "in" ]
python
train
44
PyGithub/PyGithub
github/Organization.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Organization.py#L489-L499
def delete_hook(self, id): """ :calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_ :param id: integer :rtype: None` """ assert isinstance(id, (int, long)), id headers, data = self._requester.requestJsonAndCheck( "DELETE", self.url + "/hooks/" + str(id) )
[ "def", "delete_hook", "(", "self", ",", "id", ")", ":", "assert", "isinstance", "(", "id", ",", "(", "int", ",", "long", ")", ")", ",", "id", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"DELETE\"", ",", "self", ".", "url", "+", "\"/hooks/\"", "+", "str", "(", "id", ")", ")" ]
:calls: `DELETE /orgs/:owner/hooks/:id <http://developer.github.com/v3/orgs/hooks>`_ :param id: integer :rtype: None
[ ":", "calls", ":", "DELETE", "/", "orgs", "/", ":", "owner", "/", "hooks", "/", ":", "id", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "orgs", "/", "hooks", ">", "_", ":", "param", "id", ":", "integer", ":", "rtype", ":", "None" ]
python
train
33.181818
odlgroup/odl
odl/space/weighting.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/space/weighting.py#L555-L560
def repr_part(self): """String usable in a space's ``__repr__`` method.""" optargs = [('weighting', array_str(self.array, nprint=10), ''), ('exponent', self.exponent, 2.0)] return signature_string([], optargs, sep=',\n', mod=[[], ['!s', ':.4']])
[ "def", "repr_part", "(", "self", ")", ":", "optargs", "=", "[", "(", "'weighting'", ",", "array_str", "(", "self", ".", "array", ",", "nprint", "=", "10", ")", ",", "''", ")", ",", "(", "'exponent'", ",", "self", ".", "exponent", ",", "2.0", ")", "]", "return", "signature_string", "(", "[", "]", ",", "optargs", ",", "sep", "=", "',\\n'", ",", "mod", "=", "[", "[", "]", ",", "[", "'!s'", ",", "':.4'", "]", "]", ")" ]
String usable in a space's ``__repr__`` method.
[ "String", "usable", "in", "a", "space", "s", "__repr__", "method", "." ]
python
train
52.5
openstack/quark
quark/billing.py
https://github.com/openstack/quark/blob/1112e6a66917d3e98e44cb7b33b107fd5a74bb2e/quark/billing.py#L256-L288
def calc_periods(hour=0, minute=0): """Returns a tuple of start_period and end_period. Assumes that the period is 24-hrs. Parameters: - `hour`: the hour from 0 to 23 when the period ends - `minute`: the minute from 0 to 59 when the period ends This method will calculate the end of the period as the closest hour/minute going backwards. It will also calculate the start of the period as the passed hour/minute but 24 hrs ago. Example, if we pass 0, 0 - we will get the events from 0:00 midnight of the day before yesterday until today's midnight. If we pass 2,0 - we will get the start time as 2am of the previous morning till 2am of today's morning. By default it's midnight. """ # Calculate the time intervals in a usable form period_end = datetime.datetime.utcnow().replace(hour=hour, minute=minute, second=0, microsecond=0) period_start = period_end - datetime.timedelta(days=1) # period end should be slightly before the midnight. # hence, we subtract a second # this will force period_end to store something like: # datetime.datetime(2016, 5, 19, 23, 59, 59, 999999) # instead of: # datetime.datetime(2016, 5, 20, 0, 0, 0, 0) period_end -= datetime.timedelta(seconds=1) return (period_start, period_end)
[ "def", "calc_periods", "(", "hour", "=", "0", ",", "minute", "=", "0", ")", ":", "# Calculate the time intervals in a usable form", "period_end", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "replace", "(", "hour", "=", "hour", ",", "minute", "=", "minute", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "period_start", "=", "period_end", "-", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "# period end should be slightly before the midnight.", "# hence, we subtract a second", "# this will force period_end to store something like:", "# datetime.datetime(2016, 5, 19, 23, 59, 59, 999999)", "# instead of:", "# datetime.datetime(2016, 5, 20, 0, 0, 0, 0)", "period_end", "-=", "datetime", ".", "timedelta", "(", "seconds", "=", "1", ")", "return", "(", "period_start", ",", "period_end", ")" ]
Returns a tuple of start_period and end_period. Assumes that the period is 24-hrs. Parameters: - `hour`: the hour from 0 to 23 when the period ends - `minute`: the minute from 0 to 59 when the period ends This method will calculate the end of the period as the closest hour/minute going backwards. It will also calculate the start of the period as the passed hour/minute but 24 hrs ago. Example, if we pass 0, 0 - we will get the events from 0:00 midnight of the day before yesterday until today's midnight. If we pass 2,0 - we will get the start time as 2am of the previous morning till 2am of today's morning. By default it's midnight.
[ "Returns", "a", "tuple", "of", "start_period", "and", "end_period", "." ]
python
valid
44.121212
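A plain-datetime sketch of the 24-hour window that `calc_periods` describes above; the fixed "now" value is hypothetical and stands in for `datetime.datetime.utcnow()`.

# Illustrative only: reproduce the period boundaries for hour=2, minute=0.
import datetime

now = datetime.datetime(2016, 5, 20, 15, 30)   # stand-in for utcnow()
period_end = now.replace(hour=2, minute=0, second=0, microsecond=0)
period_start = period_end - datetime.timedelta(days=1)
period_end -= datetime.timedelta(seconds=1)    # end just before the boundary
# period_start -> 2016-05-19 02:00:00
# period_end   -> 2016-05-20 01:59:59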
dls-controls/pymalcolm
malcolm/modules/web/controllers/websocketclientcomms.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/modules/web/controllers/websocketclientcomms.py#L234-L255
def send_post(self, mri, method_name, **params): """Abstract method to dispatch a Post to the server Args: mri (str): The mri of the Block method_name (str): The name of the Method within the Block params: The parameters to send Returns: The return results from the server """ q = Queue() request = Post( path=[mri, method_name], parameters=params) request.set_callback(q.put) IOLoopHelper.call(self._send_request, request) response = q.get() if isinstance(response, Error): raise response.message else: return response.value
[ "def", "send_post", "(", "self", ",", "mri", ",", "method_name", ",", "*", "*", "params", ")", ":", "q", "=", "Queue", "(", ")", "request", "=", "Post", "(", "path", "=", "[", "mri", ",", "method_name", "]", ",", "parameters", "=", "params", ")", "request", ".", "set_callback", "(", "q", ".", "put", ")", "IOLoopHelper", ".", "call", "(", "self", ".", "_send_request", ",", "request", ")", "response", "=", "q", ".", "get", "(", ")", "if", "isinstance", "(", "response", ",", "Error", ")", ":", "raise", "response", ".", "message", "else", ":", "return", "response", ".", "value" ]
Abstract method to dispatch a Post to the server Args: mri (str): The mri of the Block method_name (str): The name of the Method within the Block params: The parameters to send Returns: The return results from the server
[ "Abstract", "method", "to", "dispatch", "a", "Post", "to", "the", "server" ]
python
train
31.363636
petrjasek/eve-elastic
eve_elastic/elastic.py
https://github.com/petrjasek/eve-elastic/blob/f146f31b348d22ac5559cf78717b3bb02efcb2d7/eve_elastic/elastic.py#L695-L721
def put_settings(self, app=None, index=None, settings=None, es=None): """Modify index settings. Index must exist already. """ if not index: index = self.index if not app: app = self.app if not es: es = self.es if not settings: return for alias, old_settings in self.es.indices.get_settings(index=index).items(): try: if test_settings_contain(old_settings['settings']['index'], settings['settings']): return except KeyError: pass es.indices.close(index=index) es.indices.put_settings(index=index, body=settings) es.indices.open(index=index)
[ "def", "put_settings", "(", "self", ",", "app", "=", "None", ",", "index", "=", "None", ",", "settings", "=", "None", ",", "es", "=", "None", ")", ":", "if", "not", "index", ":", "index", "=", "self", ".", "index", "if", "not", "app", ":", "app", "=", "self", ".", "app", "if", "not", "es", ":", "es", "=", "self", ".", "es", "if", "not", "settings", ":", "return", "for", "alias", ",", "old_settings", "in", "self", ".", "es", ".", "indices", ".", "get_settings", "(", "index", "=", "index", ")", ".", "items", "(", ")", ":", "try", ":", "if", "test_settings_contain", "(", "old_settings", "[", "'settings'", "]", "[", "'index'", "]", ",", "settings", "[", "'settings'", "]", ")", ":", "return", "except", "KeyError", ":", "pass", "es", ".", "indices", ".", "close", "(", "index", "=", "index", ")", "es", ".", "indices", ".", "put_settings", "(", "index", "=", "index", ",", "body", "=", "settings", ")", "es", ".", "indices", ".", "open", "(", "index", "=", "index", ")" ]
Modify index settings. Index must exist already.
[ "Modify", "index", "settings", "." ]
python
train
27.074074
openstack/networking-cisco
networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/services/firewall/native/fabric_setup_base.py#L595-L598
def create_serv_obj(self, tenant_id): """Creates and stores the service object associated with a tenant. """ self.service_attr[tenant_id] = ServiceIpSegTenantMap() self.store_tenant_obj(tenant_id, self.service_attr[tenant_id])
[ "def", "create_serv_obj", "(", "self", ",", "tenant_id", ")", ":", "self", ".", "service_attr", "[", "tenant_id", "]", "=", "ServiceIpSegTenantMap", "(", ")", "self", ".", "store_tenant_obj", "(", "tenant_id", ",", "self", ".", "service_attr", "[", "tenant_id", "]", ")" ]
Creates and stores the service object associated with a tenant.
[ "Creates", "and", "stores", "the", "service", "object", "associated", "with", "a", "tenant", "." ]
python
train
61.75
gwastro/pycbc-glue
pycbc_glue/pipeline.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/pipeline.py#L1466-L1474
def write_output_files(self, fh): """ Write as a comment into the DAG file the list of output files for this DAG node. @param fh: descriptor of open DAG file. """ for f in self.__output_files: print >>fh, "## Job %s generates output file %s" % (self.__name, f)
[ "def", "write_output_files", "(", "self", ",", "fh", ")", ":", "for", "f", "in", "self", ".", "__output_files", ":", "print", ">>", "fh", ",", "\"## Job %s generates output file %s\"", "%", "(", "self", ".", "__name", ",", "f", ")" ]
Write as a comment into the DAG file the list of output files for this DAG node. @param fh: descriptor of open DAG file.
[ "Write", "as", "a", "comment", "into", "the", "DAG", "file", "the", "list", "of", "output", "files", "for", "this", "DAG", "node", "." ]
python
train
31.666667
wright-group/WrightTools
WrightTools/data/_constant.py
https://github.com/wright-group/WrightTools/blob/80d3ddd5074d8d5c1bc03fd5a0e0f10d4b424aeb/WrightTools/data/_constant.py#L61-L81
def label(self) -> str: """A latex formatted label representing constant expression and united value.""" label = self.expression.replace("_", "\\;") if self.units_kind: symbol = wt_units.get_symbol(self.units) for v in self.variables: vl = "%s_{%s}" % (symbol, v.label) vl = vl.replace("_{}", "") # label can be empty, no empty subscripts label = label.replace(v.natural_name, vl) val = ( round(self.value, self.round_spec) if self.round_spec is not None else self.value ) label += r"\,=\,{}".format(format(val, self.format_spec)) if self.units_kind: units_dictionary = getattr(wt_units, self.units_kind) label += r"\," label += units_dictionary[self.units][2] label = r"$\mathsf{%s}$" % label return label
[ "def", "label", "(", "self", ")", "->", "str", ":", "label", "=", "self", ".", "expression", ".", "replace", "(", "\"_\"", ",", "\"\\\\;\"", ")", "if", "self", ".", "units_kind", ":", "symbol", "=", "wt_units", ".", "get_symbol", "(", "self", ".", "units", ")", "for", "v", "in", "self", ".", "variables", ":", "vl", "=", "\"%s_{%s}\"", "%", "(", "symbol", ",", "v", ".", "label", ")", "vl", "=", "vl", ".", "replace", "(", "\"_{}\"", ",", "\"\"", ")", "# label can be empty, no empty subscripts", "label", "=", "label", ".", "replace", "(", "v", ".", "natural_name", ",", "vl", ")", "val", "=", "(", "round", "(", "self", ".", "value", ",", "self", ".", "round_spec", ")", "if", "self", ".", "round_spec", "is", "not", "None", "else", "self", ".", "value", ")", "label", "+=", "r\"\\,=\\,{}\"", ".", "format", "(", "format", "(", "val", ",", "self", ".", "format_spec", ")", ")", "if", "self", ".", "units_kind", ":", "units_dictionary", "=", "getattr", "(", "wt_units", ",", "self", ".", "units_kind", ")", "label", "+=", "r\"\\,\"", "label", "+=", "units_dictionary", "[", "self", ".", "units", "]", "[", "2", "]", "label", "=", "r\"$\\mathsf{%s}$\"", "%", "label", "return", "label" ]
A latex formatted label representing constant expression and united value.
[ "A", "latex", "formatted", "label", "representing", "constant", "expression", "and", "united", "value", "." ]
python
train
44.904762
openstack/horizon
horizon/base.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/horizon/base.py#L853-L886
def _urls(self): """Constructs the URLconf for Horizon from registered Dashboards.""" urlpatterns = self._get_default_urlpatterns() self._autodiscover() # Discover each dashboard's panels. for dash in self._registry.values(): dash._autodiscover() # Load the plugin-based panel configuration self._load_panel_customization() # Allow for override modules if self._conf.get("customization_module", None): customization_module = self._conf["customization_module"] bits = customization_module.split('.') mod_name = bits.pop() package = '.'.join(bits) mod = import_module(package) try: before_import_registry = copy.copy(self._registry) import_module('%s.%s' % (package, mod_name)) except Exception: self._registry = before_import_registry if module_has_submodule(mod, mod_name): raise # Compile the dynamic urlconf. for dash in self._registry.values(): urlpatterns.append(url(r'^%s/' % dash.slug, _wrapped_include(dash._decorated_urls))) # Return the three arguments to django.conf.urls.include return urlpatterns, self.namespace, self.slug
[ "def", "_urls", "(", "self", ")", ":", "urlpatterns", "=", "self", ".", "_get_default_urlpatterns", "(", ")", "self", ".", "_autodiscover", "(", ")", "# Discover each dashboard's panels.", "for", "dash", "in", "self", ".", "_registry", ".", "values", "(", ")", ":", "dash", ".", "_autodiscover", "(", ")", "# Load the plugin-based panel configuration", "self", ".", "_load_panel_customization", "(", ")", "# Allow for override modules", "if", "self", ".", "_conf", ".", "get", "(", "\"customization_module\"", ",", "None", ")", ":", "customization_module", "=", "self", ".", "_conf", "[", "\"customization_module\"", "]", "bits", "=", "customization_module", ".", "split", "(", "'.'", ")", "mod_name", "=", "bits", ".", "pop", "(", ")", "package", "=", "'.'", ".", "join", "(", "bits", ")", "mod", "=", "import_module", "(", "package", ")", "try", ":", "before_import_registry", "=", "copy", ".", "copy", "(", "self", ".", "_registry", ")", "import_module", "(", "'%s.%s'", "%", "(", "package", ",", "mod_name", ")", ")", "except", "Exception", ":", "self", ".", "_registry", "=", "before_import_registry", "if", "module_has_submodule", "(", "mod", ",", "mod_name", ")", ":", "raise", "# Compile the dynamic urlconf.", "for", "dash", "in", "self", ".", "_registry", ".", "values", "(", ")", ":", "urlpatterns", ".", "append", "(", "url", "(", "r'^%s/'", "%", "dash", ".", "slug", ",", "_wrapped_include", "(", "dash", ".", "_decorated_urls", ")", ")", ")", "# Return the three arguments to django.conf.urls.include", "return", "urlpatterns", ",", "self", ".", "namespace", ",", "self", ".", "slug" ]
Constructs the URLconf for Horizon from registered Dashboards.
[ "Constructs", "the", "URLconf", "for", "Horizon", "from", "registered", "Dashboards", "." ]
python
train
39.352941
RudolfCardinal/pythonlib
cardinal_pythonlib/nhs.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/nhs.py#L116-L128
def generate_random_nhs_number() -> int: """ Returns a random valid NHS number, as an ``int``. """ check_digit = 10 # NHS numbers with this check digit are all invalid while check_digit == 10: digits = [random.randint(1, 9)] # don't start with a zero digits.extend([random.randint(0, 9) for _ in range(8)]) # ... length now 9 check_digit = nhs_check_digit(digits) # noinspection PyUnboundLocalVariable digits.append(check_digit) return int("".join([str(d) for d in digits]))
[ "def", "generate_random_nhs_number", "(", ")", "->", "int", ":", "check_digit", "=", "10", "# NHS numbers with this check digit are all invalid", "while", "check_digit", "==", "10", ":", "digits", "=", "[", "random", ".", "randint", "(", "1", ",", "9", ")", "]", "# don't start with a zero", "digits", ".", "extend", "(", "[", "random", ".", "randint", "(", "0", ",", "9", ")", "for", "_", "in", "range", "(", "8", ")", "]", ")", "# ... length now 9", "check_digit", "=", "nhs_check_digit", "(", "digits", ")", "# noinspection PyUnboundLocalVariable", "digits", ".", "append", "(", "check_digit", ")", "return", "int", "(", "\"\"", ".", "join", "(", "[", "str", "(", "d", ")", "for", "d", "in", "digits", "]", ")", ")" ]
Returns a random valid NHS number, as an ``int``.
[ "Returns", "a", "random", "valid", "NHS", "number", "as", "an", "int", "." ]
python
train
40.615385
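A self-contained sketch of the mod-11 check digit that `nhs_check_digit` is assumed to implement in the record above (the helper itself is not shown there); the example number 943 476 5919 is the commonly published NHS test number.

# Illustrative only: recompute the last digit of a 10-digit NHS number.
def nhs_check_digit_sketch(first_nine_digits):
    weights = range(10, 1, -1)                # 10, 9, ..., 2
    total = sum(w * d for w, d in zip(weights, first_nine_digits))
    check = 11 - (total % 11)
    if check == 11:
        check = 0
    return check                              # 10 means "invalid", hence the retry loop above

digits = [9, 4, 3, 4, 7, 6, 5, 9, 1, 9]
assert nhs_check_digit_sketch(digits[:9]) == digits[9]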
openvax/datacache
datacache/download.py
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/download.py#L159-L214
def fetch_file( download_url, filename=None, decompress=False, subdir=None, force=False, timeout=None, use_wget_if_available=False): """ Download a remote file and store it locally in a cache directory. Don't download it again if it's already present (unless `force` is True.) Parameters ---------- download_url : str Remote URL of file to download. filename : str, optional Local filename, used as cache key. If omitted, then determine the local filename from the URL. decompress : bool, optional By default any file whose remote extension is one of (".zip", ".gzip") and whose local filename lacks this suffix is decompressed. If a local filename wasn't provided but you still want to decompress the stored data then set this option to True. subdir : str, optional Group downloads in a single subdirectory. force : bool, optional By default, a remote file is not downloaded if it's already present. However, with this argument set to True, it will be overwritten. timeout : float, optional Timeout for download in seconds, default is None which uses global timeout. use_wget_if_available: bool, optional If the `wget` command is available, use that for download instead of Python libraries (default True) Returns the full path of the local file. """ filename = build_local_filename(download_url, filename, decompress) full_path = build_path(filename, subdir) if not os.path.exists(full_path) or force: logger.info("Fetching %s from URL %s", filename, download_url) _download_and_decompress_if_necessary( full_path=full_path, download_url=download_url, timeout=timeout, use_wget_if_available=use_wget_if_available) else: logger.info("Cached file %s from URL %s", filename, download_url) return full_path
[ "def", "fetch_file", "(", "download_url", ",", "filename", "=", "None", ",", "decompress", "=", "False", ",", "subdir", "=", "None", ",", "force", "=", "False", ",", "timeout", "=", "None", ",", "use_wget_if_available", "=", "False", ")", ":", "filename", "=", "build_local_filename", "(", "download_url", ",", "filename", ",", "decompress", ")", "full_path", "=", "build_path", "(", "filename", ",", "subdir", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "full_path", ")", "or", "force", ":", "logger", ".", "info", "(", "\"Fetching %s from URL %s\"", ",", "filename", ",", "download_url", ")", "_download_and_decompress_if_necessary", "(", "full_path", "=", "full_path", ",", "download_url", "=", "download_url", ",", "timeout", "=", "timeout", ",", "use_wget_if_available", "=", "use_wget_if_available", ")", "else", ":", "logger", ".", "info", "(", "\"Cached file %s from URL %s\"", ",", "filename", ",", "download_url", ")", "return", "full_path" ]
Download a remote file and store it locally in a cache directory. Don't download it again if it's already present (unless `force` is True.) Parameters ---------- download_url : str Remote URL of file to download. filename : str, optional Local filename, used as cache key. If omitted, then determine the local filename from the URL. decompress : bool, optional By default any file whose remote extension is one of (".zip", ".gzip") and whose local filename lacks this suffix is decompressed. If a local filename wasn't provided but you still want to decompress the stored data then set this option to True. subdir : str, optional Group downloads in a single subdirectory. force : bool, optional By default, a remote file is not downloaded if it's already present. However, with this argument set to True, it will be overwritten. timeout : float, optional Timeout for download in seconds, default is None which uses global timeout. use_wget_if_available: bool, optional If the `wget` command is available, use that for download instead of Python libraries (default True) Returns the full path of the local file.
[ "Download", "a", "remote", "file", "and", "store", "it", "locally", "in", "a", "cache", "directory", ".", "Don", "t", "download", "it", "again", "if", "it", "s", "already", "present", "(", "unless", "force", "is", "True", ".", ")" ]
python
train
35.232143
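A minimal call sketch for the `fetch_file` record above; the URL and subdirectory name are placeholders, and the import path follows the module shown in the record (`datacache/download.py`).

# Illustrative only: download once, then reuse the cached copy on later calls.
from datacache.download import fetch_file

path = fetch_file(
    "https://example.org/data/annotations.csv.gz",   # placeholder URL
    decompress=True,        # store the decompressed .csv in the cache
    subdir="my-project")
print(path)                 # local cache path; unchanged on repeat calls unless force=True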
PyCQA/astroid
astroid/rebuilder.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/rebuilder.py#L172-L177
def _save_assignment(self, node, name=None):
        """save assignment situation since node.parent is not available yet"""
        if self._global_names and node.name in self._global_names[-1]:
            node.root().set_local(node.name, node)
        else:
            node.parent.set_local(node.name, node)
[ "def", "_save_assignment", "(", "self", ",", "node", ",", "name", "=", "None", ")", ":", "if", "self", ".", "_global_names", "and", "node", ".", "name", "in", "self", ".", "_global_names", "[", "-", "1", "]", ":", "node", ".", "root", "(", ")", ".", "set_local", "(", "node", ".", "name", ",", "node", ")", "else", ":", "node", ".", "parent", ".", "set_local", "(", "node", ".", "name", ",", "node", ")" ]
save assignment situation since node.parent is not available yet
[ "save", "assignement", "situation", "since", "node", ".", "parent", "is", "not", "available", "yet" ]
python
train
51
pytroll/satpy
satpy/resample.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/resample.py#L720-L738
def aggregate(d, y_size, x_size):
    """Average every 4 elements (2x2) in a 2D array"""
    if d.ndim != 2:
        # we can't guarantee what blocks we are getting and how
        # it should be reshaped to do the averaging.
        raise ValueError("Can't aggregate (reduce) data arrays with "
                         "more than 2 dimensions.")
    if not (x_size.is_integer() and y_size.is_integer()):
        raise ValueError("Aggregation factors are not integers")
    for agg_size, chunks in zip([y_size, x_size], d.chunks):
        for chunk_size in chunks:
            if chunk_size % agg_size != 0:
                raise ValueError("Aggregation requires arrays with "
                                 "shapes and chunks divisible by the "
                                 "factor")

    new_chunks = (tuple(int(x / y_size) for x in d.chunks[0]),
                  tuple(int(x / x_size) for x in d.chunks[1]))
    return da.core.map_blocks(_mean, d, y_size, x_size, dtype=d.dtype,
                              chunks=new_chunks)
[ "def", "aggregate", "(", "d", ",", "y_size", ",", "x_size", ")", ":", "if", "d", ".", "ndim", "!=", "2", ":", "# we can't guarantee what blocks we are getting and how", "# it should be reshaped to do the averaging.", "raise", "ValueError", "(", "\"Can't aggregrate (reduce) data arrays with \"", "\"more than 2 dimensions.\"", ")", "if", "not", "(", "x_size", ".", "is_integer", "(", ")", "and", "y_size", ".", "is_integer", "(", ")", ")", ":", "raise", "ValueError", "(", "\"Aggregation factors are not integers\"", ")", "for", "agg_size", ",", "chunks", "in", "zip", "(", "[", "y_size", ",", "x_size", "]", ",", "d", ".", "chunks", ")", ":", "for", "chunk_size", "in", "chunks", ":", "if", "chunk_size", "%", "agg_size", "!=", "0", ":", "raise", "ValueError", "(", "\"Aggregation requires arrays with \"", "\"shapes and chunks divisible by the \"", "\"factor\"", ")", "new_chunks", "=", "(", "tuple", "(", "int", "(", "x", "/", "y_size", ")", "for", "x", "in", "d", ".", "chunks", "[", "0", "]", ")", ",", "tuple", "(", "int", "(", "x", "/", "x_size", ")", "for", "x", "in", "d", ".", "chunks", "[", "1", "]", ")", ")", "return", "da", ".", "core", ".", "map_blocks", "(", "_mean", ",", "d", ",", "y_size", ",", "x_size", ",", "dtype", "=", "d", ".", "dtype", ",", "chunks", "=", "new_chunks", ")" ]
Average every 4 elements (2x2) in a 2D array
[ "Average", "every", "4", "elements", "(", "2x2", ")", "in", "a", "2D", "array" ]
python
train
55.736842
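A plain-NumPy sketch of the 2x2 block averaging that the `aggregate` record above performs through dask; the `_mean` helper and the `map_blocks` chunking logic are deliberately omitted.

# Illustrative only: average every 2x2 block of a small array.
import numpy as np

d = np.arange(16, dtype=float).reshape(4, 4)
y_size = x_size = 2
reduced = d.reshape(d.shape[0] // y_size, y_size,
                    d.shape[1] // x_size, x_size).mean(axis=(1, 3))
# reduced is 2x2; each cell holds the mean of one 2x2 block of d.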
quantumlib/Cirq
cirq/google/params.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/params.py#L35-L47
def _to_zip_product(sweep: Sweep) -> Product: """Converts sweep to a product of zips of single sweeps, if possible.""" if not isinstance(sweep, Product): sweep = Product(sweep) if not all(isinstance(f, Zip) for f in sweep.factors): factors = [f if isinstance(f, Zip) else Zip(f) for f in sweep.factors] sweep = Product(*factors) for factor in sweep.factors: for term in cast(Zip, factor).sweeps: if not isinstance(term, SingleSweep): raise ValueError('cannot convert to zip-product form: {}' .format(sweep)) return sweep
[ "def", "_to_zip_product", "(", "sweep", ":", "Sweep", ")", "->", "Product", ":", "if", "not", "isinstance", "(", "sweep", ",", "Product", ")", ":", "sweep", "=", "Product", "(", "sweep", ")", "if", "not", "all", "(", "isinstance", "(", "f", ",", "Zip", ")", "for", "f", "in", "sweep", ".", "factors", ")", ":", "factors", "=", "[", "f", "if", "isinstance", "(", "f", ",", "Zip", ")", "else", "Zip", "(", "f", ")", "for", "f", "in", "sweep", ".", "factors", "]", "sweep", "=", "Product", "(", "*", "factors", ")", "for", "factor", "in", "sweep", ".", "factors", ":", "for", "term", "in", "cast", "(", "Zip", ",", "factor", ")", ".", "sweeps", ":", "if", "not", "isinstance", "(", "term", ",", "SingleSweep", ")", ":", "raise", "ValueError", "(", "'cannot convert to zip-product form: {}'", ".", "format", "(", "sweep", ")", ")", "return", "sweep" ]
Converts sweep to a product of zips of single sweeps, if possible.
[ "Converts", "sweep", "to", "a", "product", "of", "zips", "of", "single", "sweeps", "if", "possible", "." ]
python
train
47.769231
zalando/patroni
patroni/utils.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/utils.py#L36-L58
def patch_config(config, data): """recursively 'patch' `config` with `data` :returns: `!True` if the `config` was changed""" is_changed = False for name, value in data.items(): if value is None: if config.pop(name, None) is not None: is_changed = True elif name in config: if isinstance(value, dict): if isinstance(config[name], dict): if patch_config(config[name], value): is_changed = True else: config[name] = value is_changed = True elif str(config[name]) != str(value): config[name] = value is_changed = True else: config[name] = value is_changed = True return is_changed
[ "def", "patch_config", "(", "config", ",", "data", ")", ":", "is_changed", "=", "False", "for", "name", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "value", "is", "None", ":", "if", "config", ".", "pop", "(", "name", ",", "None", ")", "is", "not", "None", ":", "is_changed", "=", "True", "elif", "name", "in", "config", ":", "if", "isinstance", "(", "value", ",", "dict", ")", ":", "if", "isinstance", "(", "config", "[", "name", "]", ",", "dict", ")", ":", "if", "patch_config", "(", "config", "[", "name", "]", ",", "value", ")", ":", "is_changed", "=", "True", "else", ":", "config", "[", "name", "]", "=", "value", "is_changed", "=", "True", "elif", "str", "(", "config", "[", "name", "]", ")", "!=", "str", "(", "value", ")", ":", "config", "[", "name", "]", "=", "value", "is_changed", "=", "True", "else", ":", "config", "[", "name", "]", "=", "value", "is_changed", "=", "True", "return", "is_changed" ]
recursively 'patch' `config` with `data` :returns: `!True` if the `config` was changed
[ "recursively", "patch", "config", "with", "data", ":", "returns", ":", "!True", "if", "the", "config", "was", "changed" ]
python
train
35.73913
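A usage sketch for the `patch_config` record above, assuming `patroni.utils` is importable: a `None` value removes a key, nested dicts are merged recursively, and the return value reports whether anything changed.

# Illustrative only: update one setting and delete another in a nested config.
from patroni.utils import patch_config

config = {'postgresql': {'parameters': {'max_connections': 100, 'fsync': 'on'}}}
data = {'postgresql': {'parameters': {'max_connections': 200, 'fsync': None}}}

changed = patch_config(config, data)
# changed is True; config is now
# {'postgresql': {'parameters': {'max_connections': 200}}}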
tornadoweb/tornado
tornado/escape.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/escape.py#L118-L144
def url_unescape( # noqa: F811 value: Union[str, bytes], encoding: Optional[str] = "utf-8", plus: bool = True ) -> Union[str, bytes]: """Decodes the given value from a URL. The argument may be either a byte or unicode string. If encoding is None, the result will be a byte string. Otherwise, the result is a unicode string in the specified encoding. If ``plus`` is true (the default), plus signs will be interpreted as spaces (literal plus signs must be represented as "%2B"). This is appropriate for query strings and form-encoded values but not for the path component of a URL. Note that this default is the reverse of Python's urllib module. .. versionadded:: 3.1 The ``plus`` argument """ if encoding is None: if plus: # unquote_to_bytes doesn't have a _plus variant value = to_basestring(value).replace("+", " ") return urllib.parse.unquote_to_bytes(value) else: unquote = urllib.parse.unquote_plus if plus else urllib.parse.unquote return unquote(to_basestring(value), encoding=encoding)
[ "def", "url_unescape", "(", "# noqa: F811", "value", ":", "Union", "[", "str", ",", "bytes", "]", ",", "encoding", ":", "Optional", "[", "str", "]", "=", "\"utf-8\"", ",", "plus", ":", "bool", "=", "True", ")", "->", "Union", "[", "str", ",", "bytes", "]", ":", "if", "encoding", "is", "None", ":", "if", "plus", ":", "# unquote_to_bytes doesn't have a _plus variant", "value", "=", "to_basestring", "(", "value", ")", ".", "replace", "(", "\"+\"", ",", "\" \"", ")", "return", "urllib", ".", "parse", ".", "unquote_to_bytes", "(", "value", ")", "else", ":", "unquote", "=", "urllib", ".", "parse", ".", "unquote_plus", "if", "plus", "else", "urllib", ".", "parse", ".", "unquote", "return", "unquote", "(", "to_basestring", "(", "value", ")", ",", "encoding", "=", "encoding", ")" ]
Decodes the given value from a URL. The argument may be either a byte or unicode string. If encoding is None, the result will be a byte string. Otherwise, the result is a unicode string in the specified encoding. If ``plus`` is true (the default), plus signs will be interpreted as spaces (literal plus signs must be represented as "%2B"). This is appropriate for query strings and form-encoded values but not for the path component of a URL. Note that this default is the reverse of Python's urllib module. .. versionadded:: 3.1 The ``plus`` argument
[ "Decodes", "the", "given", "value", "from", "a", "URL", "." ]
python
train
40.518519
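A short sketch of the plus-sign and encoding behaviour documented in the `url_unescape` record above, using the signature shown there.

# Illustrative only: default plus handling, literal plus, and raw-bytes output.
from tornado.escape import url_unescape

print(url_unescape("a+b%2Bc"))                    # 'a b+c'  (plus decoded as a space)
print(url_unescape("a+b%2Bc", plus=False))        # 'a+b+c'  (keep literal plus, e.g. for paths)
print(url_unescape(b"%E2%9C%93", encoding=None))  # b'\xe2\x9c\x93'  (bytes out when encoding is None)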
a1ezzz/wasp-general
wasp_general/network/transport.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/transport.py#L192-L198
def server_socket(self, config): """ :meth:`.WNetworkNativeTransportProto.server_socket` method implementation """ if self.__server_socket is None: self.__server_socket = self.create_server_socket(config) self.__server_socket.bind(self.bind_socket(config).pair()) return self.__server_socket
[ "def", "server_socket", "(", "self", ",", "config", ")", ":", "if", "self", ".", "__server_socket", "is", "None", ":", "self", ".", "__server_socket", "=", "self", ".", "create_server_socket", "(", "config", ")", "self", ".", "__server_socket", ".", "bind", "(", "self", ".", "bind_socket", "(", "config", ")", ".", "pair", "(", ")", ")", "return", "self", ".", "__server_socket" ]
:meth:`.WNetworkNativeTransportProto.server_socket` method implementation
[ ":", "meth", ":", ".", "WNetworkNativeTransportProto", ".", "server_socket", "method", "implementation" ]
python
train
42.714286
OnroerendErfgoed/crabpy
crabpy/gateway/capakey.py
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/capakey.py#L260-L295
def list_secties_by_afdeling(self, afdeling): ''' List all `secties` in a `kadastrale afdeling`. :param afdeling: The :class:`Afdeling` for which the `secties` are \ wanted. Can also be the id of and `afdeling`. :rtype: A :class:`list` of `Sectie`. ''' try: aid = afdeling.id gid = afdeling.gemeente.id except AttributeError: aid = afdeling afdeling = self.get_kadastrale_afdeling_by_id(aid) gid = afdeling.gemeente.id afdeling.clear_gateway() def creator(): url = self.base_url + '/municipality/%s/department/%s/section' % (gid, aid) h = self.base_headers res = capakey_rest_gateway_request(url, h).json() return [ Sectie( r['sectionCode'], afdeling ) for r in res['sections'] ] if self.caches['long'].is_configured: key = 'list_secties_by_afdeling_rest#%s' % aid secties = self.caches['long'].get_or_create(key, creator) else: secties = creator() for s in secties: s.set_gateway(self) return secties
[ "def", "list_secties_by_afdeling", "(", "self", ",", "afdeling", ")", ":", "try", ":", "aid", "=", "afdeling", ".", "id", "gid", "=", "afdeling", ".", "gemeente", ".", "id", "except", "AttributeError", ":", "aid", "=", "afdeling", "afdeling", "=", "self", ".", "get_kadastrale_afdeling_by_id", "(", "aid", ")", "gid", "=", "afdeling", ".", "gemeente", ".", "id", "afdeling", ".", "clear_gateway", "(", ")", "def", "creator", "(", ")", ":", "url", "=", "self", ".", "base_url", "+", "'/municipality/%s/department/%s/section'", "%", "(", "gid", ",", "aid", ")", "h", "=", "self", ".", "base_headers", "res", "=", "capakey_rest_gateway_request", "(", "url", ",", "h", ")", ".", "json", "(", ")", "return", "[", "Sectie", "(", "r", "[", "'sectionCode'", "]", ",", "afdeling", ")", "for", "r", "in", "res", "[", "'sections'", "]", "]", "if", "self", ".", "caches", "[", "'long'", "]", ".", "is_configured", ":", "key", "=", "'list_secties_by_afdeling_rest#%s'", "%", "aid", "secties", "=", "self", ".", "caches", "[", "'long'", "]", ".", "get_or_create", "(", "key", ",", "creator", ")", "else", ":", "secties", "=", "creator", "(", ")", "for", "s", "in", "secties", ":", "s", ".", "set_gateway", "(", "self", ")", "return", "secties" ]
List all `secties` in a `kadastrale afdeling`. :param afdeling: The :class:`Afdeling` for which the `secties` are \ wanted. Can also be the id of an `afdeling`. :rtype: A :class:`list` of `Sectie`.
[ "List", "all", "secties", "in", "a", "kadastrale", "afdeling", "." ]
python
train
34.027778
qubell/contrib-python-qubell-client
qubell/api/tools/__init__.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/tools/__init__.py#L312-L325
def get_manifest_from_meta(metaurl, name): """ Extact manifest url from metadata url :param metaurl: Url to metadata :param name: Name of application to extract :return: """ if 'http' in metaurl: kit = yaml.safe_load(requests.get(url=metaurl).content)['kit']['applications'] else: kit = yaml.safe_load(open(metaurl).read())['kit']['applications'] app_urls = [x['manifest'] for x in kit if x['name'] == name] assert len(app_urls) == 1 return app_urls[0]
[ "def", "get_manifest_from_meta", "(", "metaurl", ",", "name", ")", ":", "if", "'http'", "in", "metaurl", ":", "kit", "=", "yaml", ".", "safe_load", "(", "requests", ".", "get", "(", "url", "=", "metaurl", ")", ".", "content", ")", "[", "'kit'", "]", "[", "'applications'", "]", "else", ":", "kit", "=", "yaml", ".", "safe_load", "(", "open", "(", "metaurl", ")", ".", "read", "(", ")", ")", "[", "'kit'", "]", "[", "'applications'", "]", "app_urls", "=", "[", "x", "[", "'manifest'", "]", "for", "x", "in", "kit", "if", "x", "[", "'name'", "]", "==", "name", "]", "assert", "len", "(", "app_urls", ")", "==", "1", "return", "app_urls", "[", "0", "]" ]
Extract manifest url from metadata url :param metaurl: Url to metadata :param name: Name of application to extract :return:
[ "Extact", "manifest", "url", "from", "metadata", "url", ":", "param", "metaurl", ":", "Url", "to", "metadata", ":", "param", "name", ":", "Name", "of", "application", "to", "extract", ":", "return", ":" ]
python
train
35.642857
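The function above loads a metadata document and picks the manifest URL of one named application. The sketch below reproduces just the selection step on an already-parsed structure; the dictionary literal stands in for the YAML content and is invented for illustration.

meta = {
    "kit": {
        "applications": [
            {"name": "app-a", "manifest": "http://example.com/a.yml"},
            {"name": "app-b", "manifest": "http://example.com/b.yml"},
        ]
    }
}

def manifest_for(meta, name):
    apps = meta["kit"]["applications"]
    urls = [a["manifest"] for a in apps if a["name"] == name]
    assert len(urls) == 1  # exactly one application must match
    return urls[0]

assert manifest_for(meta, "app-b") == "http://example.com/b.yml"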
python-xlib/python-xlib
Xlib/support/connect.py
https://github.com/python-xlib/python-xlib/blob/8901e831737e79fe5645f48089d70e1d1046d2f2/Xlib/support/connect.py#L58-L73
def get_display(display): """dname, protocol, host, dno, screen = get_display(display) Parse DISPLAY into its components. If DISPLAY is None, use the default display. The return values are: DNAME -- the full display name (string) PROTOCOL -- the protocol to use (None if automatic) HOST -- the host name (string, possibly empty) DNO -- display number (integer) SCREEN -- default screen number (integer) """ modname = _display_mods.get(platform, _default_display_mod) mod = _relative_import(modname) return mod.get_display(display)
[ "def", "get_display", "(", "display", ")", ":", "modname", "=", "_display_mods", ".", "get", "(", "platform", ",", "_default_display_mod", ")", "mod", "=", "_relative_import", "(", "modname", ")", "return", "mod", ".", "get_display", "(", "display", ")" ]
dname, protocol, host, dno, screen = get_display(display) Parse DISPLAY into its components. If DISPLAY is None, use the default display. The return values are: DNAME -- the full display name (string) PROTOCOL -- the protocol to use (None if automatic) HOST -- the host name (string, possibly empty) DNO -- display number (integer) SCREEN -- default screen number (integer)
[ "dname", "protocol", "host", "dno", "screen", "=", "get_display", "(", "display", ")" ]
python
train
37
samjabrahams/anchorhub
anchorhub/lib/armedcheckswitch.py
https://github.com/samjabrahams/anchorhub/blob/5ade359b08297d4003a5f477389c01de9e634b54/anchorhub/lib/armedcheckswitch.py#L39-L52
def switch(self, *args): """ Method that attempts to change the switch to the opposite of its current state. Calls either switch_on() or switch_off() to accomplish this. :param kwargs: an variable length dictionary of key-pair arguments passed through to either switch_on() or switch_off() :return: Boolean. Returns True if the switch changes state """ if self.is_switched(): return self.switch_off(*args) else: return self.switch_on(*args)
[ "def", "switch", "(", "self", ",", "*", "args", ")", ":", "if", "self", ".", "is_switched", "(", ")", ":", "return", "self", ".", "switch_off", "(", "*", "args", ")", "else", ":", "return", "self", ".", "switch_on", "(", "*", "args", ")" ]
Method that attempts to change the switch to the opposite of its current state. Calls either switch_on() or switch_off() to accomplish this. :param args: a variable length list of arguments passed through to either switch_on() or switch_off() :return: Boolean. Returns True if the switch changes state
[ "Method", "that", "attempts", "to", "change", "the", "switch", "to", "the", "opposite", "of", "its", "current", "state", ".", "Calls", "either", "switch_on", "()", "or", "switch_off", "()", "to", "accomplish", "this", "." ]
python
train
38.285714
saltstack/salt
salt/cloud/clouds/dimensiondata.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/dimensiondata.py#L558-L578
def get_lb_conn(dd_driver=None): ''' Return a load-balancer conn object ''' vm_ = get_configured_provider() region = config.get_cloud_config_value( 'region', vm_, __opts__ ) user_id = config.get_cloud_config_value( 'user_id', vm_, __opts__ ) key = config.get_cloud_config_value( 'key', vm_, __opts__ ) if not dd_driver: raise SaltCloudSystemExit( 'Missing dimensiondata_driver for get_lb_conn method.' ) return get_driver_lb(Provider_lb.DIMENSIONDATA)(user_id, key, region=region)
[ "def", "get_lb_conn", "(", "dd_driver", "=", "None", ")", ":", "vm_", "=", "get_configured_provider", "(", ")", "region", "=", "config", ".", "get_cloud_config_value", "(", "'region'", ",", "vm_", ",", "__opts__", ")", "user_id", "=", "config", ".", "get_cloud_config_value", "(", "'user_id'", ",", "vm_", ",", "__opts__", ")", "key", "=", "config", ".", "get_cloud_config_value", "(", "'key'", ",", "vm_", ",", "__opts__", ")", "if", "not", "dd_driver", ":", "raise", "SaltCloudSystemExit", "(", "'Missing dimensiondata_driver for get_lb_conn method.'", ")", "return", "get_driver_lb", "(", "Provider_lb", ".", "DIMENSIONDATA", ")", "(", "user_id", ",", "key", ",", "region", "=", "region", ")" ]
Return a load-balancer conn object
[ "Return", "a", "load", "-", "balancer", "conn", "object" ]
python
train
26.666667
saltstack/salt
salt/modules/sysmod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sysmod.py#L677-L714
def list_runners(*args): ''' List the runners loaded on the minion .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' sys.list_runners Runner names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_runners 'm*' ''' run_ = salt.runner.Runner(__opts__) runners = set() if not args: for func in run_.functions: runners.add(func.split('.')[0]) return sorted(runners) for module in args: if '*' in module: for func in fnmatch.filter(run_.functions, module): runners.add(func.split('.')[0]) else: for func in run_.functions: mod_test = func.split('.')[0] if mod_test == module: runners.add(mod_test) return sorted(runners)
[ "def", "list_runners", "(", "*", "args", ")", ":", "run_", "=", "salt", ".", "runner", ".", "Runner", "(", "__opts__", ")", "runners", "=", "set", "(", ")", "if", "not", "args", ":", "for", "func", "in", "run_", ".", "functions", ":", "runners", ".", "add", "(", "func", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "return", "sorted", "(", "runners", ")", "for", "module", "in", "args", ":", "if", "'*'", "in", "module", ":", "for", "func", "in", "fnmatch", ".", "filter", "(", "run_", ".", "functions", ",", "module", ")", ":", "runners", ".", "add", "(", "func", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "else", ":", "for", "func", "in", "run_", ".", "functions", ":", "mod_test", "=", "func", ".", "split", "(", "'.'", ")", "[", "0", "]", "if", "mod_test", "==", "module", ":", "runners", ".", "add", "(", "mod_test", ")", "return", "sorted", "(", "runners", ")" ]
List the runners loaded on the minion .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' sys.list_runners Runner names can be specified as globs. .. versionadded:: 2015.5.0 .. code-block:: bash salt '*' sys.list_runners 'm*'
[ "List", "the", "runners", "loaded", "on", "the", "minion" ]
python
train
22.605263
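The runner listing above reduces dotted function names to their module prefixes, optionally filtered by a glob. A self-contained sketch of that filtering step follows; the sample function names are invented.

import fnmatch

functions = ["manage.up", "manage.down", "jobs.active", "jobs.lookup_jid", "mine.get"]

def list_modules(functions, *patterns):
    if not patterns:
        return sorted({f.split(".")[0] for f in functions})
    modules = set()
    for pattern in patterns:
        if "*" in pattern:
            # Globs are matched against the full dotted name, as in the record above.
            for func in fnmatch.filter(functions, pattern):
                modules.add(func.split(".")[0])
        else:
            modules.update(f.split(".")[0] for f in functions if f.split(".")[0] == pattern)
    return sorted(modules)

assert list_modules(functions) == ["jobs", "manage", "mine"]
assert list_modules(functions, "m*") == ["manage", "mine"]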
gwpy/gwpy
gwpy/timeseries/core.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/core.py#L1557-L1583
def coalesce(self): """Merge contiguous elements of this list into single objects This method implicitly sorts and potentially shortens this list. """ self.sort(key=lambda ts: ts.t0.value) i = j = 0 N = len(self) while j < N: this = self[j] j += 1 if j < N and this.is_contiguous(self[j]) == 1: while j < N and this.is_contiguous(self[j]): try: this = self[i] = this.append(self[j]) except ValueError as exc: if 'cannot resize this array' in str(exc): this = this.copy() this = self[i] = this.append(self[j]) else: raise j += 1 else: self[i] = this i += 1 del self[i:] return self
[ "def", "coalesce", "(", "self", ")", ":", "self", ".", "sort", "(", "key", "=", "lambda", "ts", ":", "ts", ".", "t0", ".", "value", ")", "i", "=", "j", "=", "0", "N", "=", "len", "(", "self", ")", "while", "j", "<", "N", ":", "this", "=", "self", "[", "j", "]", "j", "+=", "1", "if", "j", "<", "N", "and", "this", ".", "is_contiguous", "(", "self", "[", "j", "]", ")", "==", "1", ":", "while", "j", "<", "N", "and", "this", ".", "is_contiguous", "(", "self", "[", "j", "]", ")", ":", "try", ":", "this", "=", "self", "[", "i", "]", "=", "this", ".", "append", "(", "self", "[", "j", "]", ")", "except", "ValueError", "as", "exc", ":", "if", "'cannot resize this array'", "in", "str", "(", "exc", ")", ":", "this", "=", "this", ".", "copy", "(", ")", "this", "=", "self", "[", "i", "]", "=", "this", ".", "append", "(", "self", "[", "j", "]", ")", "else", ":", "raise", "j", "+=", "1", "else", ":", "self", "[", "i", "]", "=", "this", "i", "+=", "1", "del", "self", "[", "i", ":", "]", "return", "self" ]
Merge contiguous elements of this list into single objects This method implicitly sorts and potentially shortens this list.
[ "Merge", "contiguous", "elements", "of", "this", "list", "into", "single", "objects" ]
python
train
34.62963
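coalesce() above merges list elements that are exactly contiguous in time. The sketch below shows the same merge loop on plain (start, end) tuples, without the TimeSeries machinery; "contiguous" here simply means one span starts where the previous one ends.

def coalesce(spans):
    spans = sorted(spans)
    merged = []
    for start, end in spans:
        if merged and merged[-1][1] == start:   # contiguous with the previous span
            merged[-1] = (merged[-1][0], end)
        else:
            merged.append((start, end))
    return merged

assert coalesce([(4, 6), (0, 2), (2, 4), (8, 9)]) == [(0, 6), (8, 9)]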
b3j0f/utils
b3j0f/utils/reflect.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/reflect.py#L48-L145
def base_elts(elt, cls=None, depth=None): """Get bases elements of the input elt. - If elt is an instance, get class and all base classes. - If elt is a method, get all base methods. - If elt is a class, get all base classes. - In other case, get an empty list. :param elt: supposed inherited elt. :param cls: cls from where find attributes equal to elt. If None, it is found as much as possible. Required in python3 for function classes. :type cls: type or list :param int depth: search depth. If None (default), depth is maximal. :return: elt bases elements. if elt has not base elements, result is empty. :rtype: list """ result = [] elt_name = getattr(elt, '__name__', None) if elt_name is not None: cls = [] if cls is None else ensureiterable(cls) elt_is_class = False # if cls is None and elt is routine, it is possible to find the cls if not cls and isroutine(elt): if hasattr(elt, '__self__'): # from the instance instance = get_method_self(elt) # get instance if instance is None and PY2: # get base im_class if PY2 cls = list(elt.im_class.__bases__) else: # use instance class cls = [instance.__class__] # cls is elt if elt is a class elif isclass(elt): elt_is_class = True cls = list(elt.__bases__) if cls: # if cls is not empty, find all base classes index_of_found_classes = 0 # get last visited class index visited_classes = set(cls) # cache for visited classes len_classes = len(cls) if depth is None: # if depth is None, get maximal value depth = -1 # set negative value while depth != 0 and index_of_found_classes != len_classes: len_classes = len(cls) for index in range(index_of_found_classes, len_classes): _cls = cls[index] for base_cls in _cls.__bases__: if base_cls in visited_classes: continue else: visited_classes.add(base_cls) cls.append(base_cls) index_of_found_classes = len_classes depth -= 1 if elt_is_class: # if cls is elt, result is classes minus first class result = cls elif isroutine(elt): # get an elt to compare with found element if ismethod(elt): elt_to_compare = get_method_function(elt) else: elt_to_compare = elt for _cls in cls: # for all classes # get possible base elt b_elt = getattr(_cls, elt_name, None) if b_elt is not None: # compare funcs if ismethod(b_elt): bec = get_method_function(b_elt) else: bec = b_elt # if matching, add to result if bec is elt_to_compare: result.append(b_elt) return result
[ "def", "base_elts", "(", "elt", ",", "cls", "=", "None", ",", "depth", "=", "None", ")", ":", "result", "=", "[", "]", "elt_name", "=", "getattr", "(", "elt", ",", "'__name__'", ",", "None", ")", "if", "elt_name", "is", "not", "None", ":", "cls", "=", "[", "]", "if", "cls", "is", "None", "else", "ensureiterable", "(", "cls", ")", "elt_is_class", "=", "False", "# if cls is None and elt is routine, it is possible to find the cls", "if", "not", "cls", "and", "isroutine", "(", "elt", ")", ":", "if", "hasattr", "(", "elt", ",", "'__self__'", ")", ":", "# from the instance", "instance", "=", "get_method_self", "(", "elt", ")", "# get instance", "if", "instance", "is", "None", "and", "PY2", ":", "# get base im_class if PY2", "cls", "=", "list", "(", "elt", ".", "im_class", ".", "__bases__", ")", "else", ":", "# use instance class", "cls", "=", "[", "instance", ".", "__class__", "]", "# cls is elt if elt is a class", "elif", "isclass", "(", "elt", ")", ":", "elt_is_class", "=", "True", "cls", "=", "list", "(", "elt", ".", "__bases__", ")", "if", "cls", ":", "# if cls is not empty, find all base classes", "index_of_found_classes", "=", "0", "# get last visited class index", "visited_classes", "=", "set", "(", "cls", ")", "# cache for visited classes", "len_classes", "=", "len", "(", "cls", ")", "if", "depth", "is", "None", ":", "# if depth is None, get maximal value", "depth", "=", "-", "1", "# set negative value", "while", "depth", "!=", "0", "and", "index_of_found_classes", "!=", "len_classes", ":", "len_classes", "=", "len", "(", "cls", ")", "for", "index", "in", "range", "(", "index_of_found_classes", ",", "len_classes", ")", ":", "_cls", "=", "cls", "[", "index", "]", "for", "base_cls", "in", "_cls", ".", "__bases__", ":", "if", "base_cls", "in", "visited_classes", ":", "continue", "else", ":", "visited_classes", ".", "add", "(", "base_cls", ")", "cls", ".", "append", "(", "base_cls", ")", "index_of_found_classes", "=", "len_classes", "depth", "-=", "1", "if", "elt_is_class", ":", "# if cls is elt, result is classes minus first class", "result", "=", "cls", "elif", "isroutine", "(", "elt", ")", ":", "# get an elt to compare with found element", "if", "ismethod", "(", "elt", ")", ":", "elt_to_compare", "=", "get_method_function", "(", "elt", ")", "else", ":", "elt_to_compare", "=", "elt", "for", "_cls", "in", "cls", ":", "# for all classes", "# get possible base elt", "b_elt", "=", "getattr", "(", "_cls", ",", "elt_name", ",", "None", ")", "if", "b_elt", "is", "not", "None", ":", "# compare funcs", "if", "ismethod", "(", "b_elt", ")", ":", "bec", "=", "get_method_function", "(", "b_elt", ")", "else", ":", "bec", "=", "b_elt", "# if matching, add to result", "if", "bec", "is", "elt_to_compare", ":", "result", ".", "append", "(", "b_elt", ")", "return", "result" ]
Get bases elements of the input elt. - If elt is an instance, get class and all base classes. - If elt is a method, get all base methods. - If elt is a class, get all base classes. - In any other case, get an empty list. :param elt: supposed inherited elt. :param cls: cls from which to find attributes equal to elt. If None, it is found as much as possible. Required in python3 for function classes. :type cls: type or list :param int depth: search depth. If None (default), depth is maximal. :return: elt bases elements. If elt has no base elements, result is empty. :rtype: list
[ "Get", "bases", "elements", "of", "the", "input", "elt", "." ]
python
train
33.540816
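base_elts() above walks __bases__ to discover where an attribute is inherited from. A small self-contained illustration of the same idea follows; the classes A, B and C are invented for the example.

class A:
    def greet(self):
        return "A"

class B(A):
    pass

class C(B):
    def greet(self):
        return "C"

def all_bases(cls):
    # Breadth-first walk over __bases__, without duplicates.
    found, queue = [], list(cls.__bases__)
    seen = set(queue)
    while queue:
        base = queue.pop(0)
        found.append(base)
        for parent in base.__bases__:
            if parent not in seen:
                seen.add(parent)
                queue.append(parent)
    return found

assert all_bases(C) == [B, A, object]

# Which bases define 'greet' themselves? Only A does; B merely inherits it.
defining = [base for base in all_bases(C) if "greet" in vars(base)]
assert defining == [A]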
aconrad/pycobertura
pycobertura/cobertura.py
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L137-L152
def line_statuses(self, filename): """ Return a list of tuples `(lineno, status)` of all the lines found in the Cobertura report for the given file `filename` where `lineno` is the line number and `status` is coverage status of the line which can be either `True` (line hit) or `False` (line miss). """ line_elements = self._get_lines_by_filename(filename) lines_w_status = [] for line in line_elements: lineno = int(line.attrib['number']) status = line.attrib['hits'] != '0' lines_w_status.append((lineno, status)) return lines_w_status
[ "def", "line_statuses", "(", "self", ",", "filename", ")", ":", "line_elements", "=", "self", ".", "_get_lines_by_filename", "(", "filename", ")", "lines_w_status", "=", "[", "]", "for", "line", "in", "line_elements", ":", "lineno", "=", "int", "(", "line", ".", "attrib", "[", "'number'", "]", ")", "status", "=", "line", ".", "attrib", "[", "'hits'", "]", "!=", "'0'", "lines_w_status", ".", "append", "(", "(", "lineno", ",", "status", ")", ")", "return", "lines_w_status" ]
Return a list of tuples `(lineno, status)` of all the lines found in the Cobertura report for the given file `filename` where `lineno` is the line number and `status` is coverage status of the line which can be either `True` (line hit) or `False` (line miss).
[ "Return", "a", "list", "of", "tuples", "(", "lineno", "status", ")", "of", "all", "the", "lines", "found", "in", "the", "Cobertura", "report", "for", "the", "given", "file", "filename", "where", "lineno", "is", "the", "line", "number", "and", "status", "is", "coverage", "status", "of", "the", "line", "which", "can", "be", "either", "True", "(", "line", "hit", ")", "or", "False", "(", "line", "miss", ")", "." ]
python
train
40
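line_statuses() above turns Cobertura <line> elements into (lineno, hit) tuples. Below is a self-contained sketch against a minimal XML fragment; the fragment is invented and far smaller than a real report.

import xml.etree.ElementTree as ET

report = """
<class filename="demo.py">
  <lines>
    <line number="1" hits="3"/>
    <line number="2" hits="0"/>
    <line number="3" hits="1"/>
  </lines>
</class>
"""

root = ET.fromstring(report)
statuses = [(int(line.attrib["number"]), line.attrib["hits"] != "0")
            for line in root.iter("line")]
assert statuses == [(1, True), (2, False), (3, True)]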
pybel/pybel
src/pybel/parser/parse_control.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_control.py#L372-L377
def clear(self): """Clear the statement_group, citation, evidence, and annotations.""" self.statement_group = None self.citation.clear() self.evidence = None self.annotations.clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "statement_group", "=", "None", "self", ".", "citation", ".", "clear", "(", ")", "self", ".", "evidence", "=", "None", "self", ".", "annotations", ".", "clear", "(", ")" ]
Clear the statement_group, citation, evidence, and annotations.
[ "Clear", "the", "statement_group", "citation", "evidence", "and", "annotations", "." ]
python
train
36.166667
HPENetworking/PYHPEIMC
pyhpeimc/plat/termaccess.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/termaccess.py#L443-L486
def remove_scope_ip(hostid, auth, url): """ Function to add remove IP address allocation :param hostid: Host id of the host to be deleted :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: String of HTTP response code. Should be 204 is successfull :rtype: str >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url) >>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url) >>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url) >>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url) >>> assert type(rem_host) is int >>> assert rem_host == 204 """ f_url = url + '/imcrs/res/access/assignedIpScope/ip/' + str(hostid) response = requests.delete(f_url, auth=auth, headers=HEADERS, ) try: if response.status_code == 204: # print("Host Successfully Deleted") return response.status_code elif response.status_code == 409: # print("IP Scope Already Exists") return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + " add_ip_scope: An Error has occured"
[ "def", "remove_scope_ip", "(", "hostid", ",", "auth", ",", "url", ")", ":", "f_url", "=", "url", "+", "'/imcrs/res/access/assignedIpScope/ip/'", "+", "str", "(", "hostid", ")", "response", "=", "requests", ".", "delete", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ",", ")", "try", ":", "if", "response", ".", "status_code", "==", "204", ":", "# print(\"Host Successfully Deleted\")", "return", "response", ".", "status_code", "elif", "response", ".", "status_code", "==", "409", ":", "# print(\"IP Scope Already Exists\")", "return", "response", ".", "status_code", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "\" add_ip_scope: An Error has occured\"" ]
Function to remove IP address allocation :param hostid: Host id of the host to be deleted :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: HTTP response code. Should be 204 if successful :rtype: int >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.termaccess import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url) >>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url) >>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url) >>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url) >>> assert type(rem_host) is int >>> assert rem_host == 204
[ "Function", "to", "add", "remove", "IP", "address", "allocation" ]
python
train
34.727273
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L41-L47
def _draw_rect(ax:plt.Axes, b:Collection[int], color:str='white', text=None, text_size=14): "Draw bounding box on `ax`." patch = ax.add_patch(patches.Rectangle(b[:2], *b[-2:], fill=False, edgecolor=color, lw=2)) _draw_outline(patch, 4) if text is not None: patch = ax.text(*b[:2], text, verticalalignment='top', color=color, fontsize=text_size, weight='bold') _draw_outline(patch,1)
[ "def", "_draw_rect", "(", "ax", ":", "plt", ".", "Axes", ",", "b", ":", "Collection", "[", "int", "]", ",", "color", ":", "str", "=", "'white'", ",", "text", "=", "None", ",", "text_size", "=", "14", ")", ":", "patch", "=", "ax", ".", "add_patch", "(", "patches", ".", "Rectangle", "(", "b", "[", ":", "2", "]", ",", "*", "b", "[", "-", "2", ":", "]", ",", "fill", "=", "False", ",", "edgecolor", "=", "color", ",", "lw", "=", "2", ")", ")", "_draw_outline", "(", "patch", ",", "4", ")", "if", "text", "is", "not", "None", ":", "patch", "=", "ax", ".", "text", "(", "*", "b", "[", ":", "2", "]", ",", "text", ",", "verticalalignment", "=", "'top'", ",", "color", "=", "color", ",", "fontsize", "=", "text_size", ",", "weight", "=", "'bold'", ")", "_draw_outline", "(", "patch", ",", "1", ")" ]
Draw bounding box on `ax`.
[ "Draw", "bounding", "box", "on", "ax", "." ]
python
train
58.285714
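The helper above draws a labelled bounding box on a matplotlib Axes. A stripped-down sketch of the same drawing calls follows (requires matplotlib; the text-outline effect of the original helper is omitted, and the image and coordinates are invented).

import matplotlib
matplotlib.use("Agg")  # render off-screen so the sketch runs headless
import matplotlib.pyplot as plt
import matplotlib.patches as patches

fig, ax = plt.subplots()
ax.imshow([[0, 1], [1, 0]], cmap="gray")            # stand-in image
x, y, w, h = 0.1, 0.1, 1.2, 1.5                     # box as (x, y, width, height)
ax.add_patch(patches.Rectangle((x, y), w, h, fill=False, edgecolor="white", lw=2))
ax.text(x, y, "label", verticalalignment="top", color="white", fontsize=14, weight="bold")
fig.savefig("boxed.png")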
cloudera/cm_api
python/src/cm_api/endpoints/types.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/types.py#L1285-L1296
def config_to_api_list(dic): """ Converts a python dictionary into a list containing the proper ApiConfig encoding for configuration data. @param dic: Key-value pairs to convert. @return: JSON dictionary of an ApiConfig list (*not* an ApiList). """ config = [ ] for k, v in dic.iteritems(): config.append({ 'name' : k, 'value': v }) return { ApiList.LIST_KEY : config }
[ "def", "config_to_api_list", "(", "dic", ")", ":", "config", "=", "[", "]", "for", "k", ",", "v", "in", "dic", ".", "iteritems", "(", ")", ":", "config", ".", "append", "(", "{", "'name'", ":", "k", ",", "'value'", ":", "v", "}", ")", "return", "{", "ApiList", ".", "LIST_KEY", ":", "config", "}" ]
Converts a python dictionary into a list containing the proper ApiConfig encoding for configuration data. @param dic: Key-value pairs to convert. @return: JSON dictionary of an ApiConfig list (*not* an ApiList).
[ "Converts", "a", "python", "dictionary", "into", "a", "list", "containing", "the", "proper", "ApiConfig", "encoding", "for", "configuration", "data", "." ]
python
train
31.75
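config_to_api_list() above reshapes a plain dict into a list of name/value pairs. The original targets Python 2 (dict.iteritems); an equivalent Python 3 sketch with a sample input and output follows. The LIST_KEY value "items" is a stand-in assumed here purely for illustration.

LIST_KEY = "items"  # stand-in for ApiList.LIST_KEY

def config_to_api_list(dic):
    config = [{"name": k, "value": v} for k, v in dic.items()]
    return {LIST_KEY: config}

result = config_to_api_list({"dfs_replication": 3})
assert result == {"items": [{"name": "dfs_replication", "value": 3}]}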
flatironinstitute/mlprocessors
mlprocessors/core.py
https://github.com/flatironinstitute/mlprocessors/blob/28d55542bbd02b9ddfe429db260f0be58f3820d3/mlprocessors/core.py#L386-L458
def invoke(proc, args=None, *, _instance = None, **kwargs): """ Executes the processor passing given arguments. :param args: a list of parameters in --key=value format. """ if args is None: args=[] for kwargname in kwargs: args.append('--'+kwargname) args.append('{}'.format(kwargs[kwargname])) parser = proc.invoke_parser(noexit=(_instance is not None)) opts = parser.parse_args(args) kwargs0 = {} def handle_set(opts, dataset, kwargs0, canMulti = False): for elem in dataset: elemname = elem.name # ml-run-process passes values for not provided inputs, outputs and params as empty strings ('') if hasattr(opts, elemname) and getattr(opts, elemname) not in [None, '']: # value for element was given in the invocation elemvalue = getattr(opts, elemname) if canMulti and isinstance(elemvalue, list): elemlist = elemvalue else: elemlist = [ elemvalue ] for elemelem in elemlist: for validator in elem.validators: validator(elemelem) if hasattr(opts, elem.name): prepared = elem.prepare(elemvalue) or elemvalue kwargs0[elem.name] = prepared elif elem.optional: # value was not set but is optional so ignore it kwargs0[elem.name] = None else: # value was not set and is mandatory -- error raise AttributeError('Missing value for {} '.format(elemname)) try: handle_set(opts, proc.INPUTS, kwargs0, True) handle_set(opts, proc.OUTPUTS, kwargs0, True) for param in proc.PARAMETERS: if hasattr(opts, param.name) and getattr(opts, param.name) is not None and getattr(opts, param.name) is not '': value = getattr(opts, param.name) # validate if needed for validator in param.validators: validator(value) # if param is a tuple of choices, each choice is a tuple itself # with first element of the input value and second element # containing the value to be passed to the processor if param.choices and isinstance(param.choices, tuple): for choice in param.choices: if choice[0] == value: kwargs0[param.name] = choice[1] break else: kwargs0[param.name] = value elif param.optional: kwargs0[param.name] = param.default else: raise AttributeError('Missing value for {} parameter'.format(param.name)) if not _instance: _instance = proc(**kwargs0) else: _instance.apply(_instance, **kwargs0) return _instance.run() # todo: cleanup except Exception as e: print("Error:", e) # traceback.print_exc() raise
[ "def", "invoke", "(", "proc", ",", "args", "=", "None", ",", "*", ",", "_instance", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "args", "is", "None", ":", "args", "=", "[", "]", "for", "kwargname", "in", "kwargs", ":", "args", ".", "append", "(", "'--'", "+", "kwargname", ")", "args", ".", "append", "(", "'{}'", ".", "format", "(", "kwargs", "[", "kwargname", "]", ")", ")", "parser", "=", "proc", ".", "invoke_parser", "(", "noexit", "=", "(", "_instance", "is", "not", "None", ")", ")", "opts", "=", "parser", ".", "parse_args", "(", "args", ")", "kwargs0", "=", "{", "}", "def", "handle_set", "(", "opts", ",", "dataset", ",", "kwargs0", ",", "canMulti", "=", "False", ")", ":", "for", "elem", "in", "dataset", ":", "elemname", "=", "elem", ".", "name", "# ml-run-process passes values for not provided inputs, outputs and params as empty strings ('')", "if", "hasattr", "(", "opts", ",", "elemname", ")", "and", "getattr", "(", "opts", ",", "elemname", ")", "not", "in", "[", "None", ",", "''", "]", ":", "# value for element was given in the invocation", "elemvalue", "=", "getattr", "(", "opts", ",", "elemname", ")", "if", "canMulti", "and", "isinstance", "(", "elemvalue", ",", "list", ")", ":", "elemlist", "=", "elemvalue", "else", ":", "elemlist", "=", "[", "elemvalue", "]", "for", "elemelem", "in", "elemlist", ":", "for", "validator", "in", "elem", ".", "validators", ":", "validator", "(", "elemelem", ")", "if", "hasattr", "(", "opts", ",", "elem", ".", "name", ")", ":", "prepared", "=", "elem", ".", "prepare", "(", "elemvalue", ")", "or", "elemvalue", "kwargs0", "[", "elem", ".", "name", "]", "=", "prepared", "elif", "elem", ".", "optional", ":", "# value was not set but is optional so ignore it", "kwargs0", "[", "elem", ".", "name", "]", "=", "None", "else", ":", "# value was not set and is mandatory -- error", "raise", "AttributeError", "(", "'Missing value for {} '", ".", "format", "(", "elemname", ")", ")", "try", ":", "handle_set", "(", "opts", ",", "proc", ".", "INPUTS", ",", "kwargs0", ",", "True", ")", "handle_set", "(", "opts", ",", "proc", ".", "OUTPUTS", ",", "kwargs0", ",", "True", ")", "for", "param", "in", "proc", ".", "PARAMETERS", ":", "if", "hasattr", "(", "opts", ",", "param", ".", "name", ")", "and", "getattr", "(", "opts", ",", "param", ".", "name", ")", "is", "not", "None", "and", "getattr", "(", "opts", ",", "param", ".", "name", ")", "is", "not", "''", ":", "value", "=", "getattr", "(", "opts", ",", "param", ".", "name", ")", "# validate if needed", "for", "validator", "in", "param", ".", "validators", ":", "validator", "(", "value", ")", "# if param is a tuple of choices, each choice is a tuple itself", "# with first element of the input value and second element", "# containing the value to be passed to the processor", "if", "param", ".", "choices", "and", "isinstance", "(", "param", ".", "choices", ",", "tuple", ")", ":", "for", "choice", "in", "param", ".", "choices", ":", "if", "choice", "[", "0", "]", "==", "value", ":", "kwargs0", "[", "param", ".", "name", "]", "=", "choice", "[", "1", "]", "break", "else", ":", "kwargs0", "[", "param", ".", "name", "]", "=", "value", "elif", "param", ".", "optional", ":", "kwargs0", "[", "param", ".", "name", "]", "=", "param", ".", "default", "else", ":", "raise", "AttributeError", "(", "'Missing value for {} parameter'", ".", "format", "(", "param", ".", "name", ")", ")", "if", "not", "_instance", ":", "_instance", "=", "proc", "(", "*", "*", "kwargs0", ")", "else", ":", "_instance", ".", "apply", "(", "_instance", 
",", "*", "*", "kwargs0", ")", "return", "_instance", ".", "run", "(", ")", "# todo: cleanup", "except", "Exception", "as", "e", ":", "print", "(", "\"Error:\"", ",", "e", ")", "# traceback.print_exc()", "raise" ]
Executes the processor passing given arguments. :param args: a list of parameters in --key=value format.
[ "Executes", "the", "processor", "passing", "given", "arguments", "." ]
python
train
45.547945
gwpy/gwpy
gwpy/timeseries/io/losc.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/losc.py#L229-L271
def read_losc_hdf5_state(f, path='quality/simple', start=None, end=None, copy=False): """Read a `StateVector` from a LOSC-format HDF file. Parameters ---------- f : `str`, `h5py.HLObject` path of HDF5 file, or open `H5File` path : `str` path of HDF5 dataset to read. start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional start GPS time of desired data end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional end GPS time of desired data copy : `bool`, default: `False` create a fresh-memory copy of the underlying array Returns ------- data : `~gwpy.timeseries.TimeSeries` a new `TimeSeries` containing the data read from disk """ # find data dataset = io_hdf5.find_dataset(f, '%s/DQmask' % path) maskset = io_hdf5.find_dataset(f, '%s/DQDescriptions' % path) # read data nddata = dataset[()] bits = [bytes.decode(bytes(b), 'utf-8') for b in maskset[()]] # read metadata epoch = dataset.attrs['Xstart'] try: dt = dataset.attrs['Xspacing'] except KeyError: dt = Quantity(1, 's') else: xunit = parse_unit(dataset.attrs['Xunits']) dt = Quantity(dt, xunit) return StateVector(nddata, bits=bits, t0=epoch, name='Data quality', dx=dt, copy=copy).crop(start=start, end=end)
[ "def", "read_losc_hdf5_state", "(", "f", ",", "path", "=", "'quality/simple'", ",", "start", "=", "None", ",", "end", "=", "None", ",", "copy", "=", "False", ")", ":", "# find data", "dataset", "=", "io_hdf5", ".", "find_dataset", "(", "f", ",", "'%s/DQmask'", "%", "path", ")", "maskset", "=", "io_hdf5", ".", "find_dataset", "(", "f", ",", "'%s/DQDescriptions'", "%", "path", ")", "# read data", "nddata", "=", "dataset", "[", "(", ")", "]", "bits", "=", "[", "bytes", ".", "decode", "(", "bytes", "(", "b", ")", ",", "'utf-8'", ")", "for", "b", "in", "maskset", "[", "(", ")", "]", "]", "# read metadata", "epoch", "=", "dataset", ".", "attrs", "[", "'Xstart'", "]", "try", ":", "dt", "=", "dataset", ".", "attrs", "[", "'Xspacing'", "]", "except", "KeyError", ":", "dt", "=", "Quantity", "(", "1", ",", "'s'", ")", "else", ":", "xunit", "=", "parse_unit", "(", "dataset", ".", "attrs", "[", "'Xunits'", "]", ")", "dt", "=", "Quantity", "(", "dt", ",", "xunit", ")", "return", "StateVector", "(", "nddata", ",", "bits", "=", "bits", ",", "t0", "=", "epoch", ",", "name", "=", "'Data quality'", ",", "dx", "=", "dt", ",", "copy", "=", "copy", ")", ".", "crop", "(", "start", "=", "start", ",", "end", "=", "end", ")" ]
Read a `StateVector` from a LOSC-format HDF file. Parameters ---------- f : `str`, `h5py.HLObject` path of HDF5 file, or open `H5File` path : `str` path of HDF5 dataset to read. start : `Time`, `~gwpy.time.LIGOTimeGPS`, optional start GPS time of desired data end : `Time`, `~gwpy.time.LIGOTimeGPS`, optional end GPS time of desired data copy : `bool`, default: `False` create a fresh-memory copy of the underlying array Returns ------- data : `~gwpy.timeseries.TimeSeries` a new `TimeSeries` containing the data read from disk
[ "Read", "a", "StateVector", "from", "a", "LOSC", "-", "format", "HDF", "file", "." ]
python
train
31.232558
twisted/mantissa
xmantissa/websession.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/websession.py#L381-L396
def getCredentials(self, request): """ Derive credentials from an HTTP request. Override SessionWrapper.getCredentials to add the Host: header to the credentials. This will make web-based virtual hosting work. @type request: L{nevow.inevow.IRequest} @param request: The request being handled. @rtype: L{twisted.cred.credentials.1ICredentials} @return: Credentials derived from the HTTP request. """ username = usernameFromRequest(request) password = request.args.get('password', [''])[0] return credentials.UsernamePassword(username, password)
[ "def", "getCredentials", "(", "self", ",", "request", ")", ":", "username", "=", "usernameFromRequest", "(", "request", ")", "password", "=", "request", ".", "args", ".", "get", "(", "'password'", ",", "[", "''", "]", ")", "[", "0", "]", "return", "credentials", ".", "UsernamePassword", "(", "username", ",", "password", ")" ]
Derive credentials from an HTTP request. Override SessionWrapper.getCredentials to add the Host: header to the credentials. This will make web-based virtual hosting work. @type request: L{nevow.inevow.IRequest} @param request: The request being handled. @rtype: L{twisted.cred.credentials.ICredentials} @return: Credentials derived from the HTTP request.
[ "Derive", "credentials", "from", "an", "HTTP", "request", "." ]
python
train
39.25
bitesofcode/projexui
projexui/dialogs/xconfigdialog/xconfigdialog.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/dialogs/xconfigdialog/xconfigdialog.py#L152-L168
def setCurrentPlugin( self, plugin ): """ Sets the current plugin item to the inputed plugin. :param plugin | <XConfigPlugin> || None """ if ( not plugin ): self.uiPluginTREE.setCurrentItem(None) return for i in range(self.uiPluginTREE.topLevelItemCount()): item = self.uiPluginTREE.topLevelItem(i) for c in range(item.childCount()): pitem = item.child(c) if ( pitem.plugin() == plugin ): self.uiPluginTREE.setCurrentItem(pitem)
[ "def", "setCurrentPlugin", "(", "self", ",", "plugin", ")", ":", "if", "(", "not", "plugin", ")", ":", "self", ".", "uiPluginTREE", ".", "setCurrentItem", "(", "None", ")", "return", "for", "i", "in", "range", "(", "self", ".", "uiPluginTREE", ".", "topLevelItemCount", "(", ")", ")", ":", "item", "=", "self", ".", "uiPluginTREE", ".", "topLevelItem", "(", "i", ")", "for", "c", "in", "range", "(", "item", ".", "childCount", "(", ")", ")", ":", "pitem", "=", "item", ".", "child", "(", "c", ")", "if", "(", "pitem", ".", "plugin", "(", ")", "==", "plugin", ")", ":", "self", ".", "uiPluginTREE", ".", "setCurrentItem", "(", "pitem", ")" ]
Sets the current plugin item to the input plugin. :param plugin | <XConfigPlugin> || None
[ "Sets", "the", "current", "plugin", "item", "to", "the", "inputed", "plugin", ".", ":", "param", "plugin", "|", "<XConfigPlugin", ">", "||", "None" ]
python
train
35.529412
cmheisel/basecampreporting
src/basecampreporting/basecamp.py
https://github.com/cmheisel/basecampreporting/blob/88ecfc6e835608650ff6be23cbf2421d224c122b/src/basecampreporting/basecamp.py#L427-L439
def update_todo_item(self, item_id, content, party_id=None, notify=False): """ Modifies an existing item. The values work much like the "create item" operation, so you should refer to that for a more detailed explanation. """ path = '/todos/update_item/%u' % item_id req = ET.Element('request') item = ET.Element('request') ET.SubElement(item, 'content').text = str(content) if party_id is not None: ET.SubElement(req, 'responsible-party').text = str(party_id) ET.SubElement(req, 'notify').text = str(bool(notify)).lower() return self._request(path, req)
[ "def", "update_todo_item", "(", "self", ",", "item_id", ",", "content", ",", "party_id", "=", "None", ",", "notify", "=", "False", ")", ":", "path", "=", "'/todos/update_item/%u'", "%", "item_id", "req", "=", "ET", ".", "Element", "(", "'request'", ")", "item", "=", "ET", ".", "Element", "(", "'request'", ")", "ET", ".", "SubElement", "(", "item", ",", "'content'", ")", ".", "text", "=", "str", "(", "content", ")", "if", "party_id", "is", "not", "None", ":", "ET", ".", "SubElement", "(", "req", ",", "'responsible-party'", ")", ".", "text", "=", "str", "(", "party_id", ")", "ET", ".", "SubElement", "(", "req", ",", "'notify'", ")", ".", "text", "=", "str", "(", "bool", "(", "notify", ")", ")", ".", "lower", "(", ")", "return", "self", ".", "_request", "(", "path", ",", "req", ")" ]
Modifies an existing item. The values work much like the "create item" operation, so you should refer to that for a more detailed explanation.
[ "Modifies", "an", "existing", "item", ".", "The", "values", "work", "much", "like", "the", "create", "item", "operation", "so", "you", "should", "refer", "to", "that", "for", "a", "more", "detailed", "explanation", "." ]
python
train
49.692308
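The method above builds a small XML body with ElementTree before sending it. A self-contained sketch of that construction follows, serialised so the result is visible; the element names mirror the snippet, but the exact payload a real server expects is not asserted here.

import xml.etree.ElementTree as ET

req = ET.Element("request")
item = ET.SubElement(req, "item")
ET.SubElement(item, "content").text = "Buy milk"
ET.SubElement(req, "responsible-party").text = "42"
ET.SubElement(req, "notify").text = "true"

body = ET.tostring(req, encoding="unicode")
assert body == ("<request><item><content>Buy milk</content></item>"
                "<responsible-party>42</responsible-party><notify>true</notify></request>")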
aws/aws-xray-sdk-python
aws_xray_sdk/core/models/entity.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/models/entity.py#L258-L279
def _delete_empty_properties(self, properties): """ Delete empty properties before serialization to avoid extra keys with empty values in the output json. """ if not self.parent_id: del properties['parent_id'] if not self.subsegments: del properties['subsegments'] if not self.aws: del properties['aws'] if not self.http: del properties['http'] if not self.cause: del properties['cause'] if not self.annotations: del properties['annotations'] if not self.metadata: del properties['metadata'] properties.pop(ORIGIN_TRACE_HEADER_ATTR_KEY, None) del properties['sampled']
[ "def", "_delete_empty_properties", "(", "self", ",", "properties", ")", ":", "if", "not", "self", ".", "parent_id", ":", "del", "properties", "[", "'parent_id'", "]", "if", "not", "self", ".", "subsegments", ":", "del", "properties", "[", "'subsegments'", "]", "if", "not", "self", ".", "aws", ":", "del", "properties", "[", "'aws'", "]", "if", "not", "self", ".", "http", ":", "del", "properties", "[", "'http'", "]", "if", "not", "self", ".", "cause", ":", "del", "properties", "[", "'cause'", "]", "if", "not", "self", ".", "annotations", ":", "del", "properties", "[", "'annotations'", "]", "if", "not", "self", ".", "metadata", ":", "del", "properties", "[", "'metadata'", "]", "properties", ".", "pop", "(", "ORIGIN_TRACE_HEADER_ATTR_KEY", ",", "None", ")", "del", "properties", "[", "'sampled'", "]" ]
Delete empty properties before serialization to avoid extra keys with empty values in the output json.
[ "Delete", "empty", "properties", "before", "serialization", "to", "avoid", "extra", "keys", "with", "empty", "values", "in", "the", "output", "json", "." ]
python
train
33.454545
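The helper above strips empty fields before a segment is serialised, so the output JSON carries no empty keys. A compact sketch of the same idea on a plain dict; the field names are invented.

def drop_empty(properties, keys):
    for key in keys:
        if not properties.get(key):
            properties.pop(key, None)
    return properties

doc = {"name": "req-1", "parent_id": None, "annotations": {}, "http": {"status": 200}}
drop_empty(doc, ["parent_id", "annotations", "http"])
assert doc == {"name": "req-1", "http": {"status": 200}}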
mailgun/expiringdict
expiringdict/__init__.py
https://github.com/mailgun/expiringdict/blob/750048022cde40d35721253a88fbaa2df1781e94/expiringdict/__init__.py#L84-L95
def pop(self, key, default=None): """ Get item from the dict and remove it. Return default if expired or does not exist. Never raise KeyError. """ with self.lock: try: item = OrderedDict.__getitem__(self, key) del self[key] return item[0] except KeyError: return default
[ "def", "pop", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "with", "self", ".", "lock", ":", "try", ":", "item", "=", "OrderedDict", ".", "__getitem__", "(", "self", ",", "key", ")", "del", "self", "[", "key", "]", "return", "item", "[", "0", "]", "except", "KeyError", ":", "return", "default" ]
Get item from the dict and remove it. Return default if expired or does not exist. Never raise KeyError.
[ "Get", "item", "from", "the", "dict", "and", "remove", "it", "." ]
python
train
31.666667
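pop() above behaves like dict.pop but also treats expired entries as missing. A short usage sketch follows; it requires the expiringdict package, and the timings are illustrative.

import time
from expiringdict import ExpiringDict

cache = ExpiringDict(max_len=100, max_age_seconds=0.2)
cache["token"] = "abc123"

assert cache.pop("token") == "abc123"               # present: value returned and removed
assert cache.pop("token", "missing") == "missing"   # already removed

cache["token"] = "abc123"
time.sleep(0.3)                                     # let the entry expire
assert cache.pop("token", "missing") == "missing"   # expired entries count as missing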
dnanexus/dx-toolkit
src/python/dxpy/utils/resolver.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/utils/resolver.py#L1087-L1118
def check_folder_exists(project, path, folder_name): ''' :param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :type: boolean :raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/' ''' if folder_name is None or path is None: return False try: folder_list = dxpy.api.container_list_folder(project, {"folder": path, "only": "folders"}) except dxpy.exceptions.DXAPIError as e: if e.name == 'ResourceNotFound': raise ResolutionError(str(e.msg)) else: raise e target_folder = path + '/' + folder_name # sanitize input if necessary target_folder, _skip = clean_folder_path(target_folder, 'folder') # Check that folder name exists in return from list folder API call return target_folder in folder_list['folders']
[ "def", "check_folder_exists", "(", "project", ",", "path", ",", "folder_name", ")", ":", "if", "folder_name", "is", "None", "or", "path", "is", "None", ":", "return", "False", "try", ":", "folder_list", "=", "dxpy", ".", "api", ".", "container_list_folder", "(", "project", ",", "{", "\"folder\"", ":", "path", ",", "\"only\"", ":", "\"folders\"", "}", ")", "except", "dxpy", ".", "exceptions", ".", "DXAPIError", "as", "e", ":", "if", "e", ".", "name", "==", "'ResourceNotFound'", ":", "raise", "ResolutionError", "(", "str", "(", "e", ".", "msg", ")", ")", "else", ":", "raise", "e", "target_folder", "=", "path", "+", "'/'", "+", "folder_name", "# sanitize input if necessary", "target_folder", ",", "_skip", "=", "clean_folder_path", "(", "target_folder", ",", "'folder'", ")", "# Check that folder name exists in return from list folder API call", "return", "target_folder", "in", "folder_list", "[", "'folders'", "]" ]
:param project: project id :type project: string :param path: path to where we should look for the folder in question :type path: string :param folder_name: name of the folder in question :type folder_name: string :returns: A boolean True or False whether the folder exists at the specified path :type: boolean :raises: :exc:'ResolutionError' if dxpy.api.container_list_folder raises an exception This function returns a boolean value that indicates whether a folder of the specified name exists at the specified path Note: this function will NOT work on the root folder case, i.e. '/'
[ ":", "param", "project", ":", "project", "id", ":", "type", "project", ":", "string", ":", "param", "path", ":", "path", "to", "where", "we", "should", "look", "for", "the", "folder", "in", "question", ":", "type", "path", ":", "string", ":", "param", "folder_name", ":", "name", "of", "the", "folder", "in", "question", ":", "type", "folder_name", ":", "string", ":", "returns", ":", "A", "boolean", "True", "or", "False", "whether", "the", "folder", "exists", "at", "the", "specified", "path", ":", "type", ":", "boolean", ":", "raises", ":", ":", "exc", ":", "ResolutionError", "if", "dxpy", ".", "api", ".", "container_list_folder", "raises", "an", "exception" ]
python
train
40.125
CI-WATER/gsshapy
gsshapy/orm/cmt.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/cmt.py#L366-L395
def _writeMapTable(self, session, fileObject, mapTable, replaceParamFile): """ Write Generic Map Table Method This method writes a mapping table in the generic format to file. The method will handle both empty and filled cases of generic formatted mapping tables. session = SQLAlchemy session object for retrieving data from the database fileObject = The file object to write to mapTable = The GSSHAPY MapTable object to write """ # Write mapping name fileObject.write('%s "%s"\n' % (mapTable.name, mapTable.indexMap.name)) # Write mapping table global variables if mapTable.numIDs: fileObject.write('NUM_IDS %s\n' % (mapTable.numIDs)) if mapTable.maxNumCells: fileObject.write('MAX_NUMBER_CELLS %s\n' % (mapTable.maxNumCells)) if mapTable.numSed: fileObject.write('NUM_SED %s\n' % (mapTable.numSed)) if mapTable.maxSoilID: fileObject.write('MAX_SOIL_ID %s\n' % (mapTable.maxSoilID)) # Write value lines from the database self._writeValues(session, fileObject, mapTable, None, replaceParamFile)
[ "def", "_writeMapTable", "(", "self", ",", "session", ",", "fileObject", ",", "mapTable", ",", "replaceParamFile", ")", ":", "# Write mapping name", "fileObject", ".", "write", "(", "'%s \"%s\"\\n'", "%", "(", "mapTable", ".", "name", ",", "mapTable", ".", "indexMap", ".", "name", ")", ")", "# Write mapping table global variables", "if", "mapTable", ".", "numIDs", ":", "fileObject", ".", "write", "(", "'NUM_IDS %s\\n'", "%", "(", "mapTable", ".", "numIDs", ")", ")", "if", "mapTable", ".", "maxNumCells", ":", "fileObject", ".", "write", "(", "'MAX_NUMBER_CELLS %s\\n'", "%", "(", "mapTable", ".", "maxNumCells", ")", ")", "if", "mapTable", ".", "numSed", ":", "fileObject", ".", "write", "(", "'NUM_SED %s\\n'", "%", "(", "mapTable", ".", "numSed", ")", ")", "if", "mapTable", ".", "maxSoilID", ":", "fileObject", ".", "write", "(", "'MAX_SOIL_ID %s\\n'", "%", "(", "mapTable", ".", "maxSoilID", ")", ")", "# Write value lines from the database", "self", ".", "_writeValues", "(", "session", ",", "fileObject", ",", "mapTable", ",", "None", ",", "replaceParamFile", ")" ]
Write Generic Map Table Method This method writes a mapping table in the generic format to file. The method will handle both empty and filled cases of generic formatted mapping tables. session = SQLAlchemy session object for retrieving data from the database fileObject = The file object to write to mapTable = The GSSHAPY MapTable object to write
[ "Write", "Generic", "Map", "Table", "Method" ]
python
train
38.566667
qubell/contrib-python-qubell-client
qubell/api/private/organization.py
https://github.com/qubell/contrib-python-qubell-client/blob/4586ea11d5103c2ff9607d3ed922b5a0991b8845/qubell/api/private/organization.py#L513-L522
def get_user(self, id=None, name=None, email=None): """ Get user object by email or id. """ log.info("Picking user: %s (%s) (%s)" % (name, email, id)) from qubell.api.private.user import User if email: user = User.get(self._router, organization=self, email=email) else: user = self.users[id or name] return user
[ "def", "get_user", "(", "self", ",", "id", "=", "None", ",", "name", "=", "None", ",", "email", "=", "None", ")", ":", "log", ".", "info", "(", "\"Picking user: %s (%s) (%s)\"", "%", "(", "name", ",", "email", ",", "id", ")", ")", "from", "qubell", ".", "api", ".", "private", ".", "user", "import", "User", "if", "email", ":", "user", "=", "User", ".", "get", "(", "self", ".", "_router", ",", "organization", "=", "self", ",", "email", "=", "email", ")", "else", ":", "user", "=", "self", ".", "users", "[", "id", "or", "name", "]", "return", "user" ]
Get user object by email or id.
[ "Get", "user", "object", "by", "email", "or", "id", "." ]
python
train
38.2
diefans/objective
src/objective/fields.py
https://github.com/diefans/objective/blob/e2de37f1cd4f5ad147ab3a5dee7dffd6806f2f88/src/objective/fields.py#L125-L159
def _deserialize(self, value, environment=None): """A collection traverses over something to deserialize its value. :param value: a ``dict`` wich contains mapped values """ if not isinstance(value, MappingABC): raise exc.Invalid(self) # traverse items and match against validated struct mapping = self._create_deserialize_type(value, environment) invalids = [] for name, item in self: # deserialize each item try: mapping[name] = item.deserialize( value.get(name, values.Undefined), environment ) except exc.IgnoreValue: # just ignore this value pass except exc.Invalid as ex: # append this to the list of invalids, so we can return a complete overview of errors invalids.append(ex) if invalids: # on invalids this item is also ``Invalid`` raise exc.InvalidChildren(self, invalids) return mapping
[ "def", "_deserialize", "(", "self", ",", "value", ",", "environment", "=", "None", ")", ":", "if", "not", "isinstance", "(", "value", ",", "MappingABC", ")", ":", "raise", "exc", ".", "Invalid", "(", "self", ")", "# traverse items and match against validated struct", "mapping", "=", "self", ".", "_create_deserialize_type", "(", "value", ",", "environment", ")", "invalids", "=", "[", "]", "for", "name", ",", "item", "in", "self", ":", "# deserialize each item", "try", ":", "mapping", "[", "name", "]", "=", "item", ".", "deserialize", "(", "value", ".", "get", "(", "name", ",", "values", ".", "Undefined", ")", ",", "environment", ")", "except", "exc", ".", "IgnoreValue", ":", "# just ignore this value", "pass", "except", "exc", ".", "Invalid", "as", "ex", ":", "# append this to the list of invalids, so we can return a complete overview of errors", "invalids", ".", "append", "(", "ex", ")", "if", "invalids", ":", "# on invalids this item is also ``Invalid``", "raise", "exc", ".", "InvalidChildren", "(", "self", ",", "invalids", ")", "return", "mapping" ]
A collection traverses over something to deserialize its value. :param value: a ``dict`` which contains mapped values
[ "A", "collection", "traverses", "over", "something", "to", "deserialize", "its", "value", "." ]
python
train
30.057143
MLAB-project/pymlab
src/pymlab/sensors/lioncell.py
https://github.com/MLAB-project/pymlab/blob/d18d858ae83b203defcf2aead0dbd11b3c444658/src/pymlab/sensors/lioncell.py#L75-L77
def StateOfCharge(self): """ % of Full Charge """ return (self.bus.read_byte_data(self.address, 0x02) + self.bus.read_byte_data(self.address, 0x03) * 256)
[ "def", "StateOfCharge", "(", "self", ")", ":", "return", "(", "self", ".", "bus", ".", "read_byte_data", "(", "self", ".", "address", ",", "0x02", ")", "+", "self", ".", "bus", ".", "read_byte_data", "(", "self", ".", "address", ",", "0x03", ")", "*", "256", ")" ]
% of Full Charge
[ "%", "of", "Full", "Charge" ]
python
train
56
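StateOfCharge() above combines two consecutive register reads into one little-endian 16-bit value. The arithmetic on its own, with made-up register values:

low, high = 0x2C, 0x01            # e.g. raw bytes read from registers 0x02 and 0x03

value = low + high * 256          # same as (high << 8) | low for byte-sized inputs
assert value == 300
assert value == int.from_bytes(bytes([low, high]), "little")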
biocore/burrito-fillings
bfillings/pplacer.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/pplacer.py#L153-L201
def insert_sequences_into_tree(aln, moltype, params={}, write_log=True): """Returns a tree from Alignment object aln. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object params: dict of parameters to pass in to the RAxML app controller. The result will be an xxx.Alignment object, or None if tree fails. """ # convert aln to phy since seq_names need fixed to run through pplacer new_aln=get_align_for_phylip(StringIO(aln)) # convert aln to fasta in case it is not already a fasta file aln2 = Alignment(new_aln) seqs = aln2.toFasta() ih = '_input_as_multiline_string' pplacer_app = Pplacer(params=params, InputHandler=ih, WorkingDir=None, SuppressStderr=False, SuppressStdout=False) pplacer_result = pplacer_app(seqs) # write a log file if write_log: log_fp = join(params["--out-dir"],'log_pplacer_' + \ split(get_tmp_filename())[-1]) log_file=open(log_fp,'w') log_file.write(pplacer_result['StdOut'].read()) log_file.close() # use guppy to convert json file into a placement tree guppy_params={'tog':None} new_tree=build_tree_from_json_using_params(pplacer_result['json'].name, \ output_dir=params['--out-dir'], \ params=guppy_params) pplacer_result.cleanUp() return new_tree
[ "def", "insert_sequences_into_tree", "(", "aln", ",", "moltype", ",", "params", "=", "{", "}", ",", "write_log", "=", "True", ")", ":", "# convert aln to phy since seq_names need fixed to run through pplacer", "new_aln", "=", "get_align_for_phylip", "(", "StringIO", "(", "aln", ")", ")", "# convert aln to fasta in case it is not already a fasta file", "aln2", "=", "Alignment", "(", "new_aln", ")", "seqs", "=", "aln2", ".", "toFasta", "(", ")", "ih", "=", "'_input_as_multiline_string'", "pplacer_app", "=", "Pplacer", "(", "params", "=", "params", ",", "InputHandler", "=", "ih", ",", "WorkingDir", "=", "None", ",", "SuppressStderr", "=", "False", ",", "SuppressStdout", "=", "False", ")", "pplacer_result", "=", "pplacer_app", "(", "seqs", ")", "# write a log file", "if", "write_log", ":", "log_fp", "=", "join", "(", "params", "[", "\"--out-dir\"", "]", ",", "'log_pplacer_'", "+", "split", "(", "get_tmp_filename", "(", ")", ")", "[", "-", "1", "]", ")", "log_file", "=", "open", "(", "log_fp", ",", "'w'", ")", "log_file", ".", "write", "(", "pplacer_result", "[", "'StdOut'", "]", ".", "read", "(", ")", ")", "log_file", ".", "close", "(", ")", "# use guppy to convert json file into a placement tree", "guppy_params", "=", "{", "'tog'", ":", "None", "}", "new_tree", "=", "build_tree_from_json_using_params", "(", "pplacer_result", "[", "'json'", "]", ".", "name", ",", "output_dir", "=", "params", "[", "'--out-dir'", "]", ",", "params", "=", "guppy_params", ")", "pplacer_result", ".", "cleanUp", "(", ")", "return", "new_tree" ]
Returns a tree from Alignment object aln. aln: an xxx.Alignment object, or data that can be used to build one. moltype: cogent.core.moltype.MolType object params: dict of parameters to pass in to the RAxML app controller. The result will be an xxx.Alignment object, or None if tree fails.
[ "Returns", "a", "tree", "from", "Alignment", "object", "aln", "." ]
python
train
31.959184
spyder-ide/spyder
spyder/widgets/mixins.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/mixins.py#L712-L767
def find_text(self, text, changed=True, forward=True, case=False, words=False, regexp=False): """Find text""" cursor = self.textCursor() findflag = QTextDocument.FindFlag() if not forward: findflag = findflag | QTextDocument.FindBackward if case: findflag = findflag | QTextDocument.FindCaseSensitively moves = [QTextCursor.NoMove] if forward: moves += [QTextCursor.NextWord, QTextCursor.Start] if changed: if to_text_string(cursor.selectedText()): new_position = min([cursor.selectionStart(), cursor.selectionEnd()]) cursor.setPosition(new_position) else: cursor.movePosition(QTextCursor.PreviousWord) else: moves += [QTextCursor.End] if regexp: text = to_text_string(text) else: text = re.escape(to_text_string(text)) if QT55_VERSION: pattern = QRegularExpression(u"\\b{}\\b".format(text) if words else text) if case: pattern.setPatternOptions( QRegularExpression.CaseInsensitiveOption) else: pattern = QRegExp(u"\\b{}\\b".format(text) if words else text, Qt.CaseSensitive if case else Qt.CaseInsensitive, QRegExp.RegExp2) for move in moves: cursor.movePosition(move) if regexp and '\\n' in text: # Multiline regular expression found_cursor = self.find_multiline_pattern(pattern, cursor, findflag) else: # Single line find: using the QTextDocument's find function, # probably much more efficient than ours found_cursor = self.document().find(pattern, cursor, findflag) if found_cursor is not None and not found_cursor.isNull(): self.setTextCursor(found_cursor) return True return False
[ "def", "find_text", "(", "self", ",", "text", ",", "changed", "=", "True", ",", "forward", "=", "True", ",", "case", "=", "False", ",", "words", "=", "False", ",", "regexp", "=", "False", ")", ":", "cursor", "=", "self", ".", "textCursor", "(", ")", "findflag", "=", "QTextDocument", ".", "FindFlag", "(", ")", "if", "not", "forward", ":", "findflag", "=", "findflag", "|", "QTextDocument", ".", "FindBackward", "if", "case", ":", "findflag", "=", "findflag", "|", "QTextDocument", ".", "FindCaseSensitively", "moves", "=", "[", "QTextCursor", ".", "NoMove", "]", "if", "forward", ":", "moves", "+=", "[", "QTextCursor", ".", "NextWord", ",", "QTextCursor", ".", "Start", "]", "if", "changed", ":", "if", "to_text_string", "(", "cursor", ".", "selectedText", "(", ")", ")", ":", "new_position", "=", "min", "(", "[", "cursor", ".", "selectionStart", "(", ")", ",", "cursor", ".", "selectionEnd", "(", ")", "]", ")", "cursor", ".", "setPosition", "(", "new_position", ")", "else", ":", "cursor", ".", "movePosition", "(", "QTextCursor", ".", "PreviousWord", ")", "else", ":", "moves", "+=", "[", "QTextCursor", ".", "End", "]", "if", "regexp", ":", "text", "=", "to_text_string", "(", "text", ")", "else", ":", "text", "=", "re", ".", "escape", "(", "to_text_string", "(", "text", ")", ")", "if", "QT55_VERSION", ":", "pattern", "=", "QRegularExpression", "(", "u\"\\\\b{}\\\\b\"", ".", "format", "(", "text", ")", "if", "words", "else", "text", ")", "if", "case", ":", "pattern", ".", "setPatternOptions", "(", "QRegularExpression", ".", "CaseInsensitiveOption", ")", "else", ":", "pattern", "=", "QRegExp", "(", "u\"\\\\b{}\\\\b\"", ".", "format", "(", "text", ")", "if", "words", "else", "text", ",", "Qt", ".", "CaseSensitive", "if", "case", "else", "Qt", ".", "CaseInsensitive", ",", "QRegExp", ".", "RegExp2", ")", "for", "move", "in", "moves", ":", "cursor", ".", "movePosition", "(", "move", ")", "if", "regexp", "and", "'\\\\n'", "in", "text", ":", "# Multiline regular expression\r", "found_cursor", "=", "self", ".", "find_multiline_pattern", "(", "pattern", ",", "cursor", ",", "findflag", ")", "else", ":", "# Single line find: using the QTextDocument's find function,\r", "# probably much more efficient than ours\r", "found_cursor", "=", "self", ".", "document", "(", ")", ".", "find", "(", "pattern", ",", "cursor", ",", "findflag", ")", "if", "found_cursor", "is", "not", "None", "and", "not", "found_cursor", ".", "isNull", "(", ")", ":", "self", ".", "setTextCursor", "(", "found_cursor", ")", "return", "True", "return", "False" ]
Find text
[ "Find", "text" ]
python
train
39.857143
tensorflow/probability
tensorflow_probability/examples/disentangled_vae.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/examples/disentangled_vae.py#L929-L962
def sample_dynamic_posterior(self, inputs, samples, static_sample=None): """Sample the static latent posterior. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. samples: Number of samples to draw from the latent distribution. static_sample: A tensor sample of the static latent variable `f` of shape [..., batch_size, latent_size]. Only used for the full dynamic posterior formulation. Returns: A tuple of a sample tensor of shape [samples, batch_size, length latent_size], and a MultivariateNormalDiag distribution from which the tensor was sampled, with event shape [latent_size], and batch shape [broadcasted_shape, batch_size, length], where `broadcasted_shape` is the broadcasted sampled shape between the inputs and static sample. Raises: ValueError: If the "full" latent posterior formulation is being used, yet a static latent sample was not provided. """ if self.latent_posterior == "factorized": dist = self.dynamic_encoder(inputs) samples = dist.sample(samples) # (s, N, T, lat) else: # full if static_sample is None: raise ValueError( "The full dynamic posterior requires a static latent sample") dist = self.dynamic_encoder((inputs, static_sample)) samples = dist.sample() # (samples, N, latent) return samples, dist
[ "def", "sample_dynamic_posterior", "(", "self", ",", "inputs", ",", "samples", ",", "static_sample", "=", "None", ")", ":", "if", "self", ".", "latent_posterior", "==", "\"factorized\"", ":", "dist", "=", "self", ".", "dynamic_encoder", "(", "inputs", ")", "samples", "=", "dist", ".", "sample", "(", "samples", ")", "# (s, N, T, lat)", "else", ":", "# full", "if", "static_sample", "is", "None", ":", "raise", "ValueError", "(", "\"The full dynamic posterior requires a static latent sample\"", ")", "dist", "=", "self", ".", "dynamic_encoder", "(", "(", "inputs", ",", "static_sample", ")", ")", "samples", "=", "dist", ".", "sample", "(", ")", "# (samples, N, latent)", "return", "samples", ",", "dist" ]
Sample the static latent posterior. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. samples: Number of samples to draw from the latent distribution. static_sample: A tensor sample of the static latent variable `f` of shape [..., batch_size, latent_size]. Only used for the full dynamic posterior formulation. Returns: A tuple of a sample tensor of shape [samples, batch_size, length latent_size], and a MultivariateNormalDiag distribution from which the tensor was sampled, with event shape [latent_size], and batch shape [broadcasted_shape, batch_size, length], where `broadcasted_shape` is the broadcasted sampled shape between the inputs and static sample. Raises: ValueError: If the "full" latent posterior formulation is being used, yet a static latent sample was not provided.
[ "Sample", "the", "static", "latent", "posterior", "." ]
python
test
43.5
PlaidWeb/Publ
publ/image/local.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/image/local.py#L282-L330
def get_rendition_fit_size(spec, input_w, input_h, output_scale): """ Determine the scaled size based on the provided spec """ width = input_w height = input_h scale = spec.get('scale') if scale: width = width / scale height = height / scale min_width = spec.get('scale_min_width') if min_width and width < min_width: height = height * min_width / width width = min_width min_height = spec.get('scale_min_height') if min_height and height < min_height: width = width * min_height / height height = min_height tgt_width, tgt_height = spec.get('width'), spec.get('height') if tgt_width and width > tgt_width: height = height * tgt_width / width width = tgt_width if tgt_height and height > tgt_height: width = width * tgt_height / height height = tgt_height tgt_width, tgt_height = spec.get('max_width'), spec.get('max_height') if tgt_width and width > tgt_width: height = height * tgt_width / width width = tgt_width if tgt_height and height > tgt_height: width = width * tgt_height / height height = tgt_height width = width * output_scale height = height * output_scale # Never scale to larger than the base rendition width = min(round(width), input_w) height = min(round(height), input_h) return (width, height), None
[ "def", "get_rendition_fit_size", "(", "spec", ",", "input_w", ",", "input_h", ",", "output_scale", ")", ":", "width", "=", "input_w", "height", "=", "input_h", "scale", "=", "spec", ".", "get", "(", "'scale'", ")", "if", "scale", ":", "width", "=", "width", "/", "scale", "height", "=", "height", "/", "scale", "min_width", "=", "spec", ".", "get", "(", "'scale_min_width'", ")", "if", "min_width", "and", "width", "<", "min_width", ":", "height", "=", "height", "*", "min_width", "/", "width", "width", "=", "min_width", "min_height", "=", "spec", ".", "get", "(", "'scale_min_height'", ")", "if", "min_height", "and", "height", "<", "min_height", ":", "width", "=", "width", "*", "min_height", "/", "height", "height", "=", "min_height", "tgt_width", ",", "tgt_height", "=", "spec", ".", "get", "(", "'width'", ")", ",", "spec", ".", "get", "(", "'height'", ")", "if", "tgt_width", "and", "width", ">", "tgt_width", ":", "height", "=", "height", "*", "tgt_width", "/", "width", "width", "=", "tgt_width", "if", "tgt_height", "and", "height", ">", "tgt_height", ":", "width", "=", "width", "*", "tgt_height", "/", "height", "height", "=", "tgt_height", "tgt_width", ",", "tgt_height", "=", "spec", ".", "get", "(", "'max_width'", ")", ",", "spec", ".", "get", "(", "'max_height'", ")", "if", "tgt_width", "and", "width", ">", "tgt_width", ":", "height", "=", "height", "*", "tgt_width", "/", "width", "width", "=", "tgt_width", "if", "tgt_height", "and", "height", ">", "tgt_height", ":", "width", "=", "width", "*", "tgt_height", "/", "height", "height", "=", "tgt_height", "width", "=", "width", "*", "output_scale", "height", "=", "height", "*", "output_scale", "# Never scale to larger than the base rendition", "width", "=", "min", "(", "round", "(", "width", ")", ",", "input_w", ")", "height", "=", "min", "(", "round", "(", "height", ")", ",", "input_h", ")", "return", "(", "width", ",", "height", ")", ",", "None" ]
Determine the scaled size based on the provided spec
[ "Determine", "the", "scaled", "size", "based", "on", "the", "provided", "spec" ]
python
train
31.020408
deep-compute/deeputil
deeputil/misc.py
https://github.com/deep-compute/deeputil/blob/9af5702bc3fd990688bf2aed16c20fa104be66df/deeputil/misc.py#L592-L611
def load_object(imp_path): ''' Given a python import path, load the object For dynamic imports in a program >>> isdir = load_object('os.path.isdir') >>> isdir('/tmp') True >>> num = load_object('numbers.Number') >>> isinstance('x', num) False >>> isinstance(777, num) True ''' module_name, obj_name = imp_path.split('.', 1) module = __import__(module_name) obj = attrgetter(obj_name)(module) return obj
[ "def", "load_object", "(", "imp_path", ")", ":", "module_name", ",", "obj_name", "=", "imp_path", ".", "split", "(", "'.'", ",", "1", ")", "module", "=", "__import__", "(", "module_name", ")", "obj", "=", "attrgetter", "(", "obj_name", ")", "(", "module", ")", "return", "obj" ]
Given a python import path, load the object For dynamic imports in a program >>> isdir = load_object('os.path.isdir') >>> isdir('/tmp') True >>> num = load_object('numbers.Number') >>> isinstance('x', num) False >>> isinstance(777, num) True
[ "Given", "a", "python", "import", "path", "load", "the", "object", "For", "dynamic", "imports", "in", "a", "program" ]
python
train
22.5
marrow/mongo
marrow/mongo/query/query.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/query/query.py#L531-L567
def near(self, center, sphere=False, min=None, max=None): """Order results by their distance from the given point, optionally with range limits in meters. Geospatial operator: {$near: {...}} Documentation: https://docs.mongodb.com/manual/reference/operator/query/near/#op._S_near { $near: { $geometry: <center; Point or (long, lat)>, $minDistance: <min; distance in meters>, $maxDistance: <max; distance in meters> } } Geospatial operator: {$nearSphere: {...}} Documentation: https://docs.mongodb.com/manual/reference/operator/query/nearSphere/#op._S_nearSphere { $nearSphere: { $geometry: <center; Point or (long, lat)>, $minDistance: <min; distance in meters>, $maxDistance: <max; distance in meters> } } """ from marrow.mongo.geo import Point near = {'$geometry': Point(*center)} if min: near['$minDistance'] = float(min) if max: near['$maxDistance'] = float(max) return Filter({self._name: {'$nearSphere' if sphere else '$near': near}})
[ "def", "near", "(", "self", ",", "center", ",", "sphere", "=", "False", ",", "min", "=", "None", ",", "max", "=", "None", ")", ":", "from", "marrow", ".", "mongo", ".", "geo", "import", "Point", "near", "=", "{", "'$geometry'", ":", "Point", "(", "*", "center", ")", "}", "if", "min", ":", "near", "[", "'$minDistance'", "]", "=", "float", "(", "min", ")", "if", "max", ":", "near", "[", "'$maxDistance'", "]", "=", "float", "(", "max", ")", "return", "Filter", "(", "{", "self", ".", "_name", ":", "{", "'$nearSphere'", "if", "sphere", "else", "'$near'", ":", "near", "}", "}", ")" ]
Order results by their distance from the given point, optionally with range limits in meters. Geospatial operator: {$near: {...}} Documentation: https://docs.mongodb.com/manual/reference/operator/query/near/#op._S_near { $near: { $geometry: <center; Point or (long, lat)>, $minDistance: <min; distance in meters>, $maxDistance: <max; distance in meters> } } Geospatial operator: {$nearSphere: {...}} Documentation: https://docs.mongodb.com/manual/reference/operator/query/nearSphere/#op._S_nearSphere { $nearSphere: { $geometry: <center; Point or (long, lat)>, $minDistance: <min; distance in meters>, $maxDistance: <max; distance in meters> } }
[ "Order", "results", "by", "their", "distance", "from", "the", "given", "point", "optionally", "with", "range", "limits", "in", "meters", ".", "Geospatial", "operator", ":", "{", "$near", ":", "{", "...", "}}", "Documentation", ":", "https", ":", "//", "docs", ".", "mongodb", ".", "com", "/", "manual", "/", "reference", "/", "operator", "/", "query", "/", "near", "/", "#op", ".", "_S_near", "{", "$near", ":", "{", "$geometry", ":", "<center", ";", "Point", "or", "(", "long", "lat", ")", ">", "$minDistance", ":", "<min", ";", "distance", "in", "meters", ">", "$maxDistance", ":", "<max", ";", "distance", "in", "meters", ">", "}", "}", "Geospatial", "operator", ":", "{", "$nearSphere", ":", "{", "...", "}}", "Documentation", ":", "https", ":", "//", "docs", ".", "mongodb", ".", "com", "/", "manual", "/", "reference", "/", "operator", "/", "query", "/", "nearSphere", "/", "#op", ".", "_S_nearSphere", "{", "$nearSphere", ":", "{", "$geometry", ":", "<center", ";", "Point", "or", "(", "long", "lat", ")", ">", "$minDistance", ":", "<min", ";", "distance", "in", "meters", ">", "$maxDistance", ":", "<max", ";", "distance", "in", "meters", ">", "}", "}" ]
python
train
27.486486
pvlib/pvlib-python
pvlib/pvsystem.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/pvsystem.py#L202-L220
def get_aoi(self, solar_zenith, solar_azimuth): """Get the angle of incidence on the system. Parameters ---------- solar_zenith : float or Series. Solar zenith angle. solar_azimuth : float or Series. Solar azimuth angle. Returns ------- aoi : Series The angle of incidence """ aoi = irradiance.aoi(self.surface_tilt, self.surface_azimuth, solar_zenith, solar_azimuth) return aoi
[ "def", "get_aoi", "(", "self", ",", "solar_zenith", ",", "solar_azimuth", ")", ":", "aoi", "=", "irradiance", ".", "aoi", "(", "self", ".", "surface_tilt", ",", "self", ".", "surface_azimuth", ",", "solar_zenith", ",", "solar_azimuth", ")", "return", "aoi" ]
Get the angle of incidence on the system. Parameters ---------- solar_zenith : float or Series. Solar zenith angle. solar_azimuth : float or Series. Solar azimuth angle. Returns ------- aoi : Series The angle of incidence
[ "Get", "the", "angle", "of", "incidence", "on", "the", "system", "." ]
python
train
27.157895
rehandalal/therapist
therapist/runner/runner.py
https://github.com/rehandalal/therapist/blob/1995a7e396eea2ec8685bb32a779a4110b459b1f/therapist/runner/runner.py#L47-L90
def run_process(self, process): """Runs a single action.""" message = u'#{bright}' message += u'{} '.format(str(process)[:68]).ljust(69, '.') stashed = False if self.unstaged_changes and not self.include_unstaged_changes: out, err, code = self.git.stash(keep_index=True, quiet=True) stashed = code == 0 try: result = process(files=self.files, cwd=self.cwd, fix=self.fix) # Check for modified files out, err, code = self.git.status(porcelain=True, untracked_files='no') for line in out.splitlines(): file_status = Status(line) # Make sure the file is one of the files that was processed if file_status.path in self.files and file_status.is_modified: mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0 if mtime > self.file_mtimes.get(file_status.path, 0): self.file_mtimes[file_status.path] = mtime result.add_modified_file(file_status.path) if self.stage_modified_files: self.git.add(file_status.path) except: # noqa: E722 raise finally: if stashed: self.git.reset(hard=True, quiet=True) self.git.stash.pop(index=True, quiet=True) if result.is_success: message += u' #{green}[SUCCESS]' elif result.is_failure: message += u' #{red}[FAILURE]' elif result.is_skip: message += u' #{cyan}[SKIPPED]' elif result.is_error: message += u' #{red}[ERROR!!]' return result, message
[ "def", "run_process", "(", "self", ",", "process", ")", ":", "message", "=", "u'#{bright}'", "message", "+=", "u'{} '", ".", "format", "(", "str", "(", "process", ")", "[", ":", "68", "]", ")", ".", "ljust", "(", "69", ",", "'.'", ")", "stashed", "=", "False", "if", "self", ".", "unstaged_changes", "and", "not", "self", ".", "include_unstaged_changes", ":", "out", ",", "err", ",", "code", "=", "self", ".", "git", ".", "stash", "(", "keep_index", "=", "True", ",", "quiet", "=", "True", ")", "stashed", "=", "code", "==", "0", "try", ":", "result", "=", "process", "(", "files", "=", "self", ".", "files", ",", "cwd", "=", "self", ".", "cwd", ",", "fix", "=", "self", ".", "fix", ")", "# Check for modified files", "out", ",", "err", ",", "code", "=", "self", ".", "git", ".", "status", "(", "porcelain", "=", "True", ",", "untracked_files", "=", "'no'", ")", "for", "line", "in", "out", ".", "splitlines", "(", ")", ":", "file_status", "=", "Status", "(", "line", ")", "# Make sure the file is one of the files that was processed", "if", "file_status", ".", "path", "in", "self", ".", "files", "and", "file_status", ".", "is_modified", ":", "mtime", "=", "os", ".", "path", ".", "getmtime", "(", "file_status", ".", "path", ")", "if", "os", ".", "path", ".", "exists", "(", "file_status", ".", "path", ")", "else", "0", "if", "mtime", ">", "self", ".", "file_mtimes", ".", "get", "(", "file_status", ".", "path", ",", "0", ")", ":", "self", ".", "file_mtimes", "[", "file_status", ".", "path", "]", "=", "mtime", "result", ".", "add_modified_file", "(", "file_status", ".", "path", ")", "if", "self", ".", "stage_modified_files", ":", "self", ".", "git", ".", "add", "(", "file_status", ".", "path", ")", "except", ":", "# noqa: E722", "raise", "finally", ":", "if", "stashed", ":", "self", ".", "git", ".", "reset", "(", "hard", "=", "True", ",", "quiet", "=", "True", ")", "self", ".", "git", ".", "stash", ".", "pop", "(", "index", "=", "True", ",", "quiet", "=", "True", ")", "if", "result", ".", "is_success", ":", "message", "+=", "u' #{green}[SUCCESS]'", "elif", "result", ".", "is_failure", ":", "message", "+=", "u' #{red}[FAILURE]'", "elif", "result", ".", "is_skip", ":", "message", "+=", "u' #{cyan}[SKIPPED]'", "elif", "result", ".", "is_error", ":", "message", "+=", "u' #{red}[ERROR!!]'", "return", "result", ",", "message" ]
Runs a single action.
[ "Runs", "a", "single", "action", "." ]
python
train
39.454545
hydraplatform/hydra-base
hydra_base/lib/template.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/template.py#L1692-L1705
def add_typeattr(typeattr,**kwargs): """ Add an typeattr to an existing type. """ tmpltype = get_templatetype(typeattr.type_id, user_id=kwargs.get('user_id')) ta = _set_typeattr(typeattr) tmpltype.typeattrs.append(ta) db.DBSession.flush() return ta
[ "def", "add_typeattr", "(", "typeattr", ",", "*", "*", "kwargs", ")", ":", "tmpltype", "=", "get_templatetype", "(", "typeattr", ".", "type_id", ",", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", ")", "ta", "=", "_set_typeattr", "(", "typeattr", ")", "tmpltype", ".", "typeattrs", ".", "append", "(", "ta", ")", "db", ".", "DBSession", ".", "flush", "(", ")", "return", "ta" ]
Add a typeattr to an existing type.
[ "Add", "an", "typeattr", "to", "an", "existing", "type", "." ]
python
train
19.714286
chaoss/grimoirelab-sortinghat
sortinghat/cmd/autogender.py
https://github.com/chaoss/grimoirelab-sortinghat/blob/391cd37a75fea26311dc6908bc1c953c540a8e04/sortinghat/cmd/autogender.py#L149-L186
def genderize(name, api_token=None): """Fetch gender from genderize.io""" GENDERIZE_API_URL = "https://api.genderize.io/" TOTAL_RETRIES = 10 MAX_RETRIES = 5 SLEEP_TIME = 0.25 STATUS_FORCELIST = [502] params = { 'name': name } if api_token: params['apikey'] = api_token session = requests.Session() retries = urllib3.util.Retry(total=TOTAL_RETRIES, connect=MAX_RETRIES, status=MAX_RETRIES, status_forcelist=STATUS_FORCELIST, backoff_factor=SLEEP_TIME, raise_on_status=True) session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries)) session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries)) r = session.get(GENDERIZE_API_URL, params=params) r.raise_for_status() result = r.json() gender = result['gender'] prob = result.get('probability', None) acc = int(prob * 100) if prob else None return gender, acc
[ "def", "genderize", "(", "name", ",", "api_token", "=", "None", ")", ":", "GENDERIZE_API_URL", "=", "\"https://api.genderize.io/\"", "TOTAL_RETRIES", "=", "10", "MAX_RETRIES", "=", "5", "SLEEP_TIME", "=", "0.25", "STATUS_FORCELIST", "=", "[", "502", "]", "params", "=", "{", "'name'", ":", "name", "}", "if", "api_token", ":", "params", "[", "'apikey'", "]", "=", "api_token", "session", "=", "requests", ".", "Session", "(", ")", "retries", "=", "urllib3", ".", "util", ".", "Retry", "(", "total", "=", "TOTAL_RETRIES", ",", "connect", "=", "MAX_RETRIES", ",", "status", "=", "MAX_RETRIES", ",", "status_forcelist", "=", "STATUS_FORCELIST", ",", "backoff_factor", "=", "SLEEP_TIME", ",", "raise_on_status", "=", "True", ")", "session", ".", "mount", "(", "'http://'", ",", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "retries", ")", ")", "session", ".", "mount", "(", "'https://'", ",", "requests", ".", "adapters", ".", "HTTPAdapter", "(", "max_retries", "=", "retries", ")", ")", "r", "=", "session", ".", "get", "(", "GENDERIZE_API_URL", ",", "params", "=", "params", ")", "r", ".", "raise_for_status", "(", ")", "result", "=", "r", ".", "json", "(", ")", "gender", "=", "result", "[", "'gender'", "]", "prob", "=", "result", ".", "get", "(", "'probability'", ",", "None", ")", "acc", "=", "int", "(", "prob", "*", "100", ")", "if", "prob", "else", "None", "return", "gender", ",", "acc" ]
Fetch gender from genderize.io
[ "Fetch", "gender", "from", "genderize", ".", "io" ]
python
train
28.263158
codeinn/vcs
vcs/backends/git/changeset.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/git/changeset.py#L341-L394
def fill_archive(self, stream=None, kind='tgz', prefix=None, subrepos=False): """ Fills up given stream. :param stream: file like object. :param kind: one of following: ``zip``, ``tgz`` or ``tbz2``. Default: ``tgz``. :param prefix: name of root directory in archive. Default is repository name and changeset's raw_id joined with dash (``repo-tip.<KIND>``). :param subrepos: include subrepos in this archive. :raise ImproperArchiveTypeError: If given kind is wrong. :raise VcsError: If given stream is None """ allowed_kinds = settings.ARCHIVE_SPECS.keys() if kind not in allowed_kinds: raise ImproperArchiveTypeError('Archive kind not supported use one' 'of %s', allowed_kinds) if prefix is None: prefix = '%s-%s' % (self.repository.name, self.short_id) elif prefix.startswith('/'): raise VCSError("Prefix cannot start with leading slash") elif prefix.strip() == '': raise VCSError("Prefix cannot be empty") if kind == 'zip': frmt = 'zip' else: frmt = 'tar' _git_path = settings.GIT_EXECUTABLE_PATH cmd = '%s archive --format=%s --prefix=%s/ %s' % (_git_path, frmt, prefix, self.raw_id) if kind == 'tgz': cmd += ' | gzip -9' elif kind == 'tbz2': cmd += ' | bzip2 -9' if stream is None: raise VCSError('You need to pass in a valid stream for filling' ' with archival data') popen = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True, cwd=self.repository.path) buffer_size = 1024 * 8 chunk = popen.stdout.read(buffer_size) while chunk: stream.write(chunk) chunk = popen.stdout.read(buffer_size) # Make sure all descriptors would be read popen.communicate()
[ "def", "fill_archive", "(", "self", ",", "stream", "=", "None", ",", "kind", "=", "'tgz'", ",", "prefix", "=", "None", ",", "subrepos", "=", "False", ")", ":", "allowed_kinds", "=", "settings", ".", "ARCHIVE_SPECS", ".", "keys", "(", ")", "if", "kind", "not", "in", "allowed_kinds", ":", "raise", "ImproperArchiveTypeError", "(", "'Archive kind not supported use one'", "'of %s'", ",", "allowed_kinds", ")", "if", "prefix", "is", "None", ":", "prefix", "=", "'%s-%s'", "%", "(", "self", ".", "repository", ".", "name", ",", "self", ".", "short_id", ")", "elif", "prefix", ".", "startswith", "(", "'/'", ")", ":", "raise", "VCSError", "(", "\"Prefix cannot start with leading slash\"", ")", "elif", "prefix", ".", "strip", "(", ")", "==", "''", ":", "raise", "VCSError", "(", "\"Prefix cannot be empty\"", ")", "if", "kind", "==", "'zip'", ":", "frmt", "=", "'zip'", "else", ":", "frmt", "=", "'tar'", "_git_path", "=", "settings", ".", "GIT_EXECUTABLE_PATH", "cmd", "=", "'%s archive --format=%s --prefix=%s/ %s'", "%", "(", "_git_path", ",", "frmt", ",", "prefix", ",", "self", ".", "raw_id", ")", "if", "kind", "==", "'tgz'", ":", "cmd", "+=", "' | gzip -9'", "elif", "kind", "==", "'tbz2'", ":", "cmd", "+=", "' | bzip2 -9'", "if", "stream", "is", "None", ":", "raise", "VCSError", "(", "'You need to pass in a valid stream for filling'", "' with archival data'", ")", "popen", "=", "Popen", "(", "cmd", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "shell", "=", "True", ",", "cwd", "=", "self", ".", "repository", ".", "path", ")", "buffer_size", "=", "1024", "*", "8", "chunk", "=", "popen", ".", "stdout", ".", "read", "(", "buffer_size", ")", "while", "chunk", ":", "stream", ".", "write", "(", "chunk", ")", "chunk", "=", "popen", ".", "stdout", ".", "read", "(", "buffer_size", ")", "# Make sure all descriptors would be read", "popen", ".", "communicate", "(", ")" ]
Fills up given stream. :param stream: file like object. :param kind: one of following: ``zip``, ``tgz`` or ``tbz2``. Default: ``tgz``. :param prefix: name of root directory in archive. Default is repository name and changeset's raw_id joined with dash (``repo-tip.<KIND>``). :param subrepos: include subrepos in this archive. :raise ImproperArchiveTypeError: If given kind is wrong. :raise VcsError: If given stream is None
[ "Fills", "up", "given", "stream", "." ]
python
train
37.277778
usc-isi-i2/etk
etk/extractors/spacy_ner_extractor.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/extractors/spacy_ner_extractor.py#L29-L49
def extract(self, text: str, get_attr=['PERSON', 'ORG', 'GPE']) -> List[Extraction]: """ Args: text (str): the text to extract from. get_attr (List[str]): The spaCy NER attributes we're interested in. Returns: List(Extraction): the list of extraction or the empty list if there are no matches. """ doc = self.__nlp(text) attr_list = list() for ent in doc.ents: if ent.label_ in get_attr: attr_list.append(Extraction(extractor_name=self.name, start_char=int(ent.start_char), end_char=int(ent.end_char), value=ent.text, tag=ent.label_, start_token=ent.start, end_token=ent.end)) return attr_list
[ "def", "extract", "(", "self", ",", "text", ":", "str", ",", "get_attr", "=", "[", "'PERSON'", ",", "'ORG'", ",", "'GPE'", "]", ")", "->", "List", "[", "Extraction", "]", ":", "doc", "=", "self", ".", "__nlp", "(", "text", ")", "attr_list", "=", "list", "(", ")", "for", "ent", "in", "doc", ".", "ents", ":", "if", "ent", ".", "label_", "in", "get_attr", ":", "attr_list", ".", "append", "(", "Extraction", "(", "extractor_name", "=", "self", ".", "name", ",", "start_char", "=", "int", "(", "ent", ".", "start_char", ")", ",", "end_char", "=", "int", "(", "ent", ".", "end_char", ")", ",", "value", "=", "ent", ".", "text", ",", "tag", "=", "ent", ".", "label_", ",", "start_token", "=", "ent", ".", "start", ",", "end_token", "=", "ent", ".", "end", ")", ")", "return", "attr_list" ]
Args: text (str): the text to extract from. get_attr (List[str]): The spaCy NER attributes we're interested in. Returns: List(Extraction): the list of extraction or the empty list if there are no matches.
[ "Args", ":", "text", "(", "str", ")", ":", "the", "text", "to", "extract", "from", ".", "get_attr", "(", "List", "[", "str", "]", ")", ":", "The", "spaCy", "NER", "attributes", "we", "re", "interested", "in", "." ]
python
train
46
JarryShaw/PyPCAPKit
src/protocols/internet/ipv6_route.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/protocols/internet/ipv6_route.py#L258-L295
def _read_data_type_2(self, length): """Read IPv6-Route Type 2 data. Structure of IPv6-Route Type 2 data [RFC 6275]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len=2 | Routing Type=2|Segments Left=1| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Reserved | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + + | | + Home Address + | | + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 - Reserved 8 64 route.ip Home Address """ if length != 20: raise ProtocolError(f'{self.alias}: [Typeno 2] invalid format') _resv = self._read_fileng(4) _home = self._read_fileng(16) data = dict( ip=ipaddress.ip_address(_home), ) return data
[ "def", "_read_data_type_2", "(", "self", ",", "length", ")", ":", "if", "length", "!=", "20", ":", "raise", "ProtocolError", "(", "f'{self.alias}: [Typeno 2] invalid format'", ")", "_resv", "=", "self", ".", "_read_fileng", "(", "4", ")", "_home", "=", "self", ".", "_read_fileng", "(", "16", ")", "data", "=", "dict", "(", "ip", "=", "ipaddress", ".", "ip_address", "(", "_home", ")", ",", ")", "return", "data" ]
Read IPv6-Route Type 2 data. Structure of IPv6-Route Type 2 data [RFC 6275]: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Next Header | Hdr Ext Len=2 | Routing Type=2|Segments Left=1| +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Reserved | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | + + | | + Home Address + | | + + | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Octets Bits Name Description 0 0 route.next Next Header 1 8 route.length Header Extensive Length 2 16 route.type Routing Type 3 24 route.seg_left Segments Left 4 32 - Reserved 8 64 route.ip Home Address
[ "Read", "IPv6", "-", "Route", "Type", "2", "data", "." ]
python
train
49.763158
pytroll/satpy
satpy/readers/sar_c_safe.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/sar_c_safe.py#L428-L455
def get_lonlatalts(self): """Obtain GCPs and construct latitude and longitude arrays. Args: band (gdal band): Measurement band which comes with GCP's array_shape (tuple) : The size of the data array Returns: coordinates (tuple): A tuple with longitude and latitude arrays """ band = self.filehandle (xpoints, ypoints), (gcp_lons, gcp_lats, gcp_alts), (gcps, crs) = self.get_gcps() # FIXME: do interpolation on cartesion coordinates if the area is # problematic. longitudes = interpolate_xarray(xpoints, ypoints, gcp_lons, band.shape) latitudes = interpolate_xarray(xpoints, ypoints, gcp_lats, band.shape) altitudes = interpolate_xarray(xpoints, ypoints, gcp_alts, band.shape) longitudes.attrs['gcps'] = gcps longitudes.attrs['crs'] = crs latitudes.attrs['gcps'] = gcps latitudes.attrs['crs'] = crs altitudes.attrs['gcps'] = gcps altitudes.attrs['crs'] = crs return longitudes, latitudes, altitudes
[ "def", "get_lonlatalts", "(", "self", ")", ":", "band", "=", "self", ".", "filehandle", "(", "xpoints", ",", "ypoints", ")", ",", "(", "gcp_lons", ",", "gcp_lats", ",", "gcp_alts", ")", ",", "(", "gcps", ",", "crs", ")", "=", "self", ".", "get_gcps", "(", ")", "# FIXME: do interpolation on cartesion coordinates if the area is", "# problematic.", "longitudes", "=", "interpolate_xarray", "(", "xpoints", ",", "ypoints", ",", "gcp_lons", ",", "band", ".", "shape", ")", "latitudes", "=", "interpolate_xarray", "(", "xpoints", ",", "ypoints", ",", "gcp_lats", ",", "band", ".", "shape", ")", "altitudes", "=", "interpolate_xarray", "(", "xpoints", ",", "ypoints", ",", "gcp_alts", ",", "band", ".", "shape", ")", "longitudes", ".", "attrs", "[", "'gcps'", "]", "=", "gcps", "longitudes", ".", "attrs", "[", "'crs'", "]", "=", "crs", "latitudes", ".", "attrs", "[", "'gcps'", "]", "=", "gcps", "latitudes", ".", "attrs", "[", "'crs'", "]", "=", "crs", "altitudes", ".", "attrs", "[", "'gcps'", "]", "=", "gcps", "altitudes", ".", "attrs", "[", "'crs'", "]", "=", "crs", "return", "longitudes", ",", "latitudes", ",", "altitudes" ]
Obtain GCPs and construct latitude and longitude arrays. Args: band (gdal band): Measurement band which comes with GCP's array_shape (tuple) : The size of the data array Returns: coordinates (tuple): A tuple with longitude and latitude arrays
[ "Obtain", "GCPs", "and", "construct", "latitude", "and", "longitude", "arrays", "." ]
python
train
37.607143
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L1931-L1949
def ping(self) -> None: """Pings a database connection, reconnecting if necessary.""" if self.db is None or self.db_pythonlib not in [PYTHONLIB_MYSQLDB, PYTHONLIB_PYMYSQL]: return try: self.db.ping(True) # test connection; reconnect upon failure # ... should auto-reconnect; however, it seems to fail the first # time, then work the next time. # Exception (the first time) is: # <class '_mysql_exceptions.OperationalError'>: # (2006, 'MySQL server has gone away') # http://mail.python.org/pipermail/python-list/2008-February/ # 474598.html except mysql.OperationalError: # loss of connection self.db = None self.connect_to_database_mysql( self._database, self._user, self._password, self._server, self._port, self._charset, self._use_unicode)
[ "def", "ping", "(", "self", ")", "->", "None", ":", "if", "self", ".", "db", "is", "None", "or", "self", ".", "db_pythonlib", "not", "in", "[", "PYTHONLIB_MYSQLDB", ",", "PYTHONLIB_PYMYSQL", "]", ":", "return", "try", ":", "self", ".", "db", ".", "ping", "(", "True", ")", "# test connection; reconnect upon failure", "# ... should auto-reconnect; however, it seems to fail the first", "# time, then work the next time.", "# Exception (the first time) is:", "# <class '_mysql_exceptions.OperationalError'>:", "# (2006, 'MySQL server has gone away')", "# http://mail.python.org/pipermail/python-list/2008-February/", "# 474598.html", "except", "mysql", ".", "OperationalError", ":", "# loss of connection", "self", ".", "db", "=", "None", "self", ".", "connect_to_database_mysql", "(", "self", ".", "_database", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_server", ",", "self", ".", "_port", ",", "self", ".", "_charset", ",", "self", ".", "_use_unicode", ")" ]
Pings a database connection, reconnecting if necessary.
[ "Pings", "a", "database", "connection", "reconnecting", "if", "necessary", "." ]
python
train
51.947368
dwavesystems/dimod
dimod/roof_duality/fix_variables.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/roof_duality/fix_variables.py#L19-L103
def fix_variables(bqm, sampling_mode=True): """Determine assignments for some variables of a binary quadratic model. Roof duality finds a lower bound for the minimum of a quadratic polynomial. It can also find minimizing assignments for some of the polynomial's variables; these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_. A quadratic pseudo-Boolean function can be represented as a network to find the lower bound through network-flow computations. `fix_variables` uses maximum flow in the implication network to correctly fix variables. Consequently, you can find an assignment for the remaining variables that attains the optimal value. Args: bqm (:obj:`.BinaryQuadraticModel`) A binary quadratic model. sampling_mode (bool, optional, default=True): In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly connected components are used to fix more variables, but in some optimal solutions these variables may take different values. Returns: dict: Variable assignments for some variables of the specified binary quadratic model. Examples: This example creates a binary quadratic model with a single ground state and fixes the model's single variable to the minimizing assignment. >>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) >>> bqm.add_variable('a', 1.0) >>> dimod.fix_variables(bqm) {'a': -1} This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with no variable having a single value for all ground states, so neither variable is fixed. >>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) >>> bqm.add_interaction('a', 'b', -1.0) >>> dimod.fix_variables(bqm) # doctest: +SKIP {} This example turns sampling model off, so variables are fixed to an assignment that attains the ground state. >>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) >>> bqm.add_interaction('a', 'b', -1.0) >>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP {'a': 1, 'b': 1} .. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary Optimization. Rutcor Research Report 10-2006, April, 2006. .. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123, (2002), pp. 155-225 """ try: from dimod.roof_duality._fix_variables import fix_variables_wrapper except ImportError: raise ImportError("c++ extension roof_duality is not built") if sampling_mode: method = 2 # roof-duality only else: method = 1 # roof-duality and strongly connected components linear = bqm.linear if all(v in linear for v in range(len(bqm))): # we can work with the binary form of the bqm directly fixed = fix_variables_wrapper(bqm.binary, method) else: try: inverse_mapping = dict(enumerate(sorted(linear))) except TypeError: # in python3 unlike types cannot be sorted inverse_mapping = dict(enumerate(linear)) mapping = {v: i for i, v in inverse_mapping.items()} fixed = fix_variables_wrapper(bqm.relabel_variables(mapping, inplace=False).binary, method) fixed = {inverse_mapping[v]: val for v, val in fixed.items()} if bqm.vartype is Vartype.SPIN: return {v: 2*val - 1 for v, val in fixed.items()} else: return fixed
[ "def", "fix_variables", "(", "bqm", ",", "sampling_mode", "=", "True", ")", ":", "try", ":", "from", "dimod", ".", "roof_duality", ".", "_fix_variables", "import", "fix_variables_wrapper", "except", "ImportError", ":", "raise", "ImportError", "(", "\"c++ extension roof_duality is not built\"", ")", "if", "sampling_mode", ":", "method", "=", "2", "# roof-duality only", "else", ":", "method", "=", "1", "# roof-duality and strongly connected components", "linear", "=", "bqm", ".", "linear", "if", "all", "(", "v", "in", "linear", "for", "v", "in", "range", "(", "len", "(", "bqm", ")", ")", ")", ":", "# we can work with the binary form of the bqm directly", "fixed", "=", "fix_variables_wrapper", "(", "bqm", ".", "binary", ",", "method", ")", "else", ":", "try", ":", "inverse_mapping", "=", "dict", "(", "enumerate", "(", "sorted", "(", "linear", ")", ")", ")", "except", "TypeError", ":", "# in python3 unlike types cannot be sorted", "inverse_mapping", "=", "dict", "(", "enumerate", "(", "linear", ")", ")", "mapping", "=", "{", "v", ":", "i", "for", "i", ",", "v", "in", "inverse_mapping", ".", "items", "(", ")", "}", "fixed", "=", "fix_variables_wrapper", "(", "bqm", ".", "relabel_variables", "(", "mapping", ",", "inplace", "=", "False", ")", ".", "binary", ",", "method", ")", "fixed", "=", "{", "inverse_mapping", "[", "v", "]", ":", "val", "for", "v", ",", "val", "in", "fixed", ".", "items", "(", ")", "}", "if", "bqm", ".", "vartype", "is", "Vartype", ".", "SPIN", ":", "return", "{", "v", ":", "2", "*", "val", "-", "1", "for", "v", ",", "val", "in", "fixed", ".", "items", "(", ")", "}", "else", ":", "return", "fixed" ]
Determine assignments for some variables of a binary quadratic model. Roof duality finds a lower bound for the minimum of a quadratic polynomial. It can also find minimizing assignments for some of the polynomial's variables; these fixed variables take the same values in all optimal solutions [BHT]_ [BH]_. A quadratic pseudo-Boolean function can be represented as a network to find the lower bound through network-flow computations. `fix_variables` uses maximum flow in the implication network to correctly fix variables. Consequently, you can find an assignment for the remaining variables that attains the optimal value. Args: bqm (:obj:`.BinaryQuadraticModel`) A binary quadratic model. sampling_mode (bool, optional, default=True): In sampling mode, only roof-duality is used. When `sampling_mode` is false, strongly connected components are used to fix more variables, but in some optimal solutions these variables may take different values. Returns: dict: Variable assignments for some variables of the specified binary quadratic model. Examples: This example creates a binary quadratic model with a single ground state and fixes the model's single variable to the minimizing assignment. >>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) >>> bqm.add_variable('a', 1.0) >>> dimod.fix_variables(bqm) {'a': -1} This example has two ground states, :math:`a=b=-1` and :math:`a=b=1`, with no variable having a single value for all ground states, so neither variable is fixed. >>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) >>> bqm.add_interaction('a', 'b', -1.0) >>> dimod.fix_variables(bqm) # doctest: +SKIP {} This example turns sampling model off, so variables are fixed to an assignment that attains the ground state. >>> bqm = dimod.BinaryQuadraticModel.empty(dimod.SPIN) >>> bqm.add_interaction('a', 'b', -1.0) >>> dimod.fix_variables(bqm, sampling_mode=False) # doctest: +SKIP {'a': 1, 'b': 1} .. [BHT] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstraint Quadratic Binary Optimization. Rutcor Research Report 10-2006, April, 2006. .. [BH] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied Mathematics 123, (2002), pp. 155-225
[ "Determine", "assignments", "for", "some", "variables", "of", "a", "binary", "quadratic", "model", "." ]
python
train
41.776471
CitrineInformatics/pif-dft
dfttopif/parsers/vasp.py
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/parsers/vasp.py#L354-L380
def _get_bandgap_doscar(filename): """Get the bandgap from the DOSCAR file""" with open(filename) as fp: for i in range(6): l = fp.readline() efermi = float(l.split()[3]) step1 = fp.readline().split()[0] step2 = fp.readline().split()[0] step_size = float(step2)-float(step1) not_found = True while not_found: l = fp.readline().split() e = float(l.pop(0)) dens = 0.0 for i in range(int(len(l)/2)): dens += float(l[i]) if e < efermi and dens > 1e-3: bot = e elif e > efermi and dens > 1e-3: top = e not_found = False if top - bot < step_size*2: bandgap = 0.0 else: bandgap = float(top - bot) return bandgap
[ "def", "_get_bandgap_doscar", "(", "filename", ")", ":", "with", "open", "(", "filename", ")", "as", "fp", ":", "for", "i", "in", "range", "(", "6", ")", ":", "l", "=", "fp", ".", "readline", "(", ")", "efermi", "=", "float", "(", "l", ".", "split", "(", ")", "[", "3", "]", ")", "step1", "=", "fp", ".", "readline", "(", ")", ".", "split", "(", ")", "[", "0", "]", "step2", "=", "fp", ".", "readline", "(", ")", ".", "split", "(", ")", "[", "0", "]", "step_size", "=", "float", "(", "step2", ")", "-", "float", "(", "step1", ")", "not_found", "=", "True", "while", "not_found", ":", "l", "=", "fp", ".", "readline", "(", ")", ".", "split", "(", ")", "e", "=", "float", "(", "l", ".", "pop", "(", "0", ")", ")", "dens", "=", "0.0", "for", "i", "in", "range", "(", "int", "(", "len", "(", "l", ")", "/", "2", ")", ")", ":", "dens", "+=", "float", "(", "l", "[", "i", "]", ")", "if", "e", "<", "efermi", "and", "dens", ">", "1e-3", ":", "bot", "=", "e", "elif", "e", ">", "efermi", "and", "dens", ">", "1e-3", ":", "top", "=", "e", "not_found", "=", "False", "if", "top", "-", "bot", "<", "step_size", "*", "2", ":", "bandgap", "=", "0.0", "else", ":", "bandgap", "=", "float", "(", "top", "-", "bot", ")", "return", "bandgap" ]
Get the bandgap from the DOSCAR file
[ "Get", "the", "bandgap", "from", "the", "DOSCAR", "file" ]
python
train
34.62963
acutesoftware/AIKIF
aikif/toolbox/cls_grid.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/cls_grid.py#L119-L124
def replace_row(self, line, ndx): """ replace a grids row at index 'ndx' with 'line' """ for col in range(len(line)): self.set_tile(ndx, col, line[col])
[ "def", "replace_row", "(", "self", ",", "line", ",", "ndx", ")", ":", "for", "col", "in", "range", "(", "len", "(", "line", ")", ")", ":", "self", ".", "set_tile", "(", "ndx", ",", "col", ",", "line", "[", "col", "]", ")" ]
replace a grid's row at index 'ndx' with 'line'
[ "replace", "a", "grids", "row", "at", "index", "ndx", "with", "line" ]
python
train
32.166667
h2non/pook
pook/mock.py
https://github.com/h2non/pook/blob/e64094e41e4d89d98d2d29af7608ef27dc50cf19/pook/mock.py#L170-L185
def path(self, path): """ Defines a URL path to match. Only call this method if the URL has no path already defined. Arguments: path (str): URL path value to match. E.g: ``/api/users``. Returns: self: current Mock instance. """ url = furl(self._request.rawurl) url.path = path self._request.url = url.url self.add_matcher(matcher('PathMatcher', path))
[ "def", "path", "(", "self", ",", "path", ")", ":", "url", "=", "furl", "(", "self", ".", "_request", ".", "rawurl", ")", "url", ".", "path", "=", "path", "self", ".", "_request", ".", "url", "=", "url", ".", "url", "self", ".", "add_matcher", "(", "matcher", "(", "'PathMatcher'", ",", "path", ")", ")" ]
Defines a URL path to match. Only call this method if the URL has no path already defined. Arguments: path (str): URL path value to match. E.g: ``/api/users``. Returns: self: current Mock instance.
[ "Defines", "a", "URL", "path", "to", "match", "." ]
python
test
27.6875
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py#L189-L207
def get_root_nodes(self, project, depth=None): """GetRootNodes. [Preview API] Gets root classification nodes under the project. :param str project: Project ID or project name :param int depth: Depth of children to fetch. :rtype: [WorkItemClassificationNode] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') query_parameters = {} if depth is not None: query_parameters['$depth'] = self._serialize.query('depth', depth, 'int') response = self._send(http_method='GET', location_id='a70579d1-f53a-48ee-a5be-7be8659023b9', version='5.1-preview.2', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WorkItemClassificationNode]', self._unwrap_collection(response))
[ "def", "get_root_nodes", "(", "self", ",", "project", ",", "depth", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project'", ",", "project", ",", "'str'", ")", "query_parameters", "=", "{", "}", "if", "depth", "is", "not", "None", ":", "query_parameters", "[", "'$depth'", "]", "=", "self", ".", "_serialize", ".", "query", "(", "'depth'", ",", "depth", ",", "'int'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'GET'", ",", "location_id", "=", "'a70579d1-f53a-48ee-a5be-7be8659023b9'", ",", "version", "=", "'5.1-preview.2'", ",", "route_values", "=", "route_values", ",", "query_parameters", "=", "query_parameters", ")", "return", "self", ".", "_deserialize", "(", "'[WorkItemClassificationNode]'", ",", "self", ".", "_unwrap_collection", "(", "response", ")", ")" ]
GetRootNodes. [Preview API] Gets root classification nodes under the project. :param str project: Project ID or project name :param int depth: Depth of children to fetch. :rtype: [WorkItemClassificationNode]
[ "GetRootNodes", ".", "[", "Preview", "API", "]", "Gets", "root", "classification", "nodes", "under", "the", "project", ".", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "depth", ":", "Depth", "of", "children", "to", "fetch", ".", ":", "rtype", ":", "[", "WorkItemClassificationNode", "]" ]
python
train
52
duniter/duniter-python-api
duniterpy/api/bma/wot.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L328-L336
async def certify(client: Client, certification_signed_raw: str) -> ClientResponse: """ POST certification raw document :param client: Client to connect to the api :param certification_signed_raw: Certification raw document :return: """ return await client.post(MODULE + '/certify', {'cert': certification_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "certify", "(", "client", ":", "Client", ",", "certification_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/certify'", ",", "{", "'cert'", ":", "certification_signed_raw", "}", ",", "rtype", "=", "RESPONSE_AIOHTTP", ")" ]
POST certification raw document :param client: Client to connect to the api :param certification_signed_raw: Certification raw document :return:
[ "POST", "certification", "raw", "document" ]
python
train
40.333333
airysen/caimcaim
caimcaim/caimcaim.py
https://github.com/airysen/caimcaim/blob/82e3ce700da8c23ab6199524f646c790cce0b460/caimcaim/caimcaim.py#L63-L133
def fit(self, X, y): """ Fit CAIM Parameters ---------- X : array-like, pandas dataframe, shape [n_samples, n_feature] Input array can contain missing values y: array-like, pandas dataframe, shape [n_samples] Target variable. Must be categorical. Returns ------- self """ self.split_scheme = dict() if isinstance(X, pd.DataFrame): # self.indx = X.index # self.columns = X.columns if isinstance(self._features, list): self.categorical = [X.columns.get_loc(label) for label in self._features] X = X.values y = y.values if self._features == 'auto': self.categorical = self.check_categorical(X, y) categorical = self.categorical print('Categorical', categorical) min_splits = np.unique(y).shape[0] for j in range(X.shape[1]): if j in categorical: continue xj = X[:, j] xj = xj[np.invert(np.isnan(xj))] new_index = xj.argsort() xj = xj[new_index] yj = y[new_index] allsplits = np.unique(xj)[1:-1].tolist() # potential split points global_caim = -1 mainscheme = [xj[0], xj[-1]] best_caim = 0 k = 1 while (k <= min_splits) or ((global_caim < best_caim) and (allsplits)): split_points = np.random.permutation(allsplits).tolist() best_scheme = None best_point = None best_caim = 0 k = k + 1 while split_points: scheme = mainscheme[:] sp = split_points.pop() scheme.append(sp) scheme.sort() c = self.get_caim(scheme, xj, yj) if c > best_caim: best_caim = c best_scheme = scheme best_point = sp if (k <= min_splits) or (best_caim > global_caim): mainscheme = best_scheme global_caim = best_caim try: allsplits.remove(best_point) except ValueError: raise NotEnoughPoints('The feature #' + str(j) + ' does not have' + ' enough unique values for discretization!' + ' Add it to categorical list!') self.split_scheme[j] = mainscheme print('#', j, ' GLOBAL CAIM ', global_caim) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "self", ".", "split_scheme", "=", "dict", "(", ")", "if", "isinstance", "(", "X", ",", "pd", ".", "DataFrame", ")", ":", "# self.indx = X.index", "# self.columns = X.columns", "if", "isinstance", "(", "self", ".", "_features", ",", "list", ")", ":", "self", ".", "categorical", "=", "[", "X", ".", "columns", ".", "get_loc", "(", "label", ")", "for", "label", "in", "self", ".", "_features", "]", "X", "=", "X", ".", "values", "y", "=", "y", ".", "values", "if", "self", ".", "_features", "==", "'auto'", ":", "self", ".", "categorical", "=", "self", ".", "check_categorical", "(", "X", ",", "y", ")", "categorical", "=", "self", ".", "categorical", "print", "(", "'Categorical'", ",", "categorical", ")", "min_splits", "=", "np", ".", "unique", "(", "y", ")", ".", "shape", "[", "0", "]", "for", "j", "in", "range", "(", "X", ".", "shape", "[", "1", "]", ")", ":", "if", "j", "in", "categorical", ":", "continue", "xj", "=", "X", "[", ":", ",", "j", "]", "xj", "=", "xj", "[", "np", ".", "invert", "(", "np", ".", "isnan", "(", "xj", ")", ")", "]", "new_index", "=", "xj", ".", "argsort", "(", ")", "xj", "=", "xj", "[", "new_index", "]", "yj", "=", "y", "[", "new_index", "]", "allsplits", "=", "np", ".", "unique", "(", "xj", ")", "[", "1", ":", "-", "1", "]", ".", "tolist", "(", ")", "# potential split points", "global_caim", "=", "-", "1", "mainscheme", "=", "[", "xj", "[", "0", "]", ",", "xj", "[", "-", "1", "]", "]", "best_caim", "=", "0", "k", "=", "1", "while", "(", "k", "<=", "min_splits", ")", "or", "(", "(", "global_caim", "<", "best_caim", ")", "and", "(", "allsplits", ")", ")", ":", "split_points", "=", "np", ".", "random", ".", "permutation", "(", "allsplits", ")", ".", "tolist", "(", ")", "best_scheme", "=", "None", "best_point", "=", "None", "best_caim", "=", "0", "k", "=", "k", "+", "1", "while", "split_points", ":", "scheme", "=", "mainscheme", "[", ":", "]", "sp", "=", "split_points", ".", "pop", "(", ")", "scheme", ".", "append", "(", "sp", ")", "scheme", ".", "sort", "(", ")", "c", "=", "self", ".", "get_caim", "(", "scheme", ",", "xj", ",", "yj", ")", "if", "c", ">", "best_caim", ":", "best_caim", "=", "c", "best_scheme", "=", "scheme", "best_point", "=", "sp", "if", "(", "k", "<=", "min_splits", ")", "or", "(", "best_caim", ">", "global_caim", ")", ":", "mainscheme", "=", "best_scheme", "global_caim", "=", "best_caim", "try", ":", "allsplits", ".", "remove", "(", "best_point", ")", "except", "ValueError", ":", "raise", "NotEnoughPoints", "(", "'The feature #'", "+", "str", "(", "j", ")", "+", "' does not have'", "+", "' enough unique values for discretization!'", "+", "' Add it to categorical list!'", ")", "self", ".", "split_scheme", "[", "j", "]", "=", "mainscheme", "print", "(", "'#'", ",", "j", ",", "' GLOBAL CAIM '", ",", "global_caim", ")", "return", "self" ]
Fit CAIM Parameters ---------- X : array-like, pandas dataframe, shape [n_samples, n_feature] Input array can contain missing values y: array-like, pandas dataframe, shape [n_samples] Target variable. Must be categorical. Returns ------- self
[ "Fit", "CAIM", "Parameters", "----------", "X", ":", "array", "-", "like", "pandas", "dataframe", "shape", "[", "n_samples", "n_feature", "]", "Input", "array", "can", "contain", "missing", "values", "y", ":", "array", "-", "like", "pandas", "dataframe", "shape", "[", "n_samples", "]", "Target", "variable", ".", "Must", "be", "categorical", ".", "Returns", "-------", "self" ]
python
train
37.633803
pyparsing/pyparsing
examples/pymicko.py
https://github.com/pyparsing/pyparsing/blob/f0264bd8d1a548a50b3e5f7d99cfefd577942d14/examples/pymicko.py#L1192-L1206
def if_body_action(self, text, loc, arg): """Code executed after recognising if statement's body""" exshared.setpos(loc, text) if DEBUG > 0: print("IF_BODY:",arg) if DEBUG == 2: self.symtab.display() if DEBUG > 2: return #generate conditional jump (based on last compare) label = self.codegen.label("false{0}".format(self.false_label_number), True, False) self.codegen.jump(self.relexp_code, True, label) #generate 'true' label (executes if condition is satisfied) self.codegen.newline_label("true{0}".format(self.label_number), True, True) #save label numbers (needed for nested if/while statements) self.label_stack.append(self.false_label_number) self.label_stack.append(self.label_number)
[ "def", "if_body_action", "(", "self", ",", "text", ",", "loc", ",", "arg", ")", ":", "exshared", ".", "setpos", "(", "loc", ",", "text", ")", "if", "DEBUG", ">", "0", ":", "print", "(", "\"IF_BODY:\"", ",", "arg", ")", "if", "DEBUG", "==", "2", ":", "self", ".", "symtab", ".", "display", "(", ")", "if", "DEBUG", ">", "2", ":", "return", "#generate conditional jump (based on last compare)\r", "label", "=", "self", ".", "codegen", ".", "label", "(", "\"false{0}\"", ".", "format", "(", "self", ".", "false_label_number", ")", ",", "True", ",", "False", ")", "self", ".", "codegen", ".", "jump", "(", "self", ".", "relexp_code", ",", "True", ",", "label", ")", "#generate 'true' label (executes if condition is satisfied)\r", "self", ".", "codegen", ".", "newline_label", "(", "\"true{0}\"", ".", "format", "(", "self", ".", "label_number", ")", ",", "True", ",", "True", ")", "#save label numbers (needed for nested if/while statements)\r", "self", ".", "label_stack", ".", "append", "(", "self", ".", "false_label_number", ")", "self", ".", "label_stack", ".", "append", "(", "self", ".", "label_number", ")" ]
Code executed after recognising if statement's body
[ "Code", "executed", "after", "recognising", "if", "statement", "s", "body" ]
python
train
54.4
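For orientation, a hedged, self-contained sketch of the pyparsing calling convention that if_body_action relies on; the tiny grammar below is illustrative only and is not pymicko's real grammar.

from pyparsing import Keyword, Word, alphanums, Suppress

def demo_action(text, loc, toks):
    # pyparsing calls parse actions with (original_text, match_location, tokens),
    # which matches the (text, loc, arg) signature used above.
    print("matched at", loc, "tokens:", toks.asList())

if_header = Keyword("if") + Suppress("(") + Word(alphanums) + Suppress(")")
if_header.setParseAction(demo_action)
if_header.parseString("if (flag)")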
pycampers/zproc
zproc/state/server.py
https://github.com/pycampers/zproc/blob/352a3c7166e2ccc3597c28385a8354b5a22afdc2/zproc/state/server.py#L83-L88
def run_fn_atomically(self, request):
    """Execute a function, atomically and reply with the result."""
    fn = serializer.loads_fn(request[Msgs.info])
    args, kwargs = request[Msgs.args], request[Msgs.kwargs]

    with self.mutate_safely():
        self.reply(fn(self.state, *args, **kwargs))
[ "def", "run_fn_atomically", "(", "self", ",", "request", ")", ":", "fn", "=", "serializer", ".", "loads_fn", "(", "request", "[", "Msgs", ".", "info", "]", ")", "args", ",", "kwargs", "=", "request", "[", "Msgs", ".", "args", "]", ",", "request", "[", "Msgs", ".", "kwargs", "]", "with", "self", ".", "mutate_safely", "(", ")", ":", "self", ".", "reply", "(", "fn", "(", "self", ".", "state", ",", "*", "args", ",", "*", "*", "kwargs", ")", ")" ]
Execute a function, atomically and reply with the result.
[ "Execute", "a", "function", "atomically", "and", "reply", "with", "the", "result", "." ]
python
train
52
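For context, a hedged client-side sketch of what ultimately reaches run_fn_atomically on the server; the zproc client API shown (zproc.Context, zproc.atomic) is recalled from the library's documentation, not confirmed by this record.

import zproc

@zproc.atomic
def increment(state, step):
    # Executed server-side inside mutate_safely(), so this read-modify-write
    # cannot interleave with updates from other client processes.
    state["count"] = state.get("count", 0) + step

ctx = zproc.Context()
ctx.state["count"] = 0
increment(ctx.state, 5)       # serialized, sent to the server, run atomically
print(ctx.state["count"])     # 5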
Azure/azure-sdk-for-python
azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicemanagement-legacy/azure/servicemanagement/servicemanagementservice.py#L1625-L1663
def capture_role(self, service_name, deployment_name, role_name,
                 post_capture_action, target_image_name,
                 target_image_label, provisioning_configuration=None):
    '''
    The Capture Role operation captures a virtual machine image to your
    image gallery. From the captured image, you can create additional
    customized virtual machines.

    service_name: The name of the service.
    deployment_name: The name of the deployment.
    role_name: The name of the role.
    post_capture_action:
        Specifies the action after capture operation completes. Possible
        values are: Delete, Reprovision.
    target_image_name:
        Specifies the image name of the captured virtual machine.
    target_image_label:
        Specifies the friendly name of the captured virtual machine.
    provisioning_configuration:
        Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
    '''
    _validate_not_none('service_name', service_name)
    _validate_not_none('deployment_name', deployment_name)
    _validate_not_none('role_name', role_name)
    _validate_not_none('post_capture_action', post_capture_action)
    _validate_not_none('target_image_name', target_image_name)
    _validate_not_none('target_image_label', target_image_label)
    return self._perform_post(
        self._get_role_instance_operations_path(
            service_name, deployment_name, role_name),
        _XmlSerializer.capture_role_to_xml(
            post_capture_action,
            target_image_name,
            target_image_label,
            provisioning_configuration),
        as_async=True)
[ "def", "capture_role", "(", "self", ",", "service_name", ",", "deployment_name", ",", "role_name", ",", "post_capture_action", ",", "target_image_name", ",", "target_image_label", ",", "provisioning_configuration", "=", "None", ")", ":", "_validate_not_none", "(", "'service_name'", ",", "service_name", ")", "_validate_not_none", "(", "'deployment_name'", ",", "deployment_name", ")", "_validate_not_none", "(", "'role_name'", ",", "role_name", ")", "_validate_not_none", "(", "'post_capture_action'", ",", "post_capture_action", ")", "_validate_not_none", "(", "'target_image_name'", ",", "target_image_name", ")", "_validate_not_none", "(", "'target_image_label'", ",", "target_image_label", ")", "return", "self", ".", "_perform_post", "(", "self", ".", "_get_role_instance_operations_path", "(", "service_name", ",", "deployment_name", ",", "role_name", ")", ",", "_XmlSerializer", ".", "capture_role_to_xml", "(", "post_capture_action", ",", "target_image_name", ",", "target_image_label", ",", "provisioning_configuration", ")", ",", "as_async", "=", "True", ")" ]
The Capture Role operation captures a virtual machine image to your
image gallery. From the captured image, you can create additional
customized virtual machines.

service_name: The name of the service.
deployment_name: The name of the deployment.
role_name: The name of the role.
post_capture_action:
    Specifies the action after capture operation completes. Possible
    values are: Delete, Reprovision.
target_image_name:
    Specifies the image name of the captured virtual machine.
target_image_label:
    Specifies the friendly name of the captured virtual machine.
provisioning_configuration:
    Use an instance of WindowsConfigurationSet or LinuxConfigurationSet.
[ "The", "Capture", "Role", "operation", "captures", "a", "virtual", "machine", "image", "to", "your", "image", "gallery", ".", "From", "the", "captured", "image", "you", "can", "create", "additional", "customized", "virtual", "machines", "." ]
python
test
45.384615
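A hedged usage sketch for capture_role; the subscription id, certificate path, and resource names below are placeholders, and the asynchronous-result handling is only indicated in a comment.

from azure.servicemanagement import ServiceManagementService

sms = ServiceManagementService('<subscription-id>', '<path/to/management.pem>')
result = sms.capture_role(
    service_name='myservice',
    deployment_name='mydeployment',
    role_name='myrole',
    post_capture_action='Delete',            # or 'Reprovision'
    target_image_name='myrole-image',
    target_image_label='Captured image of myrole',
)
# The operation is asynchronous; poll the returned operation status
# (e.g. via get_operation_status) before relying on the captured image.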
apache/airflow
airflow/contrib/hooks/bigquery_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L923-L994
def run_copy(self,
             source_project_dataset_tables,
             destination_project_dataset_table,
             write_disposition='WRITE_EMPTY',
             create_disposition='CREATE_IF_NEEDED',
             labels=None):
    """
    Executes a BigQuery copy command to copy data from one BigQuery table
    to another. See here:

    https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

    For more details about these parameters.

    :param source_project_dataset_tables: One or more dotted
        ``(project:|project.)<dataset>.<table>``
        BigQuery tables to use as the source data. Use a list if there are
        multiple source tables.
        If ``<project>`` is not included, project will be the project defined
        in the connection json.
    :type source_project_dataset_tables: list|string
    :param destination_project_dataset_table: The destination BigQuery
        table. Format is: ``(project:|project.)<dataset>.<table>``
    :type destination_project_dataset_table: str
    :param write_disposition: The write disposition if the table already exists.
    :type write_disposition: str
    :param create_disposition: The create disposition if the table doesn't exist.
    :type create_disposition: str
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :type labels: dict
    """
    source_project_dataset_tables = ([
        source_project_dataset_tables
    ] if not isinstance(source_project_dataset_tables, list) else
        source_project_dataset_tables)

    source_project_dataset_tables_fixup = []
    for source_project_dataset_table in source_project_dataset_tables:
        source_project, source_dataset, source_table = \
            _split_tablename(table_input=source_project_dataset_table,
                             default_project_id=self.project_id,
                             var_name='source_project_dataset_table')
        source_project_dataset_tables_fixup.append({
            'projectId': source_project,
            'datasetId': source_dataset,
            'tableId': source_table
        })

    destination_project, destination_dataset, destination_table = \
        _split_tablename(table_input=destination_project_dataset_table,
                         default_project_id=self.project_id)
    configuration = {
        'copy': {
            'createDisposition': create_disposition,
            'writeDisposition': write_disposition,
            'sourceTables': source_project_dataset_tables_fixup,
            'destinationTable': {
                'projectId': destination_project,
                'datasetId': destination_dataset,
                'tableId': destination_table
            }
        }
    }

    if labels:
        configuration['labels'] = labels

    return self.run_with_configuration(configuration)
[ "def", "run_copy", "(", "self", ",", "source_project_dataset_tables", ",", "destination_project_dataset_table", ",", "write_disposition", "=", "'WRITE_EMPTY'", ",", "create_disposition", "=", "'CREATE_IF_NEEDED'", ",", "labels", "=", "None", ")", ":", "source_project_dataset_tables", "=", "(", "[", "source_project_dataset_tables", "]", "if", "not", "isinstance", "(", "source_project_dataset_tables", ",", "list", ")", "else", "source_project_dataset_tables", ")", "source_project_dataset_tables_fixup", "=", "[", "]", "for", "source_project_dataset_table", "in", "source_project_dataset_tables", ":", "source_project", ",", "source_dataset", ",", "source_table", "=", "_split_tablename", "(", "table_input", "=", "source_project_dataset_table", ",", "default_project_id", "=", "self", ".", "project_id", ",", "var_name", "=", "'source_project_dataset_table'", ")", "source_project_dataset_tables_fixup", ".", "append", "(", "{", "'projectId'", ":", "source_project", ",", "'datasetId'", ":", "source_dataset", ",", "'tableId'", ":", "source_table", "}", ")", "destination_project", ",", "destination_dataset", ",", "destination_table", "=", "_split_tablename", "(", "table_input", "=", "destination_project_dataset_table", ",", "default_project_id", "=", "self", ".", "project_id", ")", "configuration", "=", "{", "'copy'", ":", "{", "'createDisposition'", ":", "create_disposition", ",", "'writeDisposition'", ":", "write_disposition", ",", "'sourceTables'", ":", "source_project_dataset_tables_fixup", ",", "'destinationTable'", ":", "{", "'projectId'", ":", "destination_project", ",", "'datasetId'", ":", "destination_dataset", ",", "'tableId'", ":", "destination_table", "}", "}", "}", "if", "labels", ":", "configuration", "[", "'labels'", "]", "=", "labels", "return", "self", ".", "run_with_configuration", "(", "configuration", ")" ]
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:

https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy

For more details about these parameters.

:param source_project_dataset_tables: One or more dotted
    ``(project:|project.)<dataset>.<table>``
    BigQuery tables to use as the source data. Use a list if there are
    multiple source tables.
    If ``<project>`` is not included, project will be the project defined
    in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
    table. Format is: ``(project:|project.)<dataset>.<table>``
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param labels: a dictionary containing labels for the job/query,
    passed to BigQuery
:type labels: dict
[ "Executes", "a", "BigQuery", "copy", "command", "to", "copy", "data", "from", "one", "BigQuery", "table", "to", "another", ".", "See", "here", ":" ]
python
test
42.944444
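A hedged usage sketch for run_copy; in this Airflow version the method lives on the cursor object returned by the hook's connection, and the connection id and table names below are placeholders.

from airflow.contrib.hooks.bigquery_hook import BigQueryHook

hook = BigQueryHook(bigquery_conn_id='bigquery_default')
cursor = hook.get_conn().cursor()
job_id = cursor.run_copy(
    source_project_dataset_tables='my-project.source_dataset.source_table',
    destination_project_dataset_table='my-project.dest_dataset.dest_table',
    write_disposition='WRITE_TRUNCATE',
    labels={'team': 'analytics'},       # optional job labels
)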
numenta/htmresearch
htmresearch/frameworks/location/location_network_creation.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/location/location_network_creation.py#L645-L649
def getL2Representations(self):
    """
    Returns the active representation in L2.
    """
    return [set(L2.getSelf()._pooler.getActiveCells()) for L2 in self.L2Regions]
[ "def", "getL2Representations", "(", "self", ")", ":", "return", "[", "set", "(", "L2", ".", "getSelf", "(", ")", ".", "_pooler", ".", "getActiveCells", "(", ")", ")", "for", "L2", "in", "self", ".", "L2Regions", "]" ]
Returns the active representation in L2.
[ "Returns", "the", "active", "representation", "in", "L2", "." ]
python
train
33.8
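A hedged sketch of how a getter like getL2Representations is typically consumed; the network object 'net' stands in for whatever experiment class exposes this method, and its construction and training calls are omitted because they are experiment-specific and not described by this record.

# 'net' is assumed to be an already-built and trained location network.
l2_reps = net.getL2Representations()            # one set of active L2 cells per column
sizes = [len(cells) for cells in l2_reps]
overlap = len(l2_reps[0] & l2_reps[1]) if len(l2_reps) > 1 else None
print(sizes, overlap)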