Dataset schema (each record below has these fields):
  repo: string, length 7 to 55
  path: string, length 4 to 223
  url: string, length 87 to 315
  code: string, length 75 to 104k
  code_tokens: list
  docstring: string, length 1 to 46.9k
  docstring_tokens: list
  language: string, 1 distinct value
  partition: string, 3 distinct values
  avg_line_len: float64, 7.91 to 980
Accelize/pycosio
pycosio/storage/s3.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/s3.py#L331-L357
def _read_range(self, start, end=0):
    """
    Read a range of bytes in stream.

    Args:
        start (int): Start stream position.
        end (int): End stream position. 0 to not specify end.

    Returns:
        bytes: Bytes read.
    """
    # Get object part from S3
    try:
        with _handle_client_error():
            response = self._client.get_object(
                Range=self._http_range(start, end),
                **self._client_kwargs)

    # Check for end of file
    except _ClientError as exception:
        if exception.response['Error']['Code'] == 'InvalidRange':
            # EOF
            return bytes()
        raise

    # Get object content
    return response['Body'].read()
[ "def", "_read_range", "(", "self", ",", "start", ",", "end", "=", "0", ")", ":", "# Get object part from S3", "try", ":", "with", "_handle_client_error", "(", ")", ":", "response", "=", "self", ".", "_client", ".", "get_object", "(", "Range", "=", "self", ".", "_http_range", "(", "start", ",", "end", ")", ",", "*", "*", "self", ".", "_client_kwargs", ")", "# Check for end of file", "except", "_ClientError", "as", "exception", ":", "if", "exception", ".", "response", "[", "'Error'", "]", "[", "'Code'", "]", "==", "'InvalidRange'", ":", "# EOF", "return", "bytes", "(", ")", "raise", "# Get object content", "return", "response", "[", "'Body'", "]", ".", "read", "(", ")" ]
Read a range of bytes in stream. Args: start (int): Start stream position. end (int): End stream position. 0 to not specify end. Returns: bytes: Bytes read.
[ "Read", "a", "range", "of", "bytes", "in", "stream", "." ]
python
train
28.962963
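The _read_range method above follows a common boto3 ranged-read pattern. Below is a minimal stand-alone sketch of that pattern (not pycosio code): the bucket, key, and helper name are placeholders, and an accessible S3 object is assumed.

import boto3
from botocore.exceptions import ClientError

BUCKET = "example-bucket"    # placeholder: an accessible bucket
KEY = "example/object.bin"   # placeholder: an existing key

def read_range(client, start, end=0):
    """Return the bytes in [start, end) of the object, or b'' past end of file."""
    byte_range = "bytes=%d-%d" % (start, end - 1) if end else "bytes=%d-" % start
    try:
        response = client.get_object(Bucket=BUCKET, Key=KEY, Range=byte_range)
    except ClientError as exc:
        if exc.response["Error"]["Code"] == "InvalidRange":
            return b""  # start is past the end of the object
        raise
    return response["Body"].read()

s3 = boto3.client("s3")
print(read_range(s3, 0, 16))  # first 16 bytes of the object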
marshmallow-code/webargs
src/webargs/core.py
https://github.com/marshmallow-code/webargs/blob/40cc2d25421d15d9630b1a819f1dcefbbf01ed95/src/webargs/core.py#L97-L130
def get_value(data, name, field, allow_many_nested=False): """Get a value from a dictionary. Handles ``MultiDict`` types when ``multiple=True``. If the value is not found, return `missing`. :param object data: Mapping (e.g. `dict`) or list-like instance to pull the value from. :param str name: Name of the key. :param bool multiple: Whether to handle multiple values. :param bool allow_many_nested: Whether to allow a list of nested objects (it is valid only for JSON format, so it is set to True in ``parse_json`` methods). """ missing_value = missing if allow_many_nested and isinstance(field, ma.fields.Nested) and field.many: if is_collection(data): return data if not hasattr(data, "get"): return missing_value multiple = is_multiple(field) val = data.get(name, missing_value) if multiple and val is not missing: if hasattr(data, "getlist"): return data.getlist(name) elif hasattr(data, "getall"): return data.getall(name) elif isinstance(val, (list, tuple)): return val if val is None: return None else: return [val] return val
[ "def", "get_value", "(", "data", ",", "name", ",", "field", ",", "allow_many_nested", "=", "False", ")", ":", "missing_value", "=", "missing", "if", "allow_many_nested", "and", "isinstance", "(", "field", ",", "ma", ".", "fields", ".", "Nested", ")", "and", "field", ".", "many", ":", "if", "is_collection", "(", "data", ")", ":", "return", "data", "if", "not", "hasattr", "(", "data", ",", "\"get\"", ")", ":", "return", "missing_value", "multiple", "=", "is_multiple", "(", "field", ")", "val", "=", "data", ".", "get", "(", "name", ",", "missing_value", ")", "if", "multiple", "and", "val", "is", "not", "missing", ":", "if", "hasattr", "(", "data", ",", "\"getlist\"", ")", ":", "return", "data", ".", "getlist", "(", "name", ")", "elif", "hasattr", "(", "data", ",", "\"getall\"", ")", ":", "return", "data", ".", "getall", "(", "name", ")", "elif", "isinstance", "(", "val", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "val", "if", "val", "is", "None", ":", "return", "None", "else", ":", "return", "[", "val", "]", "return", "val" ]
Get a value from a dictionary. Handles ``MultiDict`` types when ``multiple=True``. If the value is not found, return `missing`. :param object data: Mapping (e.g. `dict`) or list-like instance to pull the value from. :param str name: Name of the key. :param bool multiple: Whether to handle multiple values. :param bool allow_many_nested: Whether to allow a list of nested objects (it is valid only for JSON format, so it is set to True in ``parse_json`` methods).
[ "Get", "a", "value", "from", "a", "dictionary", ".", "Handles", "MultiDict", "types", "when", "multiple", "=", "True", ".", "If", "the", "value", "is", "not", "found", "return", "missing", "." ]
python
train
35.647059
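The MultiDict handling in get_value (preferring getlist()/getall() when the field takes multiple values) can be seen with a tiny stand-in class; the sketch below is illustrative only and is not webargs code.

# Plain dict.get() would silently drop repeated keys such as ?tag=a&tag=b,
# which is why get_value() reaches for getlist()/getall() first.
class FakeMultiDict(dict):
    """Toy MultiDict: stores lists internally, .get() returns the first item."""
    def getlist(self, name):
        return list(super().get(name, []))
    def get(self, name, default=None):
        values = super().get(name, None)
        return values[0] if values else default

query = FakeMultiDict({"tag": ["a", "b"], "page": ["2"]})
print(query.get("tag"))          # 'a'        -- single-value view
print(query.getlist("tag"))      # ['a', 'b'] -- what a multiple-value field needs
print(query.getlist("missing"))  # []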
saltstack/salt
salt/utils/cloud.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/cloud.py#L2634-L2677
def cachedir_index_add(minion_id, profile, driver, provider, base=None): ''' Add an entry to the cachedir index. This generally only needs to happen when a new instance is created. This entry should contain: .. code-block:: yaml - minion_id - profile used to create the instance - provider and driver name The intent of this function is to speed up lookups for the cloud roster for salt-ssh. However, other code that makes use of profile information can also make use of this function. ''' base = init_cachedir(base) index_file = os.path.join(base, 'index.p') lock_file(index_file) if os.path.exists(index_file): mode = 'rb' if six.PY3 else 'r' with salt.utils.files.fopen(index_file, mode) as fh_: index = salt.utils.data.decode( salt.utils.msgpack.msgpack.load( fh_, encoding=MSGPACK_ENCODING)) else: index = {} prov_comps = provider.split(':') index.update({ minion_id: { 'id': minion_id, 'profile': profile, 'driver': driver, 'provider': prov_comps[0], } }) mode = 'wb' if six.PY3 else 'w' with salt.utils.files.fopen(index_file, mode) as fh_: salt.utils.msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING) unlock_file(index_file)
[ "def", "cachedir_index_add", "(", "minion_id", ",", "profile", ",", "driver", ",", "provider", ",", "base", "=", "None", ")", ":", "base", "=", "init_cachedir", "(", "base", ")", "index_file", "=", "os", ".", "path", ".", "join", "(", "base", ",", "'index.p'", ")", "lock_file", "(", "index_file", ")", "if", "os", ".", "path", ".", "exists", "(", "index_file", ")", ":", "mode", "=", "'rb'", "if", "six", ".", "PY3", "else", "'r'", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "index_file", ",", "mode", ")", "as", "fh_", ":", "index", "=", "salt", ".", "utils", ".", "data", ".", "decode", "(", "salt", ".", "utils", ".", "msgpack", ".", "msgpack", ".", "load", "(", "fh_", ",", "encoding", "=", "MSGPACK_ENCODING", ")", ")", "else", ":", "index", "=", "{", "}", "prov_comps", "=", "provider", ".", "split", "(", "':'", ")", "index", ".", "update", "(", "{", "minion_id", ":", "{", "'id'", ":", "minion_id", ",", "'profile'", ":", "profile", ",", "'driver'", ":", "driver", ",", "'provider'", ":", "prov_comps", "[", "0", "]", ",", "}", "}", ")", "mode", "=", "'wb'", "if", "six", ".", "PY3", "else", "'w'", "with", "salt", ".", "utils", ".", "files", ".", "fopen", "(", "index_file", ",", "mode", ")", "as", "fh_", ":", "salt", ".", "utils", ".", "msgpack", ".", "dump", "(", "index", ",", "fh_", ",", "encoding", "=", "MSGPACK_ENCODING", ")", "unlock_file", "(", "index_file", ")" ]
Add an entry to the cachedir index. This generally only needs to happen when a new instance is created. This entry should contain: .. code-block:: yaml - minion_id - profile used to create the instance - provider and driver name The intent of this function is to speed up lookups for the cloud roster for salt-ssh. However, other code that makes use of profile information can also make use of this function.
[ "Add", "an", "entry", "to", "the", "cachedir", "index", ".", "This", "generally", "only", "needs", "to", "happen", "when", "a", "new", "instance", "is", "created", ".", "This", "entry", "should", "contain", ":" ]
python
train
30.522727
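cachedir_index_add is a read-update-write cycle over a msgpack file. The stand-alone sketch below shows the same cycle with the plain msgpack package; the path and entry values are placeholders, and the lock_file/unlock_file step is omitted.

import os
import msgpack

index_file = "/tmp/index.p"  # placeholder scratch location

# Read the existing index, if any.
if os.path.exists(index_file):
    with open(index_file, "rb") as fh:
        index = msgpack.unpackb(fh.read())
else:
    index = {}

# Add (or overwrite) one entry, as the original does for a new minion.
index["web01"] = {
    "id": "web01",
    "profile": "base-profile",
    "driver": "ec2",
    "provider": "my-ec2-config",  # the original stores provider.split(':')[0]
}

# Write the whole index back.
with open(index_file, "wb") as fh:
    fh.write(msgpack.packb(index))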
draperjames/qtpandas
qtpandas/utils.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/utils.py#L180-L205
def superReadFile(filepath, **kwargs): """ Uses pandas.read_excel (on excel files) and returns a dataframe of the first sheet (unless sheet is specified in kwargs) Uses superReadText (on .txt,.tsv, or .csv files) and returns a dataframe of the data. One function to read almost all types of data files. """ if isinstance(filepath, pd.DataFrame): return filepath ext = os.path.splitext(filepath)[1].lower() if ext in ['.xlsx', '.xls']: df = pd.read_excel(filepath, **kwargs) elif ext in ['.pkl', '.p', '.pickle', '.pk']: df = pd.read_pickle(filepath) else: # Assume it's a text-like file and try to read it. try: df = superReadText(filepath, **kwargs) except Exception as e: # TODO: Make this trace back better? Custom Exception? Raise original? raise Exception("Error reading file: {}".format(e)) return df
[ "def", "superReadFile", "(", "filepath", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "filepath", ",", "pd", ".", "DataFrame", ")", ":", "return", "filepath", "ext", "=", "os", ".", "path", ".", "splitext", "(", "filepath", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "ext", "in", "[", "'.xlsx'", ",", "'.xls'", "]", ":", "df", "=", "pd", ".", "read_excel", "(", "filepath", ",", "*", "*", "kwargs", ")", "elif", "ext", "in", "[", "'.pkl'", ",", "'.p'", ",", "'.pickle'", ",", "'.pk'", "]", ":", "df", "=", "pd", ".", "read_pickle", "(", "filepath", ")", "else", ":", "# Assume it's a text-like file and try to read it.", "try", ":", "df", "=", "superReadText", "(", "filepath", ",", "*", "*", "kwargs", ")", "except", "Exception", "as", "e", ":", "# TODO: Make this trace back better? Custom Exception? Raise original?", "raise", "Exception", "(", "\"Error reading file: {}\"", ".", "format", "(", "e", ")", ")", "return", "df" ]
Uses pandas.read_excel (on excel files) and returns a dataframe of the first sheet (unless sheet is specified in kwargs) Uses superReadText (on .txt,.tsv, or .csv files) and returns a dataframe of the data. One function to read almost all types of data files.
[ "Uses", "pandas", ".", "read_excel", "(", "on", "excel", "files", ")", "and", "returns", "a", "dataframe", "of", "the", "first", "sheet", "(", "unless", "sheet", "is", "specified", "in", "kwargs", ")", "Uses", "superReadText", "(", "on", ".", "txt", ".", "tsv", "or", ".", "csv", "files", ")", "and", "returns", "a", "dataframe", "of", "the", "data", ".", "One", "function", "to", "read", "almost", "all", "types", "of", "data", "files", "." ]
python
train
35.269231
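superReadFile is an extension dispatcher. The sketch below shows the same dispatch using pandas' own readers in place of superReadText; the file name is a placeholder.

import os
import pandas as pd

def read_any(filepath, **kwargs):
    ext = os.path.splitext(filepath)[1].lower()
    if ext in ('.xlsx', '.xls'):
        return pd.read_excel(filepath, **kwargs)
    if ext in ('.pkl', '.p', '.pickle', '.pk'):
        return pd.read_pickle(filepath)
    if ext == '.tsv':
        return pd.read_csv(filepath, sep='\t', **kwargs)
    return pd.read_csv(filepath, **kwargs)  # default: treat as delimited text

df = read_any('data.csv')  # assumes a CSV file exists at this path
print(df.head())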
awslabs/serverless-application-model
examples/apps/lex-book-trip-python/lambda_function.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/examples/apps/lex-book-trip-python/lambda_function.py#L116-L127
def generate_hotel_price(location, nights, room_type): """ Generates a number within a reasonable range that might be expected for a hotel. The price is fixed for a pair of location and roomType. """ room_types = ['queen', 'king', 'deluxe'] cost_of_living = 0 for i in range(len(location)): cost_of_living += ord(location.lower()[i]) - 97 return nights * (100 + cost_of_living + (100 + room_types.index(room_type.lower())))
[ "def", "generate_hotel_price", "(", "location", ",", "nights", ",", "room_type", ")", ":", "room_types", "=", "[", "'queen'", ",", "'king'", ",", "'deluxe'", "]", "cost_of_living", "=", "0", "for", "i", "in", "range", "(", "len", "(", "location", ")", ")", ":", "cost_of_living", "+=", "ord", "(", "location", ".", "lower", "(", ")", "[", "i", "]", ")", "-", "97", "return", "nights", "*", "(", "100", "+", "cost_of_living", "+", "(", "100", "+", "room_types", ".", "index", "(", "room_type", ".", "lower", "(", ")", ")", ")", ")" ]
Generates a number within a reasonable range that might be expected for a hotel. The price is fixed for a pair of location and roomType.
[ "Generates", "a", "number", "within", "a", "reasonable", "range", "that", "might", "be", "expected", "for", "a", "hotel", ".", "The", "price", "is", "fixed", "for", "a", "pair", "of", "location", "and", "roomType", "." ]
python
train
37.833333
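Because the price is a pure function of location and room type, it can be checked by hand. A worked example (input values chosen only for illustration):

room_types = ['queen', 'king', 'deluxe']
location, nights, room_type = "Chicago", 3, "Queen"

# ord(ch) - 97 over "chicago": 2 + 7 + 8 + 2 + 0 + 6 + 14 = 39
cost_of_living = sum(ord(ch) - 97 for ch in location.lower())

# 3 * (100 + 39 + (100 + 0)) = 717 for a queen room
price = nights * (100 + cost_of_living + (100 + room_types.index(room_type.lower())))
print(cost_of_living, price)  # 39 717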
PyCQA/pylint
pylint/checkers/classes.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/classes.py#L768-L790
def _check_proper_bases(self, node): """ Detect that a class inherits something which is not a class or a type. """ for base in node.bases: ancestor = safe_infer(base) if ancestor in (astroid.Uninferable, None): continue if isinstance(ancestor, astroid.Instance) and ancestor.is_subtype_of( "%s.type" % (BUILTINS,) ): continue if not isinstance(ancestor, astroid.ClassDef) or _is_invalid_base_class( ancestor ): self.add_message("inherit-non-class", args=base.as_string(), node=node) if ancestor.name == object.__name__: self.add_message( "useless-object-inheritance", args=node.name, node=node )
[ "def", "_check_proper_bases", "(", "self", ",", "node", ")", ":", "for", "base", "in", "node", ".", "bases", ":", "ancestor", "=", "safe_infer", "(", "base", ")", "if", "ancestor", "in", "(", "astroid", ".", "Uninferable", ",", "None", ")", ":", "continue", "if", "isinstance", "(", "ancestor", ",", "astroid", ".", "Instance", ")", "and", "ancestor", ".", "is_subtype_of", "(", "\"%s.type\"", "%", "(", "BUILTINS", ",", ")", ")", ":", "continue", "if", "not", "isinstance", "(", "ancestor", ",", "astroid", ".", "ClassDef", ")", "or", "_is_invalid_base_class", "(", "ancestor", ")", ":", "self", ".", "add_message", "(", "\"inherit-non-class\"", ",", "args", "=", "base", ".", "as_string", "(", ")", ",", "node", "=", "node", ")", "if", "ancestor", ".", "name", "==", "object", ".", "__name__", ":", "self", ".", "add_message", "(", "\"useless-object-inheritance\"", ",", "args", "=", "node", ".", "name", ",", "node", "=", "node", ")" ]
Detect that a class inherits something which is not a class or a type.
[ "Detect", "that", "a", "class", "inherits", "something", "which", "is", "not", "a", "class", "or", "a", "type", "." ]
python
test
36.173913
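A small module this checker is meant to flag (assuming a Python 3 pylint run); the second class is deliberately invalid and would raise TypeError when executed, which is why it is wrapped in try/except here.

class Legacy(object):  # useless-object-inheritance: explicit `object` base
    pass

NOT_A_CLASS = 42

try:
    class Broken(NOT_A_CLASS):  # inherit-non-class: the base infers to an int
        pass
except TypeError as err:
    print("as expected:", err)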
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/parallel/controller/scheduler.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/parallel/controller/scheduler.py#L572-L605
def dispatch_result(self, raw_msg): """dispatch method for result replies""" try: idents,msg = self.session.feed_identities(raw_msg, copy=False) msg = self.session.unserialize(msg, content=False, copy=False) engine = idents[0] try: idx = self.targets.index(engine) except ValueError: pass # skip load-update for dead engines else: self.finish_job(idx) except Exception: self.log.error("task::Invaid result: %r", raw_msg, exc_info=True) return header = msg['header'] parent = msg['parent_header'] if header.get('dependencies_met', True): success = (header['status'] == 'ok') msg_id = parent['msg_id'] retries = self.retries[msg_id] if not success and retries > 0: # failed self.retries[msg_id] = retries - 1 self.handle_unmet_dependency(idents, parent) else: del self.retries[msg_id] # relay to client and update graph self.handle_result(idents, parent, raw_msg, success) # send to Hub monitor self.mon_stream.send_multipart([b'outtask']+raw_msg, copy=False) else: self.handle_unmet_dependency(idents, parent)
[ "def", "dispatch_result", "(", "self", ",", "raw_msg", ")", ":", "try", ":", "idents", ",", "msg", "=", "self", ".", "session", ".", "feed_identities", "(", "raw_msg", ",", "copy", "=", "False", ")", "msg", "=", "self", ".", "session", ".", "unserialize", "(", "msg", ",", "content", "=", "False", ",", "copy", "=", "False", ")", "engine", "=", "idents", "[", "0", "]", "try", ":", "idx", "=", "self", ".", "targets", ".", "index", "(", "engine", ")", "except", "ValueError", ":", "pass", "# skip load-update for dead engines", "else", ":", "self", ".", "finish_job", "(", "idx", ")", "except", "Exception", ":", "self", ".", "log", ".", "error", "(", "\"task::Invaid result: %r\"", ",", "raw_msg", ",", "exc_info", "=", "True", ")", "return", "header", "=", "msg", "[", "'header'", "]", "parent", "=", "msg", "[", "'parent_header'", "]", "if", "header", ".", "get", "(", "'dependencies_met'", ",", "True", ")", ":", "success", "=", "(", "header", "[", "'status'", "]", "==", "'ok'", ")", "msg_id", "=", "parent", "[", "'msg_id'", "]", "retries", "=", "self", ".", "retries", "[", "msg_id", "]", "if", "not", "success", "and", "retries", ">", "0", ":", "# failed", "self", ".", "retries", "[", "msg_id", "]", "=", "retries", "-", "1", "self", ".", "handle_unmet_dependency", "(", "idents", ",", "parent", ")", "else", ":", "del", "self", ".", "retries", "[", "msg_id", "]", "# relay to client and update graph", "self", ".", "handle_result", "(", "idents", ",", "parent", ",", "raw_msg", ",", "success", ")", "# send to Hub monitor", "self", ".", "mon_stream", ".", "send_multipart", "(", "[", "b'outtask'", "]", "+", "raw_msg", ",", "copy", "=", "False", ")", "else", ":", "self", ".", "handle_unmet_dependency", "(", "idents", ",", "parent", ")" ]
dispatch method for result replies
[ "dispatch", "method", "for", "result", "replies" ]
python
test
40.470588
Microsoft/nni
examples/trials/weight_sharing/ga_squad/data.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/weight_sharing/ga_squad/data.py#L137-L146
def shuffle_step(entries, step): ''' Shuffle the step ''' answer = [] for i in range(0, len(entries), step): sub = entries[i:i+step] shuffle(sub) answer += sub return answer
[ "def", "shuffle_step", "(", "entries", ",", "step", ")", ":", "answer", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "entries", ")", ",", "step", ")", ":", "sub", "=", "entries", "[", "i", ":", "i", "+", "step", "]", "shuffle", "(", "sub", ")", "answer", "+=", "sub", "return", "answer" ]
Shuffle the step
[ "Shuffle", "the", "step" ]
python
train
21.2
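A quick seeded demonstration of shuffle_step; the function is repeated verbatim so the snippet runs on its own. Elements only move within their own chunk of `step` items.

import random
from random import shuffle

def shuffle_step(entries, step):
    answer = []
    for i in range(0, len(entries), step):
        sub = entries[i:i + step]
        shuffle(sub)
        answer += sub
    return answer

random.seed(0)  # reproducible illustration
print(shuffle_step(list(range(9)), 3))
# positions 0-2, 3-5 and 6-8 each hold a permutation of {0,1,2}, {3,4,5}, {6,7,8}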
saltstack/salt
doc/_ext/saltdomain.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/doc/_ext/saltdomain.py#L52-L72
def parse_lit(self, lines):
    '''
    Parse a string line-by-line delineating comments and code

    :returns: A tuple of boolean/list-of-string pairs. True designates a
        comment; False designates code.
    '''
    comment_char = '#'  # TODO: move this into a directive option
    comment = re.compile(r'^\s*{0}[ \n]'.format(comment_char))
    section_test = lambda val: bool(comment.match(val))

    sections = []
    for is_doc, group in itertools.groupby(lines, section_test):
        if is_doc:
            text = [comment.sub('', i).rstrip('\r\n') for i in group]
        else:
            text = [i.rstrip('\r\n') for i in group]

        sections.append((is_doc, text))

    return sections
[ "def", "parse_lit", "(", "self", ",", "lines", ")", ":", "comment_char", "=", "'#'", "# TODO: move this into a directive option", "comment", "=", "re", ".", "compile", "(", "r'^\\s*{0}[ \\n]'", ".", "format", "(", "comment_char", ")", ")", "section_test", "=", "lambda", "val", ":", "bool", "(", "comment", ".", "match", "(", "val", ")", ")", "sections", "=", "[", "]", "for", "is_doc", ",", "group", "in", "itertools", ".", "groupby", "(", "lines", ",", "section_test", ")", ":", "if", "is_doc", ":", "text", "=", "[", "comment", ".", "sub", "(", "''", ",", "i", ")", ".", "rstrip", "(", "'\\r\\n'", ")", "for", "i", "in", "group", "]", "else", ":", "text", "=", "[", "i", ".", "rstrip", "(", "'\\r\\n'", ")", "for", "i", "in", "group", "]", "sections", ".", "append", "(", "(", "is_doc", ",", "text", ")", ")", "return", "sections" ]
Parse a string line-by-line delineating comments and code :returns: A tuple of boolean/list-of-string pairs. True designates a comment; False designates code.
[ "Parse", "a", "string", "line", "-", "by", "-", "line", "delineating", "comments", "and", "code" ]
python
train
35.714286
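The comment/code grouping in parse_lit can be seen outside Sphinx with a few sample lines; the snippet below is a stand-alone reduction, not the directive itself.

import re
import itertools

comment = re.compile(r'^\s*#[ \n]')
is_comment = lambda line: bool(comment.match(line))

lines = [
    "# Install the package\n",
    "# before running it.\n",
    "pip install example\n",
    "# Then import it.\n",
    "import example\n",
]

sections = []
for is_doc, group in itertools.groupby(lines, is_comment):
    if is_doc:
        text = [comment.sub('', item).rstrip('\r\n') for item in group]
    else:
        text = [item.rstrip('\r\n') for item in group]
    sections.append((is_doc, text))

print(sections)
# [(True, ['Install the package', 'before running it.']),
#  (False, ['pip install example']),
#  (True, ['Then import it.']),
#  (False, ['import example'])]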
pantsbuild/pants
src/python/pants/util/contextutil.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/contextutil.py#L302-L312
def open_tar(path_or_file, *args, **kwargs): """ A with-context for tar files. Passes through positional and kwargs to tarfile.open. If path_or_file is a file, caller must close it separately. """ (path, fileobj) = ((path_or_file, None) if isinstance(path_or_file, string_types) else (None, path_or_file)) # TODO(#6071): stop using six.string_types # This should only accept python3 `str`, not byte strings. with closing(TarFile.open(path, *args, fileobj=fileobj, **kwargs)) as tar: yield tar
[ "def", "open_tar", "(", "path_or_file", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "(", "path", ",", "fileobj", ")", "=", "(", "(", "path_or_file", ",", "None", ")", "if", "isinstance", "(", "path_or_file", ",", "string_types", ")", "else", "(", "None", ",", "path_or_file", ")", ")", "# TODO(#6071): stop using six.string_types", "# This should only accept python3 `str`, not byte strings.", "with", "closing", "(", "TarFile", ".", "open", "(", "path", ",", "*", "args", ",", "fileobj", "=", "fileobj", ",", "*", "*", "kwargs", ")", ")", "as", "tar", ":", "yield", "tar" ]
A with-context for tar files. Passes through positional and kwargs to tarfile.open. If path_or_file is a file, caller must close it separately.
[ "A", "with", "-", "context", "for", "tar", "files", ".", "Passes", "through", "positional", "and", "kwargs", "to", "tarfile", ".", "open", "." ]
python
train
52.272727
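open_tar branches on whether it was given a path or an already-open file object. The stand-alone sketch below reproduces that branching with the standard library only (six.string_types replaced by str) and exercises it with an in-memory archive.

import io
import tarfile
from contextlib import closing, contextmanager

@contextmanager
def open_tar(path_or_file, *args, **kwargs):
    path, fileobj = ((path_or_file, None) if isinstance(path_or_file, str)
                     else (None, path_or_file))
    with closing(tarfile.TarFile.open(path, *args, fileobj=fileobj, **kwargs)) as tar:
        yield tar

# Build a tiny archive in memory, then read it back through the file object.
buf = io.BytesIO()
with open_tar(buf, mode="w") as tar:
    data = b"hello"
    info = tarfile.TarInfo(name="hello.txt")
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))

buf.seek(0)
with open_tar(buf, mode="r") as tar:
    print(tar.getnames())  # ['hello.txt']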
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1138-L1169
def get_termination_stats(self, get_cos=True):
    """
    Returns a dict of termination statistics

    Parameters
    ----------
        get_cos : Bool, optional
            Whether or not to calculate the cosine of the residuals with
            the tangent plane of the model using the current J. The
            calculation may take some time. Default is True

    Returns
    -------
        dict
            Has keys
                delta_vals : The last change in parameter values.
                delta_err : The last change in the error.
                exp_err : The expected (last) change in the error.
                frac_err : The fractional change in the error.
                num_iter : The number of iterations completed.
                error : The current error.
    """
    delta_vals = self._last_vals - self.param_vals
    delta_err = self._last_error - self.error
    frac_err = delta_err / self.error
    to_return = {'delta_vals': delta_vals, 'delta_err': delta_err,
                 'num_iter': 1*self._num_iter, 'frac_err': frac_err,
                 'error': self.error, 'exp_err': self._exp_err}
    if get_cos:
        model_cosine = self.calc_model_cosine()
        to_return.update({'model_cosine': model_cosine})
    return to_return
[ "def", "get_termination_stats", "(", "self", ",", "get_cos", "=", "True", ")", ":", "delta_vals", "=", "self", ".", "_last_vals", "-", "self", ".", "param_vals", "delta_err", "=", "self", ".", "_last_error", "-", "self", ".", "error", "frac_err", "=", "delta_err", "/", "self", ".", "error", "to_return", "=", "{", "'delta_vals'", ":", "delta_vals", ",", "'delta_err'", ":", "delta_err", ",", "'num_iter'", ":", "1", "*", "self", ".", "_num_iter", ",", "'frac_err'", ":", "frac_err", ",", "'error'", ":", "self", ".", "error", ",", "'exp_err'", ":", "self", ".", "_exp_err", "}", "if", "get_cos", ":", "model_cosine", "=", "self", ".", "calc_model_cosine", "(", ")", "to_return", ".", "update", "(", "{", "'model_cosine'", ":", "model_cosine", "}", ")", "return", "to_return" ]
Returns a dict of termination statistics Parameters ---------- get_cos : Bool, optional Whether or not to calculate the cosine of the residuals with the tangent plane of the model using the current J. The calculation may take some time. Default is True Returns ------- dict Has keys delta_vals : The last change in parameter values. delta_err : The last change in the error. exp_err : The expected (last) change in the error. frac_err : The fractional change in the error. num_iter : The number of iterations completed. error : The current error.
[ "Returns", "a", "dict", "of", "termination", "statistics" ]
python
valid
42.34375
ebroecker/canmatrix
src/canmatrix/formats/arxml.py
https://github.com/ebroecker/canmatrix/blob/d6150b7a648350f051a11c431e9628308c8d5593/src/canmatrix/formats/arxml.py#L812-L831
def get_element_by_path(tree, path_and_name, namespace): # type: (_Element, str, str) -> typing.Union[_Element, None] """Find sub-element of given path with given short name.""" global xml_element_cache namespace_map = {'A': namespace[1:-1]} base_path, element_name = path_and_name.rsplit('/', 1) if base_path in xml_element_cache: base_element = xml_element_cache[base_path] else: base_xpath = ar_path_to_x_path(base_path) elems = tree.xpath(base_xpath, namespaces=namespace_map) base_element = elems[0] if elems else None xml_element_cache[base_path] = base_element element_found = None if base_element is not None: element_found = base_element.xpath( ".//A:SHORT-NAME[text()='{name}']/..".format(name=element_name), namespaces=namespace_map)[0] return element_found
[ "def", "get_element_by_path", "(", "tree", ",", "path_and_name", ",", "namespace", ")", ":", "# type: (_Element, str, str) -> typing.Union[_Element, None]", "global", "xml_element_cache", "namespace_map", "=", "{", "'A'", ":", "namespace", "[", "1", ":", "-", "1", "]", "}", "base_path", ",", "element_name", "=", "path_and_name", ".", "rsplit", "(", "'/'", ",", "1", ")", "if", "base_path", "in", "xml_element_cache", ":", "base_element", "=", "xml_element_cache", "[", "base_path", "]", "else", ":", "base_xpath", "=", "ar_path_to_x_path", "(", "base_path", ")", "elems", "=", "tree", ".", "xpath", "(", "base_xpath", ",", "namespaces", "=", "namespace_map", ")", "base_element", "=", "elems", "[", "0", "]", "if", "elems", "else", "None", "xml_element_cache", "[", "base_path", "]", "=", "base_element", "element_found", "=", "None", "if", "base_element", "is", "not", "None", ":", "element_found", "=", "base_element", ".", "xpath", "(", "\".//A:SHORT-NAME[text()='{name}']/..\"", ".", "format", "(", "name", "=", "element_name", ")", ",", "namespaces", "=", "namespace_map", ")", "[", "0", "]", "return", "element_found" ]
Find sub-element of given path with given short name.
[ "Find", "sub", "-", "element", "of", "given", "path", "with", "given", "short", "name", "." ]
python
train
43.1
tensorflow/cleverhans
cleverhans/plot/pyplot_image.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/pyplot_image.py#L9-L49
def pair_visual(original, adversarial, figure=None): """ This function displays two images: the original and the adversarial sample :param original: the original input :param adversarial: the input after perturbations have been applied :param figure: if we've already displayed images, use the same plot :return: the matplot figure to reuse for future samples """ import matplotlib.pyplot as plt # Squeeze the image to remove single-dimensional entries from array shape original = np.squeeze(original) adversarial = np.squeeze(adversarial) # Ensure our inputs are of proper shape assert(len(original.shape) == 2 or len(original.shape) == 3) # To avoid creating figures per input sample, reuse the sample plot if figure is None: plt.ion() figure = plt.figure() figure.canvas.set_window_title('Cleverhans: Pair Visualization') # Add the images to the plot perturbations = adversarial - original for index, image in enumerate((original, perturbations, adversarial)): figure.add_subplot(1, 3, index + 1) plt.axis('off') # If the image is 2D, then we have 1 color channel if len(image.shape) == 2: plt.imshow(image, cmap='gray') else: plt.imshow(image) # Give the plot some time to update plt.pause(0.01) # Draw the plot and return plt.show() return figure
[ "def", "pair_visual", "(", "original", ",", "adversarial", ",", "figure", "=", "None", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "# Squeeze the image to remove single-dimensional entries from array shape", "original", "=", "np", ".", "squeeze", "(", "original", ")", "adversarial", "=", "np", ".", "squeeze", "(", "adversarial", ")", "# Ensure our inputs are of proper shape", "assert", "(", "len", "(", "original", ".", "shape", ")", "==", "2", "or", "len", "(", "original", ".", "shape", ")", "==", "3", ")", "# To avoid creating figures per input sample, reuse the sample plot", "if", "figure", "is", "None", ":", "plt", ".", "ion", "(", ")", "figure", "=", "plt", ".", "figure", "(", ")", "figure", ".", "canvas", ".", "set_window_title", "(", "'Cleverhans: Pair Visualization'", ")", "# Add the images to the plot", "perturbations", "=", "adversarial", "-", "original", "for", "index", ",", "image", "in", "enumerate", "(", "(", "original", ",", "perturbations", ",", "adversarial", ")", ")", ":", "figure", ".", "add_subplot", "(", "1", ",", "3", ",", "index", "+", "1", ")", "plt", ".", "axis", "(", "'off'", ")", "# If the image is 2D, then we have 1 color channel", "if", "len", "(", "image", ".", "shape", ")", "==", "2", ":", "plt", ".", "imshow", "(", "image", ",", "cmap", "=", "'gray'", ")", "else", ":", "plt", ".", "imshow", "(", "image", ")", "# Give the plot some time to update", "plt", ".", "pause", "(", "0.01", ")", "# Draw the plot and return", "plt", ".", "show", "(", ")", "return", "figure" ]
This function displays two images: the original and the adversarial sample :param original: the original input :param adversarial: the input after perturbations have been applied :param figure: if we've already displayed images, use the same plot :return: the matplot figure to reuse for future samples
[ "This", "function", "displays", "two", "images", ":", "the", "original", "and", "the", "adversarial", "sample", ":", "param", "original", ":", "the", "original", "input", ":", "param", "adversarial", ":", "the", "input", "after", "perturbations", "have", "been", "applied", ":", "param", "figure", ":", "if", "we", "ve", "already", "displayed", "images", "use", "the", "same", "plot", ":", "return", ":", "the", "matplot", "figure", "to", "reuse", "for", "future", "samples" ]
python
train
32
log2timeline/plaso
plaso/cli/helpers/database_config.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/helpers/database_config.py#L22-L45
def AddArguments(cls, argument_group): """Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. """ argument_group.add_argument( '--user', dest='username', type=str, action='store', default=cls._DEFAULT_USERNAME, metavar='USERNAME', required=False, help='The username used to connect to the database.') argument_group.add_argument( '--password', dest='password', type=str, action='store', default=cls._DEFAULT_PASSWORD, metavar='PASSWORD', help=( 'The password for the database user.')) argument_group.add_argument( '--db_name', '--db-name', dest='db_name', action='store', type=str, default=cls._DEFAULT_NAME, required=False, help=( 'The name of the database to connect to.')) server_config.ServerArgumentsHelper.AddArguments(argument_group)
[ "def", "AddArguments", "(", "cls", ",", "argument_group", ")", ":", "argument_group", ".", "add_argument", "(", "'--user'", ",", "dest", "=", "'username'", ",", "type", "=", "str", ",", "action", "=", "'store'", ",", "default", "=", "cls", ".", "_DEFAULT_USERNAME", ",", "metavar", "=", "'USERNAME'", ",", "required", "=", "False", ",", "help", "=", "'The username used to connect to the database.'", ")", "argument_group", ".", "add_argument", "(", "'--password'", ",", "dest", "=", "'password'", ",", "type", "=", "str", ",", "action", "=", "'store'", ",", "default", "=", "cls", ".", "_DEFAULT_PASSWORD", ",", "metavar", "=", "'PASSWORD'", ",", "help", "=", "(", "'The password for the database user.'", ")", ")", "argument_group", ".", "add_argument", "(", "'--db_name'", ",", "'--db-name'", ",", "dest", "=", "'db_name'", ",", "action", "=", "'store'", ",", "type", "=", "str", ",", "default", "=", "cls", ".", "_DEFAULT_NAME", ",", "required", "=", "False", ",", "help", "=", "(", "'The name of the database to connect to.'", ")", ")", "server_config", ".", "ServerArgumentsHelper", ".", "AddArguments", "(", "argument_group", ")" ]
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
[ "Adds", "command", "line", "arguments", "the", "helper", "supports", "to", "an", "argument", "group", "." ]
python
train
45.541667
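A stand-alone sketch of how an AddArguments-style helper is typically wired into argparse; the defaults and group title here are illustrative, not plaso's.

import argparse

def add_database_arguments(argument_group):
    argument_group.add_argument(
        '--user', dest='username', type=str, action='store',
        default='root', metavar='USERNAME', required=False,
        help='The username used to connect to the database.')
    argument_group.add_argument(
        '--db_name', '--db-name', dest='db_name', type=str, action='store',
        default='data', required=False,
        help='The name of the database to connect to.')

parser = argparse.ArgumentParser(description='database demo')
group = parser.add_argument_group('database arguments')
add_database_arguments(group)

options = parser.parse_args(['--user', 'analyst', '--db-name', 'events'])
print(options.username, options.db_name)  # analyst events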
astropy/photutils
photutils/psf/epsf_stars.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/psf/epsf_stars.py#L388-L402
def all_good_stars(self): """ A list of all `EPSFStar` objects stored in this object that have not been excluded from fitting, including those that comprise linked stars (i.e. `LinkedEPSFStar`), as a flat list. """ stars = [] for star in self.all_stars: if star._excluded_from_fit: continue else: stars.append(star) return stars
[ "def", "all_good_stars", "(", "self", ")", ":", "stars", "=", "[", "]", "for", "star", "in", "self", ".", "all_stars", ":", "if", "star", ".", "_excluded_from_fit", ":", "continue", "else", ":", "stars", ".", "append", "(", "star", ")", "return", "stars" ]
A list of all `EPSFStar` objects stored in this object that have not been excluded from fitting, including those that comprise linked stars (i.e. `LinkedEPSFStar`), as a flat list.
[ "A", "list", "of", "all", "EPSFStar", "objects", "stored", "in", "this", "object", "that", "have", "not", "been", "excluded", "from", "fitting", "including", "those", "that", "comprise", "linked", "stars", "(", "i", ".", "e", ".", "LinkedEPSFStar", ")", "as", "a", "flat", "list", "." ]
python
train
29.066667
bukun/TorCMS
torcms/modules/info_modules.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/modules/info_modules.py#L185-L197
def render_it(self, kind, num, with_tag=False, glyph=''): ''' render, no user logged in ''' all_cats = MPost.query_recent(num, kind=kind) kwd = { 'with_tag': with_tag, 'router': router_post[kind], 'glyph': glyph } return self.render_string('modules/info/list_equation.html', recs=all_cats, kwd=kwd)
[ "def", "render_it", "(", "self", ",", "kind", ",", "num", ",", "with_tag", "=", "False", ",", "glyph", "=", "''", ")", ":", "all_cats", "=", "MPost", ".", "query_recent", "(", "num", ",", "kind", "=", "kind", ")", "kwd", "=", "{", "'with_tag'", ":", "with_tag", ",", "'router'", ":", "router_post", "[", "kind", "]", ",", "'glyph'", ":", "glyph", "}", "return", "self", ".", "render_string", "(", "'modules/info/list_equation.html'", ",", "recs", "=", "all_cats", ",", "kwd", "=", "kwd", ")" ]
render, no user logged in
[ "render", "no", "user", "logged", "in" ]
python
train
34.307692
neherlab/treetime
treetime/gtr_site_specific.py
https://github.com/neherlab/treetime/blob/f6cdb58d19243a18ffdaa2b2ec71872fa00e65c0/treetime/gtr_site_specific.py#L83-L125
def random(cls, L=1, avg_mu=1.0, alphabet='nuc', pi_dirichlet_alpha=1, W_dirichlet_alpha=3.0, mu_gamma_alpha=3.0): """ Creates a random GTR model Parameters ---------- mu : float Substitution rate alphabet : str Alphabet name (should be standard: 'nuc', 'nuc_gap', 'aa', 'aa_gap') """ from scipy.stats import gamma alphabet=alphabets[alphabet] gtr = cls(alphabet=alphabet, seq_len=L) n = gtr.alphabet.shape[0] if pi_dirichlet_alpha: pi = 1.0*gamma.rvs(pi_dirichlet_alpha, size=(n,L)) else: pi = np.ones((n,L)) pi /= pi.sum(axis=0) if W_dirichlet_alpha: tmp = 1.0*gamma.rvs(W_dirichlet_alpha, size=(n,n)) else: tmp = np.ones((n,n)) tmp = np.tril(tmp,k=-1) W = tmp + tmp.T if mu_gamma_alpha: mu = gamma.rvs(mu_gamma_alpha, size=(L,)) else: mu = np.ones(L) gtr.assign_rates(mu=mu, pi=pi, W=W) gtr.mu *= avg_mu/np.mean(gtr.mu) return gtr
[ "def", "random", "(", "cls", ",", "L", "=", "1", ",", "avg_mu", "=", "1.0", ",", "alphabet", "=", "'nuc'", ",", "pi_dirichlet_alpha", "=", "1", ",", "W_dirichlet_alpha", "=", "3.0", ",", "mu_gamma_alpha", "=", "3.0", ")", ":", "from", "scipy", ".", "stats", "import", "gamma", "alphabet", "=", "alphabets", "[", "alphabet", "]", "gtr", "=", "cls", "(", "alphabet", "=", "alphabet", ",", "seq_len", "=", "L", ")", "n", "=", "gtr", ".", "alphabet", ".", "shape", "[", "0", "]", "if", "pi_dirichlet_alpha", ":", "pi", "=", "1.0", "*", "gamma", ".", "rvs", "(", "pi_dirichlet_alpha", ",", "size", "=", "(", "n", ",", "L", ")", ")", "else", ":", "pi", "=", "np", ".", "ones", "(", "(", "n", ",", "L", ")", ")", "pi", "/=", "pi", ".", "sum", "(", "axis", "=", "0", ")", "if", "W_dirichlet_alpha", ":", "tmp", "=", "1.0", "*", "gamma", ".", "rvs", "(", "W_dirichlet_alpha", ",", "size", "=", "(", "n", ",", "n", ")", ")", "else", ":", "tmp", "=", "np", ".", "ones", "(", "(", "n", ",", "n", ")", ")", "tmp", "=", "np", ".", "tril", "(", "tmp", ",", "k", "=", "-", "1", ")", "W", "=", "tmp", "+", "tmp", ".", "T", "if", "mu_gamma_alpha", ":", "mu", "=", "gamma", ".", "rvs", "(", "mu_gamma_alpha", ",", "size", "=", "(", "L", ",", ")", ")", "else", ":", "mu", "=", "np", ".", "ones", "(", "L", ")", "gtr", ".", "assign_rates", "(", "mu", "=", "mu", ",", "pi", "=", "pi", ",", "W", "=", "W", ")", "gtr", ".", "mu", "*=", "avg_mu", "/", "np", ".", "mean", "(", "gtr", ".", "mu", ")", "return", "gtr" ]
Creates a random GTR model Parameters ---------- mu : float Substitution rate alphabet : str Alphabet name (should be standard: 'nuc', 'nuc_gap', 'aa', 'aa_gap')
[ "Creates", "a", "random", "GTR", "model" ]
python
test
25.534884
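The two random-construction steps in the classmethod above, isolated: gamma draws normalised per column for pi, and a symmetric W built from a strictly lower-triangular draw. The sizes are illustrative.

import numpy as np
from scipy.stats import gamma

n, L = 4, 3          # alphabet size and sequence length, for illustration
np.random.seed(0)

pi = gamma.rvs(1.0, size=(n, L))   # pi_dirichlet_alpha = 1
pi /= pi.sum(axis=0)
print(pi.sum(axis=0))              # ~[1. 1. 1.]: each column is a distribution

tmp = np.tril(gamma.rvs(3.0, size=(n, n)), k=-1)   # W_dirichlet_alpha = 3
W = tmp + tmp.T
print(np.allclose(W, W.T), np.allclose(np.diag(W), 0))  # True True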
googlefonts/glyphsLib
Lib/glyphsLib/builder/builders.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/builder/builders.py#L277-L419
def _apply_bracket_layers(self): """Extract bracket layers in a GSGlyph into free-standing UFO glyphs with Designspace substitution rules. As of Glyphs.app 2.6, only single axis bracket layers are supported, we assume the axis to be the first axis in the Designspace. Bracket layer backgrounds are not round-tripped. A glyph can have more than one bracket layer but Designspace rule/OpenType variation condition sets apply all substitutions in a rule in a range, so we have to potentially sort bracket layers into rule buckets. Example: if a glyph "x" has two bracket layers [300] and [600] and glyph "a" has bracket layer [300] and the bracket axis tops out at 1000, we need the following Designspace rules: - BRACKET.300.600 # min 300, max 600 on the bracket axis. - x -> x.BRACKET.300 - BRACKET.600.1000 - x -> x.BRACKET.600 - BRACKET.300.1000 - a -> a.BRACKET.300 """ if not self._designspace.axes: raise ValueError( "Cannot apply bracket layers unless at least one axis is defined." ) bracket_axis = self._designspace.axes[0] # Determine the axis scale in design space because crossovers/locations are # in design space (axis.default/minimum/maximum may be user space). if bracket_axis.map: axis_scale = [design_location for _, design_location in bracket_axis.map] bracket_axis_min = min(axis_scale) bracket_axis_max = max(axis_scale) else: # No mapping means user and design space are the same. bracket_axis_min = bracket_axis.minimum bracket_axis_max = bracket_axis.maximum # 1. bracket_layer_map: Organize all bracket layers by crossover value, so # we can go through the layers by location and copy them to free-standing # glyphs. # 2. glyph_crossovers: Keep track of the crossover values of a single glyph, so # we can easily sort them into rule buckets. # 3. glyph_sanity_counter: Count the number of master layers providing # bracket layers per glyph and crossover value. We currently only support # the situation where there is a bracket layer for _all_ masters, what the # Glyphs.app tutorial calls 'Changing All Masters'. bracket_layer_map = defaultdict(list) # type: Dict[int, List[classes.GSLayer]] glyph_crossovers = defaultdict(set) # type: Dict[str, Set[int]] glyph_sanity_counter = defaultdict( list ) # type: Dict[Tuple[str, int], List[str]] for layer in self.bracket_layers: glyph_name = layer.parent.name n = layer.name try: bracket_crossover = int(n[n.index("[") + 1 : n.index("]")]) except ValueError: raise ValueError( "Only bracket layers with one numerical (design space) location " "(meaning the first axis in the designspace file) are currently " "supported." ) if not bracket_axis_min <= bracket_crossover <= bracket_axis_max: raise ValueError( "Glyph {glyph_name}: Bracket layer {layer_name} must be within the " "design space bounds of the {bracket_axis_name} axis: minimum " "{bracket_axis_minimum}, maximum {bracket_axis_maximum}.".format( glyph_name=glyph_name, layer_name=n, bracket_axis_name=bracket_axis.name, bracket_axis_minimum=bracket_axis_min, bracket_axis_maximum=bracket_axis_max, ) ) bracket_layer_map[bracket_crossover].append(layer) glyph_crossovers[glyph_name].add(bracket_crossover) glyph_sanity_counter[(glyph_name, bracket_crossover)].append( layer.associatedMasterId ) # Check that each bracket layer is present in all master layers. 
unbalanced_bracket_layers = [] n_masters = len(list(self.masters)) for ((glyph_name, _), master_layer_ids) in glyph_sanity_counter.items(): if not len(master_layer_ids) == n_masters: unbalanced_bracket_layers.append(glyph_name) if unbalanced_bracket_layers: raise ValueError( "Currently, we only support bracket layers that are present on all " "masters, i.e. what the Glyphs.app tutorial calls 'Changing All " "Masters'. There is a/are bracket layer(s) missing " "for glyph(s) {unbalanced_glyphs}.".format( unbalanced_glyphs=unbalanced_bracket_layers ) ) # Sort crossovers into buckets. rule_bucket = defaultdict(list) # type: Dict[Tuple[int, int], List[int]] for glyph_name, crossovers in sorted(glyph_crossovers.items()): for crossover_min, crossover_max in util.pairwise( sorted(crossovers) + [bracket_axis_max] ): rule_bucket[(int(crossover_min), int(crossover_max))].append(glyph_name) # Generate rules for the bracket layers. for (axis_range_min, axis_range_max), glyph_names in sorted( rule_bucket.items() ): rule_name = "BRACKET.{}.{}".format(axis_range_min, axis_range_max) glyph_sub_suffix = ".BRACKET.{}".format(axis_range_min) rule = designspaceLib.RuleDescriptor() rule.name = rule_name rule.conditionSets.append( [ { "name": bracket_axis.name, "minimum": axis_range_min, "maximum": axis_range_max, } ] ) rule.subs.extend( [ (glyph_name, glyph_name + glyph_sub_suffix) for glyph_name in glyph_names ] ) self._designspace.addRule(rule) # Finally, copy bracket layers to their own glyphs. for location, layers in bracket_layer_map.items(): for layer in layers: ufo_font = self._sources[ layer.associatedMasterId or layer.layerId ].font.layers.defaultLayer ufo_glyph_name = "{glyph_name}.BRACKET.{location}".format( glyph_name=layer.parent.name, location=location ) ufo_glyph = ufo_font.newGlyph(ufo_glyph_name) self.to_ufo_glyph(ufo_glyph, layer, layer.parent) ufo_glyph.unicodes = [] # Avoid cmap interference ufo_glyph.lib[GLYPHLIB_PREFIX + "_originalLayerName"] = layer.name
[ "def", "_apply_bracket_layers", "(", "self", ")", ":", "if", "not", "self", ".", "_designspace", ".", "axes", ":", "raise", "ValueError", "(", "\"Cannot apply bracket layers unless at least one axis is defined.\"", ")", "bracket_axis", "=", "self", ".", "_designspace", ".", "axes", "[", "0", "]", "# Determine the axis scale in design space because crossovers/locations are", "# in design space (axis.default/minimum/maximum may be user space).", "if", "bracket_axis", ".", "map", ":", "axis_scale", "=", "[", "design_location", "for", "_", ",", "design_location", "in", "bracket_axis", ".", "map", "]", "bracket_axis_min", "=", "min", "(", "axis_scale", ")", "bracket_axis_max", "=", "max", "(", "axis_scale", ")", "else", ":", "# No mapping means user and design space are the same.", "bracket_axis_min", "=", "bracket_axis", ".", "minimum", "bracket_axis_max", "=", "bracket_axis", ".", "maximum", "# 1. bracket_layer_map: Organize all bracket layers by crossover value, so", "# we can go through the layers by location and copy them to free-standing", "# glyphs.", "# 2. glyph_crossovers: Keep track of the crossover values of a single glyph, so", "# we can easily sort them into rule buckets.", "# 3. glyph_sanity_counter: Count the number of master layers providing", "# bracket layers per glyph and crossover value. We currently only support", "# the situation where there is a bracket layer for _all_ masters, what the", "# Glyphs.app tutorial calls 'Changing All Masters'.", "bracket_layer_map", "=", "defaultdict", "(", "list", ")", "# type: Dict[int, List[classes.GSLayer]]", "glyph_crossovers", "=", "defaultdict", "(", "set", ")", "# type: Dict[str, Set[int]]", "glyph_sanity_counter", "=", "defaultdict", "(", "list", ")", "# type: Dict[Tuple[str, int], List[str]]", "for", "layer", "in", "self", ".", "bracket_layers", ":", "glyph_name", "=", "layer", ".", "parent", ".", "name", "n", "=", "layer", ".", "name", "try", ":", "bracket_crossover", "=", "int", "(", "n", "[", "n", ".", "index", "(", "\"[\"", ")", "+", "1", ":", "n", ".", "index", "(", "\"]\"", ")", "]", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Only bracket layers with one numerical (design space) location \"", "\"(meaning the first axis in the designspace file) are currently \"", "\"supported.\"", ")", "if", "not", "bracket_axis_min", "<=", "bracket_crossover", "<=", "bracket_axis_max", ":", "raise", "ValueError", "(", "\"Glyph {glyph_name}: Bracket layer {layer_name} must be within the \"", "\"design space bounds of the {bracket_axis_name} axis: minimum \"", "\"{bracket_axis_minimum}, maximum {bracket_axis_maximum}.\"", ".", "format", "(", "glyph_name", "=", "glyph_name", ",", "layer_name", "=", "n", ",", "bracket_axis_name", "=", "bracket_axis", ".", "name", ",", "bracket_axis_minimum", "=", "bracket_axis_min", ",", "bracket_axis_maximum", "=", "bracket_axis_max", ",", ")", ")", "bracket_layer_map", "[", "bracket_crossover", "]", ".", "append", "(", "layer", ")", "glyph_crossovers", "[", "glyph_name", "]", ".", "add", "(", "bracket_crossover", ")", "glyph_sanity_counter", "[", "(", "glyph_name", ",", "bracket_crossover", ")", "]", ".", "append", "(", "layer", ".", "associatedMasterId", ")", "# Check that each bracket layer is present in all master layers.", "unbalanced_bracket_layers", "=", "[", "]", "n_masters", "=", "len", "(", "list", "(", "self", ".", "masters", ")", ")", "for", "(", "(", "glyph_name", ",", "_", ")", ",", "master_layer_ids", ")", "in", "glyph_sanity_counter", ".", "items", "(", ")", ":", "if", 
"not", "len", "(", "master_layer_ids", ")", "==", "n_masters", ":", "unbalanced_bracket_layers", ".", "append", "(", "glyph_name", ")", "if", "unbalanced_bracket_layers", ":", "raise", "ValueError", "(", "\"Currently, we only support bracket layers that are present on all \"", "\"masters, i.e. what the Glyphs.app tutorial calls 'Changing All \"", "\"Masters'. There is a/are bracket layer(s) missing \"", "\"for glyph(s) {unbalanced_glyphs}.\"", ".", "format", "(", "unbalanced_glyphs", "=", "unbalanced_bracket_layers", ")", ")", "# Sort crossovers into buckets.", "rule_bucket", "=", "defaultdict", "(", "list", ")", "# type: Dict[Tuple[int, int], List[int]]", "for", "glyph_name", ",", "crossovers", "in", "sorted", "(", "glyph_crossovers", ".", "items", "(", ")", ")", ":", "for", "crossover_min", ",", "crossover_max", "in", "util", ".", "pairwise", "(", "sorted", "(", "crossovers", ")", "+", "[", "bracket_axis_max", "]", ")", ":", "rule_bucket", "[", "(", "int", "(", "crossover_min", ")", ",", "int", "(", "crossover_max", ")", ")", "]", ".", "append", "(", "glyph_name", ")", "# Generate rules for the bracket layers.", "for", "(", "axis_range_min", ",", "axis_range_max", ")", ",", "glyph_names", "in", "sorted", "(", "rule_bucket", ".", "items", "(", ")", ")", ":", "rule_name", "=", "\"BRACKET.{}.{}\"", ".", "format", "(", "axis_range_min", ",", "axis_range_max", ")", "glyph_sub_suffix", "=", "\".BRACKET.{}\"", ".", "format", "(", "axis_range_min", ")", "rule", "=", "designspaceLib", ".", "RuleDescriptor", "(", ")", "rule", ".", "name", "=", "rule_name", "rule", ".", "conditionSets", ".", "append", "(", "[", "{", "\"name\"", ":", "bracket_axis", ".", "name", ",", "\"minimum\"", ":", "axis_range_min", ",", "\"maximum\"", ":", "axis_range_max", ",", "}", "]", ")", "rule", ".", "subs", ".", "extend", "(", "[", "(", "glyph_name", ",", "glyph_name", "+", "glyph_sub_suffix", ")", "for", "glyph_name", "in", "glyph_names", "]", ")", "self", ".", "_designspace", ".", "addRule", "(", "rule", ")", "# Finally, copy bracket layers to their own glyphs.", "for", "location", ",", "layers", "in", "bracket_layer_map", ".", "items", "(", ")", ":", "for", "layer", "in", "layers", ":", "ufo_font", "=", "self", ".", "_sources", "[", "layer", ".", "associatedMasterId", "or", "layer", ".", "layerId", "]", ".", "font", ".", "layers", ".", "defaultLayer", "ufo_glyph_name", "=", "\"{glyph_name}.BRACKET.{location}\"", ".", "format", "(", "glyph_name", "=", "layer", ".", "parent", ".", "name", ",", "location", "=", "location", ")", "ufo_glyph", "=", "ufo_font", ".", "newGlyph", "(", "ufo_glyph_name", ")", "self", ".", "to_ufo_glyph", "(", "ufo_glyph", ",", "layer", ",", "layer", ".", "parent", ")", "ufo_glyph", ".", "unicodes", "=", "[", "]", "# Avoid cmap interference", "ufo_glyph", ".", "lib", "[", "GLYPHLIB_PREFIX", "+", "\"_originalLayerName\"", "]", "=", "layer", ".", "name" ]
Extract bracket layers in a GSGlyph into free-standing UFO glyphs with Designspace substitution rules. As of Glyphs.app 2.6, only single axis bracket layers are supported, we assume the axis to be the first axis in the Designspace. Bracket layer backgrounds are not round-tripped. A glyph can have more than one bracket layer but Designspace rule/OpenType variation condition sets apply all substitutions in a rule in a range, so we have to potentially sort bracket layers into rule buckets. Example: if a glyph "x" has two bracket layers [300] and [600] and glyph "a" has bracket layer [300] and the bracket axis tops out at 1000, we need the following Designspace rules: - BRACKET.300.600 # min 300, max 600 on the bracket axis. - x -> x.BRACKET.300 - BRACKET.600.1000 - x -> x.BRACKET.600 - BRACKET.300.1000 - a -> a.BRACKET.300
[ "Extract", "bracket", "layers", "in", "a", "GSGlyph", "into", "free", "-", "standing", "UFO", "glyphs", "with", "Designspace", "substitution", "rules", "." ]
python
train
48.13986
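The crossover-to-rule-bucket step can be isolated in a few lines; the sketch below reproduces the example from the docstring ("x" with bracket layers [300] and [600], "a" with [300], axis maximum 1000) with a hand-rolled pairwise helper.

from collections import defaultdict

def pairwise(seq):
    return zip(seq, seq[1:])

glyph_crossovers = {"x": {300, 600}, "a": {300}}
bracket_axis_max = 1000

rule_bucket = defaultdict(list)
for glyph_name, crossovers in sorted(glyph_crossovers.items()):
    for range_min, range_max in pairwise(sorted(crossovers) + [bracket_axis_max]):
        rule_bucket[(range_min, range_max)].append(glyph_name)

for (range_min, range_max), names in sorted(rule_bucket.items()):
    print("BRACKET.%d.%d -> %s" % (range_min, range_max, names))
# BRACKET.300.600 -> ['x']
# BRACKET.300.1000 -> ['a']
# BRACKET.600.1000 -> ['x']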
soimort/you-get
src/you_get/extractors/bokecc.py
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/bokecc.py#L17-L39
def download_by_id(self, vid='', title=None, output_dir='.', merge=True, info_only=False, **kwargs):
    """self, str->None

    Keyword arguments:
    self: self
    vid: The video ID for BokeCC cloud, something like
    FE3BB999594978049C33DC5901307461

    Calls the prepare() to download the video.

    If no title is provided, this method shall try to find a proper title
    with the information provided within the returned content of the API."""

    assert vid

    self.prepare(vid=vid, title=title, **kwargs)
    self.extract(**kwargs)

    self.download(output_dir=output_dir,
                  merge=merge,
                  info_only=info_only, **kwargs)
[ "def", "download_by_id", "(", "self", ",", "vid", "=", "''", ",", "title", "=", "None", ",", "output_dir", "=", "'.'", ",", "merge", "=", "True", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "assert", "vid", "self", ".", "prepare", "(", "vid", "=", "vid", ",", "title", "=", "title", ",", "*", "*", "kwargs", ")", "self", ".", "extract", "(", "*", "*", "kwargs", ")", "self", ".", "download", "(", "output_dir", "=", "output_dir", ",", "merge", "=", "merge", ",", "info_only", "=", "info_only", ",", "*", "*", "kwargs", ")" ]
self, str->None Keyword arguments: self: self vid: The video ID for BokeCC cloud, something like FE3BB999594978049C33DC5901307461 Calls the prepare() to download the video. If no title is provided, this method shall try to find a proper title with the information provided within the returned content of the API.
[ "self", "str", "-", ">", "None", "Keyword", "arguments", ":", "self", ":", "self", "vid", ":", "The", "video", "ID", "for", "BokeCC", "cloud", "something", "like", "FE3BB999594978049C33DC5901307461", "Calls", "the", "prepare", "()", "to", "download", "the", "video", ".", "If", "no", "title", "is", "provided", "this", "method", "shall", "try", "to", "find", "a", "proper", "title", "with", "the", "information", "providin", "within", "the", "returned", "content", "of", "the", "API", "." ]
python
test
32.478261
jssimporter/python-jss
jss/jamf_software_server.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jamf_software_server.py#L660-L663
def MobileDeviceApplication(self, data=None, subset=None): """{dynamic_docstring}""" return self.factory.get_object(jssobjects.MobileDeviceApplication, data, subset)
[ "def", "MobileDeviceApplication", "(", "self", ",", "data", "=", "None", ",", "subset", "=", "None", ")", ":", "return", "self", ".", "factory", ".", "get_object", "(", "jssobjects", ".", "MobileDeviceApplication", ",", "data", ",", "subset", ")" ]
{dynamic_docstring}
[ "{", "dynamic_docstring", "}" ]
python
train
54.25
domainaware/parsedmarc
parsedmarc/utils.py
https://github.com/domainaware/parsedmarc/blob/ecc9fd434c23d896ccd1f35795ccc047f946ed05/parsedmarc/utils.py#L393-L410
def get_filename_safe_string(string): """ Converts a string to a string that is safe for a filename Args: string (str): A string to make safe for a filename Returns: str: A string safe for a filename """ invalid_filename_chars = ['\\', '/', ':', '"', '*', '?', '|', '\n', '\r'] if string is None: string = "None" for char in invalid_filename_chars: string = string.replace(char, "") string = string.rstrip(".") return string
[ "def", "get_filename_safe_string", "(", "string", ")", ":", "invalid_filename_chars", "=", "[", "'\\\\'", ",", "'/'", ",", "':'", ",", "'\"'", ",", "'*'", ",", "'?'", ",", "'|'", ",", "'\\n'", ",", "'\\r'", "]", "if", "string", "is", "None", ":", "string", "=", "\"None\"", "for", "char", "in", "invalid_filename_chars", ":", "string", "=", "string", ".", "replace", "(", "char", ",", "\"\"", ")", "string", "=", "string", ".", "rstrip", "(", "\".\"", ")", "return", "string" ]
Converts a string to a string that is safe for a filename Args: string (str): A string to make safe for a filename Returns: str: A string safe for a filename
[ "Converts", "a", "string", "to", "a", "string", "that", "is", "safe", "for", "a", "filename", "Args", ":", "string", "(", "str", ")", ":", "A", "string", "to", "make", "safe", "for", "a", "filename" ]
python
test
28.388889
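A quick demonstration of the sanitiser; the function is repeated verbatim so the snippet runs on its own.

def get_filename_safe_string(string):
    invalid_filename_chars = ['\\', '/', ':', '"', '*', '?', '|', '\n', '\r']
    if string is None:
        string = "None"
    for char in invalid_filename_chars:
        string = string.replace(char, "")
    return string.rstrip(".")

print(get_filename_safe_string('report: "Q1/Q2" results?'))  # report Q1Q2 results
print(get_filename_safe_string(None))                        # None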
BlueBrain/NeuroM
neurom/check/structural_checks.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/check/structural_checks.py#L117-L127
def has_valid_soma(data_wrapper):
    '''Check if a data block has a valid soma

    Returns:
        CheckResult with result
    '''
    try:
        make_soma(data_wrapper.soma_points())
        return CheckResult(True)
    except SomaError:
        return CheckResult(False)
[ "def", "has_valid_soma", "(", "data_wrapper", ")", ":", "try", ":", "make_soma", "(", "data_wrapper", ".", "soma_points", "(", ")", ")", "return", "CheckResult", "(", "True", ")", "except", "SomaError", ":", "return", "CheckResult", "(", "False", ")" ]
Check if a data block has a valid soma Returns: CheckResult with result
[ "Check", "if", "a", "data", "block", "has", "a", "valid", "soma" ]
python
train
24.272727
roll/interest-py
interest/logger/logger.py
https://github.com/roll/interest-py/blob/e6e1def4f2999222aac2fb1d290ae94250673b89/interest/logger/logger.py#L112-L117
def error(self, message, *args, **kwargs):
    """Log error event.

    Compatible with logging.error signature.
    """
    self.system.error(message, *args, **kwargs)
[ "def", "error", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "system", ".", "error", "(", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Log error event. Compatible with logging.error signature.
[ "Log", "error", "event", "." ]
python
train
29.833333
chaoss/grimoirelab-elk
utils/index_mapping.py
https://github.com/chaoss/grimoirelab-elk/blob/64e08b324b36d9f6909bf705145d6451c8d34e65/utils/index_mapping.py#L145-L190
def get_elastic_items(elastic, elastic_scroll_id=None, limit=None): """ Get the items from the index """ scroll_size = limit if not limit: scroll_size = DEFAULT_LIMIT if not elastic: return None url = elastic.index_url max_process_items_pack_time = "5m" # 10 minutes url += "/_search?scroll=%s&size=%i" % (max_process_items_pack_time, scroll_size) if elastic_scroll_id: # Just continue with the scrolling url = elastic.url url += "/_search/scroll" scroll_data = { "scroll": max_process_items_pack_time, "scroll_id": elastic_scroll_id } res = requests.post(url, data=json.dumps(scroll_data)) else: query = """ { "query": { "bool": { "must": [] } } } """ logging.debug("%s\n%s", url, json.dumps(json.loads(query), indent=4)) res = requests.post(url, data=query) rjson = None try: rjson = res.json() except Exception: logging.error("No JSON found in %s", res.text) logging.error("No results found from %s", url) return rjson
[ "def", "get_elastic_items", "(", "elastic", ",", "elastic_scroll_id", "=", "None", ",", "limit", "=", "None", ")", ":", "scroll_size", "=", "limit", "if", "not", "limit", ":", "scroll_size", "=", "DEFAULT_LIMIT", "if", "not", "elastic", ":", "return", "None", "url", "=", "elastic", ".", "index_url", "max_process_items_pack_time", "=", "\"5m\"", "# 10 minutes", "url", "+=", "\"/_search?scroll=%s&size=%i\"", "%", "(", "max_process_items_pack_time", ",", "scroll_size", ")", "if", "elastic_scroll_id", ":", "# Just continue with the scrolling", "url", "=", "elastic", ".", "url", "url", "+=", "\"/_search/scroll\"", "scroll_data", "=", "{", "\"scroll\"", ":", "max_process_items_pack_time", ",", "\"scroll_id\"", ":", "elastic_scroll_id", "}", "res", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "scroll_data", ")", ")", "else", ":", "query", "=", "\"\"\"\n {\n \"query\": {\n \"bool\": {\n \"must\": []\n }\n }\n }\n \"\"\"", "logging", ".", "debug", "(", "\"%s\\n%s\"", ",", "url", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "query", ")", ",", "indent", "=", "4", ")", ")", "res", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "query", ")", "rjson", "=", "None", "try", ":", "rjson", "=", "res", ".", "json", "(", ")", "except", "Exception", ":", "logging", ".", "error", "(", "\"No JSON found in %s\"", ",", "res", ".", "text", ")", "logging", ".", "error", "(", "\"No results found from %s\"", ",", "url", ")", "return", "rjson" ]
Get the items from the index
[ "Get", "the", "items", "from", "the", "index" ]
python
train
26.413043
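The `get_elastic_items` record above pages through an index with the Elasticsearch scroll API, either opening a scroll context or continuing an existing one. A rough end-to-end sketch of that protocol with `requests`; the host, index name and page size below are assumptions, not values from the record:

```python
import json
import requests

ES = "http://localhost:9200"   # assumed Elasticsearch host
INDEX = "items"                # hypothetical index name
HEADERS = {"Content-Type": "application/json"}

# Open a scroll context and fetch the first page.
page = requests.post(ES + "/" + INDEX + "/_search?scroll=5m&size=100",
                     data=json.dumps({"query": {"match_all": {}}}),
                     headers=HEADERS).json()

all_docs = []
while page.get("hits", {}).get("hits"):
    all_docs.extend(hit["_source"] for hit in page["hits"]["hits"])
    # Continue scrolling with the returned scroll id until a page comes back empty.
    page = requests.post(ES + "/_search/scroll",
                         data=json.dumps({"scroll": "5m",
                                          "scroll_id": page["_scroll_id"]}),
                         headers=HEADERS).json()
```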
orbingol/NURBS-Python
geomdl/operations.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/operations.py#L1369-L1388
def find_ctrlpts(obj, u, v=None, **kwargs): """ Finds the control points involved in the evaluation of the curve/surface point defined by the input parameter(s). :param obj: curve or surface :type obj: abstract.Curve or abstract.Surface :param u: parameter (for curve), parameter on the u-direction (for surface) :type u: float :param v: parameter on the v-direction (for surface only) :type v: float :return: control points; 1-dimensional array for curve, 2-dimensional array for surface :rtype: list """ if isinstance(obj, abstract.Curve): return ops.find_ctrlpts_curve(u, obj, **kwargs) elif isinstance(obj, abstract.Surface): if v is None: raise GeomdlException("Parameter value for the v-direction must be set for operating on surfaces") return ops.find_ctrlpts_surface(u, v, obj, **kwargs) else: raise GeomdlException("The input must be an instance of abstract.Curve or abstract.Surface")
[ "def", "find_ctrlpts", "(", "obj", ",", "u", ",", "v", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "obj", ",", "abstract", ".", "Curve", ")", ":", "return", "ops", ".", "find_ctrlpts_curve", "(", "u", ",", "obj", ",", "*", "*", "kwargs", ")", "elif", "isinstance", "(", "obj", ",", "abstract", ".", "Surface", ")", ":", "if", "v", "is", "None", ":", "raise", "GeomdlException", "(", "\"Parameter value for the v-direction must be set for operating on surfaces\"", ")", "return", "ops", ".", "find_ctrlpts_surface", "(", "u", ",", "v", ",", "obj", ",", "*", "*", "kwargs", ")", "else", ":", "raise", "GeomdlException", "(", "\"The input must be an instance of abstract.Curve or abstract.Surface\"", ")" ]
Finds the control points involved in the evaluation of the curve/surface point defined by the input parameter(s). :param obj: curve or surface :type obj: abstract.Curve or abstract.Surface :param u: parameter (for curve), parameter on the u-direction (for surface) :type u: float :param v: parameter on the v-direction (for surface only) :type v: float :return: control points; 1-dimensional array for curve, 2-dimensional array for surface :rtype: list
[ "Finds", "the", "control", "points", "involved", "in", "the", "evaluation", "of", "the", "curve", "/", "surface", "point", "defined", "by", "the", "input", "parameter", "(", "s", ")", "." ]
python
train
48.6
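The `find_ctrlpts` record above dispatches on curve vs. surface and returns the control points that influence a given parameter value. A short usage sketch against a hypothetical quadratic B-spline curve; the control points and knot vector below are made up but form a valid clamped curve:

```python
from geomdl import BSpline, operations

# Hypothetical quadratic curve: 4 control points, clamped knot vector of length 4 + 2 + 1.
curve = BSpline.Curve()
curve.degree = 2
curve.ctrlpts = [[0.0, 0.0], [1.0, 2.0], [2.0, 2.0], [3.0, 0.0]]
curve.knotvector = [0, 0, 0, 0.5, 1, 1, 1]

# Control points involved in evaluating the curve point at u = 0.3.
involved = operations.find_ctrlpts(curve, 0.3)
print(involved)
```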
CiscoTestAutomation/yang
ncdiff/src/yang/ncdiff/gnmi.py
https://github.com/CiscoTestAutomation/yang/blob/c70ec5ac5a91f276c4060009203770ece92e76b4/ncdiff/src/yang/ncdiff/gnmi.py#L569-L634
def get_path(self, instance=True, origin='openconfig'): '''get_path High-level api: get_path returns gNMI path object of the config node. Note that gNMI Path can specify list instance but cannot specify leaf-list instance. Parameters ---------- instance : `bool` True if the gNMI Path object refers to only one instance of a list. False if the gNMI Path object refers to all instances of a list. Returns ------- Path An object of gNMI Path class. ''' def get_name(node, default_ns): if origin == 'openconfig' or origin == '': return gNMIParser.parse_tag(node.tag) else: return self.device.convert_tag(default_ns, node.tag, src=Tag.LXML_ETREE, dst=ns_spec[origin]['path']) def get_keys(node, default_ns): keys = Composer(self.device, node).keys ret = {} for key in keys: if origin=='openconfig' or origin == '': key_ns, key_val = gNMIParser.parse_tag(key) else: key_ns, key_val = self.device.convert_tag(default_ns, key, src=Tag.LXML_ETREE, dst=ns_spec[origin]['path']) ns_tuple = self.convert_ns(key_ns, src=Tag.NAMESPACE) val_ns, val_val = self.device.convert_tag(ns_tuple[Tag.PREFIX], node.find(key).text, src=Tag.XPATH, dst=ns_spec[origin]['path']) ret[key_val] = val_val return ret def get_pathelem(node, default_ns): ns, name = get_name(node, default_ns) schema_node = self.device.get_schema_node(node) if schema_node.get('type') == 'list' and \ (node != self.node or instance): return ns, PathElem(name=name, key=get_keys(node, ns)) else: return ns, PathElem(name=name) nodes = list(reversed(list(self.node.iterancestors())))[1:] + \ [self.node] path_elems = [] default_ns = '' for node in nodes: default_ns, path_elem = get_pathelem(node, default_ns) path_elems.append(path_elem) return Path(elem=path_elems, origin=origin)
[ "def", "get_path", "(", "self", ",", "instance", "=", "True", ",", "origin", "=", "'openconfig'", ")", ":", "def", "get_name", "(", "node", ",", "default_ns", ")", ":", "if", "origin", "==", "'openconfig'", "or", "origin", "==", "''", ":", "return", "gNMIParser", ".", "parse_tag", "(", "node", ".", "tag", ")", "else", ":", "return", "self", ".", "device", ".", "convert_tag", "(", "default_ns", ",", "node", ".", "tag", ",", "src", "=", "Tag", ".", "LXML_ETREE", ",", "dst", "=", "ns_spec", "[", "origin", "]", "[", "'path'", "]", ")", "def", "get_keys", "(", "node", ",", "default_ns", ")", ":", "keys", "=", "Composer", "(", "self", ".", "device", ",", "node", ")", ".", "keys", "ret", "=", "{", "}", "for", "key", "in", "keys", ":", "if", "origin", "==", "'openconfig'", "or", "origin", "==", "''", ":", "key_ns", ",", "key_val", "=", "gNMIParser", ".", "parse_tag", "(", "key", ")", "else", ":", "key_ns", ",", "key_val", "=", "self", ".", "device", ".", "convert_tag", "(", "default_ns", ",", "key", ",", "src", "=", "Tag", ".", "LXML_ETREE", ",", "dst", "=", "ns_spec", "[", "origin", "]", "[", "'path'", "]", ")", "ns_tuple", "=", "self", ".", "convert_ns", "(", "key_ns", ",", "src", "=", "Tag", ".", "NAMESPACE", ")", "val_ns", ",", "val_val", "=", "self", ".", "device", ".", "convert_tag", "(", "ns_tuple", "[", "Tag", ".", "PREFIX", "]", ",", "node", ".", "find", "(", "key", ")", ".", "text", ",", "src", "=", "Tag", ".", "XPATH", ",", "dst", "=", "ns_spec", "[", "origin", "]", "[", "'path'", "]", ")", "ret", "[", "key_val", "]", "=", "val_val", "return", "ret", "def", "get_pathelem", "(", "node", ",", "default_ns", ")", ":", "ns", ",", "name", "=", "get_name", "(", "node", ",", "default_ns", ")", "schema_node", "=", "self", ".", "device", ".", "get_schema_node", "(", "node", ")", "if", "schema_node", ".", "get", "(", "'type'", ")", "==", "'list'", "and", "(", "node", "!=", "self", ".", "node", "or", "instance", ")", ":", "return", "ns", ",", "PathElem", "(", "name", "=", "name", ",", "key", "=", "get_keys", "(", "node", ",", "ns", ")", ")", "else", ":", "return", "ns", ",", "PathElem", "(", "name", "=", "name", ")", "nodes", "=", "list", "(", "reversed", "(", "list", "(", "self", ".", "node", ".", "iterancestors", "(", ")", ")", ")", ")", "[", "1", ":", "]", "+", "[", "self", ".", "node", "]", "path_elems", "=", "[", "]", "default_ns", "=", "''", "for", "node", "in", "nodes", ":", "default_ns", ",", "path_elem", "=", "get_pathelem", "(", "node", ",", "default_ns", ")", "path_elems", ".", "append", "(", "path_elem", ")", "return", "Path", "(", "elem", "=", "path_elems", ",", "origin", "=", "origin", ")" ]
get_path High-level api: get_path returns gNMI path object of the config node. Note that gNMI Path can specify list instance but cannot specify leaf-list instance. Parameters ---------- instance : `bool` True if the gNMI Path object refers to only one instance of a list. False if the gNMI Path object refers to all instances of a list. Returns ------- Path An object of gNMI Path class.
[ "get_path" ]
python
train
41.242424
hobson/aima
aima/probability.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/probability.py#L421-L433
def likelihood_weighting(X, e, bn, N): """Estimate the probability distribution of variable X given evidence e in BayesNet bn. [Fig. 14.15] >>> seed(1017) >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T), ... burglary, 10000).show_approx() 'False: 0.702, True: 0.298' """ W = dict((x, 0) for x in bn.variable_values(X)) for j in xrange(N): sample, weight = weighted_sample(bn, e) # boldface x, w in Fig. 14.15 W[sample[X]] += weight return ProbDist(X, W)
[ "def", "likelihood_weighting", "(", "X", ",", "e", ",", "bn", ",", "N", ")", ":", "W", "=", "dict", "(", "(", "x", ",", "0", ")", "for", "x", "in", "bn", ".", "variable_values", "(", "X", ")", ")", "for", "j", "in", "xrange", "(", "N", ")", ":", "sample", ",", "weight", "=", "weighted_sample", "(", "bn", ",", "e", ")", "# boldface x, w in Fig. 14.15", "W", "[", "sample", "[", "X", "]", "]", "+=", "weight", "return", "ProbDist", "(", "X", ",", "W", ")" ]
Estimate the probability distribution of variable X given evidence e in BayesNet bn. [Fig. 14.15] >>> seed(1017) >>> likelihood_weighting('Burglary', dict(JohnCalls=T, MaryCalls=T), ... burglary, 10000).show_approx() 'False: 0.702, True: 0.298'
[ "Estimate", "the", "probability", "distribution", "of", "variable", "X", "given", "evidence", "e", "in", "BayesNet", "bn", ".", "[", "Fig", ".", "14", ".", "15", "]", ">>>", "seed", "(", "1017", ")", ">>>", "likelihood_weighting", "(", "Burglary", "dict", "(", "JohnCalls", "=", "T", "MaryCalls", "=", "T", ")", "...", "burglary", "10000", ")", ".", "show_approx", "()", "False", ":", "0", ".", "702", "True", ":", "0", ".", "298" ]
python
valid
40
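The `likelihood_weighting` record above estimates a posterior by sampling the non-evidence variables and weighting each sample by the likelihood of the evidence. A self-contained toy version for a two-node network (Rain -> WetGrass) that does not depend on the aima code; the probabilities are made up:

```python
import random

def likelihood_weighting_toy(n=100_000, seed=0):
    """Estimate P(Rain | WetGrass=True) with P(Rain)=0.2, P(Wet|Rain)=0.9, P(Wet|~Rain)=0.1."""
    random.seed(seed)
    weights = {True: 0.0, False: 0.0}
    for _ in range(n):
        rain = random.random() < 0.2            # sample the query variable from its prior
        weights[rain] += 0.9 if rain else 0.1   # weight by P(evidence | parents)
    total = sum(weights.values())
    return {value: w / total for value, w in weights.items()}

print(likelihood_weighting_toy())  # close to {True: 0.692, False: 0.308}
```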
Esri/ArcREST
src/arcrest/manageorg/_portals.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_portals.py#L1267-L1334
def users(self, start=1, num=10, sortField="fullName", sortOrder="asc", role=None): """ Lists all the members of the organization. The start and num paging parameters are supported. Inputs: start - The number of the first entry in the result set response. The index number is 1-based. The default value of start is 1 (that is, the first search result). The start parameter, along with the num parameter, can be used to paginate the search results. num - The maximum number of results to be included in the result set response. The default value is 10, and the maximum allowed value is 100.The start parameter, along with the num parameter, can be used to paginate the search results. sortField - field to sort on sortOrder - asc or desc on the sortField role - name of the role or role id to search Output: list of User classes """ users = [] url = self._url + "/users" params = { "f" : "json", "start" : start, "num" : num } if not role is None: params['role'] = role if not sortField is None: params['sortField'] = sortField if not sortOrder is None: params['sortOrder'] = sortOrder from ._community import Community res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) if "users" in res: if len(res['users']) > 0: parsed = urlparse.urlparse(self._url) if parsed.netloc.lower().find('arcgis.com') == -1: cURL = "%s://%s/%s/sharing/rest/community" % (parsed.scheme, parsed.netloc, parsed.path[1:].split('/')[0]) else: cURL = "%s://%s/sharing/rest/community" % (parsed.scheme, parsed.netloc) com = Community(url=cURL, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) for r in res['users']: users.append( com.users.user(r["username"]) ) res['users'] = users return res
[ "def", "users", "(", "self", ",", "start", "=", "1", ",", "num", "=", "10", ",", "sortField", "=", "\"fullName\"", ",", "sortOrder", "=", "\"asc\"", ",", "role", "=", "None", ")", ":", "users", "=", "[", "]", "url", "=", "self", ".", "_url", "+", "\"/users\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"start\"", ":", "start", ",", "\"num\"", ":", "num", "}", "if", "not", "role", "is", "None", ":", "params", "[", "'role'", "]", "=", "role", "if", "not", "sortField", "is", "None", ":", "params", "[", "'sortField'", "]", "=", "sortField", "if", "not", "sortOrder", "is", "None", ":", "params", "[", "'sortOrder'", "]", "=", "sortOrder", "from", ".", "_community", "import", "Community", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "if", "\"users\"", "in", "res", ":", "if", "len", "(", "res", "[", "'users'", "]", ")", ">", "0", ":", "parsed", "=", "urlparse", ".", "urlparse", "(", "self", ".", "_url", ")", "if", "parsed", ".", "netloc", ".", "lower", "(", ")", ".", "find", "(", "'arcgis.com'", ")", "==", "-", "1", ":", "cURL", "=", "\"%s://%s/%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "parsed", ".", "path", "[", "1", ":", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", ")", "else", ":", "cURL", "=", "\"%s://%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ")", "com", "=", "Community", "(", "url", "=", "cURL", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "for", "r", "in", "res", "[", "'users'", "]", ":", "users", ".", "append", "(", "com", ".", "users", ".", "user", "(", "r", "[", "\"username\"", "]", ")", ")", "res", "[", "'users'", "]", "=", "users", "return", "res" ]
Lists all the members of the organization. The start and num paging parameters are supported. Inputs: start - The number of the first entry in the result set response. The index number is 1-based. The default value of start is 1 (that is, the first search result). The start parameter, along with the num parameter, can be used to paginate the search results. num - The maximum number of results to be included in the result set response. The default value is 10, and the maximum allowed value is 100.The start parameter, along with the num parameter, can be used to paginate the search results. sortField - field to sort on sortOrder - asc or desc on the sortField role - name of the role or role id to search Output: list of User classes
[ "Lists", "all", "the", "members", "of", "the", "organization", ".", "The", "start", "and", "num", "paging", "parameters", "are", "supported", "." ]
python
train
41.867647
project-rig/rig
rig/machine_control/machine_controller.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/machine_control/machine_controller.py#L754-L761
def _get_vcpu_field_and_address(self, field_name, x, y, p): """Get the field and address for a VCPU struct field.""" vcpu_struct = self.structs[b"vcpu"] field = vcpu_struct[six.b(field_name)] address = (self.read_struct_field("sv", "vcpu_base", x, y) + vcpu_struct.size * p) + field.offset pack_chars = b"<" + field.pack_chars return field, address, pack_chars
[ "def", "_get_vcpu_field_and_address", "(", "self", ",", "field_name", ",", "x", ",", "y", ",", "p", ")", ":", "vcpu_struct", "=", "self", ".", "structs", "[", "b\"vcpu\"", "]", "field", "=", "vcpu_struct", "[", "six", ".", "b", "(", "field_name", ")", "]", "address", "=", "(", "self", ".", "read_struct_field", "(", "\"sv\"", ",", "\"vcpu_base\"", ",", "x", ",", "y", ")", "+", "vcpu_struct", ".", "size", "*", "p", ")", "+", "field", ".", "offset", "pack_chars", "=", "b\"<\"", "+", "field", ".", "pack_chars", "return", "field", ",", "address", ",", "pack_chars" ]
Get the field and address for a VCPU struct field.
[ "Get", "the", "field", "and", "address", "for", "a", "VCPU", "struct", "field", "." ]
python
train
52.5
tanghaibao/jcvi
jcvi/algorithms/tsp.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/algorithms/tsp.py#L50-L98
def print_to_tsplib(self, edges, tspfile, precision=0): """ See TSPlib format: <https://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/> NAME: bayg29 TYPE: TSP COMMENT: 29 Cities in Bavaria, geographical distances DIMENSION: 29 EDGE_WEIGHT_TYPE: EXPLICIT EDGE_WEIGHT_FORMAT: UPPER_ROW DISPLAY_DATA_TYPE: TWOD_DISPLAY EDGE_WEIGHT_SECTION (... numbers ...) """ fw = must_open(tspfile, "w") incident, nodes = node_to_edge(edges, directed=False) self.nodes = nodes nodes_indices = dict((n, i) for i, n in enumerate(nodes)) self.nnodes = nnodes = len(nodes) # TSPLIB requires explicit weights to be integral, and non-negative weights = [x[-1] for x in edges] max_x, min_x = max(weights), min(weights) inf = 2 * max(abs(max_x), abs(min_x)) factor = 10 ** precision logging.debug("TSP rescale: max_x={0}, min_x={1}, inf={2}, factor={3}".\ format(max_x, min_x, inf, factor)) print("NAME: data", file=fw) print("TYPE: TSP", file=fw) print("DIMENSION: {0}".format(nnodes), file=fw) D = np.ones((nnodes, nnodes), dtype=float) * inf for a, b, w in edges: ia, ib = nodes_indices[a], nodes_indices[b] D[ia, ib] = D[ib, ia] = w D = (D - min_x) * factor D = D.astype(int) print("EDGE_WEIGHT_TYPE: EXPLICIT", file=fw) print("EDGE_WEIGHT_FORMAT: FULL_MATRIX", file=fw) print("EDGE_WEIGHT_SECTION", file=fw) for row in D: # Dump the full matrix print(" " + " ".join(str(x) for x in row), file=fw) print("EOF", file=fw) fw.close() logging.debug("Write TSP instance to `{0}`".format(tspfile))
[ "def", "print_to_tsplib", "(", "self", ",", "edges", ",", "tspfile", ",", "precision", "=", "0", ")", ":", "fw", "=", "must_open", "(", "tspfile", ",", "\"w\"", ")", "incident", ",", "nodes", "=", "node_to_edge", "(", "edges", ",", "directed", "=", "False", ")", "self", ".", "nodes", "=", "nodes", "nodes_indices", "=", "dict", "(", "(", "n", ",", "i", ")", "for", "i", ",", "n", "in", "enumerate", "(", "nodes", ")", ")", "self", ".", "nnodes", "=", "nnodes", "=", "len", "(", "nodes", ")", "# TSPLIB requires explicit weights to be integral, and non-negative", "weights", "=", "[", "x", "[", "-", "1", "]", "for", "x", "in", "edges", "]", "max_x", ",", "min_x", "=", "max", "(", "weights", ")", ",", "min", "(", "weights", ")", "inf", "=", "2", "*", "max", "(", "abs", "(", "max_x", ")", ",", "abs", "(", "min_x", ")", ")", "factor", "=", "10", "**", "precision", "logging", ".", "debug", "(", "\"TSP rescale: max_x={0}, min_x={1}, inf={2}, factor={3}\"", ".", "format", "(", "max_x", ",", "min_x", ",", "inf", ",", "factor", ")", ")", "print", "(", "\"NAME: data\"", ",", "file", "=", "fw", ")", "print", "(", "\"TYPE: TSP\"", ",", "file", "=", "fw", ")", "print", "(", "\"DIMENSION: {0}\"", ".", "format", "(", "nnodes", ")", ",", "file", "=", "fw", ")", "D", "=", "np", ".", "ones", "(", "(", "nnodes", ",", "nnodes", ")", ",", "dtype", "=", "float", ")", "*", "inf", "for", "a", ",", "b", ",", "w", "in", "edges", ":", "ia", ",", "ib", "=", "nodes_indices", "[", "a", "]", ",", "nodes_indices", "[", "b", "]", "D", "[", "ia", ",", "ib", "]", "=", "D", "[", "ib", ",", "ia", "]", "=", "w", "D", "=", "(", "D", "-", "min_x", ")", "*", "factor", "D", "=", "D", ".", "astype", "(", "int", ")", "print", "(", "\"EDGE_WEIGHT_TYPE: EXPLICIT\"", ",", "file", "=", "fw", ")", "print", "(", "\"EDGE_WEIGHT_FORMAT: FULL_MATRIX\"", ",", "file", "=", "fw", ")", "print", "(", "\"EDGE_WEIGHT_SECTION\"", ",", "file", "=", "fw", ")", "for", "row", "in", "D", ":", "# Dump the full matrix", "print", "(", "\" \"", "+", "\" \"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "row", ")", ",", "file", "=", "fw", ")", "print", "(", "\"EOF\"", ",", "file", "=", "fw", ")", "fw", ".", "close", "(", ")", "logging", ".", "debug", "(", "\"Write TSP instance to `{0}`\"", ".", "format", "(", "tspfile", ")", ")" ]
See TSPlib format: <https://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/> NAME: bayg29 TYPE: TSP COMMENT: 29 Cities in Bavaria, geographical distances DIMENSION: 29 EDGE_WEIGHT_TYPE: EXPLICIT EDGE_WEIGHT_FORMAT: UPPER_ROW DISPLAY_DATA_TYPE: TWOD_DISPLAY EDGE_WEIGHT_SECTION (... numbers ...)
[ "See", "TSPlib", "format", ":", "<https", ":", "//", "www", ".", "iwr", ".", "uni", "-", "heidelberg", ".", "de", "/", "groups", "/", "comopt", "/", "software", "/", "TSPLIB95", "/", ">" ]
python
train
36.836735
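The `print_to_tsplib` record above serialises a weighted graph as a TSPLIB instance with an explicit FULL_MATRIX. A minimal sketch of the same file layout for a hand-made three-node distance matrix (the distances are arbitrary, and TSPLIB weights must be non-negative integers):

```python
# Hypothetical symmetric 3x3 distance matrix.
D = [[0, 5, 9],
     [5, 0, 3],
     [9, 3, 0]]

lines = [
    "NAME: data",
    "TYPE: TSP",
    "DIMENSION: %d" % len(D),
    "EDGE_WEIGHT_TYPE: EXPLICIT",
    "EDGE_WEIGHT_FORMAT: FULL_MATRIX",
    "EDGE_WEIGHT_SECTION",
]
lines += [" " + " ".join(str(x) for x in row) for row in D]
lines.append("EOF")

with open("toy.tsp", "w") as fw:
    fw.write("\n".join(lines) + "\n")
```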
honeynet/beeswarm
beeswarm/drones/client/baits/ftp.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/client/baits/ftp.py#L206-L217
def _process_list(self, list_line): # -rw-r--r-- 1 ftp ftp 68 May 09 19:37 testftp.txt """ Processes a line of 'ls -l' output, and updates state accordingly. :param list_line: Line to process """ res = list_line.split(' ', 8) if res[0].startswith('-'): self.state['file_list'].append(res[-1]) if res[0].startswith('d'): self.state['dir_list'].append(res[-1])
[ "def", "_process_list", "(", "self", ",", "list_line", ")", ":", "# -rw-r--r-- 1 ftp ftp 68\t May 09 19:37 testftp.txt", "res", "=", "list_line", ".", "split", "(", "' '", ",", "8", ")", "if", "res", "[", "0", "]", ".", "startswith", "(", "'-'", ")", ":", "self", ".", "state", "[", "'file_list'", "]", ".", "append", "(", "res", "[", "-", "1", "]", ")", "if", "res", "[", "0", "]", ".", "startswith", "(", "'d'", ")", ":", "self", ".", "state", "[", "'dir_list'", "]", ".", "append", "(", "res", "[", "-", "1", "]", ")" ]
Processes a line of 'ls -l' output, and updates state accordingly. :param list_line: Line to process
[ "Processes", "a", "line", "of", "ls", "-", "l", "output", "and", "updates", "state", "accordingly", "." ]
python
train
36.75
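The `_process_list` record above sorts `ls -l` output lines into file and directory lists based on the first character of the permission string. A standalone sketch on two hypothetical, single-space-separated listing lines (real FTP listings are often column-aligned with runs of spaces, which this simple split does not normalise):

```python
state = {"file_list": [], "dir_list": []}

listing = [
    "-rw-r--r-- 1 ftp ftp 68 May 09 19:37 testftp.txt",
    "drwxr-xr-x 2 ftp ftp 4096 May 09 19:30 uploads",
]

for line in listing:
    parts = line.split(' ', 8)   # maxsplit=8 keeps a file name containing spaces intact
    if parts[0].startswith('-'):
        state["file_list"].append(parts[-1])
    elif parts[0].startswith('d'):
        state["dir_list"].append(parts[-1])

print(state)  # {'file_list': ['testftp.txt'], 'dir_list': ['uploads']}
```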
bwohlberg/sporco
sporco/admm/ccmod.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/ccmod.py#L768-L802
def xstep(self): r"""Minimise Augmented Lagrangian with respect to block vector :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T & \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`. """ # This test reflects empirical evidence that two slightly # different implementations are faster for single or # multi-channel data. This kludge is intended to be temporary. if self.cri.Cd > 1: for i in range(self.Nb): self.xistep(i) else: self.YU[:] = self.Y[..., np.newaxis] - self.U b = np.swapaxes(self.ZSf[..., np.newaxis], self.cri.axisK, -1) \ + self.rho*sl.rfftn(self.YU, None, self.cri.axisN) for i in range(self.Nb): self.Xf[..., i] = sl.solvedbi_sm( self.Zf[..., [i], :], self.rho, b[..., i], axis=self.cri.axisM) self.X = sl.irfftn(self.Xf, self.cri.Nv, self.cri.axisN) if self.opt['LinSolveCheck']: ZSfs = np.sum(self.ZSf, axis=self.cri.axisK, keepdims=True) YU = np.sum(self.Y[..., np.newaxis] - self.U, axis=-1) b = ZSfs + self.rho*sl.rfftn(YU, None, self.cri.axisN) Xf = self.swapaxes(self.Xf) Zop = lambda x: sl.inner(self.Zf, x, axis=self.cri.axisM) ZHop = lambda x: np.conj(self.Zf) * x ax = np.sum(ZHop(Zop(Xf)) + self.rho*Xf, axis=self.cri.axisK, keepdims=True) self.xrrs = sl.rrs(ax, b) else: self.xrrs = None
[ "def", "xstep", "(", "self", ")", ":", "# This test reflects empirical evidence that two slightly", "# different implementations are faster for single or", "# multi-channel data. This kludge is intended to be temporary.", "if", "self", ".", "cri", ".", "Cd", ">", "1", ":", "for", "i", "in", "range", "(", "self", ".", "Nb", ")", ":", "self", ".", "xistep", "(", "i", ")", "else", ":", "self", ".", "YU", "[", ":", "]", "=", "self", ".", "Y", "[", "...", ",", "np", ".", "newaxis", "]", "-", "self", ".", "U", "b", "=", "np", ".", "swapaxes", "(", "self", ".", "ZSf", "[", "...", ",", "np", ".", "newaxis", "]", ",", "self", ".", "cri", ".", "axisK", ",", "-", "1", ")", "+", "self", ".", "rho", "*", "sl", ".", "rfftn", "(", "self", ".", "YU", ",", "None", ",", "self", ".", "cri", ".", "axisN", ")", "for", "i", "in", "range", "(", "self", ".", "Nb", ")", ":", "self", ".", "Xf", "[", "...", ",", "i", "]", "=", "sl", ".", "solvedbi_sm", "(", "self", ".", "Zf", "[", "...", ",", "[", "i", "]", ",", ":", "]", ",", "self", ".", "rho", ",", "b", "[", "...", ",", "i", "]", ",", "axis", "=", "self", ".", "cri", ".", "axisM", ")", "self", ".", "X", "=", "sl", ".", "irfftn", "(", "self", ".", "Xf", ",", "self", ".", "cri", ".", "Nv", ",", "self", ".", "cri", ".", "axisN", ")", "if", "self", ".", "opt", "[", "'LinSolveCheck'", "]", ":", "ZSfs", "=", "np", ".", "sum", "(", "self", ".", "ZSf", ",", "axis", "=", "self", ".", "cri", ".", "axisK", ",", "keepdims", "=", "True", ")", "YU", "=", "np", ".", "sum", "(", "self", ".", "Y", "[", "...", ",", "np", ".", "newaxis", "]", "-", "self", ".", "U", ",", "axis", "=", "-", "1", ")", "b", "=", "ZSfs", "+", "self", ".", "rho", "*", "sl", ".", "rfftn", "(", "YU", ",", "None", ",", "self", ".", "cri", ".", "axisN", ")", "Xf", "=", "self", ".", "swapaxes", "(", "self", ".", "Xf", ")", "Zop", "=", "lambda", "x", ":", "sl", ".", "inner", "(", "self", ".", "Zf", ",", "x", ",", "axis", "=", "self", ".", "cri", ".", "axisM", ")", "ZHop", "=", "lambda", "x", ":", "np", ".", "conj", "(", "self", ".", "Zf", ")", "*", "x", "ax", "=", "np", ".", "sum", "(", "ZHop", "(", "Zop", "(", "Xf", ")", ")", "+", "self", ".", "rho", "*", "Xf", ",", "axis", "=", "self", ".", "cri", ".", "axisK", ",", "keepdims", "=", "True", ")", "self", ".", "xrrs", "=", "sl", ".", "rrs", "(", "ax", ",", "b", ")", "else", ":", "self", ".", "xrrs", "=", "None" ]
r"""Minimise Augmented Lagrangian with respect to block vector :math:`\mathbf{x} = \left( \begin{array}{ccc} \mathbf{x}_0^T & \mathbf{x}_1^T & \ldots \end{array} \right)^T\;`.
[ "r", "Minimise", "Augmented", "Lagrangian", "with", "respect", "to", "block", "vector", ":", "math", ":", "\\", "mathbf", "{", "x", "}", "=", "\\", "left", "(", "\\", "begin", "{", "array", "}", "{", "ccc", "}", "\\", "mathbf", "{", "x", "}", "_0^T", "&", "\\", "mathbf", "{", "x", "}", "_1^T", "&", "\\", "ldots", "\\", "end", "{", "array", "}", "\\", "right", ")", "^T", "\\", ";", "." ]
python
train
44.714286
openpermissions/chub
chub/api.py
https://github.com/openpermissions/chub/blob/00762aa17015f4b3010673d1570c708eab3c34ed/chub/api.py#L75-L79
def prepare_request(self, *args, **kw): """ creates a full featured HTTPRequest objects """ self.http_request = self.request_class(self.path, *args, **kw)
[ "def", "prepare_request", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "self", ".", "http_request", "=", "self", ".", "request_class", "(", "self", ".", "path", ",", "*", "args", ",", "*", "*", "kw", ")" ]
creates a full featured HTTPRequest objects
[ "creates", "a", "full", "featured", "HTTPRequest", "objects" ]
python
train
36.4
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L812-L840
def rotation_from_axis_and_origin(axis, origin, angle, to_frame='world'): """ Returns a rotation matrix around some arbitrary axis, about the point origin, using Rodrigues Formula Parameters ---------- axis : :obj:`numpy.ndarray` of float 3x1 vector representing which axis we should be rotating about origin : :obj:`numpy.ndarray` of float 3x1 vector representing where the rotation should be centered around angle : float how much to rotate (in radians) to_frame : :obj:`str` A name for the frame of reference to which this transform moves objects. """ axis_hat = np.array([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]], [-axis[1], axis[0], 0]]) # Rodrigues Formula R = RigidTransform( np.eye(3) + np.sin(angle) * axis_hat + (1 - np.cos(angle)) * axis_hat.dot(axis_hat), from_frame=to_frame, to_frame=to_frame ) return RigidTransform(translation=origin, from_frame=to_frame, to_frame=to_frame) \ .dot(R) \ .dot(RigidTransform(translation=-origin, from_frame=to_frame, to_frame=to_frame))
[ "def", "rotation_from_axis_and_origin", "(", "axis", ",", "origin", ",", "angle", ",", "to_frame", "=", "'world'", ")", ":", "axis_hat", "=", "np", ".", "array", "(", "[", "[", "0", ",", "-", "axis", "[", "2", "]", ",", "axis", "[", "1", "]", "]", ",", "[", "axis", "[", "2", "]", ",", "0", ",", "-", "axis", "[", "0", "]", "]", ",", "[", "-", "axis", "[", "1", "]", ",", "axis", "[", "0", "]", ",", "0", "]", "]", ")", "# Rodrigues Formula", "R", "=", "RigidTransform", "(", "np", ".", "eye", "(", "3", ")", "+", "np", ".", "sin", "(", "angle", ")", "*", "axis_hat", "+", "(", "1", "-", "np", ".", "cos", "(", "angle", ")", ")", "*", "axis_hat", ".", "dot", "(", "axis_hat", ")", ",", "from_frame", "=", "to_frame", ",", "to_frame", "=", "to_frame", ")", "return", "RigidTransform", "(", "translation", "=", "origin", ",", "from_frame", "=", "to_frame", ",", "to_frame", "=", "to_frame", ")", ".", "dot", "(", "R", ")", ".", "dot", "(", "RigidTransform", "(", "translation", "=", "-", "origin", ",", "from_frame", "=", "to_frame", ",", "to_frame", "=", "to_frame", ")", ")" ]
Returns a rotation matrix around some arbitrary axis, about the point origin, using Rodrigues Formula Parameters ---------- axis : :obj:`numpy.ndarray` of float 3x1 vector representing which axis we should be rotating about origin : :obj:`numpy.ndarray` of float 3x1 vector representing where the rotation should be centered around angle : float how much to rotate (in radians) to_frame : :obj:`str` A name for the frame of reference to which this transform moves objects.
[ "Returns", "a", "rotation", "matrix", "around", "some", "arbitrary", "axis", "about", "the", "point", "origin", "using", "Rodrigues", "Formula" ]
python
train
43.241379
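The `rotation_from_axis_and_origin` record above composes a translate, rotate, translate-back sequence using the Rodrigues formula. A plain-numpy sketch of the same construction as a single 4x4 homogeneous transform, without the RigidTransform wrapper; the axis, origin and angle below are arbitrary test values:

```python
import numpy as np

def rotate_about_point(axis, origin, angle):
    """4x4 transform rotating `angle` radians about `axis` through the point `origin`."""
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0, -axis[2], axis[1]],
                  [axis[2], 0, -axis[0]],
                  [-axis[1], axis[0], 0]])
    R = np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * K.dot(K)  # Rodrigues formula
    T = np.eye(4)
    T[:3, :3] = R
    T[:3, 3] = np.asarray(origin) - R.dot(origin)  # translate(origin) . R . translate(-origin)
    return T

# A point lying at the rotation origin stays fixed.
T = rotate_about_point([0, 0, 1], [1.0, 2.0, 0.0], np.pi / 2)
print(T.dot([1.0, 2.0, 0.0, 1.0]))  # -> [1. 2. 0. 1.]
```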
mental32/spotify.py
spotify/models/player.py
https://github.com/mental32/spotify.py/blob/bb296cac7c3dd289908906b7069bd80f43950515/spotify/models/player.py#L213-L224
async def transfer(self, device: SomeDevice, ensure_playback: bool = False): """Transfer playback to a new device and determine if it should start playing. Parameters ---------- device : :obj:`SomeDevice` The device on which playback should be started/transferred. ensure_playback : bool if `True` ensure playback happens on new device. else keep the current playback state. """ await self._user.http.transfer_player(str(device), play=ensure_playback)
[ "async", "def", "transfer", "(", "self", ",", "device", ":", "SomeDevice", ",", "ensure_playback", ":", "bool", "=", "False", ")", ":", "await", "self", ".", "_user", ".", "http", ".", "transfer_player", "(", "str", "(", "device", ")", ",", "play", "=", "ensure_playback", ")" ]
Transfer playback to a new device and determine if it should start playing. Parameters ---------- device : :obj:`SomeDevice` The device on which playback should be started/transferred. ensure_playback : bool if `True` ensure playback happens on new device. else keep the current playback state.
[ "Transfer", "playback", "to", "a", "new", "device", "and", "determine", "if", "it", "should", "start", "playing", "." ]
python
test
44.416667
soynatan/django-easy-audit
easyaudit/admin_helpers.py
https://github.com/soynatan/django-easy-audit/blob/03e05bc94beb29fc3e4ff86e313a6fef4b766b4b/easyaudit/admin_helpers.py#L66-L116
def purge_objects(self, request): """ Removes all objects in this table. This action first displays a confirmation page; next, it deletes all objects and redirects back to the change list. """ def truncate_table(model): if settings.TRUNCATE_TABLE_SQL_STATEMENT: from django.db import connection sql = settings.TRUNCATE_TABLE_SQL_STATEMENT.format(db_table=model._meta.db_table) cursor = connection.cursor() cursor.execute(sql) else: model.objects.all().delete() modeladmin = self opts = modeladmin.model._meta # Check that the user has delete permission for the actual model if not request.user.is_superuser: raise PermissionDenied if not modeladmin.has_delete_permission(request): raise PermissionDenied # If the user has already confirmed or cancelled the deletion, # (eventually) do the deletion and return to the change list view again. if request.method == 'POST': if 'btn-confirm' in request.POST: try: n = modeladmin.model.objects.count() truncate_table(modeladmin.model) modeladmin.message_user(request, _("Successfully removed %d rows" % n), messages.SUCCESS); except Exception as e: modeladmin.message_user(request, _(u'ERROR') + ': %r' % e, messages.ERROR) else: modeladmin.message_user(request, _("Action cancelled by user"), messages.SUCCESS); return HttpResponseRedirect(reverse('admin:%s_%s_changelist' % (opts.app_label, opts.model_name))) context = { "title": _("Purge all %s ... are you sure?") % opts.verbose_name_plural, "opts": opts, "app_label": opts.app_label, } # Display the confirmation page return render( request, 'admin/easyaudit/purge_confirmation.html', context )
[ "def", "purge_objects", "(", "self", ",", "request", ")", ":", "def", "truncate_table", "(", "model", ")", ":", "if", "settings", ".", "TRUNCATE_TABLE_SQL_STATEMENT", ":", "from", "django", ".", "db", "import", "connection", "sql", "=", "settings", ".", "TRUNCATE_TABLE_SQL_STATEMENT", ".", "format", "(", "db_table", "=", "model", ".", "_meta", ".", "db_table", ")", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "sql", ")", "else", ":", "model", ".", "objects", ".", "all", "(", ")", ".", "delete", "(", ")", "modeladmin", "=", "self", "opts", "=", "modeladmin", ".", "model", ".", "_meta", "# Check that the user has delete permission for the actual model", "if", "not", "request", ".", "user", ".", "is_superuser", ":", "raise", "PermissionDenied", "if", "not", "modeladmin", ".", "has_delete_permission", "(", "request", ")", ":", "raise", "PermissionDenied", "# If the user has already confirmed or cancelled the deletion,", "# (eventually) do the deletion and return to the change list view again.", "if", "request", ".", "method", "==", "'POST'", ":", "if", "'btn-confirm'", "in", "request", ".", "POST", ":", "try", ":", "n", "=", "modeladmin", ".", "model", ".", "objects", ".", "count", "(", ")", "truncate_table", "(", "modeladmin", ".", "model", ")", "modeladmin", ".", "message_user", "(", "request", ",", "_", "(", "\"Successfully removed %d rows\"", "%", "n", ")", ",", "messages", ".", "SUCCESS", ")", "except", "Exception", "as", "e", ":", "modeladmin", ".", "message_user", "(", "request", ",", "_", "(", "u'ERROR'", ")", "+", "': %r'", "%", "e", ",", "messages", ".", "ERROR", ")", "else", ":", "modeladmin", ".", "message_user", "(", "request", ",", "_", "(", "\"Action cancelled by user\"", ")", ",", "messages", ".", "SUCCESS", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'admin:%s_%s_changelist'", "%", "(", "opts", ".", "app_label", ",", "opts", ".", "model_name", ")", ")", ")", "context", "=", "{", "\"title\"", ":", "_", "(", "\"Purge all %s ... are you sure?\"", ")", "%", "opts", ".", "verbose_name_plural", ",", "\"opts\"", ":", "opts", ",", "\"app_label\"", ":", "opts", ".", "app_label", ",", "}", "# Display the confirmation page", "return", "render", "(", "request", ",", "'admin/easyaudit/purge_confirmation.html'", ",", "context", ")" ]
Removes all objects in this table. This action first displays a confirmation page; next, it deletes all objects and redirects back to the change list.
[ "Removes", "all", "objects", "in", "this", "table", ".", "This", "action", "first", "displays", "a", "confirmation", "page", ";", "next", "it", "deletes", "all", "objects", "and", "redirects", "back", "to", "the", "change", "list", "." ]
python
train
40.470588
line/line-bot-sdk-python
linebot/api.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/api.py#L235-L262
def get_group_member_ids(self, group_id, start=None, timeout=None): """Call get group member IDs API. https://devdocs.line.me/en/#get-group-room-member-ids Gets the user IDs of the members of a group that the bot is in. This includes the user IDs of users who have not added the bot as a friend or has blocked the bot. :param str group_id: Group ID :param str start: continuationToken :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.MemberIds` :return: MemberIds instance """ params = None if start is None else {'start': start} response = self._get( '/v2/bot/group/{group_id}/members/ids'.format(group_id=group_id), params=params, timeout=timeout ) return MemberIds.new_from_json_dict(response.json)
[ "def", "get_group_member_ids", "(", "self", ",", "group_id", ",", "start", "=", "None", ",", "timeout", "=", "None", ")", ":", "params", "=", "None", "if", "start", "is", "None", "else", "{", "'start'", ":", "start", "}", "response", "=", "self", ".", "_get", "(", "'/v2/bot/group/{group_id}/members/ids'", ".", "format", "(", "group_id", "=", "group_id", ")", ",", "params", "=", "params", ",", "timeout", "=", "timeout", ")", "return", "MemberIds", ".", "new_from_json_dict", "(", "response", ".", "json", ")" ]
Call get group member IDs API. https://devdocs.line.me/en/#get-group-room-member-ids Gets the user IDs of the members of a group that the bot is in. This includes the user IDs of users who have not added the bot as a friend or has blocked the bot. :param str group_id: Group ID :param str start: continuationToken :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.MemberIds` :return: MemberIds instance
[ "Call", "get", "group", "member", "IDs", "API", "." ]
python
train
39.464286
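The `get_group_member_ids` record above wraps LINE's continuation-token pagination for group member IDs. A heavily hedged usage sketch: the token, the group ID, and the `member_ids`/`next` attribute names on the returned model are assumptions here, and the endpoint itself is restricted to verified or premium LINE accounts:

```python
from linebot import LineBotApi

line_bot_api = LineBotApi("CHANNEL_ACCESS_TOKEN")   # placeholder credential
group_id = "GROUP_ID"                               # placeholder group

ids, start = [], None
while True:
    page = line_bot_api.get_group_member_ids(group_id, start=start)
    ids.extend(page.member_ids)   # assumed attribute of the MemberIds model
    start = page.next             # assumed continuation-token attribute
    if not start:
        break
```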
saltstack/salt
salt/netapi/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/netapi/__init__.py#L178-L191
def wheel(self, fun, **kwargs): ''' Run :ref:`wheel modules <all-salt.wheel>` synchronously Wraps :py:meth:`salt.wheel.WheelClient.master_call`. Note that wheel functions must be called using keyword arguments. Positional arguments are not supported. :return: Returns the result from the wheel module ''' kwargs['fun'] = fun wheel = salt.wheel.WheelClient(self.opts) return wheel.cmd_sync(kwargs)
[ "def", "wheel", "(", "self", ",", "fun", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'fun'", "]", "=", "fun", "wheel", "=", "salt", ".", "wheel", ".", "WheelClient", "(", "self", ".", "opts", ")", "return", "wheel", ".", "cmd_sync", "(", "kwargs", ")" ]
Run :ref:`wheel modules <all-salt.wheel>` synchronously Wraps :py:meth:`salt.wheel.WheelClient.master_call`. Note that wheel functions must be called using keyword arguments. Positional arguments are not supported. :return: Returns the result from the wheel module
[ "Run", ":", "ref", ":", "wheel", "modules", "<all", "-", "salt", ".", "wheel", ">", "synchronously" ]
python
train
33.285714
google/tangent
tangent/ast.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/ast.py#L83-L90
def copy_node(node): """Copy a node but keep its annotations intact.""" if not isinstance(node, gast.AST): return [copy_node(n) for n in node] new_node = copy.deepcopy(node) setattr(new_node, anno.ANNOTATION_FIELD, getattr(node, anno.ANNOTATION_FIELD, {}).copy()) return new_node
[ "def", "copy_node", "(", "node", ")", ":", "if", "not", "isinstance", "(", "node", ",", "gast", ".", "AST", ")", ":", "return", "[", "copy_node", "(", "n", ")", "for", "n", "in", "node", "]", "new_node", "=", "copy", ".", "deepcopy", "(", "node", ")", "setattr", "(", "new_node", ",", "anno", ".", "ANNOTATION_FIELD", ",", "getattr", "(", "node", ",", "anno", ".", "ANNOTATION_FIELD", ",", "{", "}", ")", ".", "copy", "(", ")", ")", "return", "new_node" ]
Copy a node but keep its annotations intact.
[ "Copy", "a", "node", "but", "keep", "its", "annotations", "intact", "." ]
python
train
37
panosl/django-currencies
currencies/management/commands/updatecurrencies.py
https://github.com/panosl/django-currencies/blob/8d4c6c202ad7c4cc06263ab2c1b1f969bbe99acd/currencies/management/commands/updatecurrencies.py#L15-L21
def add_arguments(self, parser): """Add command arguments""" parser.add_argument(self._source_param, **self._source_kwargs) parser.add_argument('--base', '-b', action='store', help= 'Supply the base currency as code or a settings variable name. ' 'The default is taken from settings CURRENCIES_BASE or SHOP_DEFAULT_CURRENCY, ' 'or the db, otherwise USD')
[ "def", "add_arguments", "(", "self", ",", "parser", ")", ":", "parser", ".", "add_argument", "(", "self", ".", "_source_param", ",", "*", "*", "self", ".", "_source_kwargs", ")", "parser", ".", "add_argument", "(", "'--base'", ",", "'-b'", ",", "action", "=", "'store'", ",", "help", "=", "'Supply the base currency as code or a settings variable name. '", "'The default is taken from settings CURRENCIES_BASE or SHOP_DEFAULT_CURRENCY, '", "'or the db, otherwise USD'", ")" ]
Add command arguments
[ "Add", "command", "arguments" ]
python
train
60.857143
tensorflow/probability
tensorflow_probability/python/stats/sample_stats.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/stats/sample_stats.py#L646-L661
def _make_list_or_1d_tensor(values): """Return a list (preferred) or 1d Tensor from values, if values.ndims < 2.""" values = tf.convert_to_tensor(value=values, name='values') values_ = tf.get_static_value(values) # Static didn't work. if values_ is None: # Cheap way to bring to at least 1d. return values + tf.zeros([1], dtype=values.dtype) # Static worked! if values_.ndim > 1: raise ValueError('values had > 1 dim: {}'.format(values_.shape)) # Cheap way to bring to at least 1d. values_ = values_ + np.zeros([1], dtype=values_.dtype) return list(values_)
[ "def", "_make_list_or_1d_tensor", "(", "values", ")", ":", "values", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "values", ",", "name", "=", "'values'", ")", "values_", "=", "tf", ".", "get_static_value", "(", "values", ")", "# Static didn't work.", "if", "values_", "is", "None", ":", "# Cheap way to bring to at least 1d.", "return", "values", "+", "tf", ".", "zeros", "(", "[", "1", "]", ",", "dtype", "=", "values", ".", "dtype", ")", "# Static worked!", "if", "values_", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "'values had > 1 dim: {}'", ".", "format", "(", "values_", ".", "shape", ")", ")", "# Cheap way to bring to at least 1d.", "values_", "=", "values_", "+", "np", ".", "zeros", "(", "[", "1", "]", ",", "dtype", "=", "values_", ".", "dtype", ")", "return", "list", "(", "values_", ")" ]
Return a list (preferred) or 1d Tensor from values, if values.ndims < 2.
[ "Return", "a", "list", "(", "preferred", ")", "or", "1d", "Tensor", "from", "values", "if", "values", ".", "ndims", "<", "2", "." ]
python
test
36
anlutro/diay.py
diay/__init__.py
https://github.com/anlutro/diay.py/blob/78cfd2b53c8dca3dbac468d620eaa0bb7af08275/diay/__init__.py#L173-L187
def call(self, func, *args, **kwargs): """ Call a function, resolving any type-hinted arguments. """ guessed_kwargs = self._guess_kwargs(func) for key, val in guessed_kwargs.items(): kwargs.setdefault(key, val) try: return func(*args, **kwargs) except TypeError as exc: msg = ( "tried calling function %r but failed, probably " "because it takes arguments that cannot be resolved" ) % func raise DiayException(msg) from exc
[ "def", "call", "(", "self", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "guessed_kwargs", "=", "self", ".", "_guess_kwargs", "(", "func", ")", "for", "key", ",", "val", "in", "guessed_kwargs", ".", "items", "(", ")", ":", "kwargs", ".", "setdefault", "(", "key", ",", "val", ")", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "TypeError", "as", "exc", ":", "msg", "=", "(", "\"tried calling function %r but failed, probably \"", "\"because it takes arguments that cannot be resolved\"", ")", "%", "func", "raise", "DiayException", "(", "msg", ")", "from", "exc" ]
Call a function, resolving any type-hinted arguments.
[ "Call", "a", "function", "resolving", "any", "type", "-", "hinted", "arguments", "." ]
python
train
37.133333
openvax/varcode
varcode/effects/effect_prediction_coding_in_frame.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_prediction_coding_in_frame.py#L110-L267
def predict_in_frame_coding_effect( variant, transcript, trimmed_cdna_ref, trimmed_cdna_alt, sequence_from_start_codon, cds_offset): """Coding effect of an in-frame nucleotide change Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides from the coding sequence of the transcript trimmed_cdna_alt : str Nucleotides to insert in place of the reference nucleotides sequence_from_start_codon : Bio.Seq or str Transcript sequence from the CDS start codon (including the 3' UTR). This sequence includes the 3' UTR since a mutation may delete the stop codon and we'll have to translate past the normal end of the CDS to determine the new protein sequence. cds_offset : int Index of first ref nucleotide, starting from 0 = beginning of coding sequence. If variant is a pure insertion (no ref nucleotides) then this argument indicates the offset *after* which to insert the alt nucleotides. """ ref_codon_start_offset, ref_codon_end_offset, mutant_codons = get_codons( variant=variant, trimmed_cdna_ref=trimmed_cdna_ref, trimmed_cdna_alt=trimmed_cdna_alt, sequence_from_start_codon=sequence_from_start_codon, cds_offset=cds_offset) mutation_affects_start_codon = (ref_codon_start_offset == 0) if mutation_affects_start_codon and mutant_codons[:3] not in START_CODONS: # if we changed a start codon to something else then # we no longer know where the protein begins (or even in # what frame). # TODO: use the Kozak consensus sequence or a predictive model # to identify the most likely start site return StartLoss( variant=variant, transcript=transcript) # rely on Ensembl's annotation of the protein sequence since we can't # easily predict whether the starting nucleotide is a methionine # (most common) or leucine aa_ref = transcript.protein_sequence[ref_codon_start_offset:ref_codon_end_offset] reference_protein_length = len(transcript.protein_sequence) aa_alt, mutant_stop_codon_index, using_three_prime_utr = \ translate_in_frame_mutation( transcript=transcript, ref_codon_start_offset=ref_codon_start_offset, ref_codon_end_offset=ref_codon_end_offset, mutant_codons=mutant_codons) mutant_codons_contain_stop = mutant_stop_codon_index != -1 # trim shared subsequences at the start and end of reference # and mutated amino acid sequences aa_ref, aa_alt, shared_prefix, shared_suffix = \ trim_shared_flanking_strings( aa_ref, aa_alt) n_aa_ref = len(aa_ref) n_aa_alt = len(aa_alt) n_aa_shared = len(shared_prefix) is_insertion = (ref_codon_start_offset == ref_codon_end_offset) # index of first amino acid which is different from the reference aa_mutation_start_offset = ( ref_codon_start_offset + n_aa_shared + is_insertion) if mutant_codons_contain_stop: mutant_stop_codon_index += n_aa_shared if mutation_affects_start_codon and (aa_ref == aa_alt): # Substitution between start codons gets special treatment since, # though superficially synonymous, this could still potentially # cause a start loss / change in reading frame and might be worth # closer scrutiny return AlternateStartCodon( variant=variant, transcript=transcript, ref_codon=transcript.sequence[:3], alt_codon=mutant_codons[:3]) n_ref_amino_acids_after_mutated_site = ( reference_protein_length - aa_mutation_start_offset - 1) if mutant_codons_contain_stop and ( n_aa_alt <= n_ref_amino_acids_after_mutated_site): # if the new coding sequence contains a stop codon, then this is a # PrematureStop mutation if it decreases the length of the protein return PrematureStop( variant=variant, transcript=transcript, 
aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) if (aa_mutation_start_offset > reference_protein_length) or ( n_aa_ref == n_aa_alt == 0): # if inserted nucleotides go after original stop codon or if nothing # is changed in the amino acid sequence then this is a Silent variant return Silent( variant=variant, transcript=transcript, aa_pos=aa_mutation_start_offset, aa_ref=shared_prefix + shared_suffix) elif using_three_prime_utr: # if non-silent mutation is at the end of the protein then # should be a stop-loss return StopLoss( variant, transcript, aa_ref=aa_ref, aa_alt=aa_alt) elif n_aa_alt == 0: return Deletion( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref) elif n_aa_ref == 0: return Insertion( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_alt=aa_alt) elif n_aa_ref == n_aa_alt == 1: # simple substitution e.g. p.V600E return Substitution( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt) else: # multiple amino acids were substituted e.g. p.VQQ39FF return ComplexSubstitution( variant, transcript, aa_mutation_start_offset=aa_mutation_start_offset, aa_ref=aa_ref, aa_alt=aa_alt)
[ "def", "predict_in_frame_coding_effect", "(", "variant", ",", "transcript", ",", "trimmed_cdna_ref", ",", "trimmed_cdna_alt", ",", "sequence_from_start_codon", ",", "cds_offset", ")", ":", "ref_codon_start_offset", ",", "ref_codon_end_offset", ",", "mutant_codons", "=", "get_codons", "(", "variant", "=", "variant", ",", "trimmed_cdna_ref", "=", "trimmed_cdna_ref", ",", "trimmed_cdna_alt", "=", "trimmed_cdna_alt", ",", "sequence_from_start_codon", "=", "sequence_from_start_codon", ",", "cds_offset", "=", "cds_offset", ")", "mutation_affects_start_codon", "=", "(", "ref_codon_start_offset", "==", "0", ")", "if", "mutation_affects_start_codon", "and", "mutant_codons", "[", ":", "3", "]", "not", "in", "START_CODONS", ":", "# if we changed a start codon to something else then", "# we no longer know where the protein begins (or even in", "# what frame).", "# TODO: use the Kozak consensus sequence or a predictive model", "# to identify the most likely start site", "return", "StartLoss", "(", "variant", "=", "variant", ",", "transcript", "=", "transcript", ")", "# rely on Ensembl's annotation of the protein sequence since we can't", "# easily predict whether the starting nucleotide is a methionine", "# (most common) or leucine", "aa_ref", "=", "transcript", ".", "protein_sequence", "[", "ref_codon_start_offset", ":", "ref_codon_end_offset", "]", "reference_protein_length", "=", "len", "(", "transcript", ".", "protein_sequence", ")", "aa_alt", ",", "mutant_stop_codon_index", ",", "using_three_prime_utr", "=", "translate_in_frame_mutation", "(", "transcript", "=", "transcript", ",", "ref_codon_start_offset", "=", "ref_codon_start_offset", ",", "ref_codon_end_offset", "=", "ref_codon_end_offset", ",", "mutant_codons", "=", "mutant_codons", ")", "mutant_codons_contain_stop", "=", "mutant_stop_codon_index", "!=", "-", "1", "# trim shared subsequences at the start and end of reference", "# and mutated amino acid sequences", "aa_ref", ",", "aa_alt", ",", "shared_prefix", ",", "shared_suffix", "=", "trim_shared_flanking_strings", "(", "aa_ref", ",", "aa_alt", ")", "n_aa_ref", "=", "len", "(", "aa_ref", ")", "n_aa_alt", "=", "len", "(", "aa_alt", ")", "n_aa_shared", "=", "len", "(", "shared_prefix", ")", "is_insertion", "=", "(", "ref_codon_start_offset", "==", "ref_codon_end_offset", ")", "# index of first amino acid which is different from the reference", "aa_mutation_start_offset", "=", "(", "ref_codon_start_offset", "+", "n_aa_shared", "+", "is_insertion", ")", "if", "mutant_codons_contain_stop", ":", "mutant_stop_codon_index", "+=", "n_aa_shared", "if", "mutation_affects_start_codon", "and", "(", "aa_ref", "==", "aa_alt", ")", ":", "# Substitution between start codons gets special treatment since,", "# though superficially synonymous, this could still potentially", "# cause a start loss / change in reading frame and might be worth", "# closer scrutiny", "return", "AlternateStartCodon", "(", "variant", "=", "variant", ",", "transcript", "=", "transcript", ",", "ref_codon", "=", "transcript", ".", "sequence", "[", ":", "3", "]", ",", "alt_codon", "=", "mutant_codons", "[", ":", "3", "]", ")", "n_ref_amino_acids_after_mutated_site", "=", "(", "reference_protein_length", "-", "aa_mutation_start_offset", "-", "1", ")", "if", "mutant_codons_contain_stop", "and", "(", "n_aa_alt", "<=", "n_ref_amino_acids_after_mutated_site", ")", ":", "# if the new coding sequence contains a stop codon, then this is a", "# PrematureStop mutation if it decreases the length of the protein", "return", "PrematureStop", "(", 
"variant", "=", "variant", ",", "transcript", "=", "transcript", ",", "aa_mutation_start_offset", "=", "aa_mutation_start_offset", ",", "aa_ref", "=", "aa_ref", ",", "aa_alt", "=", "aa_alt", ")", "if", "(", "aa_mutation_start_offset", ">", "reference_protein_length", ")", "or", "(", "n_aa_ref", "==", "n_aa_alt", "==", "0", ")", ":", "# if inserted nucleotides go after original stop codon or if nothing", "# is changed in the amino acid sequence then this is a Silent variant", "return", "Silent", "(", "variant", "=", "variant", ",", "transcript", "=", "transcript", ",", "aa_pos", "=", "aa_mutation_start_offset", ",", "aa_ref", "=", "shared_prefix", "+", "shared_suffix", ")", "elif", "using_three_prime_utr", ":", "# if non-silent mutation is at the end of the protein then", "# should be a stop-loss", "return", "StopLoss", "(", "variant", ",", "transcript", ",", "aa_ref", "=", "aa_ref", ",", "aa_alt", "=", "aa_alt", ")", "elif", "n_aa_alt", "==", "0", ":", "return", "Deletion", "(", "variant", ",", "transcript", ",", "aa_mutation_start_offset", "=", "aa_mutation_start_offset", ",", "aa_ref", "=", "aa_ref", ")", "elif", "n_aa_ref", "==", "0", ":", "return", "Insertion", "(", "variant", ",", "transcript", ",", "aa_mutation_start_offset", "=", "aa_mutation_start_offset", ",", "aa_alt", "=", "aa_alt", ")", "elif", "n_aa_ref", "==", "n_aa_alt", "==", "1", ":", "# simple substitution e.g. p.V600E", "return", "Substitution", "(", "variant", ",", "transcript", ",", "aa_mutation_start_offset", "=", "aa_mutation_start_offset", ",", "aa_ref", "=", "aa_ref", ",", "aa_alt", "=", "aa_alt", ")", "else", ":", "# multiple amino acids were substituted e.g. p.VQQ39FF", "return", "ComplexSubstitution", "(", "variant", ",", "transcript", ",", "aa_mutation_start_offset", "=", "aa_mutation_start_offset", ",", "aa_ref", "=", "aa_ref", ",", "aa_alt", "=", "aa_alt", ")" ]
Coding effect of an in-frame nucleotide change Parameters ---------- variant : Variant transcript : Transcript trimmed_cdna_ref : str Reference nucleotides from the coding sequence of the transcript trimmed_cdna_alt : str Nucleotides to insert in place of the reference nucleotides sequence_from_start_codon : Bio.Seq or str Transcript sequence from the CDS start codon (including the 3' UTR). This sequence includes the 3' UTR since a mutation may delete the stop codon and we'll have to translate past the normal end of the CDS to determine the new protein sequence. cds_offset : int Index of first ref nucleotide, starting from 0 = beginning of coding sequence. If variant is a pure insertion (no ref nucleotides) then this argument indicates the offset *after* which to insert the alt nucleotides.
[ "Coding", "effect", "of", "an", "in", "-", "frame", "nucleotide", "change" ]
python
train
36.620253
tmoerman/arboreto
arboreto/algo.py
https://github.com/tmoerman/arboreto/blob/3ff7b6f987b32e5774771751dea646fa6feaaa52/arboreto/algo.py#L194-L231
def _prepare_input(expression_data, gene_names, tf_names): """ Wrangle the inputs into the correct formats. :param expression_data: one of: * a pandas DataFrame (rows=observations, columns=genes) * a dense 2D numpy.ndarray * a sparse scipy.sparse.csc_matrix :param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame. :param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used. :return: a triple of: 1. a np.ndarray or scipy.sparse.csc_matrix 2. a list of gene name strings 3. a list of transcription factor name strings. """ if isinstance(expression_data, pd.DataFrame): expression_matrix = expression_data.as_matrix() gene_names = list(expression_data.columns) else: expression_matrix = expression_data assert expression_matrix.shape[1] == len(gene_names) if tf_names is None: tf_names = gene_names elif tf_names == 'all': tf_names = gene_names else: if len(tf_names) == 0: raise ValueError('Specified tf_names is empty') if not set(gene_names).intersection(set(tf_names)): raise ValueError('Intersection of gene_names and tf_names is empty.') return expression_matrix, gene_names, tf_names
[ "def", "_prepare_input", "(", "expression_data", ",", "gene_names", ",", "tf_names", ")", ":", "if", "isinstance", "(", "expression_data", ",", "pd", ".", "DataFrame", ")", ":", "expression_matrix", "=", "expression_data", ".", "as_matrix", "(", ")", "gene_names", "=", "list", "(", "expression_data", ".", "columns", ")", "else", ":", "expression_matrix", "=", "expression_data", "assert", "expression_matrix", ".", "shape", "[", "1", "]", "==", "len", "(", "gene_names", ")", "if", "tf_names", "is", "None", ":", "tf_names", "=", "gene_names", "elif", "tf_names", "==", "'all'", ":", "tf_names", "=", "gene_names", "else", ":", "if", "len", "(", "tf_names", ")", "==", "0", ":", "raise", "ValueError", "(", "'Specified tf_names is empty'", ")", "if", "not", "set", "(", "gene_names", ")", ".", "intersection", "(", "set", "(", "tf_names", ")", ")", ":", "raise", "ValueError", "(", "'Intersection of gene_names and tf_names is empty.'", ")", "return", "expression_matrix", ",", "gene_names", ",", "tf_names" ]
Wrangle the inputs into the correct formats. :param expression_data: one of: * a pandas DataFrame (rows=observations, columns=genes) * a dense 2D numpy.ndarray * a sparse scipy.sparse.csc_matrix :param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as 'expression_data' instead of a DataFrame. :param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used. :return: a triple of: 1. a np.ndarray or scipy.sparse.csc_matrix 2. a list of gene name strings 3. a list of transcription factor name strings.
[ "Wrangle", "the", "inputs", "into", "the", "correct", "formats", "." ]
python
train
40.263158
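The _prepare_input record above normalizes three possible expression inputs into a (matrix, gene_names, tf_names) triple. Here is a hedged, runnable sketch of the same wrangling with current pandas; note that DataFrame.as_matrix(), used in the original snippet, was removed in pandas 1.0, so the sketch uses .to_numpy() instead.

import numpy as np
import pandas as pd

def prepare_input_sketch(expression_data, gene_names=None, tf_names=None):
    # DataFrame input: observations in rows, genes in columns
    if isinstance(expression_data, pd.DataFrame):
        matrix = expression_data.to_numpy()
        gene_names = list(expression_data.columns)
    else:
        matrix = expression_data
        assert gene_names is not None and matrix.shape[1] == len(gene_names)
    if tf_names is None or tf_names == 'all':
        tf_names = gene_names
    elif not set(gene_names) & set(tf_names):
        raise ValueError('Intersection of gene_names and tf_names is empty.')
    return matrix, gene_names, tf_names

df = pd.DataFrame(np.random.rand(5, 3), columns=['g1', 'g2', 'g3'])
matrix, genes, tfs = prepare_input_sketch(df, tf_names=['g1', 'g3'])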
quantopian/zipline
zipline/data/minute_bars.py
https://github.com/quantopian/zipline/blob/77ad15e6dc4c1cbcdc133653bac8a63fc704f7fe/zipline/data/minute_bars.py#L661-L666
def set_sid_attrs(self, sid, **kwargs): """Write all the supplied kwargs as attributes of the sid's file. """ table = self._ensure_ctable(sid) for k, v in kwargs.items(): table.attrs[k] = v
[ "def", "set_sid_attrs", "(", "self", ",", "sid", ",", "*", "*", "kwargs", ")", ":", "table", "=", "self", ".", "_ensure_ctable", "(", "sid", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "table", ".", "attrs", "[", "k", "]", "=", "v" ]
Write all the supplied kwargs as attributes of the sid's file.
[ "Write", "all", "the", "supplied", "kwargs", "as", "attributes", "of", "the", "sid", "s", "file", "." ]
python
train
38
guykisel/inline-plz
inlineplz/interfaces/github.py
https://github.com/guykisel/inline-plz/blob/b5b1744e9156e31f68b519c0d8022feff79888ae/inlineplz/interfaces/github.py#L209-L232
def finish_review(self, success=True, error=False): """Mark our review as finished.""" if self.set_status: if error: self.github_repo.create_status( state="error", description="Static analysis error! inline-plz failed to run.", context="inline-plz", sha=self.last_sha, ) elif success: self.github_repo.create_status( state="success", description="Static analysis complete! No errors found in your PR.", context="inline-plz", sha=self.last_sha, ) else: self.github_repo.create_status( state="failure", description="Static analysis complete! Found errors in your PR.", context="inline-plz", sha=self.last_sha, )
[ "def", "finish_review", "(", "self", ",", "success", "=", "True", ",", "error", "=", "False", ")", ":", "if", "self", ".", "set_status", ":", "if", "error", ":", "self", ".", "github_repo", ".", "create_status", "(", "state", "=", "\"error\"", ",", "description", "=", "\"Static analysis error! inline-plz failed to run.\"", ",", "context", "=", "\"inline-plz\"", ",", "sha", "=", "self", ".", "last_sha", ",", ")", "elif", "success", ":", "self", ".", "github_repo", ".", "create_status", "(", "state", "=", "\"success\"", ",", "description", "=", "\"Static analysis complete! No errors found in your PR.\"", ",", "context", "=", "\"inline-plz\"", ",", "sha", "=", "self", ".", "last_sha", ",", ")", "else", ":", "self", ".", "github_repo", ".", "create_status", "(", "state", "=", "\"failure\"", ",", "description", "=", "\"Static analysis complete! Found errors in your PR.\"", ",", "context", "=", "\"inline-plz\"", ",", "sha", "=", "self", ".", "last_sha", ",", ")" ]
Mark our review as finished.
[ "Mark", "our", "review", "as", "finished", "." ]
python
train
40.583333
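The finish_review record above reduces to a three-way mapping from (success, error) flags to a commit status. A tiny stand-alone version of just that mapping, with the status strings copied from the snippet and the GitHub API call itself omitted:

def pick_status(success=True, error=False):
    """Choose the commit-status state and description for an inline-plz run."""
    if error:
        return 'error', 'Static analysis error! inline-plz failed to run.'
    if success:
        return 'success', 'Static analysis complete! No errors found in your PR.'
    return 'failure', 'Static analysis complete! Found errors in your PR.'

print(pick_status(success=False))   # ('failure', 'Static analysis complete! Found errors in your PR.')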
peerplays-network/python-peerplays
peerplaysapi/websocket.py
https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplaysapi/websocket.py#L216-L263
def on_message(self, ws, reply, *args): """ This method is called by the websocket connection on every message that is received. If we receive a ``notice``, we hand over post-processing and signalling of events to ``process_notice``. """ log.debug("Received message: %s" % str(reply)) data = {} try: data = json.loads(reply, strict=False) except ValueError: raise ValueError("API node returned invalid format. Expected JSON!") if data.get("method") == "notice": id = data["params"][0] if id >= len(self.__events__): log.critical("Received an id that is out of range\n\n" + str(data)) return # This is a "general" object change notification if id == self.__events__.index("on_object"): # Let's see if a specific object has changed for notice in data["params"][1]: try: if "id" in notice: self.process_notice(notice) else: for obj in notice: if "id" in obj: self.process_notice(obj) except Exception as e: log.critical( "Error in process_notice: {}\n\n{}".format( str(e), traceback.format_exc ) ) else: try: callbackname = self.__events__[id] log.info("Patching through to call %s" % callbackname) [getattr(self.events, callbackname)(x) for x in data["params"][1]] except Exception as e: log.critical( "Error in {}: {}\n\n{}".format( callbackname, str(e), traceback.format_exc() ) )
[ "def", "on_message", "(", "self", ",", "ws", ",", "reply", ",", "*", "args", ")", ":", "log", ".", "debug", "(", "\"Received message: %s\"", "%", "str", "(", "reply", ")", ")", "data", "=", "{", "}", "try", ":", "data", "=", "json", ".", "loads", "(", "reply", ",", "strict", "=", "False", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"API node returned invalid format. Expected JSON!\"", ")", "if", "data", ".", "get", "(", "\"method\"", ")", "==", "\"notice\"", ":", "id", "=", "data", "[", "\"params\"", "]", "[", "0", "]", "if", "id", ">=", "len", "(", "self", ".", "__events__", ")", ":", "log", ".", "critical", "(", "\"Received an id that is out of range\\n\\n\"", "+", "str", "(", "data", ")", ")", "return", "# This is a \"general\" object change notification", "if", "id", "==", "self", ".", "__events__", ".", "index", "(", "\"on_object\"", ")", ":", "# Let's see if a specific object has changed", "for", "notice", "in", "data", "[", "\"params\"", "]", "[", "1", "]", ":", "try", ":", "if", "\"id\"", "in", "notice", ":", "self", ".", "process_notice", "(", "notice", ")", "else", ":", "for", "obj", "in", "notice", ":", "if", "\"id\"", "in", "obj", ":", "self", ".", "process_notice", "(", "obj", ")", "except", "Exception", "as", "e", ":", "log", ".", "critical", "(", "\"Error in process_notice: {}\\n\\n{}\"", ".", "format", "(", "str", "(", "e", ")", ",", "traceback", ".", "format_exc", ")", ")", "else", ":", "try", ":", "callbackname", "=", "self", ".", "__events__", "[", "id", "]", "log", ".", "info", "(", "\"Patching through to call %s\"", "%", "callbackname", ")", "[", "getattr", "(", "self", ".", "events", ",", "callbackname", ")", "(", "x", ")", "for", "x", "in", "data", "[", "\"params\"", "]", "[", "1", "]", "]", "except", "Exception", "as", "e", ":", "log", ".", "critical", "(", "\"Error in {}: {}\\n\\n{}\"", ".", "format", "(", "callbackname", ",", "str", "(", "e", ")", ",", "traceback", ".", "format_exc", "(", ")", ")", ")" ]
This method is called by the websocket connection on every message that is received. If we receive a ``notice``, we hand over post-processing and signalling of events to ``process_notice``.
[ "This", "method", "is", "called", "by", "the", "websocket", "connection", "on", "every", "message", "that", "is", "received", ".", "If", "we", "receive", "a", "notice", "we", "hand", "over", "post", "-", "processing", "and", "signalling", "of", "events", "to", "process_notice", "." ]
python
train
42.333333
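The on_message handler above decodes a JSON frame, treats params[0] of a "notice" as an index into the subscription order, and fans the payload out to callbacks. A minimal, self-contained dispatcher in the same spirit; the EVENTS list and callback wiring are purely illustrative.

import json

EVENTS = ['on_tx', 'on_object', 'on_block']   # assumed subscription order

def dispatch(reply, callbacks):
    """Route a 'notice' frame to the callback registered for its subscription id."""
    data = json.loads(reply, strict=False)
    if data.get('method') != 'notice':
        return
    subscription_id = data['params'][0]
    if subscription_id >= len(EVENTS):
        raise IndexError('Received an id that is out of range: %r' % data)
    callback = callbacks[EVENTS[subscription_id]]
    for payload in data['params'][1]:
        callback(payload)

dispatch('{"method": "notice", "params": [1, [{"id": "2.1.0"}]]}', {'on_object': print})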
VJftw/invoke-tools
invoke_tools/vcs/git_scm.py
https://github.com/VJftw/invoke-tools/blob/9584a1f8a402118310b6f2a495062f388fc8dc3a/invoke_tools/vcs/git_scm.py#L19-L35
def get_branch(self): """ :return: """ if self.repo.head.is_detached: if os.getenv('GIT_BRANCH'): branch = os.getenv('GIT_BRANCH') elif os.getenv('BRANCH_NAME'): branch = os.getenv('BRANCH_NAME') elif os.getenv('TRAVIS_BRANCH'): branch = os.getenv('TRAVIS_BRANCH') else: branch = "HEAD" else: branch = str(self.repo.active_branch) return branch.replace("/", "_")
[ "def", "get_branch", "(", "self", ")", ":", "if", "self", ".", "repo", ".", "head", ".", "is_detached", ":", "if", "os", ".", "getenv", "(", "'GIT_BRANCH'", ")", ":", "branch", "=", "os", ".", "getenv", "(", "'GIT_BRANCH'", ")", "elif", "os", ".", "getenv", "(", "'BRANCH_NAME'", ")", ":", "branch", "=", "os", ".", "getenv", "(", "'BRANCH_NAME'", ")", "elif", "os", ".", "getenv", "(", "'TRAVIS_BRANCH'", ")", ":", "branch", "=", "os", ".", "getenv", "(", "'TRAVIS_BRANCH'", ")", "else", ":", "branch", "=", "\"HEAD\"", "else", ":", "branch", "=", "str", "(", "self", ".", "repo", ".", "active_branch", ")", "return", "branch", ".", "replace", "(", "\"/\"", ",", "\"_\"", ")" ]
:return:
[ ":", "return", ":" ]
python
train
30.529412
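get_branch above falls back to CI environment variables when HEAD is detached; the lookup order can be expressed as a short loop. Variable names and the slash sanitisation mirror the snippet, the rest is a sketch rather than the library's code.

import os

def branch_from_env(default='HEAD'):
    """Return the first CI environment variable that names a branch, with '/' made filename-safe."""
    for var in ('GIT_BRANCH', 'BRANCH_NAME', 'TRAVIS_BRANCH'):
        value = os.getenv(var)
        if value:
            return value.replace('/', '_')
    return default

print(branch_from_env())   # 'HEAD' unless one of the variables is set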
mlperf/training
translation/tensorflow/transformer/utils/tokenizer.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/translation/tensorflow/transformer/utils/tokenizer.py#L481-L499
def _filter_and_bucket_subtokens(subtoken_counts, min_count): """Return a bucketed list of subtokens that are filtered by count. Args: subtoken_counts: defaultdict mapping subtokens to their counts min_count: int count used to filter subtokens Returns: List of subtoken sets, where subtokens in set i have the same length=i. """ # Create list of buckets, where subtokens in bucket i have length i. subtoken_buckets = [] for subtoken, count in six.iteritems(subtoken_counts): if count < min_count: # Filter out subtokens that don't appear enough continue while len(subtoken_buckets) <= len(subtoken): subtoken_buckets.append(set()) subtoken_buckets[len(subtoken)].add(subtoken) return subtoken_buckets
[ "def", "_filter_and_bucket_subtokens", "(", "subtoken_counts", ",", "min_count", ")", ":", "# Create list of buckets, where subtokens in bucket i have length i.", "subtoken_buckets", "=", "[", "]", "for", "subtoken", ",", "count", "in", "six", ".", "iteritems", "(", "subtoken_counts", ")", ":", "if", "count", "<", "min_count", ":", "# Filter out subtokens that don't appear enough", "continue", "while", "len", "(", "subtoken_buckets", ")", "<=", "len", "(", "subtoken", ")", ":", "subtoken_buckets", ".", "append", "(", "set", "(", ")", ")", "subtoken_buckets", "[", "len", "(", "subtoken", ")", "]", ".", "add", "(", "subtoken", ")", "return", "subtoken_buckets" ]
Return a bucketed list of subtokens that are filtered by count. Args: subtoken_counts: defaultdict mapping subtokens to their counts min_count: int count used to filter subtokens Returns: List of subtoken sets, where subtokens in set i have the same length=i.
[ "Return", "a", "bucketed", "list", "of", "subtokens", "that", "are", "filtered", "by", "count", "." ]
python
train
38.789474
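_filter_and_bucket_subtokens above drops rare subtokens and groups the survivors into sets keyed by token length. The same idea works on a plain dict of counts:

def bucket_by_length(counts, min_count):
    """Group subtokens whose count is at least min_count into sets indexed by token length."""
    buckets = []
    for subtoken, count in counts.items():
        if count < min_count:
            continue
        while len(buckets) <= len(subtoken):
            buckets.append(set())
        buckets[len(subtoken)].add(subtoken)
    return buckets

print(bucket_by_length({'a': 5, 'ab': 1, 'abc': 7}, min_count=2))
# [set(), {'a'}, set(), {'abc'}]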
FactoryBoy/factory_boy
factory/random.py
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/random.py#L16-L21
def set_random_state(state): """Force-set the state of factory.fuzzy's random generator.""" randgen.state_set = True randgen.setstate(state) faker.generator.random.setstate(state)
[ "def", "set_random_state", "(", "state", ")", ":", "randgen", ".", "state_set", "=", "True", "randgen", ".", "setstate", "(", "state", ")", "faker", ".", "generator", ".", "random", ".", "setstate", "(", "state", ")" ]
Force-set the state of factory.fuzzy's random generator.
[ "Force", "-", "set", "the", "state", "of", "factory", ".", "fuzzy", "s", "random", "generator", "." ]
python
train
31.833333
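set_random_state above force-sets the shared generator so fuzzy values replay deterministically. The stdlib equivalent of that capture/restore pattern, which is all the sketch below relies on (it does not touch factory_boy's own generator):

import random

state = random.getstate()                       # capture the generator state
first = [random.random() for _ in range(3)]
random.setstate(state)                          # restore it, as the helper above does for factory.fuzzy
replay = [random.random() for _ in range(3)]
assert first == replay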
materials-data-facility/toolbox
mdf_toolbox/search_helper.py
https://github.com/materials-data-facility/toolbox/blob/2a4ac2b6a892238263008efa6a5f3923d9a83505/mdf_toolbox/search_helper.py#L522-L541
def match_exists(self, field, required=True, new_group=False): """Require a field to exist in the results. Matches will have some value in ``field``. Arguments: field (str): The field to check. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. required (bool): If ``True``, will add term with ``AND``. If ``False``, will use ``OR``. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self """ return self.match_field(field, "*", required=required, new_group=new_group)
[ "def", "match_exists", "(", "self", ",", "field", ",", "required", "=", "True", ",", "new_group", "=", "False", ")", ":", "return", "self", ".", "match_field", "(", "field", ",", "\"*\"", ",", "required", "=", "required", ",", "new_group", "=", "new_group", ")" ]
Require a field to exist in the results. Matches will have some value in ``field``. Arguments: field (str): The field to check. The field must be namespaced according to Elasticsearch rules using the dot syntax. For example, ``"mdf.source_name"`` is the ``source_name`` field of the ``mdf`` dictionary. required (bool): If ``True``, will add term with ``AND``. If ``False``, will use ``OR``. **Default:** ``True``. new_group (bool): If ``True``, will separate the term into a new parenthetical group. If ``False``, will not. **Default:** ``False``. Returns: SearchHelper: Self
[ "Require", "a", "field", "to", "exist", "in", "the", "results", ".", "Matches", "will", "have", "some", "value", "in", "field", "." ]
python
train
47.05
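match_exists above is simply match_field(field, "*"), i.e. an existence test expressed as a wildcard term. A simplified sketch of the kind of query string such a helper accumulates; the AND/OR joining is an assumption about the query syntax, not SearchHelper's internals.

def add_exists_term(query, field, required=True):
    """Append a 'field:*' wildcard term to a Lucene-style query string."""
    term = field + ':*'
    if not query:
        return term
    return '{} {} {}'.format(query, 'AND' if required else 'OR', term)

q = add_exists_term('', 'mdf.source_name')
q = add_exists_term(q, 'dc.title', required=False)
print(q)   # mdf.source_name:* OR dc.title:*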
genialis/resolwe
resolwe/flow/models/data.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L338-L401
def create_entity(self): """Create entity if `flow_collection` is defined in process. Following rules applies for adding `Data` object to `Entity`: * Only add `Data object` to `Entity` if process has defined `flow_collection` field * Add object to existing `Entity`, if all parents that are part of it (but not necessary all parents), are part of the same `Entity` * If parents belong to different `Entities` or do not belong to any `Entity`, create new `Entity` """ entity_type = self.process.entity_type # pylint: disable=no-member entity_descriptor_schema = self.process.entity_descriptor_schema # pylint: disable=no-member entity_input = self.process.entity_input # pylint: disable=no-member if entity_type: data_filter = {} if entity_input: input_id = dict_dot(self.input, entity_input, default=lambda: None) if input_id is None: logger.warning("Skipping creation of entity due to missing input.") return if isinstance(input_id, int): data_filter['data__pk'] = input_id elif isinstance(input_id, list): data_filter['data__pk__in'] = input_id else: raise ValueError( "Cannot create entity due to invalid value of field {}.".format(entity_input) ) else: data_filter['data__in'] = self.parents.all() # pylint: disable=no-member entity_query = Entity.objects.filter(type=entity_type, **data_filter).distinct() entity_count = entity_query.count() if entity_count == 0: descriptor_schema = DescriptorSchema.objects.filter( slug=entity_descriptor_schema ).latest() entity = Entity.objects.create( contributor=self.contributor, descriptor_schema=descriptor_schema, type=entity_type, name=self.name, tags=self.tags, ) assign_contributor_permissions(entity) elif entity_count == 1: entity = entity_query.first() copy_permissions(entity, self) else: logger.info("Skipping creation of entity due to multiple entities found.") entity = None if entity: entity.data.add(self) # Inherit collections from entity. for collection in entity.collections.all(): collection.data.add(self)
[ "def", "create_entity", "(", "self", ")", ":", "entity_type", "=", "self", ".", "process", ".", "entity_type", "# pylint: disable=no-member", "entity_descriptor_schema", "=", "self", ".", "process", ".", "entity_descriptor_schema", "# pylint: disable=no-member", "entity_input", "=", "self", ".", "process", ".", "entity_input", "# pylint: disable=no-member", "if", "entity_type", ":", "data_filter", "=", "{", "}", "if", "entity_input", ":", "input_id", "=", "dict_dot", "(", "self", ".", "input", ",", "entity_input", ",", "default", "=", "lambda", ":", "None", ")", "if", "input_id", "is", "None", ":", "logger", ".", "warning", "(", "\"Skipping creation of entity due to missing input.\"", ")", "return", "if", "isinstance", "(", "input_id", ",", "int", ")", ":", "data_filter", "[", "'data__pk'", "]", "=", "input_id", "elif", "isinstance", "(", "input_id", ",", "list", ")", ":", "data_filter", "[", "'data__pk__in'", "]", "=", "input_id", "else", ":", "raise", "ValueError", "(", "\"Cannot create entity due to invalid value of field {}.\"", ".", "format", "(", "entity_input", ")", ")", "else", ":", "data_filter", "[", "'data__in'", "]", "=", "self", ".", "parents", ".", "all", "(", ")", "# pylint: disable=no-member", "entity_query", "=", "Entity", ".", "objects", ".", "filter", "(", "type", "=", "entity_type", ",", "*", "*", "data_filter", ")", ".", "distinct", "(", ")", "entity_count", "=", "entity_query", ".", "count", "(", ")", "if", "entity_count", "==", "0", ":", "descriptor_schema", "=", "DescriptorSchema", ".", "objects", ".", "filter", "(", "slug", "=", "entity_descriptor_schema", ")", ".", "latest", "(", ")", "entity", "=", "Entity", ".", "objects", ".", "create", "(", "contributor", "=", "self", ".", "contributor", ",", "descriptor_schema", "=", "descriptor_schema", ",", "type", "=", "entity_type", ",", "name", "=", "self", ".", "name", ",", "tags", "=", "self", ".", "tags", ",", ")", "assign_contributor_permissions", "(", "entity", ")", "elif", "entity_count", "==", "1", ":", "entity", "=", "entity_query", ".", "first", "(", ")", "copy_permissions", "(", "entity", ",", "self", ")", "else", ":", "logger", ".", "info", "(", "\"Skipping creation of entity due to multiple entities found.\"", ")", "entity", "=", "None", "if", "entity", ":", "entity", ".", "data", ".", "add", "(", "self", ")", "# Inherit collections from entity.", "for", "collection", "in", "entity", ".", "collections", ".", "all", "(", ")", ":", "collection", ".", "data", ".", "add", "(", "self", ")" ]
Create entity if `flow_collection` is defined in process. Following rules applies for adding `Data` object to `Entity`: * Only add `Data object` to `Entity` if process has defined `flow_collection` field * Add object to existing `Entity`, if all parents that are part of it (but not necessary all parents), are part of the same `Entity` * If parents belong to different `Entities` or do not belong to any `Entity`, create new `Entity`
[ "Create", "entity", "if", "flow_collection", "is", "defined", "in", "process", "." ]
python
train
42.359375
invoice-x/invoice2data
src/invoice2data/main.py
https://github.com/invoice-x/invoice2data/blob/d97fdc5db9c1844fd77fa64f8ea7c42fefd0ba20/src/invoice2data/main.py#L170-L215
def main(args=None): """Take folder or single file and analyze each.""" if args is None: parser = create_parser() args = parser.parse_args() if args.debug: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(level=logging.INFO) input_module = input_mapping[args.input_reader] output_module = output_mapping[args.output_format] templates = [] # Load templates from external folder if set. if args.template_folder: templates += read_templates(os.path.abspath(args.template_folder)) # Load internal templates, if not disabled. if not args.exclude_built_in_templates: templates += read_templates() output = [] for f in args.input_files: res = extract_data(f.name, templates=templates, input_module=input_module) if res: logger.info(res) output.append(res) if args.copy: filename = args.filename.format( date=res['date'].strftime('%Y-%m-%d'), invoice_number=res['invoice_number'], desc=res['desc'], ) shutil.copyfile(f.name, join(args.copy, filename)) if args.move: filename = args.filename.format( date=res['date'].strftime('%Y-%m-%d'), invoice_number=res['invoice_number'], desc=res['desc'], ) shutil.move(f.name, join(args.move, filename)) f.close() if output_module is not None: output_module.write_to_file(output, args.output_name)
[ "def", "main", "(", "args", "=", "None", ")", ":", "if", "args", "is", "None", ":", "parser", "=", "create_parser", "(", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "debug", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "input_module", "=", "input_mapping", "[", "args", ".", "input_reader", "]", "output_module", "=", "output_mapping", "[", "args", ".", "output_format", "]", "templates", "=", "[", "]", "# Load templates from external folder if set.", "if", "args", ".", "template_folder", ":", "templates", "+=", "read_templates", "(", "os", ".", "path", ".", "abspath", "(", "args", ".", "template_folder", ")", ")", "# Load internal templates, if not disabled.", "if", "not", "args", ".", "exclude_built_in_templates", ":", "templates", "+=", "read_templates", "(", ")", "output", "=", "[", "]", "for", "f", "in", "args", ".", "input_files", ":", "res", "=", "extract_data", "(", "f", ".", "name", ",", "templates", "=", "templates", ",", "input_module", "=", "input_module", ")", "if", "res", ":", "logger", ".", "info", "(", "res", ")", "output", ".", "append", "(", "res", ")", "if", "args", ".", "copy", ":", "filename", "=", "args", ".", "filename", ".", "format", "(", "date", "=", "res", "[", "'date'", "]", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "invoice_number", "=", "res", "[", "'invoice_number'", "]", ",", "desc", "=", "res", "[", "'desc'", "]", ",", ")", "shutil", ".", "copyfile", "(", "f", ".", "name", ",", "join", "(", "args", ".", "copy", ",", "filename", ")", ")", "if", "args", ".", "move", ":", "filename", "=", "args", ".", "filename", ".", "format", "(", "date", "=", "res", "[", "'date'", "]", ".", "strftime", "(", "'%Y-%m-%d'", ")", ",", "invoice_number", "=", "res", "[", "'invoice_number'", "]", ",", "desc", "=", "res", "[", "'desc'", "]", ",", ")", "shutil", ".", "move", "(", "f", ".", "name", ",", "join", "(", "args", ".", "move", ",", "filename", ")", ")", "f", ".", "close", "(", ")", "if", "output_module", "is", "not", "None", ":", "output_module", ".", "write_to_file", "(", "output", ",", "args", ".", "output_name", ")" ]
Take folder or single file and analyze each.
[ "Take", "folder", "or", "single", "file", "and", "analyze", "each", "." ]
python
train
34.913043
spyder-ide/spyder
spyder/preferences/languageserver.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/preferences/languageserver.py#L512-L516
def selection(self, index): """Update selected row.""" self.update() self.isActiveWindow() self._parent.delete_btn.setEnabled(True)
[ "def", "selection", "(", "self", ",", "index", ")", ":", "self", ".", "update", "(", ")", "self", ".", "isActiveWindow", "(", ")", "self", ".", "_parent", ".", "delete_btn", ".", "setEnabled", "(", "True", ")" ]
Update selected row.
[ "Update", "selected", "row", "." ]
python
train
31.8
Microsoft/malmo
Malmo/samples/Python_examples/human_action.py
https://github.com/Microsoft/malmo/blob/4139cd6f3e52f6e893a931a1d4b70d35f8e70e5a/Malmo/samples/Python_examples/human_action.py#L205-L222
def update(self): '''Called at regular intervals to poll the mouse position to send continuous commands.''' if self.action_space == 'continuous': # mouse movement only used for continuous action space if self.world_state and self.world_state.is_mission_running: if self.mouse_event and self.prev_mouse_event: rotation_speed = 0.1 turn_speed = ( self.mouse_event.x - self.prev_mouse_event.x ) * rotation_speed pitch_speed = ( self.mouse_event.y - self.prev_mouse_event.y ) * rotation_speed self.agent_host.sendCommand( 'turn '+str(turn_speed) ) self.agent_host.sendCommand( 'pitch '+str(pitch_speed) ) if self.mouse_event: if os.name == 'nt': # (moving the mouse cursor only seems to work on Windows) self.canvas.event_generate('<Motion>', warp=True, x=old_div(self.canvas.winfo_width(),2), y=old_div(self.canvas.winfo_height(),2)) # put cursor at center self.mouse_event.x = old_div(self.canvas.winfo_width(),2) self.mouse_event.y = old_div(self.canvas.winfo_height(),2) self.prev_mouse_event = self.mouse_event if self.world_state.is_mission_running: self.root.after(50, self.update)
[ "def", "update", "(", "self", ")", ":", "if", "self", ".", "action_space", "==", "'continuous'", ":", "# mouse movement only used for continuous action space", "if", "self", ".", "world_state", "and", "self", ".", "world_state", ".", "is_mission_running", ":", "if", "self", ".", "mouse_event", "and", "self", ".", "prev_mouse_event", ":", "rotation_speed", "=", "0.1", "turn_speed", "=", "(", "self", ".", "mouse_event", ".", "x", "-", "self", ".", "prev_mouse_event", ".", "x", ")", "*", "rotation_speed", "pitch_speed", "=", "(", "self", ".", "mouse_event", ".", "y", "-", "self", ".", "prev_mouse_event", ".", "y", ")", "*", "rotation_speed", "self", ".", "agent_host", ".", "sendCommand", "(", "'turn '", "+", "str", "(", "turn_speed", ")", ")", "self", ".", "agent_host", ".", "sendCommand", "(", "'pitch '", "+", "str", "(", "pitch_speed", ")", ")", "if", "self", ".", "mouse_event", ":", "if", "os", ".", "name", "==", "'nt'", ":", "# (moving the mouse cursor only seems to work on Windows)", "self", ".", "canvas", ".", "event_generate", "(", "'<Motion>'", ",", "warp", "=", "True", ",", "x", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_width", "(", ")", ",", "2", ")", ",", "y", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_height", "(", ")", ",", "2", ")", ")", "# put cursor at center", "self", ".", "mouse_event", ".", "x", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_width", "(", ")", ",", "2", ")", "self", ".", "mouse_event", ".", "y", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_height", "(", ")", ",", "2", ")", "self", ".", "prev_mouse_event", "=", "self", ".", "mouse_event", "if", "self", ".", "world_state", ".", "is_mission_running", ":", "self", ".", "root", ".", "after", "(", "50", ",", "self", ".", "update", ")" ]
Called at regular intervals to poll the mouse position to send continuous commands.
[ "Called", "at", "regular", "intervals", "to", "poll", "the", "mouse", "position", "to", "send", "continuous", "commands", "." ]
python
train
76.611111
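The continuous-action branch of update() above turns the mouse displacement since the last poll into turn/pitch commands scaled by rotation_speed. Isolated from Tkinter and the agent host, the arithmetic is simply:

def mouse_to_commands(prev, current, rotation_speed=0.1):
    """Convert a mouse displacement into Malmo-style continuous turn/pitch command strings."""
    turn_speed = (current[0] - prev[0]) * rotation_speed
    pitch_speed = (current[1] - prev[1]) * rotation_speed
    return 'turn ' + str(turn_speed), 'pitch ' + str(pitch_speed)

print(mouse_to_commands((200, 150), (210, 148)))   # ('turn 1.0', 'pitch -0.2')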
Nic30/hwt
hwt/simulator/vcdHdlSimConfig.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/simulator/vcdHdlSimConfig.py#L24-L33
def vcdTypeInfoForHType(t) -> Tuple[str, int, Callable[[RtlSignalBase, Value], str]]: """ :return: (vcd type name, vcd width) """ if isinstance(t, (SimBitsT, Bits, HBool)): return (VCD_SIG_TYPE.WIRE, t.bit_length(), vcdBitsFormatter) elif isinstance(t, HEnum): return (VCD_SIG_TYPE.REAL, 1, vcdEnumFormatter) else: raise ValueError(t)
[ "def", "vcdTypeInfoForHType", "(", "t", ")", "->", "Tuple", "[", "str", ",", "int", ",", "Callable", "[", "[", "RtlSignalBase", ",", "Value", "]", ",", "str", "]", "]", ":", "if", "isinstance", "(", "t", ",", "(", "SimBitsT", ",", "Bits", ",", "HBool", ")", ")", ":", "return", "(", "VCD_SIG_TYPE", ".", "WIRE", ",", "t", ".", "bit_length", "(", ")", ",", "vcdBitsFormatter", ")", "elif", "isinstance", "(", "t", ",", "HEnum", ")", ":", "return", "(", "VCD_SIG_TYPE", ".", "REAL", ",", "1", ",", "vcdEnumFormatter", ")", "else", ":", "raise", "ValueError", "(", "t", ")" ]
:return: (vcd type name, vcd width)
[ ":", "return", ":", "(", "vcd", "type", "name", "vcd", "width", ")" ]
python
test
37.3
b3j0f/conf
b3j0f/conf/parser/resolver/registry.py
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/parser/resolver/registry.py#L112-L131
def default(self, value): """Change of resolver name. :param value: new default value to use. :type value: str or callable :raises: KeyError if value is a string not already registered.""" if value is None: if self: value = list(self.keys())[0] elif not isinstance(value, string_types): value = register(exprresolver=value, reg=self) elif value not in self: raise KeyError( '{0} not registered in {1}'.format(value, self) ) self._default = value
[ "def", "default", "(", "self", ",", "value", ")", ":", "if", "value", "is", "None", ":", "if", "self", ":", "value", "=", "list", "(", "self", ".", "keys", "(", ")", ")", "[", "0", "]", "elif", "not", "isinstance", "(", "value", ",", "string_types", ")", ":", "value", "=", "register", "(", "exprresolver", "=", "value", ",", "reg", "=", "self", ")", "elif", "value", "not", "in", "self", ":", "raise", "KeyError", "(", "'{0} not registered in {1}'", ".", "format", "(", "value", ",", "self", ")", ")", "self", ".", "_default", "=", "value" ]
Change of resolver name. :param value: new default value to use. :type value: str or callable :raises: KeyError if value is a string not already registered.
[ "Change", "of", "resolver", "name", "." ]
python
train
28.75
QInfer/python-qinfer
src/qinfer/derived_models.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/derived_models.py#L898-L918
def est_update_covariance(self, modelparams): """ Returns the covariance of the gaussian noise process for one unit step. In the case where the covariance is being learned, the expected covariance matrix is returned. :param modelparams: Shape `(n_models, n_modelparams)` shape array of model parameters. """ if self._diagonal: cov = (self._fixed_scale ** 2 if self._has_fixed_covariance \ else np.mean(modelparams[:, self._srw_idxs] ** 2, axis=0)) cov = np.diag(cov) else: if self._has_fixed_covariance: cov = np.dot(self._fixed_chol, self._fixed_chol.T) else: chol = np.zeros((modelparams.shape[0], self._n_rw, self._n_rw)) chol[(np.s_[:],) + self._srw_tri_idxs] = modelparams[:, self._srw_idxs] cov = np.mean(np.einsum('ijk,ilk->ijl', chol, chol), axis=0) return cov
[ "def", "est_update_covariance", "(", "self", ",", "modelparams", ")", ":", "if", "self", ".", "_diagonal", ":", "cov", "=", "(", "self", ".", "_fixed_scale", "**", "2", "if", "self", ".", "_has_fixed_covariance", "else", "np", ".", "mean", "(", "modelparams", "[", ":", ",", "self", ".", "_srw_idxs", "]", "**", "2", ",", "axis", "=", "0", ")", ")", "cov", "=", "np", ".", "diag", "(", "cov", ")", "else", ":", "if", "self", ".", "_has_fixed_covariance", ":", "cov", "=", "np", ".", "dot", "(", "self", ".", "_fixed_chol", ",", "self", ".", "_fixed_chol", ".", "T", ")", "else", ":", "chol", "=", "np", ".", "zeros", "(", "(", "modelparams", ".", "shape", "[", "0", "]", ",", "self", ".", "_n_rw", ",", "self", ".", "_n_rw", ")", ")", "chol", "[", "(", "np", ".", "s_", "[", ":", "]", ",", ")", "+", "self", ".", "_srw_tri_idxs", "]", "=", "modelparams", "[", ":", ",", "self", ".", "_srw_idxs", "]", "cov", "=", "np", ".", "mean", "(", "np", ".", "einsum", "(", "'ijk,ilk->ijl'", ",", "chol", ",", "chol", ")", ",", "axis", "=", "0", ")", "return", "cov" ]
Returns the covariance of the gaussian noise process for one unit step. In the case where the covariance is being learned, the expected covariance matrix is returned. :param modelparams: Shape `(n_models, n_modelparams)` shape array of model parameters.
[ "Returns", "the", "covariance", "of", "the", "gaussian", "noise", "process", "for", "one", "unit", "step", ".", "In", "the", "case", "where", "the", "covariance", "is", "being", "learned", "the", "expected", "covariance", "matrix", "is", "returned", ".", ":", "param", "modelparams", ":", "Shape", "(", "n_models", "n_modelparams", ")", "shape", "array", "of", "model", "parameters", "." ]
python
train
46
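In the learned-covariance branch above, each particle carries the lower-triangular Cholesky factor of its random-walk covariance, and the expected covariance is the particle average of L L^T computed with einsum. A small numpy-only illustration of that reduction; shapes and values are made up.

import numpy as np

n_models, n_rw = 1000, 2
# one lower-triangular Cholesky factor per particle (random values, just to exercise the shapes)
chol = np.tril(0.1 * np.random.randn(n_models, n_rw, n_rw))
# per-particle covariance L @ L.T, then the expectation over particles
cov = np.mean(np.einsum('ijk,ilk->ijl', chol, chol), axis=0)
print(cov.shape)   # (2, 2)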
Qiskit/qiskit-terra
qiskit/transpiler/coupling.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/transpiler/coupling.py#L126-L141
def _compute_distance_matrix(self): """Compute the full distance matrix on pairs of nodes. The distance map self._dist_matrix is computed from the graph using all_pairs_shortest_path_length. """ if not self.is_connected(): raise CouplingError("coupling graph not connected") lengths = nx.all_pairs_shortest_path_length(self.graph.to_undirected(as_view=True)) lengths = dict(lengths) size = len(lengths) cmap = np.zeros((size, size)) for idx in range(size): cmap[idx, np.fromiter(lengths[idx].keys(), dtype=int)] = np.fromiter( lengths[idx].values(), dtype=int) self._dist_matrix = cmap
[ "def", "_compute_distance_matrix", "(", "self", ")", ":", "if", "not", "self", ".", "is_connected", "(", ")", ":", "raise", "CouplingError", "(", "\"coupling graph not connected\"", ")", "lengths", "=", "nx", ".", "all_pairs_shortest_path_length", "(", "self", ".", "graph", ".", "to_undirected", "(", "as_view", "=", "True", ")", ")", "lengths", "=", "dict", "(", "lengths", ")", "size", "=", "len", "(", "lengths", ")", "cmap", "=", "np", ".", "zeros", "(", "(", "size", ",", "size", ")", ")", "for", "idx", "in", "range", "(", "size", ")", ":", "cmap", "[", "idx", ",", "np", ".", "fromiter", "(", "lengths", "[", "idx", "]", ".", "keys", "(", ")", ",", "dtype", "=", "int", ")", "]", "=", "np", ".", "fromiter", "(", "lengths", "[", "idx", "]", ".", "values", "(", ")", ",", "dtype", "=", "int", ")", "self", ".", "_dist_matrix", "=", "cmap" ]
Compute the full distance matrix on pairs of nodes. The distance map self._dist_matrix is computed from the graph using all_pairs_shortest_path_length.
[ "Compute", "the", "full", "distance", "matrix", "on", "pairs", "of", "nodes", "." ]
python
test
43.6875
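_compute_distance_matrix above fills a dense matrix from networkx's all_pairs_shortest_path_length. The same construction on a toy graph, without the qiskit CouplingMap wrapper:

import networkx as nx
import numpy as np

graph = nx.path_graph(4)                      # 0-1-2-3 line, a stand-in coupling map
lengths = dict(nx.all_pairs_shortest_path_length(graph))
size = len(lengths)
dist = np.zeros((size, size))
for src, targets in lengths.items():
    dist[src, np.fromiter(targets.keys(), dtype=int)] = np.fromiter(targets.values(), dtype=int)
print(dist[0, 3])   # 3.0, the hop count between the end nodes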
Kensuke-Mitsuzawa/JapaneseTokenizers
JapaneseTokenizer/mecab_wrapper/mecab_wrapper.py
https://github.com/Kensuke-Mitsuzawa/JapaneseTokenizers/blob/3bdfb6be73de0f78e5c08f3a51376ad3efa00b6c/JapaneseTokenizer/mecab_wrapper/mecab_wrapper.py#L261-L333
def tokenize(self, sentence, normalized=True, is_feature=False, is_surface=False, return_list=False, func_normalizer=normalize_text): # type: (text_type, bool, bool, bool, bool, Callable[[str], str])->Union[List[str], TokenizedSenetence] """* What you can do - Call mecab tokenizer, and return tokenized objects """ if six.PY2 and isinstance(sentence, str): sentence = sentence.decode(self.string_encoding) else: pass # decide normalization function depending on dictType if func_normalizer is None and self._dictType == 'neologd' and is_neologdn_valid: normalized_sentence = neologdn.normalize(sentence) elif func_normalizer is None and self._dictType == 'neologd' and is_neologdn_valid == False: raise Exception("You could not call neologd dictionary bacause you do NOT install the package neologdn.") elif func_normalizer == normalize_text: normalized_sentence = normalize_text(sentence, dictionary_mode=self._dictType) elif func_normalizer is None: normalized_sentence = sentence else: normalized_sentence = func_normalizer(sentence) # don't delete this variable. The variable "encoded_text" protects sentence from deleting if six.PY2: encoded_text = normalized_sentence.encode(self.string_encoding) else: encoded_text = normalized_sentence if six.PY2: tokenized_objects = [] node = self.mecabObj.parseToNode(encoded_text) node = node.next while node.next is not None: word_surface = node.surface.decode(self.string_encoding) tuple_pos, word_stem = self.__feature_parser(node.feature.decode(self.string_encoding), word_surface) tokenized_obj = TokenizedResult( node_obj=node, tuple_pos=tuple_pos, word_stem=word_stem, word_surface=word_surface, is_feature=is_feature, is_surface=is_surface ) tokenized_objects.append(tokenized_obj) node = node.next tokenized_sentence = TokenizedSenetence( sentence=sentence, tokenized_objects=tokenized_objects) else: parsed_result = self.mecabObj.parse(encoded_text) tokenized_objects = self.__postprocess_analyzed_result( string_mecab_parsed_result=parsed_result, is_feature=is_feature, is_surface=is_surface ) tokenized_sentence = TokenizedSenetence( sentence=sentence, tokenized_objects=tokenized_objects ) # type: TokenizedSenetence if return_list: return tokenized_sentence.convert_list_object() else: return tokenized_sentence
[ "def", "tokenize", "(", "self", ",", "sentence", ",", "normalized", "=", "True", ",", "is_feature", "=", "False", ",", "is_surface", "=", "False", ",", "return_list", "=", "False", ",", "func_normalizer", "=", "normalize_text", ")", ":", "# type: (text_type, bool, bool, bool, bool, Callable[[str], str])->Union[List[str], TokenizedSenetence]", "if", "six", ".", "PY2", "and", "isinstance", "(", "sentence", ",", "str", ")", ":", "sentence", "=", "sentence", ".", "decode", "(", "self", ".", "string_encoding", ")", "else", ":", "pass", "# decide normalization function depending on dictType", "if", "func_normalizer", "is", "None", "and", "self", ".", "_dictType", "==", "'neologd'", "and", "is_neologdn_valid", ":", "normalized_sentence", "=", "neologdn", ".", "normalize", "(", "sentence", ")", "elif", "func_normalizer", "is", "None", "and", "self", ".", "_dictType", "==", "'neologd'", "and", "is_neologdn_valid", "==", "False", ":", "raise", "Exception", "(", "\"You could not call neologd dictionary bacause you do NOT install the package neologdn.\"", ")", "elif", "func_normalizer", "==", "normalize_text", ":", "normalized_sentence", "=", "normalize_text", "(", "sentence", ",", "dictionary_mode", "=", "self", ".", "_dictType", ")", "elif", "func_normalizer", "is", "None", ":", "normalized_sentence", "=", "sentence", "else", ":", "normalized_sentence", "=", "func_normalizer", "(", "sentence", ")", "# don't delete this variable. The variable \"encoded_text\" protects sentence from deleting", "if", "six", ".", "PY2", ":", "encoded_text", "=", "normalized_sentence", ".", "encode", "(", "self", ".", "string_encoding", ")", "else", ":", "encoded_text", "=", "normalized_sentence", "if", "six", ".", "PY2", ":", "tokenized_objects", "=", "[", "]", "node", "=", "self", ".", "mecabObj", ".", "parseToNode", "(", "encoded_text", ")", "node", "=", "node", ".", "next", "while", "node", ".", "next", "is", "not", "None", ":", "word_surface", "=", "node", ".", "surface", ".", "decode", "(", "self", ".", "string_encoding", ")", "tuple_pos", ",", "word_stem", "=", "self", ".", "__feature_parser", "(", "node", ".", "feature", ".", "decode", "(", "self", ".", "string_encoding", ")", ",", "word_surface", ")", "tokenized_obj", "=", "TokenizedResult", "(", "node_obj", "=", "node", ",", "tuple_pos", "=", "tuple_pos", ",", "word_stem", "=", "word_stem", ",", "word_surface", "=", "word_surface", ",", "is_feature", "=", "is_feature", ",", "is_surface", "=", "is_surface", ")", "tokenized_objects", ".", "append", "(", "tokenized_obj", ")", "node", "=", "node", ".", "next", "tokenized_sentence", "=", "TokenizedSenetence", "(", "sentence", "=", "sentence", ",", "tokenized_objects", "=", "tokenized_objects", ")", "else", ":", "parsed_result", "=", "self", ".", "mecabObj", ".", "parse", "(", "encoded_text", ")", "tokenized_objects", "=", "self", ".", "__postprocess_analyzed_result", "(", "string_mecab_parsed_result", "=", "parsed_result", ",", "is_feature", "=", "is_feature", ",", "is_surface", "=", "is_surface", ")", "tokenized_sentence", "=", "TokenizedSenetence", "(", "sentence", "=", "sentence", ",", "tokenized_objects", "=", "tokenized_objects", ")", "# type: TokenizedSenetence", "if", "return_list", ":", "return", "tokenized_sentence", ".", "convert_list_object", "(", ")", "else", ":", "return", "tokenized_sentence" ]
* What you can do - Call mecab tokenizer, and return tokenized objects
[ "*", "What", "you", "can", "do", "-", "Call", "mecab", "tokenizer", "and", "return", "tokenized", "objects" ]
python
train
41.438356
nchopin/particles
particles/distributions.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/distributions.py#L250-L256
def posterior(self, x, sigma=1.): """Model is X_1,...,X_n ~ N(theta, sigma^2), theta~self, sigma fixed""" pr0 = 1. / self.scale**2 # prior precision prd = x.size / sigma**2 # data precision varp = 1. / (pr0 + prd) # posterior variance mu = varp * (pr0 * self.loc + prd * x.mean()) return Normal(loc=mu, scale=np.sqrt(varp))
[ "def", "posterior", "(", "self", ",", "x", ",", "sigma", "=", "1.", ")", ":", "pr0", "=", "1.", "/", "self", ".", "scale", "**", "2", "# prior precision", "prd", "=", "x", ".", "size", "/", "sigma", "**", "2", "# data precision", "varp", "=", "1.", "/", "(", "pr0", "+", "prd", ")", "# posterior variance", "mu", "=", "varp", "*", "(", "pr0", "*", "self", ".", "loc", "+", "prd", "*", "x", ".", "mean", "(", ")", ")", "return", "Normal", "(", "loc", "=", "mu", ",", "scale", "=", "np", ".", "sqrt", "(", "varp", ")", ")" ]
Model is X_1,...,X_n ~ N(theta, sigma^2), theta~self, sigma fixed
[ "Model", "is", "X_1", "...", "X_n", "~", "N", "(", "theta", "sigma^2", ")", "theta~self", "sigma", "fixed" ]
python
train
52.571429
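The posterior method above is the textbook conjugate update for a normal likelihood with known sigma and a normal prior. Spelled out with numpy, with prior and data values invented for the example:

import numpy as np

def normal_posterior(x, prior_loc, prior_scale, sigma=1.0):
    """Posterior mean and std of theta for X_i ~ N(theta, sigma^2), theta ~ N(prior_loc, prior_scale^2)."""
    pr0 = 1.0 / prior_scale ** 2       # prior precision
    prd = x.size / sigma ** 2          # data precision
    varp = 1.0 / (pr0 + prd)           # posterior variance
    mu = varp * (pr0 * prior_loc + prd * x.mean())
    return mu, np.sqrt(varp)

x = np.array([1.2, 0.8, 1.1])
print(normal_posterior(x, prior_loc=0.0, prior_scale=10.0))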
rhgrant10/Groupy
groupy/api/groups.py
https://github.com/rhgrant10/Groupy/blob/ffd8cac57586fa1c218e3b4bfaa531142c3be766/groupy/api/groups.py#L64-L73
def get(self, id): """Get a single group by ID. :param str id: a group ID :return: a group :rtype: :class:`~groupy.api.groups.Group` """ url = utils.urljoin(self.url, id) response = self.session.get(url) return Group(self, **response.data)
[ "def", "get", "(", "self", ",", "id", ")", ":", "url", "=", "utils", ".", "urljoin", "(", "self", ".", "url", ",", "id", ")", "response", "=", "self", ".", "session", ".", "get", "(", "url", ")", "return", "Group", "(", "self", ",", "*", "*", "response", ".", "data", ")" ]
Get a single group by ID. :param str id: a group ID :return: a group :rtype: :class:`~groupy.api.groups.Group`
[ "Get", "a", "single", "group", "by", "ID", "." ]
python
train
29.5
tanghaibao/jcvi
jcvi/projects/age.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/projects/age.py#L464-L496
def extract_twin_values(triples, traits, gender=None): """Calculate the heritability of certain traits in triplets. Parameters ========== triples: (a, b, "Female/Male") triples. The sample IDs are then used to query the traits dictionary. traits: sample_id => value dictionary Returns ======= tuples of size 2, that contain paired trait values of the twins """ # Construct the pairs of trait values traitValuesAbsent = 0 nanValues = 0 genderSkipped = 0 twinValues = [] for a, b, t in triples: if gender is not None and t != gender: genderSkipped += 1 continue if not (a in traits and b in traits): traitValuesAbsent += 1 continue if np.isnan(traits[a]) or np.isnan(traits[b]): nanValues += 1 continue twinValues.append((traits[a], traits[b])) print("A total of {} pairs extracted ({} absent; {} nan; {} genderSkipped)"\ .format(len(twinValues), traitValuesAbsent, nanValues, genderSkipped)) return twinValues
[ "def", "extract_twin_values", "(", "triples", ",", "traits", ",", "gender", "=", "None", ")", ":", "# Construct the pairs of trait values", "traitValuesAbsent", "=", "0", "nanValues", "=", "0", "genderSkipped", "=", "0", "twinValues", "=", "[", "]", "for", "a", ",", "b", ",", "t", "in", "triples", ":", "if", "gender", "is", "not", "None", "and", "t", "!=", "gender", ":", "genderSkipped", "+=", "1", "continue", "if", "not", "(", "a", "in", "traits", "and", "b", "in", "traits", ")", ":", "traitValuesAbsent", "+=", "1", "continue", "if", "np", ".", "isnan", "(", "traits", "[", "a", "]", ")", "or", "np", ".", "isnan", "(", "traits", "[", "b", "]", ")", ":", "nanValues", "+=", "1", "continue", "twinValues", ".", "append", "(", "(", "traits", "[", "a", "]", ",", "traits", "[", "b", "]", ")", ")", "print", "(", "\"A total of {} pairs extracted ({} absent; {} nan; {} genderSkipped)\"", ".", "format", "(", "len", "(", "twinValues", ")", ",", "traitValuesAbsent", ",", "nanValues", ",", "genderSkipped", ")", ")", "return", "twinValues" ]
Calculate the heritability of certain traits in triplets. Parameters ========== triples: (a, b, "Female/Male") triples. The sample IDs are then used to query the traits dictionary. traits: sample_id => value dictionary Returns ======= tuples of size 2, that contain paired trait values of the twins
[ "Calculate", "the", "heritability", "of", "certain", "traits", "in", "triplets", "." ]
python
train
32.484848
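extract_twin_values above pairs trait values across twins while counting what gets filtered out. A stripped-down version that keeps only the pairing and the missing/NaN filters:

import numpy as np

def paired_trait_values(triples, traits, gender=None):
    """Return (value_a, value_b) pairs for twins whose traits are both present and finite."""
    pairs = []
    for a, b, g in triples:
        if gender is not None and g != gender:
            continue
        if a not in traits or b not in traits:
            continue
        if np.isnan(traits[a]) or np.isnan(traits[b]):
            continue
        pairs.append((traits[a], traits[b]))
    return pairs

triples = [('t1a', 't1b', 'Female'), ('t2a', 't2b', 'Male')]
traits = {'t1a': 1.70, 't1b': 1.65, 't2a': float('nan'), 't2b': 1.80}
print(paired_trait_values(triples, traits))   # [(1.7, 1.65)]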
SCIP-Interfaces/PySCIPOpt
examples/tutorial/logical.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/tutorial/logical.py#L50-L58
def or_constraint(v=0, sense="maximize"): """ OR constraint""" assert v in [0,1], "v must be 0 or 1 instead of %s" % v.__repr__() model, x, y, z = _init() r = model.addVar("r", "B") model.addConsOr([x,y,z], r) model.addCons(x==v) model.setObjective(r, sense=sense) _optimize("OR", model)
[ "def", "or_constraint", "(", "v", "=", "0", ",", "sense", "=", "\"maximize\"", ")", ":", "assert", "v", "in", "[", "0", ",", "1", "]", ",", "\"v must be 0 or 1 instead of %s\"", "%", "v", ".", "__repr__", "(", ")", "model", ",", "x", ",", "y", ",", "z", "=", "_init", "(", ")", "r", "=", "model", ".", "addVar", "(", "\"r\"", ",", "\"B\"", ")", "model", ".", "addConsOr", "(", "[", "x", ",", "y", ",", "z", "]", ",", "r", ")", "model", ".", "addCons", "(", "x", "==", "v", ")", "model", ".", "setObjective", "(", "r", ",", "sense", "=", "sense", ")", "_optimize", "(", "\"OR\"", ",", "model", ")" ]
OR constraint
[ "OR", "constraint" ]
python
train
34.555556
CalebBell/thermo
thermo/thermal_conductivity.py
https://github.com/CalebBell/thermo/blob/3857ed023a3e64fd3039a32d53576c24990ef1c3/thermo/thermal_conductivity.py#L2025-L2089
def load_all_methods(self): r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters. ''' methods, methods_P = [], [] Tmins, Tmaxs = [], [] if self.CASRN in _VDISaturationDict: methods.append(VDI_TABULAR) Ts, props = VDI_tabular_data(self.CASRN, 'K (g)') self.VDI_Tmin = Ts[0] self.VDI_Tmax = Ts[-1] self.tabular_data[VDI_TABULAR] = (Ts, props) Tmins.append(self.VDI_Tmin); Tmaxs.append(self.VDI_Tmax) if has_CoolProp and self.CASRN in coolprop_dict: methods.append(COOLPROP); methods_P.append(COOLPROP) self.CP_f = coolprop_fluids[self.CASRN] Tmins.append(self.CP_f.Tmin); Tmaxs.append(self.CP_f.Tc) if self.CASRN in Perrys2_314.index: methods.append(DIPPR_PERRY_8E) _, C1, C2, C3, C4, self.Perrys2_314_Tmin, self.Perrys2_314_Tmax = _Perrys2_314_values[Perrys2_314.index.get_loc(self.CASRN)].tolist() self.Perrys2_314_coeffs = [C1, C2, C3, C4] Tmins.append(self.Perrys2_314_Tmin); Tmaxs.append(self.Perrys2_314_Tmax) if self.CASRN in VDI_PPDS_10.index: _, A, B, C, D, E = _VDI_PPDS_10_values[VDI_PPDS_10.index.get_loc(self.CASRN)].tolist() self.VDI_PPDS_coeffs = [A, B, C, D, E] self.VDI_PPDS_coeffs.reverse() methods.append(VDI_PPDS) if all((self.MW, self.Tb, self.Pc, self.omega)): methods.append(GHARAGHEIZI_G) # Turns negative at low T; do not set Tmin Tmaxs.append(3000) if all((self.Cvgm, self.mug, self.MW, self.Tc)): methods.append(DIPPR_9B) Tmins.append(0.01); Tmaxs.append(1E4) # No limit here if all((self.Cvgm, self.mug, self.MW, self.Tc, self.omega)): methods.append(CHUNG) Tmins.append(0.01); Tmaxs.append(1E4) # No limit if all((self.Cvgm, self.MW, self.Tc, self.Vc, self.Zc, self.omega)): methods.append(ELI_HANLEY) Tmaxs.append(1E4) # Numeric error at low T if all((self.Cvgm, self.mug, self.MW)): methods.append(EUCKEN_MOD) methods.append(EUCKEN) Tmins.append(0.01); Tmaxs.append(1E4) # No limits if self.MW: methods.append(BAHADORI_G) # Terrible method, so don't set methods if all([self.MW, self.Tc, self.Vc, self.Zc, self.omega]): methods_P.append(ELI_HANLEY_DENSE) if all([self.MW, self.Tc, self.Vc, self.omega, self.dipole]): methods_P.append(CHUNG_DENSE) if all([self.MW, self.Tc, self.Pc, self.Vc, self.Zc]): methods_P.append(STIEL_THODOS_DENSE) self.all_methods = set(methods) self.all_methods_P = set(methods_P) if Tmins and Tmaxs: self.Tmin, self.Tmax = min(Tmins), max(Tmaxs)
[ "def", "load_all_methods", "(", "self", ")", ":", "methods", ",", "methods_P", "=", "[", "]", ",", "[", "]", "Tmins", ",", "Tmaxs", "=", "[", "]", ",", "[", "]", "if", "self", ".", "CASRN", "in", "_VDISaturationDict", ":", "methods", ".", "append", "(", "VDI_TABULAR", ")", "Ts", ",", "props", "=", "VDI_tabular_data", "(", "self", ".", "CASRN", ",", "'K (g)'", ")", "self", ".", "VDI_Tmin", "=", "Ts", "[", "0", "]", "self", ".", "VDI_Tmax", "=", "Ts", "[", "-", "1", "]", "self", ".", "tabular_data", "[", "VDI_TABULAR", "]", "=", "(", "Ts", ",", "props", ")", "Tmins", ".", "append", "(", "self", ".", "VDI_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "VDI_Tmax", ")", "if", "has_CoolProp", "and", "self", ".", "CASRN", "in", "coolprop_dict", ":", "methods", ".", "append", "(", "COOLPROP", ")", "methods_P", ".", "append", "(", "COOLPROP", ")", "self", ".", "CP_f", "=", "coolprop_fluids", "[", "self", ".", "CASRN", "]", "Tmins", ".", "append", "(", "self", ".", "CP_f", ".", "Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "CP_f", ".", "Tc", ")", "if", "self", ".", "CASRN", "in", "Perrys2_314", ".", "index", ":", "methods", ".", "append", "(", "DIPPR_PERRY_8E", ")", "_", ",", "C1", ",", "C2", ",", "C3", ",", "C4", ",", "self", ".", "Perrys2_314_Tmin", ",", "self", ".", "Perrys2_314_Tmax", "=", "_Perrys2_314_values", "[", "Perrys2_314", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "Perrys2_314_coeffs", "=", "[", "C1", ",", "C2", ",", "C3", ",", "C4", "]", "Tmins", ".", "append", "(", "self", ".", "Perrys2_314_Tmin", ")", "Tmaxs", ".", "append", "(", "self", ".", "Perrys2_314_Tmax", ")", "if", "self", ".", "CASRN", "in", "VDI_PPDS_10", ".", "index", ":", "_", ",", "A", ",", "B", ",", "C", ",", "D", ",", "E", "=", "_VDI_PPDS_10_values", "[", "VDI_PPDS_10", ".", "index", ".", "get_loc", "(", "self", ".", "CASRN", ")", "]", ".", "tolist", "(", ")", "self", ".", "VDI_PPDS_coeffs", "=", "[", "A", ",", "B", ",", "C", ",", "D", ",", "E", "]", "self", ".", "VDI_PPDS_coeffs", ".", "reverse", "(", ")", "methods", ".", "append", "(", "VDI_PPDS", ")", "if", "all", "(", "(", "self", ".", "MW", ",", "self", ".", "Tb", ",", "self", ".", "Pc", ",", "self", ".", "omega", ")", ")", ":", "methods", ".", "append", "(", "GHARAGHEIZI_G", ")", "# Turns negative at low T; do not set Tmin", "Tmaxs", ".", "append", "(", "3000", ")", "if", "all", "(", "(", "self", ".", "Cvgm", ",", "self", ".", "mug", ",", "self", ".", "MW", ",", "self", ".", "Tc", ")", ")", ":", "methods", ".", "append", "(", "DIPPR_9B", ")", "Tmins", ".", "append", "(", "0.01", ")", "Tmaxs", ".", "append", "(", "1E4", ")", "# No limit here", "if", "all", "(", "(", "self", ".", "Cvgm", ",", "self", ".", "mug", ",", "self", ".", "MW", ",", "self", ".", "Tc", ",", "self", ".", "omega", ")", ")", ":", "methods", ".", "append", "(", "CHUNG", ")", "Tmins", ".", "append", "(", "0.01", ")", "Tmaxs", ".", "append", "(", "1E4", ")", "# No limit", "if", "all", "(", "(", "self", ".", "Cvgm", ",", "self", ".", "MW", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "Zc", ",", "self", ".", "omega", ")", ")", ":", "methods", ".", "append", "(", "ELI_HANLEY", ")", "Tmaxs", ".", "append", "(", "1E4", ")", "# Numeric error at low T", "if", "all", "(", "(", "self", ".", "Cvgm", ",", "self", ".", "mug", ",", "self", ".", "MW", ")", ")", ":", "methods", ".", "append", "(", "EUCKEN_MOD", ")", "methods", ".", "append", "(", "EUCKEN", ")", "Tmins", ".", "append", "(", "0.01", ")", "Tmaxs", 
".", "append", "(", "1E4", ")", "# No limits", "if", "self", ".", "MW", ":", "methods", ".", "append", "(", "BAHADORI_G", ")", "# Terrible method, so don't set methods", "if", "all", "(", "[", "self", ".", "MW", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "Zc", ",", "self", ".", "omega", "]", ")", ":", "methods_P", ".", "append", "(", "ELI_HANLEY_DENSE", ")", "if", "all", "(", "[", "self", ".", "MW", ",", "self", ".", "Tc", ",", "self", ".", "Vc", ",", "self", ".", "omega", ",", "self", ".", "dipole", "]", ")", ":", "methods_P", ".", "append", "(", "CHUNG_DENSE", ")", "if", "all", "(", "[", "self", ".", "MW", ",", "self", ".", "Tc", ",", "self", ".", "Pc", ",", "self", ".", "Vc", ",", "self", ".", "Zc", "]", ")", ":", "methods_P", ".", "append", "(", "STIEL_THODOS_DENSE", ")", "self", ".", "all_methods", "=", "set", "(", "methods", ")", "self", ".", "all_methods_P", "=", "set", "(", "methods_P", ")", "if", "Tmins", "and", "Tmaxs", ":", "self", ".", "Tmin", ",", "self", ".", "Tmax", "=", "min", "(", "Tmins", ")", ",", "max", "(", "Tmaxs", ")" ]
r'''Method which picks out coefficients for the specified chemical from the various dictionaries and DataFrames storing it. All data is stored as attributes. This method also sets :obj:`Tmin`, :obj:`Tmax`, :obj:`all_methods` and obj:`all_methods_P` as a set of methods for which the data exists for. Called on initialization only. See the source code for the variables at which the coefficients are stored. The coefficients can safely be altered once the class is initialized. This method can be called again to reset the parameters.
[ "r", "Method", "which", "picks", "out", "coefficients", "for", "the", "specified", "chemical", "from", "the", "various", "dictionaries", "and", "DataFrames", "storing", "it", ".", "All", "data", "is", "stored", "as", "attributes", ".", "This", "method", "also", "sets", ":", "obj", ":", "Tmin", ":", "obj", ":", "Tmax", ":", "obj", ":", "all_methods", "and", "obj", ":", "all_methods_P", "as", "a", "set", "of", "methods", "for", "which", "the", "data", "exists", "for", "." ]
python
valid
51.8
caseyjlaw/rtpipe
rtpipe/parsecands.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/parsecands.py#L109-L196
def merge_segments(filename, scan, cleanup=True, sizelimit=0): """ Merges cands/noise pkl files from multiple segments to single cands/noise file. Expects segment cands pkls with have (1) state dict and (2) cands dict. Writes tuple state dict and duple of numpy arrays A single pkl written per scan using root name fileroot. if cleanup, it will remove segments after merging. if sizelimit, it will reduce the output file to be less than this many MB. """ workdir = os.path.dirname(filename) fileroot = os.path.basename(filename) candslist = glob.glob(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + 'seg*.pkl')) noiselist = glob.glob(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + 'seg*.pkl')) candssegs = sorted([candsfile.rstrip('.pkl').split('seg')[1] for candsfile in candslist]) noisesegs = sorted([noisefile.rstrip('.pkl').split('seg')[1] for noisefile in noiselist]) # test for good list with segments if not candslist and not noiselist: logger.warn('candslist and noiselist are empty.') return # aggregate cands over segments if not os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')): logger.info('Aggregating cands over segments %s for fileroot %s, scan %d' % (str(candssegs), fileroot, scan)) logger.debug('%s' % candslist) cands = {} for candsfile in candslist: with open(candsfile, 'r') as pkl: state = pickle.load(pkl) result = pickle.load(pkl) for kk in result.keys(): cands[kk] = result[kk] segment = state.pop('segment') # remove this key, as it has no meaning after merging segments # optionally limit size if sizelimit and len(cands): logger.debug('Checking size of cands dictionary...') if 'snr2' in state['features']: snrcol = state['features'].index('snr2') elif 'snr1' in state['features']: snrcol = state['features'].index('snr1') candsize = sys.getsizeof(cands[cands.keys()[0]])/1e6 maxlen = int(sizelimit/candsize) if len(cands) > maxlen: # need to reduce length to newlen logger.info('cands dictionary of length %.1f would exceed sizelimit of %d MB. Trimming to strongest %d candidates' % (len(cands), sizelimit, maxlen)) snrs = [abs(cands[k][snrcol]) for k in cands.iterkeys()] # take top snrs snrsort = sorted(snrs, reverse=True) snrmax = snrsort[maxlen] # get min snr for given length limit cands = {k: v for k,v in cands.items() if abs(v[snrcol]) > snrmax} # new cands dict # write cands to single file with open(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl: pickle.dump(state, pkl, protocol=2) pickle.dump( (np.array(cands.keys()), np.array(cands.values())), pkl, protocol=2) if cleanup: if os.path.exists(os.path.join(workdir, 'cands_' + fileroot + '_sc' + str(scan) + '.pkl')): for candsfile in candslist: os.remove(candsfile) else: logger.warn('Merged candsfile already exists for scan %d. Not merged.' % scan) # aggregate noise over segments if not os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')): logger.info('Aggregating noise over segments %s for fileroot %s, scan %d' % (str(noisesegs), fileroot, scan)) logger.debug('%s' % noiselist) noise = [] for noisefile in noiselist: with open(noisefile, 'r') as pkl: result = pickle.load(pkl) # gets all noises for segment as list noise += result # write noise to single file if len(noise): with open(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl'), 'w') as pkl: pickle.dump(noise, pkl, protocol=2) if cleanup: if os.path.exists(os.path.join(workdir, 'noise_' + fileroot + '_sc' + str(scan) + '.pkl')): for noisefile in noiselist: os.remove(noisefile) else: logger.warn('Merged noisefile already exists for scan %d. Not merged.' % scan)
[ "def", "merge_segments", "(", "filename", ",", "scan", ",", "cleanup", "=", "True", ",", "sizelimit", "=", "0", ")", ":", "workdir", "=", "os", ".", "path", ".", "dirname", "(", "filename", ")", "fileroot", "=", "os", ".", "path", ".", "basename", "(", "filename", ")", "candslist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'cands_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'seg*.pkl'", ")", ")", "noiselist", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'noise_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'seg*.pkl'", ")", ")", "candssegs", "=", "sorted", "(", "[", "candsfile", ".", "rstrip", "(", "'.pkl'", ")", ".", "split", "(", "'seg'", ")", "[", "1", "]", "for", "candsfile", "in", "candslist", "]", ")", "noisesegs", "=", "sorted", "(", "[", "noisefile", ".", "rstrip", "(", "'.pkl'", ")", ".", "split", "(", "'seg'", ")", "[", "1", "]", "for", "noisefile", "in", "noiselist", "]", ")", "# test for good list with segments", "if", "not", "candslist", "and", "not", "noiselist", ":", "logger", ".", "warn", "(", "'candslist and noiselist are empty.'", ")", "return", "# aggregate cands over segments", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'cands_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'.pkl'", ")", ")", ":", "logger", ".", "info", "(", "'Aggregating cands over segments %s for fileroot %s, scan %d'", "%", "(", "str", "(", "candssegs", ")", ",", "fileroot", ",", "scan", ")", ")", "logger", ".", "debug", "(", "'%s'", "%", "candslist", ")", "cands", "=", "{", "}", "for", "candsfile", "in", "candslist", ":", "with", "open", "(", "candsfile", ",", "'r'", ")", "as", "pkl", ":", "state", "=", "pickle", ".", "load", "(", "pkl", ")", "result", "=", "pickle", ".", "load", "(", "pkl", ")", "for", "kk", "in", "result", ".", "keys", "(", ")", ":", "cands", "[", "kk", "]", "=", "result", "[", "kk", "]", "segment", "=", "state", ".", "pop", "(", "'segment'", ")", "# remove this key, as it has no meaning after merging segments", "# optionally limit size", "if", "sizelimit", "and", "len", "(", "cands", ")", ":", "logger", ".", "debug", "(", "'Checking size of cands dictionary...'", ")", "if", "'snr2'", "in", "state", "[", "'features'", "]", ":", "snrcol", "=", "state", "[", "'features'", "]", ".", "index", "(", "'snr2'", ")", "elif", "'snr1'", "in", "state", "[", "'features'", "]", ":", "snrcol", "=", "state", "[", "'features'", "]", ".", "index", "(", "'snr1'", ")", "candsize", "=", "sys", ".", "getsizeof", "(", "cands", "[", "cands", ".", "keys", "(", ")", "[", "0", "]", "]", ")", "/", "1e6", "maxlen", "=", "int", "(", "sizelimit", "/", "candsize", ")", "if", "len", "(", "cands", ")", ">", "maxlen", ":", "# need to reduce length to newlen", "logger", ".", "info", "(", "'cands dictionary of length %.1f would exceed sizelimit of %d MB. 
Trimming to strongest %d candidates'", "%", "(", "len", "(", "cands", ")", ",", "sizelimit", ",", "maxlen", ")", ")", "snrs", "=", "[", "abs", "(", "cands", "[", "k", "]", "[", "snrcol", "]", ")", "for", "k", "in", "cands", ".", "iterkeys", "(", ")", "]", "# take top snrs", "snrsort", "=", "sorted", "(", "snrs", ",", "reverse", "=", "True", ")", "snrmax", "=", "snrsort", "[", "maxlen", "]", "# get min snr for given length limit", "cands", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "cands", ".", "items", "(", ")", "if", "abs", "(", "v", "[", "snrcol", "]", ")", ">", "snrmax", "}", "# new cands dict", "# write cands to single file", "with", "open", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'cands_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'.pkl'", ")", ",", "'w'", ")", "as", "pkl", ":", "pickle", ".", "dump", "(", "state", ",", "pkl", ",", "protocol", "=", "2", ")", "pickle", ".", "dump", "(", "(", "np", ".", "array", "(", "cands", ".", "keys", "(", ")", ")", ",", "np", ".", "array", "(", "cands", ".", "values", "(", ")", ")", ")", ",", "pkl", ",", "protocol", "=", "2", ")", "if", "cleanup", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'cands_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'.pkl'", ")", ")", ":", "for", "candsfile", "in", "candslist", ":", "os", ".", "remove", "(", "candsfile", ")", "else", ":", "logger", ".", "warn", "(", "'Merged candsfile already exists for scan %d. Not merged.'", "%", "scan", ")", "# aggregate noise over segments", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'noise_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'.pkl'", ")", ")", ":", "logger", ".", "info", "(", "'Aggregating noise over segments %s for fileroot %s, scan %d'", "%", "(", "str", "(", "noisesegs", ")", ",", "fileroot", ",", "scan", ")", ")", "logger", ".", "debug", "(", "'%s'", "%", "noiselist", ")", "noise", "=", "[", "]", "for", "noisefile", "in", "noiselist", ":", "with", "open", "(", "noisefile", ",", "'r'", ")", "as", "pkl", ":", "result", "=", "pickle", ".", "load", "(", "pkl", ")", "# gets all noises for segment as list", "noise", "+=", "result", "# write noise to single file", "if", "len", "(", "noise", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'noise_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'.pkl'", ")", ",", "'w'", ")", "as", "pkl", ":", "pickle", ".", "dump", "(", "noise", ",", "pkl", ",", "protocol", "=", "2", ")", "if", "cleanup", ":", "if", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "workdir", ",", "'noise_'", "+", "fileroot", "+", "'_sc'", "+", "str", "(", "scan", ")", "+", "'.pkl'", ")", ")", ":", "for", "noisefile", "in", "noiselist", ":", "os", ".", "remove", "(", "noisefile", ")", "else", ":", "logger", ".", "warn", "(", "'Merged noisefile already exists for scan %d. Not merged.'", "%", "scan", ")" ]
Merges cands/noise pkl files from multiple segments to single cands/noise file. Expects segment cands pkls with have (1) state dict and (2) cands dict. Writes tuple state dict and duple of numpy arrays A single pkl written per scan using root name fileroot. if cleanup, it will remove segments after merging. if sizelimit, it will reduce the output file to be less than this many MB.
[ "Merges", "cands", "/", "noise", "pkl", "files", "from", "multiple", "segments", "to", "single", "cands", "/", "noise", "file", "." ]
python
train
49.261364
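A short usage sketch of the merge_segments entry above may help; the path, file root and scan number below are hypothetical, and sizelimit is only an illustrative value.

from rtpipe.parsecands import merge_segments

# Merge per-segment pickles for scan 5 of a (hypothetical) observation whose files
# live in /data as cands_obs14A_sc5seg*.pkl and noise_obs14A_sc5seg*.pkl.
merge_segments('/data/obs14A', scan=5, cleanup=True, sizelimit=2)
# writes /data/cands_obs14A_sc5.pkl and /data/noise_obs14A_sc5.pkl, removes the
# per-segment files, and trims the cands dict to roughly 2 MB of strongest candidates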
clchiou/startup
startup.py
https://github.com/clchiou/startup/blob/13cbf3ce1deffbc10d33a5f64c396a73129a5929/startup.py#L185-L218
def call(self, **kwargs): """Call all the functions that have previously been added to the dependency graph in topological and lexicographical order, and then return variables in a ``dict``. You may provide variable values with keyword arguments. These values will be written and can satisfy dependencies. NOTE: This object will be **destroyed** after ``call()`` returns and should not be used any further. """ if not hasattr(self, 'funcs'): raise StartupError('startup cannot be called again') for name, var in self.variables.items(): var.name = name self.variable_values.update(kwargs) for name in self.variable_values: self.variables[name].name = name queue = Closure.sort(self.satisfied) queue.extend(_write_values(self.variable_values, self.variables)) while queue: closure = queue.pop(0) writeto = closure.call() self.funcs.remove(closure.func) queue.extend(_notify_reader_writes(writeto)) if self.funcs: raise StartupError('cannot satisfy dependency for %r' % self.funcs) values = { name: var.read_latest() for name, var in self.variables.items() } # Call _release() on normal exit only; otherwise keep the dead body for # forensic analysis. self._release() return values
[ "def", "call", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'funcs'", ")", ":", "raise", "StartupError", "(", "'startup cannot be called again'", ")", "for", "name", ",", "var", "in", "self", ".", "variables", ".", "items", "(", ")", ":", "var", ".", "name", "=", "name", "self", ".", "variable_values", ".", "update", "(", "kwargs", ")", "for", "name", "in", "self", ".", "variable_values", ":", "self", ".", "variables", "[", "name", "]", ".", "name", "=", "name", "queue", "=", "Closure", ".", "sort", "(", "self", ".", "satisfied", ")", "queue", ".", "extend", "(", "_write_values", "(", "self", ".", "variable_values", ",", "self", ".", "variables", ")", ")", "while", "queue", ":", "closure", "=", "queue", ".", "pop", "(", "0", ")", "writeto", "=", "closure", ".", "call", "(", ")", "self", ".", "funcs", ".", "remove", "(", "closure", ".", "func", ")", "queue", ".", "extend", "(", "_notify_reader_writes", "(", "writeto", ")", ")", "if", "self", ".", "funcs", ":", "raise", "StartupError", "(", "'cannot satisfy dependency for %r'", "%", "self", ".", "funcs", ")", "values", "=", "{", "name", ":", "var", ".", "read_latest", "(", ")", "for", "name", ",", "var", "in", "self", ".", "variables", ".", "items", "(", ")", "}", "# Call _release() on normal exit only; otherwise keep the dead body for", "# forensic analysis.", "self", ".", "_release", "(", ")", "return", "values" ]
Call all the functions that have previously been added to the dependency graph in topological and lexicographical order, and then return variables in a ``dict``. You may provide variable values with keyword arguments. These values will be written and can satisfy dependencies. NOTE: This object will be **destroyed** after ``call()`` returns and should not be used any further.
[ "Call", "all", "the", "functions", "that", "have", "previously", "been", "added", "to", "the", "dependency", "graph", "in", "topological", "and", "lexicographical", "order", "and", "then", "return", "variables", "in", "a", "dict", "." ]
python
train
42.117647
tjcsl/cslbot
cslbot/helpers/reloader.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/reloader.py#L50-L96
def do_reload(bot, target, cmdargs, server_send=None): """The reloading magic. - First, reload handler.py. - Then make copies of all the handler data we want to keep. - Create a new handler and restore all the data. """ def send(msg): if server_send is not None: server_send("%s\n" % msg) else: do_log(bot.connection, bot.get_target(target), msg) confdir = bot.handler.confdir if cmdargs == 'pull': # Permission checks. if isinstance(target, irc.client.Event) and target.source.nick != bot.config['auth']['owner']: bot.connection.privmsg(bot.get_target(target), "Nope, not gonna do it.") return if exists(join(confdir, '.git')): send(misc.do_pull(srcdir=confdir)) else: send(misc.do_pull(repo=bot.config['api']['githubrepo'])) # Reload config importlib.reload(config) bot.config = config.load_config(join(confdir, 'config.cfg'), send) # Reimport helpers errored_helpers = modutils.scan_and_reimport('helpers') if errored_helpers: send("Failed to load some helpers.") for error in errored_helpers: send("%s: %s" % error) return False if not load_modules(bot.config, confdir, send): return False # preserve data data = bot.handler.get_data() bot.shutdown_mp() bot.handler = handler.BotHandler(bot.config, bot.connection, bot.channels, confdir) bot.handler.set_data(data) bot.handler.connection = bot.connection bot.handler.channels = bot.channels return True
[ "def", "do_reload", "(", "bot", ",", "target", ",", "cmdargs", ",", "server_send", "=", "None", ")", ":", "def", "send", "(", "msg", ")", ":", "if", "server_send", "is", "not", "None", ":", "server_send", "(", "\"%s\\n\"", "%", "msg", ")", "else", ":", "do_log", "(", "bot", ".", "connection", ",", "bot", ".", "get_target", "(", "target", ")", ",", "msg", ")", "confdir", "=", "bot", ".", "handler", ".", "confdir", "if", "cmdargs", "==", "'pull'", ":", "# Permission checks.", "if", "isinstance", "(", "target", ",", "irc", ".", "client", ".", "Event", ")", "and", "target", ".", "source", ".", "nick", "!=", "bot", ".", "config", "[", "'auth'", "]", "[", "'owner'", "]", ":", "bot", ".", "connection", ".", "privmsg", "(", "bot", ".", "get_target", "(", "target", ")", ",", "\"Nope, not gonna do it.\"", ")", "return", "if", "exists", "(", "join", "(", "confdir", ",", "'.git'", ")", ")", ":", "send", "(", "misc", ".", "do_pull", "(", "srcdir", "=", "confdir", ")", ")", "else", ":", "send", "(", "misc", ".", "do_pull", "(", "repo", "=", "bot", ".", "config", "[", "'api'", "]", "[", "'githubrepo'", "]", ")", ")", "# Reload config", "importlib", ".", "reload", "(", "config", ")", "bot", ".", "config", "=", "config", ".", "load_config", "(", "join", "(", "confdir", ",", "'config.cfg'", ")", ",", "send", ")", "# Reimport helpers", "errored_helpers", "=", "modutils", ".", "scan_and_reimport", "(", "'helpers'", ")", "if", "errored_helpers", ":", "send", "(", "\"Failed to load some helpers.\"", ")", "for", "error", "in", "errored_helpers", ":", "send", "(", "\"%s: %s\"", "%", "error", ")", "return", "False", "if", "not", "load_modules", "(", "bot", ".", "config", ",", "confdir", ",", "send", ")", ":", "return", "False", "# preserve data", "data", "=", "bot", ".", "handler", ".", "get_data", "(", ")", "bot", ".", "shutdown_mp", "(", ")", "bot", ".", "handler", "=", "handler", ".", "BotHandler", "(", "bot", ".", "config", ",", "bot", ".", "connection", ",", "bot", ".", "channels", ",", "confdir", ")", "bot", ".", "handler", ".", "set_data", "(", "data", ")", "bot", ".", "handler", ".", "connection", "=", "bot", ".", "connection", "bot", ".", "handler", ".", "channels", "=", "bot", ".", "channels", "return", "True" ]
The reloading magic. - First, reload handler.py. - Then make copies of all the handler data we want to keep. - Create a new handler and restore all the data.
[ "The", "reloading", "magic", "." ]
python
train
33.446809
rameshg87/pyremotevbox
pyremotevbox/ZSI/wstools/WSDLTools.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/WSDLTools.py#L1494-L1501
def addOutHeaderInfo(self, name, type, namespace, element_type=0, mustUnderstand=0): """Add an output SOAP header description to the call info.""" headerinfo = HeaderInfo(name, type, namespace, element_type) if mustUnderstand: headerinfo.mustUnderstand = 1 self.outheaders.append(headerinfo) return headerinfo
[ "def", "addOutHeaderInfo", "(", "self", ",", "name", ",", "type", ",", "namespace", ",", "element_type", "=", "0", ",", "mustUnderstand", "=", "0", ")", ":", "headerinfo", "=", "HeaderInfo", "(", "name", ",", "type", ",", "namespace", ",", "element_type", ")", "if", "mustUnderstand", ":", "headerinfo", ".", "mustUnderstand", "=", "1", "self", ".", "outheaders", ".", "append", "(", "headerinfo", ")", "return", "headerinfo" ]
Add an output SOAP header description to the call info.
[ "Add", "an", "output", "SOAP", "header", "description", "to", "the", "call", "info", "." ]
python
train
47.375
F5Networks/f5-common-python
f5/bigip/mixins.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/mixins.py#L237-L241
def exec_cmd(self, command, **kwargs): """Wrapper method that can be changed in the inheriting classes.""" self._is_allowed_command(command) self._check_command_parameters(**kwargs) return self._exec_cmd(command, **kwargs)
[ "def", "exec_cmd", "(", "self", ",", "command", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_is_allowed_command", "(", "command", ")", "self", ".", "_check_command_parameters", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_exec_cmd", "(", "command", ",", "*", "*", "kwargs", ")" ]
Wrapper method that can be changed in the inheriting classes.
[ "Wrapper", "method", "that", "can", "be", "changed", "in", "the", "inheriting", "classes", "." ]
python
train
50
kislyuk/aegea
aegea/packages/github3/session.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/session.py#L136-L145
def no_auth(self): """Unset authentication temporarily as a context manager.""" old_basic_auth, self.auth = self.auth, None old_token_auth = self.headers.pop('Authorization', None) yield self.auth = old_basic_auth if old_token_auth: self.headers['Authorization'] = old_token_auth
[ "def", "no_auth", "(", "self", ")", ":", "old_basic_auth", ",", "self", ".", "auth", "=", "self", ".", "auth", ",", "None", "old_token_auth", "=", "self", ".", "headers", ".", "pop", "(", "'Authorization'", ",", "None", ")", "yield", "self", ".", "auth", "=", "old_basic_auth", "if", "old_token_auth", ":", "self", ".", "headers", "[", "'Authorization'", "]", "=", "old_token_auth" ]
Unset authentication temporarily as a context manager.
[ "Unset", "authentication", "temporarily", "as", "a", "context", "manager", "." ]
python
train
33.2
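A brief hedged sketch of the no_auth context manager above; the session object and its prior auth setup are assumptions made only for illustration.

session.auth = ('user', 'password')                   # previously configured basic auth (assumed)
with session.no_auth():
    session.get('https://api.github.com/rate_limit')  # this request goes out unauthenticated
# basic auth and any saved Authorization header are restored once the block exits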
log2timeline/plaso
plaso/parsers/interface.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/interface.py#L254-L274
def Parse(self, parser_mediator, file_object): """Parses a single file-like object. Args: parser_mediator (ParserMediator): a parser mediator. file_object (dvfvs.FileIO): a file-like object to parse. Raises: UnableToParseFile: when the file cannot be parsed. """ if not file_object: raise errors.UnableToParseFile('Invalid file object') if self._INITIAL_FILE_OFFSET is not None: file_object.seek(self._INITIAL_FILE_OFFSET, os.SEEK_SET) parser_mediator.AppendToParserChain(self) try: self.ParseFileObject(parser_mediator, file_object) finally: parser_mediator.PopFromParserChain()
[ "def", "Parse", "(", "self", ",", "parser_mediator", ",", "file_object", ")", ":", "if", "not", "file_object", ":", "raise", "errors", ".", "UnableToParseFile", "(", "'Invalid file object'", ")", "if", "self", ".", "_INITIAL_FILE_OFFSET", "is", "not", "None", ":", "file_object", ".", "seek", "(", "self", ".", "_INITIAL_FILE_OFFSET", ",", "os", ".", "SEEK_SET", ")", "parser_mediator", ".", "AppendToParserChain", "(", "self", ")", "try", ":", "self", ".", "ParseFileObject", "(", "parser_mediator", ",", "file_object", ")", "finally", ":", "parser_mediator", ".", "PopFromParserChain", "(", ")" ]
Parses a single file-like object. Args: parser_mediator (ParserMediator): a parser mediator. file_object (dvfvs.FileIO): a file-like object to parse. Raises: UnableToParseFile: when the file cannot be parsed.
[ "Parses", "a", "single", "file", "-", "like", "object", "." ]
python
train
30.52381
luckydonald/pytgbot
pytgbot/api_types/sendable/inline.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/sendable/inline.py#L3958-L3971
def to_array(self): """ Serializes this InputTextMessageContent to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(InputTextMessageContent, self).to_array() array['message_text'] = u(self.message_text) # py2: type unicode, py3: type str if self.parse_mode is not None: array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str if self.disable_web_page_preview is not None: array['disable_web_page_preview'] = bool(self.disable_web_page_preview) # type bool return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "InputTextMessageContent", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'message_text'", "]", "=", "u", "(", "self", ".", "message_text", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "parse_mode", "is", "not", "None", ":", "array", "[", "'parse_mode'", "]", "=", "u", "(", "self", ".", "parse_mode", ")", "# py2: type unicode, py3: type str", "if", "self", ".", "disable_web_page_preview", "is", "not", "None", ":", "array", "[", "'disable_web_page_preview'", "]", "=", "bool", "(", "self", ".", "disable_web_page_preview", ")", "# type bool", "return", "array" ]
Serializes this InputTextMessageContent to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "InputTextMessageContent", "to", "a", "dictionary", "." ]
python
train
45
manahl/arctic
arctic/date/_util.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/date/_util.py#L147-L155
def ms_to_datetime(ms, tzinfo=None): """Convert a millisecond time value to an offset-aware Python datetime object.""" if not isinstance(ms, (int, long)): raise TypeError('expected integer, not %s' % type(ms)) if tzinfo is None: tzinfo = mktz() return datetime.datetime.fromtimestamp(ms * 1e-3, tzinfo)
[ "def", "ms_to_datetime", "(", "ms", ",", "tzinfo", "=", "None", ")", ":", "if", "not", "isinstance", "(", "ms", ",", "(", "int", ",", "long", ")", ")", ":", "raise", "TypeError", "(", "'expected integer, not %s'", "%", "type", "(", "ms", ")", ")", "if", "tzinfo", "is", "None", ":", "tzinfo", "=", "mktz", "(", ")", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "ms", "*", "1e-3", ",", "tzinfo", ")" ]
Convert a millisecond time value to an offset-aware Python datetime object.
[ "Convert", "a", "millisecond", "time", "value", "to", "an", "offset", "-", "aware", "Python", "datetime", "object", "." ]
python
train
36.444444
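A minimal usage sketch for ms_to_datetime above; the millisecond value is arbitrary and the import path is assumed from the package layout.

import pytz
from arctic.date import ms_to_datetime   # assumed re-export of _util

ms_to_datetime(1500000000000)                    # datetime in the default mktz() timezone
ms_to_datetime(1500000000000, tzinfo=pytz.utc)   # the same instant, explicitly in UTC
# passing a float raises TypeError, since only integer values are accepted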
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L130-L137
def lmx_moe_h1k_f4k_x32(): """Transformer with mixture of experts. 890M Params.""" hparams = lmx_h1k_f4k() hparams.ffn_layer = "local_moe_tpu" hparams.moe_num_experts = 32 hparams.weight_dtype = "bfloat16" hparams.batch_size = 8192 return hparams
[ "def", "lmx_moe_h1k_f4k_x32", "(", ")", ":", "hparams", "=", "lmx_h1k_f4k", "(", ")", "hparams", ".", "ffn_layer", "=", "\"local_moe_tpu\"", "hparams", ".", "moe_num_experts", "=", "32", "hparams", ".", "weight_dtype", "=", "\"bfloat16\"", "hparams", ".", "batch_size", "=", "8192", "return", "hparams" ]
Transformer with mixture of experts. 890M Params.
[ "Transformer", "with", "mixture", "of", "experts", ".", "890M", "Params", "." ]
python
train
31.75
hyperledger/indy-sdk
docs/how-tos/rotate-key/python/rotate_key.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/docs/how-tos/rotate-key/python/rotate_key.py#L33-L37
def print_log(value_color="", value_noncolor=""): """set the colors for text.""" HEADER = '\033[92m' ENDC = '\033[0m' print(HEADER + value_color + ENDC + str(value_noncolor))
[ "def", "print_log", "(", "value_color", "=", "\"\"", ",", "value_noncolor", "=", "\"\"", ")", ":", "HEADER", "=", "'\\033[92m'", "ENDC", "=", "'\\033[0m'", "print", "(", "HEADER", "+", "value_color", "+", "ENDC", "+", "str", "(", "value_noncolor", ")", ")" ]
set the colors for text.
[ "set", "the", "colors", "for", "text", "." ]
python
train
37.2
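A trivial usage sketch of the print_log helper above; the message text is made up.

print_log('STEP 1: ', 'Connecting to the pool ledger')
# the first argument is wrapped in the ANSI green escape (\033[92m ... \033[0m),
# the second is appended uncoloured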
caffeinehit/django-follow
follow/models.py
https://github.com/caffeinehit/django-follow/blob/765a4795e58f57fbf96efdb7838d0c7222db2e56/follow/models.py#L50-L62
def get_follows(self, model_or_obj_or_qs): """ Returns all the followers of a model, an object or a queryset. """ fname = self.fname(model_or_obj_or_qs) if isinstance(model_or_obj_or_qs, QuerySet): return self.filter(**{'%s__in' % fname: model_or_obj_or_qs}) if inspect.isclass(model_or_obj_or_qs): return self.exclude(**{fname:None}) return self.filter(**{fname:model_or_obj_or_qs})
[ "def", "get_follows", "(", "self", ",", "model_or_obj_or_qs", ")", ":", "fname", "=", "self", ".", "fname", "(", "model_or_obj_or_qs", ")", "if", "isinstance", "(", "model_or_obj_or_qs", ",", "QuerySet", ")", ":", "return", "self", ".", "filter", "(", "*", "*", "{", "'%s__in'", "%", "fname", ":", "model_or_obj_or_qs", "}", ")", "if", "inspect", ".", "isclass", "(", "model_or_obj_or_qs", ")", ":", "return", "self", ".", "exclude", "(", "*", "*", "{", "fname", ":", "None", "}", ")", "return", "self", ".", "filter", "(", "*", "*", "{", "fname", ":", "model_or_obj_or_qs", "}", ")" ]
Returns all the followers of a model, an object or a queryset.
[ "Returns", "all", "the", "followers", "of", "a", "model", "an", "object", "or", "a", "queryset", "." ]
python
train
36.153846
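A hedged sketch of the three argument kinds get_follows accepts; attaching this manager to a Follow model and the User examples are assumptions made only for illustration.

Follow.objects.get_follows(User)                 # all follows targeting any User (model class)
Follow.objects.get_follows(request.user)         # follows of one concrete object
Follow.objects.get_follows(User.objects.all())   # follows of anything in a queryset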
google/grr
grr/core/grr_response_core/lib/rdfvalues/structs.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/rdfvalues/structs.py#L879-L882
def GetDefault(self, container=None): """Return boolean value.""" return rdfvalue.RDFBool( super(ProtoBoolean, self).GetDefault(container=container))
[ "def", "GetDefault", "(", "self", ",", "container", "=", "None", ")", ":", "return", "rdfvalue", ".", "RDFBool", "(", "super", "(", "ProtoBoolean", ",", "self", ")", ".", "GetDefault", "(", "container", "=", "container", ")", ")" ]
Return boolean value.
[ "Return", "boolean", "value", "." ]
python
train
40.5
rueckstiess/mtools
mtools/mplotqueries/plottypes/event_type.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/mplotqueries/plottypes/event_type.py#L97-L109
def color_map(cls, group): print("Group %s" % group) """ Change default color behavior. Map certain states always to the same colors (similar to MMS). """ try: state_idx = cls.states.index(group) except ValueError: # on any unexpected state, return black state_idx = 5 return cls.colors[state_idx], cls.markers[0]
[ "def", "color_map", "(", "cls", ",", "group", ")", ":", "print", "(", "\"Group %s\"", "%", "group", ")", "try", ":", "state_idx", "=", "cls", ".", "states", ".", "index", "(", "group", ")", "except", "ValueError", ":", "# on any unexpected state, return black", "state_idx", "=", "5", "return", "cls", ".", "colors", "[", "state_idx", "]", ",", "cls", ".", "markers", "[", "0", "]" ]
Change default color behavior. Map certain states always to the same colors (similar to MMS).
[ "Change", "default", "color", "behavior", "." ]
python
train
30.923077
saltstack/salt
salt/modules/virt.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/virt.py#L5135-L5157
def pool_undefine(name, **kwargs): ''' Remove a defined libvirt storage pool. The pool needs to be stopped before calling. :param name: libvirt storage pool name :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.pool_undefine default ''' conn = __get_conn(**kwargs) try: pool = conn.storagePoolLookupByName(name) return not bool(pool.undefine()) finally: conn.close()
[ "def", "pool_undefine", "(", "name", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "__get_conn", "(", "*", "*", "kwargs", ")", "try", ":", "pool", "=", "conn", ".", "storagePoolLookupByName", "(", "name", ")", "return", "not", "bool", "(", "pool", ".", "undefine", "(", ")", ")", "finally", ":", "conn", ".", "close", "(", ")" ]
Remove a defined libvirt storage pool. The pool needs to be stopped before calling. :param name: libvirt storage pool name :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.pool_undefine default
[ "Remove", "a", "defined", "libvirt", "storage", "pool", ".", "The", "pool", "needs", "to", "be", "stopped", "before", "calling", "." ]
python
train
28.173913
woolfson-group/isambard
isambard/ampal/ligands.py
https://github.com/woolfson-group/isambard/blob/ebc33b48a28ad217e18f93b910dfba46e6e71e07/isambard/ampal/ligands.py#L45-L50
def category_count(self): """Returns the number of categories in `categories`.""" category_dict = self.categories count_dict = {category: len( category_dict[category]) for category in category_dict} return count_dict
[ "def", "category_count", "(", "self", ")", ":", "category_dict", "=", "self", ".", "categories", "count_dict", "=", "{", "category", ":", "len", "(", "category_dict", "[", "category", "]", ")", "for", "category", "in", "category_dict", "}", "return", "count_dict" ]
Returns the number of categories in `categories`.
[ "Returns", "the", "number", "of", "categories", "in", "categories", "." ]
python
train
42.5
pip-services3-python/pip-services3-components-python
pip_services3_components/count/CachedCounters.py
https://github.com/pip-services3-python/pip-services3-components-python/blob/1de9c1bb544cf1891111e9a5f5d67653f62c9b52/pip_services3_components/count/CachedCounters.py#L85-L94
def clear_all(self): """ Clears (resets) all counters. """ self._lock.acquire() try: self._cache = {} self._updated = False finally: self._lock.release()
[ "def", "clear_all", "(", "self", ")", ":", "self", ".", "_lock", ".", "acquire", "(", ")", "try", ":", "self", ".", "_cache", "=", "{", "}", "self", ".", "_updated", "=", "False", "finally", ":", "self", ".", "_lock", ".", "release", "(", ")" ]
Clears (resets) all counters.
[ "Clears", "(", "resets", ")", "all", "counters", "." ]
python
train
22.8
gem/oq-engine
openquake/commands/plot.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commands/plot.py#L92-L117
def make_figure_uhs(extractors, what): """ $ oq plot 'uhs?kind=mean&site_id=0' """ import matplotlib.pyplot as plt fig = plt.figure() got = {} # (calc_id, kind) -> curves for i, ex in enumerate(extractors): uhs = ex.get(what) for kind in uhs.kind: got[ex.calc_id, kind] = uhs[kind] oq = ex.oqparam n_poes = len(oq.poes) periods = [imt.period for imt in oq.imt_periods()] [site] = uhs.site_id for j, poe in enumerate(oq.poes): ax = fig.add_subplot(n_poes, 1, j + 1) ax.set_xlabel('UHS on site %s, poe=%s, inv_time=%dy' % (site, poe, oq.investigation_time)) ax.set_ylabel('SA') for ck, arr in got.items(): ax.plot(periods, arr[0, :, j], '-', label='%s_%s' % ck) ax.plot(periods, arr[0, :, j], '.') ax.grid(True) ax.legend() return plt
[ "def", "make_figure_uhs", "(", "extractors", ",", "what", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "fig", "=", "plt", ".", "figure", "(", ")", "got", "=", "{", "}", "# (calc_id, kind) -> curves", "for", "i", ",", "ex", "in", "enumerate", "(", "extractors", ")", ":", "uhs", "=", "ex", ".", "get", "(", "what", ")", "for", "kind", "in", "uhs", ".", "kind", ":", "got", "[", "ex", ".", "calc_id", ",", "kind", "]", "=", "uhs", "[", "kind", "]", "oq", "=", "ex", ".", "oqparam", "n_poes", "=", "len", "(", "oq", ".", "poes", ")", "periods", "=", "[", "imt", ".", "period", "for", "imt", "in", "oq", ".", "imt_periods", "(", ")", "]", "[", "site", "]", "=", "uhs", ".", "site_id", "for", "j", ",", "poe", "in", "enumerate", "(", "oq", ".", "poes", ")", ":", "ax", "=", "fig", ".", "add_subplot", "(", "n_poes", ",", "1", ",", "j", "+", "1", ")", "ax", ".", "set_xlabel", "(", "'UHS on site %s, poe=%s, inv_time=%dy'", "%", "(", "site", ",", "poe", ",", "oq", ".", "investigation_time", ")", ")", "ax", ".", "set_ylabel", "(", "'SA'", ")", "for", "ck", ",", "arr", "in", "got", ".", "items", "(", ")", ":", "ax", ".", "plot", "(", "periods", ",", "arr", "[", "0", ",", ":", ",", "j", "]", ",", "'-'", ",", "label", "=", "'%s_%s'", "%", "ck", ")", "ax", ".", "plot", "(", "periods", ",", "arr", "[", "0", ",", ":", ",", "j", "]", ",", "'.'", ")", "ax", ".", "grid", "(", "True", ")", "ax", ".", "legend", "(", ")", "return", "plt" ]
$ oq plot 'uhs?kind=mean&site_id=0'
[ "$", "oq", "plot", "uhs?kind", "=", "mean&site_id", "=", "0" ]
python
train
33.923077
eagleflo/adjutant
adjutant.py
https://github.com/eagleflo/adjutant/blob/85d800d9979fa122e0888af48c2e6a697f9da458/adjutant.py#L109-L119
def vlq2int(data): """Read one VLQ-encoded integer value from an input data stream.""" # The VLQ is little-endian. byte = ord(data.read(1)) value = byte & 0x7F shift = 1 while byte & 0x80 != 0: byte = ord(data.read(1)) value = ((byte & 0x7F) << shift * 7) | value shift += 1 return value
[ "def", "vlq2int", "(", "data", ")", ":", "# The VLQ is little-endian.", "byte", "=", "ord", "(", "data", ".", "read", "(", "1", ")", ")", "value", "=", "byte", "&", "0x7F", "shift", "=", "1", "while", "byte", "&", "0x80", "!=", "0", ":", "byte", "=", "ord", "(", "data", ".", "read", "(", "1", ")", ")", "value", "=", "(", "(", "byte", "&", "0x7F", ")", "<<", "shift", "*", "7", ")", "|", "value", "shift", "+=", "1", "return", "value" ]
Read one VLQ-encoded integer value from an input data stream.
[ "Read", "one", "VLQ", "-", "encoded", "integer", "value", "from", "an", "input", "data", "stream", "." ]
python
test
30
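A hand-worked sketch of the little-endian VLQ decoding in vlq2int above; the byte sequence is chosen for illustration and the import is assumed.

import io
from adjutant import vlq2int   # assumed import; the function lives in adjutant.py

stream = io.BytesIO(b'\x80\x01')
value = vlq2int(stream)   # 0x80 supplies 0 for the low 7 bits and sets the continuation flag,
                          # 0x01 then contributes 1 << 7
assert value == 128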
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L175-L195
def tree2doe(str1): """tree2doe""" retstuff = makedoedict(str1) ddict = makedoetree(retstuff[0], retstuff[1]) ddict = retstuff[0] retstuff[1] = {}# don't need it anymore str1 = ''#just re-using it l1list = list(ddict.keys()) l1list.sort() for i in range(0, len(l1list)): str1 = str1 + ddict[l1list[i]] l2list = list(ddict[l1list[i]].keys()) l2list.sort() for j in range(0, len(l2list)): str1 = str1 + ddict[l2list[j]] l3list = list(ddict[l1list[i]][l2list[j]].keys()) l3list.sort() for k in range(0, len(l3list)): str1 = str1 + ddict[l3list[k]] return str1
[ "def", "tree2doe", "(", "str1", ")", ":", "retstuff", "=", "makedoedict", "(", "str1", ")", "ddict", "=", "makedoetree", "(", "retstuff", "[", "0", "]", ",", "retstuff", "[", "1", "]", ")", "ddict", "=", "retstuff", "[", "0", "]", "retstuff", "[", "1", "]", "=", "{", "}", "# don't need it anymore", "str1", "=", "''", "#just re-using it", "l1list", "=", "list", "(", "ddict", ".", "keys", "(", ")", ")", "l1list", ".", "sort", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "l1list", ")", ")", ":", "str1", "=", "str1", "+", "ddict", "[", "l1list", "[", "i", "]", "]", "l2list", "=", "list", "(", "ddict", "[", "l1list", "[", "i", "]", "]", ".", "keys", "(", ")", ")", "l2list", ".", "sort", "(", ")", "for", "j", "in", "range", "(", "0", ",", "len", "(", "l2list", ")", ")", ":", "str1", "=", "str1", "+", "ddict", "[", "l2list", "[", "j", "]", "]", "l3list", "=", "list", "(", "ddict", "[", "l1list", "[", "i", "]", "]", "[", "l2list", "[", "j", "]", "]", ".", "keys", "(", ")", ")", "l3list", ".", "sort", "(", ")", "for", "k", "in", "range", "(", "0", ",", "len", "(", "l3list", ")", ")", ":", "str1", "=", "str1", "+", "ddict", "[", "l3list", "[", "k", "]", "]", "return", "str1" ]
tree2doe
[ "tree2doe" ]
python
train
32.047619
ArangoDB-Community/pyArango
pyArango/collection.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L695-L726
def getEdges(self, vertex, inEdges = True, outEdges = True, rawResults = False) : """returns in, out, or both edges liked to a given document. vertex can be either a Document object or a string for an _id. If rawResults a arango results will be return as fetched, if false, will return a liste of Edge objects""" if isinstance(vertex, Document): vId = vertex._id elif (type(vertex) is str) or (type(vertex) is bytes): vId = vertex else : raise ValueError("Vertex is neither a Document nor a String") params = {"vertex" : vId} if inEdges and outEdges : pass elif inEdges : params["direction"] = "in" elif outEdges : params["direction"] = "out" else : raise ValueError("inEdges, outEdges or both must have a boolean value") r = self.connection.session.get(self.edgesURL, params = params) data = r.json() if r.status_code == 200 : if not rawResults : ret = [] for e in data["edges"] : ret.append(Edge(self, e)) return ret else : return data["edges"] else : raise CreationError("Unable to return edges for vertex: %s" % vId, data)
[ "def", "getEdges", "(", "self", ",", "vertex", ",", "inEdges", "=", "True", ",", "outEdges", "=", "True", ",", "rawResults", "=", "False", ")", ":", "if", "isinstance", "(", "vertex", ",", "Document", ")", ":", "vId", "=", "vertex", ".", "_id", "elif", "(", "type", "(", "vertex", ")", "is", "str", ")", "or", "(", "type", "(", "vertex", ")", "is", "bytes", ")", ":", "vId", "=", "vertex", "else", ":", "raise", "ValueError", "(", "\"Vertex is neither a Document nor a String\"", ")", "params", "=", "{", "\"vertex\"", ":", "vId", "}", "if", "inEdges", "and", "outEdges", ":", "pass", "elif", "inEdges", ":", "params", "[", "\"direction\"", "]", "=", "\"in\"", "elif", "outEdges", ":", "params", "[", "\"direction\"", "]", "=", "\"out\"", "else", ":", "raise", "ValueError", "(", "\"inEdges, outEdges or both must have a boolean value\"", ")", "r", "=", "self", ".", "connection", ".", "session", ".", "get", "(", "self", ".", "edgesURL", ",", "params", "=", "params", ")", "data", "=", "r", ".", "json", "(", ")", "if", "r", ".", "status_code", "==", "200", ":", "if", "not", "rawResults", ":", "ret", "=", "[", "]", "for", "e", "in", "data", "[", "\"edges\"", "]", ":", "ret", ".", "append", "(", "Edge", "(", "self", ",", "e", ")", ")", "return", "ret", "else", ":", "return", "data", "[", "\"edges\"", "]", "else", ":", "raise", "CreationError", "(", "\"Unable to return edges for vertex: %s\"", "%", "vId", ",", "data", ")" ]
returns in, out, or both edges liked to a given document. vertex can be either a Document object or a string for an _id. If rawResults a arango results will be return as fetched, if false, will return a liste of Edge objects
[ "returns", "in", "out", "or", "both", "edges", "liked", "to", "a", "given", "document", ".", "vertex", "can", "be", "either", "a", "Document", "object", "or", "a", "string", "for", "an", "_id", ".", "If", "rawResults", "a", "arango", "results", "will", "be", "return", "as", "fetched", "if", "false", "will", "return", "a", "liste", "of", "Edge", "objects" ]
python
train
41.03125
theelous3/asks
asks/request_object.py
https://github.com/theelous3/asks/blob/ea522ea971ecb031d488a6301dc2718516cadcd6/asks/request_object.py#L126-L230
async def make_request(self, redirect=False): ''' Acts as the central hub for preparing requests to be sent, and returning them upon completion. Generally just pokes through self's attribs and makes decisions about what to do. Returns: sock: The socket to be returned to the calling session's pool. Response: The response object, after any redirects. If there were redirects, the redirect responses will be stored in the final response object's `.history`. ''' h11_connection = h11.Connection(our_role=h11.CLIENT) (self.scheme, self.host, self.path, self.uri_parameters, self.query, _) = urlparse(self.uri) if not redirect: self.initial_scheme = self.scheme self.initial_netloc = self.host # leave default the host on 80 / 443 # otherwise use the base host with :port appended. host = (self.host if (self.port == '80' or self.port == '443') else self.host.split(':')[0] + ':' + self.port) # default header construction asks_headers = c_i_dict([('Host', host), ('Connection', 'keep-alive'), ('Accept-Encoding', 'gzip, deflate'), ('Accept', '*/*'), ('Content-Length', '0'), ('User-Agent', 'python-asks/2.2.2') ]) # check for a CookieTracker object, and if it's there inject # the relevant cookies in to the (next) request. # What the fuck is this shit. if self.persist_cookies is not None: self.cookies.update( self.persist_cookies.get_additional_cookies( self.host, self.path)) # formulate path / query and intended extra querys for use in uri self._build_path() # handle building the request body, if any body = '' if any((self.data, self.files, self.json is not None)): content_type, content_len, body = await self._formulate_body() asks_headers['Content-Type'] = content_type asks_headers['Content-Length'] = content_len # add custom headers, if any # note that custom headers take precedence if self.headers is not None: asks_headers.update(self.headers) # add auth if self.auth is not None: asks_headers.update(await self._auth_handler_pre()) asks_headers.update(await self._auth_handler_post_get_auth()) # add cookies if self.cookies: cookie_str = '' for k, v in self.cookies.items(): cookie_str += '{}={}; '.format(k, v) asks_headers['Cookie'] = cookie_str[:-1] # Construct h11 body object, if any body. if body: if not isinstance(body, bytes): body = bytes(body, self.encoding) asks_headers['Content-Length'] = str(len(body)) req_body = h11.Data(data=body) else: req_body = None # Construct h11 request object. req = h11.Request(method=self.method, target=self.path, headers=asks_headers.items()) # call i/o handling func response_obj = await self._request_io(req, req_body, h11_connection) # check to see if the final socket object is suitable to be returned # to the calling session's connection pool. # We don't want to return sockets that are of a difference schema or # different top level domain, as they are less likely to be useful. if redirect: if not (self.scheme == self.initial_scheme and self.host == self.initial_netloc): self.sock._active = False if self.streaming: return None, response_obj return self.sock, response_obj
[ "async", "def", "make_request", "(", "self", ",", "redirect", "=", "False", ")", ":", "h11_connection", "=", "h11", ".", "Connection", "(", "our_role", "=", "h11", ".", "CLIENT", ")", "(", "self", ".", "scheme", ",", "self", ".", "host", ",", "self", ".", "path", ",", "self", ".", "uri_parameters", ",", "self", ".", "query", ",", "_", ")", "=", "urlparse", "(", "self", ".", "uri", ")", "if", "not", "redirect", ":", "self", ".", "initial_scheme", "=", "self", ".", "scheme", "self", ".", "initial_netloc", "=", "self", ".", "host", "# leave default the host on 80 / 443", "# otherwise use the base host with :port appended.", "host", "=", "(", "self", ".", "host", "if", "(", "self", ".", "port", "==", "'80'", "or", "self", ".", "port", "==", "'443'", ")", "else", "self", ".", "host", ".", "split", "(", "':'", ")", "[", "0", "]", "+", "':'", "+", "self", ".", "port", ")", "# default header construction", "asks_headers", "=", "c_i_dict", "(", "[", "(", "'Host'", ",", "host", ")", ",", "(", "'Connection'", ",", "'keep-alive'", ")", ",", "(", "'Accept-Encoding'", ",", "'gzip, deflate'", ")", ",", "(", "'Accept'", ",", "'*/*'", ")", ",", "(", "'Content-Length'", ",", "'0'", ")", ",", "(", "'User-Agent'", ",", "'python-asks/2.2.2'", ")", "]", ")", "# check for a CookieTracker object, and if it's there inject", "# the relevant cookies in to the (next) request.", "# What the fuck is this shit.", "if", "self", ".", "persist_cookies", "is", "not", "None", ":", "self", ".", "cookies", ".", "update", "(", "self", ".", "persist_cookies", ".", "get_additional_cookies", "(", "self", ".", "host", ",", "self", ".", "path", ")", ")", "# formulate path / query and intended extra querys for use in uri", "self", ".", "_build_path", "(", ")", "# handle building the request body, if any", "body", "=", "''", "if", "any", "(", "(", "self", ".", "data", ",", "self", ".", "files", ",", "self", ".", "json", "is", "not", "None", ")", ")", ":", "content_type", ",", "content_len", ",", "body", "=", "await", "self", ".", "_formulate_body", "(", ")", "asks_headers", "[", "'Content-Type'", "]", "=", "content_type", "asks_headers", "[", "'Content-Length'", "]", "=", "content_len", "# add custom headers, if any", "# note that custom headers take precedence", "if", "self", ".", "headers", "is", "not", "None", ":", "asks_headers", ".", "update", "(", "self", ".", "headers", ")", "# add auth", "if", "self", ".", "auth", "is", "not", "None", ":", "asks_headers", ".", "update", "(", "await", "self", ".", "_auth_handler_pre", "(", ")", ")", "asks_headers", ".", "update", "(", "await", "self", ".", "_auth_handler_post_get_auth", "(", ")", ")", "# add cookies", "if", "self", ".", "cookies", ":", "cookie_str", "=", "''", "for", "k", ",", "v", "in", "self", ".", "cookies", ".", "items", "(", ")", ":", "cookie_str", "+=", "'{}={}; '", ".", "format", "(", "k", ",", "v", ")", "asks_headers", "[", "'Cookie'", "]", "=", "cookie_str", "[", ":", "-", "1", "]", "# Construct h11 body object, if any body.", "if", "body", ":", "if", "not", "isinstance", "(", "body", ",", "bytes", ")", ":", "body", "=", "bytes", "(", "body", ",", "self", ".", "encoding", ")", "asks_headers", "[", "'Content-Length'", "]", "=", "str", "(", "len", "(", "body", ")", ")", "req_body", "=", "h11", ".", "Data", "(", "data", "=", "body", ")", "else", ":", "req_body", "=", "None", "# Construct h11 request object.", "req", "=", "h11", ".", "Request", "(", "method", "=", "self", ".", "method", ",", "target", "=", "self", ".", "path", ",", "headers", "=", "asks_headers", ".", 
"items", "(", ")", ")", "# call i/o handling func", "response_obj", "=", "await", "self", ".", "_request_io", "(", "req", ",", "req_body", ",", "h11_connection", ")", "# check to see if the final socket object is suitable to be returned", "# to the calling session's connection pool.", "# We don't want to return sockets that are of a difference schema or", "# different top level domain, as they are less likely to be useful.", "if", "redirect", ":", "if", "not", "(", "self", ".", "scheme", "==", "self", ".", "initial_scheme", "and", "self", ".", "host", "==", "self", ".", "initial_netloc", ")", ":", "self", ".", "sock", ".", "_active", "=", "False", "if", "self", ".", "streaming", ":", "return", "None", ",", "response_obj", "return", "self", ".", "sock", ",", "response_obj" ]
Acts as the central hub for preparing requests to be sent, and returning them upon completion. Generally just pokes through self's attribs and makes decisions about what to do. Returns: sock: The socket to be returned to the calling session's pool. Response: The response object, after any redirects. If there were redirects, the redirect responses will be stored in the final response object's `.history`.
[ "Acts", "as", "the", "central", "hub", "for", "preparing", "requests", "to", "be", "sent", "and", "returning", "them", "upon", "completion", ".", "Generally", "just", "pokes", "through", "self", "s", "attribs", "and", "makes", "decisions", "about", "what", "to", "do", "." ]
python
train
38.619048
soasme/rio
rio/models/utils.py
https://github.com/soasme/rio/blob/f722eb0ff4b0382bceaff77737f0b87cb78429e7/rio/models/utils.py#L43-L61
def ins2dict(ins, kind=''): """Turn a SQLAlchemy Model instance to dict. :param ins: a SQLAlchemy instance. :param kind: specify which kind of dict tranformer should be called. :return: dict, instance data. If model has defined `to_xxx_dict`, then ins2dict(ins, 'xxx') will call `model.to_xxx_dict()`. Default kind is ''. If model not defined 'to_dict', then ins2dict will transform according by model column definition. """ if kind and hasattr(ins, 'to_%s_dict' % kind): return getattr(ins, 'to_%s_dict' % kind)() elif hasattr(ins, 'to_dict'): return getattr(ins, 'to_dict')() else: return _turn_row_to_dict(ins)
[ "def", "ins2dict", "(", "ins", ",", "kind", "=", "''", ")", ":", "if", "kind", "and", "hasattr", "(", "ins", ",", "'to_%s_dict'", "%", "kind", ")", ":", "return", "getattr", "(", "ins", ",", "'to_%s_dict'", "%", "kind", ")", "(", ")", "elif", "hasattr", "(", "ins", ",", "'to_dict'", ")", ":", "return", "getattr", "(", "ins", ",", "'to_dict'", ")", "(", ")", "else", ":", "return", "_turn_row_to_dict", "(", "ins", ")" ]
Turn a SQLAlchemy Model instance to dict. :param ins: a SQLAlchemy instance. :param kind: specify which kind of dict tranformer should be called. :return: dict, instance data. If model has defined `to_xxx_dict`, then ins2dict(ins, 'xxx') will call `model.to_xxx_dict()`. Default kind is ''. If model not defined 'to_dict', then ins2dict will transform according by model column definition.
[ "Turn", "a", "SQLAlchemy", "Model", "instance", "to", "dict", "." ]
python
train
35.263158
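A hedged sketch of how ins2dict dispatches; the User model and its to_public_dict method are hypothetical and exist only to illustrate the lookup order.

user = session.query(User).first()   # some SQLAlchemy instance (assumed)
ins2dict(user)                       # calls user.to_dict() when the model defines it
ins2dict(user, kind='public')        # prefers user.to_public_dict() if that exists
# a model defining neither falls back to the per-column _turn_row_to_dict conversion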
dpkp/kafka-python
kafka/producer/sender.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/producer/sender.py#L162-L166
def initiate_close(self): """Start closing the sender (won't complete until all data is sent).""" self._running = False self._accumulator.close() self.wakeup()
[ "def", "initiate_close", "(", "self", ")", ":", "self", ".", "_running", "=", "False", "self", ".", "_accumulator", ".", "close", "(", ")", "self", ".", "wakeup", "(", ")" ]
Start closing the sender (won't complete until all data is sent).
[ "Start", "closing", "the", "sender", "(", "won", "t", "complete", "until", "all", "data", "is", "sent", ")", "." ]
python
train
37.4
nickmckay/LiPD-utilities
Python/lipd/__init__.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/__init__.py#L649-L665
def showLipds(D=None): """ Display the dataset names of a given LiPD data | Example | lipd.showLipds(D) :pararm dict D: LiPD data :return none: """ if not D: print("Error: LiPD data not provided. Pass LiPD data into the function.") else: print(json.dumps(D.keys(), indent=2)) return
[ "def", "showLipds", "(", "D", "=", "None", ")", ":", "if", "not", "D", ":", "print", "(", "\"Error: LiPD data not provided. Pass LiPD data into the function.\"", ")", "else", ":", "print", "(", "json", ".", "dumps", "(", "D", ".", "keys", "(", ")", ",", "indent", "=", "2", ")", ")", "return" ]
Display the dataset names of a given LiPD data | Example | lipd.showLipds(D) :pararm dict D: LiPD data :return none:
[ "Display", "the", "dataset", "names", "of", "a", "given", "LiPD", "data" ]
python
train
19.176471
ejeschke/ginga
ginga/examples/gl/example_wireframe.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/examples/gl/example_wireframe.py#L110-L128
def get_wireframe(viewer, x, y, z, **kwargs): """Produce a compound object of paths implementing a wireframe. x, y, z are expected to be 2D arrays of points making up the mesh. """ # TODO: something like this would make a great utility function # for ginga n, m = x.shape objs = [] for i in range(n): pts = np.asarray([(x[i][j], y[i][j], z[i][j]) for j in range(m)]) objs.append(viewer.dc.Path(pts, **kwargs)) for j in range(m): pts = np.asarray([(x[i][j], y[i][j], z[i][j]) for i in range(n)]) objs.append(viewer.dc.Path(pts, **kwargs)) return viewer.dc.CompoundObject(*objs)
[ "def", "get_wireframe", "(", "viewer", ",", "x", ",", "y", ",", "z", ",", "*", "*", "kwargs", ")", ":", "# TODO: something like this would make a great utility function", "# for ginga", "n", ",", "m", "=", "x", ".", "shape", "objs", "=", "[", "]", "for", "i", "in", "range", "(", "n", ")", ":", "pts", "=", "np", ".", "asarray", "(", "[", "(", "x", "[", "i", "]", "[", "j", "]", ",", "y", "[", "i", "]", "[", "j", "]", ",", "z", "[", "i", "]", "[", "j", "]", ")", "for", "j", "in", "range", "(", "m", ")", "]", ")", "objs", ".", "append", "(", "viewer", ".", "dc", ".", "Path", "(", "pts", ",", "*", "*", "kwargs", ")", ")", "for", "j", "in", "range", "(", "m", ")", ":", "pts", "=", "np", ".", "asarray", "(", "[", "(", "x", "[", "i", "]", "[", "j", "]", ",", "y", "[", "i", "]", "[", "j", "]", ",", "z", "[", "i", "]", "[", "j", "]", ")", "for", "i", "in", "range", "(", "n", ")", "]", ")", "objs", ".", "append", "(", "viewer", ".", "dc", ".", "Path", "(", "pts", ",", "*", "*", "kwargs", ")", ")", "return", "viewer", ".", "dc", ".", "CompoundObject", "(", "*", "objs", ")" ]
Produce a compound object of paths implementing a wireframe. x, y, z are expected to be 2D arrays of points making up the mesh.
[ "Produce", "a", "compound", "object", "of", "paths", "implementing", "a", "wireframe", ".", "x", "y", "z", "are", "expected", "to", "be", "2D", "arrays", "of", "points", "making", "up", "the", "mesh", "." ]
python
train
36
sbarham/dsrt
dsrt/data/SampleSet.py
https://github.com/sbarham/dsrt/blob/bc664739f2f52839461d3e72773b71146fd56a9a/dsrt/data/SampleSet.py#L36-L40
def save_sampleset(self, f, name): '''Serialize the sampleset to file using the HDF5 format. Name is usually in {train, test}.''' f.create_dataset(name + '_encoder_x', data=self.encoder_x) f.create_dataset(name + '_decoder_x', data=self.decoder_x) f.create_dataset(name + '_decoder_y', data=self.decoder_y)
[ "def", "save_sampleset", "(", "self", ",", "f", ",", "name", ")", ":", "f", ".", "create_dataset", "(", "name", "+", "'_encoder_x'", ",", "data", "=", "self", ".", "encoder_x", ")", "f", ".", "create_dataset", "(", "name", "+", "'_decoder_x'", ",", "data", "=", "self", ".", "decoder_x", ")", "f", ".", "create_dataset", "(", "name", "+", "'_decoder_y'", ",", "data", "=", "self", ".", "decoder_y", ")" ]
Serialize the sampleset to file using the HDF5 format. Name is usually in {train, test}.
[ "Serialize", "the", "sampleset", "to", "file", "using", "the", "HDF5", "format", ".", "Name", "is", "usually", "in", "{", "train", "test", "}", "." ]
python
train
66.8
tjcsl/cslbot
cslbot/helpers/acl.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/helpers/acl.py#L24-L45
def set_admin(msg, handler): """Handle admin verification responses from NickServ. | If NickServ tells us that the nick is authed, mark it as verified. """ if handler.config['feature']['servicestype'] == "ircservices": match = re.match("STATUS (.*) ([0-3])", msg) elif handler.config['feature']['servicestype'] == "atheme": match = re.match("(.*) ACC ([0-3])", msg) if match: status = int(match.group(2)) nick = match.group(1) if status != 3: return with handler.db.session_scope() as session: admin = session.query(Permissions).filter(Permissions.nick == nick).first() if admin is None: session.add(Permissions(nick=nick, role='admin', registered=True, time=datetime.now())) else: admin.registered = True admin.time = datetime.now()
[ "def", "set_admin", "(", "msg", ",", "handler", ")", ":", "if", "handler", ".", "config", "[", "'feature'", "]", "[", "'servicestype'", "]", "==", "\"ircservices\"", ":", "match", "=", "re", ".", "match", "(", "\"STATUS (.*) ([0-3])\"", ",", "msg", ")", "elif", "handler", ".", "config", "[", "'feature'", "]", "[", "'servicestype'", "]", "==", "\"atheme\"", ":", "match", "=", "re", ".", "match", "(", "\"(.*) ACC ([0-3])\"", ",", "msg", ")", "if", "match", ":", "status", "=", "int", "(", "match", ".", "group", "(", "2", ")", ")", "nick", "=", "match", ".", "group", "(", "1", ")", "if", "status", "!=", "3", ":", "return", "with", "handler", ".", "db", ".", "session_scope", "(", ")", "as", "session", ":", "admin", "=", "session", ".", "query", "(", "Permissions", ")", ".", "filter", "(", "Permissions", ".", "nick", "==", "nick", ")", ".", "first", "(", ")", "if", "admin", "is", "None", ":", "session", ".", "add", "(", "Permissions", "(", "nick", "=", "nick", ",", "role", "=", "'admin'", ",", "registered", "=", "True", ",", "time", "=", "datetime", ".", "now", "(", ")", ")", ")", "else", ":", "admin", ".", "registered", "=", "True", "admin", ".", "time", "=", "datetime", ".", "now", "(", ")" ]
Handle admin verification responses from NickServ. | If NickServ tells us that the nick is authed, mark it as verified.
[ "Handle", "admin", "verification", "responses", "from", "NickServ", "." ]
python
train
40.090909
Neurita/boyle
boyle/mhd/write.py
https://github.com/Neurita/boyle/blob/2dae7199849395a209c887d5f30506e1de8a9ad9/boyle/mhd/write.py#L73-L147
def write_mhd_file(filename, data, shape=None, meta_dict=None): """ Write the `data` and `meta_dict` in two files with names that use `filename` as a prefix. Parameters ---------- filename: str Path to the output file. This is going to be used as a preffix. Two files will be created, one with a '.mhd' extension and another with '.raw'. If `filename` has any of these already they will be taken into account to build the filenames. data: numpy.ndarray n-dimensional image data array. shape: tuple Tuple describing the shape of `data` Default: data.shape meta_dict: dict Dictionary with the fields of the metadata .mhd file Default: {} Returns ------- mhd_filename: str Path to the .mhd file raw_filename: str Path to the .raw file """ # check its extension ext = get_extension(filename) fname = op.basename(filename) if ext != '.mhd' or ext != '.raw': mhd_filename = fname + '.mhd' raw_filename = fname + '.raw' elif ext == '.mhd': mhd_filename = fname raw_filename = remove_ext(fname) + '.raw' elif ext == '.raw': mhd_filename = remove_ext(fname) + '.mhd' raw_filename = fname else: raise ValueError('`filename` extension {} from {} is not recognised. ' 'Expected .mhd or .raw.'.format(ext, filename)) # default values if meta_dict is None: meta_dict = {} if shape is None: shape = data.shape # prepare the default header meta_dict['ObjectType'] = meta_dict.get('ObjectType', 'Image') meta_dict['BinaryData'] = meta_dict.get('BinaryData', 'True' ) meta_dict['BinaryDataByteOrderMSB'] = meta_dict.get('BinaryDataByteOrderMSB', 'False') meta_dict['ElementType'] = meta_dict.get('ElementType', NUMPY_TO_MHD_TYPE[data.dtype.type]) meta_dict['NDims'] = meta_dict.get('NDims', str(len(shape))) meta_dict['DimSize'] = meta_dict.get('DimSize', ' '.join([str(i) for i in shape])) meta_dict['ElementDataFile'] = meta_dict.get('ElementDataFile', raw_filename) # target files mhd_filename = op.join(op.dirname(filename), mhd_filename) raw_filename = op.join(op.dirname(filename), raw_filename) # write the header write_meta_header(mhd_filename, meta_dict) # write the data dump_raw_data(raw_filename, data) return mhd_filename, raw_filename
[ "def", "write_mhd_file", "(", "filename", ",", "data", ",", "shape", "=", "None", ",", "meta_dict", "=", "None", ")", ":", "# check its extension", "ext", "=", "get_extension", "(", "filename", ")", "fname", "=", "op", ".", "basename", "(", "filename", ")", "if", "ext", "!=", "'.mhd'", "or", "ext", "!=", "'.raw'", ":", "mhd_filename", "=", "fname", "+", "'.mhd'", "raw_filename", "=", "fname", "+", "'.raw'", "elif", "ext", "==", "'.mhd'", ":", "mhd_filename", "=", "fname", "raw_filename", "=", "remove_ext", "(", "fname", ")", "+", "'.raw'", "elif", "ext", "==", "'.raw'", ":", "mhd_filename", "=", "remove_ext", "(", "fname", ")", "+", "'.mhd'", "raw_filename", "=", "fname", "else", ":", "raise", "ValueError", "(", "'`filename` extension {} from {} is not recognised. '", "'Expected .mhd or .raw.'", ".", "format", "(", "ext", ",", "filename", ")", ")", "# default values", "if", "meta_dict", "is", "None", ":", "meta_dict", "=", "{", "}", "if", "shape", "is", "None", ":", "shape", "=", "data", ".", "shape", "# prepare the default header", "meta_dict", "[", "'ObjectType'", "]", "=", "meta_dict", ".", "get", "(", "'ObjectType'", ",", "'Image'", ")", "meta_dict", "[", "'BinaryData'", "]", "=", "meta_dict", ".", "get", "(", "'BinaryData'", ",", "'True'", ")", "meta_dict", "[", "'BinaryDataByteOrderMSB'", "]", "=", "meta_dict", ".", "get", "(", "'BinaryDataByteOrderMSB'", ",", "'False'", ")", "meta_dict", "[", "'ElementType'", "]", "=", "meta_dict", ".", "get", "(", "'ElementType'", ",", "NUMPY_TO_MHD_TYPE", "[", "data", ".", "dtype", ".", "type", "]", ")", "meta_dict", "[", "'NDims'", "]", "=", "meta_dict", ".", "get", "(", "'NDims'", ",", "str", "(", "len", "(", "shape", ")", ")", ")", "meta_dict", "[", "'DimSize'", "]", "=", "meta_dict", ".", "get", "(", "'DimSize'", ",", "' '", ".", "join", "(", "[", "str", "(", "i", ")", "for", "i", "in", "shape", "]", ")", ")", "meta_dict", "[", "'ElementDataFile'", "]", "=", "meta_dict", ".", "get", "(", "'ElementDataFile'", ",", "raw_filename", ")", "# target files", "mhd_filename", "=", "op", ".", "join", "(", "op", ".", "dirname", "(", "filename", ")", ",", "mhd_filename", ")", "raw_filename", "=", "op", ".", "join", "(", "op", ".", "dirname", "(", "filename", ")", ",", "raw_filename", ")", "# write the header", "write_meta_header", "(", "mhd_filename", ",", "meta_dict", ")", "# write the data", "dump_raw_data", "(", "raw_filename", ",", "data", ")", "return", "mhd_filename", ",", "raw_filename" ]
Write the `data` and `meta_dict` in two files with names
that use `filename` as a prefix.

Parameters
----------
filename: str
    Path to the output file.
    This is going to be used as a prefix.
    Two files will be created, one with a '.mhd' extension
    and another with '.raw'. If `filename` has any of these already
    they will be taken into account to build the filenames.

data: numpy.ndarray
    n-dimensional image data array.

shape: tuple
    Tuple describing the shape of `data`
    Default: data.shape

meta_dict: dict
    Dictionary with the fields of the metadata .mhd file
    Default: {}

Returns
-------
mhd_filename: str
    Path to the .mhd file

raw_filename: str
    Path to the .raw file
[ "Write", "the", "data", "and", "meta_dict", "in", "two", "files", "with", "names", "that", "use", "filename", "as", "a", "prefix", "." ]
python
valid
34.213333
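A minimal usage sketch for the write_mhd_file entry above. The import path is an assumption that mirrors the file path boyle/mhd/write.py, and the output prefix and array are illustrative:

import numpy as np
from boyle.mhd.write import write_mhd_file  # assumed import path

# A small float32 volume; MetaImage keeps the header (dimensions, element type,
# data file name) in the .mhd file and the raw voxel buffer in the .raw file.
volume = np.zeros((64, 64, 32), dtype=np.float32)
mhd_path, raw_path = write_mhd_file('/tmp/example_volume', volume)
print(mhd_path, raw_path)  # /tmp/example_volume.mhd /tmp/example_volume.raw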
OLC-Bioinformatics/sipprverse
cgecore/utility.py
https://github.com/OLC-Bioinformatics/sipprverse/blob/d4f10cdf8e1a39dac0953db61c21c97efc6006de/cgecore/utility.py#L576-L584
def copy_dir(src, dst):
    """ this function will simply copy the directory from the source path to the dest
    path given as input """
    try:
        debug.log("copy dir from "+ src, "to "+ dst)
        shutil.copytree(src, dst)
    except Exception as e:
        debug.log("Error: happened while copying!\n%s\n"%e)
[ "def", "copy_dir", "(", "src", ",", "dst", ")", ":", "try", ":", "debug", ".", "log", "(", "\"copy dir from \"", "+", "src", ",", "\"to \"", "+", "dst", ")", "shutil", ".", "copytree", "(", "src", ",", "dst", ")", "except", "Exception", "as", "e", ":", "debug", ".", "log", "(", "\"Error: happened while copying!\\n%s\\n\"", "%", "e", ")" ]
this function will simply copy the directory from the source path to the dest path given as input
[ "this", "function", "will", "simply", "copy", "the", "file", "from", "the", "source", "path", "to", "the", "dest", "path", "given", "as", "input" ]
python
train
33.333333
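copy_dir above delegates to shutil.copytree, which copies the whole tree and fails if the destination already exists. A self-contained sketch of the same behaviour using only the standard library logger instead of the package's debug object (names and paths are illustrative):

import logging
import shutil

log = logging.getLogger(__name__)

def copy_dir_sketch(src, dst):
    # copytree recreates the directory tree at dst; dst must not exist yet
    try:
        log.info("copy dir from %s to %s", src, dst)
        shutil.copytree(src, dst)
    except Exception as exc:
        log.error("Error happened while copying: %s", exc)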
tumblr/pytumblr
pytumblr/__init__.py
https://github.com/tumblr/pytumblr/blob/4a5cd7c4b8ae78d12811d9fd52620afa1692a415/pytumblr/__init__.py#L372-L389
def create_chat(self, blogname, **kwargs): """ Create a chat post on a blog :param blogname: a string, the url of the blog you want to post to. :param state: a string, The state of the post. :param tags: a list of tags that you want applied to the post :param tweet: a string, the customized tweet that you want :param date: a string, the GMT date and time of the post :param format: a string, sets the format type of the post. html or markdown :param slug: a string, a short text summary to the end of the post url :param title: a string, the title of the conversation :param conversation: a string, the conversation you are posting :returns: a dict created from the JSON response """ kwargs.update({"type": "chat"}) return self._send_post(blogname, kwargs)
[ "def", "create_chat", "(", "self", ",", "blogname", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "\"type\"", ":", "\"chat\"", "}", ")", "return", "self", ".", "_send_post", "(", "blogname", ",", "kwargs", ")" ]
Create a chat post on a blog :param blogname: a string, the url of the blog you want to post to. :param state: a string, The state of the post. :param tags: a list of tags that you want applied to the post :param tweet: a string, the customized tweet that you want :param date: a string, the GMT date and time of the post :param format: a string, sets the format type of the post. html or markdown :param slug: a string, a short text summary to the end of the post url :param title: a string, the title of the conversation :param conversation: a string, the conversation you are posting :returns: a dict created from the JSON response
[ "Create", "a", "chat", "post", "on", "a", "blog" ]
python
train
47.944444
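Because create_chat only adds type='chat' to the keyword arguments and posts them, a call follows the documented parameters directly. The credentials and blog name below are placeholders, not real values:

import pytumblr

client = pytumblr.TumblrRestClient('consumer_key', 'consumer_secret',
                                   'oauth_token', 'oauth_secret')  # placeholder credentials
response = client.create_chat(
    'myblog.tumblr.com',
    title='Standup notes',
    conversation='dev: shipped the fix\nops: deploying now',
    tags=['standup', 'chat'],
    state='draft',
)
print(response)  # dict built from the JSON response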
tylertreat/BigQuery-Python
bigquery/client.py
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L2007-L2049
def dataset_resource(self, ref_id, friendly_name=None, description=None, access=None, location=None, project_id=None): """See https://developers.google.com/bigquery/docs/reference/v2/datasets#resource Parameters ---------- ref_id : str Dataset id (the reference id, not the integer id) friendly_name : str, optional An optional descriptive name for the dataset description : str, optional An optional description for the dataset access : list, optional Indicating access permissions location: str, optional, 'EU' or 'US' An optional geographical location for the dataset(EU or US) project_id: str Unique ``str`` identifying the BigQuery project contains the dataset Returns ------- dict Representing BigQuery dataset resource """ project_id = self._get_project_id(project_id) data = { "datasetReference": { "datasetId": ref_id, "projectId": project_id } } if friendly_name: data["friendlyName"] = friendly_name if description: data["description"] = description if access: data["access"] = access if location: data["location"] = location return data
[ "def", "dataset_resource", "(", "self", ",", "ref_id", ",", "friendly_name", "=", "None", ",", "description", "=", "None", ",", "access", "=", "None", ",", "location", "=", "None", ",", "project_id", "=", "None", ")", ":", "project_id", "=", "self", ".", "_get_project_id", "(", "project_id", ")", "data", "=", "{", "\"datasetReference\"", ":", "{", "\"datasetId\"", ":", "ref_id", ",", "\"projectId\"", ":", "project_id", "}", "}", "if", "friendly_name", ":", "data", "[", "\"friendlyName\"", "]", "=", "friendly_name", "if", "description", ":", "data", "[", "\"description\"", "]", "=", "description", "if", "access", ":", "data", "[", "\"access\"", "]", "=", "access", "if", "location", ":", "data", "[", "\"location\"", "]", "=", "location", "return", "data" ]
See https://developers.google.com/bigquery/docs/reference/v2/datasets#resource Parameters ---------- ref_id : str Dataset id (the reference id, not the integer id) friendly_name : str, optional An optional descriptive name for the dataset description : str, optional An optional description for the dataset access : list, optional Indicating access permissions location: str, optional, 'EU' or 'US' An optional geographical location for the dataset(EU or US) project_id: str Unique ``str`` identifying the BigQuery project contains the dataset Returns ------- dict Representing BigQuery dataset resource
[ "See", "https", ":", "//", "developers", ".", "google", ".", "com", "/", "bigquery", "/", "docs", "/", "reference", "/", "v2", "/", "datasets#resource" ]
python
train
32.906977
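dataset_resource only assembles a plain dict, so the shape of the returned resource can be shown directly; optional keys appear only when the corresponding argument is passed. The project and dataset ids below are made up:

resource = {
    "datasetReference": {
        "datasetId": "analytics_staging",   # ref_id
        "projectId": "my-gcp-project"       # resolved project_id
    },
    # present only because friendly_name, description and location were given
    "friendlyName": "Staging analytics",
    "description": "Intermediate tables for daily jobs",
    "location": "EU",
}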
baccuslab/shannon
shannon/bottleneck.py
https://github.com/baccuslab/shannon/blob/38abb4d9e53208ffd1c4149ef9fdf3abceccac48/shannon/bottleneck.py#L31-L38
def change_response(x, prob, index): ''' change every response in x that matches 'index' by randomly sampling from prob ''' #pdb.set_trace() N = (x==index).sum() #x[x==index]=9 x[x==index] = dist.sample(N)
[ "def", "change_response", "(", "x", ",", "prob", ",", "index", ")", ":", "#pdb.set_trace()", "N", "=", "(", "x", "==", "index", ")", ".", "sum", "(", ")", "#x[x==index]=9", "x", "[", "x", "==", "index", "]", "=", "dist", ".", "sample", "(", "N", ")" ]
change every response in x that matches 'index' by randomly sampling from prob
[ "change", "every", "response", "in", "x", "that", "matches", "index", "by", "randomly", "sampling", "from", "prob" ]
python
train
28.25
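The change_response snippet above draws replacements from a module-level `dist` object rather than from the `prob` argument it receives. A standalone sketch of the documented intent, drawing replacement responses according to `prob` with numpy (the function and variable names are illustrative, not part of the shannon package):

import numpy as np

def change_response_sketch(x, prob, index, rng=None):
    # replace every response equal to `index` with a draw from the
    # categorical distribution defined by `prob`
    if rng is None:
        rng = np.random.default_rng()
    x = np.asarray(x).copy()
    mask = x == index
    x[mask] = rng.choice(len(prob), size=int(mask.sum()), p=prob)
    return x

responses = np.array([0, 2, 1, 2, 2])
print(change_response_sketch(responses, prob=[0.5, 0.5, 0.0], index=2))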
BlueBrain/NeuroM
neurom/morphmath.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/morphmath.py#L129-L139
def scalar_projection(v1, v2):
    '''compute the scalar projection of v1 upon v2

    Args:
        v1, v2: iterable
            indices 0, 1, 2 corresponding to cartesian coordinates

    Returns:
        scalar projection of v1 onto the direction of v2
    '''
    return np.dot(v1, v2) / np.linalg.norm(v2)
[ "def", "scalar_projection", "(", "v1", ",", "v2", ")", ":", "return", "np", ".", "dot", "(", "v1", ",", "v2", ")", "/", "np", ".", "linalg", ".", "norm", "(", "v2", ")" ]
compute the scalar projection of v1 upon v2

Args:
    v1, v2: iterable
        indices 0, 1, 2 corresponding to cartesian coordinates

Returns:
    scalar projection of v1 onto the direction of v2
[ "compute", "the", "scalar", "projection", "of", "v1", "upon", "v2" ]
python
train
28.090909
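A quick worked example of the formula above: for v1 = (3, 4, 0) and v2 = (10, 0, 0), the dot product is 30 and |v2| = 10, so the scalar projection is 3.0 (the length of v1's shadow along v2):

import numpy as np

v1 = np.array([3.0, 4.0, 0.0])
v2 = np.array([10.0, 0.0, 0.0])
# dot(v1, v2) / |v2| = 30 / 10
print(np.dot(v1, v2) / np.linalg.norm(v2))  # 3.0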
log2timeline/plaso
plaso/engine/processing_status.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/processing_status.py#L471-L518
def UpdateWorkerStatus( self, identifier, status, pid, used_memory, display_name, number_of_consumed_sources, number_of_produced_sources, number_of_consumed_events, number_of_produced_events, number_of_consumed_event_tags, number_of_produced_event_tags, number_of_consumed_reports, number_of_produced_reports, number_of_consumed_warnings, number_of_produced_warnings): """Updates the status of a worker. Args: identifier (str): worker identifier. status (str): human readable status of the worker e.g. 'Idle'. pid (int): process identifier (PID). used_memory (int): size of used memory in bytes. display_name (str): human readable of the file entry currently being processed by the worker. number_of_consumed_sources (int): total number of event sources consumed by the worker. number_of_produced_sources (int): total number of event sources produced by the worker. number_of_consumed_events (int): total number of events consumed by the worker. number_of_produced_events (int): total number of events produced by the worker. number_of_consumed_event_tags (int): total number of event tags consumed by the worker. number_of_produced_event_tags (int): total number of event tags produced by the worker. number_of_consumed_reports (int): total number of event reports consumed by the process. number_of_produced_reports (int): total number of event reports produced by the process. number_of_consumed_warnings (int): total number of warnings consumed by the worker. number_of_produced_warnings (int): total number of warnings produced by the worker. """ if identifier not in self._workers_status: self._workers_status[identifier] = ProcessStatus() process_status = self._workers_status[identifier] self._UpdateProcessStatus( process_status, identifier, status, pid, used_memory, display_name, number_of_consumed_sources, number_of_produced_sources, number_of_consumed_events, number_of_produced_events, number_of_consumed_event_tags, number_of_produced_event_tags, number_of_consumed_reports, number_of_produced_reports, number_of_consumed_warnings, number_of_produced_warnings)
[ "def", "UpdateWorkerStatus", "(", "self", ",", "identifier", ",", "status", ",", "pid", ",", "used_memory", ",", "display_name", ",", "number_of_consumed_sources", ",", "number_of_produced_sources", ",", "number_of_consumed_events", ",", "number_of_produced_events", ",", "number_of_consumed_event_tags", ",", "number_of_produced_event_tags", ",", "number_of_consumed_reports", ",", "number_of_produced_reports", ",", "number_of_consumed_warnings", ",", "number_of_produced_warnings", ")", ":", "if", "identifier", "not", "in", "self", ".", "_workers_status", ":", "self", ".", "_workers_status", "[", "identifier", "]", "=", "ProcessStatus", "(", ")", "process_status", "=", "self", ".", "_workers_status", "[", "identifier", "]", "self", ".", "_UpdateProcessStatus", "(", "process_status", ",", "identifier", ",", "status", ",", "pid", ",", "used_memory", ",", "display_name", ",", "number_of_consumed_sources", ",", "number_of_produced_sources", ",", "number_of_consumed_events", ",", "number_of_produced_events", ",", "number_of_consumed_event_tags", ",", "number_of_produced_event_tags", ",", "number_of_consumed_reports", ",", "number_of_produced_reports", ",", "number_of_consumed_warnings", ",", "number_of_produced_warnings", ")" ]
Updates the status of a worker. Args: identifier (str): worker identifier. status (str): human readable status of the worker e.g. 'Idle'. pid (int): process identifier (PID). used_memory (int): size of used memory in bytes. display_name (str): human readable of the file entry currently being processed by the worker. number_of_consumed_sources (int): total number of event sources consumed by the worker. number_of_produced_sources (int): total number of event sources produced by the worker. number_of_consumed_events (int): total number of events consumed by the worker. number_of_produced_events (int): total number of events produced by the worker. number_of_consumed_event_tags (int): total number of event tags consumed by the worker. number_of_produced_event_tags (int): total number of event tags produced by the worker. number_of_consumed_reports (int): total number of event reports consumed by the process. number_of_produced_reports (int): total number of event reports produced by the process. number_of_consumed_warnings (int): total number of warnings consumed by the worker. number_of_produced_warnings (int): total number of warnings produced by the worker.
[ "Updates", "the", "status", "of", "a", "worker", "." ]
python
train
48.854167
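A call sketch for UpdateWorkerStatus above, passing one counter per tracked item type. The import path mirrors the file plaso/engine/processing_status.py and the containing class is assumed to be ProcessingStatus; all identifiers and numbers are illustrative:

from plaso.engine import processing_status  # assumed import path

status = processing_status.ProcessingStatus()  # assumed containing class
status.UpdateWorkerStatus(
    'Worker_00', 'running', pid=4242, used_memory=64 * 1024 * 1024,
    display_name='OS:/cases/image.dd',
    number_of_consumed_sources=10, number_of_produced_sources=2,
    number_of_consumed_events=1500, number_of_produced_events=1500,
    number_of_consumed_event_tags=0, number_of_produced_event_tags=0,
    number_of_consumed_reports=0, number_of_produced_reports=0,
    number_of_consumed_warnings=3, number_of_produced_warnings=3)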