Dataset columns (per-record fields shown in the records below; code_tokens and docstring_tokens are whitespace/punctuation tokenizations of code and docstring):

    repo              string (7-54 chars)
    path              string (4-192 chars)
    url               string (87-284 chars)
    code              string (78-104k chars)
    code_tokens       sequence
    docstring         string (1-46.9k chars)
    docstring_tokens  sequence
    language          string (1 distinct value)
    partition         string (3 distinct values)
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_shape_expression_parser.py#L101-L109
def visitShapeNot(self, ctx: ShExDocParser.ShapeNotContext):
    """ shapeNot: negation? shapeAtom """
    if ctx.negation():
        self.expr = ShapeNot(id=self.label)
        sn = ShexShapeExpressionParser(self.context)
        sn.visit(ctx.shapeAtom())
        self.expr.shapeExpr = sn.expr if sn.expr is not None else Shape()
    else:
        self.visitChildren(ctx)
[ "def", "visitShapeNot", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "ShapeNotContext", ")", ":", "if", "ctx", ".", "negation", "(", ")", ":", "self", ".", "expr", "=", "ShapeNot", "(", "id", "=", "self", ".", "label", ")", "sn", "=", "ShexShapeExpressionParser", "(", "self", ".", "context", ")", "sn", ".", "visit", "(", "ctx", ".", "shapeAtom", "(", ")", ")", "self", ".", "expr", ".", "shapeExpr", "=", "sn", ".", "expr", "if", "sn", ".", "expr", "is", "not", "None", "else", "Shape", "(", ")", "else", ":", "self", ".", "visitChildren", "(", "ctx", ")" ]
shapeNot: negation? shapeAtom
[ "shapeNot", ":", "negation?", "shapeAtom" ]
language: python
partition: train
DeepHorizons/iarm
iarm/arm_instructions/conditional_branch.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/conditional_branch.py#L260-L275
def BVS(self, params):
    """
    BVS label

    Branch to the instruction at label if the V flag is set
    """
    label = self.get_one_parameter(self.ONE_PARAMETER, params)

    self.check_arguments(label_exists=(label,))

    # BVS label
    def BVS_func():
        if self.is_V_set():
            self.register['PC'] = self.labels[label]

    return BVS_func
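The function illustrates iarm's assemble-then-execute pattern: arguments are parsed and validated once, and the returned closure performs the branch each time the simulated instruction runs. A minimal self-contained sketch of that pattern (all names below are illustrative, not iarm's API):

registers = {'PC': 0}
flags = {'V': True}                     # overflow flag of the simulated CPU
labels = {'overflow_handler': 42}

def BVS(label):
    assert label in labels              # assembly-time check
    def BVS_func():                     # deferred run-time behaviour
        if flags['V']:
            registers['PC'] = labels[label]
    return BVS_func

step = BVS('overflow_handler')
step()                                  # V is set, so PC becomes 42
assert registers['PC'] == 42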
[ "def", "BVS", "(", "self", ",", "params", ")", ":", "label", "=", "self", ".", "get_one_parameter", "(", "self", ".", "ONE_PARAMETER", ",", "params", ")", "self", ".", "check_arguments", "(", "label_exists", "=", "(", "label", ",", ")", ")", "# BVS label", "def", "BVS_func", "(", ")", ":", "if", "self", ".", "is_V_set", "(", ")", ":", "self", ".", "register", "[", "'PC'", "]", "=", "self", ".", "labels", "[", "label", "]", "return", "BVS_func" ]
BVS label Branch to the instruction at label if the V flag is set
[ "BVS", "label" ]
language: python
partition: train
serge-sans-paille/pythran
docs/papers/sc2013/hyantes_core.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/docs/papers/sc2013/hyantes_core.py#L4-L14
def run(xmin, ymin, xmax, ymax, step, range_, range_x, range_y, t):
    pt = zeros((range_x, range_y, 3))
    "omp parallel for private(i,j,k,tmp)"
    for i in xrange(range_x):
        for j in xrange(range_y):
            pt[i, j, 0], pt[i, j, 1] = (xmin + step * i) * 180 / math.pi, (ymin + step * j) * 180 / math.pi
            for k in xrange(t.shape[0]):
                tmp = 6368. * math.acos(
                    math.cos(xmin + step * i) * math.cos(t[k, 0]) * math.cos((ymin + step * j) - t[k, 1])
                    + math.sin(xmin + step * i) * math.sin(t[k, 0]))
                if tmp < range_:
                    pt[i, j, 2] += t[k, 2] / (1 + tmp)
    return pt
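The distance term inside the loop is the spherical law of cosines scaled by an Earth radius of 6368 km, with the first coordinate acting as latitude and the second as longitude (both in radians). Isolated as a plain function for reference:

import math

def great_circle_km(lat1, lon1, lat2, lon2):
    """Great-circle distance via the spherical law of cosines (radians in, km out)."""
    return 6368.0 * math.acos(
        math.cos(lat1) * math.cos(lat2) * math.cos(lon1 - lon2)
        + math.sin(lat1) * math.sin(lat2))

# a quarter of a great circle is roughly 10,000 km
assert abs(great_circle_km(0.0, 0.0, 0.0, math.pi / 2) - 6368.0 * math.pi / 2) < 1e-6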
[ "def", "run", "(", "xmin", ",", "ymin", ",", "xmax", ",", "ymax", ",", "step", ",", "range_", ",", "range_x", ",", "range_y", ",", "t", ")", ":", "pt", "=", "zeros", "(", "(", "range_x", ",", "range_y", ",", "3", ")", ")", "for", "i", "in", "xrange", "(", "range_x", ")", ":", "for", "j", "in", "xrange", "(", "range_y", ")", ":", "pt", "[", "i", ",", "j", ",", "0", "]", ",", "pt", "[", "i", ",", "j", ",", "1", "]", "=", "(", "xmin", "+", "step", "*", "i", ")", "*", "180", "/", "math", ".", "pi", ",", "(", "ymin", "+", "step", "*", "j", ")", "*", "180", "/", "math", ".", "pi", "for", "k", "in", "xrange", "(", "t", ".", "shape", "[", "0", "]", ")", ":", "tmp", "=", "6368.", "*", "math", ".", "acos", "(", "math", ".", "cos", "(", "xmin", "+", "step", "*", "i", ")", "*", "math", ".", "cos", "(", "t", "[", "k", ",", "0", "]", ")", "*", "math", ".", "cos", "(", "(", "ymin", "+", "step", "*", "j", ")", "-", "t", "[", "k", ",", "1", "]", ")", "+", "math", ".", "sin", "(", "xmin", "+", "step", "*", "i", ")", "*", "math", ".", "sin", "(", "t", "[", "k", ",", "0", "]", ")", ")", "if", "tmp", "<", "range_", ":", "pt", "[", "i", ",", "j", ",", "2", "]", "+=", "t", "[", "k", ",", "2", "]", "/", "(", "1", "+", "tmp", ")", "return", "pt" ]
omp parallel for private(i,j,k,tmp)
[ "omp", "parallel", "for", "private", "(", "i", "j", "k", "tmp", ")" ]
language: python
partition: train
Spinmob/spinmob
_functions.py
https://github.com/Spinmob/spinmob/blob/f037f5df07f194bcd4a01f4d9916e57b9e8fb45a/_functions.py#L1405-L1415
def submatrix(matrix, i1, i2, j1, j2):
    """
    returns the submatrix defined by the index bounds i1-i2 and j1-j2

    Endpoints included!
    """
    new = []
    for i in range(i1, i2 + 1):
        new.append(matrix[i][j1:j2 + 1])
    return _n.array(new)
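Usage is straightforward, assuming _n is the module's NumPy import:

import numpy as _n

m = [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]]
print(submatrix(m, 0, 1, 1, 2))
# [[2 3]
#  [5 6]]   -- rows 0..1 and columns 1..2, both endpoints included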
[ "def", "submatrix", "(", "matrix", ",", "i1", ",", "i2", ",", "j1", ",", "j2", ")", ":", "new", "=", "[", "]", "for", "i", "in", "range", "(", "i1", ",", "i2", "+", "1", ")", ":", "new", ".", "append", "(", "matrix", "[", "i", "]", "[", "j1", ":", "j2", "+", "1", "]", ")", "return", "_n", ".", "array", "(", "new", ")" ]
returns the submatrix defined by the index bounds i1-i2 and j1-j2 Endpoints included!
[ "returns", "the", "submatrix", "defined", "by", "the", "index", "bounds", "i1", "-", "i2", "and", "j1", "-", "j2" ]
language: python
partition: train
pgmpy/pgmpy
pgmpy/readwrite/BIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/BIF.py#L176-L196
def get_property(self):
    """
    Returns the property of the variable

    Example
    -------------
    >>> from pgmpy.readwrite import BIFReader
    >>> reader = BIFReader("bif_test.bif")
    >>> reader.get_property()
    {'bowel-problem': ['position = (335, 99)'],
     'dog-out': ['position = (300, 195)'],
     'family-out': ['position = (257, 99)'],
     'hear-bark': ['position = (296, 268)'],
     'light-on': ['position = (218, 195)']}
    """
    variable_properties = {}
    for block in self.variable_block():
        name = self.name_expr.searchString(block)[0][0]
        properties = self.property_expr.searchString(block)
        variable_properties[name] = [y.strip() for x in properties for y in x]
    return variable_properties
[ "def", "get_property", "(", "self", ")", ":", "variable_properties", "=", "{", "}", "for", "block", "in", "self", ".", "variable_block", "(", ")", ":", "name", "=", "self", ".", "name_expr", ".", "searchString", "(", "block", ")", "[", "0", "]", "[", "0", "]", "properties", "=", "self", ".", "property_expr", ".", "searchString", "(", "block", ")", "variable_properties", "[", "name", "]", "=", "[", "y", ".", "strip", "(", ")", "for", "x", "in", "properties", "for", "y", "in", "x", "]", "return", "variable_properties" ]
Returns the property of the variable Example ------------- >>> from pgmpy.readwrite import BIFReader >>> reader = BIFReader("bif_test.bif") >>> reader.get_property() {'bowel-problem': ['position = (335, 99)'], 'dog-out': ['position = (300, 195)'], 'family-out': ['position = (257, 99)'], 'hear-bark': ['position = (296, 268)'], 'light-on': ['position = (218, 195)']}
[ "Returns", "the", "property", "of", "the", "variable" ]
language: python
partition: train
RacingTadpole/django-singleton-admin
django_singleton_admin/admin.py
https://github.com/RacingTadpole/django-singleton-admin/blob/0a81454be11fdcbaf95ca5018667a8dff3f45bf7/django_singleton_admin/admin.py#L44-L55
def add_view(self, *args, **kwargs):
    """
    Redirect to the change view if the singleton instance exists.
    """
    try:
        singleton = self.model.objects.get()
    except (self.model.DoesNotExist, self.model.MultipleObjectsReturned):
        kwargs.setdefault("extra_context", {})
        kwargs["extra_context"]["singleton"] = True
        response = super(SingletonAdmin, self).add_view(*args, **kwargs)
        return self.handle_save(args[0], response)
    return redirect(admin_url(self.model, "change", singleton.id))
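Wiring this admin up follows the usual Django pattern; SiteConfig below is a hypothetical one-row settings model, not something shipped by this package:

from django.contrib import admin
from django_singleton_admin.admin import SingletonAdmin

from myapp.models import SiteConfig  # hypothetical singleton model

admin.site.register(SiteConfig, SingletonAdmin)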
[ "def", "add_view", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "singleton", "=", "self", ".", "model", ".", "objects", ".", "get", "(", ")", "except", "(", "self", ".", "model", ".", "DoesNotExist", ",", "self", ".", "model", ".", "MultipleObjectsReturned", ")", ":", "kwargs", ".", "setdefault", "(", "\"extra_context\"", ",", "{", "}", ")", "kwargs", "[", "\"extra_context\"", "]", "[", "\"singleton\"", "]", "=", "True", "response", "=", "super", "(", "SingletonAdmin", ",", "self", ")", ".", "add_view", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "self", ".", "handle_save", "(", "args", "[", "0", "]", ",", "response", ")", "return", "redirect", "(", "admin_url", "(", "self", ".", "model", ",", "\"change\"", ",", "singleton", ".", "id", ")", ")" ]
Redirect to the change view if the singleton instance exists.
[ "Redirect", "to", "the", "change", "view", "if", "the", "singleton", "instance", "exists", "." ]
language: python
partition: train
secdev/scapy
scapy/layers/ipsec.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/ipsec.py#L341-L366
def encrypt(self, sa, esp, key):
    """
    Encrypt an ESP packet

    @param sa:   the SecurityAssociation associated with the ESP packet.
    @param esp:  an unencrypted _ESPPlain packet with valid padding
    @param key:  the secret key used for encryption

    @return:    a valid ESP packet encrypted with this algorithm
    """
    data = esp.data_for_encryption()

    if self.cipher:
        mode_iv = self._format_mode_iv(algo=self, sa=sa, iv=esp.iv)
        cipher = self.new_cipher(key, mode_iv)
        encryptor = cipher.encryptor()

        if self.is_aead:
            aad = struct.pack('!LL', esp.spi, esp.seq)
            encryptor.authenticate_additional_data(aad)
            data = encryptor.update(data) + encryptor.finalize()
            data += encryptor.tag[:self.icv_size]
        else:
            data = encryptor.update(data) + encryptor.finalize()

    return ESP(spi=esp.spi, seq=esp.seq, data=esp.iv + data)
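For AEAD modes the additional authenticated data is just the SPI and sequence number packed big-endian, so its layout can be reproduced standalone:

import struct

aad = struct.pack('!LL', 0x01020304, 7)  # spi=0x01020304, seq=7
assert aad == b'\x01\x02\x03\x04\x00\x00\x00\x07'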
[ "def", "encrypt", "(", "self", ",", "sa", ",", "esp", ",", "key", ")", ":", "data", "=", "esp", ".", "data_for_encryption", "(", ")", "if", "self", ".", "cipher", ":", "mode_iv", "=", "self", ".", "_format_mode_iv", "(", "algo", "=", "self", ",", "sa", "=", "sa", ",", "iv", "=", "esp", ".", "iv", ")", "cipher", "=", "self", ".", "new_cipher", "(", "key", ",", "mode_iv", ")", "encryptor", "=", "cipher", ".", "encryptor", "(", ")", "if", "self", ".", "is_aead", ":", "aad", "=", "struct", ".", "pack", "(", "'!LL'", ",", "esp", ".", "spi", ",", "esp", ".", "seq", ")", "encryptor", ".", "authenticate_additional_data", "(", "aad", ")", "data", "=", "encryptor", ".", "update", "(", "data", ")", "+", "encryptor", ".", "finalize", "(", ")", "data", "+=", "encryptor", ".", "tag", "[", ":", "self", ".", "icv_size", "]", "else", ":", "data", "=", "encryptor", ".", "update", "(", "data", ")", "+", "encryptor", ".", "finalize", "(", ")", "return", "ESP", "(", "spi", "=", "esp", ".", "spi", ",", "seq", "=", "esp", ".", "seq", ",", "data", "=", "esp", ".", "iv", "+", "data", ")" ]
Encrypt an ESP packet @param sa: the SecurityAssociation associated with the ESP packet. @param esp: an unencrypted _ESPPlain packet with valid padding @param key: the secret key used for encryption @return: a valid ESP packet encrypted with this algorithm
[ "Encrypt", "an", "ESP", "packet" ]
language: python
partition: train
saltstack/salt
salt/modules/boto_elb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_elb.py#L1076-L1092
def _remove_tags(conn, load_balancer_names, tags):
    '''
    Delete metadata tags for the specified resource ids.

    :type load_balancer_names: list
    :param load_balancer_names: A list of load balancer names.

    :type tags: list
    :param tags: A list containing just tag names for the tags to be
                 deleted.
    '''
    params = {}
    conn.build_list_params(params, load_balancer_names,
                           'LoadBalancerNames.member.%d')
    conn.build_list_params(params, tags, 'Tags.member.%d.Key')
    return conn.get_status('RemoveTags', params, verb='POST')
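boto's build_list_params numbers each list member into flat query parameters, so for one load balancer and one tag name the resulting dict should look roughly like this (illustrative values, assuming boto's usual 1-based member numbering):

params = {
    'LoadBalancerNames.member.1': 'my-elb',
    'Tags.member.1.Key': 'environment',
}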
[ "def", "_remove_tags", "(", "conn", ",", "load_balancer_names", ",", "tags", ")", ":", "params", "=", "{", "}", "conn", ".", "build_list_params", "(", "params", ",", "load_balancer_names", ",", "'LoadBalancerNames.member.%d'", ")", "conn", ".", "build_list_params", "(", "params", ",", "tags", ",", "'Tags.member.%d.Key'", ")", "return", "conn", ".", "get_status", "(", "'RemoveTags'", ",", "params", ",", "verb", "=", "'POST'", ")" ]
Delete metadata tags for the specified resource ids. :type load_balancer_names: list :param load_balancer_names: A list of load balancer names. :type tags: list :param tags: A list containing just tag names for the tags to be deleted.
[ "Delete", "metadata", "tags", "for", "the", "specified", "resource", "ids", "." ]
language: python
partition: train
TylerTemp/docpie
docpie/__init__.py
https://github.com/TylerTemp/docpie/blob/e658454b81b6c79a020d499f12ad73496392c09a/docpie/__init__.py#L34-L133
def docpie(doc, argv=None, help=True, version=None,
           stdopt=True, attachopt=True, attachvalue=True,
           helpstyle='python',
           auto2dashes=True, name=None, case_sensitive=False,
           optionsfirst=False, appearedonly=False, namedoptions=False,
           extra=None):
    """
    Parse `argv` based on command-line interface described in `doc`.

    `docpie` creates your command-line interface based on its
    description that you pass as `doc`. Such description can contain
    --options, <positional-argument>, commands, which could be
    [optional], (required), (mutually | exclusive) or repeated...

    Parameters
    ----------
    doc : str
        Description of your command-line interface.
    argv : list of str, optional
        Argument vector to be parsed. sys.argv is used if not provided.
    help : bool (default: True)
        Set to False to disable automatic help on -h or --help options.
    version : any object but None
        If passed, the object will be printed if --version is in `argv`.
    stdopt : bool (default: True)
        When it's True, a long flag must start with --
    attachopt : bool (default: True)
        write/pass several short flags as one, e.g. -abc can mean
        -a -b -c. This only works when stdopt=True
    attachvalue : bool (default: True)
        allow you to write a short flag and its value together,
        e.g. -abc can mean -a bc
    auto2dashes : bool (default: True)
        automatically handle -- (which means "end of command line flags")
    name : str (default: None)
        the "name" of your program. In each of your "usage" sections the
        "name" will be ignored. By default docpie will ignore the first
        element of each "usage".
    case_sensitive : bool (deprecated / default: False)
        specifies whether matching "Usage:" and "Options:" is case
        sensitive
    optionsfirst : bool (default: False)
        everything after the first positional argument will be
        interpreted as a positional argument
    appearedonly : bool (default: False)
        when set True, options that never appear in argv will not be put
        in the result. Note this only affects options
    extra : dict
        customize pre-handled options. See
        http://docpie.comes.today/document/advanced-apis/
        for more information.

    Returns
    -------
    args : dict
        A dictionary, where keys are names of command-line elements
        such as e.g. "--verbose" and "<path>", and values are the
        parsed values of those elements.

    Example
    -------
    >>> from docpie import docpie
    >>> doc = '''
    ... Usage:
    ...     my_program tcp <host> <port> [--timeout=<seconds>]
    ...     my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
    ...     my_program (-h | --help | --version)
    ...
    ... Options:
    ...     -h, --help  Show this screen and exit.
    ...     --baud=<n>  Baudrate [default: 9600]
    ... '''
    >>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30']
    >>> docpie(doc, argv)
    {'--': False,
     '-h': False,
     '--baud': '9600',
     '--help': False,
     '--timeout': '30',
     '--version': False,
     '<host>': '127.0.0.1',
     '<port>': '80',
     'serial': False,
     'tcp': True}

    See also
    --------
    * Full documentation is available in README.md as well as online
      at http://docpie.comes.today/document/quick-start/
    """
    if case_sensitive:
        warnings.warn('`case_sensitive` is deprecated, `docpie` is always '
                      'case insensitive')
    kwargs = locals()
    argv = kwargs.pop('argv')
    pie = Docpie(**kwargs)
    pie.docpie(argv)
    return pie
[ "def", "docpie", "(", "doc", ",", "argv", "=", "None", ",", "help", "=", "True", ",", "version", "=", "None", ",", "stdopt", "=", "True", ",", "attachopt", "=", "True", ",", "attachvalue", "=", "True", ",", "helpstyle", "=", "'python'", ",", "auto2dashes", "=", "True", ",", "name", "=", "None", ",", "case_sensitive", "=", "False", ",", "optionsfirst", "=", "False", ",", "appearedonly", "=", "False", ",", "namedoptions", "=", "False", ",", "extra", "=", "None", ")", ":", "if", "case_sensitive", ":", "warnings", ".", "warn", "(", "'`case_sensitive` is deprecated, `docpie` is always '", "'case insensitive'", ")", "kwargs", "=", "locals", "(", ")", "argv", "=", "kwargs", ".", "pop", "(", "'argv'", ")", "pie", "=", "Docpie", "(", "*", "*", "kwargs", ")", "pie", ".", "docpie", "(", "argv", ")", "return", "pie" ]
Parse `argv` based on command-line interface described in `doc`. `docpie` creates your command-line interface based on its description that you pass as `doc`. Such description can contain --options, <positional-argument>, commands, which could be [optional], (required), (mutually | exclusive) or repeated... Parameters ---------- doc : str Description of your command-line interface. argv : list of str, optional Argument vector to be parsed. sys.argv is used if not provided. help : bool (default: True) Set to False to disable automatic help on -h or --help options. version : any object but None If passed, the object will be printed if --version is in `argv`. stdopt : bool (default: True) When it's True, long flag should only starts with -- attachopt: bool (default: True) write/pass several short flag into one, e.g. -abc can mean -a -b -c. This only works when stdopt=True attachvalue: bool (default: True) allow you to write short flag and its value together, e.g. -abc can mean -a bc auto2dashes: bool (default: True) automaticly handle -- (which means "end of command line flag") name: str (default: None) the "name" of your program. In each of your "usage" the "name" will be ignored. By default docpie will ignore the first element of your "usage". case_sensitive: bool (deprecated / default: False) specifies if it need case sensitive when matching "Usage:" and "Options:" optionsfirst: bool (default: False) everything after first positional argument will be interpreted as positional argument appearedonly: bool (default: False) when set True, the options that never appear in argv will not be put in result. Note this only affect options extra: dict customize pre-handled options. See http://docpie.comes.today/document/advanced-apis/ for more infomation. Returns ------- args : dict A dictionary, where keys are names of command-line elements such as e.g. "--verbose" and "<path>", and values are the parsed values of those elements. Example ------- >>> from docpie import docpie >>> doc = ''' ... Usage: ... my_program tcp <host> <port> [--timeout=<seconds>] ... my_program serial <port> [--baud=<n>] [--timeout=<seconds>] ... my_program (-h | --help | --version) ... ... Options: ... -h, --help Show this screen and exit. ... --baud=<n> Baudrate [default: 9600] ... ''' >>> argv = ['my_program', 'tcp', '127.0.0.1', '80', '--timeout', '30'] >>> docpie(doc, argv) { '--': False, '-h': False, '--baud': '9600', '--help': False, '--timeout': '30', '--version': False, '<host>': '127.0.0.1', '<port>': '80', 'serial': False, 'tcp': True} See also -------- * Full documentation is available in README.md as well as online at http://docpie.comes.today/document/quick-start/
[ "Parse", "argv", "based", "on", "command", "-", "line", "interface", "described", "in", "doc", "." ]
language: python
partition: train
Parquery/icontract
icontract/_represent.py
https://github.com/Parquery/icontract/blob/846e3187869a9ba790e9b893c98e5055e1cce274/icontract/_represent.py#L183-L245
def inspect_decorator(lines: List[str], lineno: int, filename: str) -> DecoratorInspection:
    """
    Parse the file in which the decorator is called and figure out the corresponding call AST node.

    :param lines: lines of the source file corresponding to the decorator call
    :param lineno: line index (starting with 0) of one of the lines in the decorator call
    :param filename: name of the file where decorator is called
    :return: inspected decorator call
    """
    if lineno < 0 or lineno >= len(lines):
        raise ValueError(("Given line number {} of one of the decorator lines "
                          "is not within the range [{}, {}) of lines in {}").format(
                              lineno, 0, len(lines), filename))

    # Go up till a line starts with a decorator
    decorator_lineno = None  # type: Optional[int]
    for i in range(lineno, -1, -1):
        if _DECORATOR_RE.match(lines[i]):
            decorator_lineno = i
            break

    if decorator_lineno is None:
        raise SyntaxError("Decorator corresponding to the line {} could not be found in file {}: {!r}".format(
            lineno + 1, filename, lines[lineno]))

    # Find the decorator end -- it's either a function definition, a class definition or another decorator
    decorator_end_lineno = None  # type: Optional[int]
    for i in range(lineno + 1, len(lines)):
        line = lines[i]
        if _DECORATOR_RE.match(line) or _DEF_CLASS_RE.match(line):
            decorator_end_lineno = i
            break

    if decorator_end_lineno is None:
        raise SyntaxError(("The next statement following the decorator corresponding to the line {} "
                           "could not be found in file {}: {!r}").format(lineno + 1, filename, lines[lineno]))

    decorator_lines = lines[decorator_lineno:decorator_end_lineno]

    # We need to dedent the decorator and add a dummy decoratee so that we can parse its text as valid source code.
    decorator_text = textwrap.dedent("".join(decorator_lines)) + "def dummy_{}(): pass".format(uuid.uuid4().hex)

    atok = asttokens.ASTTokens(decorator_text, parse=True)

    assert isinstance(atok.tree, ast.Module), "Expected the parsed decorator text to live in an AST module."
    module_node = atok.tree

    assert len(module_node.body) == 1, "Expected the module AST of the decorator text to have a single statement."
    assert isinstance(module_node.body[0], ast.FunctionDef), \
        "Expected the only statement in the AST module corresponding to the decorator text to be a function definition."
    func_def_node = module_node.body[0]

    assert len(func_def_node.decorator_list) == 1, \
        "Expected the function AST node corresponding to the decorator text to have a single decorator."
    assert isinstance(func_def_node.decorator_list[0], ast.Call), \
        "Expected the only decorator in the function definition AST node corresponding to the decorator text " \
        "to be a call node."
    call_node = func_def_node.decorator_list[0]

    return DecoratorInspection(atok=atok, node=call_node)
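The dummy-decoratee trick can be reproduced with the standard library alone (the real function uses asttokens because it also needs token-accurate source positions):

import ast

decorator_text = "@require(lambda x: x > 0)\ndef dummy_0(): pass"
module_node = ast.parse(decorator_text)
func_def_node = module_node.body[0]          # the dummy function definition
call_node = func_def_node.decorator_list[0]  # the decorator as an ast.Call
assert isinstance(call_node, ast.Call)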
[ "def", "inspect_decorator", "(", "lines", ":", "List", "[", "str", "]", ",", "lineno", ":", "int", ",", "filename", ":", "str", ")", "->", "DecoratorInspection", ":", "if", "lineno", "<", "0", "or", "lineno", ">=", "len", "(", "lines", ")", ":", "raise", "ValueError", "(", "(", "\"Given line number {} of one of the decorator lines \"", "\"is not within the range [{}, {}) of lines in {}\"", ")", ".", "format", "(", "lineno", ",", "0", ",", "len", "(", "lines", ")", ",", "filename", ")", ")", "# Go up till a line starts with a decorator", "decorator_lineno", "=", "None", "# type: Optional[int]", "for", "i", "in", "range", "(", "lineno", ",", "-", "1", ",", "-", "1", ")", ":", "if", "_DECORATOR_RE", ".", "match", "(", "lines", "[", "i", "]", ")", ":", "decorator_lineno", "=", "i", "break", "if", "decorator_lineno", "is", "None", ":", "raise", "SyntaxError", "(", "\"Decorator corresponding to the line {} could not be found in file {}: {!r}\"", ".", "format", "(", "lineno", "+", "1", ",", "filename", ",", "lines", "[", "lineno", "]", ")", ")", "# Find the decorator end -- it's either a function definition, a class definition or another decorator", "decorator_end_lineno", "=", "None", "# type: Optional[int]", "for", "i", "in", "range", "(", "lineno", "+", "1", ",", "len", "(", "lines", ")", ")", ":", "line", "=", "lines", "[", "i", "]", "if", "_DECORATOR_RE", ".", "match", "(", "line", ")", "or", "_DEF_CLASS_RE", ".", "match", "(", "line", ")", ":", "decorator_end_lineno", "=", "i", "break", "if", "decorator_end_lineno", "is", "None", ":", "raise", "SyntaxError", "(", "(", "\"The next statement following the decorator corresponding to the line {} \"", "\"could not be found in file {}: {!r}\"", ")", ".", "format", "(", "lineno", "+", "1", ",", "filename", ",", "lines", "[", "lineno", "]", ")", ")", "decorator_lines", "=", "lines", "[", "decorator_lineno", ":", "decorator_end_lineno", "]", "# We need to dedent the decorator and add a dummy decoratee so that we can parse its text as valid source code.", "decorator_text", "=", "textwrap", ".", "dedent", "(", "\"\"", ".", "join", "(", "decorator_lines", ")", ")", "+", "\"def dummy_{}(): pass\"", ".", "format", "(", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "atok", "=", "asttokens", ".", "ASTTokens", "(", "decorator_text", ",", "parse", "=", "True", ")", "assert", "isinstance", "(", "atok", ".", "tree", ",", "ast", ".", "Module", ")", ",", "\"Expected the parsed decorator text to live in an AST module.\"", "module_node", "=", "atok", ".", "tree", "assert", "len", "(", "module_node", ".", "body", ")", "==", "1", ",", "\"Expected the module AST of the decorator text to have a single statement.\"", "assert", "isinstance", "(", "module_node", ".", "body", "[", "0", "]", ",", "ast", ".", "FunctionDef", ")", ",", "\"Expected the only statement in the AST module corresponding to the decorator text to be a function definition.\"", "func_def_node", "=", "module_node", ".", "body", "[", "0", "]", "assert", "len", "(", "func_def_node", ".", "decorator_list", ")", "==", "1", ",", "\"Expected the function AST node corresponding to the decorator text to have a single decorator.\"", "assert", "isinstance", "(", "func_def_node", ".", "decorator_list", "[", "0", "]", ",", "ast", ".", "Call", ")", ",", "\"Expected the only decorator in the function definition AST node corresponding to the decorator text \"", "\"to be a call node.\"", "call_node", "=", "func_def_node", ".", "decorator_list", "[", "0", "]", "return", "DecoratorInspection", "(", "atok", "=", 
"atok", ",", "node", "=", "call_node", ")" ]
Parse the file in which the decorator is called and figure out the corresponding call AST node. :param lines: lines of the source file corresponding to the decorator call :param lineno: line index (starting with 0) of one of the lines in the decorator call :param filename: name of the file where decorator is called :return: inspected decorator call
[ "Parse", "the", "file", "in", "which", "the", "decorator", "is", "called", "and", "figure", "out", "the", "corresponding", "call", "AST", "node", "." ]
language: python
partition: train
saltstack/salt
salt/modules/nova.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/nova.py#L554-L565
def flavor_access_list(flavor_id, profile=None, **kwargs):
    '''
    Return a list of project IDs assigned to flavor ID

    CLI Example:

    .. code-block:: bash

        salt '*' nova.flavor_access_list flavor_id=ID
    '''
    conn = _auth(profile, **kwargs)
    return conn.flavor_access_list(flavor_id=flavor_id, **kwargs)
[ "def", "flavor_access_list", "(", "flavor_id", ",", "profile", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn", "=", "_auth", "(", "profile", ",", "*", "*", "kwargs", ")", "return", "conn", ".", "flavor_access_list", "(", "flavor_id", "=", "flavor_id", ",", "*", "*", "kwargs", ")" ]
Return a list of project IDs assigned to flavor ID CLI Example: .. code-block:: bash salt '*' nova.flavor_access_list flavor_id=ID
[ "Return", "a", "list", "of", "project", "IDs", "assigned", "to", "flavor", "ID" ]
language: python
partition: train
zyga/python-phablet
phablet.py
https://github.com/zyga/python-phablet/blob/c281045dfb8b55dd2888e1efe9631f72ffc77ac8/phablet.py#L215-L249
def ssh_cmdline(self, cmd):
    """
    Get argument list for :meth:`subprocess.Popen()` to run ssh.

    :param cmd:
        a list of arguments to pass to ssh
    :returns:
        argument list to pass as the first argument to subprocess.Popen()

    .. note::
        you must call :meth:`connect()` at least once
        before calling this method.

    This method returns the ``args`` argument (first argument) to
    subprocess.Popen() required to execute the specified command on
    the phablet device. You can use it to construct your own
    connections, to intercept command output or to setup any
    additional things that you may require.

    .. versionadded:: 0.2
    """
    if not isinstance(cmd, list):
        raise TypeError("cmd needs to be a list")
    if not all(isinstance(item, str) for item in cmd):
        raise TypeError("cmd needs to be a list of strings")
    if self._port is None:
        raise ProgrammingError("run connect() first")
    ssh_cmd = ['ssh']
    for opt in self._get_ssh_options():
        ssh_cmd.append('-o')
        ssh_cmd.append(opt)
    ssh_cmd.extend(['phablet@localhost', '--'])
    ssh_cmd.extend(cmd)
    _logger.debug("ssh_cmdline %r => %r", cmd, ssh_cmd)
    return ssh_cmd
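For cmd == ['uname', '-a'] the returned list has the following shape; the -o options depend on _get_ssh_options(), so the single option shown here is illustrative:

['ssh',
 '-o', 'UserKnownHostsFile=/dev/null',   # one of possibly several options
 'phablet@localhost', '--',
 'uname', '-a']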
[ "def", "ssh_cmdline", "(", "self", ",", "cmd", ")", ":", "if", "not", "isinstance", "(", "cmd", ",", "list", ")", ":", "raise", "TypeError", "(", "\"cmd needs to be a list\"", ")", "if", "not", "all", "(", "isinstance", "(", "item", ",", "str", ")", "for", "item", "in", "cmd", ")", ":", "raise", "TypeError", "(", "\"cmd needs to be a list of strings\"", ")", "if", "self", ".", "_port", "is", "None", ":", "raise", "ProgrammingError", "(", "\"run connect() first\"", ")", "ssh_cmd", "=", "[", "'ssh'", "]", "for", "opt", "in", "self", ".", "_get_ssh_options", "(", ")", ":", "ssh_cmd", ".", "append", "(", "'-o'", ")", "ssh_cmd", ".", "append", "(", "opt", ")", "ssh_cmd", ".", "extend", "(", "[", "'phablet@localhost'", ",", "'--'", "]", ")", "ssh_cmd", ".", "extend", "(", "cmd", ")", "_logger", ".", "debug", "(", "\"ssh_cmdline %r => %r\"", ",", "cmd", ",", "ssh_cmd", ")", "return", "ssh_cmd" ]
Get argument list for meth:`subprocess.Popen()` to run ssh. :param cmd: a list of arguments to pass to ssh :returns: argument list to pass as the first argument to subprocess.Popen() .. note:: you must call :meth:`connect()` at least once before calling this method. This method returns the ``args`` argument (first argument) to subprocess.Popen() required to execute the specified command on the phablet device. You can use it to construct your own connections, to intercept command output or to setup any additional things that you may require. .. versionadded:: 0.2
[ "Get", "argument", "list", "for", "meth", ":", "subprocess", ".", "Popen", "()", "to", "run", "ssh", "." ]
language: python
partition: train
bids-standard/pybids
bids/layout/layout.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/layout/layout.py#L1080-L1136
def search(self, files=None, defined_fields=None, **kwargs):
    """Search files in the layout by metadata fields.

    Args:
        files (list): Optional list of names of files to search. If None,
            all files in the layout are scanned.
        defined_fields (list): Optional list of names of fields that must
            be defined in the JSON sidecar in order to consider the file a
            match, but which don't need to match any particular value.
        kwargs: Optional keyword arguments defining search constraints;
            keys are names of metadata fields, and values are the values
            to match those fields against (e.g., SliceTiming=0.017 would
            return all files that have a SliceTiming value of 0.017 in
            metadata).

    Returns: A list of filenames that match all constraints.
    """
    if defined_fields is None:
        defined_fields = []

    all_keys = set(defined_fields) | set(kwargs.keys())
    if not all_keys:
        raise ValueError("At least one field to search on must be passed.")

    # If no list of files is passed, use all files in layout
    if files is None:
        files = set(self.layout.files.keys())

    # Index metadata for any previously unseen files
    for f in files:
        self.index_file(f)

    # Get file intersection of all kwargs keys--this is fast
    filesets = [set(self.key_index.get(k, [])) for k in all_keys]
    matches = reduce(lambda x, y: x & y, filesets)

    if files is not None:
        matches &= set(files)

    if not matches:
        return []

    def check_matches(f, key, val):
        if isinstance(val, six.string_types) and '*' in val:
            val = ('^%s$' % val).replace('*', ".*")
            return re.search(str(self.file_index[f][key]), val) is not None
        else:
            return val == self.file_index[f][key]

    # Serially check matches against each pattern, with early termination
    for k, val in kwargs.items():
        matches = list(filter(lambda x: check_matches(x, k, val), matches))
        if not matches:
            return []

    return matches
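The wildcard handling turns a glob-style constraint into an anchored regular expression before matching:

val = 'sub-*_task-rest*'
pattern = ('^%s$' % val).replace('*', '.*')
print(pattern)  # ^sub-.*_task-rest.*$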
[ "def", "search", "(", "self", ",", "files", "=", "None", ",", "defined_fields", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "defined_fields", "is", "None", ":", "defined_fields", "=", "[", "]", "all_keys", "=", "set", "(", "defined_fields", ")", "|", "set", "(", "kwargs", ".", "keys", "(", ")", ")", "if", "not", "all_keys", ":", "raise", "ValueError", "(", "\"At least one field to search on must be passed.\"", ")", "# If no list of files is passed, use all files in layout", "if", "files", "is", "None", ":", "files", "=", "set", "(", "self", ".", "layout", ".", "files", ".", "keys", "(", ")", ")", "# Index metadata for any previously unseen files", "for", "f", "in", "files", ":", "self", ".", "index_file", "(", "f", ")", "# Get file intersection of all kwargs keys--this is fast", "filesets", "=", "[", "set", "(", "self", ".", "key_index", ".", "get", "(", "k", ",", "[", "]", ")", ")", "for", "k", "in", "all_keys", "]", "matches", "=", "reduce", "(", "lambda", "x", ",", "y", ":", "x", "&", "y", ",", "filesets", ")", "if", "files", "is", "not", "None", ":", "matches", "&=", "set", "(", "files", ")", "if", "not", "matches", ":", "return", "[", "]", "def", "check_matches", "(", "f", ",", "key", ",", "val", ")", ":", "if", "isinstance", "(", "val", ",", "six", ".", "string_types", ")", "and", "'*'", "in", "val", ":", "val", "=", "(", "'^%s$'", "%", "val", ")", ".", "replace", "(", "'*'", ",", "\".*\"", ")", "return", "re", ".", "search", "(", "str", "(", "self", ".", "file_index", "[", "f", "]", "[", "key", "]", ")", ",", "val", ")", "is", "not", "None", "else", ":", "return", "val", "==", "self", ".", "file_index", "[", "f", "]", "[", "key", "]", "# Serially check matches against each pattern, with early termination", "for", "k", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "matches", "=", "list", "(", "filter", "(", "lambda", "x", ":", "check_matches", "(", "x", ",", "k", ",", "val", ")", ",", "matches", ")", ")", "if", "not", "matches", ":", "return", "[", "]", "return", "matches" ]
Search files in the layout by metadata fields. Args: files (list): Optional list of names of files to search. If None, all files in the layout are scanned. defined_fields (list): Optional list of names of fields that must be defined in the JSON sidecar in order to consider the file a match, but which don't need to match any particular value. kwargs: Optional keyword arguments defining search constraints; keys are names of metadata fields, and values are the values to match those fields against (e.g., SliceTiming=0.017) would return all files that have a SliceTiming value of 0.071 in metadata. Returns: A list of filenames that match all constraints.
[ "Search", "files", "in", "the", "layout", "by", "metadata", "fields", "." ]
language: python
partition: train
tanghaibao/jcvi
jcvi/apps/restriction.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/restriction.py#L72-L104
def extract_ends(rec, sites, flank, fw, maxfragsize=800):
    """
    Extraction of ends of fragments above certain size.
    """
    nsites = len(sites)
    size = len(rec)
    for i, s in enumerate(sites):
        newid = "{0}:{1}".format(rec.name, s)
        recs = []

        if i == 0 or s - sites[i - 1] <= maxfragsize:
            newidL = newid + "L"
            left = max(s - flank, 0)
            right = s
            frag = rec.seq[left:right].strip("Nn")
            recL = SeqRecord(frag, id=newidL, description="")
            if i == 0 and s > maxfragsize:  # Contig L-end
                pass
            else:
                recs.append(recL)

        if i == nsites - 1 or sites[i + 1] - s <= maxfragsize:
            newidR = newid + "R"
            left = s
            right = min(s + flank, size)
            frag = rec.seq[left:right].strip("Nn")
            recR = SeqRecord(frag, id=newidR, description="")
            if i == nsites - 1 and size - s > maxfragsize:  # Contig R-end
                pass
            else:
                recs.append(recR)

        SeqIO.write(recs, fw, "fasta")
[ "def", "extract_ends", "(", "rec", ",", "sites", ",", "flank", ",", "fw", ",", "maxfragsize", "=", "800", ")", ":", "nsites", "=", "len", "(", "sites", ")", "size", "=", "len", "(", "rec", ")", "for", "i", ",", "s", "in", "enumerate", "(", "sites", ")", ":", "newid", "=", "\"{0}:{1}\"", ".", "format", "(", "rec", ".", "name", ",", "s", ")", "recs", "=", "[", "]", "if", "i", "==", "0", "or", "s", "-", "sites", "[", "i", "-", "1", "]", "<=", "maxfragsize", ":", "newidL", "=", "newid", "+", "\"L\"", "left", "=", "max", "(", "s", "-", "flank", ",", "0", ")", "right", "=", "s", "frag", "=", "rec", ".", "seq", "[", "left", ":", "right", "]", ".", "strip", "(", "\"Nn\"", ")", "recL", "=", "SeqRecord", "(", "frag", ",", "id", "=", "newidL", ",", "description", "=", "\"\"", ")", "if", "i", "==", "0", "and", "s", ">", "maxfragsize", ":", "# Contig L-end", "pass", "else", ":", "recs", ".", "append", "(", "recL", ")", "if", "i", "==", "nsites", "-", "1", "or", "sites", "[", "i", "+", "1", "]", "-", "s", "<=", "maxfragsize", ":", "newidR", "=", "newid", "+", "\"R\"", "left", "=", "s", "right", "=", "min", "(", "s", "+", "flank", ",", "size", ")", "frag", "=", "rec", ".", "seq", "[", "left", ":", "right", "]", ".", "strip", "(", "\"Nn\"", ")", "recR", "=", "SeqRecord", "(", "frag", ",", "id", "=", "newidR", ",", "description", "=", "\"\"", ")", "if", "i", "==", "nsites", "-", "1", "and", "size", "-", "s", ">", "maxfragsize", ":", "# Contig R-end", "pass", "else", ":", "recs", ".", "append", "(", "recR", ")", "SeqIO", ".", "write", "(", "recs", ",", "fw", ",", "\"fasta\"", ")" ]
Extraction of ends of fragments above certain size.
[ "Extraction", "of", "ends", "of", "fragments", "above", "certain", "size", "." ]
language: python
partition: train
IdentityPython/SATOSA
src/satosa/backends/saml2.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/backends/saml2.py#L240-L259
def disco_response(self, context):
    """
    Endpoint for the discovery server response

    :type context: satosa.context.Context
    :rtype: satosa.response.Response

    :param context: The current context
    :return: response
    """
    info = context.request
    state = context.state

    try:
        entity_id = info["entityID"]
    except KeyError as err:
        satosa_logging(logger, logging.DEBUG, "No IDP chosen for state", state, exc_info=True)
        raise SATOSAAuthenticationError(state, "No IDP chosen") from err

    return self.authn_request(context, entity_id)
[ "def", "disco_response", "(", "self", ",", "context", ")", ":", "info", "=", "context", ".", "request", "state", "=", "context", ".", "state", "try", ":", "entity_id", "=", "info", "[", "\"entityID\"", "]", "except", "KeyError", "as", "err", ":", "satosa_logging", "(", "logger", ",", "logging", ".", "DEBUG", ",", "\"No IDP chosen for state\"", ",", "state", ",", "exc_info", "=", "True", ")", "raise", "SATOSAAuthenticationError", "(", "state", ",", "\"No IDP chosen\"", ")", "from", "err", "return", "self", ".", "authn_request", "(", "context", ",", "entity_id", ")" ]
Endpoint for the discovery server response :type context: satosa.context.Context :rtype: satosa.response.Response :param context: The current context :return: response
[ "Endpoint", "for", "the", "discovery", "server", "response" ]
language: python
partition: train
Microsoft/botbuilder-python
libraries/botframework-connector/botframework/connector/auth/jwt_token_validation.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botframework-connector/botframework/connector/auth/jwt_token_validation.py#L12-L39
async def authenticate_request(activity: Activity, auth_header: str,
                               credentials: CredentialProvider) -> ClaimsIdentity:
    """Authenticates the request and sets the service url in the set of trusted urls.

    :param activity: The incoming Activity from the Bot Framework or the Emulator
    :type activity: ~botframework.connector.models.Activity
    :param auth_header: The Bearer token included as part of the request
    :type auth_header: str
    :param credentials: The set of valid credentials, such as the Bot Application ID
    :type credentials: CredentialProvider

    :raises Exception:
    """
    if not auth_header:
        # No auth header was sent. We might be on the anonymous code path.
        is_auth_disabled = await credentials.is_authentication_disabled()
        if is_auth_disabled:
            # We are on the anonymous code path.
            return

        # No Auth Header. Auth is required. Request is not authorized.
        raise Exception('Unauthorized Access. Request is not authorized')

    claims_identity = await JwtTokenValidation.validate_auth_header(
        auth_header, credentials, activity.channel_id, activity.service_url)

    # On the standard Auth path, we need to trust the URL that was incoming.
    MicrosoftAppCredentials.trust_service_url(activity.service_url)

    return claims_identity
[ "async", "def", "authenticate_request", "(", "activity", ":", "Activity", ",", "auth_header", ":", "str", ",", "credentials", ":", "CredentialProvider", ")", "->", "ClaimsIdentity", ":", "if", "not", "auth_header", ":", "# No auth header was sent. We might be on the anonymous code path.", "is_auth_disabled", "=", "await", "credentials", ".", "is_authentication_disabled", "(", ")", "if", "is_auth_disabled", ":", "# We are on the anonymous code path.", "return", "# No Auth Header. Auth is required. Request is not authorized.", "raise", "Exception", "(", "'Unauthorized Access. Request is not authorized'", ")", "claims_identity", "=", "await", "JwtTokenValidation", ".", "validate_auth_header", "(", "auth_header", ",", "credentials", ",", "activity", ".", "channel_id", ",", "activity", ".", "service_url", ")", "# On the standard Auth path, we need to trust the URL that was incoming.", "MicrosoftAppCredentials", ".", "trust_service_url", "(", "activity", ".", "service_url", ")", "return", "claims_identity" ]
Authenticates the request and sets the service url in the set of trusted urls. :param activity: The incoming Activity from the Bot Framework or the Emulator :type activity: ~botframework.connector.models.Activity :param auth_header: The Bearer token included as part of the request :type auth_header: str :param credentials: The set of valid credentials, such as the Bot Application ID :type credentials: CredentialProvider :raises Exception:
[ "Authenticates", "the", "request", "and", "sets", "the", "service", "url", "in", "the", "set", "of", "trusted", "urls", ".", ":", "param", "activity", ":", "The", "incoming", "Activity", "from", "the", "Bot", "Framework", "or", "the", "Emulator", ":", "type", "activity", ":", "~botframework", ".", "connector", ".", "models", ".", "Activity", ":", "param", "auth_header", ":", "The", "Bearer", "token", "included", "as", "part", "of", "the", "request", ":", "type", "auth_header", ":", "str", ":", "param", "credentials", ":", "The", "set", "of", "valid", "credentials", "such", "as", "the", "Bot", "Application", "ID", ":", "type", "credentials", ":", "CredentialProvider" ]
language: python
partition: test
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L2000-L2020
def createPlan(self, plan, thread_id=None):
    """
    Sets a plan

    :param plan: Plan to set
    :param thread_id: User/Group ID to send plan to. See :ref:`intro_threads`
    :type plan: models.Plan
    :raises: FBchatException if request failed
    """
    thread_id, thread_type = self._getThread(thread_id, None)
    data = {
        "event_type": "EVENT",
        "event_time": plan.time,
        "title": plan.title,
        "thread_id": thread_id,
        "location_id": plan.location_id or "",
        "location_name": plan.location or "",
        "acontext": ACONTEXT,
    }
    j = self._post(self.req_url.PLAN_CREATE, data, fix_request=True, as_json=True)
[ "def", "createPlan", "(", "self", ",", "plan", ",", "thread_id", "=", "None", ")", ":", "thread_id", ",", "thread_type", "=", "self", ".", "_getThread", "(", "thread_id", ",", "None", ")", "data", "=", "{", "\"event_type\"", ":", "\"EVENT\"", ",", "\"event_time\"", ":", "plan", ".", "time", ",", "\"title\"", ":", "plan", ".", "title", ",", "\"thread_id\"", ":", "thread_id", ",", "\"location_id\"", ":", "plan", ".", "location_id", "or", "\"\"", ",", "\"location_name\"", ":", "plan", ".", "location", "or", "\"\"", ",", "\"acontext\"", ":", "ACONTEXT", ",", "}", "j", "=", "self", ".", "_post", "(", "self", ".", "req_url", ".", "PLAN_CREATE", ",", "data", ",", "fix_request", "=", "True", ",", "as_json", "=", "True", ")" ]
Sets a plan :param plan: Plan to set :param thread_id: User/Group ID to send plan to. See :ref:`intro_threads` :type plan: models.Plan :raises: FBchatException if request failed
[ "Sets", "a", "plan" ]
language: python
partition: train
seung-lab/cloud-volume
cloudvolume/lib.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/lib.py#L792-L877
def save_images(image, directory=None, axis='z', channel=None,
                global_norm=True, image_format='PNG'):
    """
    Serialize a 3D or 4D array into a series of PNGs for visualization.

    image: A 3D or 4D numpy array. Supported dtypes: integer, float, boolean
    axis: 'x', 'y', 'z'
    channel: None, 0, 1, 2, etc, which channel to serialize. Does all by default.
    directory: override the default output directory
    global_norm: Normalize floating point volumes globally or per slice?
    image_format: 'PNG', 'JPEG', etc
    """
    if directory is None:
        directory = os.path.join('./saved_images', 'default', 'default', '0',
                                 Bbox((0, 0, 0), image.shape[:3]).to_filename())

    mkdir(directory)
    print("Saving to {}".format(directory))

    indexmap = {
        'x': 0,
        'y': 1,
        'z': 2,
    }

    index = indexmap[axis]

    channel = slice(None) if channel is None else channel

    while image.ndim < 4:
        image = image[..., np.newaxis]

    def normalize_float(img):
        img = np.copy(img)
        img[img == np.inf] = 0
        img[img == -np.inf] = 0
        lower, upper = img.min(), img.max()
        img = (img - lower) / (upper - lower) * 255.0
        return img.astype(np.uint8)

    if global_norm and np.issubdtype(image.dtype, np.floating):
        image = normalize_float(image)

    for level in tqdm(range(image.shape[index]), desc="Saving Images"):
        if index == 0:
            img = image[level, :, :, channel]
        elif index == 1:
            img = image[:, level, :, channel]
        elif index == 2:
            img = image[:, :, level, channel]
        else:
            raise IndexError("Index {} is not valid. Expected 0, 1, or 2.".format(index))

        while img.ndim < 3:
            img = img[..., np.newaxis]

        num_channels = img.shape[2]
        for channel_index in range(num_channels):
            img2d = img[:, :, channel_index]

            if not global_norm and img2d.dtype in (np.float32, np.float64):
                img2d = normalize_float(img2d)

            # discovered that downloaded cube is in a weird rotated state.
            # it requires a 90deg counterclockwise rotation on xy plane (leaving z alone)
            # followed by a flip on Y
            if axis == 'z':
                img2d = np.flipud(np.rot90(img2d, 1))

            if img2d.dtype == np.uint8:
                img2d = Image.fromarray(img2d, 'L')
            elif img2d.dtype == np.bool:
                img2d = img2d.astype(np.uint8) * 255
                img2d = Image.fromarray(img2d, 'L')
            else:
                img2d = img2d.astype('uint32')
                img2d[:, :] |= 0xff000000  # for little endian abgr
                img2d = Image.fromarray(img2d, 'RGBA')

            file_index = str(level).zfill(3)
            filename = '{}.{}'.format(file_index, image_format.lower())
            if num_channels > 1:
                filename = '{}-{}'.format(channel_index, filename)

            path = os.path.join(directory, filename)
            img2d.save(path, image_format)
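A hedged usage sketch: writing one PNG per z-slice of a random float volume (requires numpy, Pillow and tqdm; the output directory here is arbitrary):

import numpy as np

volume = np.random.rand(32, 32, 8).astype(np.float32)  # x, y, z
save_images(volume, directory='/tmp/slices', axis='z')
# -> /tmp/slices/000.png ... /tmp/slices/007.png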
[ "def", "save_images", "(", "image", ",", "directory", "=", "None", ",", "axis", "=", "'z'", ",", "channel", "=", "None", ",", "global_norm", "=", "True", ",", "image_format", "=", "'PNG'", ")", ":", "if", "directory", "is", "None", ":", "directory", "=", "os", ".", "path", ".", "join", "(", "'./saved_images'", ",", "'default'", ",", "'default'", ",", "'0'", ",", "Bbox", "(", "(", "0", ",", "0", ",", "0", ")", ",", "image", ".", "shape", "[", ":", "3", "]", ")", ".", "to_filename", "(", ")", ")", "mkdir", "(", "directory", ")", "print", "(", "\"Saving to {}\"", ".", "format", "(", "directory", ")", ")", "indexmap", "=", "{", "'x'", ":", "0", ",", "'y'", ":", "1", ",", "'z'", ":", "2", ",", "}", "index", "=", "indexmap", "[", "axis", "]", "channel", "=", "slice", "(", "None", ")", "if", "channel", "is", "None", "else", "channel", "while", "image", ".", "ndim", "<", "4", ":", "image", "=", "image", "[", "...", ",", "np", ".", "newaxis", "]", "def", "normalize_float", "(", "img", ")", ":", "img", "=", "np", ".", "copy", "(", "img", ")", "img", "[", "img", "==", "np", ".", "inf", "]", "=", "0", "img", "[", "img", "==", "-", "np", ".", "inf", "]", "=", "0", "lower", ",", "upper", "=", "img", ".", "min", "(", ")", ",", "img", ".", "max", "(", ")", "img", "=", "(", "img", "-", "lower", ")", "/", "(", "upper", "-", "lower", ")", "*", "255.0", "return", "img", ".", "astype", "(", "np", ".", "uint8", ")", "if", "global_norm", "and", "np", ".", "issubdtype", "(", "image", ".", "dtype", ",", "np", ".", "floating", ")", ":", "image", "=", "normalize_float", "(", "image", ")", "for", "level", "in", "tqdm", "(", "range", "(", "image", ".", "shape", "[", "index", "]", ")", ",", "desc", "=", "\"Saving Images\"", ")", ":", "if", "index", "==", "0", ":", "img", "=", "image", "[", "level", ",", ":", ",", ":", ",", "channel", "]", "elif", "index", "==", "1", ":", "img", "=", "image", "[", ":", ",", "level", ",", ":", ",", "channel", "]", "elif", "index", "==", "2", ":", "img", "=", "image", "[", ":", ",", ":", ",", "level", ",", "channel", "]", "else", ":", "raise", "IndexError", "(", "\"Index {} is not valid. 
Expected 0, 1, or 2.\"", ".", "format", "(", "index", ")", ")", "while", "img", ".", "ndim", "<", "3", ":", "img", "=", "img", "[", "...", ",", "np", ".", "newaxis", "]", "num_channels", "=", "img", ".", "shape", "[", "2", "]", "for", "channel_index", "in", "range", "(", "num_channels", ")", ":", "img2d", "=", "img", "[", ":", ",", ":", ",", "channel_index", "]", "if", "not", "global_norm", "and", "img2d", ".", "dtype", "in", "(", "np", ".", "float32", ",", "np", ".", "float64", ")", ":", "img2d", "=", "normalize_float", "(", "img2d", ")", "# discovered that downloaded cube is in a weird rotated state.", "# it requires a 90deg counterclockwise rotation on xy plane (leaving z alone)", "# followed by a flip on Y", "if", "axis", "==", "'z'", ":", "img2d", "=", "np", ".", "flipud", "(", "np", ".", "rot90", "(", "img2d", ",", "1", ")", ")", "if", "img2d", ".", "dtype", "==", "np", ".", "uint8", ":", "img2d", "=", "Image", ".", "fromarray", "(", "img2d", ",", "'L'", ")", "elif", "img2d", ".", "dtype", "==", "np", ".", "bool", ":", "img2d", "=", "img2d", ".", "astype", "(", "np", ".", "uint8", ")", "*", "255", "img2d", "=", "Image", ".", "fromarray", "(", "img2d", ",", "'L'", ")", "else", ":", "img2d", "=", "img2d", ".", "astype", "(", "'uint32'", ")", "img2d", "[", ":", ",", ":", "]", "|=", "0xff000000", "# for little endian abgr", "img2d", "=", "Image", ".", "fromarray", "(", "img2d", ",", "'RGBA'", ")", "file_index", "=", "str", "(", "level", ")", ".", "zfill", "(", "3", ")", "filename", "=", "'{}.{}'", ".", "format", "(", "file_index", ",", "image_format", ".", "lower", "(", ")", ")", "if", "num_channels", ">", "1", ":", "filename", "=", "'{}-{}'", ".", "format", "(", "channel_index", ",", "filename", ")", "path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", "img2d", ".", "save", "(", "path", ",", "image_format", ")" ]
Serialize a 3D or 4D array into a series of PNGs for visualization. image: A 3D or 4D numpy array. Supported dtypes: integer, float, boolean axis: 'x', 'y', 'z' channel: None, 0, 1, 2, etc, which channel to serialize. Does all by default. directory: override the default output directory global_norm: Normalize floating point volumes globally or per slice? image_format: 'PNG', 'JPEG', etc
[ "Serialize", "a", "3D", "or", "4D", "array", "into", "a", "series", "of", "PNGs", "for", "visualization", "." ]
language: python
partition: train
SmokinCaterpillar/pypet
pypet/trajectory.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/trajectory.py#L2327-L2401
def _merge_links(self, other_trajectory, used_runs, allowed_translations, ignore_data):
    """ Merges all links """
    linked_items = other_trajectory._linked_by
    run_name_dummys = set([f(-1) for f in other_trajectory._wildcard_functions.values()])
    if len(linked_items) > 0:
        self._logger.info('Merging potential links!')
        for old_linked_name in other_trajectory._linked_by:
            if old_linked_name in ignore_data:
                continue
            split_name = old_linked_name.split('.')
            if any(x in run_name_dummys for x in split_name):
                self._logger.warning('Ignoring all links linking to `%s` because '
                                     'I don`t know how to resolve links under `%s` nodes.' %
                                     (old_linked_name, str(run_name_dummys)))
                continue
            old_link_dict = other_trajectory._linked_by[old_linked_name]
            split_name = old_linked_name.split('.')
            if all(x in allowed_translations for x in split_name):
                new_linked_full_name = self._rename_full_name(old_linked_name,
                                                              other_trajectory,
                                                              used_runs=used_runs)
            else:
                new_linked_full_name = old_linked_name

            for linking_node, link_set in old_link_dict.values():
                linking_full_name = linking_node.v_full_name
                split_name = linking_full_name.split('.')
                if any(x in run_name_dummys for x in split_name):
                    self._logger.warning('Ignoring links under `%s` because '
                                         'I don`t know how to resolve links '
                                         'under a `%s` node.' %
                                         (linking_full_name, str(run_name_dummys)))

                split_name = linking_full_name.split('.')
                if any(x in allowed_translations for x in split_name):
                    new_linking_full_name = self._rename_full_name(linking_full_name,
                                                                   other_trajectory,
                                                                   used_runs=used_runs)
                else:
                    new_linking_full_name = linking_full_name

                for link in link_set:
                    if (linking_full_name + '.' + link) in ignore_data:
                        continue
                    if link in run_name_dummys:
                        self._logger.warning('Ignoring link `%s` under `%s` because '
                                             'I don`t know how to resolve '
                                             'links named as `%s`.' %
                                             (link, linking_full_name, str(run_name_dummys)))
                        continue
                    try:
                        new_linked_item = self.f_get(new_linked_full_name, shortcuts=False)
                        if self.f_contains(new_linking_full_name):
                            new_linking_item = self.f_get(new_linking_full_name, shortcuts=False)
                        else:
                            new_linking_item = self.f_add_group(new_linking_full_name)
                        if link in allowed_translations:
                            run_indices, wildcards = other_trajectory._reversed_wildcards[link]
                            link = self.f_wildcard(wildcards[0], used_runs[run_indices[0]])
                        if not link in new_linking_item._links:
                            new_linking_item.f_add_link(link, new_linked_item)
                        else:
                            self._logger.debug('Link `%s` exists already under `%s`.' %
                                               (link, new_linked_item.v_full_name))
                    except (AttributeError, ValueError) as exc:
                        self._logger.error('Could not copy link `%s` under `%s` linking '
                                           'to `%s` due to `%s`' %
                                           (link, linking_full_name, old_linked_name, repr(exc)))
[ "def", "_merge_links", "(", "self", ",", "other_trajectory", ",", "used_runs", ",", "allowed_translations", ",", "ignore_data", ")", ":", "linked_items", "=", "other_trajectory", ".", "_linked_by", "run_name_dummys", "=", "set", "(", "[", "f", "(", "-", "1", ")", "for", "f", "in", "other_trajectory", ".", "_wildcard_functions", ".", "values", "(", ")", "]", ")", "if", "len", "(", "linked_items", ")", ">", "0", ":", "self", ".", "_logger", ".", "info", "(", "'Merging potential links!'", ")", "for", "old_linked_name", "in", "other_trajectory", ".", "_linked_by", ":", "if", "old_linked_name", "in", "ignore_data", ":", "continue", "split_name", "=", "old_linked_name", ".", "split", "(", "'.'", ")", "if", "any", "(", "x", "in", "run_name_dummys", "for", "x", "in", "split_name", ")", ":", "self", ".", "_logger", ".", "warning", "(", "'Ignoring all links linking to `%s` because '", "'I don`t know how to resolve links under `%s` nodes.'", "%", "(", "old_linked_name", ",", "str", "(", "run_name_dummys", ")", ")", ")", "continue", "old_link_dict", "=", "other_trajectory", ".", "_linked_by", "[", "old_linked_name", "]", "split_name", "=", "old_linked_name", ".", "split", "(", "'.'", ")", "if", "all", "(", "x", "in", "allowed_translations", "for", "x", "in", "split_name", ")", ":", "new_linked_full_name", "=", "self", ".", "_rename_full_name", "(", "old_linked_name", ",", "other_trajectory", ",", "used_runs", "=", "used_runs", ")", "else", ":", "new_linked_full_name", "=", "old_linked_name", "for", "linking_node", ",", "link_set", "in", "old_link_dict", ".", "values", "(", ")", ":", "linking_full_name", "=", "linking_node", ".", "v_full_name", "split_name", "=", "linking_full_name", ".", "split", "(", "'.'", ")", "if", "any", "(", "x", "in", "run_name_dummys", "for", "x", "in", "split_name", ")", ":", "self", ".", "_logger", ".", "warning", "(", "'Ignoring links under `%s` because '", "'I don`t know how to resolve links '", "'under a `%s` node.'", "%", "(", "linking_full_name", ",", "str", "(", "run_name_dummys", ")", ")", ")", "split_name", "=", "linking_full_name", ".", "split", "(", "'.'", ")", "if", "any", "(", "x", "in", "allowed_translations", "for", "x", "in", "split_name", ")", ":", "new_linking_full_name", "=", "self", ".", "_rename_full_name", "(", "linking_full_name", ",", "other_trajectory", ",", "used_runs", "=", "used_runs", ")", "else", ":", "new_linking_full_name", "=", "linking_full_name", "for", "link", "in", "link_set", ":", "if", "(", "linking_full_name", "+", "'.'", "+", "link", ")", "in", "ignore_data", ":", "continue", "if", "link", "in", "run_name_dummys", ":", "self", ".", "_logger", ".", "warning", "(", "'Ignoring link `%s` under `%s` because '", "'I don`t know how to resolve '", "'links named as `%s`.'", "%", "(", "link", ",", "linking_full_name", ",", "str", "(", "run_name_dummys", ")", ")", ")", "continue", "try", ":", "new_linked_item", "=", "self", ".", "f_get", "(", "new_linked_full_name", ",", "shortcuts", "=", "False", ")", "if", "self", ".", "f_contains", "(", "new_linking_full_name", ")", ":", "new_linking_item", "=", "self", ".", "f_get", "(", "new_linking_full_name", ",", "shortcuts", "=", "False", ")", "else", ":", "new_linking_item", "=", "self", ".", "f_add_group", "(", "new_linking_full_name", ")", "if", "link", "in", "allowed_translations", ":", "run_indices", ",", "wildcards", "=", "other_trajectory", ".", "_reversed_wildcards", "[", "link", "]", "link", "=", "self", ".", "f_wildcard", "(", "wildcards", "[", "0", "]", ",", "used_runs", "[", "run_indices", 
"[", "0", "]", "]", ")", "if", "not", "link", "in", "new_linking_item", ".", "_links", ":", "new_linking_item", ".", "f_add_link", "(", "link", ",", "new_linked_item", ")", "else", ":", "self", ".", "_logger", ".", "debug", "(", "'Link `%s` exists already under `%s`.'", "%", "(", "link", ",", "new_linked_item", ".", "v_full_name", ")", ")", "except", "(", "AttributeError", ",", "ValueError", ")", "as", "exc", ":", "self", ".", "_logger", ".", "error", "(", "'Could not copy link `%s` under `%s` linking '", "'to `%s` due to `%s`'", "%", "(", "link", ",", "linking_full_name", ",", "old_linked_name", ",", "repr", "(", "exc", ")", ")", ")" ]
Merges all links
[ "Merges", "all", "links" ]
python
test
mitodl/edx-api-client
edx_api/enrollments/__init__.py
https://github.com/mitodl/edx-api-client/blob/083fd23a48b3ef0d39602fc3e7e53ef02f4ad6d6/edx_api/enrollments/__init__.py#L121-L141
def create_audit_student_enrollment(self, course_id): """ Creates an audit enrollment for the user in a given course Args: course_id (str): an edX course id Returns: Enrollment: object representing the student enrollment in the provided course """ audit_enrollment = { "mode": "audit", "course_details": {"course_id": course_id} } # the request is done on behalf of the current logged-in user resp = self.requester.post( urljoin(self.base_url, self.enrollment_url), json=audit_enrollment ) resp.raise_for_status() return Enrollment(resp.json())
[ "def", "create_audit_student_enrollment", "(", "self", ",", "course_id", ")", ":", "audit_enrollment", "=", "{", "\"mode\"", ":", "\"audit\"", ",", "\"course_details\"", ":", "{", "\"course_id\"", ":", "course_id", "}", "}", "# the request is done on behalf of the current logged-in user", "resp", "=", "self", ".", "requester", ".", "post", "(", "urljoin", "(", "self", ".", "base_url", ",", "self", ".", "enrollment_url", ")", ",", "json", "=", "audit_enrollment", ")", "resp", ".", "raise_for_status", "(", ")", "return", "Enrollment", "(", "resp", ".", "json", "(", ")", ")" ]
Creates an audit enrollment for the user in a given course Args: course_id (str): an edX course id Returns: Enrollment: object representing the student enrollment in the provided course
[ "Creates", "an", "audit", "enrollment", "for", "the", "user", "in", "a", "given", "course" ]
python
train
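A short usage sketch for the enrollment record above. The EdxApi constructor, token dict, and base URL below are assumptions drawn from typical edx-api-client usage, not confirmed by this record; only create_audit_student_enrollment comes from it.

from edx_api.client import EdxApi

# base_url and the access token are placeholders (assumptions)
client = EdxApi({"access_token": "<token>"}, base_url="https://courses.example.com")
enrollment = client.enrollments.create_audit_student_enrollment(
    "course-v1:edX+DemoX+Demo_Course")
print(enrollment)  # Enrollment object built from the POST response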
ambitioninc/newrelic-api
newrelic_api/alert_conditions_infra.py
https://github.com/ambitioninc/newrelic-api/blob/07b4430aa6ae61e4704e2928a6e7a24c76f0f424/newrelic_api/alert_conditions_infra.py#L71-L106
def show(self, alert_condition_infra_id): """ This API endpoint returns an alert condition for infrastructure, identified by its ID. :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } } """ return self._get( url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id), headers=self.headers, )
[ "def", "show", "(", "self", ",", "alert_condition_infra_id", ")", ":", "return", "self", ".", "_get", "(", "url", "=", "'{0}alerts/conditions/{1}'", ".", "format", "(", "self", ".", "URL", ",", "alert_condition_infra_id", ")", ",", "headers", "=", "self", ".", "headers", ",", ")" ]
This API endpoint returns an alert condition for infrastructure, identified by its ID. :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } }
[ "This", "API", "endpoint", "returns", "an", "alert", "condition", "for", "infrastructure", "identified", "by", "its", "ID", "." ]
python
train
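For orientation, a minimal call sketch for the record above; the api_key constructor argument follows the common newrelic-api resource pattern and is an assumption here, as is the condition ID.

from newrelic_api.alert_conditions_infra import AlertConditionsInfra

conditions = AlertConditionsInfra(api_key='<NEW_RELIC_API_KEY>')  # assumed ctor
response = conditions.show(alert_condition_infra_id=12345)        # placeholder ID
print(response['data']['name'], response['data']['enabled'])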
uber/tchannel-python
tchannel/peer_heap.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/peer_heap.py#L136-L147
def remove_peer(self, peer): """Remove the peer from the heap. Return: the removed peer if it exists. Raises IndexError if the peer's index is out of range. """ if peer.index < 0 or peer.index >= self.size(): raise IndexError('Peer index is out of range') assert peer is self.peers[peer.index], "peer is not in the heap" return heap.remove(self, peer.index)
[ "def", "remove_peer", "(", "self", ",", "peer", ")", ":", "if", "peer", ".", "index", "<", "0", "or", "peer", ".", "index", ">=", "self", ".", "size", "(", ")", ":", "raise", "IndexError", "(", "'Peer index is out of range'", ")", "assert", "peer", "is", "self", ".", "peers", "[", "peer", ".", "index", "]", ",", "\"peer is not in the heap\"", "return", "heap", ".", "remove", "(", "self", ",", "peer", ".", "index", ")" ]
Remove the peer from the heap. Return: the removed peer if it exists. Raises IndexError if the peer's index is out of range.
[ "Remove", "the", "peer", "from", "the", "heap", "." ]
python
train
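A hedged sketch of the documented contract; PeerHeap construction and the peer object are assumptions, only remove_peer's behavior is taken from the record above.

from tchannel.peer_heap import PeerHeap

heap = PeerHeap()          # construction details assumed
some_peer = ...            # a peer previously pushed onto the heap (assumption)
try:
    removed = heap.remove_peer(some_peer)  # peer.index must be a valid heap slot
except IndexError:
    print("peer index out of range, nothing removed")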
ev3dev/ev3dev-lang-python
ev3dev2/motor.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/motor.py#L644-L653
def ramp_down_sp(self): """ Writing sets the ramp down setpoint. Reading returns the current value. Units are in milliseconds and must be positive. When set to a non-zero value, the motor speed will decrease from 100% of `max_speed` to 0 over the span of this setpoint. The actual ramp time is the ratio of the difference between the `speed_sp` and the current `speed` and max_speed multiplied by `ramp_down_sp`. """ self._ramp_down_sp, value = self.get_attr_int(self._ramp_down_sp, 'ramp_down_sp') return value
[ "def", "ramp_down_sp", "(", "self", ")", ":", "self", ".", "_ramp_down_sp", ",", "value", "=", "self", ".", "get_attr_int", "(", "self", ".", "_ramp_down_sp", ",", "'ramp_down_sp'", ")", "return", "value" ]
Writing sets the ramp down setpoint. Reading returns the current value. Units are in milliseconds and must be positive. When set to a non-zero value, the motor speed will decrease from 100% of `max_speed` to 0 over the span of this setpoint. The actual ramp time is the ratio of the difference between the `speed_sp` and the current `speed` and max_speed multiplied by `ramp_down_sp`.
[ "Writing", "sets", "the", "ramp", "down", "setpoint", ".", "Reading", "returns", "the", "current", "value", ".", "Units", "are", "in", "milliseconds", "and", "must", "be", "positive", ".", "When", "set", "to", "a", "non", "-", "zero", "value", "the", "motor", "speed", "will", "decrease", "from", "100%", "of", "max_speed", "to", "0", "over", "the", "span", "of", "this", "setpoint", ".", "The", "actual", "ramp", "time", "is", "the", "ratio", "of", "the", "difference", "between", "the", "speed_sp", "and", "the", "current", "speed", "and", "max_speed", "multiplied", "by", "ramp_down_sp", "." ]
python
train
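A usage sketch, assuming an EV3 brick with a large motor on port A; only the ramp_down_sp attribute itself is documented by the record above.

from ev3dev2.motor import LargeMotor, OUTPUT_A

motor = LargeMotor(OUTPUT_A)
motor.ramp_down_sp = 1500                  # spend ~1.5 s ramping down to a stop
motor.on_for_seconds(speed=75, seconds=3)  # ramp applies when the run ends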
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_wp.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_wp.py#L248-L267
def set_home_location(self): '''set home location from last map click''' try: latlon = self.module('map').click_position except Exception: print("No map available") return lat = float(latlon[0]) lon = float(latlon[1]) if self.wploader.count() == 0: self.wploader.add_latlonalt(lat, lon, 0) w = self.wploader.wp(0) w.x = lat w.y = lon self.wploader.set(w, 0) self.loading_waypoints = True self.loading_waypoint_lasttime = time.time() self.master.mav.mission_write_partial_list_send(self.target_system, self.target_component, 0, 0)
[ "def", "set_home_location", "(", "self", ")", ":", "try", ":", "latlon", "=", "self", ".", "module", "(", "'map'", ")", ".", "click_position", "except", "Exception", ":", "print", "(", "\"No map available\"", ")", "return", "lat", "=", "float", "(", "latlon", "[", "0", "]", ")", "lon", "=", "float", "(", "latlon", "[", "1", "]", ")", "if", "self", ".", "wploader", ".", "count", "(", ")", "==", "0", ":", "self", ".", "wploader", ".", "add_latlonalt", "(", "lat", ",", "lon", ",", "0", ")", "w", "=", "self", ".", "wploader", ".", "wp", "(", "0", ")", "w", ".", "x", "=", "lat", "w", ".", "y", "=", "lon", "self", ".", "wploader", ".", "set", "(", "w", ",", "0", ")", "self", ".", "loading_waypoints", "=", "True", "self", ".", "loading_waypoint_lasttime", "=", "time", ".", "time", "(", ")", "self", ".", "master", ".", "mav", ".", "mission_write_partial_list_send", "(", "self", ".", "target_system", ",", "self", ".", "target_component", ",", "0", ",", "0", ")" ]
set home location from last map click
[ "set", "home", "location", "from", "last", "map", "click" ]
python
train
openstack/pyghmi
pyghmi/ipmi/oem/lenovo/inventory.py
https://github.com/openstack/pyghmi/blob/f710b1d30a8eed19a9e86f01f9351c737666f3e5/pyghmi/ipmi/oem/lenovo/inventory.py#L105-L147
def parse_inventory_category_entry(raw, fields): """Parses one entry in an inventory category. :param raw: the raw data to the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: tuple -- the number of bytes read and a dictionary representing the entry. """ r = raw obj = {} bytes_read = 0 discard = False for field in fields: value = struct.unpack_from(field.fmt, r)[0] read = struct.calcsize(field.fmt) bytes_read += read r = r[read:] # If this entry is not actually present, just parse and then discard it if field.presence and not bool(value): discard = True if not field.include: continue if (field.fmt[-1] == "s"): value = value.rstrip("\x00") if (field.mapper and value in field.mapper): value = field.mapper[value] if (field.valuefunc): value = field.valuefunc(value) if not field.multivaluefunc: obj[field.name] = value else: for key in value: obj[key] = value[key] if discard: obj = None return bytes_read, obj
[ "def", "parse_inventory_category_entry", "(", "raw", ",", "fields", ")", ":", "r", "=", "raw", "obj", "=", "{", "}", "bytes_read", "=", "0", "discard", "=", "False", "for", "field", "in", "fields", ":", "value", "=", "struct", ".", "unpack_from", "(", "field", ".", "fmt", ",", "r", ")", "[", "0", "]", "read", "=", "struct", ".", "calcsize", "(", "field", ".", "fmt", ")", "bytes_read", "+=", "read", "r", "=", "r", "[", "read", ":", "]", "# If this entry is not actually present, just parse and then discard it", "if", "field", ".", "presence", "and", "not", "bool", "(", "value", ")", ":", "discard", "=", "True", "if", "not", "field", ".", "include", ":", "continue", "if", "(", "field", ".", "fmt", "[", "-", "1", "]", "==", "\"s\"", ")", ":", "value", "=", "value", ".", "rstrip", "(", "\"\\x00\"", ")", "if", "(", "field", ".", "mapper", "and", "value", "in", "field", ".", "mapper", ")", ":", "value", "=", "field", ".", "mapper", "[", "value", "]", "if", "(", "field", ".", "valuefunc", ")", ":", "value", "=", "field", ".", "valuefunc", "(", "value", ")", "if", "not", "field", ".", "multivaluefunc", ":", "obj", "[", "field", ".", "name", "]", "=", "value", "else", ":", "for", "key", "in", "value", ":", "obj", "[", "key", "]", "=", "value", "[", "key", "]", "if", "discard", ":", "obj", "=", "None", "return", "bytes_read", ",", "obj" ]
Parses one entry in an inventory category. :param raw: the raw data to the entry. May contain more than one entry, only one entry will be read in that case. :param fields: an iterable of EntryField objects to be used for parsing the entry. :returns: tuple -- the number of bytes read and a dictionary representing the entry.
[ "Parses", "one", "entry", "in", "an", "inventory", "category", "." ]
python
train
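A minimal sketch of driving the parser with a stand-in field description. The EntryField layout below (name/fmt/include/presence/mapper/valuefunc/multivaluefunc) is inferred from the attributes the function reads and is an assumption about the real pyghmi class; the snippet is Python 2 flavored to match the str-based rstrip above.

import struct
from collections import namedtuple

# Stand-in for pyghmi's EntryField, reduced to the attributes used above
EntryField = namedtuple('EntryField',
                        'name fmt include presence mapper valuefunc multivaluefunc')

fields = [
    EntryField('index', '<H', True, False, None, None, False),
    EntryField('name', '16s', True, False, None, None, False),
]
raw = struct.pack('<H', 3) + 'DIMM 3'.ljust(16, '\x00')
bytes_read, entry = parse_inventory_category_entry(raw, fields)
print(bytes_read, entry)   # 18 {'index': 3, 'name': 'DIMM 3'}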
Microsoft/malmo
Malmo/samples/Python_examples/human_action.py
https://github.com/Microsoft/malmo/blob/4139cd6f3e52f6e893a931a1d4b70d35f8e70e5a/Malmo/samples/Python_examples/human_action.py#L205-L222
def update(self): '''Called at regular intervals to poll the mouse position to send continuous commands.''' if self.action_space == 'continuous': # mouse movement only used for continuous action space if self.world_state and self.world_state.is_mission_running: if self.mouse_event and self.prev_mouse_event: rotation_speed = 0.1 turn_speed = ( self.mouse_event.x - self.prev_mouse_event.x ) * rotation_speed pitch_speed = ( self.mouse_event.y - self.prev_mouse_event.y ) * rotation_speed self.agent_host.sendCommand( 'turn '+str(turn_speed) ) self.agent_host.sendCommand( 'pitch '+str(pitch_speed) ) if self.mouse_event: if os.name == 'nt': # (moving the mouse cursor only seems to work on Windows) self.canvas.event_generate('<Motion>', warp=True, x=old_div(self.canvas.winfo_width(),2), y=old_div(self.canvas.winfo_height(),2)) # put cursor at center self.mouse_event.x = old_div(self.canvas.winfo_width(),2) self.mouse_event.y = old_div(self.canvas.winfo_height(),2) self.prev_mouse_event = self.mouse_event if self.world_state.is_mission_running: self.root.after(50, self.update)
[ "def", "update", "(", "self", ")", ":", "if", "self", ".", "action_space", "==", "'continuous'", ":", "# mouse movement only used for continuous action space", "if", "self", ".", "world_state", "and", "self", ".", "world_state", ".", "is_mission_running", ":", "if", "self", ".", "mouse_event", "and", "self", ".", "prev_mouse_event", ":", "rotation_speed", "=", "0.1", "turn_speed", "=", "(", "self", ".", "mouse_event", ".", "x", "-", "self", ".", "prev_mouse_event", ".", "x", ")", "*", "rotation_speed", "pitch_speed", "=", "(", "self", ".", "mouse_event", ".", "y", "-", "self", ".", "prev_mouse_event", ".", "y", ")", "*", "rotation_speed", "self", ".", "agent_host", ".", "sendCommand", "(", "'turn '", "+", "str", "(", "turn_speed", ")", ")", "self", ".", "agent_host", ".", "sendCommand", "(", "'pitch '", "+", "str", "(", "pitch_speed", ")", ")", "if", "self", ".", "mouse_event", ":", "if", "os", ".", "name", "==", "'nt'", ":", "# (moving the mouse cursor only seems to work on Windows)", "self", ".", "canvas", ".", "event_generate", "(", "'<Motion>'", ",", "warp", "=", "True", ",", "x", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_width", "(", ")", ",", "2", ")", ",", "y", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_height", "(", ")", ",", "2", ")", ")", "# put cursor at center", "self", ".", "mouse_event", ".", "x", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_width", "(", ")", ",", "2", ")", "self", ".", "mouse_event", ".", "y", "=", "old_div", "(", "self", ".", "canvas", ".", "winfo_height", "(", ")", ",", "2", ")", "self", ".", "prev_mouse_event", "=", "self", ".", "mouse_event", "if", "self", ".", "world_state", ".", "is_mission_running", ":", "self", ".", "root", ".", "after", "(", "50", ",", "self", ".", "update", ")" ]
Called at regular intervals to poll the mouse position to send continuous commands.
[ "Called", "at", "regular", "intervals", "to", "poll", "the", "mouse", "position", "to", "send", "continuous", "commands", "." ]
python
train
bioidiap/gridtk
gridtk/models.py
https://github.com/bioidiap/gridtk/blob/9e3291b8b50388682908927231b2730db1da147d/gridtk/models.py#L127-L156
def queue(self, new_job_id = None, new_job_name = None, queue_name = None): """Sets the status of this job to 'queued' or 'waiting'.""" # update the job id (i.e., when the job is executed in the grid) if new_job_id is not None: self.id = new_job_id if new_job_name is not None: self.name = new_job_name if queue_name is not None: self.queue_name = queue_name new_status = 'queued' self.result = None # check if we have to wait for another job to finish for job in self.get_jobs_we_wait_for(): if job.status not in ('success', 'failure'): new_status = 'waiting' elif self.stop_on_failure and job.status == 'failure': new_status = 'failure' # reset the queued jobs that depend on us to waiting status for job in self.get_jobs_waiting_for_us(): if job.status == 'queued': job.status = 'failure' if new_status == 'failure' else 'waiting' self.status = new_status for array_job in self.array: if array_job.status not in ('success', 'failure'): array_job.status = new_status
[ "def", "queue", "(", "self", ",", "new_job_id", "=", "None", ",", "new_job_name", "=", "None", ",", "queue_name", "=", "None", ")", ":", "# update the job id (i.e., when the job is executed in the grid)", "if", "new_job_id", "is", "not", "None", ":", "self", ".", "id", "=", "new_job_id", "if", "new_job_name", "is", "not", "None", ":", "self", ".", "name", "=", "new_job_name", "if", "queue_name", "is", "not", "None", ":", "self", ".", "queue_name", "=", "queue_name", "new_status", "=", "'queued'", "self", ".", "result", "=", "None", "# check if we have to wait for another job to finish", "for", "job", "in", "self", ".", "get_jobs_we_wait_for", "(", ")", ":", "if", "job", ".", "status", "not", "in", "(", "'success'", ",", "'failure'", ")", ":", "new_status", "=", "'waiting'", "elif", "self", ".", "stop_on_failure", "and", "job", ".", "status", "==", "'failure'", ":", "new_status", "=", "'failure'", "# reset the queued jobs that depend on us to waiting status", "for", "job", "in", "self", ".", "get_jobs_waiting_for_us", "(", ")", ":", "if", "job", ".", "status", "==", "'queued'", ":", "job", ".", "status", "=", "'failure'", "if", "new_status", "==", "'failure'", "else", "'waiting'", "self", ".", "status", "=", "new_status", "for", "array_job", "in", "self", ".", "array", ":", "if", "array_job", ".", "status", "not", "in", "(", "'success'", ",", "'failure'", ")", ":", "array_job", ".", "status", "=", "new_status" ]
Sets the status of this job to 'queued' or 'waiting'.
[ "Sets", "the", "status", "of", "this", "job", "to", "queued", "or", "waiting", "." ]
python
train
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L189-L238
def sort_strings(strings, sort_order=None, reverse=False, case_sensitive=False, sort_order_first=True): """Sort a list of strings according to the provided sorted list of string prefixes TODO: - Provide an option to use `.startswith()` rather than a fixed prefix length (will be much slower) Arguments: sort_order_first (bool): Whether strings in sort_order should always precede "unknown" strings sort_order (sequence of str): Desired ordering as a list of prefixes to the strings If sort_order strings have varying length, the max length will determine the prefix length compared reverse (bool): whether to reverse the sort order. Passed through to `sorted(strings, reverse=reverse)` case_sensitive (bool): Whether to sort in lexicographic rather than alphabetic order and whether the prefixes in sort_order are checked in a case-sensitive way Examples: >>> sort_strings(['morn32', 'morning', 'unknown', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'doy', 'mor')) ['date', 'dow', 'moy', 'doy', 'morn32', 'morning', 'unknown'] >>> sort_strings(['morn32', 'morning', 'unknown', 'less unknown', 'lucy', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'doy', 'mor'), reverse=True) ['unknown', 'lucy', 'less unknown', 'morning', 'morn32', 'doy', 'moy', 'dow', 'date'] Strings whose prefixes don't exist in `sort_order` sequence can be interleaved into the sorted list in lexical order by setting `sort_order_first=False` >>> sort_strings(['morn32', 'morning', 'unknown', 'lucy', 'less unknown', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), ... sort_order_first=False) # doctest: +NORMALIZE_WHITESPACE ['date', 'dow', 'doy', 'less unknown', 'lucy', 'moy', 'morn32', 'morning', 'unknown'] """ if not case_sensitive: sort_order = tuple(s.lower() for s in sort_order) strings = tuple(s.lower() for s in strings) prefix_len = max(len(s) for s in sort_order) def compare(a, b, prefix_len=prefix_len): if prefix_len: if a[:prefix_len] in sort_order: if b[:prefix_len] in sort_order: comparison = sort_order.index(a[:prefix_len]) - sort_order.index(b[:prefix_len]) comparison = int(comparison / abs(comparison or 1)) if comparison: return comparison * (-2 * reverse + 1) elif sort_order_first: return -1 * (-2 * reverse + 1) # b may be in sort_order list, so it should be first elif sort_order_first and b[:prefix_len] in sort_order: return -2 * reverse + 1 return (-1 * (a < b) + 1 * (a > b)) * (-2 * reverse + 1) return sorted(strings, key=functools.cmp_to_key(compare))
[ "def", "sort_strings", "(", "strings", ",", "sort_order", "=", "None", ",", "reverse", "=", "False", ",", "case_sensitive", "=", "False", ",", "sort_order_first", "=", "True", ")", ":", "if", "not", "case_sensitive", ":", "sort_order", "=", "tuple", "(", "s", ".", "lower", "(", ")", "for", "s", "in", "sort_order", ")", "strings", "=", "tuple", "(", "s", ".", "lower", "(", ")", "for", "s", "in", "strings", ")", "prefix_len", "=", "max", "(", "len", "(", "s", ")", "for", "s", "in", "sort_order", ")", "def", "compare", "(", "a", ",", "b", ",", "prefix_len", "=", "prefix_len", ")", ":", "if", "prefix_len", ":", "if", "a", "[", ":", "prefix_len", "]", "in", "sort_order", ":", "if", "b", "[", ":", "prefix_len", "]", "in", "sort_order", ":", "comparison", "=", "sort_order", ".", "index", "(", "a", "[", ":", "prefix_len", "]", ")", "-", "sort_order", ".", "index", "(", "b", "[", ":", "prefix_len", "]", ")", "comparison", "=", "int", "(", "comparison", "/", "abs", "(", "comparison", "or", "1", ")", ")", "if", "comparison", ":", "return", "comparison", "*", "(", "-", "2", "*", "reverse", "+", "1", ")", "elif", "sort_order_first", ":", "return", "-", "1", "*", "(", "-", "2", "*", "reverse", "+", "1", ")", "# b may be in sort_order list, so it should be first", "elif", "sort_order_first", "and", "b", "[", ":", "prefix_len", "]", "in", "sort_order", ":", "return", "-", "2", "*", "reverse", "+", "1", "return", "(", "-", "1", "*", "(", "a", "<", "b", ")", "+", "1", "*", "(", "a", ">", "b", ")", ")", "*", "(", "-", "2", "*", "reverse", "+", "1", ")", "return", "sorted", "(", "strings", ",", "key", "=", "functools", ".", "cmp_to_key", "(", "compare", ")", ")" ]
Sort a list of strings according to the provided sorted list of string prefixes TODO: - Provide an option to use `.startswith()` rather than a fixed prefix length (will be much slower) Arguments: sort_order_first (bool): Whether strings in sort_order should always precede "unknown" strings sort_order (sequence of str): Desired ordering as a list of prefixes to the strings If sort_order strings have varying length, the max length will determine the prefix length compared reverse (bool): whether to reverse the sort order. Passed through to `sorted(strings, reverse=reverse)` case_sensitive (bool): Whether to sort in lexicographic rather than alphabetic order and whether the prefixes in sort_order are checked in a case-sensitive way Examples: >>> sort_strings(['morn32', 'morning', 'unknown', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'doy', 'mor')) ['date', 'dow', 'moy', 'doy', 'morn32', 'morning', 'unknown'] >>> sort_strings(['morn32', 'morning', 'unknown', 'less unknown', 'lucy', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'doy', 'mor'), reverse=True) ['unknown', 'lucy', 'less unknown', 'morning', 'morn32', 'doy', 'moy', 'dow', 'date'] Strings whose prefixes don't exist in `sort_order` sequence can be interleaved into the sorted list in lexical order by setting `sort_order_first=False` >>> sort_strings(['morn32', 'morning', 'unknown', 'lucy', 'less unknown', 'date', 'dow', 'doy', 'moy'], ... ('dat', 'dow', 'moy', 'dom', 'moy', 'mor'), ... sort_order_first=False) # doctest: +NORMALIZE_WHITESPACE ['date', 'dow', 'doy', 'less unknown', 'lucy', 'moy', 'morn32', 'morning', 'unknown']
[ "Sort", "a", "list", "of", "strings", "according", "to", "the", "provided", "sorted", "list", "of", "string", "prefixes" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L23267-L23302
def get_properties(self, names): """Returns values for a group of properties in one call. The names of the properties to get are specified using the @a names argument which is a list of comma-separated property names or an empty string if all properties are to be returned. Currently the value of this argument is ignored and the method always returns all existing properties. The list of all properties supported by the given medium format can be obtained with :py:func:`IMediumFormat.describe_properties` . The method returns two arrays, the array of property names corresponding to the @a names argument and the current values of these properties. Both arrays have the same number of elements, where each element at the given index in the first array corresponds to an element at the same index in the second array. For properties that do not have assigned values, an empty string is returned at the appropriate index in the @a returnValues array. in names of type str Names of properties to get. out return_names of type str Names of returned properties. return return_values of type str Values of returned properties. """ if not isinstance(names, basestring): raise TypeError("names can only be an instance of type basestring") (return_values, return_names) = self._call("getProperties", in_p=[names]) return (return_values, return_names)
[ "def", "get_properties", "(", "self", ",", "names", ")", ":", "if", "not", "isinstance", "(", "names", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"names can only be an instance of type basestring\"", ")", "(", "return_values", ",", "return_names", ")", "=", "self", ".", "_call", "(", "\"getProperties\"", ",", "in_p", "=", "[", "names", "]", ")", "return", "(", "return_values", ",", "return_names", ")" ]
Returns values for a group of properties in one call. The names of the properties to get are specified using the @a names argument which is a list of comma-separated property names or an empty string if all properties are to be returned. Currently the value of this argument is ignored and the method always returns all existing properties. The list of all properties supported by the given medium format can be obtained with :py:func:`IMediumFormat.describe_properties` . The method returns two arrays, the array of property names corresponding to the @a names argument and the current values of these properties. Both arrays have the same number of elements, where each element at the given index in the first array corresponds to an element at the same index in the second array. For properties that do not have assigned values, an empty string is returned at the appropriate index in the @a returnValues array. in names of type str Names of properties to get. out return_names of type str Names of returned properties. return return_values of type str Values of returned properties.
[ "Returns", "values", "for", "a", "group", "of", "properties", "in", "one", "call", ".", "The", "names", "of", "the", "properties", "to", "get", "are", "specified", "using", "the", "@a", "names", "argument", "which", "is", "a", "list", "of", "comma", "-", "separated", "property", "names", "or", "an", "empty", "string", "if", "all", "properties", "are", "to", "be", "returned", ".", "Currently", "the", "value", "of", "this", "argument", "is", "ignored", "and", "the", "method", "always", "returns", "all", "existing", "properties", ".", "The", "list", "of", "all", "properties", "supported", "by", "the", "given", "medium", "format", "can", "be", "obtained", "with", ":", "py", ":", "func", ":", "IMediumFormat", ".", "describe_properties", ".", "The", "method", "returns", "two", "arrays", "the", "array", "of", "property", "names", "corresponding", "to", "the", "@a", "names", "argument", "and", "the", "current", "values", "of", "these", "properties", ".", "Both", "arrays", "have", "the", "same", "number", "of", "elements", "where", "each", "element", "at", "the", "given", "index", "in", "the", "first", "array", "corresponds", "to", "an", "element", "at", "the", "same", "index", "in", "the", "second", "array", ".", "For", "properties", "that", "do", "not", "have", "assigned", "values", "an", "empty", "string", "is", "returned", "at", "the", "appropriate", "index", "in", "the", "@a", "returnValues", "array", "." ]
python
train
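A call sketch for the record above; obtaining the medium object from a VirtualBox session is elided and assumed. Note the method returns values first, then names.

# `medium` is a previously obtained IMedium wrapper (assumption)
values, names = medium.get_properties("")   # empty string requests all properties
for prop_name, prop_value in zip(names, values):
    print(prop_name, "=", prop_value)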
numenta/htmresearch
projects/speech_commands/analyze_nonzero.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/speech_commands/analyze_nonzero.py#L194-L214
def run(pool, expName, name, args): """ Runs :func:`analyzeWeightPruning` in parallel and saves the results :param pool: multiprocessing pool :param expName: Experiment name :param name: File/Plot name (i.e. 'weight_pruning') :param args: Argument list to be passed to :func:`analyzeWeightPruning` :return: pandas dataframe with all the results """ tables = pool.map(analyzeWeightPruning, args) merged = pd.concat(tables, axis=1).sort_index(axis=1) filename = "{}_{}".format(name, expName) plotDataframe(merged, filename, "{}.pdf".format(filename)) print() print(filename) print(tabulate(merged, headers='keys', tablefmt='fancy_grid', numalign="right")) merged.to_csv("{}.csv".format(filename)) return merged
[ "def", "run", "(", "pool", ",", "expName", ",", "name", ",", "args", ")", ":", "tables", "=", "pool", ".", "map", "(", "analyzeWeightPruning", ",", "args", ")", "merged", "=", "pd", ".", "concat", "(", "tables", ",", "axis", "=", "1", ")", ".", "sort_index", "(", "axis", "=", "1", ")", "filename", "=", "\"{}_{}\"", ".", "format", "(", "name", ",", "expName", ")", "plotDataframe", "(", "merged", ",", "filename", ",", "\"{}.pdf\"", ".", "format", "(", "filename", ")", ")", "print", "(", ")", "print", "(", "filename", ")", "print", "(", "tabulate", "(", "merged", ",", "headers", "=", "'keys'", ",", "tablefmt", "=", "'fancy_grid'", ",", "numalign", "=", "\"right\"", ")", ")", "merged", ".", "to_csv", "(", "\"{}.csv\"", ".", "format", "(", "filename", ")", ")", "return", "merged" ]
Runs :func:`analyzeWeightPruning` in parallel and saves the results :param pool: multiprocessing pool :param expName: Experiment name :param name: File/Plot name (i.e. 'weight_pruning') :param args: Argument list to be passed to :func:`analyzeWeightPruning` :return: pandas dataframe with all the results
[ "Runs", ":", "func", ":", "analyzeWeightPruning", "in", "parallel", "and", "saves", "the", "results" ]
python
train
noahbenson/pimms
pimms/calculation.py
https://github.com/noahbenson/pimms/blob/9051b86d6b858a7a13511b72c48dc21bc903dab2/pimms/calculation.py#L104-L114
def set_meta(self, meta_data): ''' node.set_meta(meta) yields a calculation node identical to the given node except that its meta_data attribute has been set to the given dictionary meta. If meta is not persistent, it is cast to a persistent dictionary first. ''' if not (isinstance(meta_data, ps.PMap) or isinstance(meta_data, IMap)): meta_data = ps.pmap(meta_data) new_cnode = copy.copy(self) object.__setattr__(new_cnode, 'meta_data', meta_data) return new_cnode
[ "def", "set_meta", "(", "self", ",", "meta_data", ")", ":", "if", "not", "(", "isinstance", "(", "meta_data", ",", "ps", ".", "PMap", ")", "or", "isinstance", "(", "meta_data", ",", "IMap", ")", ")", ":", "meta_data", "=", "ps", ".", "pmap", "(", "meta_data", ")", "new_cnode", "=", "copy", ".", "copy", "(", "self", ")", "object", ".", "__setattr__", "(", "new_cnode", ",", "'meta_data'", ",", "meta_data", ")", "return", "new_cnode" ]
node.set_meta(meta) yields a calculation node identical to the given node except that its meta_data attribute has been set to the given dictionary meta. If meta is not persistent, it is cast to a persistent dictionary first.
[ "node", ".", "set_meta", "(", "meta", ")", "yields", "a", "calculation", "node", "identical", "to", "the", "given", "node", "except", "that", "its", "meta_data", "attribute", "has", "been", "set", "to", "the", "given", "dictionary", "meta", ".", "If", "meta", "is", "not", "persistent", "it", "is", "cast", "to", "a", "persistent", "dictionary", "first", "." ]
python
train
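A small sketch, assuming pimms' calc decorator produces the calculation nodes this method lives on; the output name and metadata below are illustrative.

import pimms

@pimms.calc('doubled')
def double_x(x):
    return 2 * x

tagged = double_x.set_meta({'author': 'example'})   # returns a new node
print(tagged.meta_data['author'])   # 'example'; double_x itself is left unmodified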
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L680-L690
def cyvcf_add_filter(rec, name): """Add a FILTER value to a cyvcf2 record """ if rec.FILTER: filters = rec.FILTER.split(";") else: filters = [] if name not in filters: filters.append(name) rec.FILTER = filters return rec
[ "def", "cyvcf_add_filter", "(", "rec", ",", "name", ")", ":", "if", "rec", ".", "FILTER", ":", "filters", "=", "rec", ".", "FILTER", ".", "split", "(", "\";\"", ")", "else", ":", "filters", "=", "[", "]", "if", "name", "not", "in", "filters", ":", "filters", ".", "append", "(", "name", ")", "rec", ".", "FILTER", "=", "filters", "return", "rec" ]
Add a FILTER value to a cyvcf2 record
[ "Add", "a", "FILTER", "value", "to", "a", "cyvcf2", "record" ]
python
train
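A usage sketch with cyvcf2, flagging low-quality records via the function above; the file names and QUAL threshold are placeholders.

from cyvcf2 import VCF, Writer

vcf = VCF("input.vcf.gz")
writer = Writer("flagged.vcf", vcf)
for rec in vcf:
    if rec.QUAL is not None and rec.QUAL < 20:
        rec = cyvcf_add_filter(rec, "lowqual")  # appends to existing FILTERs
    writer.write_record(rec)
writer.close()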
HPENetworking/PYHPEIMC
pyhpeimc/plat/perf.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/perf.py#L101-L130
def delete_perf_task(task_name, auth, url): """ Function takes a str of the target task_name to be deleted and retrieves task_id using the get_perf_task function. Once the task_id has been successfully retrieved it is populated into the task_id variable and a DELETE call is made against the HPE IMC REST interface to delete the target task. :param task_name: str of task name :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: int of 204 if successful, str of "Perf Task doesn't exist" if the task does not exist :rtype: int """ task_id = get_perf_task(task_name, auth, url) if isinstance(task_id, str): print("Perf task doesn't exist") return 403 task_id = task_id['taskId'] get_perf_task_url = "/imcrs/perf/task/delete/" + str(task_id) f_url = url + get_perf_task_url response = requests.delete(f_url, auth=auth, headers=HEADERS) try: if response.status_code == 204: print("Perf Task successfully deleted") return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + ' delete_perf_task: An Error has occurred'
[ "def", "delete_perf_task", "(", "task_name", ",", "auth", ",", "url", ")", ":", "task_id", "=", "get_perf_task", "(", "task_name", ",", "auth", ",", "url", ")", "if", "isinstance", "(", "task_id", ",", "str", ")", ":", "print", "(", "\"Perf task doesn't exist\"", ")", "return", "403", "task_id", "=", "task_id", "[", "'taskId'", "]", "get_perf_task_url", "=", "\"/imcrs/perf/task/delete/\"", "+", "str", "(", "task_id", ")", "f_url", "=", "url", "+", "get_perf_task_url", "response", "=", "requests", ".", "delete", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "try", ":", "if", "response", ".", "status_code", "==", "204", ":", "print", "(", "\"Perf Task successfully deleted\"", ")", "return", "response", ".", "status_code", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "' delete_perf_task: An Error has occurred'" ]
Function takes a str of the target task_name to be deleted and retrieves task_id using the get_perf_task function. Once the task_id has been successfully retrieved it is populated into the task_id variable and a DELETE call is made against the HPE IMC REST interface to delete the target task. :param task_name: str of task name :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: int of 204 if successful, str of "Perf Task doesn't exist" if the task does not exist :rtype: int
[ "Function", "takes", "a", "str", "of", "the", "target", "task_name", "to", "be", "deleted", "and", "retrieves", "task_id", "using", "the", "get_perf_task", "function", ".", "Once", "the", "task_id", "has", "been", "successfully", "retrieved", "it", "is", "populated", "into", "the", "task_id", "variable", "and", "a", "DELETE", "call", "is", "made", "against", "the", "HPE", "IMC", "REST", "interface", "to", "delete", "the", "target", "task", ".", ":", "param", "task_name", ":", "str", "of", "task", "name", ":", "param", "auth", ":", "requests", "auth", "object", "#usually", "auth", ".", "creds", "from", "auth", "pyhpeimc", ".", "auth", ".", "class" ]
python
train
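A call sketch following the docstring's auth conventions above; the host, credentials, and task name are placeholders.

import pyhpeimc.auth
from pyhpeimc.plat.perf import delete_perf_task

auth = pyhpeimc.auth.IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
status = delete_perf_task("cpu_monitor_task", auth.creds, auth.url)
if status == 204:
    print("task removed")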
c0ntrol-x/p4rr0t007
p4rr0t007/lib/core.py
https://github.com/c0ntrol-x/p4rr0t007/blob/6fe88ec1231a778b9f1d13bc61332581715d646e/p4rr0t007/lib/core.py#L89-L99
def lpad(s, N, char='\0'): """pads a string to the left with null-bytes or any other given character. .. note:: This is used by the :py:func:`xor` function. :param s: the string :param N: the total length to pad the string to :returns: the padded string """ assert isinstance(char, bytes) and len(char) == 1, 'char should be a string with length 1' return s.rjust(N, char)
[ "def", "lpad", "(", "s", ",", "N", ",", "char", "=", "'\\0'", ")", ":", "assert", "isinstance", "(", "char", ",", "bytes", ")", "and", "len", "(", "char", ")", "==", "1", ",", "'char should be a string with length 1'", "return", "s", ".", "rjust", "(", "N", ",", "char", ")" ]
pads a string to the left with null-bytes or any other given character. .. note:: This is used by the :py:func:`xor` function. :param s: the string :param N: the total length to pad the string to :returns: the padded string
[ "pads", "a", "string", "to", "the", "left", "with", "null", "-", "bytes", "or", "any", "other", "given", "character", "." ]
python
train
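A quick sketch of the padding behavior (Python 2 flavored, matching the bytes assertion above); per the docstring, this is how xor equalizes operand lengths.

padded = lpad('\xde\xad', 8)
assert len(padded) == 8
assert padded == '\x00' * 6 + '\xde\xad'   # original bytes kept at the right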
roboogle/gtkmvc3
gtkmvco/gtkmvc3/adapters/basic.py
https://github.com/roboogle/gtkmvc3/blob/63405fd8d2056be26af49103b13a8d5e57fe4dff/gtkmvco/gtkmvc3/adapters/basic.py#L369-L382
def _write_widget(self, val): """Writes value into the widget. If specified, user setter is invoked.""" self._itsme = True try: setter = self._wid_info[self._wid][1] wtype = self._wid_info[self._wid][2] if setter: if wtype is not None: setter(self._wid, self._cast_value(val, wtype)) else: setter(self._wid, val) finally: self._itsme = False
[ "def", "_write_widget", "(", "self", ",", "val", ")", ":", "self", ".", "_itsme", "=", "True", "try", ":", "setter", "=", "self", ".", "_wid_info", "[", "self", ".", "_wid", "]", "[", "1", "]", "wtype", "=", "self", ".", "_wid_info", "[", "self", ".", "_wid", "]", "[", "2", "]", "if", "setter", ":", "if", "wtype", "is", "not", "None", ":", "setter", "(", "self", ".", "_wid", ",", "self", ".", "_cast_value", "(", "val", ",", "wtype", ")", ")", "else", ":", "setter", "(", "self", ".", "_wid", ",", "val", ")", "finally", ":", "self", ".", "_itsme", "=", "False" ]
Writes value into the widget. If specified, user setter is invoked.
[ "Writes", "value", "into", "the", "widget", ".", "If", "specified", "user", "setter", "is", "invoked", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L1016-L1027
def create_ustar_header(self, info, encoding, errors): """Return the object as a ustar header block. """ info["magic"] = POSIX_MAGIC if len(info["linkname"]) > LENGTH_LINK: raise ValueError("linkname is too long") if len(info["name"]) > LENGTH_NAME: info["prefix"], info["name"] = self._posix_split_name(info["name"]) return self._create_header(info, USTAR_FORMAT, encoding, errors)
[ "def", "create_ustar_header", "(", "self", ",", "info", ",", "encoding", ",", "errors", ")", ":", "info", "[", "\"magic\"", "]", "=", "POSIX_MAGIC", "if", "len", "(", "info", "[", "\"linkname\"", "]", ")", ">", "LENGTH_LINK", ":", "raise", "ValueError", "(", "\"linkname is too long\"", ")", "if", "len", "(", "info", "[", "\"name\"", "]", ")", ">", "LENGTH_NAME", ":", "info", "[", "\"prefix\"", "]", ",", "info", "[", "\"name\"", "]", "=", "self", ".", "_posix_split_name", "(", "info", "[", "\"name\"", "]", ")", "return", "self", ".", "_create_header", "(", "info", ",", "USTAR_FORMAT", ",", "encoding", ",", "errors", ")" ]
Return the object as a ustar header block.
[ "Return", "the", "object", "as", "a", "ustar", "header", "block", "." ]
python
train
twisted/mantissa
xmantissa/webnav.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/webnav.py#L251-L310
def applicationNavigation(ctx, translator, navigation): """ Horizontal, primary-only navigation view. For the navigation element currently being viewed, copies of the I{selected-app-tab} and I{selected-tab-contents} patterns will be loaded from the tag. For all other navigation elements, copies of the I{app-tab} and I{tab-contents} patterns will be loaded. For either case, the former pattern will have its I{name} slot filled with the name of the navigation element and its I{tab-contents} slot filled with the latter pattern. The latter pattern will have its I{href} slot filled with a link to the corresponding navigation element. The I{tabs} slot on the tag will be filled with all the I{selected-app-tab} or I{app-tab} pattern copies. @type ctx: L{nevow.context.WebContext} @type translator: L{IWebTranslator} provider @type navigation: L{list} of L{Tab} @rtype: {nevow.stan.Tag} """ setTabURLs(navigation, translator) selectedTab = getSelectedTab(navigation, url.URL.fromContext(ctx)) getp = IQ(ctx.tag).onePattern tabs = [] for tab in navigation: if tab == selectedTab or selectedTab in tab.children: p = 'selected-app-tab' contentp = 'selected-tab-contents' else: p = 'app-tab' contentp = 'tab-contents' childTabs = [] for subtab in tab.children: try: subtabp = getp("subtab") except NodeNotFound: continue childTabs.append( dictFillSlots(subtabp, { 'name': subtab.name, 'href': subtab.linkURL, 'tab-contents': getp("subtab-contents") })) tabs.append(dictFillSlots( getp(p), {'name': tab.name, 'tab-contents': getp(contentp).fillSlots( 'href', tab.linkURL), 'subtabs': childTabs})) ctx.tag.fillSlots('tabs', tabs) return ctx.tag
[ "def", "applicationNavigation", "(", "ctx", ",", "translator", ",", "navigation", ")", ":", "setTabURLs", "(", "navigation", ",", "translator", ")", "selectedTab", "=", "getSelectedTab", "(", "navigation", ",", "url", ".", "URL", ".", "fromContext", "(", "ctx", ")", ")", "getp", "=", "IQ", "(", "ctx", ".", "tag", ")", ".", "onePattern", "tabs", "=", "[", "]", "for", "tab", "in", "navigation", ":", "if", "tab", "==", "selectedTab", "or", "selectedTab", "in", "tab", ".", "children", ":", "p", "=", "'selected-app-tab'", "contentp", "=", "'selected-tab-contents'", "else", ":", "p", "=", "'app-tab'", "contentp", "=", "'tab-contents'", "childTabs", "=", "[", "]", "for", "subtab", "in", "tab", ".", "children", ":", "try", ":", "subtabp", "=", "getp", "(", "\"subtab\"", ")", "except", "NodeNotFound", ":", "continue", "childTabs", ".", "append", "(", "dictFillSlots", "(", "subtabp", ",", "{", "'name'", ":", "subtab", ".", "name", ",", "'href'", ":", "subtab", ".", "linkURL", ",", "'tab-contents'", ":", "getp", "(", "\"subtab-contents\"", ")", "}", ")", ")", "tabs", ".", "append", "(", "dictFillSlots", "(", "getp", "(", "p", ")", ",", "{", "'name'", ":", "tab", ".", "name", ",", "'tab-contents'", ":", "getp", "(", "contentp", ")", ".", "fillSlots", "(", "'href'", ",", "tab", ".", "linkURL", ")", ",", "'subtabs'", ":", "childTabs", "}", ")", ")", "ctx", ".", "tag", ".", "fillSlots", "(", "'tabs'", ",", "tabs", ")", "return", "ctx", ".", "tag" ]
Horizontal, primary-only navigation view. For the navigation element currently being viewed, copies of the I{selected-app-tab} and I{selected-tab-contents} patterns will be loaded from the tag. For all other navigation elements, copies of the I{app-tab} and I{tab-contents} patterns will be loaded. For either case, the former pattern will have its I{name} slot filled with the name of the navigation element and its I{tab-contents} slot filled with the latter pattern. The latter pattern will have its I{href} slot filled with a link to the corresponding navigation element. The I{tabs} slot on the tag will be filled with all the I{selected-app-tab} or I{app-tab} pattern copies. @type ctx: L{nevow.context.WebContext} @type translator: L{IWebTranslator} provider @type navigation: L{list} of L{Tab} @rtype: {nevow.stan.Tag}
[ "Horizontal", "primary", "-", "only", "navigation", "view", "." ]
python
train
linkhub-sdk/popbill.py
popbill/kakaoService.py
https://github.com/linkhub-sdk/popbill.py/blob/68a0dd7f7a937603318e93be321fde73c50b96cc/popbill/kakaoService.py#L289-L351
def sendFMS_same(self, CorpNum, PlusFriendID, Sender, Content, AltContent, AltSendType, SndDT, FilePath, ImageURL, KakaoMessages, KakaoButtons, AdsYN=False, UserID=None, RequestNum=None): """ Send friend-talk (FMS) image messages in bulk with the same content :param CorpNum: Popbill member company registration number :param PlusFriendID: Plus Friend ID :param Sender: sender number :param Content: [broadcast] friend-talk message body :param AltContent: [broadcast] alternative text message body :param AltSendType: alternative message type [blank - none, C - AlimTalk content, A - alternative message content] :param SndDT: reserved send time [format : yyyyMMddHHmmss] :param FilePath: file path :param ImageURL: image URL :param KakaoMessages: friend-talk messages (array) :param KakaoButtons: button list (max 5) :param AdsYN: whether to send as an advertisement :param UserID: Popbill member user ID :param RequestNum : request number :return: receiptNum (receipt number) """ if PlusFriendID is None or PlusFriendID == '': raise PopbillException(-99999999, "플러스친구 아이디가 입력되지 않았습니다.") if Sender is None or Sender == '': raise PopbillException(-99999999, "발신번호가 입력되지 않았습니다.") req = {} if PlusFriendID is not None or PlusFriendID != '': req['plusFriendID'] = PlusFriendID if Sender is not None or Sender != '': req['snd'] = Sender if Content is not None or Content != '': req['content'] = Content if AltContent is not None or AltContent != '': req['altContent'] = AltContent if AltSendType is not None or AltSendType != '': req['altSendType'] = AltSendType if SndDT is not None or SndDT != '': req['sndDT'] = SndDT if KakaoMessages is not None or KakaoMessages != '': req['msgs'] = KakaoMessages if ImageURL is not None or ImageURL != '': req['imageURL'] = ImageURL if KakaoButtons: req['btns'] = KakaoButtons if AdsYN: req['adsYN'] = True if RequestNum is not None or RequestNum != '': req['requestNum'] = RequestNum postData = self._stringtify(req) files = [] try: with open(FilePath, "rb") as F: files = [File(fieldName='file', fileName=F.name, fileData=F.read())] except IOError: raise PopbillException(-99999999, "해당경로에 파일이 없거나 읽을 수 없습니다.") result = self._httppost_files('/FMS', postData, files, CorpNum, UserID) return result.receiptNum
[ "def", "sendFMS_same", "(", "self", ",", "CorpNum", ",", "PlusFriendID", ",", "Sender", ",", "Content", ",", "AltContent", ",", "AltSendType", ",", "SndDT", ",", "FilePath", ",", "ImageURL", ",", "KakaoMessages", ",", "KakaoButtons", ",", "AdsYN", "=", "False", ",", "UserID", "=", "None", ",", "RequestNum", "=", "None", ")", ":", "if", "PlusFriendID", "is", "None", "or", "PlusFriendID", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"플러스친구 아이디가 입력되지 않았습니다.\")", "", "if", "Sender", "is", "None", "or", "Sender", "==", "''", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"발신번호가 입력되지 않았습니다.\")", "", "req", "=", "{", "}", "if", "PlusFriendID", "is", "not", "None", "or", "PlusFriendID", "!=", "''", ":", "req", "[", "'plusFriendID'", "]", "=", "PlusFriendID", "if", "Sender", "is", "not", "None", "or", "Sender", "!=", "''", ":", "req", "[", "'snd'", "]", "=", "Sender", "if", "Content", "is", "not", "None", "or", "Content", "!=", "''", ":", "req", "[", "'content'", "]", "=", "Content", "if", "AltContent", "is", "not", "None", "or", "AltContent", "!=", "''", ":", "req", "[", "'altContent'", "]", "=", "AltContent", "if", "AltSendType", "is", "not", "None", "or", "AltSendType", "!=", "''", ":", "req", "[", "'altSendType'", "]", "=", "AltSendType", "if", "SndDT", "is", "not", "None", "or", "SndDT", "!=", "''", ":", "req", "[", "'sndDT'", "]", "=", "SndDT", "if", "KakaoMessages", "is", "not", "None", "or", "KakaoMessages", "!=", "''", ":", "req", "[", "'msgs'", "]", "=", "KakaoMessages", "if", "ImageURL", "is", "not", "None", "or", "ImageURL", "!=", "''", ":", "req", "[", "'imageURL'", "]", "=", "ImageURL", "if", "KakaoButtons", ":", "req", "[", "'btns'", "]", "=", "KakaoButtons", "if", "AdsYN", ":", "req", "[", "'adsYN'", "]", "=", "True", "if", "RequestNum", "is", "not", "None", "or", "RequestNum", "!=", "''", ":", "req", "[", "'requestNum'", "]", "=", "RequestNum", "postData", "=", "self", ".", "_stringtify", "(", "req", ")", "files", "=", "[", "]", "try", ":", "with", "open", "(", "FilePath", ",", "\"rb\"", ")", "as", "F", ":", "files", "=", "[", "File", "(", "fieldName", "=", "'file'", ",", "fileName", "=", "F", ".", "name", ",", "fileData", "=", "F", ".", "read", "(", ")", ")", "]", "except", "IOError", ":", "raise", "PopbillException", "(", "-", "99999999", ",", "\"해당경로에 파일이 없거나 읽을 수 없습니다.\")", "", "result", "=", "self", ".", "_httppost_files", "(", "'/FMS'", ",", "postData", ",", "files", ",", "CorpNum", ",", "UserID", ")", "return", "result", ".", "receiptNum" ]
Send friend-talk (FMS) image messages in bulk with the same content :param CorpNum: Popbill member company registration number :param PlusFriendID: Plus Friend ID :param Sender: sender number :param Content: [broadcast] friend-talk message body :param AltContent: [broadcast] alternative text message body :param AltSendType: alternative message type [blank - none, C - AlimTalk content, A - alternative message content] :param SndDT: reserved send time [format : yyyyMMddHHmmss] :param FilePath: file path :param ImageURL: image URL :param KakaoMessages: friend-talk messages (array) :param KakaoButtons: button list (max 5) :param AdsYN: whether to send as an advertisement :param UserID: Popbill member user ID :param RequestNum : request number :return: receiptNum (receipt number)
[ "Send", "friend", "-", "talk", "(", "FMS", ")", "image", "messages", "in", "bulk", "with", "the", "same", "content", ":", "param", "CorpNum", ":", "Popbill", "member", "company", "registration", "number", ":", "param", "PlusFriendID", ":", "Plus", "Friend", "ID", ":", "param", "Sender", ":", "sender", "number", ":", "param", "Content", ":", "[", "broadcast", "]", "friend", "-", "talk", "message", "body", ":", "param", "AltContent", ":", "[", "broadcast", "]", "alternative", "text", "message", "body", ":", "param", "AltSendType", ":", "alternative", "message", "type", "[", "blank", "-", "none", "C", "-", "AlimTalk", "content", "A", "-", "alternative", "message", "content", "]", ":", "param", "SndDT", ":", "reserved", "send", "time", "[", "format", ":", "yyyyMMddHHmmss", "]", ":", "param", "FilePath", ":", "file", "path", ":", "param", "ImageURL", ":", "image", "URL", ":", "param", "KakaoMessages", ":", "friend", "-", "talk", "messages", "(", "array", ")", ":", "param", "KakaoButtons", ":", "button", "list", "(", "max", "5", ")", ":", "param", "AdsYN", ":", "whether", "to", "send", "as", "an", "advertisement", ":", "param", "UserID", ":", "Popbill", "member", "user", "ID", ":", "param", "RequestNum", ":", "request", "number", ":", "return", ":", "receiptNum", "(", "receipt", "number", ")" ]
python
train
PmagPy/PmagPy
programs/thellier_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/thellier_gui.py#L2188-L2199
def write_preferences_file(self): """ Write json preferences file to (platform specific) user data directory, or PmagPy directory if appdirs module is missing. """ user_data_dir = find_pmag_dir.find_user_data_dir("thellier_gui") if not os.path.exists(user_data_dir): find_pmag_dir.make_user_data_dir(user_data_dir) pref_file = os.path.join(user_data_dir, "thellier_gui_preferences.json") with open(pref_file, "w+") as pfile: print('-I- writing preferences to {}'.format(pref_file)) json.dump(self.preferences, pfile)
[ "def", "write_preferences_file", "(", "self", ")", ":", "user_data_dir", "=", "find_pmag_dir", ".", "find_user_data_dir", "(", "\"thellier_gui\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "user_data_dir", ")", ":", "find_pmag_dir", ".", "make_user_data_dir", "(", "user_data_dir", ")", "pref_file", "=", "os", ".", "path", ".", "join", "(", "user_data_dir", ",", "\"thellier_gui_preferences.json\"", ")", "with", "open", "(", "pref_file", ",", "\"w+\"", ")", "as", "pfile", ":", "print", "(", "'-I- writing preferences to {}'", ".", "format", "(", "pref_file", ")", ")", "json", ".", "dump", "(", "self", ".", "preferences", ",", "pfile", ")" ]
Write json preferences file to (platform specific) user data directory, or PmagPy directory if appdirs module is missing.
[ "Write", "json", "preferences", "file", "to", "(", "platform", "specific", ")", "user", "data", "directory", "or", "PmagPy", "directory", "if", "appdirs", "module", "is", "missing", "." ]
python
train
openvax/datacache
datacache/database.py
https://github.com/openvax/datacache/blob/73bcac02d37cf153710a07fbdc636aa55cb214ca/datacache/database.py#L48-L53
def table_names(self): """Returns names of all tables in the database""" query = "SELECT name FROM sqlite_master WHERE type='table'" cursor = self.connection.execute(query) results = cursor.fetchall() return [result_tuple[0] for result_tuple in results]
[ "def", "table_names", "(", "self", ")", ":", "query", "=", "\"SELECT name FROM sqlite_master WHERE type='table'\"", "cursor", "=", "self", ".", "connection", ".", "execute", "(", "query", ")", "results", "=", "cursor", ".", "fetchall", "(", ")", "return", "[", "result_tuple", "[", "0", "]", "for", "result_tuple", "in", "results", "]" ]
Returns names of all tables in the database
[ "Returns", "names", "of", "all", "tables", "in", "the", "database" ]
python
train
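For reference, the equivalent raw sqlite3 query that table_names wraps; the database path is a placeholder.

import sqlite3

conn = sqlite3.connect("genes.db")
rows = conn.execute("SELECT name FROM sqlite_master WHERE type='table'").fetchall()
print([row[0] for row in rows])   # same list table_names() would return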
totalgood/pugnlp
src/pugnlp/util.py
https://github.com/totalgood/pugnlp/blob/c43445b14afddfdeadc5f3076675c9e8fc1ee67c/src/pugnlp/util.py#L2522-L2549
def unlistify(n, depth=1, typ=list, get=None): """Return the desired element in a list ignoring the rest. >>> unlistify([1,2,3]) 1 >>> unlistify([1,[4, 5, 6],3], get=1) [4, 5, 6] >>> unlistify([1,[4, 5, 6],3], depth=2, get=1) 5 >>> unlistify([1,(4, 5, 6),3], depth=2, get=1) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, get=2) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, typ=(list, tuple), get=2) 6 """ i = 0 if depth is None: depth = 1 index_desired = get or 0 while i < depth and isinstance(n, typ): if len(n): if len(n) > index_desired: n = n[index_desired] i += 1 else: return n return n
[ "def", "unlistify", "(", "n", ",", "depth", "=", "1", ",", "typ", "=", "list", ",", "get", "=", "None", ")", ":", "i", "=", "0", "if", "depth", "is", "None", ":", "depth", "=", "1", "index_desired", "=", "get", "or", "0", "while", "i", "<", "depth", "and", "isinstance", "(", "n", ",", "typ", ")", ":", "if", "len", "(", "n", ")", ":", "if", "len", "(", "n", ")", ">", "index_desired", ":", "n", "=", "n", "[", "index_desired", "]", "i", "+=", "1", "else", ":", "return", "n", "return", "n" ]
Return the desired element in a list ignoring the rest. >>> unlistify([1,2,3]) 1 >>> unlistify([1,[4, 5, 6],3], get=1) [4, 5, 6] >>> unlistify([1,[4, 5, 6],3], depth=2, get=1) 5 >>> unlistify([1,(4, 5, 6),3], depth=2, get=1) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, get=2) (4, 5, 6) >>> unlistify([1,2,(4, 5, 6)], depth=2, typ=(list, tuple), get=2) 6
[ "Return", "the", "desired", "element", "in", "a", "list", "ignoring", "the", "rest", "." ]
python
train
xray7224/PyPump
pypump/models/feed.py
https://github.com/xray7224/PyPump/blob/f921f691c39fe021f4fd124b6bc91718c9e49b4a/pypump/models/feed.py#L456-L463
def major(self): """ Major inbox feed, contains major activities such as notes and images. """ url = self._subfeed("major") if "major" in self.url or "minor" in self.url: return self if self._major is None: self._major = self.__class__(url, pypump=self._pump) return self._major
[ "def", "major", "(", "self", ")", ":", "url", "=", "self", ".", "_subfeed", "(", "\"major\"", ")", "if", "\"major\"", "in", "self", ".", "url", "or", "\"minor\"", "in", "self", ".", "url", ":", "return", "self", "if", "self", ".", "_major", "is", "None", ":", "self", ".", "_major", "=", "self", ".", "__class__", "(", "url", ",", "pypump", "=", "self", ".", "_pump", ")", "return", "self", ".", "_major" ]
Major inbox feed, contains major activities such as notes and images.
[ "Major", "inbox", "feed", "contains", "major", "activities", "such", "as", "notes", "and", "images", "." ]
python
train
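A usage sketch; the PyPump client and OAuth verifier setup below are abbreviated assumptions, only inbox.major reflects the record above.

from pypump import PyPump, Client

client = Client(webfinger="alice@example.org", type="native", name="demo")
pump = PyPump(client=client,
              verifier_callback=lambda url: raw_input(url + " verifier: "))
for activity in pump.me.inbox.major:
    print(activity)
    break   # just peek at the newest major activity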
oscarbranson/latools
latools/filtering/filt_obj.py
https://github.com/oscarbranson/latools/blob/cd25a650cfee318152f234d992708511f7047fbe/latools/filtering/filt_obj.py#L174-L189
def clear(self): """ Clear all filters. """ self.components = {} self.info = {} self.params = {} self.switches = {} self.keys = {} self.index = {} self.sets = {} self.maxset = -1 self.n = 0 for a in self.analytes: self.switches[a] = {} return
[ "def", "clear", "(", "self", ")", ":", "self", ".", "components", "=", "{", "}", "self", ".", "info", "=", "{", "}", "self", ".", "params", "=", "{", "}", "self", ".", "switches", "=", "{", "}", "self", ".", "keys", "=", "{", "}", "self", ".", "index", "=", "{", "}", "self", ".", "sets", "=", "{", "}", "self", ".", "maxset", "=", "-", "1", "self", ".", "n", "=", "0", "for", "a", "in", "self", ".", "analytes", ":", "self", ".", "switches", "[", "a", "]", "=", "{", "}", "return" ]
Clear all filters.
[ "Clear", "all", "filters", "." ]
python
test
nkavaldj/myhdl_lib
myhdl_lib/simulation/_DUTer.py
https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/simulation/_DUTer.py#L66-L98
def _getCosimulation(self, func, **kwargs):
    ''' Returns a co-simulation instance of func.
        Uses the _simulator specified by self._simulator.
        Enables traces if self._trace is True

            func - MyHDL function to be simulated
            kwargs - dict of func interface assignments: for signals and parameters
    '''
    vals = {}
    vals['topname'] = func.func_name
    vals['unitname'] = func.func_name.lower()
    hdlsim = self._simulator
    if not hdlsim:
        raise ValueError("No _simulator specified")
    if not self.sim_reg.has_key(hdlsim):
        raise ValueError("Simulator {} is not registered".format(hdlsim))
    hdl, analyze_cmd, elaborate_cmd, simulate_cmd = self.sim_reg[hdlsim]

    # Convert to HDL
    if hdl == "verilog":
        toVerilog(func, **kwargs)
        if self._trace:
            self._enableTracesVerilog("./tb_{topname}.v".format(**vals))
    elif hdl == "vhdl":
        toVHDL(func, **kwargs)

    # Analyze HDL
    os.system(analyze_cmd.format(**vals))
    # Elaborate
    if elaborate_cmd:
        os.system(elaborate_cmd.format(**vals))
    # Simulate
    return Cosimulation(simulate_cmd.format(**vals), **kwargs)
[ "def", "_getCosimulation", "(", "self", ",", "func", ",", "*", "*", "kwargs", ")", ":", "vals", "=", "{", "}", "vals", "[", "'topname'", "]", "=", "func", ".", "func_name", "vals", "[", "'unitname'", "]", "=", "func", ".", "func_name", ".", "lower", "(", ")", "hdlsim", "=", "self", ".", "_simulator", "if", "not", "hdlsim", ":", "raise", "ValueError", "(", "\"No _simulator specified\"", ")", "if", "not", "self", ".", "sim_reg", ".", "has_key", "(", "hdlsim", ")", ":", "raise", "ValueError", "(", "\"Simulator {} is not registered\"", ".", "format", "(", "hdlsim", ")", ")", "hdl", ",", "analyze_cmd", ",", "elaborate_cmd", ",", "simulate_cmd", "=", "self", ".", "sim_reg", "[", "hdlsim", "]", "# Convert to HDL", "if", "hdl", "==", "\"verilog\"", ":", "toVerilog", "(", "func", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_trace", ":", "self", ".", "_enableTracesVerilog", "(", "\"./tb_{topname}.v\"", ".", "format", "(", "*", "*", "vals", ")", ")", "elif", "hdl", "==", "\"vhdl\"", ":", "toVHDL", "(", "func", ",", "*", "*", "kwargs", ")", "# Analyze HDL", "os", ".", "system", "(", "analyze_cmd", ".", "format", "(", "*", "*", "vals", ")", ")", "# Elaborate", "if", "elaborate_cmd", ":", "os", ".", "system", "(", "elaborate_cmd", ".", "format", "(", "*", "*", "vals", ")", ")", "# Simulate", "return", "Cosimulation", "(", "simulate_cmd", ".", "format", "(", "*", "*", "vals", ")", ",", "*", "*", "kwargs", ")" ]
Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True

func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
[ "Returns", "a", "co", "-", "simulation", "instance", "of", "func", ".", "Uses", "the", "_simulator", "specified", "by", "self", ".", "_simulator", ".", "Enables", "traces", "if", "self", ".", "_trace", "is", "True", "func", "-", "MyHDL", "function", "to", "be", "simulated", "kwargs", "-", "dict", "of", "func", "interface", "assignments", ":", "for", "signals", "and", "parameters" ]
python
train
couchbase/couchbase-python-client
couchbase/bucketmanager.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/bucketmanager.py#L414-L428
def n1ql_index_create_primary(self, defer=False, ignore_exists=False):
    """
    Create the primary index on the bucket.

    Equivalent to::

        n1ql_index_create('', primary=True, **kwargs)

    :param bool defer:
    :param bool ignore_exists:

    .. seealso:: :meth:`create_index`
    """
    return self.n1ql_index_create(
        '', defer=defer, primary=True, ignore_exists=ignore_exists)
[ "def", "n1ql_index_create_primary", "(", "self", ",", "defer", "=", "False", ",", "ignore_exists", "=", "False", ")", ":", "return", "self", ".", "n1ql_index_create", "(", "''", ",", "defer", "=", "defer", ",", "primary", "=", "True", ",", "ignore_exists", "=", "ignore_exists", ")" ]
Create the primary index on the bucket.

Equivalent to::

    n1ql_index_create('', primary=True, **kwargs)

:param bool defer:
:param bool ignore_exists:

.. seealso:: :meth:`create_index`
[ "Create", "the", "primary", "index", "on", "the", "bucket", "." ]
python
train
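The method above is normally reached through a bucket's manager object. A minimal, hedged usage sketch; the connection string and bucket name are illustrative, not taken from the record:

from couchbase.bucket import Bucket

bucket = Bucket('couchbase://localhost/default')
mgr = bucket.bucket_manager()
# Create the primary N1QL index, tolerating an already-existing one:
mgr.n1ql_index_create_primary(defer=False, ignore_exists=True)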
log2timeline/dfvfs
dfvfs/vfs/vshadow_file_system.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/vfs/vshadow_file_system.py#L76-L93
def FileEntryExistsByPathSpec(self, path_spec):
  """Determines if a file entry for a path specification exists.

  Args:
    path_spec (PathSpec): path specification.

  Returns:
    bool: True if the file entry exists.
  """
  store_index = vshadow.VShadowPathSpecGetStoreIndex(path_spec)

  # The virtual root file has no corresponding store index but
  # should have a location.
  if store_index is None:
    location = getattr(path_spec, 'location', None)
    return location is not None and location == self.LOCATION_ROOT

  return 0 <= store_index < self._vshadow_volume.number_of_stores
[ "def", "FileEntryExistsByPathSpec", "(", "self", ",", "path_spec", ")", ":", "store_index", "=", "vshadow", ".", "VShadowPathSpecGetStoreIndex", "(", "path_spec", ")", "# The virtual root file has not corresponding store index but", "# should have a location.", "if", "store_index", "is", "None", ":", "location", "=", "getattr", "(", "path_spec", ",", "'location'", ",", "None", ")", "return", "location", "is", "not", "None", "and", "location", "==", "self", ".", "LOCATION_ROOT", "return", "0", "<=", "store_index", "<", "self", ".", "_vshadow_volume", ".", "number_of_stores" ]
Determines if a file entry for a path specification exists.

Args:
  path_spec (PathSpec): path specification.

Returns:
  bool: True if the file entry exists.
[ "Determines", "if", "a", "file", "entry", "for", "a", "path", "specification", "exists", "." ]
python
train
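For context, a hedged sketch of building the kind of path specification this check expects, via the dfVFS factory; the source image path and store index are invented for illustration:

from dfvfs.lib import definitions
from dfvfs.path import factory

os_path_spec = factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_OS, location='/tmp/disk.raw')
vshadow_path_spec = factory.Factory.NewPathSpec(
    definitions.TYPE_INDICATOR_VSHADOW, store_index=0, parent=os_path_spec)
# file_system would be a VShadowFileSystem opened on the same volume:
# file_system.FileEntryExistsByPathSpec(vshadow_path_spec)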
hyperledger/indy-sdk
vcx/wrappers/python3/vcx/api/connection.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/connection.py#L254-L278
async def invite_details(self, abbreviated: bool) -> dict:
    """
    Get the invite details that were sent or can be sent to the endpoint.

    :param abbreviated: abbreviate invite details or not
    Example:
    phone_number = '8019119191'
    connection = await Connection.create('foobar123')
    invite_details = await connection.connect(phone_number)
    invite_details_again = await connection.invite_details()
    :return: JSON of invite_details sent to connection
    """
    if not hasattr(Connection.invite_details, "cb"):
        self.logger.debug("vcx_connection_invite_details: Creating callback")
        Connection.invite_details.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))

    c_connection_handle = c_uint32(self.handle)
    c_abbreviated = c_bool(abbreviated)

    details = await do_call('vcx_connection_invite_details',
                            c_connection_handle,
                            c_abbreviated,
                            Connection.invite_details.cb)

    return json.loads(details.decode())
[ "async", "def", "invite_details", "(", "self", ",", "abbreviated", ":", "bool", ")", "->", "dict", ":", "if", "not", "hasattr", "(", "Connection", ".", "invite_details", ",", "\"cb\"", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"vcx_connection_invite_details: Creating callback\"", ")", "Connection", ".", "invite_details", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_uint32", ",", "c_uint32", ",", "c_char_p", ")", ")", "c_connection_handle", "=", "c_uint32", "(", "self", ".", "handle", ")", "c_abbreviated", "=", "c_bool", "(", "abbreviated", ")", "details", "=", "await", "do_call", "(", "'vcx_connection_invite_details'", ",", "c_connection_handle", ",", "c_abbreviated", ",", "Connection", ".", "invite_details", ".", "cb", ")", "return", "json", ".", "loads", "(", "details", ".", "decode", "(", ")", ")" ]
Get the invite details that were sent or can be sent to the endpoint.

:param abbreviated: abbreviate invite details or not
Example:
phone_number = '8019119191'
connection = await Connection.create('foobar123')
invite_details = await connection.connect(phone_number)
invite_details_again = await connection.invite_details()
:return: JSON of invite_details sent to connection
[ "Get", "the", "invite", "details", "that", "were", "sent", "or", "can", "be", "sent", "to", "the", "endpoint", "." ]
python
train
AlecAivazis/graphql-over-kafka
nautilus/auth/primitives/passwordHash.py
https://github.com/AlecAivazis/graphql-over-kafka/blob/70e2acef27a2f87355590be1a6ca60ce3ab4d09c/nautilus/auth/primitives/passwordHash.py#L59-L63
def coerce(cls, key, value):
    """Ensure that loaded values are PasswordHashes."""
    if isinstance(value, PasswordHash):
        return value
    return super(PasswordHash, cls).coerce(key, value)
[ "def", "coerce", "(", "cls", ",", "key", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "PasswordHash", ")", ":", "return", "value", "return", "super", "(", "PasswordHash", ",", "cls", ")", ".", "coerce", "(", "key", ",", "value", ")" ]
Ensure that loaded values are PasswordHashes.
[ "Ensure", "that", "loaded", "values", "are", "PasswordHashes", "." ]
python
train
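The coerce classmethod above follows SQLAlchemy's Mutable.coerce pattern. A self-contained sketch of that pattern, with illustrative stand-in classes rather than nautilus's real base class:

class Mutable(object):
    @classmethod
    def coerce(cls, key, value):
        # base behavior: reject values that cannot be converted
        raise ValueError("Attribute %r cannot accept %r" % (key, value))

class PasswordHash(Mutable):
    def __init__(self, hash_):
        self.hash = hash_

    @classmethod
    def coerce(cls, key, value):
        """Ensure that loaded values are PasswordHashes."""
        if isinstance(value, PasswordHash):
            return value
        return super(PasswordHash, cls).coerce(key, value)

# Already-coerced values pass through unchanged:
print(PasswordHash.coerce('password', PasswordHash('abc123')).hash)  # abc123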
tcalmant/ipopo
pelix/ipopo/handlers/requires.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/requires.py#L369-L384
def on_service_arrival(self, svc_ref):
    """
    Called when a service has been registered in the framework

    :param svc_ref: A service reference
    """
    with self._lock:
        if self._value is None:
            # Inject the service
            self.reference = svc_ref
            self._value = self._context.get_service(svc_ref)

            self._ipopo_instance.bind(self, self._value, self.reference)
            return True

        return None
[ "def", "on_service_arrival", "(", "self", ",", "svc_ref", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_value", "is", "None", ":", "# Inject the service", "self", ".", "reference", "=", "svc_ref", "self", ".", "_value", "=", "self", ".", "_context", ".", "get_service", "(", "svc_ref", ")", "self", ".", "_ipopo_instance", ".", "bind", "(", "self", ",", "self", ".", "_value", ",", "self", ".", "reference", ")", "return", "True", "return", "None" ]
Called when a service has been registered in the framework

:param svc_ref: A service reference
[ "Called", "when", "a", "service", "has", "been", "registered", "in", "the", "framework" ]
python
train
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L639-L666
def get_cursor(self, project_name, logstore_name, shard_id, start_time):
    """ Get cursor from log service for batch pull logs
    Unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type shard_id: int
    :param shard_id: the shard id

    :type start_time: string/int
    :param start_time: the start time of cursor, e.g 1441093445 or "begin"/"end",
        or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00",
        also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0",
        refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

    :return: GetCursorResponse

    :raise: LogException
    """
    headers = {'Content-Type': 'application/json'}
    params = {'type': 'cursor',
              'from': str(start_time) if start_time in ("begin", "end") else parse_timestamp(start_time)}
    resource = "/logstores/" + logstore_name + "/shards/" + str(shard_id)
    (resp, header) = self._send("GET", project_name, None, resource, params, headers)
    return GetCursorResponse(resp, header)
[ "def", "get_cursor", "(", "self", ",", "project_name", ",", "logstore_name", ",", "shard_id", ",", "start_time", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "params", "=", "{", "'type'", ":", "'cursor'", ",", "'from'", ":", "str", "(", "start_time", ")", "if", "start_time", "in", "(", "\"begin\"", ",", "\"end\"", ")", "else", "parse_timestamp", "(", "start_time", ")", "}", "resource", "=", "\"/logstores/\"", "+", "logstore_name", "+", "\"/shards/\"", "+", "str", "(", "shard_id", ")", "(", "resp", ",", "header", ")", "=", "self", ".", "_send", "(", "\"GET\"", ",", "project_name", ",", "None", ",", "resource", ",", "params", ",", "headers", ")", "return", "GetCursorResponse", "(", "resp", ",", "header", ")" ]
Get cursor from log service for batch pull logs
Unsuccessful operation will cause a LogException.

:type project_name: string
:param project_name: the Project name

:type logstore_name: string
:param logstore_name: the logstore name

:type shard_id: int
:param shard_id: the shard id

:type start_time: string/int
:param start_time: the start time of cursor, e.g 1441093445 or "begin"/"end",
    or readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00",
    also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0",
    refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html

:return: GetCursorResponse

:raise: LogException
[ "Get", "cursor", "from", "log", "service", "for", "batch", "pull", "logs", "Unsuccessful", "opertaion", "will", "cause", "an", "LogException", ".", ":", "type", "project_name", ":", "string", ":", "param", "project_name", ":", "the", "Project", "name", ":", "type", "logstore_name", ":", "string", ":", "param", "logstore_name", ":", "the", "logstore", "name", ":", "type", "shard_id", ":", "int", ":", "param", "shard_id", ":", "the", "shard", "id", ":", "type", "start_time", ":", "string", "/", "int", ":", "param", "start_time", ":", "the", "start", "time", "of", "cursor", "e", ".", "g", "1441093445", "or", "begin", "/", "end", "or", "readable", "time", "like", "%Y", "-", "%m", "-", "%d", "%H", ":", "%M", ":", "%S<time_zone", ">", "e", ".", "g", ".", "2018", "-", "01", "-", "02", "12", ":", "12", ":", "10", "+", "8", ":", "00", "also", "support", "human", "readable", "string", "e", ".", "g", ".", "1", "hour", "ago", "now", "yesterday", "0", ":", "0", ":", "0", "refer", "to", "https", ":", "//", "aliyun", "-", "log", "-", "cli", ".", "readthedocs", ".", "io", "/", "en", "/", "latest", "/", "tutorials", "/", "tutorial_human_readable_datetime", ".", "html", ":", "return", ":", "GetCursorResponse", ":", "raise", ":", "LogException" ]
python
train
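A hedged usage sketch of the call above; the endpoint, credentials, project and logstore names are placeholders:

from aliyun.log import LogClient

client = LogClient('cn-hangzhou.log.aliyuncs.com', 'your-access-id', 'your-access-key')
# Cursor for the oldest available data on shard 0:
res = client.get_cursor('my-project', 'my-logstore', 0, 'begin')
print(res.get_cursor())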
ArabellaTech/ydcommon
ydcommon/fab.py
https://github.com/ArabellaTech/ydcommon/blob/4aa105e7b33f379e8f09111497c0e427b189c36c/ydcommon/fab.py#L324-L342
def copy_s3_bucket(src_bucket_name, src_bucket_secret_key, src_bucket_access_key,
                   dst_bucket_name, dst_bucket_secret_key, dst_bucket_access_key):
    """
    Copy S3 bucket directory with CMS data between environments. Operations are done on server.
    """
    with cd(env.remote_path):
        tmp_dir = "s3_tmp"
        sudo('rm -rf %s' % tmp_dir, warn_only=True, user=env.remote_user)
        sudo('mkdir %s' % tmp_dir, user=env.remote_user)
        sudo('s3cmd --recursive get s3://%s/upload/ %s --secret_key=%s --access_key=%s' % (
            src_bucket_name, tmp_dir, src_bucket_secret_key, src_bucket_access_key), user=env.remote_user)
        sudo('s3cmd --recursive put %s/ s3://%s/upload/ --secret_key=%s --access_key=%s' % (
            tmp_dir, dst_bucket_name, dst_bucket_secret_key, dst_bucket_access_key), user=env.remote_user)
        sudo('s3cmd setacl s3://%s/upload --acl-public --recursive --secret_key=%s --access_key=%s' % (
            dst_bucket_name, dst_bucket_secret_key, dst_bucket_access_key), user=env.remote_user)
        # cleanup
        sudo('rm -rf %s' % tmp_dir, warn_only=True, user=env.remote_user)
[ "def", "copy_s3_bucket", "(", "src_bucket_name", ",", "src_bucket_secret_key", ",", "src_bucket_access_key", ",", "dst_bucket_name", ",", "dst_bucket_secret_key", ",", "dst_bucket_access_key", ")", ":", "with", "cd", "(", "env", ".", "remote_path", ")", ":", "tmp_dir", "=", "\"s3_tmp\"", "sudo", "(", "'rm -rf %s'", "%", "tmp_dir", ",", "warn_only", "=", "True", ",", "user", "=", "env", ".", "remote_user", ")", "sudo", "(", "'mkdir %s'", "%", "tmp_dir", ",", "user", "=", "env", ".", "remote_user", ")", "sudo", "(", "'s3cmd --recursive get s3://%s/upload/ %s --secret_key=%s --access_key=%s'", "%", "(", "src_bucket_name", ",", "tmp_dir", ",", "src_bucket_secret_key", ",", "src_bucket_access_key", ")", ",", "user", "=", "env", ".", "remote_user", ")", "sudo", "(", "'s3cmd --recursive put %s/ s3://%s/upload/ --secret_key=%s --access_key=%s'", "%", "(", "tmp_dir", ",", "dst_bucket_name", ",", "dst_bucket_secret_key", ",", "dst_bucket_access_key", ")", ",", "user", "=", "env", ".", "remote_user", ")", "sudo", "(", "'s3cmd setacl s3://%s/upload --acl-public --recursive --secret_key=%s --access_key=%s'", "%", "(", "dst_bucket_name", ",", "dst_bucket_secret_key", ",", "dst_bucket_access_key", ")", ",", "user", "=", "env", ".", "remote_user", ")", "# cleanup", "sudo", "(", "'rm -rf %s'", "%", "tmp_dir", ",", "warn_only", "=", "True", ",", "user", "=", "env", ".", "remote_user", ")" ]
Copy S3 bucket directory with CMS data between environments. Operations are done on server.
[ "Copy", "S3", "bucket", "directory", "with", "CMS", "data", "between", "environments", ".", "Operations", "are", "done", "on", "server", "." ]
python
train
pyca/pyopenssl
src/OpenSSL/SSL.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L1343-L1354
def set_options(self, options):
    """
    Add options. Options set before are not cleared!
    This method should be used with the :const:`OP_*` constants.

    :param options: The options to add.
    :return: The new option bitmask.
    """
    if not isinstance(options, integer_types):
        raise TypeError("options must be an integer")

    return _lib.SSL_CTX_set_options(self._context, options)
[ "def", "set_options", "(", "self", ",", "options", ")", ":", "if", "not", "isinstance", "(", "options", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "\"options must be an integer\"", ")", "return", "_lib", ".", "SSL_CTX_set_options", "(", "self", ".", "_context", ",", "options", ")" ]
Add options. Options set before are not cleared!
This method should be used with the :const:`OP_*` constants.

:param options: The options to add.
:return: The new option bitmask.
[ "Add", "options", ".", "Options", "set", "before", "are", "not", "cleared!", "This", "method", "should", "be", "used", "with", "the", ":", "const", ":", "OP_", "*", "constants", "." ]
python
test
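Typical usage ORs together OP_* constants to disable legacy protocol versions; a short sketch using pyOpenSSL's public names:

from OpenSSL.SSL import Context, TLSv1_2_METHOD, OP_NO_SSLv2, OP_NO_SSLv3

ctx = Context(TLSv1_2_METHOD)
# Returns the combined option bitmask now active on the context:
new_mask = ctx.set_options(OP_NO_SSLv2 | OP_NO_SSLv3)
print(hex(new_mask))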
timstaley/voevent-parse
src/voeventparse/voevent.py
https://github.com/timstaley/voevent-parse/blob/58fc1eb3af5eca23d9e819c727204950615402a7/src/voeventparse/voevent.py#L255-L306
def add_where_when(voevent, coords, obs_time, observatory_location,
                   allow_tz_naive_datetime=False):
    """
    Add details of an observation to the WhereWhen section.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        coords(:class:`.Position2D`): Sky co-ordinates of event.
        obs_time(datetime.datetime): Nominal DateTime of the observation. Must
            either be timezone-aware, or should be carefully verified as
            representing UTC and then set parameter
            ``allow_tz_naive_datetime=True``.
        observatory_location(str): Telescope locale, e.g. 'La Palma'. May be a
            generic location as listed under
            :class:`voeventparse.definitions.observatory_location`.
        allow_tz_naive_datetime (bool): (Default False). Accept timezone-naive
            datetime-timestamps. See comments for ``obs_time``.
    """
    # .. todo:: Implement TimeError using datetime.timedelta

    if obs_time.tzinfo is not None:
        utc_naive_obs_time = obs_time.astimezone(pytz.utc).replace(tzinfo=None)
    elif not allow_tz_naive_datetime:
        raise ValueError(
            "Datetime passed without tzinfo, cannot be sure if it is really a "
            "UTC timestamp. Please verify function call and either add tzinfo "
            "or pass parameter 'allow_tz_naive_obstime=True', as appropriate",
        )
    else:
        utc_naive_obs_time = obs_time

    obs_data = etree.SubElement(voevent.WhereWhen, 'ObsDataLocation')
    etree.SubElement(obs_data, 'ObservatoryLocation', id=observatory_location)
    ol = etree.SubElement(obs_data, 'ObservationLocation')
    etree.SubElement(ol, 'AstroCoordSystem', id=coords.system)
    ac = etree.SubElement(ol, 'AstroCoords', coord_system_id=coords.system)
    time = etree.SubElement(ac, 'Time', unit='s')
    instant = etree.SubElement(time, 'TimeInstant')
    instant.ISOTime = utc_naive_obs_time.isoformat()
    # iso_time = etree.SubElement(instant, 'ISOTime') = obs_time.isoformat()

    pos2d = etree.SubElement(ac, 'Position2D', unit=coords.units)
    pos2d.Name1 = 'RA'
    pos2d.Name2 = 'Dec'
    pos2d_val = etree.SubElement(pos2d, 'Value2')
    pos2d_val.C1 = coords.ra
    pos2d_val.C2 = coords.dec
    pos2d.Error2Radius = coords.err
[ "def", "add_where_when", "(", "voevent", ",", "coords", ",", "obs_time", ",", "observatory_location", ",", "allow_tz_naive_datetime", "=", "False", ")", ":", "# .. todo:: Implement TimeError using datetime.timedelta", "if", "obs_time", ".", "tzinfo", "is", "not", "None", ":", "utc_naive_obs_time", "=", "obs_time", ".", "astimezone", "(", "pytz", ".", "utc", ")", ".", "replace", "(", "tzinfo", "=", "None", ")", "elif", "not", "allow_tz_naive_datetime", ":", "raise", "ValueError", "(", "\"Datetime passed without tzinfo, cannot be sure if it is really a \"", "\"UTC timestamp. Please verify function call and either add tzinfo \"", "\"or pass parameter 'allow_tz_naive_obstime=True', as appropriate\"", ",", ")", "else", ":", "utc_naive_obs_time", "=", "obs_time", "obs_data", "=", "etree", ".", "SubElement", "(", "voevent", ".", "WhereWhen", ",", "'ObsDataLocation'", ")", "etree", ".", "SubElement", "(", "obs_data", ",", "'ObservatoryLocation'", ",", "id", "=", "observatory_location", ")", "ol", "=", "etree", ".", "SubElement", "(", "obs_data", ",", "'ObservationLocation'", ")", "etree", ".", "SubElement", "(", "ol", ",", "'AstroCoordSystem'", ",", "id", "=", "coords", ".", "system", ")", "ac", "=", "etree", ".", "SubElement", "(", "ol", ",", "'AstroCoords'", ",", "coord_system_id", "=", "coords", ".", "system", ")", "time", "=", "etree", ".", "SubElement", "(", "ac", ",", "'Time'", ",", "unit", "=", "'s'", ")", "instant", "=", "etree", ".", "SubElement", "(", "time", ",", "'TimeInstant'", ")", "instant", ".", "ISOTime", "=", "utc_naive_obs_time", ".", "isoformat", "(", ")", "# iso_time = etree.SubElement(instant, 'ISOTime') = obs_time.isoformat()", "pos2d", "=", "etree", ".", "SubElement", "(", "ac", ",", "'Position2D'", ",", "unit", "=", "coords", ".", "units", ")", "pos2d", ".", "Name1", "=", "'RA'", "pos2d", ".", "Name2", "=", "'Dec'", "pos2d_val", "=", "etree", ".", "SubElement", "(", "pos2d", ",", "'Value2'", ")", "pos2d_val", ".", "C1", "=", "coords", ".", "ra", "pos2d_val", ".", "C2", "=", "coords", ".", "dec", "pos2d", ".", "Error2Radius", "=", "coords", ".", "err" ]
Add details of an observation to the WhereWhen section.

Args:
    voevent(:class:`Voevent`): Root node of a VOEvent etree.
    coords(:class:`.Position2D`): Sky co-ordinates of event.
    obs_time(datetime.datetime): Nominal DateTime of the observation. Must
        either be timezone-aware, or should be carefully verified as
        representing UTC and then set parameter
        ``allow_tz_naive_datetime=True``.
    observatory_location(str): Telescope locale, e.g. 'La Palma'. May be a
        generic location as listed under
        :class:`voeventparse.definitions.observatory_location`.
    allow_tz_naive_datetime (bool): (Default False). Accept timezone-naive
        datetime-timestamps. See comments for ``obs_time``.
[ "Add", "details", "of", "an", "observation", "to", "the", "WhereWhen", "section", "." ]
python
train
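A hedged sketch of the intended call pattern, following the package's documented helpers; the stream identifiers and coordinates are invented:

import datetime
import pytz
import voeventparse as vp

v = vp.Voevent(stream='example.org/streams#test',
               stream_id=1, role=vp.definitions.roles.test)
coords = vp.Position2D(ra=123.5, dec=45.0, err=0.1,
                       units='deg',
                       system=vp.definitions.sky_coord_system.utc_fk5_geo)
vp.add_where_when(v, coords=coords,
                  obs_time=datetime.datetime(2018, 1, 2, 12, 12, 10, tzinfo=pytz.utc),
                  observatory_location=vp.definitions.observatory_location.geosurface)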
Cadair/jupyter_environment_kernels
environment_kernels/activate_helper.py
https://github.com/Cadair/jupyter_environment_kernels/blob/3da304550b511bda7d5d39280379b5ca39bb31bc/environment_kernels/activate_helper.py#L85-L105
def source_cmd(args, stdin=None):
    """Simple cmd.exe-specific wrapper around source-foreign.

    returns a dict to be used as a new environment
    """
    args = list(args)
    fpath = locate_binary(args[0])
    args[0] = fpath if fpath else args[0]
    if not os.path.isfile(args[0]):
        raise RuntimeError("Command not found: %s" % args[0])
    prevcmd = 'call '
    prevcmd += ' '.join([argvquote(arg, force=True) for arg in args])
    prevcmd = escape_windows_cmd_string(prevcmd)
    args.append('--prevcmd={}'.format(prevcmd))
    args.insert(0, 'cmd')
    args.append('--interactive=0')
    args.append('--sourcer=call')
    args.append('--envcmd=set')
    args.append('--seterrpostcmd=if errorlevel 1 exit 1')
    args.append('--use-tmpfile=1')
    return source_foreign(args, stdin=stdin)
[ "def", "source_cmd", "(", "args", ",", "stdin", "=", "None", ")", ":", "args", "=", "list", "(", "args", ")", "fpath", "=", "locate_binary", "(", "args", "[", "0", "]", ")", "args", "[", "0", "]", "=", "fpath", "if", "fpath", "else", "args", "[", "0", "]", "if", "not", "os", ".", "path", ".", "isfile", "(", "args", "[", "0", "]", ")", ":", "raise", "RuntimeError", "(", "\"Command not found: %s\"", "%", "args", "[", "0", "]", ")", "prevcmd", "=", "'call '", "prevcmd", "+=", "' '", ".", "join", "(", "[", "argvquote", "(", "arg", ",", "force", "=", "True", ")", "for", "arg", "in", "args", "]", ")", "prevcmd", "=", "escape_windows_cmd_string", "(", "prevcmd", ")", "args", ".", "append", "(", "'--prevcmd={}'", ".", "format", "(", "prevcmd", ")", ")", "args", ".", "insert", "(", "0", ",", "'cmd'", ")", "args", ".", "append", "(", "'--interactive=0'", ")", "args", ".", "append", "(", "'--sourcer=call'", ")", "args", ".", "append", "(", "'--envcmd=set'", ")", "args", ".", "append", "(", "'--seterrpostcmd=if errorlevel 1 exit 1'", ")", "args", ".", "append", "(", "'--use-tmpfile=1'", ")", "return", "source_foreign", "(", "args", ",", "stdin", "=", "stdin", ")" ]
Simple cmd.exe-specific wrapper around source-foreign.

returns a dict to be used as a new environment
[ "Simple", "cmd", ".", "exe", "-", "specific", "wrapper", "around", "source", "-", "foreign", "." ]
python
train
glormph/msstitch
src/app/lookups/sqlite/proteingroups.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/lookups/sqlite/proteingroups.py#L79-L85
def get_proteins_for_peptide(self, psm_id):
    """Returns list of proteins for a passed psm_id"""
    protsql = self.get_sql_select(['protein_acc'], 'protein_psm')
    protsql = '{0} WHERE psm_id=?'.format(protsql)
    cursor = self.get_cursor()
    proteins = cursor.execute(protsql, psm_id).fetchall()
    return [x[0] for x in proteins]
[ "def", "get_proteins_for_peptide", "(", "self", ",", "psm_id", ")", ":", "protsql", "=", "self", ".", "get_sql_select", "(", "[", "'protein_acc'", "]", ",", "'protein_psm'", ")", "protsql", "=", "'{0} WHERE psm_id=?'", ".", "format", "(", "protsql", ")", "cursor", "=", "self", ".", "get_cursor", "(", ")", "proteins", "=", "cursor", ".", "execute", "(", "protsql", ",", "psm_id", ")", ".", "fetchall", "(", ")", "return", "[", "x", "[", "0", "]", "for", "x", "in", "proteins", "]" ]
Returns list of proteins for a passed psm_id
[ "Returns", "list", "of", "proteins", "for", "a", "passed", "psm_id" ]
python
train
dpkp/kafka-python
kafka/client_async.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L418-L435
def close(self, node_id=None):
    """Close one or all broker connections.

    Arguments:
        node_id (int, optional): the id of the node to close
    """
    with self._lock:
        if node_id is None:
            self._close()
            conns = list(self._conns.values())
            self._conns.clear()
            for conn in conns:
                conn.close()
        elif node_id in self._conns:
            self._conns.pop(node_id).close()
        else:
            log.warning("Node %s not found in current connection list; skipping", node_id)
            return
[ "def", "close", "(", "self", ",", "node_id", "=", "None", ")", ":", "with", "self", ".", "_lock", ":", "if", "node_id", "is", "None", ":", "self", ".", "_close", "(", ")", "conns", "=", "list", "(", "self", ".", "_conns", ".", "values", "(", ")", ")", "self", ".", "_conns", ".", "clear", "(", ")", "for", "conn", "in", "conns", ":", "conn", ".", "close", "(", ")", "elif", "node_id", "in", "self", ".", "_conns", ":", "self", ".", "_conns", ".", "pop", "(", "node_id", ")", ".", "close", "(", ")", "else", ":", "log", ".", "warning", "(", "\"Node %s not found in current connection list; skipping\"", ",", "node_id", ")", "return" ]
Close one or all broker connections.

Arguments:
    node_id (int, optional): the id of the node to close
[ "Close", "one", "or", "all", "broker", "connections", "." ]
python
train
doconix/django-mako-plus
django_mako_plus/template/adapter.py
https://github.com/doconix/django-mako-plus/blob/a90f9b4af19e5fa9f83452989cdcaed21569a181/django_mako_plus/template/adapter.py#L39-L43
def name(self):
    '''Returns the name of this template (if created from a file) or "string" if not'''
    if self.mako_template.filename:
        return os.path.basename(self.mako_template.filename)
    return 'string'
[ "def", "name", "(", "self", ")", ":", "if", "self", ".", "mako_template", ".", "filename", ":", "return", "os", ".", "path", ".", "basename", "(", "self", ".", "mako_template", ".", "filename", ")", "return", "'string'" ]
Returns the name of this template (if created from a file) or "string" if not
[ "Returns", "the", "name", "of", "this", "template", "(", "if", "created", "from", "a", "file", ")", "or", "string", "if", "not" ]
python
train
CivicSpleen/ambry
ambry/identity.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/identity.py#L852-L878
def base62_decode(cls, string):
    """Decode a Base X encoded string into the number.

    Arguments:
    - `string`: The encoded string
    - `alphabet`: The alphabet to use for encoding

    Stolen from: http://stackoverflow.com/a/1119769/1144479
    """

    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

    base = len(alphabet)
    strlen = len(string)
    num = 0

    idx = 0
    for char in string:
        power = (strlen - (idx + 1))
        try:
            num += alphabet.index(char) * (base ** power)
        except ValueError:
            raise Base62DecodeError(
                "Failed to decode char: '{}'".format(char))
        idx += 1

    return num
[ "def", "base62_decode", "(", "cls", ",", "string", ")", ":", "alphabet", "=", "\"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"", "base", "=", "len", "(", "alphabet", ")", "strlen", "=", "len", "(", "string", ")", "num", "=", "0", "idx", "=", "0", "for", "char", "in", "string", ":", "power", "=", "(", "strlen", "-", "(", "idx", "+", "1", ")", ")", "try", ":", "num", "+=", "alphabet", ".", "index", "(", "char", ")", "*", "(", "base", "**", "power", ")", "except", "ValueError", ":", "raise", "Base62DecodeError", "(", "\"Failed to decode char: '{}'\"", ".", "format", "(", "char", ")", ")", "idx", "+=", "1", "return", "num" ]
Decode a Base X encoded string into the number.

Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding

Stolen from: http://stackoverflow.com/a/1119769/1144479
[ "Decode", "a", "Base", "X", "encoded", "string", "into", "the", "number", "." ]
python
train
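For contrast, a self-contained encoder that inverts the decoder above using the same 62-character alphabet (a sketch, not part of ambry):

def base62_encode(num):
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    if num == 0:
        return alphabet[0]
    digits = []
    while num:
        # peel off base-62 digits, least significant first
        num, rem = divmod(num, 62)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))

assert base62_encode(3843) == 'ZZ'  # 61*62 + 61
assert base62_encode(0) == '0'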
duniter/duniter-python-api
duniterpy/grammars/output.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/grammars/output.py#L109-L117
def compose(self, parser: Any, grammar: Any = None, attr_of: str = None):
    """
    Return the CSV(time) expression as string format

    :param parser: Parser instance
    :param grammar: Grammar
    :param attr_of: Attribute of...
    """
    return "CSV({0})".format(self.time)
[ "def", "compose", "(", "self", ",", "parser", ":", "Any", ",", "grammar", ":", "Any", "=", "None", ",", "attr_of", ":", "str", "=", "None", ")", ":", "return", "\"CSV({0})\"", ".", "format", "(", "self", ".", "time", ")" ]
Return the CSV(time) expression as string format

:param parser: Parser instance
:param grammar: Grammar
:param attr_of: Attribute of...
[ "Return", "the", "CSV", "(", "time", ")", "expression", "as", "string", "format" ]
python
train
msoulier/tftpy
tftpy/TftpPacketTypes.py
https://github.com/msoulier/tftpy/blob/af2f2fe89a3bf45748b78703820efb0986a8207a/tftpy/TftpPacketTypes.py#L410-L429
def decode(self):
    "Decode self.buffer, populating instance variables and return self."
    buflen = len(self.buffer)
    tftpassert(buflen >= 4, "malformed ERR packet, too short")
    log.debug("Decoding ERR packet, length %s bytes", buflen)
    if buflen == 4:
        log.debug("Allowing this affront to the RFC of a 4-byte packet")
        fmt = b"!HH"
        log.debug("Decoding ERR packet with fmt: %s", fmt)
        self.opcode, self.errorcode = struct.unpack(fmt, self.buffer)
    else:
        log.debug("Good ERR packet > 4 bytes")
        fmt = b"!HH%dsx" % (len(self.buffer) - 5)
        log.debug("Decoding ERR packet with fmt: %s", fmt)
        self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer)
    log.error("ERR packet - errorcode: %d, message: %s"
              % (self.errorcode, self.errmsg))
    return self
[ "def", "decode", "(", "self", ")", ":", "buflen", "=", "len", "(", "self", ".", "buffer", ")", "tftpassert", "(", "buflen", ">=", "4", ",", "\"malformed ERR packet, too short\"", ")", "log", ".", "debug", "(", "\"Decoding ERR packet, length %s bytes\"", ",", "buflen", ")", "if", "buflen", "==", "4", ":", "log", ".", "debug", "(", "\"Allowing this affront to the RFC of a 4-byte packet\"", ")", "fmt", "=", "b\"!HH\"", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "else", ":", "log", ".", "debug", "(", "\"Good ERR packet > 4 bytes\"", ")", "fmt", "=", "b\"!HH%dsx\"", "%", "(", "len", "(", "self", ".", "buffer", ")", "-", "5", ")", "log", ".", "debug", "(", "\"Decoding ERR packet with fmt: %s\"", ",", "fmt", ")", "self", ".", "opcode", ",", "self", ".", "errorcode", ",", "self", ".", "errmsg", "=", "struct", ".", "unpack", "(", "fmt", ",", "self", ".", "buffer", ")", "log", ".", "error", "(", "\"ERR packet - errorcode: %d, message: %s\"", "%", "(", "self", ".", "errorcode", ",", "self", ".", "errmsg", ")", ")", "return", "self" ]
Decode self.buffer, populating instance variables and return self.
[ "Decode", "self", ".", "buffer", "populating", "instance", "variables", "and", "return", "self", "." ]
python
train
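A worked example of the wire format this decoder expects, built with the same struct format strings: opcode 5 (ERROR), error code 1, a message, and the trailing NUL byte:

import struct

# pack: !HH9sx -> 2 + 2 + 9 + 1 = 14 bytes on the wire
buf = struct.pack(b"!HH%dsx" % 9, 5, 1, b"Not found")
# unpack with the same length arithmetic the decoder uses (len - 5):
opcode, errorcode, errmsg = struct.unpack(b"!HH%dsx" % (len(buf) - 5), buf)
print(opcode, errorcode, errmsg)  # 5 1 b'Not found'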
PyPSA/PyPSA
pypsa/pf.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L393-L411
def network_lpf(network, snapshots=None, skip_pre=False):
    """
    Linear power flow for generic network.

    Parameters
    ----------
    snapshots : list-like|single snapshot
        A subset or an element of network.snapshots on which to run
        the power flow, defaults to network.snapshots
    skip_pre: bool, default False
        Skip the preliminary steps of computing topology, calculating
        dependent values and finding bus controls.

    Returns
    -------
    None
    """
    _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)
[ "def", "network_lpf", "(", "network", ",", "snapshots", "=", "None", ",", "skip_pre", "=", "False", ")", ":", "_network_prepare_and_run_pf", "(", "network", ",", "snapshots", ",", "skip_pre", ",", "linear", "=", "True", ")" ]
Linear power flow for generic network.

Parameters
----------
snapshots : list-like|single snapshot
    A subset or an element of network.snapshots on which to run
    the power flow, defaults to network.snapshots
skip_pre: bool, default False
    Skip the preliminary steps of computing topology, calculating
    dependent values and finding bus controls.

Returns
-------
None
[ "Linear", "power", "flow", "for", "generic", "network", "." ]
python
train
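Callers normally reach this module-level function through the Network method; a hedged sketch with an illustrative network (it must be populated, e.g. imported from CSV, before the flow is meaningful):

import pypsa

network = pypsa.Network()
# network.import_from_csv_folder('path/to/network')  # illustrative
network.lpf(network.snapshots)  # equivalent to network_lpf(network, network.snapshots)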
couchbase/couchbase-python-client
couchbase/asynchronous/rowsbase.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/couchbase/asynchronous/rowsbase.py#L66-L80
def _callback(self, mres):
    """
    This is invoked as the row callback.
    If 'rows' is true, then we are a row callback, otherwise the
    request has ended and it's time to collect the other data
    """
    try:
        rows = self._process_payload(self.raw.rows)
        if rows:
            self.on_rows(rows)
        if self.raw.done:
            self.on_done()
    finally:
        if self.raw.done:
            self._clear()
[ "def", "_callback", "(", "self", ",", "mres", ")", ":", "try", ":", "rows", "=", "self", ".", "_process_payload", "(", "self", ".", "raw", ".", "rows", ")", "if", "rows", ":", "self", ".", "on_rows", "(", "rows", ")", "if", "self", ".", "raw", ".", "done", ":", "self", ".", "on_done", "(", ")", "finally", ":", "if", "self", ".", "raw", ".", "done", ":", "self", ".", "_clear", "(", ")" ]
This is invoked as the row callback.
If 'rows' is true, then we are a row callback, otherwise the
request has ended and it's time to collect the other data
[ "This", "is", "invoked", "as", "the", "row", "callback", ".", "If", "rows", "is", "true", "then", "we", "are", "a", "row", "callback", "otherwise", "the", "request", "has", "ended", "and", "it", "s", "time", "to", "collect", "the", "other", "data" ]
python
train
prompt-toolkit/ptpython
ptpython/python_input.py
https://github.com/prompt-toolkit/ptpython/blob/b1bba26a491324cd65e0ef46c7b818c4b88fd993/ptpython/python_input.py#L330-L337
def install_code_colorscheme(self, name, style_dict):
    """
    Install a new code color scheme.
    """
    assert isinstance(name, six.text_type)
    assert isinstance(style_dict, dict)

    self.code_styles[name] = style_dict
[ "def", "install_code_colorscheme", "(", "self", ",", "name", ",", "style_dict", ")", ":", "assert", "isinstance", "(", "name", ",", "six", ".", "text_type", ")", "assert", "isinstance", "(", "style_dict", ",", "dict", ")", "self", ".", "code_styles", "[", "name", "]", "=", "style_dict" ]
Install a new code color scheme.
[ "Install", "a", "new", "code", "color", "scheme", "." ]
python
train
lepture/flask-oauthlib
flask_oauthlib/provider/oauth1.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth1.py#L860-L877
def save_request_token(self, token, request):
    """Save request token to database.

    A grantsetter is required, which accepts a token and request
    parameters::

        def grantsetter(token, request):
            grant = Grant(
                token=token['oauth_token'],
                secret=token['oauth_token_secret'],
                client=request.client,
                redirect_uri=oauth.redirect_uri,
                realms=request.realms,
            )
            return grant.save()
    """
    log.debug('Save request token %r', token)
    self._grantsetter(token, request)
[ "def", "save_request_token", "(", "self", ",", "token", ",", "request", ")", ":", "log", ".", "debug", "(", "'Save request token %r'", ",", "token", ")", "self", ".", "_grantsetter", "(", "token", ",", "request", ")" ]
Save request token to database.

A grantsetter is required, which accepts a token and request
parameters::

    def grantsetter(token, request):
        grant = Grant(
            token=token['oauth_token'],
            secret=token['oauth_token_secret'],
            client=request.client,
            redirect_uri=oauth.redirect_uri,
            realms=request.realms,
        )
        return grant.save()
[ "Save", "request", "token", "to", "database", "." ]
python
test
mdorn/pyinstapaper
pyinstapaper/instapaper.py
https://github.com/mdorn/pyinstapaper/blob/94f5f61ccd07079ba3967f788c555aea1a81cca5/pyinstapaper/instapaper.py#L191-L204
def _simple_action(self, action=None):
    '''Issue a request for an API method whose only param is the obj ID.

    :param str action: The name of the action for the resource
    :returns: Response from the API
    :rtype: dict
    '''
    if not action:
        raise Exception('No simple action defined')
    path = "/".join([self.RESOURCE, action])
    response = self.client.request(
        path, {self.RESOURCE_ID_ATTRIBUTE: self.object_id}
    )
    return response
[ "def", "_simple_action", "(", "self", ",", "action", "=", "None", ")", ":", "if", "not", "action", ":", "raise", "Exception", "(", "'No simple action defined'", ")", "path", "=", "\"/\"", ".", "join", "(", "[", "self", ".", "RESOURCE", ",", "action", "]", ")", "response", "=", "self", ".", "client", ".", "request", "(", "path", ",", "{", "self", ".", "RESOURCE_ID_ATTRIBUTE", ":", "self", ".", "object_id", "}", ")", "return", "response" ]
Issue a request for an API method whose only param is the obj ID.

:param str action: The name of the action for the resource
:returns: Response from the API
:rtype: dict
[ "Issue", "a", "request", "for", "an", "API", "method", "whose", "only", "param", "is", "the", "obj", "ID", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/interpolation.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/interpolation.py#L56-L91
def Substitute(self, pattern):
  """Formats given pattern with this substitution environment.

  A pattern can contain placeholders for variables (`%%foo%%`) and scopes
  (`%%bar.baz%%`) that are replaced with concrete values in this
  substitution environment (specified in the constructor).

  Args:
    pattern: A pattern with placeholders to substitute.

  Returns:
    A pattern with placeholders substituted with concrete values.
  """
  if isinstance(pattern, bytes):
    substs = [re.escape(subst.encode("ascii")) for subst in self._substs]
    regex = re.compile(b"|".join(substs))

    def Replacement(match):
      key = match.group(0).decode("ascii")
      return self._substs[key].encode("utf-8")
  elif isinstance(pattern, Text):
    substs = [re.escape(subst) for subst in self._substs]
    regex = re.compile("|".join(substs))

    def Replacement(match):
      key = match.group(0)
      return self._substs[key]
  else:
    raise TypeError("Unexpected pattern type '{}'".format(type(pattern)))

  if not substs:
    return pattern
  else:
    return regex.sub(Replacement, pattern)
[ "def", "Substitute", "(", "self", ",", "pattern", ")", ":", "if", "isinstance", "(", "pattern", ",", "bytes", ")", ":", "substs", "=", "[", "re", ".", "escape", "(", "subst", ".", "encode", "(", "\"ascii\"", ")", ")", "for", "subst", "in", "self", ".", "_substs", "]", "regex", "=", "re", ".", "compile", "(", "b\"|\"", ".", "join", "(", "substs", ")", ")", "def", "Replacement", "(", "match", ")", ":", "key", "=", "match", ".", "group", "(", "0", ")", ".", "decode", "(", "\"ascii\"", ")", "return", "self", ".", "_substs", "[", "key", "]", ".", "encode", "(", "\"utf-8\"", ")", "elif", "isinstance", "(", "pattern", ",", "Text", ")", ":", "substs", "=", "[", "re", ".", "escape", "(", "subst", ")", "for", "subst", "in", "self", ".", "_substs", "]", "regex", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "substs", ")", ")", "def", "Replacement", "(", "match", ")", ":", "key", "=", "match", ".", "group", "(", "0", ")", "return", "self", ".", "_substs", "[", "key", "]", "else", ":", "raise", "TypeError", "(", "\"Unexpected pattern type '{}'\"", ".", "format", "(", "type", "(", "pattern", ")", ")", ")", "if", "not", "substs", ":", "return", "pattern", "else", ":", "return", "regex", ".", "sub", "(", "Replacement", ",", "pattern", ")" ]
Formats given pattern with this substitution environment.

A pattern can contain placeholders for variables (`%%foo%%`) and scopes
(`%%bar.baz%%`) that are replaced with concrete values in this
substitution environment (specified in the constructor).

Args:
  pattern: A pattern with placeholders to substitute.

Returns:
  A pattern with placeholders substituted with concrete values.
[ "Formats", "given", "pattern", "with", "this", "substitution", "environment", "." ]
python
train
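The core trick reduces to one alternation regex built from the escaped keys plus a replacement function; a self-contained demo:

import re

substs = {"%%foo%%": "bar", "%%quu.x%%": "norf"}
# Escape each placeholder and join into a single alternation pattern:
regex = re.compile("|".join(re.escape(key) for key in substs))
# The replacement callback looks up whichever placeholder matched:
result = regex.sub(lambda match: substs[match.group(0)], "a %%foo%% and %%quu.x%%")
print(result)  # a bar and norf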
markchil/gptools
gptools/kernel/core.py
https://github.com/markchil/gptools/blob/225db52bfe6baef1516529ad22177aa2cf7b71e4/gptools/kernel/core.py#L236-L256
def set_hyperparams(self, new_params):
    """Sets the free hyperparameters to the new parameter values in new_params.

    Parameters
    ----------
    new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),)
        New parameter values, ordered as dictated by the docstring for the class.
    """
    new_params = scipy.asarray(new_params, dtype=float)

    if len(new_params) == len(self.free_params):
        if self.enforce_bounds:
            for idx, new_param, bound in zip(range(0, len(new_params)), new_params, self.free_param_bounds):
                if bound[0] is not None and new_param < bound[0]:
                    new_params[idx] = bound[0]
                elif bound[1] is not None and new_param > bound[1]:
                    new_params[idx] = bound[1]
        self.params[~self.fixed_params] = new_params
    else:
        raise ValueError("Length of new_params must be %s!" % (len(self.free_params),))
[ "def", "set_hyperparams", "(", "self", ",", "new_params", ")", ":", "new_params", "=", "scipy", ".", "asarray", "(", "new_params", ",", "dtype", "=", "float", ")", "if", "len", "(", "new_params", ")", "==", "len", "(", "self", ".", "free_params", ")", ":", "if", "self", ".", "enforce_bounds", ":", "for", "idx", ",", "new_param", ",", "bound", "in", "zip", "(", "range", "(", "0", ",", "len", "(", "new_params", ")", ")", ",", "new_params", ",", "self", ".", "free_param_bounds", ")", ":", "if", "bound", "[", "0", "]", "is", "not", "None", "and", "new_param", "<", "bound", "[", "0", "]", ":", "new_params", "[", "idx", "]", "=", "bound", "[", "0", "]", "elif", "bound", "[", "1", "]", "is", "not", "None", "and", "new_param", ">", "bound", "[", "1", "]", ":", "new_params", "[", "idx", "]", "=", "bound", "[", "1", "]", "self", ".", "params", "[", "~", "self", ".", "fixed_params", "]", "=", "new_params", "else", ":", "raise", "ValueError", "(", "\"Length of new_params must be %s!\"", "%", "(", "len", "(", "self", ".", "free_params", ")", ",", ")", ")" ]
Sets the free hyperparameters to the new parameter values in new_params.

Parameters
----------
new_params : :py:class:`Array` or other Array-like, (len(:py:attr:`self.free_params`),)
    New parameter values, ordered as dictated by the docstring for the class.
[ "Sets", "the", "free", "hyperparameters", "to", "the", "new", "parameter", "values", "in", "new_params", ".", "Parameters", "----------", "new_params", ":", ":", "py", ":", "class", ":", "Array", "or", "other", "Array", "-", "like", "(", "len", "(", ":", "py", ":", "attr", ":", "self", ".", "free_params", ")", ")", "New", "parameter", "values", "ordered", "as", "dictated", "by", "the", "docstring", "for", "the", "class", "." ]
python
train
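A self-contained illustration of the bounds-enforcement loop above, using plain numpy arrays with invented values and bounds (the original uses scipy's array namespace; the effect is the same):

import numpy as np

new_params = np.asarray([0.5, 10.0, -3.0], dtype=float)
free_param_bounds = [(0.0, 1.0), (None, 5.0), (-1.0, None)]  # None = unbounded side
for idx, (new_param, bound) in enumerate(zip(new_params, free_param_bounds)):
    if bound[0] is not None and new_param < bound[0]:
        new_params[idx] = bound[0]   # clamp to lower bound
    elif bound[1] is not None and new_param > bound[1]:
        new_params[idx] = bound[1]   # clamp to upper bound
print(new_params)  # [ 0.5  5.  -1.]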
wummel/linkchecker
linkcheck/checker/fileurl.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/fileurl.py#L112-L138
def build_base_url(self):
    """The URL is normed according to the platform:
     - the base URL is made an absolute file:// URL
     - under Windows platform the drive specifier is normed
    """
    if self.base_url is None:
        return
    base_url = self.base_url
    if not (self.parent_url or self.base_ref or base_url.startswith("file:")):
        base_url = os.path.expanduser(base_url)
        if not is_absolute_path(base_url):
            try:
                base_url = os.getcwd()+"/"+base_url
            except OSError as msg:
                # occurs on stale remote filesystems (eg. NFS)
                errmsg = _("Could not get current working directory: %(msg)s") % dict(msg=msg)
                raise LinkCheckerError(errmsg)
        if os.path.isdir(base_url):
            base_url += "/"
        base_url = "file://"+base_url
    if os.name == "nt":
        base_url = base_url.replace("\\", "/")
        # transform c:/windows into /c|/windows
        base_url = re.sub("^file://(/?)([a-zA-Z]):", r"file:///\2|", base_url)
        # transform file://path into file:///path
        base_url = re.sub("^file://([^/])", r"file:///\1", base_url)
    self.base_url = unicode(base_url)
[ "def", "build_base_url", "(", "self", ")", ":", "if", "self", ".", "base_url", "is", "None", ":", "return", "base_url", "=", "self", ".", "base_url", "if", "not", "(", "self", ".", "parent_url", "or", "self", ".", "base_ref", "or", "base_url", ".", "startswith", "(", "\"file:\"", ")", ")", ":", "base_url", "=", "os", ".", "path", ".", "expanduser", "(", "base_url", ")", "if", "not", "is_absolute_path", "(", "base_url", ")", ":", "try", ":", "base_url", "=", "os", ".", "getcwd", "(", ")", "+", "\"/\"", "+", "base_url", "except", "OSError", "as", "msg", ":", "# occurs on stale remote filesystems (eg. NFS)", "errmsg", "=", "_", "(", "\"Could not get current working directory: %(msg)s\"", ")", "%", "dict", "(", "msg", "=", "msg", ")", "raise", "LinkCheckerError", "(", "errmsg", ")", "if", "os", ".", "path", ".", "isdir", "(", "base_url", ")", ":", "base_url", "+=", "\"/\"", "base_url", "=", "\"file://\"", "+", "base_url", "if", "os", ".", "name", "==", "\"nt\"", ":", "base_url", "=", "base_url", ".", "replace", "(", "\"\\\\\"", ",", "\"/\"", ")", "# transform c:/windows into /c|/windows", "base_url", "=", "re", ".", "sub", "(", "\"^file://(/?)([a-zA-Z]):\"", ",", "r\"file:///\\2|\"", ",", "base_url", ")", "# transform file://path into file:///path", "base_url", "=", "re", ".", "sub", "(", "\"^file://([^/])\"", ",", "r\"file:///\\1\"", ",", "base_url", ")", "self", ".", "base_url", "=", "unicode", "(", "base_url", ")" ]
The URL is normed according to the platform:
 - the base URL is made an absolute file:// URL
 - under Windows platform the drive specifier is normed
[ "The", "URL", "is", "normed", "according", "to", "the", "platform", ":", "-", "the", "base", "URL", "is", "made", "an", "absolute", "file", ":", "//", "URL", "-", "under", "Windows", "platform", "the", "drive", "specifier", "is", "normed" ]
python
train
selectel/timecard
timecard/timecard.py
https://github.com/selectel/timecard/blob/cfbd14356511c8d7817750c22acf3164ff0030de/timecard/timecard.py#L556-L592
def write_line(self, fix=True):
    """ Output line containing values to console and csv file.
    Only committed values are written to csv file.

    :param bool fix: to commit measurement values
    """
    cells = []
    csv_values = []
    for m in self.values():
        cells.append(m.render_value(fix=fix))
        if isinstance(m, MultiMetric):
            for sub in m.values():
                csv_values.append(sub.to_csv())
        else:
            csv_values.append(m.to_csv())
        if fix:
            m.reset()

    if fix and self._csvfile:
        self._write_csv_row(csv_values)

    c = _ansi["gray"] + "|" + _ansi["reset"]
    if self._last_line_fixed:
        stdout.write("\n\r")
    else:
        stdout.write("\r")
    if not fix:
        stdout.write(_ansi["reverse"])
    stdout.write(c.join(cells))
    stdout.flush()
    self._last_line_fixed = fix
[ "def", "write_line", "(", "self", ",", "fix", "=", "True", ")", ":", "cells", "=", "[", "]", "csv_values", "=", "[", "]", "for", "m", "in", "self", ".", "values", "(", ")", ":", "cells", ".", "append", "(", "m", ".", "render_value", "(", "fix", "=", "fix", ")", ")", "if", "isinstance", "(", "m", ",", "MultiMetric", ")", ":", "for", "sub", "in", "m", ".", "values", "(", ")", ":", "csv_values", ".", "append", "(", "sub", ".", "to_csv", "(", ")", ")", "else", ":", "csv_values", ".", "append", "(", "m", ".", "to_csv", "(", ")", ")", "if", "fix", ":", "m", ".", "reset", "(", ")", "if", "fix", "and", "self", ".", "_csvfile", ":", "self", ".", "_write_csv_row", "(", "csv_values", ")", "c", "=", "_ansi", "[", "\"gray\"", "]", "+", "\"|\"", "+", "_ansi", "[", "\"reset\"", "]", "if", "self", ".", "_last_line_fixed", ":", "stdout", ".", "write", "(", "\"\\n\\r\"", ")", "else", ":", "stdout", ".", "write", "(", "\"\\r\"", ")", "if", "not", "fix", ":", "stdout", ".", "write", "(", "_ansi", "[", "\"reverse\"", "]", ")", "stdout", ".", "write", "(", "c", ".", "join", "(", "cells", ")", ")", "stdout", ".", "flush", "(", ")", "self", ".", "_last_line_fixed", "=", "fix" ]
Output line containing values to console and csv file.
Only committed values are written to csv file.

:param bool fix: to commit measurement values
[ "Output", "line", "containing", "values", "to", "console", "and", "csv", "file", ".", "Only", "committed", "values", "are", "written", "to", "css", "file", ".", ":", "param", "bool", "fix", ":", "to", "commit", "measurement", "values" ]
python
train
jldantas/libmft
libmft/api.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/api.py#L641-L654
def _add_data_attribute(self, data_attr):
    """Add a data attribute to the datastream structure.

    Data attributes require processing before they can be interpreted as a
    datastream. This function ensures the attribute is added to the correct
    datastream, creating a new datastream if necessary.
    """
    attr_name = data_attr.header.attr_name
    stream = self._find_datastream(attr_name)
    if stream is None:
        stream = Datastream(attr_name)
        self.data_streams.append(stream)

    stream.add_data_attribute(data_attr)
[ "def", "_add_data_attribute", "(", "self", ",", "data_attr", ")", ":", "attr_name", "=", "data_attr", ".", "header", ".", "attr_name", "stream", "=", "self", ".", "_find_datastream", "(", "attr_name", ")", "if", "stream", "is", "None", ":", "stream", "=", "Datastream", "(", "attr_name", ")", "self", ".", "data_streams", ".", "append", "(", "stream", ")", "stream", ".", "add_data_attribute", "(", "data_attr", ")" ]
Add a data attribute to the datastream structure.

Data attributes require processing before they can be interpreted as a
datastream. This function ensures the attribute is added to the correct
datastream, creating a new datastream if necessary.
[ "Add", "a", "data", "attribute", "to", "the", "datastream", "structure", "." ]
python
train
chrismattmann/tika-python
tika/tika.py
https://github.com/chrismattmann/tika-python/blob/ffd3879ac3eaa9142c0fb6557cc1dc52d458a75a/tika/tika.py#L285-L300
def parse(option, urlOrPaths, serverEndpoint=ServerEndpoint, verbose=Verbose,
          tikaServerJar=TikaServerJar, responseMimeType='application/json',
          services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'},
          rawResponse=False):
    '''
    Parse the objects and return extracted metadata and/or text in JSON format.
    :param option:
    :param urlOrPaths:
    :param serverEndpoint:
    :param verbose:
    :param tikaServerJar:
    :param responseMimeType:
    :param services:
    :return:
    '''
    return [parse1(option, path, serverEndpoint, verbose, tikaServerJar,
                   responseMimeType, services)
            for path in urlOrPaths]
[ "def", "parse", "(", "option", ",", "urlOrPaths", ",", "serverEndpoint", "=", "ServerEndpoint", ",", "verbose", "=", "Verbose", ",", "tikaServerJar", "=", "TikaServerJar", ",", "responseMimeType", "=", "'application/json'", ",", "services", "=", "{", "'meta'", ":", "'/meta'", ",", "'text'", ":", "'/tika'", ",", "'all'", ":", "'/rmeta'", "}", ",", "rawResponse", "=", "False", ")", ":", "return", "[", "parse1", "(", "option", ",", "path", ",", "serverEndpoint", ",", "verbose", ",", "tikaServerJar", ",", "responseMimeType", ",", "services", ")", "for", "path", "in", "urlOrPaths", "]" ]
Parse the objects and return extracted metadata and/or text in JSON format.
:param option:
:param urlOrPaths:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param services:
:return:
[ "Parse", "the", "objects", "and", "return", "extracted", "metadata", "and", "/", "or", "text", "in", "JSON", "format", ".", ":", "param", "option", ":", ":", "param", "urlOrPaths", ":", ":", "param", "serverEndpoint", ":", ":", "param", "verbose", ":", ":", "param", "tikaServerJar", ":", ":", "param", "responseMimeType", ":", ":", "param", "services", ":", ":", "return", ":" ]
python
train
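Most callers go through the higher-level parser wrapper in the same package, which routes into this module; a hedged sketch (the file path is illustrative):

from tika import parser

# Starts or reuses a local Tika server on demand:
parsed = parser.from_file('/path/to/document.pdf')
print(parsed.get('metadata'))
print(parsed.get('content'))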
HazyResearch/metal
metal/analysis.py
https://github.com/HazyResearch/metal/blob/c24e3772e25ac6d0917b8b7af4c1bcb92928f84a/metal/analysis.py#L25-L29
def _conflicted_data_points(L):
    """Returns an indicator vector where ith element = 1 if x_i is labeled by
    at least two LFs that give it disagreeing labels."""
    m = sparse.diags(np.ravel(L.max(axis=1).todense()))
    return np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense())
[ "def", "_conflicted_data_points", "(", "L", ")", ":", "m", "=", "sparse", ".", "diags", "(", "np", ".", "ravel", "(", "L", ".", "max", "(", "axis", "=", "1", ")", ".", "todense", "(", ")", ")", ")", "return", "np", ".", "ravel", "(", "np", ".", "max", "(", "m", "@", "(", "L", "!=", "0", ")", "!=", "L", ",", "axis", "=", "1", ")", ".", "astype", "(", "int", ")", ".", "todense", "(", ")", ")" ]
Returns an indicator vector where ith element = 1 if x_i is labeled by at least two LFs that give it disagreeing labels.
[ "Returns", "an", "indicator", "vector", "where", "ith", "element", "=", "1", "if", "x_i", "is", "labeled", "by", "at", "least", "two", "LFs", "that", "give", "it", "disagreeing", "labels", "." ]
python
train
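A worked example (0 = abstain): with three data points and three labeling functions, only the first row carries two disagreeing labels:

import numpy as np
from scipy import sparse

L = sparse.csr_matrix(np.array([[1, 2, 0],
                                [1, 1, 0],
                                [0, 2, 2]]))
# Broadcast each row's max label onto its nonzero positions, then flag
# any position whose label differs from that max:
m = sparse.diags(np.ravel(L.max(axis=1).todense()))
print(np.ravel(np.max(m @ (L != 0) != L, axis=1).astype(int).todense()))
# -> [1 0 0]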
taskcluster/taskcluster-client.py
taskcluster/queue.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/queue.py#L188-L212
def scheduleTask(self, *args, **kwargs):
    """
    Schedule Defined Task

    scheduleTask will schedule a task to be executed, even if it has
    unresolved dependencies. A task would otherwise only be scheduled if
    its dependencies were resolved.

    This is useful if you have defined a task that depends on itself or on
    some other task that has not been resolved, but you wish the task to be
    scheduled immediately.

    This will announce the task as pending and workers will be allowed to
    claim it and resolve the task.

    **Note** this operation is **idempotent** and will not fail or complain
    if called with a `taskId` that is already scheduled, or even resolved.
    To reschedule a task previously resolved, use `rerunTask`.

    This method gives output: ``v1/task-status-response.json#``

    This method is ``stable``
    """

    return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
[ "def", "scheduleTask", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"scheduleTask\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Schedule Defined Task scheduleTask will schedule a task to be executed, even if it has unresolved dependencies. A task would otherwise only be scheduled if its dependencies were resolved. This is useful if you have defined a task that depends on itself or on some other task that has not been resolved, but you wish the task to be scheduled immediately. This will announce the task as pending and workers will be allowed to claim it and resolve the task. **Note** this operation is **idempotent** and will not fail or complain if called with a `taskId` that is already scheduled, or even resolved. To reschedule a task previously resolved, use `rerunTask`. This method gives output: ``v1/task-status-response.json#`` This method is ``stable``
[ "Schedule", "Defined", "Task" ]
python
train
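A hypothetical call against the scheduleTask record above; the root URL and taskId are placeholders, and the response shape follows v1/task-status-response.json#.

import taskcluster

queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})  # placeholder
status = queue.scheduleTask('fN1SbArXTPSVFNji-YFDlA')             # placeholder taskId
print(status['status']['state'])   # idempotent: safe to call repeatedly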
elliterate/capybara.py
capybara/selector/selector.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/selector/selector.py#L57-L62
def expression_filters(self): """ Dict[str, ExpressionFilter]: Returns the expression filters for this selector. """ return { name: filter for name, filter in iter(self.filters.items()) if isinstance(filter, ExpressionFilter)}
[ "def", "expression_filters", "(", "self", ")", ":", "return", "{", "name", ":", "filter", "for", "name", ",", "filter", "in", "iter", "(", "self", ".", "filters", ".", "items", "(", ")", ")", "if", "isinstance", "(", "filter", ",", "ExpressionFilter", ")", "}" ]
Dict[str, ExpressionFilter]: Returns the expression filters for this selector.
[ "Dict", "[", "str", "ExpressionFilter", "]", ":", "Returns", "the", "expression", "filters", "for", "this", "selector", "." ]
python
test
jacebrowning/comparable
comparable/tools.py
https://github.com/jacebrowning/comparable/blob/48455e613650e22412d31109681368fcc479298d/comparable/tools.py#L68-L76
def sort(base, items): """Get a sorted list of items ranked in descending similarity. @param base: base item to perform comparison against @param items: list of items to compare to the base @return: list of items sorted by similarity to the base """ return sorted(items, key=base.similarity, reverse=True)
[ "def", "sort", "(", "base", ",", "items", ")", ":", "return", "sorted", "(", "items", ",", "key", "=", "base", ".", "similarity", ",", "reverse", "=", "True", ")" ]
Get a sorted list of items ranked in descending similarity. @param base: base item to perform comparison against @param items: list of items to compare to the base @return: list of items sorted by similarity to the base
[ "Get", "a", "sorted", "list", "of", "items", "ranked", "in", "descending", "similarity", "." ]
python
train
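A toy sketch of the sort() record above; Word is a stand-in for a comparable item exposing similarity().

class Word:
    def __init__(self, text):
        self.text = text
    def similarity(self, other):
        return len(set(self.text) & set(other.text))   # toy metric

base = Word('python')
ranked = sort(base, [Word('java'), Word('typhoon'), Word('pylon')])
print([w.text for w in ranked])   # -> ['typhoon', 'pylon', 'java']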
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/brat.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/brat.py#L41-L101
def brat_output(docgraph, layer=None, show_relations=True): """ converts a document graph with pointing chains into a string representation of a brat *.ann file. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph which might contain pointing chains (e.g. coreference links) layer : str or None the name of the layer that contains the pointing chains (e.g. 'mmax' or 'pocores'). If unspecified, all pointing chains in the document will be considered Returns ------- ret_str : unicode the content of a brat *.ann file """ # we can't rely on the .ns attribute of a merged graph if layer: namespace = dg.layer2namespace(layer) else: namespace = docgraph.ns ret_str = u'' pointing_chains = dg.get_pointing_chains(docgraph, layer=layer) # a token can be part of 1+ markable(s) first_token2markables = defaultdict(list) markable_dict = {} markable_index = 1 for pointing_chain in pointing_chains: for markable in sorted(pointing_chain, key=dg.util.natural_sort_key): span_tokens = spanstring2tokens(docgraph, docgraph.node[markable][namespace+':span']) span_text = dg.tokens2text(docgraph, span_tokens) first_token2markables[span_tokens[0]].append(markable) markable_dict[markable] = (markable_index, span_text, len(span_text)) markable_index += 1 onset = 0 for token_id in docgraph.tokens: tok_len = len(docgraph.get_token(token_id)) if token_id in first_token2markables: for markable in first_token2markables[token_id]: mark_index, mark_text, mark_len = markable_dict[markable] ret_str += u"T{0}\tMarkable {1} {2}\t{3}\n".format( mark_index, onset, onset+mark_len, mark_text) onset += tok_len+1 if show_relations: relation = 1 for pointing_chain in pointing_chains: last_to_first_mention = sorted(pointing_chain, key=dg.util.natural_sort_key, reverse=True) for i in xrange(0, len(pointing_chain)-1): chain_element = markable_dict[last_to_first_mention[i]][0] prev_chain_element = markable_dict[last_to_first_mention[i+1]][0] ret_str += u"R{0}\tCoreference Arg1:T{1} Arg2:T{2}\n".format( relation, chain_element, prev_chain_element) relation += 1 return ret_str
[ "def", "brat_output", "(", "docgraph", ",", "layer", "=", "None", ",", "show_relations", "=", "True", ")", ":", "# we can't rely on the .ns attribute of a merged graph", "if", "layer", ":", "namespace", "=", "dg", ".", "layer2namespace", "(", "layer", ")", "else", ":", "namespace", "=", "docgraph", ".", "ns", "ret_str", "=", "u''", "pointing_chains", "=", "dg", ".", "get_pointing_chains", "(", "docgraph", ",", "layer", "=", "layer", ")", "# a token can be part of 1+ markable(s)", "first_token2markables", "=", "defaultdict", "(", "list", ")", "markable_dict", "=", "{", "}", "markable_index", "=", "1", "for", "pointing_chain", "in", "pointing_chains", ":", "for", "markable", "in", "sorted", "(", "pointing_chain", ",", "key", "=", "dg", ".", "util", ".", "natural_sort_key", ")", ":", "span_tokens", "=", "spanstring2tokens", "(", "docgraph", ",", "docgraph", ".", "node", "[", "markable", "]", "[", "namespace", "+", "':span'", "]", ")", "span_text", "=", "dg", ".", "tokens2text", "(", "docgraph", ",", "span_tokens", ")", "first_token2markables", "[", "span_tokens", "[", "0", "]", "]", ".", "append", "(", "markable", ")", "markable_dict", "[", "markable", "]", "=", "(", "markable_index", ",", "span_text", ",", "len", "(", "span_text", ")", ")", "markable_index", "+=", "1", "onset", "=", "0", "for", "token_id", "in", "docgraph", ".", "tokens", ":", "tok_len", "=", "len", "(", "docgraph", ".", "get_token", "(", "token_id", ")", ")", "if", "token_id", "in", "first_token2markables", ":", "for", "markable", "in", "first_token2markables", "[", "token_id", "]", ":", "mark_index", ",", "mark_text", ",", "mark_len", "=", "markable_dict", "[", "markable", "]", "ret_str", "+=", "u\"T{0}\\tMarkable {1} {2}\\t{3}\\n\"", ".", "format", "(", "mark_index", ",", "onset", ",", "onset", "+", "mark_len", ",", "mark_text", ")", "onset", "+=", "tok_len", "+", "1", "if", "show_relations", ":", "relation", "=", "1", "for", "pointing_chain", "in", "pointing_chains", ":", "last_to_first_mention", "=", "sorted", "(", "pointing_chain", ",", "key", "=", "dg", ".", "util", ".", "natural_sort_key", ",", "reverse", "=", "True", ")", "for", "i", "in", "xrange", "(", "0", ",", "len", "(", "pointing_chain", ")", "-", "1", ")", ":", "chain_element", "=", "markable_dict", "[", "last_to_first_mention", "[", "i", "]", "]", "[", "0", "]", "prev_chain_element", "=", "markable_dict", "[", "last_to_first_mention", "[", "i", "+", "1", "]", "]", "[", "0", "]", "ret_str", "+=", "u\"R{0}\\tCoreference Arg1:T{1} Arg2:T{2}\\n\"", ".", "format", "(", "relation", ",", "chain_element", ",", "prev_chain_element", ")", "relation", "+=", "1", "return", "ret_str" ]
converts a document graph with pointing chains into a string representation of a brat *.ann file. Parameters ---------- docgraph : DiscourseDocumentGraph a document graph which might contain pointing chains (e.g. coreference links) layer : str or None the name of the layer that contains the pointing chains (e.g. 'mmax' or 'pocores'). If unspecified, all pointing chains in the document will be considered Returns ------- ret_str : unicode the content of a brat *.ann file
[ "converts", "a", "document", "graph", "with", "pointing", "chains", "into", "a", "string", "representation", "of", "a", "brat", "*", ".", "ann", "file", "." ]
python
train
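A hedged call sketch for the brat_output record above; it assumes a document graph with coreference pointing chains was built elsewhere (the layer name and output path are illustrative).

ann_text = brat_output(docgraph, layer='mmax', show_relations=True)
with open('document.ann', 'w') as f:
    f.write(ann_text)   # T... markable lines plus R... coreference lines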
m32/endesive
endesive/pdf/fpdf/fpdf.py
https://github.com/m32/endesive/blob/973091dc69847fe2df594c80ac9235a8d08460ff/endesive/pdf/fpdf/fpdf.py#L890-L955
def write(self, h, txt='', link=''): "Output text in flowing mode" txt = self.normalize_text(txt) cw=self.current_font['cw'] w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size s=txt.replace("\r",'') nb=len(s) sep=-1 i=0 j=0 l=0 nl=1 while(i<nb): #Get next character c=s[i] if(c=="\n"): #Explicit line break self.cell(w,h,substr(s,j,i-j),0,2,'',0,link) i+=1 sep=-1 j=i l=0 if(nl==1): self.x=self.l_margin w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size nl+=1 continue if(c==' '): sep=i if self.unifontsubset: l += self.get_string_width(c) / self.font_size*1000.0 else: l += cw.get(c,0) if(l>wmax): #Automatic line break if(sep==-1): if(self.x>self.l_margin): #Move to next line self.x=self.l_margin self.y+=h w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size i+=1 nl+=1 continue if(i==j): i+=1 self.cell(w,h,substr(s,j,i-j),0,2,'',0,link) else: self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link) i=sep+1 sep=-1 j=i l=0 if(nl==1): self.x=self.l_margin w=self.w-self.r_margin-self.x wmax=(w-2*self.c_margin)*1000.0/self.font_size nl+=1 else: i+=1 #Last chunk if(i!=j): self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link)
[ "def", "write", "(", "self", ",", "h", ",", "txt", "=", "''", ",", "link", "=", "''", ")", ":", "txt", "=", "self", ".", "normalize_text", "(", "txt", ")", "cw", "=", "self", ".", "current_font", "[", "'cw'", "]", "w", "=", "self", ".", "w", "-", "self", ".", "r_margin", "-", "self", ".", "x", "wmax", "=", "(", "w", "-", "2", "*", "self", ".", "c_margin", ")", "*", "1000.0", "/", "self", ".", "font_size", "s", "=", "txt", ".", "replace", "(", "\"\\r\"", ",", "''", ")", "nb", "=", "len", "(", "s", ")", "sep", "=", "-", "1", "i", "=", "0", "j", "=", "0", "l", "=", "0", "nl", "=", "1", "while", "(", "i", "<", "nb", ")", ":", "#Get next character", "c", "=", "s", "[", "i", "]", "if", "(", "c", "==", "\"\\n\"", ")", ":", "#Explicit line break", "self", ".", "cell", "(", "w", ",", "h", ",", "substr", "(", "s", ",", "j", ",", "i", "-", "j", ")", ",", "0", ",", "2", ",", "''", ",", "0", ",", "link", ")", "i", "+=", "1", "sep", "=", "-", "1", "j", "=", "i", "l", "=", "0", "if", "(", "nl", "==", "1", ")", ":", "self", ".", "x", "=", "self", ".", "l_margin", "w", "=", "self", ".", "w", "-", "self", ".", "r_margin", "-", "self", ".", "x", "wmax", "=", "(", "w", "-", "2", "*", "self", ".", "c_margin", ")", "*", "1000.0", "/", "self", ".", "font_size", "nl", "+=", "1", "continue", "if", "(", "c", "==", "' '", ")", ":", "sep", "=", "i", "if", "self", ".", "unifontsubset", ":", "l", "+=", "self", ".", "get_string_width", "(", "c", ")", "/", "self", ".", "font_size", "*", "1000.0", "else", ":", "l", "+=", "cw", ".", "get", "(", "c", ",", "0", ")", "if", "(", "l", ">", "wmax", ")", ":", "#Automatic line break", "if", "(", "sep", "==", "-", "1", ")", ":", "if", "(", "self", ".", "x", ">", "self", ".", "l_margin", ")", ":", "#Move to next line", "self", ".", "x", "=", "self", ".", "l_margin", "self", ".", "y", "+=", "h", "w", "=", "self", ".", "w", "-", "self", ".", "r_margin", "-", "self", ".", "x", "wmax", "=", "(", "w", "-", "2", "*", "self", ".", "c_margin", ")", "*", "1000.0", "/", "self", ".", "font_size", "i", "+=", "1", "nl", "+=", "1", "continue", "if", "(", "i", "==", "j", ")", ":", "i", "+=", "1", "self", ".", "cell", "(", "w", ",", "h", ",", "substr", "(", "s", ",", "j", ",", "i", "-", "j", ")", ",", "0", ",", "2", ",", "''", ",", "0", ",", "link", ")", "else", ":", "self", ".", "cell", "(", "w", ",", "h", ",", "substr", "(", "s", ",", "j", ",", "sep", "-", "j", ")", ",", "0", ",", "2", ",", "''", ",", "0", ",", "link", ")", "i", "=", "sep", "+", "1", "sep", "=", "-", "1", "j", "=", "i", "l", "=", "0", "if", "(", "nl", "==", "1", ")", ":", "self", ".", "x", "=", "self", ".", "l_margin", "w", "=", "self", ".", "w", "-", "self", ".", "r_margin", "-", "self", ".", "x", "wmax", "=", "(", "w", "-", "2", "*", "self", ".", "c_margin", ")", "*", "1000.0", "/", "self", ".", "font_size", "nl", "+=", "1", "else", ":", "i", "+=", "1", "#Last chunk", "if", "(", "i", "!=", "j", ")", ":", "self", ".", "cell", "(", "l", "/", "1000.0", "*", "self", ".", "font_size", ",", "h", ",", "substr", "(", "s", ",", "j", ")", ",", "0", ",", "0", ",", "''", ",", "0", ",", "link", ")" ]
Output text in flowing mode
[ "Output", "text", "in", "flowing", "mode" ]
python
train
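A hedged sketch of flowing-mode output with an FPDF-style document object; the font and file names are illustrative.

pdf = FPDF()
pdf.add_page()
pdf.set_font('Helvetica', size=12)
pdf.write(8, 'This sentence wraps automatically at the right margin. ')
pdf.write(8, 'This part is a link.', link='https://example.com')
pdf.output('demo.pdf')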
ReFirmLabs/binwalk
src/binwalk/plugins/unpfs.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/plugins/unpfs.py#L33-L40
def _get_fname_len(self, bufflen=128): """Returns the number of bytes designated for the filename.""" buff = self.meta.peek(bufflen) strlen = buff.find('\0') for i, b in enumerate(buff[strlen:]): if b != '\0': return strlen+i return bufflen
[ "def", "_get_fname_len", "(", "self", ",", "bufflen", "=", "128", ")", ":", "buff", "=", "self", ".", "meta", ".", "peek", "(", "bufflen", ")", "strlen", "=", "buff", ".", "find", "(", "'\\0'", ")", "for", "i", ",", "b", "in", "enumerate", "(", "buff", "[", "strlen", ":", "]", ")", ":", "if", "b", "!=", "'\\0'", ":", "return", "strlen", "+", "i", "return", "bufflen" ]
Returns the number of bytes designated for the filename.
[ "Returns", "the", "number", "of", "bytes", "designated", "for", "the", "filename", "." ]
python
train
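A standalone re-creation of the length logic above, purely for illustration; the sample buffer is made up.

def fname_field_len(buff, bufflen=128):
    strlen = buff.find('\0')
    for i, b in enumerate(buff[strlen:]):
        if b != '\0':
            return strlen + i
    return bufflen

field = 'firmware.bin' + '\0' * 20 + 'X'   # next field begins at 'X'
print(fname_field_len(field))              # -> 32 bytes reserved for the name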
Stewori/pytypes
pytypes/util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/util.py#L551-L568
def getmodule(code): """More robust variant of inspect.getmodule. E.g. has less issues on Jython. """ try: md = inspect.getmodule(code, code.co_filename) except AttributeError: return inspect.getmodule(code) if md is None: # Jython-specific: # This is currently just a crutch; todo: resolve __pyclasspath__ properly! cfname = code.co_filename.replace('__pyclasspath__', os.path.realpath('')+os.sep+'__pyclasspath__') cfname = cfname.replace('$py.class', '.py') md = inspect.getmodule(code, cfname) if md is None: md = inspect.getmodule(code) return md
[ "def", "getmodule", "(", "code", ")", ":", "try", ":", "md", "=", "inspect", ".", "getmodule", "(", "code", ",", "code", ".", "co_filename", ")", "except", "AttributeError", ":", "return", "inspect", ".", "getmodule", "(", "code", ")", "if", "md", "is", "None", ":", "# Jython-specific:", "# This is currently just a crutch; todo: resolve __pyclasspath__ properly!", "cfname", "=", "code", ".", "co_filename", ".", "replace", "(", "'__pyclasspath__'", ",", "os", ".", "path", ".", "realpath", "(", "''", ")", "+", "os", ".", "sep", "+", "'__pyclasspath__'", ")", "cfname", "=", "cfname", ".", "replace", "(", "'$py.class'", ",", "'.py'", ")", "md", "=", "inspect", ".", "getmodule", "(", "code", ",", "cfname", ")", "if", "md", "is", "None", ":", "md", "=", "inspect", ".", "getmodule", "(", "code", ")", "return", "md" ]
More robust variant of inspect.getmodule. E.g. has less issues on Jython.
[ "More", "robust", "variant", "of", "inspect", ".", "getmodule", ".", "E", ".", "g", ".", "has", "less", "issues", "on", "Jython", "." ]
python
train
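A quick usage sketch for the getmodule record above, resolving the defining module of a code object.

def sample():
    pass

md = getmodule(sample.__code__)
print(md.__name__)   # e.g. '__main__' when run directly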
totalgood/nlpia
src/nlpia/regexes.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/regexes.py#L127-L135
def to_tsv(): """ Save all regular expressions to a tsv file so they can be more easily copy/pasted in Sublime """ with open(os.path.join(DATA_PATH, 'regexes.tsv'), mode='wt') as fout: vars = copy.copy(tuple(globals().items())) for k, v in vars: if k.lower().startswith('cre_'): fout.write(k[4:] + '\t' + v.pattern + '\n') elif k.lower().startswith('re_'): fout.write(k[3:] + '\t' + v.pattern + '\n')
[ "def", "to_tsv", "(", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "'regexes.tsv'", ")", ",", "mode", "=", "'wt'", ")", "as", "fout", ":", "vars", "=", "copy", ".", "copy", "(", "tuple", "(", "globals", "(", ")", ".", "items", "(", ")", ")", ")", "for", "k", ",", "v", "in", "vars", ":", "if", "k", ".", "lower", "(", ")", ".", "startswith", "(", "'cre_'", ")", ":", "fout", ".", "write", "(", "k", "[", "4", ":", "]", "+", "'\\t'", "+", "v", ".", "pattern", "+", "'\\n'", ")", "elif", "k", ".", "lower", "(", ")", ".", "startswith", "(", "'re_'", ")", ":", "fout", ".", "write", "(", "k", "[", "3", ":", "]", "+", "'\\t'", "+", "v", ".", "pattern", "+", "'\\n'", ")" ]
Save all regular expressions to a tsv file so they can be more easily copy/pasted in Sublime
[ "Save", "all", "regular", "expressions", "to", "a", "tsv", "file", "so", "they", "can", "be", "more", "easily", "copy", "/", "pasted", "in", "Sublime" ]
python
train
docker/docker-py
docker/api/exec_api.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/exec_api.py#L9-L80
def exec_create(self, container, cmd, stdout=True, stderr=True, stdin=False, tty=False, privileged=False, user='', environment=None, workdir=None, detach_keys=None): """ Sets up an exec instance in a running container. Args: container (str): Target container where exec instance will be created cmd (str or list): Command to be executed stdout (bool): Attach to stdout. Default: ``True`` stderr (bool): Attach to stderr. Default: ``True`` stdin (bool): Attach to stdin. Default: ``False`` tty (bool): Allocate a pseudo-TTY. Default: False privileged (bool): Run as privileged. user (str): User to execute command as. Default: root environment (dict or list): A dictionary or a list of strings in the following format ``["PASSWORD=xxx"]`` or ``{"PASSWORD": "xxx"}``. workdir (str): Path to working directory for this exec session detach_keys (str): Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. ~/.docker/config.json is used by default. Returns: (dict): A dictionary with an exec ``Id`` key. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ if environment is not None and utils.version_lt(self._version, '1.25'): raise errors.InvalidVersion( 'Setting environment for exec is not supported in API < 1.25' ) if isinstance(cmd, six.string_types): cmd = utils.split_command(cmd) if isinstance(environment, dict): environment = utils.utils.format_environment(environment) data = { 'Container': container, 'User': user, 'Privileged': privileged, 'Tty': tty, 'AttachStdin': stdin, 'AttachStdout': stdout, 'AttachStderr': stderr, 'Cmd': cmd, 'Env': environment, } if workdir is not None: if utils.version_lt(self._version, '1.35'): raise errors.InvalidVersion( 'workdir is not supported for API version < 1.35' ) data['WorkingDir'] = workdir if detach_keys: data['detachKeys'] = detach_keys elif 'detachKeys' in self._general_configs: data['detachKeys'] = self._general_configs['detachKeys'] url = self._url('/containers/{0}/exec', container) res = self._post_json(url, data=data) return self._result(res, True)
[ "def", "exec_create", "(", "self", ",", "container", ",", "cmd", ",", "stdout", "=", "True", ",", "stderr", "=", "True", ",", "stdin", "=", "False", ",", "tty", "=", "False", ",", "privileged", "=", "False", ",", "user", "=", "''", ",", "environment", "=", "None", ",", "workdir", "=", "None", ",", "detach_keys", "=", "None", ")", ":", "if", "environment", "is", "not", "None", "and", "utils", ".", "version_lt", "(", "self", ".", "_version", ",", "'1.25'", ")", ":", "raise", "errors", ".", "InvalidVersion", "(", "'Setting environment for exec is not supported in API < 1.25'", ")", "if", "isinstance", "(", "cmd", ",", "six", ".", "string_types", ")", ":", "cmd", "=", "utils", ".", "split_command", "(", "cmd", ")", "if", "isinstance", "(", "environment", ",", "dict", ")", ":", "environment", "=", "utils", ".", "utils", ".", "format_environment", "(", "environment", ")", "data", "=", "{", "'Container'", ":", "container", ",", "'User'", ":", "user", ",", "'Privileged'", ":", "privileged", ",", "'Tty'", ":", "tty", ",", "'AttachStdin'", ":", "stdin", ",", "'AttachStdout'", ":", "stdout", ",", "'AttachStderr'", ":", "stderr", ",", "'Cmd'", ":", "cmd", ",", "'Env'", ":", "environment", ",", "}", "if", "workdir", "is", "not", "None", ":", "if", "utils", ".", "version_lt", "(", "self", ".", "_version", ",", "'1.35'", ")", ":", "raise", "errors", ".", "InvalidVersion", "(", "'workdir is not supported for API version < 1.35'", ")", "data", "[", "'WorkingDir'", "]", "=", "workdir", "if", "detach_keys", ":", "data", "[", "'detachKeys'", "]", "=", "detach_keys", "elif", "'detachKeys'", "in", "self", ".", "_general_configs", ":", "data", "[", "'detachKeys'", "]", "=", "self", ".", "_general_configs", "[", "'detachKeys'", "]", "url", "=", "self", ".", "_url", "(", "'/containers/{0}/exec'", ",", "container", ")", "res", "=", "self", ".", "_post_json", "(", "url", ",", "data", "=", "data", ")", "return", "self", ".", "_result", "(", "res", ",", "True", ")" ]
Sets up an exec instance in a running container. Args: container (str): Target container where exec instance will be created cmd (str or list): Command to be executed stdout (bool): Attach to stdout. Default: ``True`` stderr (bool): Attach to stderr. Default: ``True`` stdin (bool): Attach to stdin. Default: ``False`` tty (bool): Allocate a pseudo-TTY. Default: False privileged (bool): Run as privileged. user (str): User to execute command as. Default: root environment (dict or list): A dictionary or a list of strings in the following format ``["PASSWORD=xxx"]`` or ``{"PASSWORD": "xxx"}``. workdir (str): Path to working directory for this exec session detach_keys (str): Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. ~/.docker/config.json is used by default. Returns: (dict): A dictionary with an exec ``Id`` key. Raises: :py:class:`docker.errors.APIError` If the server returns an error.
[ "Sets", "up", "an", "exec", "instance", "in", "a", "running", "container", "." ]
python
train
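A hypothetical low-level usage of the exec_create record above; the socket URL and container name are placeholders.

import docker

client = docker.APIClient(base_url='unix://var/run/docker.sock')
exec_id = client.exec_create('my_container', 'ls -l /tmp',
                             environment={'PASSWORD': 'xxx'})  # needs API >= 1.25
print(client.exec_start(exec_id).decode())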
google/grr
grr/server/grr_response_server/bigquery.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/bigquery.py#L167-L220
def InsertData(self, table_id, fd, schema, job_id): """Insert data into a bigquery table. If the table specified doesn't exist, it will be created with the specified schema. Args: table_id: string table id fd: open file descriptor containing the newline separated JSON schema: BigQuery schema dict job_id: string job id Returns: API response object on success, None on failure """ configuration = { "schema": { "fields": schema }, "destinationTable": { "projectId": self.project_id, "tableId": table_id, "datasetId": self.dataset_id }, "sourceFormat": "NEWLINE_DELIMITED_JSON", } body = { "configuration": { "load": configuration }, "jobReference": { "projectId": self.project_id, "jobId": job_id } } # File content can be gzipped for bandwidth efficiency. The server handles # it correctly without any changes to the request. mediafile = http.MediaFileUpload( fd.name, mimetype="application/octet-stream") job = self.service.jobs().insert( projectId=self.project_id, body=body, media_body=mediafile) try: response = job.execute() return response except errors.HttpError as e: if self.GetDataset(self.dataset_id): logging.exception("Error with job: %s", job_id) else: # If this is our first export ever, we need to create the dataset. logging.info("Attempting to create dataset: %s", self.dataset_id) self.CreateDataset() return self.RetryUpload(job, job_id, e)
[ "def", "InsertData", "(", "self", ",", "table_id", ",", "fd", ",", "schema", ",", "job_id", ")", ":", "configuration", "=", "{", "\"schema\"", ":", "{", "\"fields\"", ":", "schema", "}", ",", "\"destinationTable\"", ":", "{", "\"projectId\"", ":", "self", ".", "project_id", ",", "\"tableId\"", ":", "table_id", ",", "\"datasetId\"", ":", "self", ".", "dataset_id", "}", ",", "\"sourceFormat\"", ":", "\"NEWLINE_DELIMITED_JSON\"", ",", "}", "body", "=", "{", "\"configuration\"", ":", "{", "\"load\"", ":", "configuration", "}", ",", "\"jobReference\"", ":", "{", "\"projectId\"", ":", "self", ".", "project_id", ",", "\"jobId\"", ":", "job_id", "}", "}", "# File content can be gzipped for bandwidth efficiency. The server handles", "# it correctly without any changes to the request.", "mediafile", "=", "http", ".", "MediaFileUpload", "(", "fd", ".", "name", ",", "mimetype", "=", "\"application/octet-stream\"", ")", "job", "=", "self", ".", "service", ".", "jobs", "(", ")", ".", "insert", "(", "projectId", "=", "self", ".", "project_id", ",", "body", "=", "body", ",", "media_body", "=", "mediafile", ")", "try", ":", "response", "=", "job", ".", "execute", "(", ")", "return", "response", "except", "errors", ".", "HttpError", "as", "e", ":", "if", "self", ".", "GetDataset", "(", "self", ".", "dataset_id", ")", ":", "logging", ".", "exception", "(", "\"Error with job: %s\"", ",", "job_id", ")", "else", ":", "# If this is our first export ever, we need to create the dataset.", "logging", ".", "info", "(", "\"Attempting to create dataset: %s\"", ",", "self", ".", "dataset_id", ")", "self", ".", "CreateDataset", "(", ")", "return", "self", ".", "RetryUpload", "(", "job", ",", "job_id", ",", "e", ")" ]
Insert data into a bigquery table. If the table specified doesn't exist, it will be created with the specified schema. Args: table_id: string table id fd: open file descriptor containing the newline separated JSON schema: BigQuery schema dict job_id: string job id Returns: API response object on success, None on failure
[ "Insert", "data", "into", "a", "bigquery", "table", "." ]
python
train
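A hedged call sketch for the InsertData record above; uploader stands for an instance of the surrounding client class, and the table name, schema, and file path are placeholders.

schema = [{'name': 'client_id', 'type': 'STRING'},
          {'name': 'timestamp', 'type': 'INTEGER'}]
with open('/tmp/export.json') as fd:   # newline-delimited JSON rows
    response = uploader.InsertData('events_20190101', fd, schema,
                                   job_id='export_job_1')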
apache/incubator-heron
heron/tools/tracker/src/python/tracker.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/tracker.py#L182-L186
def getTopologiesForStateLocation(self, name): """ Returns all the topologies for a given state manager. """ return filter(lambda t: t.state_manager_name == name, self.topologies)
[ "def", "getTopologiesForStateLocation", "(", "self", ",", "name", ")", ":", "return", "filter", "(", "lambda", "t", ":", "t", ".", "state_manager_name", "==", "name", ",", "self", ".", "topologies", ")" ]
Returns all the topologies for a given state manager.
[ "Returns", "all", "the", "topologies", "for", "a", "given", "state", "manager", "." ]
python
valid
g2p/bedup
bedup/dedup.py
https://github.com/g2p/bedup/blob/9694f6f718844c33017052eb271f68b6c0d0b7d3/bedup/dedup.py#L120-L186
def find_inodes_in_use(fds): """ Find which of these inodes are in use, and give their open modes. Does not count the passed fds as a use of the inode they point to, but if the current process has the same inodes open with different file descriptors these will be listed. Looks at /proc/*/fd and /proc/*/map_files (Linux 3.3). Conceivably there are other uses we're missing, to be foolproof will require support in btrfs itself; a share-same-range ioctl would work well. """ self_pid = os.getpid() id_fd_assoc = collections.defaultdict(list) for fd in fds: st = os.fstat(fd) id_fd_assoc[(st.st_dev, st.st_ino)].append(fd) def st_id_candidates(it): # map proc paths to stat identifiers (devno and ino) for proc_path in it: try: st = os.stat(proc_path) except OSError as e: # glob opens directories during matching, # and other processes might close their fds in the meantime. # This isn't a problem for the immutable-locked use case. # ESTALE could happen with NFS or Docker if e.errno in (errno.ENOENT, errno.ESTALE): continue raise st_id = (st.st_dev, st.st_ino) if st_id not in id_fd_assoc: continue yield proc_path, st_id for proc_path, st_id in st_id_candidates(glob.glob('/proc/[1-9]*/fd/*')): other_pid, other_fd = map( int, PROC_PATH_RE.match(proc_path).groups()) original_fds = id_fd_assoc[st_id] if other_pid == self_pid: if other_fd in original_fds: continue use_info = proc_use_info(proc_path) if not use_info: continue for fd in original_fds: yield (fd, use_info) # Requires Linux 3.3 for proc_path, st_id in st_id_candidates( glob.glob('/proc/[1-9]*/map_files/*') ): use_info = proc_use_info(proc_path) if not use_info: continue original_fds = id_fd_assoc[st_id] for fd in original_fds: yield (fd, use_info)
[ "def", "find_inodes_in_use", "(", "fds", ")", ":", "self_pid", "=", "os", ".", "getpid", "(", ")", "id_fd_assoc", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "fd", "in", "fds", ":", "st", "=", "os", ".", "fstat", "(", "fd", ")", "id_fd_assoc", "[", "(", "st", ".", "st_dev", ",", "st", ".", "st_ino", ")", "]", ".", "append", "(", "fd", ")", "def", "st_id_candidates", "(", "it", ")", ":", "# map proc paths to stat identifiers (devno and ino)", "for", "proc_path", "in", "it", ":", "try", ":", "st", "=", "os", ".", "stat", "(", "proc_path", ")", "except", "OSError", "as", "e", ":", "# glob opens directories during matching,", "# and other processes might close their fds in the meantime.", "# This isn't a problem for the immutable-locked use case.", "# ESTALE could happen with NFS or Docker", "if", "e", ".", "errno", "in", "(", "errno", ".", "ENOENT", ",", "errno", ".", "ESTALE", ")", ":", "continue", "raise", "st_id", "=", "(", "st", ".", "st_dev", ",", "st", ".", "st_ino", ")", "if", "st_id", "not", "in", "id_fd_assoc", ":", "continue", "yield", "proc_path", ",", "st_id", "for", "proc_path", ",", "st_id", "in", "st_id_candidates", "(", "glob", ".", "glob", "(", "'/proc/[1-9]*/fd/*'", ")", ")", ":", "other_pid", ",", "other_fd", "=", "map", "(", "int", ",", "PROC_PATH_RE", ".", "match", "(", "proc_path", ")", ".", "groups", "(", ")", ")", "original_fds", "=", "id_fd_assoc", "[", "st_id", "]", "if", "other_pid", "==", "self_pid", ":", "if", "other_fd", "in", "original_fds", ":", "continue", "use_info", "=", "proc_use_info", "(", "proc_path", ")", "if", "not", "use_info", ":", "continue", "for", "fd", "in", "original_fds", ":", "yield", "(", "fd", ",", "use_info", ")", "# Requires Linux 3.3", "for", "proc_path", ",", "st_id", "in", "st_id_candidates", "(", "glob", ".", "glob", "(", "'/proc/[1-9]*/map_files/*'", ")", ")", ":", "use_info", "=", "proc_use_info", "(", "proc_path", ")", "if", "not", "use_info", ":", "continue", "original_fds", "=", "id_fd_assoc", "[", "st_id", "]", "for", "fd", "in", "original_fds", ":", "yield", "(", "fd", ",", "use_info", ")" ]
Find which of these inodes are in use, and give their open modes. Does not count the passed fds as a use of the inode they point to, but if the current process has the same inodes open with different file descriptors these will be listed. Looks at /proc/*/fd and /proc/*/map_files (Linux 3.3). Conceivably there are other uses we're missing, to be foolproof will require support in btrfs itself; a share-same-range ioctl would work well.
[ "Find", "which", "of", "these", "inodes", "are", "in", "use", "and", "give", "their", "open", "modes", "." ]
python
train
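A hedged, Linux-only sketch of the find_inodes_in_use record above; the path is a placeholder.

import os

fds = [os.open('/var/log/syslog', os.O_RDONLY)]   # placeholder path
for fd, use_info in find_inodes_in_use(fds):
    print(fd, use_info)   # other holders of the same inode
for fd in fds:
    os.close(fd)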
fdb/aufmachen
aufmachen/BeautifulSoup.py
https://github.com/fdb/aufmachen/blob/f2986a0cf087ac53969f82b84d872e3f1c6986f4/aufmachen/BeautifulSoup.py#L778-L793
def decompose(self): """Recursively destroys the contents of this tree.""" self.extract() if len(self.contents) == 0: return current = self.contents[0] while current is not None: next = current.next if isinstance(current, Tag): del current.contents[:] current.parent = None current.previous = None current.previousSibling = None current.next = None current.nextSibling = None current = next
[ "def", "decompose", "(", "self", ")", ":", "self", ".", "extract", "(", ")", "if", "len", "(", "self", ".", "contents", ")", "==", "0", ":", "return", "current", "=", "self", ".", "contents", "[", "0", "]", "while", "current", "is", "not", "None", ":", "next", "=", "current", ".", "next", "if", "isinstance", "(", "current", ",", "Tag", ")", ":", "del", "current", ".", "contents", "[", ":", "]", "current", ".", "parent", "=", "None", "current", ".", "previous", "=", "None", "current", ".", "previousSibling", "=", "None", "current", ".", "next", "=", "None", "current", ".", "nextSibling", "=", "None", "current", "=", "next" ]
Recursively destroys the contents of this tree.
[ "Recursively", "destroys", "the", "contents", "of", "this", "tree", "." ]
python
train
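A small sketch against the vendored BeautifulSoup-style parser above (bs3-era API).

soup = BeautifulSoup('<div><p>hello <b>world</b></p></div>')
soup.find('div').decompose()   # frees the whole subtree
print(soup)                    # -> empty document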
raymondEhlers/pachyderm
pachyderm/yaml.py
https://github.com/raymondEhlers/pachyderm/blob/aaa1d8374fd871246290ce76f1796f2f7582b01d/pachyderm/yaml.py#L156-L181
def enum_to_yaml(cls: Type[T_EnumToYAML], representer: Representer, data: T_EnumToYAML) -> ruamel.yaml.nodes.ScalarNode: """ Encodes YAML representation. This is a mixin method for writing enum values to YAML. It needs to be added to the enum as a classmethod. See the module docstring for further information on this approach and how to implement it. This method writes whatever is used in the string representation of the YAML value. Usually, this will be the unique name of the enumeration value. If the name is used, the corresponding ``EnumFromYAML`` mixin can be used to recreate the value. If the name isn't used, more care may be necessary, so a ``from_yaml`` method for that particular enumeration may be necessary. Note: This method assumes that the name of the enumeration value should be stored as a scalar node. Args: representer: Representation from YAML. data: Enumeration value to be encoded. Returns: Scalar representation of the name of the enumeration value. """ return representer.represent_scalar( f"!{cls.__name__}", f"{str(data)}" )
[ "def", "enum_to_yaml", "(", "cls", ":", "Type", "[", "T_EnumToYAML", "]", ",", "representer", ":", "Representer", ",", "data", ":", "T_EnumToYAML", ")", "->", "ruamel", ".", "yaml", ".", "nodes", ".", "ScalarNode", ":", "return", "representer", ".", "represent_scalar", "(", "f\"!{cls.__name__}\"", ",", "f\"{str(data)}\"", ")" ]
Encodes YAML representation. This is a mixin method for writing enum values to YAML. It needs to be added to the enum as a classmethod. See the module docstring for further information on this approach and how to implement it. This method writes whatever is used in the string representation of the YAML value. Usually, this will be the unique name of the enumeration value. If the name is used, the corresponding ``EnumFromYAML`` mixin can be used to recreate the value. If the name isn't used, more care may be necessary, so a ``from_yaml`` method for that particular enumeration may be necessary. Note: This method assumes that the name of the enumeration value should be stored as a scalar node. Args: representer: Representation from YAML. data: Enumeration value to be encoded. Returns: Scalar representation of the name of the enumeration value.
[ "Encodes", "YAML", "representation", "." ]
python
train
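A hedged sketch of wiring the enum_to_yaml mixin above into an enum and registering it with ruamel.yaml; the class and member names are made up.

import enum
import sys
import ruamel.yaml

class Detector(enum.Enum):
    EMCAL = 'EMCal'
    def __str__(self):
        return self.name
    to_yaml = classmethod(enum_to_yaml)

yaml = ruamel.yaml.YAML()
yaml.register_class(Detector)            # dumping now uses Detector.to_yaml
yaml.dump([Detector.EMCAL], sys.stdout)  # -> - !Detector EMCAL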
CityOfZion/neo-python
neo/Core/State/UnspentCoinState.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/State/UnspentCoinState.py#L43-L52
def Size(self): """ Get the total size in bytes of the object. Returns: int: size. """ # Items should be an array of type CoinState, not of ints! corrected_items = list(map(lambda i: CoinState(i), self.Items)) return super(UnspentCoinState, self).Size() + GetVarSize(corrected_items)
[ "def", "Size", "(", "self", ")", ":", "# Items should be an array of type CoinState, not of ints!", "corrected_items", "=", "list", "(", "map", "(", "lambda", "i", ":", "CoinState", "(", "i", ")", ",", "self", ".", "Items", ")", ")", "return", "super", "(", "UnspentCoinState", ",", "self", ")", ".", "Size", "(", ")", "+", "GetVarSize", "(", "corrected_items", ")" ]
Get the total size in bytes of the object. Returns: int: size.
[ "Get", "the", "total", "size", "in", "bytes", "of", "the", "object", "." ]
python
train
QInfer/python-qinfer
src/qinfer/domains.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/domains.py#L493-L503
def n_members(self): """ Returns the number of members in the domain if it `is_finite`, otherwise, returns `np.inf`. :type: ``int`` or ``np.inf`` """ if self.is_finite: return int(self.max - self.min + 1) else: return np.inf
[ "def", "n_members", "(", "self", ")", ":", "if", "self", ".", "is_finite", ":", "return", "int", "(", "self", ".", "max", "-", "self", ".", "min", "+", "1", ")", "else", ":", "return", "np", ".", "inf" ]
Returns the number of members in the domain if it `is_finite`, otherwise, returns `np.inf`. :type: ``int`` or ``np.inf``
[ "Returns", "the", "number", "of", "members", "in", "the", "domain", "if", "it", "is_finite", "otherwise", "returns", "np", ".", "inf", "." ]
python
train
inasafe/inasafe
safe/report/processors/default.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/processors/default.py#L716-L781
def atlas_renderer(layout, coverage_layer, output_path, file_format): """Extract composition using atlas generation. :param layout: QGIS Print Layout object used for producing the report. :type layout: qgis.core.QgsPrintLayout :param coverage_layer: Coverage Layer used for atlas map. :type coverage_layer: QgsMapLayer :param output_path: The output path of the product. :type output_path: str :param file_format: File format of map output, 'pdf' or 'png'. :type file_format: str :return: Generated output path(s). :rtype: str, list """ # set the composer map to be atlas driven composer_map = layout_item( layout, 'impact-map', QgsLayoutItemMap) composer_map.setAtlasDriven(True) composer_map.setAtlasScalingMode(QgsLayoutItemMap.Auto) # setup the atlas composition and composition atlas mode atlas_composition = layout.atlas() atlas_composition.setCoverageLayer(coverage_layer) atlas_on_single_file = layout.customProperty('singleFile', True) if file_format == QgisComposerComponentsMetadata.OutputFormat.PDF: if not atlas_composition.filenameExpression(): atlas_composition.setFilenameExpression( "'output_'||@atlas_featurenumber") output_directory = os.path.dirname(output_path) # we need to set the predefined scales for atlas project_scales = [] scales = QgsProject.instance().readListEntry( "Scales", "/ScalesList")[0] has_project_scales = QgsProject.instance().readBoolEntry( "Scales", "/useProjectScales")[0] if not has_project_scales or not scales: scales_string = str(general_setting("Map/scales", PROJECT_SCALES)) scales = scales_string.split(',') for scale in scales: parts = scale.split(':') if len(parts) == 2: project_scales.append(float(parts[1])) layout.reportContext().setPredefinedScales(project_scales) settings = QgsLayoutExporter.PdfExportSettings() LOGGER.info('Exporting Atlas') atlas_output = [] if atlas_on_single_file: res, error = QgsLayoutExporter.exportToPdf( atlas_composition, output_path, settings) atlas_output.append(output_path) else: res, error = QgsLayoutExporter.exportToPdfs( atlas_composition, output_directory, settings) if res != QgsLayoutExporter.Success: LOGGER.error(error) return atlas_output
[ "def", "atlas_renderer", "(", "layout", ",", "coverage_layer", ",", "output_path", ",", "file_format", ")", ":", "# set the composer map to be atlas driven", "composer_map", "=", "layout_item", "(", "layout", ",", "'impact-map'", ",", "QgsLayoutItemMap", ")", "composer_map", ".", "setAtlasDriven", "(", "True", ")", "composer_map", ".", "setAtlasScalingMode", "(", "QgsLayoutItemMap", ".", "Auto", ")", "# setup the atlas composition and composition atlas mode", "atlas_composition", "=", "layout", ".", "atlas", "(", ")", "atlas_composition", ".", "setCoverageLayer", "(", "coverage_layer", ")", "atlas_on_single_file", "=", "layout", ".", "customProperty", "(", "'singleFile'", ",", "True", ")", "if", "file_format", "==", "QgisComposerComponentsMetadata", ".", "OutputFormat", ".", "PDF", ":", "if", "not", "atlas_composition", ".", "filenameExpression", "(", ")", ":", "atlas_composition", ".", "setFilenameExpression", "(", "\"'output_'||@atlas_featurenumber\"", ")", "output_directory", "=", "os", ".", "path", ".", "dirname", "(", "output_path", ")", "# we need to set the predefined scales for atlas", "project_scales", "=", "[", "]", "scales", "=", "QgsProject", ".", "instance", "(", ")", ".", "readListEntry", "(", "\"Scales\"", ",", "\"/ScalesList\"", ")", "[", "0", "]", "has_project_scales", "=", "QgsProject", ".", "instance", "(", ")", ".", "readBoolEntry", "(", "\"Scales\"", ",", "\"/useProjectScales\"", ")", "[", "0", "]", "if", "not", "has_project_scales", "or", "not", "scales", ":", "scales_string", "=", "str", "(", "general_setting", "(", "\"Map/scales\"", ",", "PROJECT_SCALES", ")", ")", "scales", "=", "scales_string", ".", "split", "(", "','", ")", "for", "scale", "in", "scales", ":", "parts", "=", "scale", ".", "split", "(", "':'", ")", "if", "len", "(", "parts", ")", "==", "2", ":", "project_scales", ".", "append", "(", "float", "(", "parts", "[", "1", "]", ")", ")", "layout", ".", "reportContext", "(", ")", ".", "setPredefinedScales", "(", "project_scales", ")", "settings", "=", "QgsLayoutExporter", ".", "PdfExportSettings", "(", ")", "LOGGER", ".", "info", "(", "'Exporting Atlas'", ")", "atlas_output", "=", "[", "]", "if", "atlas_on_single_file", ":", "res", ",", "error", "=", "QgsLayoutExporter", ".", "exportToPdf", "(", "atlas_composition", ",", "output_path", ",", "settings", ")", "atlas_output", ".", "append", "(", "output_path", ")", "else", ":", "res", ",", "error", "=", "QgsLayoutExporter", ".", "exportToPdfs", "(", "atlas_composition", ",", "output_directory", ",", "settings", ")", "if", "res", "!=", "QgsLayoutExporter", ".", "Success", ":", "LOGGER", ".", "error", "(", "error", ")", "return", "atlas_output" ]
Extract composition using atlas generation. :param layout: QGIS Print Layout object used for producing the report. :type layout: qgis.core.QgsPrintLayout :param coverage_layer: Coverage Layer used for atlas map. :type coverage_layer: QgsMapLayer :param output_path: The output path of the product. :type output_path: str :param file_format: File format of map output, 'pdf' or 'png'. :type file_format: str :return: Generated output path(s). :rtype: str, list
[ "Extract", "composition", "using", "atlas", "generation", "." ]
python
train
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/manager.py
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L133-L160
def store(self, name, value, atype, new_name=None, multiplier=None, allowed_values=None): ''' store a config value in a dictionary, these values are used to populate a transfer spec validation -- check type, check allowed values and rename if required ''' if value is not None: _bad_type = (not isinstance(value, atype)) if not _bad_type: # special case _bad_type = (isinstance(value, bool) and atype == int) if _bad_type: # could be a special value if allowed_values and value in allowed_values: allowed_values = None else: raise ValueError("%s should be value of type (%s)" % (name, atype.__name__)) if allowed_values: if isinstance(value, str): if value not in allowed_values: raise ValueError("%s can be %s" % (name, allowed_values)) elif isinstance(value, int): if isinstance(allowed_values[0], int): if value < allowed_values[0]: raise ValueError("%s must be >= %d" % (name, allowed_values[0])) _val = value if not multiplier else (multiplier * value) _name = name if not new_name else new_name self._dict[_name] = _val
[ "def", "store", "(", "self", ",", "name", ",", "value", ",", "atype", ",", "new_name", "=", "None", ",", "multiplier", "=", "None", ",", "allowed_values", "=", "None", ")", ":", "if", "value", "is", "not", "None", ":", "_bad_type", "=", "(", "not", "isinstance", "(", "value", ",", "atype", ")", ")", "if", "not", "_bad_type", ":", "# special case", "_bad_type", "=", "(", "isinstance", "(", "value", ",", "bool", ")", "and", "atype", "==", "int", ")", "if", "_bad_type", ":", "# could be a special value", "if", "allowed_values", "and", "value", "in", "allowed_values", ":", "allowed_values", "=", "None", "else", ":", "raise", "ValueError", "(", "\"%s should be value of type (%s)\"", "%", "(", "name", ",", "atype", ".", "__name__", ")", ")", "if", "allowed_values", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "if", "value", "not", "in", "allowed_values", ":", "raise", "ValueError", "(", "\"%s can be %s\"", "%", "(", "name", ",", "allowed_values", ")", ")", "elif", "isinstance", "(", "value", ",", "int", ")", ":", "if", "isinstance", "(", "allowed_values", "[", "0", "]", ",", "int", ")", ":", "if", "value", "<", "allowed_values", "[", "0", "]", ":", "raise", "ValueError", "(", "\"%s must be >= %d\"", "%", "(", "name", ",", "allowed_values", "[", "0", "]", ")", ")", "_val", "=", "value", "if", "not", "multiplier", "else", "(", "multiplier", "*", "value", ")", "_name", "=", "name", "if", "not", "new_name", "else", "new_name", "self", ".", "_dict", "[", "_name", "]", "=", "_val" ]
store a config value in a dictionary, these values are used to populate a transfer spec validation -- check type, check allowed values and rename if required
[ "store", "a", "config", "value", "in", "a", "dictionary", "these", "values", "are", "used", "to", "populate", "a", "trasnfer", "spec", "validation", "--", "check", "type", "check", "allowed", "values", "and", "rename", "if", "required" ]
python
train
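A hedged sketch of the validation behavior above; TransferConfig is a made-up owner class that exposes store() and the _dict attribute.

cfg = TransferConfig()
cfg.store('target_rate', 10, int, new_name='target_rate_kbps',
          multiplier=1000)                     # stored as 10000
cfg.store('cipher', 'rot13', str,
          allowed_values=['aes-128', 'none'])  # ValueError: cipher can be [...]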
dwavesystems/penaltymodel
penaltymodel_core/penaltymodel/core/classes/specification.py
https://github.com/dwavesystems/penaltymodel/blob/b9d343233aea8df0f59cea45a07f12d0b3b8d9b3/penaltymodel_core/penaltymodel/core/classes/specification.py#L210-L237
def _check_ising_quadratic_ranges(quad_ranges, graph): """check correctness/populate defaults for ising_quadratic_ranges.""" if quad_ranges is None: quad_ranges = {} # first just populate the top level so we can rely on the structure for u in graph: if u not in quad_ranges: quad_ranges[u] = {} # next let's propagate and check what is already present for u, neighbors in iteritems(quad_ranges): for v, rang in iteritems(neighbors): # check the range rang = Specification._check_range(rang) if u in quad_ranges[v]: # it's symmetric if quad_ranges[u][v] != quad_ranges[v][u]: raise ValueError("mismatched ranges for ising_quadratic_ranges") quad_ranges[v][u] = quad_ranges[u][v] = rang # finally fill in the missing stuff for u, v in graph.edges: if u not in quad_ranges[v]: quad_ranges[u][v] = quad_ranges[v][u] = [-1, 1] return quad_ranges
[ "def", "_check_ising_quadratic_ranges", "(", "quad_ranges", ",", "graph", ")", ":", "if", "quad_ranges", "is", "None", ":", "quad_ranges", "=", "{", "}", "# first just populate the top level so we can rely on the structure", "for", "u", "in", "graph", ":", "if", "u", "not", "in", "quad_ranges", ":", "quad_ranges", "[", "u", "]", "=", "{", "}", "# next let's propgate and check what is already present", "for", "u", ",", "neighbors", "in", "iteritems", "(", "quad_ranges", ")", ":", "for", "v", ",", "rang", "in", "iteritems", "(", "neighbors", ")", ":", "# check the range", "rang", "=", "Specification", ".", "_check_range", "(", "rang", ")", "if", "u", "in", "quad_ranges", "[", "v", "]", ":", "# it's symmetric", "if", "quad_ranges", "[", "u", "]", "[", "v", "]", "!=", "quad_ranges", "[", "v", "]", "[", "u", "]", ":", "raise", "ValueError", "(", "\"mismatched ranges for ising_quadratic_ranges\"", ")", "quad_ranges", "[", "v", "]", "[", "u", "]", "=", "quad_ranges", "[", "u", "]", "[", "v", "]", "=", "rang", "# finally fill in the missing stuff", "for", "u", ",", "v", "in", "graph", ".", "edges", ":", "if", "u", "not", "in", "quad_ranges", "[", "v", "]", ":", "quad_ranges", "[", "u", "]", "[", "v", "]", "=", "quad_ranges", "[", "v", "]", "[", "u", "]", "=", "[", "-", "1", ",", "1", "]", "return", "quad_ranges" ]
check correctness/populate defaults for ising_quadratic_ranges.
[ "check", "correctness", "/", "populate", "defaults", "for", "ising_quadratic_ranges", "." ]
python
train
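A minimal sketch of the default-filling above on a small networkx graph.

import networkx as nx

G = nx.path_graph(3)   # edges (0, 1) and (1, 2)
ranges = _check_ising_quadratic_ranges({0: {1: [-2, 2]}}, G)
# ranges[0][1] == ranges[1][0] == [-2, 2]; the unspecified edge (1, 2)
# defaults to [-1, 1] on both sides.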
orbingol/NURBS-Python
geomdl/helpers.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/helpers.py#L115-L137
def find_multiplicity(knot, knot_vector, **kwargs): """ Finds knot multiplicity over the knot vector. Keyword Arguments: * ``tol``: tolerance (delta) value for equality checking :param knot: knot or parameter, :math:`u` :type knot: float :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :return: knot multiplicity, :math:`s` :rtype: int """ # Get tolerance value tol = kwargs.get('tol', 10e-8) mult = 0 # initial multiplicity for kv in knot_vector: if abs(knot - kv) <= tol: mult += 1 return mult
[ "def", "find_multiplicity", "(", "knot", ",", "knot_vector", ",", "*", "*", "kwargs", ")", ":", "# Get tolerance value", "tol", "=", "kwargs", ".", "get", "(", "'tol'", ",", "10e-8", ")", "mult", "=", "0", "# initial multiplicity", "for", "kv", "in", "knot_vector", ":", "if", "abs", "(", "knot", "-", "kv", ")", "<=", "tol", ":", "mult", "+=", "1", "return", "mult" ]
Finds knot multiplicity over the knot vector. Keyword Arguments: * ``tol``: tolerance (delta) value for equality checking :param knot: knot or parameter, :math:`u` :type knot: float :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :return: knot multiplicity, :math:`s` :rtype: int
[ "Finds", "knot", "multiplicity", "over", "the", "knot", "vector", "." ]
python
train
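A quick sketch of the find_multiplicity record above on a clamped knot vector.

U = [0, 0, 0, 0.5, 0.5, 1, 1, 1]
print(find_multiplicity(0.5, U))                    # -> 2
print(find_multiplicity(0.25, U))                   # -> 0
print(find_multiplicity(0.5 + 1e-9, U, tol=1e-8))   # within tolerance -> 2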
chriso/gauged
gauged/drivers/mysql.py
https://github.com/chriso/gauged/blob/cda3bba2f3e92ce2fb4aa92132dcc0e689bf7976/gauged/drivers/mysql.py#L299-L307
def get_namespace_statistics(self, namespace, start_offset, end_offset): """Get namespace statistics for the period between start_offset and end_offset (inclusive)""" cursor = self.cursor cursor.execute('SELECT SUM(data_points), SUM(byte_count) ' 'FROM gauged_statistics WHERE namespace = %s ' 'AND offset BETWEEN %s AND %s', (namespace, start_offset, end_offset)) return [long(count or 0) for count in cursor.fetchone()]
[ "def", "get_namespace_statistics", "(", "self", ",", "namespace", ",", "start_offset", ",", "end_offset", ")", ":", "cursor", "=", "self", ".", "cursor", "cursor", ".", "execute", "(", "'SELECT SUM(data_points), SUM(byte_count) '", "'FROM gauged_statistics WHERE namespace = %s '", "'AND offset BETWEEN %s AND %s'", ",", "(", "namespace", ",", "start_offset", ",", "end_offset", ")", ")", "return", "[", "long", "(", "count", "or", "0", ")", "for", "count", "in", "cursor", ".", "fetchone", "(", ")", "]" ]
Get namespace statistics for the period between start_offset and end_offset (inclusive)
[ "Get", "namespace", "statistics", "for", "the", "period", "between", "start_offset", "and", "end_offset", "(", "inclusive", ")" ]
python
train
TeamHG-Memex/eli5
eli5/sklearn/explain_weights.py
https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/explain_weights.py#L329-L377
def explain_decision_tree(estimator, vec=None, top=_TOP, target_names=None, targets=None, # ignored feature_names=None, feature_re=None, feature_filter=None, **export_graphviz_kwargs): """ Return an explanation of a decision tree. See :func:`eli5.explain_weights` for description of ``top``, ``target_names``, ``feature_names``, ``feature_re`` and ``feature_filter`` parameters. ``targets`` parameter is ignored. ``vec`` is a vectorizer instance used to transform raw features to the input of the estimator (e.g. a fitted CountVectorizer instance); you can pass it instead of ``feature_names``. All other keyword arguments are passed to `sklearn.tree.export_graphviz`_ function. .. _sklearn.tree.export_graphviz: http://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html """ feature_names = get_feature_names(estimator, vec, feature_names=feature_names) tree_feature_names = feature_names feature_names, flt_indices = feature_names.handle_filter( feature_filter, feature_re) feature_importances = get_feature_importances_filtered( estimator.feature_importances_, feature_names, flt_indices, top) export_graphviz_kwargs.setdefault("proportion", True) tree_info = get_tree_info( estimator, feature_names=tree_feature_names, class_names=target_names, **export_graphviz_kwargs) return Explanation( feature_importances=feature_importances, decision_tree=tree_info, description=DESCRIPTION_DECISION_TREE, estimator=repr(estimator), method='decision tree', )
[ "def", "explain_decision_tree", "(", "estimator", ",", "vec", "=", "None", ",", "top", "=", "_TOP", ",", "target_names", "=", "None", ",", "targets", "=", "None", ",", "# ignored", "feature_names", "=", "None", ",", "feature_re", "=", "None", ",", "feature_filter", "=", "None", ",", "*", "*", "export_graphviz_kwargs", ")", ":", "feature_names", "=", "get_feature_names", "(", "estimator", ",", "vec", ",", "feature_names", "=", "feature_names", ")", "tree_feature_names", "=", "feature_names", "feature_names", ",", "flt_indices", "=", "feature_names", ".", "handle_filter", "(", "feature_filter", ",", "feature_re", ")", "feature_importances", "=", "get_feature_importances_filtered", "(", "estimator", ".", "feature_importances_", ",", "feature_names", ",", "flt_indices", ",", "top", ")", "export_graphviz_kwargs", ".", "setdefault", "(", "\"proportion\"", ",", "True", ")", "tree_info", "=", "get_tree_info", "(", "estimator", ",", "feature_names", "=", "tree_feature_names", ",", "class_names", "=", "target_names", ",", "*", "*", "export_graphviz_kwargs", ")", "return", "Explanation", "(", "feature_importances", "=", "feature_importances", ",", "decision_tree", "=", "tree_info", ",", "description", "=", "DESCRIPTION_DECISION_TREE", ",", "estimator", "=", "repr", "(", "estimator", ")", ",", "method", "=", "'decision tree'", ",", ")" ]
Return an explanation of a decision tree. See :func:`eli5.explain_weights` for description of ``top``, ``target_names``, ``feature_names``, ``feature_re`` and ``feature_filter`` parameters. ``targets`` parameter is ignored. ``vec`` is a vectorizer instance used to transform raw features to the input of the estimator (e.g. a fitted CountVectorizer instance); you can pass it instead of ``feature_names``. All other keyword arguments are passed to `sklearn.tree.export_graphviz`_ function. .. _sklearn.tree.export_graphviz: http://scikit-learn.org/stable/modules/generated/sklearn.tree.export_graphviz.html
[ "Return", "an", "explanation", "of", "a", "decision", "tree", "." ]
python
train
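A hedged usage sketch for the explain_decision_tree record above with a toy fitted scikit-learn tree.

from sklearn.tree import DecisionTreeClassifier

clf = DecisionTreeClassifier().fit([[0, 0], [1, 1], [1, 0], [0, 1]],
                                   [0, 1, 1, 0])
expl = explain_decision_tree(clf, feature_names=['f0', 'f1'],
                             target_names=['neg', 'pos'])
print(expl.method)   # -> 'decision tree'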
tensorflow/tensor2tensor
tensor2tensor/layers/common_video.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_video.py#L128-L156
def scheduled_sample_count(ground_truth_x, generated_x, batch_size, scheduled_sample_var): """Sample batch with specified mix of groundtruth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size scheduled_sample_var: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x. """ num_ground_truth = scheduled_sample_var idx = tf.random_shuffle(tf.range(batch_size)) ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth)) generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size)) ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx) generated_examps = tf.gather(generated_x, generated_idx) output = tf.dynamic_stitch([ground_truth_idx, generated_idx], [ground_truth_examps, generated_examps]) # if batch size is known set it. if isinstance(batch_size, int): output.set_shape([batch_size] + common_layers.shape_list(output)[1:]) return output
[ "def", "scheduled_sample_count", "(", "ground_truth_x", ",", "generated_x", ",", "batch_size", ",", "scheduled_sample_var", ")", ":", "num_ground_truth", "=", "scheduled_sample_var", "idx", "=", "tf", ".", "random_shuffle", "(", "tf", ".", "range", "(", "batch_size", ")", ")", "ground_truth_idx", "=", "tf", ".", "gather", "(", "idx", ",", "tf", ".", "range", "(", "num_ground_truth", ")", ")", "generated_idx", "=", "tf", ".", "gather", "(", "idx", ",", "tf", ".", "range", "(", "num_ground_truth", ",", "batch_size", ")", ")", "ground_truth_examps", "=", "tf", ".", "gather", "(", "ground_truth_x", ",", "ground_truth_idx", ")", "generated_examps", "=", "tf", ".", "gather", "(", "generated_x", ",", "generated_idx", ")", "output", "=", "tf", ".", "dynamic_stitch", "(", "[", "ground_truth_idx", ",", "generated_idx", "]", ",", "[", "ground_truth_examps", ",", "generated_examps", "]", ")", "# if batch size is known set it.", "if", "isinstance", "(", "batch_size", ",", "int", ")", ":", "output", ".", "set_shape", "(", "[", "batch_size", "]", "+", "common_layers", ".", "shape_list", "(", "output", ")", "[", "1", ":", "]", ")", "return", "output" ]
Sample batch with specified mix of groundtruth and generated data points. Args: ground_truth_x: tensor of ground-truth data points. generated_x: tensor of generated data points. batch_size: batch size scheduled_sample_var: number of ground-truth examples to include in batch. Returns: New batch with num_ground_truth sampled from ground_truth_x and the rest from generated_x.
[ "Sample", "batch", "with", "specified", "mix", "of", "groundtruth", "and", "generated", "data", "points", "." ]
python
train
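To make the stitching logic above easy to check without a TF1 session, here is a NumPy re-implementation sketch (illustrative, not the source API); it shuffles row indices, then fills num_ground_truth positions from the ground-truth batch and the rest from the generated batch:

# Illustrative NumPy analogue of scheduled_sample_count (not the source API).
import numpy as np

def scheduled_sample_count_np(ground_truth_x, generated_x, num_ground_truth, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    batch_size = ground_truth_x.shape[0]
    idx = rng.permutation(batch_size)            # analogue of tf.random_shuffle
    gt_idx, gen_idx = idx[:num_ground_truth], idx[num_ground_truth:]
    out = np.empty_like(ground_truth_x)
    out[gt_idx] = ground_truth_x[gt_idx]         # analogue of tf.dynamic_stitch
    out[gen_idx] = generated_x[gen_idx]
    return out

gt, gen = np.zeros((4, 2)), np.ones((4, 2))
print(scheduled_sample_count_np(gt, gen, num_ground_truth=2))  # two rows of zeros, two of ones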
mikedh/trimesh
trimesh/path/path.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/path/path.py#L651-L663
def discrete(self): """ A sequence of connected vertices in space, corresponding to self.paths. Returns --------- discrete : (len(self.paths),) A sequence of (m*, dimension) float """ discrete = np.array([self.discretize_path(i) for i in self.paths]) return discrete
[ "def", "discrete", "(", "self", ")", ":", "discrete", "=", "np", ".", "array", "(", "[", "self", ".", "discretize_path", "(", "i", ")", "for", "i", "in", "self", ".", "paths", "]", ")", "return", "discrete" ]
A sequence of connected vertices in space, corresponding to self.paths. Returns --------- discrete : (len(self.paths),) A sequence of (m*, dimension) float
[ "A", "sequence", "of", "connected", "vertices", "in", "space", "corresponding", "to", "self", ".", "paths", "." ]
python
train
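A hedged usage sketch for the discrete property above, assuming trimesh.load_path accepts an ordered (n, 2) vertex array as a polyline; the square geometry is made up:

# Illustrative usage of Path2D.discrete (geometry is made up for the sketch).
import numpy as np
import trimesh

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=float)
path = trimesh.load_path(square)

# One (m, 2) float polyline per entry in path.paths.
for polyline in path.discrete:
    print(polyline.shape)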
amzn/ion-python
amazon/ion/reader_text.py
https://github.com/amzn/ion-python/blob/0b21fa3ba7755f55f745e4aa970d86343b82449d/amazon/ion/reader_text.py#L482-L493
def set_annotation(self): """Appends the context's ``pending_symbol`` to its ``annotations`` sequence.""" assert self.pending_symbol is not None assert not self.value annotations = (_as_symbol(self.pending_symbol, is_symbol_value=False),) # pending_symbol becomes an annotation self.annotations = annotations if not self.annotations else self.annotations + annotations self.ion_type = None self.pending_symbol = None # reset pending symbol self.quoted_text = False self.line_comment = False self.is_self_delimiting = False return self
[ "def", "set_annotation", "(", "self", ")", ":", "assert", "self", ".", "pending_symbol", "is", "not", "None", "assert", "not", "self", ".", "value", "annotations", "=", "(", "_as_symbol", "(", "self", ".", "pending_symbol", ",", "is_symbol_value", "=", "False", ")", ",", ")", "# pending_symbol becomes an annotation", "self", ".", "annotations", "=", "annotations", "if", "not", "self", ".", "annotations", "else", "self", ".", "annotations", "+", "annotations", "self", ".", "ion_type", "=", "None", "self", ".", "pending_symbol", "=", "None", "# reset pending symbol", "self", ".", "quoted_text", "=", "False", "self", ".", "line_comment", "=", "False", "self", ".", "is_self_delimiting", "=", "False", "return", "self" ]
Appends the context's ``pending_symbol`` to its ``annotations`` sequence.
[ "Appends", "the", "context", "s", "pending_symbol", "to", "its", "annotations", "sequence", "." ]
python
train
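The method above mutates internal parser state; this standalone sketch (the _Context class and its field subset are hypothetical, not the library's API) mimics the same transition, promoting a pending symbol into the annotations tuple and resetting the scratch fields:

# Hypothetical mini-context illustrating the pending_symbol -> annotations move.
class _Context:
    def __init__(self):
        self.pending_symbol = None
        self.annotations = ()
        self.value = None
        self.ion_type = None
        self.quoted_text = False
        self.line_comment = False
        self.is_self_delimiting = False

    def set_annotation(self):
        assert self.pending_symbol is not None and not self.value
        self.annotations += (self.pending_symbol,)  # promote token to annotation
        self.pending_symbol = None                  # reset scratch state
        self.ion_type = None
        self.quoted_text = self.line_comment = self.is_self_delimiting = False
        return self

ctx = _Context()
ctx.pending_symbol = 'my_annotation'
print(ctx.set_annotation().annotations)  # ('my_annotation',)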
timdiels/pytil
pytil/path.py
https://github.com/timdiels/pytil/blob/086a3f8d52caecdd9d1c9f66c8d8a6d38667b00b/pytil/path.py#L91-L146
def chmod(path, mode, operator='=', recursive=False): ''' Change file mode bits. When recursively chmodding a directory, executable bits in ``mode`` are ignored when applying to a regular file. E.g. ``chmod(path, mode=0o777, recursive=True)`` would apply ``mode=0o666`` to regular files. Symlinks are ignored. Parameters ---------- path : ~pathlib.Path Path to chmod. mode : int Mode bits to apply, e.g. ``0o777``. operator : str How to apply the mode bits to the file, one of: '=' Replace mode with given mode. '+' Add to current mode. '-' Subtract from current mode. recursive : bool Whether to chmod recursively. ''' if mode > 0o777 and operator != '=': raise ValueError('Special bits (i.e. >0o777) only supported when using "=" operator') # first chmod path if operator == '+': mode_ = path.stat().st_mode | mode elif operator == '-': mode_ = path.stat().st_mode & ~mode else: mode_ = mode if path.is_symlink(): # Do not chmod or follow symlinks return path.chmod(mode_) # then its children def chmod_children(parent, files, mode_mask, operator): for file in files: with suppress(FileNotFoundError): file = parent / file if not file.is_symlink(): chmod(file, mode & mode_mask, operator) if recursive and path.is_dir(): for parent, dirs, files in os.walk(str(path)): parent = Path(parent) chmod_children(parent, dirs, 0o777777, operator) chmod_children(parent, files, 0o777666, operator)
[ "def", "chmod", "(", "path", ",", "mode", ",", "operator", "=", "'='", ",", "recursive", "=", "False", ")", ":", "if", "mode", ">", "0o777", "and", "operator", "!=", "'='", ":", "raise", "ValueError", "(", "'Special bits (i.e. >0o777) only supported when using \"=\" operator'", ")", "# first chmod path", "if", "operator", "==", "'+'", ":", "mode_", "=", "path", ".", "stat", "(", ")", ".", "st_mode", "|", "mode", "elif", "operator", "==", "'-'", ":", "mode_", "=", "path", ".", "stat", "(", ")", ".", "st_mode", "&", "~", "mode", "else", ":", "mode_", "=", "mode", "if", "path", ".", "is_symlink", "(", ")", ":", "# Do not chmod or follow symlinks", "return", "path", ".", "chmod", "(", "mode_", ")", "# then its children", "def", "chmod_children", "(", "parent", ",", "files", ",", "mode_mask", ",", "operator", ")", ":", "for", "file", "in", "files", ":", "with", "suppress", "(", "FileNotFoundError", ")", ":", "file", "=", "parent", "/", "file", "if", "not", "file", ".", "is_symlink", "(", ")", ":", "chmod", "(", "file", ",", "mode", "&", "mode_mask", ",", "operator", ")", "if", "recursive", "and", "path", ".", "is_dir", "(", ")", ":", "for", "parent", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "str", "(", "path", ")", ")", ":", "parent", "=", "Path", "(", "parent", ")", "chmod_children", "(", "parent", ",", "dirs", ",", "0o777777", ",", "operator", ")", "chmod_children", "(", "parent", ",", "files", ",", "0o777666", ",", "operator", ")" ]
Change file mode bits. When recursively chmodding a directory, executable bits in ``mode`` are ignored when applying to a regular file. E.g. ``chmod(path, mode=0o777, recursive=True)`` would apply ``mode=0o666`` to regular files. Symlinks are ignored. Parameters ---------- path : ~pathlib.Path Path to chmod. mode : int Mode bits to apply, e.g. ``0o777``. operator : str How to apply the mode bits to the file, one of: '=' Replace mode with given mode. '+' Add to current mode. '-' Subtract from current mode. recursive : bool Whether to chmod recursively.
[ "Change", "file", "mode", "bits", "." ]
python
train
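A hedged usage sketch for the chmod helper above; the target path is illustrative:

# Illustrative calls (the path is made up; requires pytil installed).
from pathlib import Path
from pytil.path import chmod

target = Path('some_dir')            # hypothetical directory
chmod(target, 0o755)                 # default '=' operator: replace mode bits
chmod(target, 0o111, operator='+')   # add execute bits to the current mode
# Recursive: regular files get mode & 0o777666 (execute bits dropped),
# directories keep the full mode.
chmod(target, 0o777, recursive=True)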
jobovy/galpy
galpy/df/streamdf.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/df/streamdf.py#L3077-L3090
def sample_t(self,n): """ NAME: sample_t PURPOSE: generate a stripping time (time since stripping); simple implementation could be replaced by more complicated distributions in sub-classes of streamdf INPUT: n - number of points to return OUTPUT: array of n stripping times HISTORY: 2015-09-16 - Written - Bovy (UofT) """ return numpy.random.uniform(size=n)*self._tdisrupt
[ "def", "sample_t", "(", "self", ",", "n", ")", ":", "return", "numpy", ".", "random", ".", "uniform", "(", "size", "=", "n", ")", "*", "self", ".", "_tdisrupt" ]
NAME: sample_t PURPOSE: generate a stripping time (time since stripping); simple implementation could be replaced by more complicated distributions in sub-classes of streamdf INPUT: n - number of points to return OUTPUT: array of n stripping times HISTORY: 2015-09-16 - Written - Bovy (UofT)
[ "NAME", ":", "sample_t", "PURPOSE", ":", "generate", "a", "stripping", "time", "(", "time", "since", "stripping", ")", ";", "simple", "implementation", "could", "be", "replaced", "by", "more", "complicated", "distributions", "in", "sub", "-", "classes", "of", "streamdf", "INPUT", ":", "n", "-", "number", "of", "points", "to", "return", "OUTPUT", ":", "array", "of", "n", "stripping", "times", "HISTORY", ":", "2015", "-", "09", "-", "16", "-", "Written", "-", "Bovy", "(", "UofT", ")" ]
python
train
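Since the sampler above is just a uniform draw over the disruption time, an equivalent standalone sketch is easy to verify (the tdisrupt value is illustrative; in the class it comes from self._tdisrupt):

# Stripping times are uniform on [0, tdisrupt); sdf.sample_t(n) is equivalent to:
import numpy
tdisrupt = 4.5   # illustrative disruption time (internal galpy units)
n = 1000
times = numpy.random.uniform(size=n) * tdisrupt
print(times.min(), times.max())   # all samples fall in [0, tdisrupt)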
Clinical-Genomics/scout
scout/server/blueprints/variants/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/variants/controllers.py#L864-L871
def callers(variant_obj, category='snv'): """Return info about callers.""" calls = set() for caller in CALLERS[category]: if variant_obj.get(caller['id']): calls.add((caller['name'], variant_obj[caller['id']])) return list(calls)
[ "def", "callers", "(", "variant_obj", ",", "category", "=", "'snv'", ")", ":", "calls", "=", "set", "(", ")", "for", "caller", "in", "CALLERS", "[", "category", "]", ":", "if", "variant_obj", ".", "get", "(", "caller", "[", "'id'", "]", ")", ":", "calls", ".", "add", "(", "(", "caller", "[", "'name'", "]", ",", "variant_obj", "[", "caller", "[", "'id'", "]", "]", ")", ")", "return", "list", "(", "calls", ")" ]
Return info about callers.
[ "Return", "info", "about", "callers", "." ]
python
test
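A self-contained sketch of the callers helper above; the CALLERS mapping and variant document are illustrative stand-ins, not scout's real configuration, and the function body is copied from the record so the demo runs:

# Illustrative stand-ins for scout's CALLERS config and a variant document.
CALLERS = {
    'snv': [
        {'id': 'gatk', 'name': 'GATK'},
        {'id': 'samtools', 'name': 'SAMtools'},
    ],
}

def callers(variant_obj, category='snv'):
    calls = set()
    for caller in CALLERS[category]:
        if variant_obj.get(caller['id']):
            calls.add((caller['name'], variant_obj[caller['id']]))
    return list(calls)

variant = {'gatk': 'Pass', 'samtools': 'Filtered'}
print(sorted(callers(variant)))  # [('GATK', 'Pass'), ('SAMtools', 'Filtered')]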