Dataset columns and per-column statistics:

  repo              string, lengths 7-54 chars
  path              string, lengths 4-192 chars
  url               string, lengths 87-284 chars
  code              string, lengths 78-104k chars
  code_tokens       sequence
  docstring         string, lengths 1-46.9k chars
  docstring_tokens  sequence
  language          string, 1 distinct value
  partition         string, 3 distinct values
kivy/python-for-android
pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/compiler.py
https://github.com/kivy/python-for-android/blob/8e0e8056bc22e4d5bd3398a6b0301f38ff167933/pythonforandroid/bootstraps/pygame/build/buildlib/jinja2.egg/jinja2/compiler.py#L861-L878
```python
def visit_Block(self, node, frame):
    """Call a block and register it for the template."""
    level = 1
    if frame.toplevel:
        # if we know that we are a child template, there is no need to
        # check if we are one
        if self.has_known_extends:
            return
        if self.extends_so_far > 0:
            self.writeline('if parent_template is None:')
            self.indent()
            level += 1
    context = node.scoped and 'context.derived(locals())' or 'context'
    self.writeline('for event in context.blocks[%r][0](%s):' % (
                   node.name, context), node)
    self.indent()
    self.simple_write('event', frame)
    self.outdent(level)
```
python
train
evhub/coconut
coconut/compiler/grammar.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/grammar.py#L445-L461
```python
def op_funcdef_handle(tokens):
    """Process infix defs."""
    func, base_args = get_infix_items(tokens)
    args = []
    for arg in base_args[:-1]:
        rstrip_arg = arg.rstrip()
        if not rstrip_arg.endswith(unwrapper):
            if not rstrip_arg.endswith(","):
                arg += ", "
            elif arg.endswith(","):
                arg += " "
        args.append(arg)
    last_arg = base_args[-1]
    if last_arg.rstrip().endswith(","):
        last_arg = last_arg.rsplit(",")[0]
    args.append(last_arg)
    return func + "(" + "".join(args) + ")"
```
python
train
iotile/coretools
iotileemulate/iotile/emulate/reference/reference_device.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/reference_device.py#L160-L169
```python
def close_streaming_interface(self):
    """Called when someone closes the streaming interface to the device.

    This method will automatically notify sensor_graph that there is no
    longer a streaming interface opened.
    """

    super(ReferenceDevice, self).close_streaming_interface()
    self.rpc(8, rpcs.SG_GRAPH_INPUT, 8, streams.COMM_TILE_CLOSED)
```
python
train
saltstack/salt
salt/runners/bgp.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/bgp.py#L203-L225
```python
def _display_runner(rows, labels, title,
                    display=_DEFAULT_DISPLAY,
                    outputter=_DEFAULT_OUTPUTTER):
    '''
    Display or return the rows.
    '''
    if display:
        if outputter == 'table':
            ret = salt.output.out_format({'rows': rows, 'labels': labels},
                                         'table',
                                         __opts__,
                                         title=title,
                                         rows_key='rows',
                                         labels_key='labels')
        else:
            ret = salt.output.out_format(rows, outputter, __opts__)
        print(ret)
    else:
        return rows
```
python
train
saltstack/salt
salt/modules/elasticsearch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/elasticsearch.py#L517-L537
```python
def index_delete(index, hosts=None, profile=None):
    '''
    Delete an index

    index
        Index name

    CLI example::

        salt myminion elasticsearch.index_delete testindex
    '''
    es = _get_instance(hosts, profile)

    try:
        result = es.indices.delete(index=index)
        return result.get('acknowledged', False)
    except elasticsearch.exceptions.NotFoundError:
        return True
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot delete index {0}, server returned code {1} with message {2}".format(index, e.status_code, e.error))
```
python
train
insightindustry/validator-collection
validator_collection/validators.py
https://github.com/insightindustry/validator-collection/blob/8c8047a0fa36cc88a021771279898278c4cc98e3/validator_collection/validators.py#L2422-L2471
```python
def ip_address(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is a valid IP address.

    .. note::

      First, the validator will check if the address is a valid IPv6 address.
      If that doesn't work, the validator will check if the address is a valid
      IPv4 address.

      If neither works, the validator will raise an error (as always).

    :param value: The value to validate.

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty``
      is ``False``
    :raises InvalidIPAddressError: if ``value`` is not a valid IP address or
      empty with ``allow_empty`` set to ``True``
    """
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if is_py2 and value and isinstance(value, unicode):
        value = value.encode('utf-8')

    try:
        value = ipv6(value, force_run = True)  # pylint: disable=E1123
        ipv6_failed = False
    except ValueError:
        ipv6_failed = True

    if ipv6_failed:
        try:
            value = ipv4(value, force_run = True)  # pylint: disable=E1123
        except ValueError:
            raise errors.InvalidIPAddressError('value (%s) is not a valid IPv6 or '
                                               'IPv4 address' % value)

    return value
```
python
train
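A quick usage sketch for the validator above, following the package layout in the record's path (`validator_collection.validators`); the sample addresses are illustrative only:

```python
from validator_collection import validators, errors

validators.ip_address('10.0.0.1')              # returns '10.0.0.1' (valid IPv4)
validators.ip_address('::1')                   # returns '::1' (valid IPv6)
validators.ip_address(None, allow_empty=True)  # returns None instead of raising

try:
    validators.ip_address('not-an-address')
except errors.InvalidIPAddressError:
    pass  # raised when the value is neither IPv6 nor IPv4
```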
openstates/billy
billy/web/public/views/bills.py
https://github.com/openstates/billy/blob/5fc795347f12a949e410a8cfad0c911ea6bced67/billy/web/public/views/bills.py#L404-L425
```python
def vote(request, abbr, vote_id):
    '''
    Context:
        - abbr
        - metadata
        - bill
        - vote
        - nav_active

    Templates:
        - vote.html
    '''
    vote = db.votes.find_one(vote_id)
    if vote is None:
        raise Http404('no such vote: {0}'.format(vote_id))

    bill = vote.bill()

    return render(request, templatename('vote'),
                  dict(abbr=abbr,
                       metadata=Metadata.get_object(abbr),
                       bill=bill,
                       vote=vote,
                       nav_active='bills'))
```
python
train
tanghaibao/goatools
goatools/gosubdag/godag_rcnt_init.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/gosubdag/godag_rcnt_init.py#L71-L83
```python
def get_depth2goobjs(go2obj, max_depth=2):
    """Init depth2goobjs using list sorted by depth, get level-00/01 GO terms."""
    depth2goobjs = {d: list() for d in range(max_depth + 1)}
    goid_seen = set()
    for _, goobj in sorted(go2obj.items(), key=lambda t: t[1].depth):
        # Save depth-00, depth-01, depth-02
        if goobj.depth > max_depth:
            break
        goid = goobj.id
        if not goobj.is_obsolete and goid not in goid_seen:
            depth2goobjs[goobj.depth].append(goobj)
            goid_seen.add(goid)
    return depth2goobjs
```
python
train
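A minimal usage sketch for `get_depth2goobjs`, assuming the function is importable at module level and that a GO DAG has been loaded with goatools' `GODag` parser; the `go-basic.obo` filename is a placeholder for a local copy of the ontology:

```python
from goatools.obo_parser import GODag
from goatools.gosubdag.godag_rcnt_init import get_depth2goobjs

godag = GODag("go-basic.obo")  # placeholder path to a downloaded ontology file
depth2goobjs = get_depth2goobjs(godag, max_depth=2)
for depth, goobjs in sorted(depth2goobjs.items()):
    print(depth, len(goobjs))  # counts of non-obsolete terms at depths 0-2
```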
dshean/demcoreg
demcoreg/dem_mask.py
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L143-L158
```python
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
    """Generate raster mask for exposed bare ground from global bareground data
    """
    print("Loading bareground")
    b = bareground_ds.GetRasterBand(1)
    l = b.ReadAsArray()
    print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
    if bareground_thresh < 0.0 or bareground_thresh > 100.0:
        sys.exit("Invalid bare ground percentage")
    mask = (l > bareground_thresh)
    # Write out original data
    if out_fn is not None:
        print("Writing out %s" % out_fn)
        iolib.writeGTiff(l, out_fn, bareground_ds)
    l = None
    return mask
```
python
train
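A hedged usage sketch: `get_bareground_mask` takes an opened GDAL dataset, so something like the following should work, with `bareground.tif` standing in for the global bare-ground raster:

```python
from osgeo import gdal
from demcoreg.dem_mask import get_bareground_mask

ds = gdal.Open('bareground.tif')  # placeholder raster filename
mask = get_bareground_mask(ds, bareground_thresh=60)
print(mask.sum(), 'pixels exceed 60% bare ground')
```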
PyCQA/pylint
pylint/graph.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/graph.py#L158-L170
```python
def get_cycles(graph_dict, vertices=None):
    """given a dictionary representing an ordered graph (i.e. keys are
    vertices and values are lists of destination vertices representing
    edges), return a list of detected cycles
    """
    if not graph_dict:
        return ()
    result = []
    if vertices is None:
        vertices = graph_dict.keys()
    for vertice in vertices:
        _get_cycles(graph_dict, [], set(), result, vertice)
    return result
```
python
test
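A small illustration of `get_cycles`, assuming it is importable from `pylint.graph` as in the linked revision; the expected output is inferred from the code and its ordering may differ:

```python
from pylint.graph import get_cycles

graph = {1: [2], 2: [3], 3: [1, 4], 4: []}
print(get_cycles(graph))  # expected: [[1, 2, 3]], the 1 -> 2 -> 3 -> 1 loop
```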
peterwittek/ncpol2sdpa
ncpol2sdpa/sdp_relaxation.py
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/sdp_relaxation.py#L1290-L1434
```python
def get_relaxation(self, level, objective=None, inequalities=None,
                   equalities=None, substitutions=None,
                   momentinequalities=None, momentequalities=None,
                   momentsubstitutions=None, removeequalities=False,
                   extramonomials=None, extramomentmatrices=None,
                   extraobjexpr=None, localizing_monomials=None,
                   chordal_extension=False):
    """Get the SDP relaxation of a noncommutative polynomial optimization
    problem.

    :param level: The level of the relaxation. The value -1 will skip
                  automatic monomial generation and use only the monomials
                  supplied by the option `extramonomials`.
    :type level: int.
    :param objective: Optional parameter to describe the objective function.
    :type objective: :class:`sympy.core.exp.Expr`.
    :param inequalities: Optional parameter to list inequality constraints.
    :type inequalities: list of :class:`sympy.core.exp.Expr`.
    :param equalities: Optional parameter to list equality constraints.
    :type equalities: list of :class:`sympy.core.exp.Expr`.
    :param substitutions: Optional parameter containing monomials that can
                          be replaced (e.g., idempotent variables).
    :type substitutions: dict of :class:`sympy.core.exp.Expr`.
    :param momentinequalities: Optional parameter of inequalities defined
                               on moments.
    :type momentinequalities: list of :class:`sympy.core.exp.Expr`.
    :param momentequalities: Optional parameter of equalities defined
                             on moments.
    :type momentequalities: list of :class:`sympy.core.exp.Expr`.
    :param momentsubstitutions: Optional parameter containing moments that
                                can be replaced.
    :type momentsubstitutions: dict of :class:`sympy.core.exp.Expr`.
    :param removeequalities: Optional parameter to attempt removing the
                             equalities by solving the linear equations.
    :type removeequalities: bool.
    :param extramonomials: Optional parameter of monomials to be included,
                           on top of the requested level of relaxation.
    :type extramonomials: list of :class:`sympy.core.exp.Expr`.
    :param extramomentmatrices: Optional parameter of duplicating or adding
                                moment matrices. A new moment matrix can be
                                unconstrained (""), a copy of the first one
                                ("copy"), and satisfying a partial positivity
                                constraint ("ppt"). Each new moment matrix is
                                requested as a list of strings of these
                                options. For instance, adding a single new
                                moment matrix as a copy of the first would be
                                ``extramomentmatrices=[["copy"]]``.
    :type extramomentmatrices: list of list of str.
    :param extraobjexpr: Optional parameter of a string expression of a
                         linear combination of moment matrix elements to be
                         included in the objective function.
    :type extraobjexpr: str.
    :param localizing_monomials: Optional parameter to specify sets of
                                 localizing monomials for each constraint.
                                 The internal order of constraints is
                                 inequalities first, followed by the
                                 equalities. If the parameter is specified,
                                 but for a certain constraint the automatic
                                 localization is requested, leave None in
                                 its place in this parameter.
    :type localizing_monomials: list of list of `sympy.core.exp.Expr`.
    :param chordal_extension: Optional parameter to request a sparse chordal
                              extension.
    :type chordal_extension: bool.
    """
    if self.level < -1:
        raise Exception("Invalid level of relaxation")
    self.level = level
    if substitutions is None:
        self.substitutions = {}
    else:
        self.substitutions = substitutions
        for lhs, rhs in substitutions.items():
            if not is_pure_substitution_rule(lhs, rhs):
                self.pure_substitution_rules = False
            if iscomplex(lhs) or iscomplex(rhs):
                self.complex_matrix = True
    if momentsubstitutions is not None:
        self.moment_substitutions = momentsubstitutions.copy()
        # If we have a real-valued problem, the moment matrix is symmetric
        # and moment substitutions also apply to the conjugate monomials
        if not self.complex_matrix:
            for key, val in self.moment_substitutions.copy().items():
                adjoint_monomial = apply_substitutions(key.adjoint(),
                                                       self.substitutions)
                self.moment_substitutions[adjoint_monomial] = val
    if chordal_extension:
        self.variables = find_variable_cliques(self.variables, objective,
                                               inequalities, equalities,
                                               momentinequalities,
                                               momentequalities)
    self.__generate_monomial_sets(extramonomials)
    self.localizing_monomial_sets = localizing_monomials

    # Figure out basic structure of the SDP
    self._calculate_block_structure(inequalities, equalities,
                                    momentinequalities, momentequalities,
                                    extramomentmatrices, removeequalities)
    self._estimate_n_vars()
    if extramomentmatrices is not None:
        for parameters in extramomentmatrices:
            copy = False
            for parameter in parameters:
                if parameter == "copy":
                    copy = True
            if copy:
                self.n_vars += self.n_vars + 1
            else:
                self.n_vars += (self.block_struct[0]**2)/2
    if self.complex_matrix:
        dtype = np.complex128
    else:
        dtype = np.float64
    self.F = lil_matrix((sum([bs**2 for bs in self.block_struct]),
                         self.n_vars + 1), dtype=dtype)

    if self.verbose > 0:
        print(('Estimated number of SDP variables: %d' % self.n_vars))
        print('Generating moment matrix...')
    # Generate moment matrices
    new_n_vars, block_index = self.__add_parameters()
    self._time0 = time.time()
    new_n_vars, block_index = \
        self._generate_all_moment_matrix_blocks(new_n_vars, block_index)
    if extramomentmatrices is not None:
        new_n_vars, block_index = \
            self.__add_extra_momentmatrices(extramomentmatrices,
                                            new_n_vars, block_index)
    # The initial estimate for the size of F was overly generous.
    self.n_vars = new_n_vars
    # We don't correct the size of F, because that would trigger
    # memory copies, and extra columns in lil_matrix are free anyway.
    # self.F = self.F[:, 0:self.n_vars + 1]
    if self.verbose > 0:
        print(('Reduced number of SDP variables: %d' % self.n_vars))
    # Objective function
    self.set_objective(objective, extraobjexpr)
    # Process constraints
    self.constraint_starting_block = block_index
    self.process_constraints(inequalities, equalities, momentinequalities,
                             momentequalities, block_index,
                             removeequalities)
```
python
train
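For context, a sketch of how `get_relaxation` is typically driven, modeled on the project's README-style examples; the helper names (`generate_operators`, `SdpRelaxation`) are assumptions that may differ between ncpol2sdpa versions:

```python
from ncpol2sdpa import generate_operators, SdpRelaxation

X = generate_operators('X', 2, hermitian=True)       # two noncommuting operators
sdp = SdpRelaxation(X)
sdp.get_relaxation(level=2,
                   objective=X[0] * X[1] + X[1] * X[0],
                   inequalities=[-X[1] ** 2 + X[1] + 0.5],
                   substitutions={X[0] ** 2: X[0]})   # X0 is idempotent
```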
Grk0/python-libconf
libconf.py
https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L575-L606
```python
def get_dump_type(value):
    '''Get the libconfig datatype of a value

    Return values: ``'d'`` (dict), ``'l'`` (list), ``'a'`` (array),
    ``'i'`` (integer), ``'i64'`` (long integer), ``'b'`` (bool),
    ``'f'`` (float), or ``'s'`` (string).

    Produces the proper type for LibconfList, LibconfArray, LibconfInt64
    instances.
    '''
    if isinstance(value, dict):
        return 'd'
    if isinstance(value, tuple):
        return 'l'
    if isinstance(value, list):
        return 'a'

    # Test bool before int since isinstance(True, int) == True.
    if isinstance(value, bool):
        return 'b'
    if isint(value):
        if is_long_int(value):
            return 'i64'
        else:
            return 'i'
    if isinstance(value, float):
        return 'f'
    if isstr(value):
        return 's'

    return None
```
python
train
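The dispatch order above is easy to check interactively; this sketch assumes `get_dump_type` is reachable via `import libconf` (it is module-level in `libconf.py`) and that `is_long_int` flags values outside the 32-bit range:

```python
import libconf

print(libconf.get_dump_type({'a': 1}))  # 'd': dict maps to a libconfig group
print(libconf.get_dump_type((1, 2)))    # 'l': tuple maps to a libconfig list
print(libconf.get_dump_type([1, 2]))    # 'a': list maps to a libconfig array
print(libconf.get_dump_type(True))      # 'b': bool is tested before int
print(libconf.get_dump_type(3))         # 'i'
print(libconf.get_dump_type(2 ** 40))   # 'i64', assuming a 32-bit overflow check
print(libconf.get_dump_type(1.5))       # 'f'
print(libconf.get_dump_type('x'))       # 's'
```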
oceanprotocol/squid-py
squid_py/ocean/ocean_conditions.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/ocean/ocean_conditions.py#L28-L40
```python
def grant_access(self, agreement_id, did, grantee_address, account):
    """
    Grant access condition.

    :param agreement_id: id of the agreement, hex str
    :param did: DID, str
    :param grantee_address: Address, hex str
    :param account: Account
    :return:
    """
    return self._keeper.access_secret_store_condition.fulfill(
        agreement_id, add_0x_prefix(did_to_id(did)), grantee_address, account
    )
```
python
train
GPflow/GPflow
gpflow/training/monitor.py
https://github.com/GPflow/GPflow/blob/549394f0b1b0696c7b521a065e49bdae6e7acf27/gpflow/training/monitor.py#L238-L246
```python
def global_step(self) -> int:
    """
    Evaluates the value of the global step variable if it is set, otherwise
    returns the current iteration number.
    """
    if self.session is None or self.global_step_tensor is None:
        return self.iteration_no + self.init_global_step
    else:
        return self.session.run(self.global_step_tensor)
```
python
train
textX/textX
textx/metamodel.py
https://github.com/textX/textX/blob/5796ac38116ad86584392dbecdbf923ede746361/textx/metamodel.py#L619-L634
```python
def metamodel_from_file(file_name, **kwargs):
    """
    Creates new metamodel from the given file.

    Args:
        file_name(str): The name of the file with textX language description.
        other params: See metamodel_from_str.
    """
    with codecs.open(file_name, 'r', 'utf-8') as f:
        lang_desc = f.read()

    metamodel = metamodel_from_str(lang_desc=lang_desc,
                                   file_name=file_name,
                                   **kwargs)

    return metamodel
```
python
train
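Usage is straightforward, since `metamodel_from_file` is textX's documented entry point; the grammar and model filenames below are placeholders:

```python
from textx import metamodel_from_file

mm = metamodel_from_file('hello.tx')         # placeholder grammar file
model = mm.model_from_file('example.hello')  # parse a model against the grammar
```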
sailthru/relay
relay/plugins/__init__.py
https://github.com/sailthru/relay/blob/995209346c6663675d96d0cbff3bb67b9758c8e2/relay/plugins/__init__.py#L155-L172
```python
def stop_if_mostly_diverging(errdata):
    """This is an example stop condition that asks Relay to quit if the error
    difference between consecutive samples is increasing more than half of
    the time.

    It's quite sensitive and designed for the demo, so you probably
    shouldn't use this in a production setting
    """
    n_increases = sum([
        abs(y) - abs(x) > 0
        for x, y in zip(errdata, errdata[1:])])
    if len(errdata) * 0.5 < n_increases:
        # most of the time, the next sample is worse than the previous sample
        # relay is not healthy
        return 0
    else:
        # most of the time, the next sample is better than the previous sample
        # relay is in a healthy state
        return -1
```
python
train
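The return convention (0 = unhealthy, -1 = keep running) can be checked directly by copying the function into a REPL:

```python
print(stop_if_mostly_diverging([1, 2, 3, 4]))  # 0: the error grew between most samples
print(stop_if_mostly_diverging([4, 3, 2, 1]))  # -1: the error shrank, Relay looks healthy
```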
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L4456-L4483
```python
def _parse_string_to_list_of_pairs(s, seconds_to_int=False):
    r"""Parses a string into a list of pairs.

    In the input string, each pair is separated by a colon, and the delimiters
    between pairs are any of " ,.;".

    e.g. "rows:32,cols:32"

    Args:
      s: str to parse.
      seconds_to_int: Boolean. If True, then the second elements are returned
        as integers; otherwise they are strings.

    Returns:
      List of tuple pairs.

    Raises:
      ValueError: Badly formatted string.
    """
    ret = []
    for p in [s.split(":") for s in re.sub("[,.;]", " ", s).split()]:
        if len(p) != 2:
            raise ValueError("bad input to _parse_string_to_list_of_pairs %s" % s)
        if seconds_to_int:
            ret.append((p[0], int(p[1])))
        else:
            ret.append(tuple(p))
    return ret
```
python
train
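The docstring's own example, traced through the parser (copy the function into a REPL to reproduce):

```python
print(_parse_string_to_list_of_pairs("rows:32,cols:32"))
# [('rows', '32'), ('cols', '32')]

print(_parse_string_to_list_of_pairs("rows:32;cols:32", seconds_to_int=True))
# [('rows', 32), ('cols', 32)]
```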
openstack/python-scciclient
scciclient/irmc/elcm.py
https://github.com/openstack/python-scciclient/blob/4585ce2f76853b9773fb190ca0cfff0aa04a7cf8/scciclient/irmc/elcm.py#L821-L842
```python
def set_secure_boot_mode(irmc_info, enable):
    """Enable/Disable secure boot on the server.

    :param irmc_info: node info
    :param enable: True, if secure boot needs to be enabled for next boot,
        else False.
    """
    bios_config_data = {
        'Server': {
            '@Version': '1.01',
            'SystemConfig': {
                'BiosConfig': {
                    '@Version': '1.01',
                    'SecurityConfig': {
                        'SecureBootControlEnabled': enable
                    }
                }
            }
        }
    }
    restore_bios_config(irmc_info=irmc_info, bios_config=bios_config_data)
```
python
train
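A hedged invocation sketch; the `irmc_info` keys shown are commonly used iRMC driver fields, but treat them as assumptions, since the exact keys expected by `restore_bios_config` are not shown in this record:

```python
irmc_info = {                    # hypothetical node credentials
    'irmc_address': '10.0.0.5',
    'irmc_username': 'admin',
    'irmc_password': 'secret',
}
set_secure_boot_mode(irmc_info, enable=True)  # takes effect on next boot
```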
rabitt/pysox
sox/core.py
https://github.com/rabitt/pysox/blob/eae89bde74567136ec3f723c3e6b369916d9b837/sox/core.py#L65-L85
```python
def _get_valid_formats():
    ''' Calls SoX help for a list of audio formats available with the current
    install of SoX.

    Returns:
    --------
    formats : list
        List of audio file extensions that SoX can process.
    '''
    if NO_SOX:
        return []

    so = subprocess.check_output(['sox', '-h'])
    if type(so) is not str:
        so = str(so, encoding='UTF-8')
    so = so.split('\n')
    idx = [i for i in range(len(so)) if 'AUDIO FILE FORMATS:' in so[i]][0]
    formats = so[idx].split(' ')[3:]

    return formats
```
python
valid
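Given a working SoX install on the PATH, the helper can be exercised directly; the sample output below is indicative of a typical build, not guaranteed:

```python
formats = _get_valid_formats()
print(formats[:5])  # e.g. ['8svx', 'aif', 'aifc', 'aiff', 'aiffc']
```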
ibis-project/ibis
ibis/expr/rules.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/rules.py#L55-L69
```python
def cast(source, target):
    """Currently Literal to *Scalar implicit casts are allowed"""
    import ibis.expr.operations as ops  # TODO: don't use ops here

    if not castable(source, target):
        raise com.IbisTypeError('Source is not castable to target type!')

    # currently it prevents column -> scalar implicit castings
    # however the datatypes are matching
    op = source.op()
    if not isinstance(op, ops.Literal):
        raise com.IbisTypeError('Only able to implicitly cast literals!')

    out_type = target.type().scalar_type()
    return out_type(op)
```
python
train
tensorflow/cleverhans
cleverhans/future/tf2/utils_tf.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/future/tf2/utils_tf.py#L5-L33
```python
def clip_eta(eta, ord, eps):
    """
    Helper function to clip the perturbation to epsilon norm ball.
    :param eta: A tensor with the current perturbation.
    :param ord: Order of the norm (mimics Numpy).
                Possible values: np.inf, 1 or 2.
    :param eps: Epsilon, bound of the perturbation.
    """

    # Clipping perturbation eta to self.ord norm ball
    if ord not in [np.inf, 1, 2]:
        raise ValueError('ord must be np.inf, 1, or 2.')
    axis = list(range(1, len(eta.get_shape())))
    avoid_zero_div = 1e-12
    if ord == np.inf:
        eta = tf.clip_by_value(eta, -eps, eps)
    else:
        if ord == 1:
            raise NotImplementedError("")
            # This is not the correct way to project on the L1 norm ball:
            # norm = tf.maximum(avoid_zero_div, reduce_sum(tf.abs(eta), reduc_ind, keepdims=True))
        elif ord == 2:
            # avoid_zero_div must go inside sqrt to avoid a divide by zero
            # in the gradient through this operation
            norm = tf.sqrt(
                tf.maximum(avoid_zero_div,
                           tf.reduce_sum(tf.square(eta), axis, keepdims=True)))
        # We must *clip* to within the norm ball, not *normalize* onto the
        # surface of the ball
        factor = tf.minimum(1., tf.math.divide(eps, norm))
        eta = eta * factor
    return eta
```
python
train
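A minimal sketch of both implemented clipping modes, assuming TensorFlow 2 (the record's path puts this module under `future/tf2`); the tensor shape is arbitrary:

```python
import numpy as np
import tensorflow as tf

eta = tf.random.normal((4, 28, 28, 1))    # a batch of raw perturbations
eta_inf = clip_eta(eta, np.inf, eps=0.3)  # elementwise clip into [-0.3, 0.3]
eta_l2 = clip_eta(eta, 2, eps=0.3)        # rescale each example into the L2 ball
```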
TurboGears/gearbox
gearbox/commands/serve.py
https://github.com/TurboGears/gearbox/blob/df496ab28050ce6a4cc4c502488f5c5812f2baff/gearbox/commands/serve.py#L313-L326
```python
def parse_vars(self, args):
    """
    Given variables like ``['a=b', 'c=d']`` turns it into ``{'a':
    'b', 'c': 'd'}``
    """
    result = {}
    for arg in args:
        if '=' not in arg:
            raise ValueError(
                'Variable assignment %r invalid (no "=")' % arg)
        name, value = arg.split('=', 1)
        result[name] = value
    return result
```
python
train
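A behavior check for `parse_vars`, assuming `cmd` is an instance of the serve command class this method belongs to:

```python
print(cmd.parse_vars(['a=b', 'c=d']))  # {'a': 'b', 'c': 'd'}
cmd.parse_vars(['oops'])               # ValueError: Variable assignment 'oops' invalid (no "=")
```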
draperjames/qtpandas
qtpandas/models/DataFrameModelManager.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/models/DataFrameModelManager.py#L111-L129
```python
def set_model(self, df_model, file_path):
    """
    Sets a DataFrameModel and registers it to the given file_path.

    :param df_model: (DataFrameModel)
        The DataFrameModel to register.
    :param file_path: The file path to associate with the DataFrameModel.
        *Overrides the current filePath on the DataFrameModel (if any)
    :return: None
    """
    assert isinstance(df_model, DataFrameModel), "df_model argument must be a DataFrameModel!"
    df_model._filePath = file_path
    try:
        self._models[file_path]
    except KeyError:
        self.signalNewModelRead.emit(file_path)
    self._models[file_path] = df_model
```
python
train
Beyond-Digital/django-gaekit
gaekit/boot.py
https://github.com/Beyond-Digital/django-gaekit/blob/b587acd52b5cfd48217a70920d4b61d5f923c8c5/gaekit/boot.py#L1-L17
```python
def break_sandbox():
    """Patches sandbox to add match-all regex to sandbox whitelist
    """
    class EvilCM(object):
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc, tb):
            import re
            tb.tb_next.tb_next.tb_next.tb_frame.f_locals[
                'self']._enabled_regexes.append(re.compile('.*'))
            return True

    try:
        import sqlite3  # noqa
    except ImportError:
        with EvilCM():
            __import__('sqlite3')
```
python
train
econ-ark/HARK
HARK/ConsumptionSaving/ConsIndShockModel.py
https://github.com/econ-ark/HARK/blob/3d184153a189e618a87c9540df1cd12044039cc5/HARK/ConsumptionSaving/ConsIndShockModel.py#L635-L654
```python
def defUtilityFuncs(self):
    '''
    Defines CRRA utility function for this period (and its derivatives,
    and their inverses), saving them as attributes of self for other methods
    to use.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    ConsPerfForesightSolver.defUtilityFuncs(self)
    self.uPinv = lambda u : utilityP_inv(u, gam=self.CRRA)
    self.uPinvP = lambda u : utilityP_invP(u, gam=self.CRRA)
    self.uinvP = lambda u : utility_invP(u, gam=self.CRRA)
    if self.vFuncBool:
        self.uinv = lambda u : utility_inv(u, gam=self.CRRA)
```
python
train
mbedmicro/pyOCD
pyocd/flash/flash_builder.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/flash/flash_builder.py#L174-L212
```python
def add_data(self, addr, data):
    """! @brief Add a block of data to be programmed.

    @note Programming does not start until the method program() is called.

    @param self
    @param addr Base address of the block of data passed to this method. The entire
        block of data must be contained within the flash memory region associated
        with this instance.
    @param data Data to be programmed. Should be a list of byte values.

    @exception FlashFailure Address range of added data is outside the address range
        of the flash region associated with the builder.
    @exception ValueError Attempt to add overlapping data.
    """
    # Ignore empty data.
    if len(data) == 0:
        return

    # Sanity check
    if not self.flash.region.contains_range(start=addr, length=len(data)):
        raise FlashFailure("Flash address range 0x%x-0x%x is not contained within region '%s'" %
                           (addr, addr + len(data) - 1, self.flash.region.name))

    # Add operation to list
    self.flash_operation_list.append(_FlashOperation(addr, data))

    self.buffered_data_size += len(data)

    # Keep list sorted
    self.flash_operation_list = sorted(self.flash_operation_list,
                                       key=lambda operation: operation.addr)
    # Verify this does not overlap
    prev_flash_operation = None
    for operation in self.flash_operation_list:
        if prev_flash_operation is not None:
            if prev_flash_operation.addr + len(prev_flash_operation.data) > operation.addr:
                raise ValueError("Error adding data - Data at 0x%x..0x%x overlaps with 0x%x..0x%x"
                                 % (prev_flash_operation.addr,
                                    prev_flash_operation.addr + len(prev_flash_operation.data),
                                    operation.addr,
                                    operation.addr + len(operation.data)))
        prev_flash_operation = operation
```
python
train
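A minimal usage sketch for the add_data builder above. How the builder is obtained varies by setup, so the construction below is an assumption; only the add_data/program calls mirror the record:

    from pyocd.flash.flash_builder import FlashBuilder  # module path taken from the record

    builder = FlashBuilder(flash)  # `flash` is a hypothetical pyOCD Flash instance
    builder.add_data(0x08000000, [0x01, 0x02, 0x03, 0x04])  # queue one block
    builder.add_data(0x08000100, [0xAA] * 16)               # second, non-overlapping block
    builder.program()  # nothing is written to flash until this call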
django-danceschool/django-danceschool
danceschool/financial/admin.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/financial/admin.py#L382-L389
def resetStaffCompensationInfo(self, request, queryset):
        '''
        This action is added to the list for staff member to permit bulk
        resetting to category defaults of compensation information for staff members.
        '''
        selected = request.POST.getlist(admin.ACTION_CHECKBOX_NAME)
        ct = ContentType.objects.get_for_model(queryset.model)
        return HttpResponseRedirect(reverse('resetCompensationRules') + "?ct=%s&ids=%s" % (ct.pk, ",".join(selected)))
[ "def", "resetStaffCompensationInfo", "(", "self", ",", "request", ",", "queryset", ")", ":", "selected", "=", "request", ".", "POST", ".", "getlist", "(", "admin", ".", "ACTION_CHECKBOX_NAME", ")", "ct", "=", "ContentType", ".", "objects", ".", "get_for_model", "(", "queryset", ".", "model", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'resetCompensationRules'", ")", "+", "\"?ct=%s&ids=%s\"", "%", "(", "ct", ".", "pk", ",", "\",\"", ".", "join", "(", "selected", ")", ")", ")" ]
This action is added to the list for staff member to permit bulk resetting to category defaults of compensation information for staff members.
[ "This", "action", "is", "added", "to", "the", "list", "for", "staff", "member", "to", "permit", "bulk", "reseting", "to", "category", "defaults", "of", "compensation", "information", "for", "staff", "members", "." ]
python
train
burnash/gspread
gspread/models.py
https://github.com/burnash/gspread/blob/0e8debe208095aeed3e3e7136c2fa5cd74090946/gspread/models.py#L675-L705
def col_values(self, col, value_render_option='FORMATTED_VALUE'):
        """Returns a list of all values in column `col`.

        Empty cells in this list will be rendered as :const:`None`.

        :param col: Column number.
        :type col: int
        :param value_render_option: (optional) Determines how values should be
                                    rendered in the output. See
                                    `ValueRenderOption`_ in the Sheets API.
        :type value_render_option: str

        .. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
        """
        start_label = rowcol_to_a1(1, col)
        range_label = '%s!%s:%s' % (self.title, start_label, start_label[:-1])

        data = self.spreadsheet.values_get(
            range_label,
            params={
                'valueRenderOption': value_render_option,
                'majorDimension': 'COLUMNS'
            }
        )

        try:
            return data['values'][0]
        except KeyError:
            return []
[ "def", "col_values", "(", "self", ",", "col", ",", "value_render_option", "=", "'FORMATTED_VALUE'", ")", ":", "start_label", "=", "rowcol_to_a1", "(", "1", ",", "col", ")", "range_label", "=", "'%s!%s:%s'", "%", "(", "self", ".", "title", ",", "start_label", ",", "start_label", "[", ":", "-", "1", "]", ")", "data", "=", "self", ".", "spreadsheet", ".", "values_get", "(", "range_label", ",", "params", "=", "{", "'valueRenderOption'", ":", "value_render_option", ",", "'majorDimension'", ":", "'COLUMNS'", "}", ")", "try", ":", "return", "data", "[", "'values'", "]", "[", "0", "]", "except", "KeyError", ":", "return", "[", "]" ]
Returns a list of all values in column `col`.

Empty cells in this list will be rendered as :const:`None`.

:param col: Column number.
:type col: int
:param value_render_option: (optional) Determines how values should be
                            rendered in the output. See
                            `ValueRenderOption`_ in the Sheets API.
:type value_render_option: str

.. _ValueRenderOption: https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
[ "Returns", "a", "list", "of", "all", "values", "in", "column", "col", "." ]
python
train
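For context, col_values is reached through gspread's usual client flow; a short sketch (credential setup elided, spreadsheet title hypothetical):

    import gspread

    gc = gspread.authorize(credentials)  # credentials obtained elsewhere
    ws = gc.open("My Sheet").sheet1
    # all values of column B, without number formatting applied
    values = ws.col_values(2, value_render_option='UNFORMATTED_VALUE')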
jangler/readlike
readlike.py
https://github.com/jangler/readlike/blob/2901260c50bd1aecfb981c3990e0c6333de8aac8/readlike.py#L51-L55
def _backward_delete_char(text, pos): """Delete the character behind pos.""" if pos == 0: return text, pos return text[:pos - 1] + text[pos:], pos - 1
[ "def", "_backward_delete_char", "(", "text", ",", "pos", ")", ":", "if", "pos", "==", "0", ":", "return", "text", ",", "pos", "return", "text", "[", ":", "pos", "-", "1", "]", "+", "text", "[", "pos", ":", "]", ",", "pos", "-", "1" ]
Delete the character behind pos.
[ "Delete", "the", "character", "behind", "pos", "." ]
python
train
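Since _backward_delete_char is a pure function of (text, pos), its behavior is easy to pin down with a doctest-style check:

    >>> _backward_delete_char('hello', 3)   # removes the character at index 2
    ('helo', 2)
    >>> _backward_delete_char('hello', 0)   # nothing behind the start of the line
    ('hello', 0)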
O365/python-o365
O365/mailbox.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/mailbox.py#L434-L468
def move_folder(self, to_folder, *, update_parent_if_changed=True):
        """ Move this folder to another folder

        :param to_folder: the destination Folder/folder_id to move into
        :type to_folder: mailbox.Folder or str
        :param bool update_parent_if_changed: updates self.parent with the
         new parent Folder if changed
        :return: True if the move succeeded, False otherwise
        :rtype: bool
        """
        to_folder_id = to_folder.folder_id if isinstance(to_folder,
                                                         Folder) else to_folder

        if self.root or not self.folder_id or not to_folder_id:
            return False

        url = self.build_url(
            self._endpoints.get('move_folder').format(id=self.folder_id))

        response = self.con.post(url,
                                 data={self._cc('destinationId'): to_folder_id})
        if not response:
            return False

        folder = response.json()

        parent_id = folder.get(self._cc('parentFolderId'), None)

        if parent_id and self.parent_id:
            if parent_id != self.parent_id:
                self.parent_id = parent_id
                self.parent = (self.get_parent_folder()
                               if update_parent_if_changed else None)

        return True
[ "def", "move_folder", "(", "self", ",", "to_folder", ",", "*", ",", "update_parent_if_changed", "=", "True", ")", ":", "to_folder_id", "=", "to_folder", ".", "folder_id", "if", "isinstance", "(", "to_folder", ",", "Folder", ")", "else", "to_folder", "if", "self", ".", "root", "or", "not", "self", ".", "folder_id", "or", "not", "to_folder_id", ":", "return", "False", "url", "=", "self", ".", "build_url", "(", "self", ".", "_endpoints", ".", "get", "(", "'move_folder'", ")", ".", "format", "(", "id", "=", "self", ".", "folder_id", ")", ")", "response", "=", "self", ".", "con", ".", "post", "(", "url", ",", "data", "=", "{", "self", ".", "_cc", "(", "'destinationId'", ")", ":", "to_folder_id", "}", ")", "if", "not", "response", ":", "return", "False", "folder", "=", "response", ".", "json", "(", ")", "parent_id", "=", "folder", ".", "get", "(", "self", ".", "_cc", "(", "'parentFolderId'", ")", ",", "None", ")", "if", "parent_id", "and", "self", ".", "parent_id", ":", "if", "parent_id", "!=", "self", ".", "parent_id", ":", "self", ".", "parent_id", "=", "parent_id", "self", ".", "parent", "=", "(", "self", ".", "get_parent_folder", "(", ")", "if", "update_parent_if_changed", "else", "None", ")", "return", "True" ]
Move this folder to another folder

:param to_folder: the destination Folder/folder_id to move into
:type to_folder: mailbox.Folder or str
:param bool update_parent_if_changed: updates self.parent with the
 new parent Folder if changed
:return: True if the move succeeded, False otherwise
:rtype: bool
[ "Move", "this", "folder", "to", "another", "folder" ]
python
train
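A usage sketch for move_folder through python-o365's account objects; the credentials and folder names here are placeholders:

    from O365 import Account

    account = Account(('client_id', 'client_secret'))  # hypothetical app credentials
    mailbox = account.mailbox()
    inbox = mailbox.inbox_folder()
    archive = mailbox.get_folder(folder_name='Archive')   # assumed to exist
    receipts = inbox.get_folder(folder_name='Receipts')
    if receipts.move_folder(archive):  # a Folder instance or a folder id both work
        print('moved; new parent id:', receipts.parent_id)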
Nic30/hwt
hwt/simulator/hdlSimulator.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/simulator/hdlSimulator.py#L532-L536
def add_process(self, proc) -> None: """ Add process to events with default priority on current time """ self._events.push(self.now, PRIORITY_NORMAL, proc)
[ "def", "add_process", "(", "self", ",", "proc", ")", "->", "None", ":", "self", ".", "_events", ".", "push", "(", "self", ".", "now", ",", "PRIORITY_NORMAL", ",", "proc", ")" ]
Add process to events with default priority on current time
[ "Add", "process", "to", "events", "with", "default", "priority", "on", "current", "time" ]
python
test
gatagat/lap
lap/lapmod.py
https://github.com/gatagat/lap/blob/c2b6309ba246d18205a71228cdaea67210e1a039/lap/lapmod.py#L273-L341
def lapmod(n, cc, ii, kk, fast=True, return_cost=True,
           fp_version=FP_DYNAMIC):
    """Solve sparse linear assignment problem using Jonker-Volgenant algorithm.

    n: number of rows of the assignment cost matrix
    cc: 1D array of all finite elements of the assignment cost matrix
    ii: 1D array of indices of the row starts in cc. The following must hold:
            ii[0] = 0 and ii[n+1] = len(cc).
    kk: 1D array of the column indices so that:
            cost[i, kk[ii[i] + k]] == cc[ii[i] + k].
        Indices within one row must be sorted.
    extend_cost: whether or not to extend a non-square matrix [default: False]
    cost_limit: an upper limit for a cost of a single assignment
                [default: np.inf]
    return_cost: whether or not to return the assignment cost

    Returns
    (opt, x, y) where:
      opt: cost of the assignment
      x: vector of columns assigned to rows
      y: vector of rows assigned to columns
    or (x, y) if return_cost is not True.

    When extend_cost and/or cost_limit is set, all unmatched entries will be
    marked by -1 in x/y.
    """
    # log = logging.getLogger('lapmod')

    check_cost(n, cc, ii, kk)

    if fast is True:
        # log.debug('[----CR & RT & ARR & augmentation ----]')
        x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version)
    else:
        cc = np.ascontiguousarray(cc, dtype=np.float64)
        ii = np.ascontiguousarray(ii, dtype=np.int32)
        kk = np.ascontiguousarray(kk, dtype=np.int32)

        x = np.empty((n,), dtype=np.int32)
        y = np.empty((n,), dtype=np.int32)
        v = np.empty((n,), dtype=np.float64)
        free_rows = np.empty((n,), dtype=np.int32)
        # log.debug('[----Column reduction & reduction transfer----]')
        n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v)
        # log.debug(
        #     'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
        if n_free_rows == 0:
            # log.info('Reduction solved it.')
            if return_cost is True:
                return get_cost(n, cc, ii, kk, x), x, y
            else:
                return x, y
        for it in range(2):
            # log.debug('[---Augmenting row reduction (iteration: %d)---]', it)
            n_free_rows = _pyarr(
                n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
            # log.debug(
            #    'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)
            if n_free_rows == 0:
                # log.info('Augmenting row reduction solved it.')
                if return_cost is True:
                    return get_cost(n, cc, ii, kk, x), x, y
                else:
                    return x, y
        # log.info('[----Augmentation----]')
        _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v)
        # log.debug('x, y, v: %s %s %s', x, y, v)
    if return_cost is True:
        return get_cost(n, cc, ii, kk, x), x, y
    else:
        return x, y
[ "def", "lapmod", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "fast", "=", "True", ",", "return_cost", "=", "True", ",", "fp_version", "=", "FP_DYNAMIC", ")", ":", "# log = logging.getLogger('lapmod')", "check_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ")", "if", "fast", "is", "True", ":", "# log.debug('[----CR & RT & ARR & augmentation ----]')", "x", ",", "y", "=", "_lapmod", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "fp_version", "=", "fp_version", ")", "else", ":", "cc", "=", "np", ".", "ascontiguousarray", "(", "cc", ",", "dtype", "=", "np", ".", "float64", ")", "ii", "=", "np", ".", "ascontiguousarray", "(", "ii", ",", "dtype", "=", "np", ".", "int32", ")", "kk", "=", "np", ".", "ascontiguousarray", "(", "kk", ",", "dtype", "=", "np", ".", "int32", ")", "x", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "y", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "v", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "free_rows", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "# log.debug('[----Column reduction & reduction transfer----]')", "n_free_rows", "=", "_pycrrt", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug(", "# 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)", "if", "n_free_rows", "==", "0", ":", "# log.info('Reduction solved it.')", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y", "for", "it", "in", "range", "(", "2", ")", ":", "# log.debug('[---Augmenting row reduction (iteration: %d)---]', it)", "n_free_rows", "=", "_pyarr", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "n_free_rows", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug(", "# 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)", "if", "n_free_rows", "==", "0", ":", "# log.info('Augmenting row reduction solved it.')", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y", "# log.info('[----Augmentation----]')", "_pya", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "n_free_rows", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug('x, y, v: %s %s %s', x, y, v)", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y" ]
Solve sparse linear assignment problem using Jonker-Volgenant algorithm.

n: number of rows of the assignment cost matrix
cc: 1D array of all finite elements of the assignment cost matrix
ii: 1D array of indices of the row starts in cc. The following must hold:
ii[0] = 0 and ii[n+1] = len(cc).
kk: 1D array of the column indices so that:
cost[i, kk[ii[i] + k]] == cc[ii[i] + k].
Indices within one row must be sorted.
extend_cost: whether or not to extend a non-square matrix [default: False]
cost_limit: an upper limit for a cost of a single assignment
[default: np.inf]
return_cost: whether or not to return the assignment cost

Returns
(opt, x, y) where:
opt: cost of the assignment
x: vector of columns assigned to rows
y: vector of rows assigned to columns
or (x, y) if return_cost is not True.

When extend_cost and/or cost_limit is set, all unmatched entries will be
marked by -1 in x/y.
[ "Solve", "sparse", "linear", "assignment", "problem", "using", "Jonker", "-", "Volgenant", "algorithm", "." ]
python
train
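A small worked example of the sparse, CSR-like input format lapmod expects (import path as exposed by the lap package). For the dense cost matrix [[1, 2], [3, 1]] every entry is finite, so cc lists the costs row by row, ii marks where each row starts, and kk gives the column of each entry:

    import numpy as np
    from lap import lapmod

    cc = np.array([1., 2., 3., 1.])  # finite costs, row-major
    ii = np.array([0, 2, 4])         # row i occupies cc[ii[i]:ii[i + 1]]
    kk = np.array([0, 1, 0, 1])      # column index of each cost, sorted per row
    cost, x, y = lapmod(2, cc, ii, kk)
    print(cost, x, y)  # 2.0 [0 1] [0 1]: row 0 -> col 0, row 1 -> col 1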
edx/completion
completion/services.py
https://github.com/edx/completion/blob/5c23806f6db69ce6be3fd068fc5b5fdf4d66bd60/completion/services.py#L114-L120
def blocks_to_mark_complete_on_view(self, blocks): """ Returns a set of blocks which should be marked complete on view and haven't been yet. """ blocks = {block for block in blocks if self.can_mark_block_complete_on_view(block)} completions = self.get_completions({block.location for block in blocks}) return {block for block in blocks if completions.get(block.location, 0) < 1.0}
[ "def", "blocks_to_mark_complete_on_view", "(", "self", ",", "blocks", ")", ":", "blocks", "=", "{", "block", "for", "block", "in", "blocks", "if", "self", ".", "can_mark_block_complete_on_view", "(", "block", ")", "}", "completions", "=", "self", ".", "get_completions", "(", "{", "block", ".", "location", "for", "block", "in", "blocks", "}", ")", "return", "{", "block", "for", "block", "in", "blocks", "if", "completions", ".", "get", "(", "block", ".", "location", ",", "0", ")", "<", "1.0", "}" ]
Returns a set of blocks which should be marked complete on view and haven't been yet.
[ "Returns", "a", "set", "of", "blocks", "which", "should", "be", "marked", "complete", "on", "view", "and", "haven", "t", "been", "yet", "." ]
python
train
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/search.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/search.py#L180-L207
def _prepare_filtering_params(domain=None, category=None, sponsored_source=None, has_field=None, has_fields=None, query_params_match=None, query_person_match=None, **kwargs): """Transform the params to the API format, return a list of params.""" if query_params_match not in (None, True): raise ValueError('query_params_match can only be `True`') if query_person_match not in (None, True): raise ValueError('query_person_match can only be `True`') params = [] if domain is not None: params.append('domain:%s' % domain) if category is not None: Source.validate_categories([category]) params.append('category:%s' % category) if sponsored_source is not None: params.append('sponsored_source:%s' % sponsored_source) if query_params_match is not None: params.append('query_params_match') if query_person_match is not None: params.append('query_person_match') has_fields = has_fields or [] if has_field is not None: has_fields.append(has_field) for has_field in has_fields: params.append('has_field:%s' % has_field.__name__) return params
[ "def", "_prepare_filtering_params", "(", "domain", "=", "None", ",", "category", "=", "None", ",", "sponsored_source", "=", "None", ",", "has_field", "=", "None", ",", "has_fields", "=", "None", ",", "query_params_match", "=", "None", ",", "query_person_match", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "query_params_match", "not", "in", "(", "None", ",", "True", ")", ":", "raise", "ValueError", "(", "'query_params_match can only be `True`'", ")", "if", "query_person_match", "not", "in", "(", "None", ",", "True", ")", ":", "raise", "ValueError", "(", "'query_person_match can only be `True`'", ")", "params", "=", "[", "]", "if", "domain", "is", "not", "None", ":", "params", ".", "append", "(", "'domain:%s'", "%", "domain", ")", "if", "category", "is", "not", "None", ":", "Source", ".", "validate_categories", "(", "[", "category", "]", ")", "params", ".", "append", "(", "'category:%s'", "%", "category", ")", "if", "sponsored_source", "is", "not", "None", ":", "params", ".", "append", "(", "'sponsored_source:%s'", "%", "sponsored_source", ")", "if", "query_params_match", "is", "not", "None", ":", "params", ".", "append", "(", "'query_params_match'", ")", "if", "query_person_match", "is", "not", "None", ":", "params", ".", "append", "(", "'query_person_match'", ")", "has_fields", "=", "has_fields", "or", "[", "]", "if", "has_field", "is", "not", "None", ":", "has_fields", ".", "append", "(", "has_field", ")", "for", "has_field", "in", "has_fields", ":", "params", ".", "append", "(", "'has_field:%s'", "%", "has_field", ".", "__name__", ")", "return", "params" ]
Transform the params to the API format, return a list of params.
[ "Transform", "the", "params", "to", "the", "API", "format", "return", "a", "list", "of", "params", "." ]
python
train
materialsproject/pymatgen
pymatgen/util/serialization.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/serialization.py#L77-L98
def persistent_load(self, pid):
        """
        This method is invoked whenever a persistent ID is encountered.
        Here, pid is the tuple returned by PmgPickler.
        """
        try:
            type_tag, key_id = pid
        except Exception as exc:
            # Sometimes we get a string such as ('Element', u'C') instead
            # of a real tuple. Use ast to evaluate the expression (much safer
            # than eval).
            import ast
            type_tag, key_id = ast.literal_eval(pid)

        if type_tag == "Element":
            return Element(key_id)
        else:
            # Always raises an error if you cannot return the correct object.
            # Otherwise, the unpickler will think None is the object referenced
            # by the persistent ID.
            raise pickle.UnpicklingError(
                "unsupported persistent object with pid %s" % pid)
[ "def", "persistent_load", "(", "self", ",", "pid", ")", ":", "try", ":", "type_tag", ",", "key_id", "=", "pid", "except", "Exception", "as", "exc", ":", "# Sometimes we get a string such as ('Element', u'C') instead", "# of a real tuple. Use ast to evalute the expression (much safer", "# than eval).", "import", "ast", "type_tag", ",", "key_id", "=", "ast", ".", "literal_eval", "(", "pid", ")", "if", "type_tag", "==", "\"Element\"", ":", "return", "Element", "(", "key_id", ")", "else", ":", "# Always raises an error if you cannot return the correct object.", "# Otherwise, the unpickler will think None is the object referenced", "# by the persistent ID.", "raise", "pickle", ".", "UnpicklingError", "(", "\"unsupported persistent object with pid %s\"", "%", "pid", ")" ]
This method is invoked whenever a persistent ID is encountered. Here, pid is the tuple returned by PmgPickler.
[ "This", "method", "is", "invoked", "whenever", "a", "persistent", "ID", "is", "encountered", ".", "Here", "pid", "is", "the", "tuple", "returned", "by", "PmgPickler", "." ]
python
train
quikmile/trellio
trellio/utils/log.py
https://github.com/quikmile/trellio/blob/e8b050077562acf32805fcbb9c0c162248a23c62/trellio/utils/log.py#L21-L32
def formatTime(self, record, datefmt=None): # noqa """ Overrides formatTime method to use datetime module instead of time module to display time in microseconds. Time module by default does not resolve time to microseconds. """ if datefmt: s = datetime.datetime.now().strftime(datefmt) else: t = datetime.datetime.now().strftime(self.default_time_format) s = self.default_msec_format % (t, record.msecs) return s
[ "def", "formatTime", "(", "self", ",", "record", ",", "datefmt", "=", "None", ")", ":", "# noqa", "if", "datefmt", ":", "s", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "datefmt", ")", "else", ":", "t", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "self", ".", "default_time_format", ")", "s", "=", "self", ".", "default_msec_format", "%", "(", "t", ",", "record", ".", "msecs", ")", "return", "s" ]
Overrides formatTime method to use datetime module instead of time module to display time in microseconds. Time module by default does not resolve time to microseconds.
[ "Overrides", "formatTime", "method", "to", "use", "datetime", "module", "instead", "of", "time", "module", "to", "display", "time", "in", "microseconds", ".", "Time", "module", "by", "default", "does", "not", "resolve", "time", "to", "microseconds", "." ]
python
train
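The enclosing class name is not shown in this record, so here is a self-contained sketch of the same idea with an illustrative name; it relies only on the standard library:

    import datetime
    import logging

    class MicrosecondFormatter(logging.Formatter):
        def formatTime(self, record, datefmt=None):
            # datetime.strftime (unlike time.strftime) expands %f to microseconds
            if datefmt:
                return datetime.datetime.now().strftime(datefmt)
            t = datetime.datetime.now().strftime(self.default_time_format)
            return self.default_msec_format % (t, record.msecs)

    handler = logging.StreamHandler()
    handler.setFormatter(MicrosecondFormatter('%(asctime)s %(message)s',
                                              datefmt='%H:%M:%S.%f'))
    logging.getLogger('demo').addHandler(handler)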
iotaledger/iota.lib.py
iota/adapter/wrappers.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/adapter/wrappers.py#L77-L98
def add_route(self, command, adapter): # type: (Text, AdapterSpec) -> RoutingWrapper """ Adds a route to the wrapper. :param command: The name of the command to route (e.g., "attachToTangle"). :param adapter: The adapter object or URI to route requests to. """ if not isinstance(adapter, BaseAdapter): try: adapter = self.adapter_aliases[adapter] except KeyError: self.adapter_aliases[adapter] = adapter = resolve_adapter( adapter ) self.routes[command] = adapter return self
[ "def", "add_route", "(", "self", ",", "command", ",", "adapter", ")", ":", "# type: (Text, AdapterSpec) -> RoutingWrapper", "if", "not", "isinstance", "(", "adapter", ",", "BaseAdapter", ")", ":", "try", ":", "adapter", "=", "self", ".", "adapter_aliases", "[", "adapter", "]", "except", "KeyError", ":", "self", ".", "adapter_aliases", "[", "adapter", "]", "=", "adapter", "=", "resolve_adapter", "(", "adapter", ")", "self", ".", "routes", "[", "command", "]", "=", "adapter", "return", "self" ]
Adds a route to the wrapper. :param command: The name of the command to route (e.g., "attachToTangle"). :param adapter: The adapter object or URI to route requests to.
[ "Adds", "a", "route", "to", "the", "wrapper", "." ]
python
test
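RoutingWrapper is typically used to send one expensive command (such as proof of work) to a different node than everything else; a sketch with placeholder node URLs:

    from iota import Iota
    from iota.adapter.wrappers import RoutingWrapper

    api = Iota(
        RoutingWrapper('http://localhost:14265')                      # default route
            .add_route('attachToTangle', 'http://localhost:14266')    # PoW goes here
    )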
RJT1990/pyflux
pyflux/arma/nnarx.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/arma/nnarx.py#L430-L447
def general_neg_loglik(self, beta):
        """ Calculates the negative log-likelihood of the model

        Parameters
        ----------
        beta : np.ndarray
            Contains untransformed starting values for latent variables

        Returns
        ----------
        The negative log-likelihood of the model
        """
        mu, Y = self._model(beta)
        parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
        #TODO: Replace above with transformation that only acts on scale, shape, skewness in future (speed-up)
        model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm)
        return self.family.neg_loglikelihood(Y, self.link(mu), model_scale, model_shape, model_skewness)
[ "def", "general_neg_loglik", "(", "self", ",", "beta", ")", ":", "mu", ",", "Y", "=", "self", ".", "_model", "(", "beta", ")", "parm", "=", "np", ".", "array", "(", "[", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "transform", "(", "beta", "[", "k", "]", ")", "for", "k", "in", "range", "(", "beta", ".", "shape", "[", "0", "]", ")", "]", ")", "#TODO: Replace above with transformation that only acts on scale, shape, skewness in future (speed-up)", "model_scale", ",", "model_shape", ",", "model_skewness", "=", "self", ".", "_get_scale_and_shape", "(", "parm", ")", "return", "self", ".", "family", ".", "neg_loglikelihood", "(", "Y", ",", "self", ".", "link", "(", "mu", ")", ",", "model_scale", ",", "model_shape", ",", "model_skewness", ")" ]
Calculates the negative log-likelihood of the model

Parameters
----------
beta : np.ndarray
    Contains untransformed starting values for latent variables

Returns
----------
The negative log-likelihood of the model
[ "Calculates", "the", "negative", "log", "-", "likelihood", "of", "the", "model" ]
python
train
bspaans/python-mingus
mingus/extra/tunings.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tunings.py#L95-L143
def find_fingering(self, notes, max_distance=4, not_strings=[]):
        """Return a list [(string, fret)] of possible fingerings for
        'notes'.

        The notes parameter should be a list of strings or Notes or a
        NoteContainer; max_distance denotes the maximum distance between
        frets; not_strings can be used to exclude certain strings and is
        used internally to recurse.

        Example:
        >>> t = tunings.StringTuning('test', 'test', ['A-3', 'E-4', 'A-5'])
        >>> t.find_fingering(['E-4', 'B-4'])
        [[(0, 7), (1, 7)], [(1, 0), (0, 14)]]
        """
        if notes is None:
            return []
        if len(notes) == 0:
            return []
        first = notes[0]
        notes = notes[1:]
        frets = self.find_frets(first)
        result = []
        for (string, fret) in enumerate(frets):
            if fret is not None and string not in not_strings:
                if len(notes) > 0:
                    # recursively find fingerings for
                    # remaining notes
                    r = self.find_fingering(notes, max_distance, not_strings + [string])
                    if r != []:
                        for f in r:
                            result.append([(string, fret)] + f)
                else:
                    result.append([(string, fret)])

        # filter impossible fingerings and sort
        res = []
        for r in result:
            (min, max) = (1000, -1)
            frets = 0
            for (string, fret) in r:
                if fret > max:
                    max = fret
                if fret < min and fret != 0:
                    min = fret
                frets += fret
            if 0 <= max - min < max_distance or min == 1000 or max == -1:
                res.append((frets, r))
        return [r for (_, r) in sorted(res)]
[ "def", "find_fingering", "(", "self", ",", "notes", ",", "max_distance", "=", "4", ",", "not_strings", "=", "[", "]", ")", ":", "if", "notes", "is", "None", ":", "return", "[", "]", "if", "len", "(", "notes", ")", "==", "0", ":", "return", "[", "]", "first", "=", "notes", "[", "0", "]", "notes", "=", "notes", "[", "1", ":", "]", "frets", "=", "self", ".", "find_frets", "(", "first", ")", "result", "=", "[", "]", "for", "(", "string", ",", "fret", ")", "in", "enumerate", "(", "frets", ")", ":", "if", "fret", "is", "not", "None", "and", "string", "not", "in", "not_strings", ":", "if", "len", "(", "notes", ")", ">", "0", ":", "# recursively find fingerings for", "# remaining notes", "r", "=", "self", ".", "find_fingering", "(", "notes", ",", "max_distance", ",", "not_strings", "+", "[", "string", "]", ")", "if", "r", "!=", "[", "]", ":", "for", "f", "in", "r", ":", "result", ".", "append", "(", "[", "(", "string", ",", "fret", ")", "]", "+", "f", ")", "else", ":", "result", ".", "append", "(", "[", "(", "string", ",", "fret", ")", "]", ")", "# filter impossible fingerings and sort", "res", "=", "[", "]", "for", "r", "in", "result", ":", "(", "min", ",", "max", ")", "=", "(", "1000", ",", "-", "1", ")", "frets", "=", "0", "for", "(", "string", ",", "fret", ")", "in", "r", ":", "if", "fret", ">", "max", ":", "max", "=", "fret", "if", "fret", "<", "min", "and", "fret", "!=", "0", ":", "min", "=", "fret", "frets", "+=", "fret", "if", "0", "<=", "max", "-", "min", "<", "max_distance", "or", "min", "==", "1000", "or", "max", "==", "-", "1", ":", "res", ".", "append", "(", "(", "frets", ",", "r", ")", ")", "return", "[", "r", "for", "(", "_", ",", "r", ")", "in", "sorted", "(", "res", ")", "]" ]
Return a list [(string, fret)] of possible fingerings for 'notes'.

The notes parameter should be a list of strings or Notes or a
NoteContainer; max_distance denotes the maximum distance between
frets; not_strings can be used to exclude certain strings and is
used internally to recurse.

Example:
>>> t = tunings.StringTuning('test', 'test', ['A-3', 'E-4', 'A-5'])
>>> t.find_fingering(['E-4', 'B-4'])
[[(0, 7), (1, 7)], [(1, 0), (0, 14)]]
[ "Return", "a", "list", "[", "(", "string", "fret", ")", "]", "of", "possible", "fingerings", "for", "notes", "." ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/security/sqla/manager.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/security/sqla/manager.py#L344-L355
def find_permission_view_menu(self, permission_name, view_menu_name): """ Finds and returns a PermissionView by names """ permission = self.find_permission(permission_name) view_menu = self.find_view_menu(view_menu_name) if permission and view_menu: return ( self.get_session.query(self.permissionview_model) .filter_by(permission=permission, view_menu=view_menu) .first() )
[ "def", "find_permission_view_menu", "(", "self", ",", "permission_name", ",", "view_menu_name", ")", ":", "permission", "=", "self", ".", "find_permission", "(", "permission_name", ")", "view_menu", "=", "self", ".", "find_view_menu", "(", "view_menu_name", ")", "if", "permission", "and", "view_menu", ":", "return", "(", "self", ".", "get_session", ".", "query", "(", "self", ".", "permissionview_model", ")", ".", "filter_by", "(", "permission", "=", "permission", ",", "view_menu", "=", "view_menu", ")", ".", "first", "(", ")", ")" ]
Finds and returns a PermissionView by names
[ "Finds", "and", "returns", "a", "PermissionView", "by", "names" ]
python
train
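In an application this lookup is usually reached through the AppBuilder's security manager; the permission and view names below are illustrative:

    # `appbuilder` is the app's flask_appbuilder.AppBuilder instance
    pv = appbuilder.sm.find_permission_view_menu('can_list', 'MyModelView')
    if pv is not None:
        print(pv.permission.name, pv.view_menu.name)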
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/quaternion.py#L551-L561
def _dcm_array_to_matrix3(self, dcm): """ Converts dcm array into Matrix3 :param dcm: 3x3 dcm array :returns: Matrix3 """ assert(dcm.shape == (3, 3)) a = Vector3(dcm[0][0], dcm[0][1], dcm[0][2]) b = Vector3(dcm[1][0], dcm[1][1], dcm[1][2]) c = Vector3(dcm[2][0], dcm[2][1], dcm[2][2]) return Matrix3(a, b, c)
[ "def", "_dcm_array_to_matrix3", "(", "self", ",", "dcm", ")", ":", "assert", "(", "dcm", ".", "shape", "==", "(", "3", ",", "3", ")", ")", "a", "=", "Vector3", "(", "dcm", "[", "0", "]", "[", "0", "]", ",", "dcm", "[", "0", "]", "[", "1", "]", ",", "dcm", "[", "0", "]", "[", "2", "]", ")", "b", "=", "Vector3", "(", "dcm", "[", "1", "]", "[", "0", "]", ",", "dcm", "[", "1", "]", "[", "1", "]", ",", "dcm", "[", "1", "]", "[", "2", "]", ")", "c", "=", "Vector3", "(", "dcm", "[", "2", "]", "[", "0", "]", ",", "dcm", "[", "2", "]", "[", "1", "]", ",", "dcm", "[", "2", "]", "[", "2", "]", ")", "return", "Matrix3", "(", "a", ",", "b", ",", "c", ")" ]
Converts dcm array into Matrix3 :param dcm: 3x3 dcm array :returns: Matrix3
[ "Converts", "dcm", "array", "into", "Matrix3", ":", "param", "dcm", ":", "3x3", "dcm", "array", ":", "returns", ":", "Matrix3" ]
python
train
openthread/openthread
tools/harness-thci/OpenThread.py
https://github.com/openthread/openthread/blob/0208d10563aa21c518092985c78ecf9cd223ab74/tools/harness-thci/OpenThread.py#L1988-L2021
def scanJoiner(self, xEUI='*', strPSKd='threadjpaketest'):
        """scan Joiner

        Args:
            xEUI: Joiner's EUI-64
            strPSKd: Joiner's PSKd for commissioning

        Returns:
            True: successfully added Joiner's steering data
            False: failed to add Joiner's steering data
        """
        print '%s call scanJoiner' % self.port

        # long timeout value to avoid automatic joiner removal (in seconds)
        timeout = 500

        if not isinstance(xEUI, str):
            eui64 = self.__convertLongToString(xEUI)

            # prepend 0 at the beginning
            if len(eui64) < 16:
                eui64 = eui64.zfill(16)
                print eui64
        else:
            eui64 = xEUI

        cmd = 'commissioner joiner add %s %s %s' % (eui64, strPSKd, str(timeout))
        print cmd
        if self.__sendCommand(cmd)[0] == 'Done':
            if self.logThreadStatus == self.logStatus['stop']:
                self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(120,))
            return True
        else:
            return False
[ "def", "scanJoiner", "(", "self", ",", "xEUI", "=", "'*'", ",", "strPSKd", "=", "'threadjpaketest'", ")", ":", "print", "'%s call scanJoiner'", "%", "self", ".", "port", "# long timeout value to avoid automatic joiner removal (in seconds)", "timeout", "=", "500", "if", "not", "isinstance", "(", "xEUI", ",", "str", ")", ":", "eui64", "=", "self", ".", "__convertLongToString", "(", "xEUI", ")", "# prepend 0 at the beginning", "if", "len", "(", "eui64", ")", "<", "16", ":", "eui64", "=", "eui64", ".", "zfill", "(", "16", ")", "print", "eui64", "else", ":", "eui64", "=", "xEUI", "cmd", "=", "'commissioner joiner add %s %s %s'", "%", "(", "eui64", ",", "strPSKd", ",", "str", "(", "timeout", ")", ")", "print", "cmd", "if", "self", ".", "__sendCommand", "(", "cmd", ")", "[", "0", "]", "==", "'Done'", ":", "if", "self", ".", "logThreadStatus", "==", "self", ".", "logStatus", "[", "'stop'", "]", ":", "self", ".", "logThread", "=", "ThreadRunner", ".", "run", "(", "target", "=", "self", ".", "__readCommissioningLogs", ",", "args", "=", "(", "120", ",", ")", ")", "return", "True", "else", ":", "return", "False" ]
scan Joiner

Args:
    xEUI: Joiner's EUI-64
    strPSKd: Joiner's PSKd for commissioning

Returns:
    True: successfully added Joiner's steering data
    False: failed to add Joiner's steering data
[ "scan", "Joiner" ]
python
train
log2timeline/plaso
plaso/preprocessors/macos.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/preprocessors/macos.py#L47-L93
def _ParseFileData(self, knowledge_base, file_object): """Parses file content (data) for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails. """ plist_file = plist.PlistFile() try: plist_file.Read(file_object) except IOError as exception: raise errors.PreProcessFail( 'Unable to read: {0:s} with error: {1!s}'.format( self.ARTIFACT_DEFINITION_NAME, exception)) if not plist_file.root_key: raise errors.PreProcessFail(( 'Unable to read: {0:s} with error: missing root key').format( self.ARTIFACT_DEFINITION_NAME)) matches = [] self._FindKeys(plist_file.root_key, self._PLIST_KEYS, matches) if not matches: raise errors.PreProcessFail( 'Unable to read: {0:s} with error: no such keys: {1:s}.'.format( self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS))) name = None value = None for name, value in matches: if value: break if value is None: raise errors.PreProcessFail(( 'Unable to read: {0:s} with error: no values found for keys: ' '{1:s}.').format( self.ARTIFACT_DEFINITION_NAME, ', '.join(self._PLIST_KEYS))) self._ParsePlistKeyValue(knowledge_base, name, value)
[ "def", "_ParseFileData", "(", "self", ",", "knowledge_base", ",", "file_object", ")", ":", "plist_file", "=", "plist", ".", "PlistFile", "(", ")", "try", ":", "plist_file", ".", "Read", "(", "file_object", ")", "except", "IOError", "as", "exception", ":", "raise", "errors", ".", "PreProcessFail", "(", "'Unable to read: {0:s} with error: {1!s}'", ".", "format", "(", "self", ".", "ARTIFACT_DEFINITION_NAME", ",", "exception", ")", ")", "if", "not", "plist_file", ".", "root_key", ":", "raise", "errors", ".", "PreProcessFail", "(", "(", "'Unable to read: {0:s} with error: missing root key'", ")", ".", "format", "(", "self", ".", "ARTIFACT_DEFINITION_NAME", ")", ")", "matches", "=", "[", "]", "self", ".", "_FindKeys", "(", "plist_file", ".", "root_key", ",", "self", ".", "_PLIST_KEYS", ",", "matches", ")", "if", "not", "matches", ":", "raise", "errors", ".", "PreProcessFail", "(", "'Unable to read: {0:s} with error: no such keys: {1:s}.'", ".", "format", "(", "self", ".", "ARTIFACT_DEFINITION_NAME", ",", "', '", ".", "join", "(", "self", ".", "_PLIST_KEYS", ")", ")", ")", "name", "=", "None", "value", "=", "None", "for", "name", ",", "value", "in", "matches", ":", "if", "value", ":", "break", "if", "value", "is", "None", ":", "raise", "errors", ".", "PreProcessFail", "(", "(", "'Unable to read: {0:s} with error: no values found for keys: '", "'{1:s}.'", ")", ".", "format", "(", "self", ".", "ARTIFACT_DEFINITION_NAME", ",", "', '", ".", "join", "(", "self", ".", "_PLIST_KEYS", ")", ")", ")", "self", ".", "_ParsePlistKeyValue", "(", "knowledge_base", ",", "name", ",", "value", ")" ]
Parses file content (data) for a preprocessing attribute. Args: knowledge_base (KnowledgeBase): to fill with preprocessing information. file_object (dfvfs.FileIO): file-like object that contains the artifact value data. Raises: errors.PreProcessFail: if the preprocessing fails.
[ "Parses", "file", "content", "(", "data", ")", "for", "a", "preprocessing", "attribute", "." ]
python
train
Clinical-Genomics/scout
scout/server/blueprints/cases/controllers.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/server/blueprints/cases/controllers.py#L261-L306
def coverage_report_contents(store, institute_obj, case_obj, base_url):
    """Posts a request to chanjo-report and captures the body of the returned response to include it in case report

    Args:
        store(adapter.MongoAdapter)
        institute_obj(models.Institute)
        case_obj(models.Case)
        base_url(str): base url of server

    Returns:
        coverage_data(str): string rendering of the content between <body> and </body> tags of a coverage report
    """
    request_data = {}
    # extract sample ids from case_obj and add them to the post request object:
    request_data['sample_id'] = [ ind['individual_id'] for ind in case_obj['individuals'] ]

    # extract default panel names and default genes from case_obj and add them to the post request object
    distinct_genes = set()
    panel_names = []
    for panel_info in case_obj.get('panels', []):
        if not panel_info.get('is_default'):
            continue
        panel_obj = store.gene_panel(panel_info['panel_name'], version=panel_info.get('version'))
        full_name = "{} ({})".format(panel_obj['display_name'], panel_obj['version'])
        panel_names.append(full_name)
    panel_names = ' ,'.join(panel_names)
    request_data['panel_name'] = panel_names

    # add institute-specific cutoff level to the post request object
    request_data['level'] = institute_obj.get('coverage_cutoff', 15)

    #send get request to chanjo report
    resp = requests.get(base_url+'reports/report', params=request_data)

    #read response content
    soup = BeautifulSoup(resp.text)

    # remove links in the printed version of coverage report
    for tag in soup.find_all('a'):
        tag.replaceWith('')

    #extract body content using BeautifulSoup
    coverage_data = ''.join(['%s' % x for x in soup.body.contents])

    return coverage_data
[ "def", "coverage_report_contents", "(", "store", ",", "institute_obj", ",", "case_obj", ",", "base_url", ")", ":", "request_data", "=", "{", "}", "# extract sample ids from case_obj and add them to the post request object:", "request_data", "[", "'sample_id'", "]", "=", "[", "ind", "[", "'individual_id'", "]", "for", "ind", "in", "case_obj", "[", "'individuals'", "]", "]", "# extract default panel names and default genes from case_obj and add them to the post request object", "distinct_genes", "=", "set", "(", ")", "panel_names", "=", "[", "]", "for", "panel_info", "in", "case_obj", ".", "get", "(", "'panels'", ",", "[", "]", ")", ":", "if", "not", "panel_info", ".", "get", "(", "'is_default'", ")", ":", "continue", "panel_obj", "=", "store", ".", "gene_panel", "(", "panel_info", "[", "'panel_name'", "]", ",", "version", "=", "panel_info", ".", "get", "(", "'version'", ")", ")", "full_name", "=", "\"{} ({})\"", ".", "format", "(", "panel_obj", "[", "'display_name'", "]", ",", "panel_obj", "[", "'version'", "]", ")", "panel_names", ".", "append", "(", "full_name", ")", "panel_names", "=", "' ,'", ".", "join", "(", "panel_names", ")", "request_data", "[", "'panel_name'", "]", "=", "panel_names", "# add institute-specific cutoff level to the post request object", "request_data", "[", "'level'", "]", "=", "institute_obj", ".", "get", "(", "'coverage_cutoff'", ",", "15", ")", "#send get request to chanjo report", "resp", "=", "requests", ".", "get", "(", "base_url", "+", "'reports/report'", ",", "params", "=", "request_data", ")", "#read response content", "soup", "=", "BeautifulSoup", "(", "resp", ".", "text", ")", "# remove links in the printed version of coverage report", "for", "tag", "in", "soup", ".", "find_all", "(", "'a'", ")", ":", "tag", ".", "replaceWith", "(", "''", ")", "#extract body content using BeautifulSoup", "coverage_data", "=", "''", ".", "join", "(", "[", "'%s'", "%", "x", "for", "x", "in", "soup", ".", "body", ".", "contents", "]", ")", "return", "coverage_data" ]
Posts a request to chanjo-report and capture the body of the returned response to include it in case report Args: store(adapter.MongoAdapter) institute_obj(models.Institute) case_obj(models.Case) base_url(str): base url of server Returns: coverage_data(str): string rendering of the content between <body </body> tags of a coverage report
[ "Posts", "a", "request", "to", "chanjo", "-", "report", "and", "capture", "the", "body", "of", "the", "returned", "response", "to", "include", "it", "in", "case", "report" ]
python
test
BoGoEngine/bogo-python
bogo/core.py
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L469-L489
def _can_undo(comps, trans_list):
    """
    Return whether components can be undone with one of the transformations
    in trans_list.
    """
    comps = list(comps)
    accent_list = list(map(accent.get_accent_char, comps[1]))
    mark_list = list(map(mark.get_mark_char, utils.join(comps)))
    action_list = list(map(lambda x: _get_action(x), trans_list))

    def atomic_check(action):
        """
        Check if the `action` created one of the marks, accents, or characters
        in `comps`.
        """
        return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
            or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
            or (action[0] == _Action.ADD_CHAR and action[1] == \
                accent.remove_accent_char(comps[1][-1]))  # ơ, ư

    return any(map(atomic_check, action_list))
[ "def", "_can_undo", "(", "comps", ",", "trans_list", ")", ":", "comps", "=", "list", "(", "comps", ")", "accent_list", "=", "list", "(", "map", "(", "accent", ".", "get_accent_char", ",", "comps", "[", "1", "]", ")", ")", "mark_list", "=", "list", "(", "map", "(", "mark", ".", "get_mark_char", ",", "utils", ".", "join", "(", "comps", ")", ")", ")", "action_list", "=", "list", "(", "map", "(", "lambda", "x", ":", "_get_action", "(", "x", ")", ",", "trans_list", ")", ")", "def", "atomic_check", "(", "action", ")", ":", "\"\"\"\n Check if the `action` created one of the marks, accents, or characters\n in `comps`.\n \"\"\"", "return", "(", "action", "[", "0", "]", "==", "_Action", ".", "ADD_ACCENT", "and", "action", "[", "1", "]", "in", "accent_list", ")", "or", "(", "action", "[", "0", "]", "==", "_Action", ".", "ADD_MARK", "and", "action", "[", "1", "]", "in", "mark_list", ")", "or", "(", "action", "[", "0", "]", "==", "_Action", ".", "ADD_CHAR", "and", "action", "[", "1", "]", "==", "accent", ".", "remove_accent_char", "(", "comps", "[", "1", "]", "[", "-", "1", "]", ")", ")", "# ơ, ư", "return", "any", "(", "map", "(", "atomic_check", ",", "action_list", ")", ")" ]
Return whether components can be undone with one of the transformations
in trans_list.
[ "Return", "whether", "a", "components", "can", "be", "undone", "with", "one", "of", "the", "transformation", "in", "trans_list", "." ]
python
train
noahbenson/neuropythy
neuropythy/geometry/util.py
https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/util.py#L23-L31
def vector_angle_cos(u, v): ''' vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the cosines. ''' u = np.asarray(u) v = np.asarray(v) return (u * v).sum(0) / np.sqrt((u ** 2).sum(0) * (v ** 2).sum(0))
[ "def", "vector_angle_cos", "(", "u", ",", "v", ")", ":", "u", "=", "np", ".", "asarray", "(", "u", ")", "v", "=", "np", ".", "asarray", "(", "v", ")", "return", "(", "u", "*", "v", ")", ".", "sum", "(", "0", ")", "/", "np", ".", "sqrt", "(", "(", "u", "**", "2", ")", ".", "sum", "(", "0", ")", "*", "(", "v", "**", "2", ")", ".", "sum", "(", "0", ")", ")" ]
vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the cosines.
[ "vector_angle_cos", "(", "u", "v", ")", "yields", "the", "cosine", "of", "the", "angle", "between", "the", "two", "vectors", "u", "and", "v", ".", "If", "u", "or", "v", "(", "or", "both", ")", "is", "a", "(", "d", "x", "n", ")", "matrix", "of", "n", "vectors", "the", "result", "will", "be", "a", "length", "n", "vector", "of", "the", "cosines", "." ]
python
train
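A quick numerical check of vector_angle_cos, including the batched (d x n) form where each column is one vector:

    import numpy as np

    print(vector_angle_cos([1, 0], [0, 1]))  # 0.0 -- orthogonal
    print(vector_angle_cos([2, 0], [5, 0]))  # 1.0 -- parallel
    u = np.array([[1, 1],
                  [0, 1]])  # columns (1, 0) and (1, 1)
    v = np.array([[1, 0],
                  [0, 1]])  # columns (1, 0) and (0, 1)
    print(vector_angle_cos(u, v))  # [1.0, 0.7071...] -- one cosine per column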
bioasp/ingranalyze
src/query.py
https://github.com/bioasp/ingranalyze/blob/60bdd679b6044a4d142abbca5bbe3cd5e9fd7596/src/query.py#L198-L214
def whatsnew(instance,pred): ''' [whatsnew(instance,pred)] is a TermSet equal to [pred] where all predicates vlabel and elabel which have a corresponding obs_vlabel and obs_elabel in [instance] have been deleted. This function is meant to see which of the invariants are not a direct consequence of the observations. ''' accu = TermSet(pred) for t in instance: if t.pred() == 'obs_vlabel': [_,e,v,s] = t.explode() accu.discard(Term('vlabel',[e,v,s])) elif t.p('obs_elabel'): [_,j,i,s] = t.explode() accu.discard(Term('elabel',[j,i,s])) return accu
[ "def", "whatsnew", "(", "instance", ",", "pred", ")", ":", "accu", "=", "TermSet", "(", "pred", ")", "for", "t", "in", "instance", ":", "if", "t", ".", "pred", "(", ")", "==", "'obs_vlabel'", ":", "[", "_", ",", "e", ",", "v", ",", "s", "]", "=", "t", ".", "explode", "(", ")", "accu", ".", "discard", "(", "Term", "(", "'vlabel'", ",", "[", "e", ",", "v", ",", "s", "]", ")", ")", "elif", "t", ".", "p", "(", "'obs_elabel'", ")", ":", "[", "_", ",", "j", ",", "i", ",", "s", "]", "=", "t", ".", "explode", "(", ")", "accu", ".", "discard", "(", "Term", "(", "'elabel'", ",", "[", "j", ",", "i", ",", "s", "]", ")", ")", "return", "accu" ]
[whatsnew(instance,pred)] is a TermSet equal to [pred] where all predicates vlabel and elabel which have a corresponding obs_vlabel and obs_elabel in [instance] have been deleted. This function is meant to see which of the invariants are not a direct consequence of the observations.
[ "[", "whatsnew", "(", "instance", "pred", ")", "]", "is", "a", "TermSet", "equal", "to", "[", "pred", "]", "where", "all", "predicates", "vlabel", "and", "elabel", "which", "have", "a", "corresponding", "obs_vlabel", "and", "obs_elabel", "in", "[", "instance", "]", "have", "been", "deleted", ".", "This", "function", "is", "meant", "to", "see", "which", "of", "the", "invariants", "are", "not", "a", "direct", "consequence", "of", "the", "observations", "." ]
python
train
mnick/scikit-tensor
sktensor/sptensor.py
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/sptensor.py#L179-L195
def _ttm_me_compute(self, V, edims, sdims, transp): """ Assume Y = T x_i V_i for i = 1...n can fit into memory """ shapeY = np.copy(self.shape) # Determine size of Y for n in np.union1d(edims, sdims): shapeY[n] = V[n].shape[1] if transp else V[n].shape[0] # Allocate Y (final result) and v (vectors for elementwise computations) Y = zeros(shapeY) shapeY = array(shapeY) v = [None for _ in range(len(edims))] for i in range(np.prod(shapeY[edims])): rsubs = unravel_index(shapeY[edims], i)
[ "def", "_ttm_me_compute", "(", "self", ",", "V", ",", "edims", ",", "sdims", ",", "transp", ")", ":", "shapeY", "=", "np", ".", "copy", "(", "self", ".", "shape", ")", "# Determine size of Y", "for", "n", "in", "np", ".", "union1d", "(", "edims", ",", "sdims", ")", ":", "shapeY", "[", "n", "]", "=", "V", "[", "n", "]", ".", "shape", "[", "1", "]", "if", "transp", "else", "V", "[", "n", "]", ".", "shape", "[", "0", "]", "# Allocate Y (final result) and v (vectors for elementwise computations)", "Y", "=", "zeros", "(", "shapeY", ")", "shapeY", "=", "array", "(", "shapeY", ")", "v", "=", "[", "None", "for", "_", "in", "range", "(", "len", "(", "edims", ")", ")", "]", "for", "i", "in", "range", "(", "np", ".", "prod", "(", "shapeY", "[", "edims", "]", ")", ")", ":", "rsubs", "=", "unravel_index", "(", "shapeY", "[", "edims", "]", ",", "i", ")" ]
Assume Y = T x_i V_i for i = 1...n can fit into memory
[ "Assume", "Y", "=", "T", "x_i", "V_i", "for", "i", "=", "1", "...", "n", "can", "fit", "into", "memory" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L2189-L2214
def running_global_pool_1d(inputs, pooling_type="MAX"): """Same global pool, but only for the elements up to the current element. Useful for outputs where the state of future elements is not known. Takes no mask as all elements up to the current element are assumed to exist. Currently only supports maximum. Equivalent to using a lower triangle bias. Args: inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. pooling_type: Pooling type to use. Currently only supports 'MAX'. Returns: A tensor of shape [batch_size, sequence_length, input_dims] containing the running 'totals'. """ del pooling_type with tf.name_scope("running_global_pool", values=[inputs]): scan_fct = tf.maximum # Permute inputs so seq_length is first. elems = tf.transpose(inputs, [1, 0, 2]) # Perform scan. cumulatives = tf.scan(scan_fct, elems, swap_memory=True) # Permute output to get back to original order. output = tf.transpose(cumulatives, [1, 0, 2]) return output
[ "def", "running_global_pool_1d", "(", "inputs", ",", "pooling_type", "=", "\"MAX\"", ")", ":", "del", "pooling_type", "with", "tf", ".", "name_scope", "(", "\"running_global_pool\"", ",", "values", "=", "[", "inputs", "]", ")", ":", "scan_fct", "=", "tf", ".", "maximum", "# Permute inputs so seq_length is first.", "elems", "=", "tf", ".", "transpose", "(", "inputs", ",", "[", "1", ",", "0", ",", "2", "]", ")", "# Perform scan.", "cumulatives", "=", "tf", ".", "scan", "(", "scan_fct", ",", "elems", ",", "swap_memory", "=", "True", ")", "# Permute output to get back to original order.", "output", "=", "tf", ".", "transpose", "(", "cumulatives", ",", "[", "1", ",", "0", ",", "2", "]", ")", "return", "output" ]
Same global pool, but only for the elements up to the current element. Useful for outputs where the state of future elements is not known. Takes no mask as all elements up to the current element are assumed to exist. Currently only supports maximum. Equivalent to using a lower triangle bias. Args: inputs: A tensor of shape [batch_size, sequence_length, input_dims] containing the sequences of input vectors. pooling_type: Pooling type to use. Currently only supports 'MAX'. Returns: A tensor of shape [batch_size, sequence_length, input_dims] containing the running 'totals'.
[ "Same", "global", "pool", "but", "only", "for", "the", "elements", "up", "to", "the", "current", "element", "." ]
python
train
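The running-max semantics are easy to see in a NumPy analogue, where np.maximum.accumulate along the sequence axis plays the role of the tf.scan in the record above:

    import numpy as np

    x = np.array([[[1.], [3.], [2.], [5.]]])  # [batch=1, seq_len=4, dims=1]
    running_max = np.maximum.accumulate(x, axis=1)
    print(running_max[0, :, 0])  # [1. 3. 3. 5.] -- max over elements seen so far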
rochacbruno/python-pagseguro
pagseguro/__init__.py
https://github.com/rochacbruno/python-pagseguro/blob/18a9ca3301783cb323e838574b59f9ddffa9a593/pagseguro/__init__.py#L260-L277
def query_transactions(self, initial_date, final_date, page=None,
                           max_results=None):
        """ Query transactions by date range """
        last_page = False
        results = []
        while last_page is False:
            search_result = self._consume_query_transactions(
                initial_date, final_date, page, max_results)
            results.extend(search_result.transactions)
            if search_result.current_page is None or \
                search_result.total_pages is None or \
                search_result.current_page == search_result.total_pages:
                last_page = True
            else:
                page = search_result.current_page + 1

        return results
[ "def", "query_transactions", "(", "self", ",", "initial_date", ",", "final_date", ",", "page", "=", "None", ",", "max_results", "=", "None", ")", ":", "last_page", "=", "False", "results", "=", "[", "]", "while", "last_page", "is", "False", ":", "search_result", "=", "self", ".", "_consume_query_transactions", "(", "initial_date", ",", "final_date", ",", "page", ",", "max_results", ")", "results", ".", "extend", "(", "search_result", ".", "transactions", ")", "if", "search_result", ".", "current_page", "is", "None", "or", "search_result", ".", "total_pages", "is", "None", "or", "search_result", ".", "current_page", "==", "search_result", ".", "total_pages", ":", "last_page", "=", "True", "else", ":", "page", "=", "search_result", ".", "current_page", "+", "1", "return", "results" ]
Query transactions by date range
[ "query", "transaction", "by", "date", "range" ]
python
train
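A usage sketch for query_transactions; the credentials are placeholders, and the exact date-string format expected by the PagSeguro API is an assumption here:

    from pagseguro import PagSeguro

    pg = PagSeguro(email='seller@example.com', token='YOUR_TOKEN')  # hypothetical credentials
    txs = pg.query_transactions('2018-01-01T00:00', '2018-01-31T23:59')
    for t in txs:
        print(t)  # pagination across result pages is handled internally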
pandas-dev/pandas
pandas/core/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/base.py#L86-L95
def _reset_cache(self, key=None): """ Reset cached properties. If ``key`` is passed, only clears that key. """ if getattr(self, '_cache', None) is None: return if key is None: self._cache.clear() else: self._cache.pop(key, None)
[ "def", "_reset_cache", "(", "self", ",", "key", "=", "None", ")", ":", "if", "getattr", "(", "self", ",", "'_cache'", ",", "None", ")", "is", "None", ":", "return", "if", "key", "is", "None", ":", "self", ".", "_cache", ".", "clear", "(", ")", "else", ":", "self", ".", "_cache", ".", "pop", "(", "key", ",", "None", ")" ]
Reset cached properties. If ``key`` is passed, only clears that key.
[ "Reset", "cached", "properties", ".", "If", "key", "is", "passed", "only", "clears", "that", "key", "." ]
python
train
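A self-contained sketch of the caching pattern this helper supports; the class below is illustrative, not a pandas internal:

    class Cached:
        def __init__(self):
            self._cache = {}

        def _reset_cache(self, key=None):
            if getattr(self, '_cache', None) is None:
                return
            if key is None:
                self._cache.clear()
            else:
                self._cache.pop(key, None)

    obj = Cached()
    obj._cache['shape'] = (3, 4)   # stand-in for a cached property value
    obj._reset_cache('shape')      # invalidate just one key
    obj._reset_cache()             # or clear the whole cache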
BD2KOnFHIR/fhirtordf
fhirtordf/loaders/fhirresourceloader.py
https://github.com/BD2KOnFHIR/fhirtordf/blob/f97b3df683fa4caacf5cf4f29699ab060bcc0fbf/fhirtordf/loaders/fhirresourceloader.py#L157-L185
def add_value_node(self, subj: Node, pred: URIRef, val: Union[JsonObj, str, List], valuetype: Optional[URIRef]= None) -> None: """ Expand val according to the range of pred and add it to the graph :param subj: graph subject :param pred: graph predicate :param val: JSON representation of target object :param valuetype: predicate type if it can't be directly determined """ pred_type = self._meta.predicate_type(pred) if not valuetype else valuetype # Transform generic resources into specific types if pred_type == FHIR.Resource: pred_type = FHIR[val.resourceType] val_meta = FHIRMetaVocEntry(self._vocabulary, pred_type) for k, p in val_meta.predicates().items(): if k in val: self.add_val(subj, p, val, k) if pred == FHIR.CodeableConcept.coding: self.add_type_arc(subj, val) elif k == "value" and val_meta.predicate_type(p) == FHIR.Element: # value / Element is the wild card combination -- if there is a "value[x]" in val, emit it where the # type comes from 'x' for vk in val._as_dict.keys(): if vk.startswith(k): self.add_val(subj, FHIR['Extension.' + vk], val, vk, self._meta.value_predicate_to_type(vk)) else: # Can have an extension only without a primary value self.add_extension_val(subj, val, k, p)
[ "def", "add_value_node", "(", "self", ",", "subj", ":", "Node", ",", "pred", ":", "URIRef", ",", "val", ":", "Union", "[", "JsonObj", ",", "str", ",", "List", "]", ",", "valuetype", ":", "Optional", "[", "URIRef", "]", "=", "None", ")", "->", "None", ":", "pred_type", "=", "self", ".", "_meta", ".", "predicate_type", "(", "pred", ")", "if", "not", "valuetype", "else", "valuetype", "# Transform generic resources into specific types", "if", "pred_type", "==", "FHIR", ".", "Resource", ":", "pred_type", "=", "FHIR", "[", "val", ".", "resourceType", "]", "val_meta", "=", "FHIRMetaVocEntry", "(", "self", ".", "_vocabulary", ",", "pred_type", ")", "for", "k", ",", "p", "in", "val_meta", ".", "predicates", "(", ")", ".", "items", "(", ")", ":", "if", "k", "in", "val", ":", "self", ".", "add_val", "(", "subj", ",", "p", ",", "val", ",", "k", ")", "if", "pred", "==", "FHIR", ".", "CodeableConcept", ".", "coding", ":", "self", ".", "add_type_arc", "(", "subj", ",", "val", ")", "elif", "k", "==", "\"value\"", "and", "val_meta", ".", "predicate_type", "(", "p", ")", "==", "FHIR", ".", "Element", ":", "# value / Element is the wild card combination -- if there is a \"value[x]\" in val, emit it where the", "# type comes from 'x'", "for", "vk", "in", "val", ".", "_as_dict", ".", "keys", "(", ")", ":", "if", "vk", ".", "startswith", "(", "k", ")", ":", "self", ".", "add_val", "(", "subj", ",", "FHIR", "[", "'Extension.'", "+", "vk", "]", ",", "val", ",", "vk", ",", "self", ".", "_meta", ".", "value_predicate_to_type", "(", "vk", ")", ")", "else", ":", "# Can have an extension only without a primary value", "self", ".", "add_extension_val", "(", "subj", ",", "val", ",", "k", ",", "p", ")" ]
Expand val according to the range of pred and add it to the graph :param subj: graph subject :param pred: graph predicate :param val: JSON representation of target object :param valuetype: predicate type if it can't be directly determined
[ "Expand", "val", "according", "to", "the", "range", "of", "pred", "and", "add", "it", "to", "the", "graph", ":", "param", "subj", ":", "graph", "subject", ":", "param", "pred", ":", "graph", "predicate", ":", "param", "val", ":", "JSON", "representation", "of", "target", "object", ":", "param", "valuetype", ":", "predicate", "type", "if", "it", "can", "t", "be", "directly", "determined" ]
python
train
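A standalone sketch of the "value[x]" convention that `add_value_node` above handles: a key beginning with ``value`` carries its FHIR datatype in the suffix. The observation dict below is invented for illustration and stands in for the JsonObj the real code walks:

# Hypothetical miniature of FHIR's value[x] naming convention.
obs = {"valueString": "positive", "status": "final"}

for key, value in obs.items():
    if key.startswith("value"):
        datatype = key[len("value"):]   # "String" from "valueString"
        print("emit Extension.{} with datatype {} and object {!r}".format(key, datatype, value))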
kovidgoyal/html5-parser
src/html5_parser/__init__.py
https://github.com/kovidgoyal/html5-parser/blob/65ce451652cbab71ed86a9b53ac8c8906f2a2d67/src/html5_parser/__init__.py#L121-L207
def parse(
    html, transport_encoding=None, namespace_elements=False, treebuilder='lxml',
    fallback_encoding=None, keep_doctype=True, maybe_xhtml=False,
    return_root=True, line_number_attr=None, sanitize_names=True,
    stack_size=16 * 1024
):
    '''
    Parse the specified :attr:`html` and return the parsed representation.

    :param html: The HTML to be parsed. Can be either bytes or a unicode string.

    :param transport_encoding: If specified, assume the passed in bytes are in this encoding.
        Ignored if :attr:`html` is unicode.

    :param namespace_elements:
        Add XML namespaces when parsing so that the resulting tree is XHTML.

    :param treebuilder:
        The type of tree to return. Note that only the lxml treebuilder is fast, as all
        other treebuilders are implemented in python, not C. Supported values are:
          * `lxml <http://lxml.de>`_ -- the default, and fastest
          * etree (the python stdlib :mod:`xml.etree.ElementTree`)
          * dom (the python stdlib :mod:`xml.dom.minidom`)
          * `soup <https://www.crummy.com/software/BeautifulSoup>`_ -- BeautifulSoup,
            which must be installed or it will raise an :class:`ImportError`

    :param fallback_encoding: If no encoding could be detected, then use this encoding.
        Defaults to an encoding based on system locale.

    :param keep_doctype: Keep the <DOCTYPE> (if any).

    :param maybe_xhtml: Useful when it is unknown if the HTML to be parsed is
        actually XHTML. Changes the HTML 5 parsing algorithm to be more
        suitable for XHTML. In particular handles self-closed CDATA elements.
        So a ``<title/>`` or ``<style/>`` in the HTML will not completely break
        parsing. Also preserves namespaced tags and attributes even for namespaces
        not supported by HTML 5 (this works only with the ``lxml`` treebuilder).
        Note that setting this also implicitly sets ``namespace_elements``.

    :param return_root: If True, return the root node of the document, otherwise
        return the tree object for the document.

    :param line_number_attr: The optional name of an attribute used to store the line number
        of every element. If set, this attribute will be added to each element with the
        element's line number.

    :param sanitize_names: Ensure tag and attributes contain only ASCII alphanumeric
        characters, underscores, hyphens and periods. This ensures that the resulting
        tree is also valid XML. Any characters outside this set are replaced by
        underscores. Note that this is not strictly HTML 5 spec compliant, so turn it
        off if you need strict spec compliance.

    :param stack_size: The initial size (number of items) in the stack. The
        default is sufficient to avoid memory allocations for all but the
        largest documents.
    '''
    data = as_utf8(html or b'', transport_encoding, fallback_encoding)
    treebuilder = normalize_treebuilder(treebuilder)
    if treebuilder == 'soup':
        from .soup import parse
        return parse(
            data, return_root=return_root, keep_doctype=keep_doctype,
            stack_size=stack_size)
    if treebuilder not in NAMESPACE_SUPPORTING_BUILDERS:
        namespace_elements = False

    capsule = html_parser.parse(
        data,
        namespace_elements=namespace_elements or maybe_xhtml,
        keep_doctype=keep_doctype,
        maybe_xhtml=maybe_xhtml,
        line_number_attr=line_number_attr,
        sanitize_names=sanitize_names,
        stack_size=stack_size)

    ans = etree.adopt_external_document(capsule)
    if treebuilder == 'lxml':
        return ans.getroot() if return_root else ans
    m = importlib.import_module('html5_parser.' + treebuilder)
    return m.adapt(ans, return_root=return_root)
[ "def", "parse", "(", "html", ",", "transport_encoding", "=", "None", ",", "namespace_elements", "=", "False", ",", "treebuilder", "=", "'lxml'", ",", "fallback_encoding", "=", "None", ",", "keep_doctype", "=", "True", ",", "maybe_xhtml", "=", "False", ",", "return_root", "=", "True", ",", "line_number_attr", "=", "None", ",", "sanitize_names", "=", "True", ",", "stack_size", "=", "16", "*", "1024", ")", ":", "data", "=", "as_utf8", "(", "html", "or", "b''", ",", "transport_encoding", ",", "fallback_encoding", ")", "treebuilder", "=", "normalize_treebuilder", "(", "treebuilder", ")", "if", "treebuilder", "==", "'soup'", ":", "from", ".", "soup", "import", "parse", "return", "parse", "(", "data", ",", "return_root", "=", "return_root", ",", "keep_doctype", "=", "keep_doctype", ",", "stack_size", "=", "stack_size", ")", "if", "treebuilder", "not", "in", "NAMESPACE_SUPPORTING_BUILDERS", ":", "namespace_elements", "=", "False", "capsule", "=", "html_parser", ".", "parse", "(", "data", ",", "namespace_elements", "=", "namespace_elements", "or", "maybe_xhtml", ",", "keep_doctype", "=", "keep_doctype", ",", "maybe_xhtml", "=", "maybe_xhtml", ",", "line_number_attr", "=", "line_number_attr", ",", "sanitize_names", "=", "sanitize_names", ",", "stack_size", "=", "stack_size", ")", "ans", "=", "etree", ".", "adopt_external_document", "(", "capsule", ")", "if", "treebuilder", "==", "'lxml'", ":", "return", "ans", ".", "getroot", "(", ")", "if", "return_root", "else", "ans", "m", "=", "importlib", ".", "import_module", "(", "'html5_parser.'", "+", "treebuilder", ")", "return", "m", ".", "adapt", "(", "ans", ",", "return_root", "=", "return_root", ")" ]
Parse the specified :attr:`html` and return the parsed representation. :param html: The HTML to be parsed. Can be either bytes or a unicode string. :param transport_encoding: If specified, assume the passed in bytes are in this encoding. Ignored if :attr:`html` is unicode. :param namespace_elements: Add XML namespaces when parsing so that the resulting tree is XHTML. :param treebuilder: The type of tree to return. Note that only the lxml treebuilder is fast, as all other treebuilders are implemented in python, not C. Supported values are: * `lxml <http://lxml.de>`_ -- the default, and fastest * etree (the python stdlib :mod:`xml.etree.ElementTree`) * dom (the python stdlib :mod:`xml.dom.minidom`) * `soup <https://www.crummy.com/software/BeautifulSoup>`_ -- BeautifulSoup, which must be installed or it will raise an :class:`ImportError` :param fallback_encoding: If no encoding could be detected, then use this encoding. Defaults to an encoding based on system locale. :param keep_doctype: Keep the <DOCTYPE> (if any). :param maybe_xhtml: Useful when it is unknown if the HTML to be parsed is actually XHTML. Changes the HTML 5 parsing algorithm to be more suitable for XHTML. In particular handles self-closed CDATA elements. So a ``<title/>`` or ``<style/>`` in the HTML will not completely break parsing. Also preserves namespaced tags and attributes even for namespaces not supported by HTML 5 (this works only with the ``lxml`` treebuilder). Note that setting this also implicitly sets ``namespace_elements``. :param return_root: If True, return the root node of the document, otherwise return the tree object for the document. :param line_number_attr: The optional name of an attribute used to store the line number of every element. If set, this attribute will be added to each element with the element's line number. :param sanitize_names: Ensure tag and attributes contain only ASCII alphanumeric characters, underscores, hyphens and periods. This ensures that the resulting tree is also valid XML. Any characters outside this set are replaced by underscores. Note that this is not strictly HTML 5 spec compliant, so turn it off if you need strict spec compliance. :param stack_size: The initial size (number of items) in the stack. The default is sufficient to avoid memory allocations for all but the largest documents.
[ "Parse", "the", "specified", ":", "attr", ":", "html", "and", "return", "the", "parsed", "representation", "." ]
python
train
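Typical use of the `parse` function documented above; the HTML snippet is made up, and `html5-parser` plus `lxml` are assumed installed:

from html5_parser import parse

root = parse('<p>Hello <b>world</b></p>')   # lxml root element by default
print(root.tag)                             # 'html' -- a full document is built
tree = parse('<p>hi</p>', treebuilder='etree', return_root=False)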
JIC-CSB/jicimagelib
jicimagelib/io.py
https://github.com/JIC-CSB/jicimagelib/blob/fbd67accb2e6d55969c6d4ed7e8b4bb4ab65cd44/jicimagelib/io.py#L148-L160
def run_command(self, input_file, output_dir=None):
    """Return the command for running bfconvert as a list.

    :param input_file: path to microscopy image to be converted
    :param output_dir: directory to write output tiff files to
    :returns: list
    """
    base_name = os.path.basename(input_file)
    name, suffix = base_name.split('.', 1)
    output_file = '{}{}.tif'.format(name, self.split_pattern)
    if output_dir:
        output_file = os.path.join(output_dir, output_file)
    return ['bfconvert', input_file, output_file]
[ "def", "run_command", "(", "self", ",", "input_file", ",", "output_dir", "=", "None", ")", ":", "base_name", "=", "os", ".", "path", ".", "basename", "(", "input_file", ")", "name", ",", "suffix", "=", "base_name", ".", "split", "(", "'.'", ",", "1", ")", "output_file", "=", "'{}{}.tif'", ".", "format", "(", "name", ",", "self", ".", "split_pattern", ")", "if", "output_dir", ":", "output_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "output_file", ")", "return", "[", "'bfconvert'", ",", "input_file", ",", "output_file", "]" ]
Return the command for running bfconvert as a list. :param input_file: path to microscopy image to be converted :param output_dir: directory to write output tiff files to :returns: list
[ "Return", "the", "command", "for", "running", "bfconvert", "as", "a", "list", ".", ":", "param", "input_file", ":", "path", "to", "microscopy", "image", "to", "be", "converted", ":", "param", "ouput_dir", ":", "directory", "to", "write", "output", "tiff", "files", "to", ":", "returns", ":", "list" ]
python
train
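What `run_command` above computes, rewritten without the surrounding class so it runs on its own; the `split_pattern` default is an assumption about how the converter is configured:

import os

def run_command(input_file, split_pattern='_S%s', output_dir=None):
    # Same steps as the record: strip the suffix, append the split pattern, add '.tif'.
    name, suffix = os.path.basename(input_file).split('.', 1)
    output_file = '{}{}.tif'.format(name, split_pattern)
    if output_dir:
        output_file = os.path.join(output_dir, output_file)
    return ['bfconvert', input_file, output_file]

print(run_command('stack.lif', output_dir='/tmp'))
# ['bfconvert', 'stack.lif', '/tmp/stack_S%s.tif']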
saltstack/salt
salt/renderers/pyobjects.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/renderers/pyobjects.py#L339-L365
def load_states(): ''' This loads our states into the salt __context__ ''' states = {} # the loader expects to find pillar & grain data __opts__['grains'] = salt.loader.grains(__opts__) __opts__['pillar'] = __pillar__ lazy_utils = salt.loader.utils(__opts__) lazy_funcs = salt.loader.minion_mods(__opts__, utils=lazy_utils) lazy_serializers = salt.loader.serializers(__opts__) lazy_states = salt.loader.states(__opts__, lazy_funcs, lazy_utils, lazy_serializers) # TODO: some way to lazily do this? This requires loading *all* state modules for key, func in six.iteritems(lazy_states): if '.' not in key: continue mod_name, func_name = key.split('.', 1) if mod_name not in states: states[mod_name] = {} states[mod_name][func_name] = func __context__['pyobjects_states'] = states
[ "def", "load_states", "(", ")", ":", "states", "=", "{", "}", "# the loader expects to find pillar & grain data", "__opts__", "[", "'grains'", "]", "=", "salt", ".", "loader", ".", "grains", "(", "__opts__", ")", "__opts__", "[", "'pillar'", "]", "=", "__pillar__", "lazy_utils", "=", "salt", ".", "loader", ".", "utils", "(", "__opts__", ")", "lazy_funcs", "=", "salt", ".", "loader", ".", "minion_mods", "(", "__opts__", ",", "utils", "=", "lazy_utils", ")", "lazy_serializers", "=", "salt", ".", "loader", ".", "serializers", "(", "__opts__", ")", "lazy_states", "=", "salt", ".", "loader", ".", "states", "(", "__opts__", ",", "lazy_funcs", ",", "lazy_utils", ",", "lazy_serializers", ")", "# TODO: some way to lazily do this? This requires loading *all* state modules", "for", "key", ",", "func", "in", "six", ".", "iteritems", "(", "lazy_states", ")", ":", "if", "'.'", "not", "in", "key", ":", "continue", "mod_name", ",", "func_name", "=", "key", ".", "split", "(", "'.'", ",", "1", ")", "if", "mod_name", "not", "in", "states", ":", "states", "[", "mod_name", "]", "=", "{", "}", "states", "[", "mod_name", "]", "[", "func_name", "]", "=", "func", "__context__", "[", "'pyobjects_states'", "]", "=", "states" ]
This loads our states into the salt __context__
[ "This", "loads", "our", "states", "into", "the", "salt", "__context__" ]
python
train
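The heart of `load_states` is regrouping the loader's flat ``mod.func`` keys into a nested dict; a self-contained sketch with dummy loader entries:

lazy_states = {'file.managed': object(), 'file.directory': object(),
               'pkg.installed': object(), 'top': object()}

states = {}
for key, func in lazy_states.items():
    if '.' not in key:          # skip non-namespaced keys, as the record does
        continue
    mod_name, func_name = key.split('.', 1)
    states.setdefault(mod_name, {})[func_name] = func

print(sorted(states))           # ['file', 'pkg']
print(sorted(states['file']))   # ['directory', 'managed']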
monarch-initiative/dipper
dipper/sources/Decipher.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/Decipher.py#L101-L226
def _process_ddg2p_annotations(self, limit):
        """
        The ddg2p annotations associate a gene symbol to an omim disease,
        along with some HPO ids and pubs. The gene symbols come from gencode,
        which in turn come from HGNC official gene symbols. Therefore,
        we use the HGNC source class to get the id/symbol mapping for
        use in our annotations here.

        According to http://www.gencodegenes.org/faq.html,
        "Gene names are usually HGNC or MGI-approved gene symbols mapped
        to the GENCODE genes by the Ensembl xref pipeline. Sometimes,
        when there is no official gene symbol, the Havana clone-based
        name is used."

        The kind of variation that is linked to a disease is indicated
        (LOF, GOF, CNV, etc) in the source data.
        Here, we create an anonymous variant of the specified gene of
        the indicated type (mapped to the sequence ontology (SO)).

        :param limit:
        :return:

        """
        line_counter = 0
        graph = self.graph
        # in order for this to work, we need to map the HGNC id-symbol;
        hgnc = HGNC(self.graph_type, self.are_bnodes_skolemized)
        hgnc_symbol_id_map = hgnc.get_symbol_id_map()
        myzip = ZipFile(
            '/'.join((self.rawdir, self.files['annot']['file'])), 'r')
        # use the ddg2p.txt file
        fname = 'ddg2p.txt'
        unmapped_omim_counter = 0
        unmapped_gene_count = 0
        with myzip.open(fname, 'r') as f:
            f = io.TextIOWrapper(f)
            reader = csv.reader(f, delimiter='\t', quotechar='\"')
            # score_means_by_measure = {}
            # strain_scores_by_measure = {}   # TODO these are unused
            for row in reader:
                line_counter += 1
                if re.match(r'#', row[0]):   # skip comments
                    continue

                (gencode_gene_name, mode, category, consequence, disease,
                 omim, ddg2p_id, pubmed_ids, hpo_codes) = row

                hgnc_id = hgnc_symbol_id_map.get(gencode_gene_name.strip())
                if hgnc_id is None:
                    LOG.error(
                        "Couldn't map the gene symbol %s to HGNC.",
                        gencode_gene_name)
                    unmapped_gene_count += 1
                    continue

                # add the gene
                self.model.addClassToGraph(hgnc_id, gencode_gene_name)

                # TODO make VSLC with the variation
                #   to associate with the disorder
                # TODO use the Inheritance and Mutation consequence
                #   to classify the VSLCs

                allele_id = self.make_allele_by_consequence(
                    consequence, hgnc_id, gencode_gene_name)

                if omim.strip() != '':
                    omim_id = 'OMIM:'+str(omim.strip())
                    # assume this is declared elsewhere in ontology
                    self.model.addClassToGraph(omim_id, None)

                    # ??? rel is never used
                    # if category.strip() == 'Confirmed DD gene':
                    #     rel = self.self.globaltt['has phenotype']
                    # elif category.strip() == 'Probable DD gene':
                    #     rel = self.self.globaltt['has phenotype']
                    # elif category.strip() == 'Possible DD gene':
                    #     rel = self.self.globaltt['contributes to']
                    # elif category.strip() == 'Not DD gene':
                    #     # TODO negative annotation
                    #     continue
                    assoc = G2PAssoc(graph, self.name, allele_id, omim_id)
                    # TODO 'rel' is assigned to but never used

                    for p in re.split(r';', pubmed_ids):
                        p = p.strip()
                        if p != '':
                            pmid = 'PMID:' + str(p)
                            r = Reference(
                                graph, pmid, self.globaltt['journal article'])
                            r.addRefToGraph()
                            assoc.add_source(pmid)

                    assoc.add_association_to_graph()
                else:
                    # these are unmapped to a disease id.
                    # note that some match OMIM disease labels
                    # but the identifiers are just not included.
                    # TODO consider mapping to OMIM or DOIDs in other ways
                    LOG.warning(
                        "No omim id on line %d\n%s", line_counter, str(row))
                    unmapped_omim_counter += 1

                # TODO hpo phenotypes
                # since the DDG2P file is not documented,
                # I don't know what the HPO annotations are actually about
                # are they about the gene?  the omim disease?  something else?
                # So, we won't create associations until this is clarified

                if not self.test_mode and limit is not None and line_counter > limit:
                    break

        myzip.close()
        LOG.warning(
            "gene-disorder associations with no omim id: %d",
            unmapped_omim_counter)
        LOG.warning("unmapped gene count: %d", unmapped_gene_count)

        return
[ "def", "_process_ddg2p_annotations", "(", "self", ",", "limit", ")", ":", "line_counter", "=", "0", "if", "self", ".", "graph", "is", "not", "None", ":", "graph", "=", "self", ".", "graph", "else", ":", "graph", "=", "self", ".", "graph", "# in order for this to work, we need to map the HGNC id-symbol;", "hgnc", "=", "HGNC", "(", "self", ".", "graph_type", ",", "self", ".", "are_bnodes_skolemized", ")", "hgnc_symbol_id_map", "=", "hgnc", ".", "get_symbol_id_map", "(", ")", "myzip", "=", "ZipFile", "(", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "self", ".", "files", "[", "'annot'", "]", "[", "'file'", "]", ")", ")", ",", "'r'", ")", "# use the ddg2p.txt file", "fname", "=", "'ddg2p.txt'", "unmapped_omim_counter", "=", "0", "unmapped_gene_count", "=", "0", "with", "myzip", ".", "open", "(", "fname", ",", "'r'", ")", "as", "f", ":", "f", "=", "io", ".", "TextIOWrapper", "(", "f", ")", "reader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'\\\"'", ")", "# score_means_by_measure = {}", "# strain_scores_by_measure = {} # TODO theseare unused", "for", "row", "in", "reader", ":", "line_counter", "+=", "1", "if", "re", ".", "match", "(", "r'#'", ",", "row", "[", "0", "]", ")", ":", "# skip comments", "continue", "(", "gencode_gene_name", ",", "mode", ",", "category", ",", "consequence", ",", "disease", ",", "omim", ",", "ddg2p_id", ",", "pubmed_ids", ",", "hpo_codes", ")", "=", "row", "hgnc_id", "=", "hgnc_symbol_id_map", ".", "get", "(", "gencode_gene_name", ".", "strip", "(", ")", ")", "if", "hgnc_id", "is", "None", ":", "LOG", ".", "error", "(", "\"Couldn't map the gene symbol %s to HGNC.\"", ",", "gencode_gene_name", ")", "unmapped_gene_count", "+=", "1", "continue", "# add the gene", "self", ".", "model", ".", "addClassToGraph", "(", "hgnc_id", ",", "gencode_gene_name", ")", "# TODO make VSLC with the variation", "# to associate with the disorder", "# TODO use the Inheritance and Mutation consequence", "# to classify the VSLCs", "allele_id", "=", "self", ".", "make_allele_by_consequence", "(", "consequence", ",", "hgnc_id", ",", "gencode_gene_name", ")", "if", "omim", ".", "strip", "(", ")", "!=", "''", ":", "omim_id", "=", "'OMIM:'", "+", "str", "(", "omim", ".", "strip", "(", ")", ")", "# assume this is declared elsewhere in ontology", "self", ".", "model", ".", "addClassToGraph", "(", "omim_id", ",", "None", ")", "# ??? 
rel is never used", "# if category.strip() == 'Confirmed DD gene':", "# rel = self.self.globaltt['has phenotype']", "# elif category.strip() == 'Probable DD gene':", "# rel = self.self.globaltt['has phenotype']", "# elif category.strip() == 'Possible DD gene':", "# rel = self.self.globaltt['contributes to']", "# elif category.strip() == 'Not DD gene':", "# # TODO negative annotation", "# continue", "assoc", "=", "G2PAssoc", "(", "graph", ",", "self", ".", "name", ",", "allele_id", ",", "omim_id", ")", "# TODO 'rel' is assigned to but never used", "for", "p", "in", "re", ".", "split", "(", "r';'", ",", "pubmed_ids", ")", ":", "p", "=", "p", ".", "strip", "(", ")", "if", "p", "!=", "''", ":", "pmid", "=", "'PMID:'", "+", "str", "(", "p", ")", "r", "=", "Reference", "(", "graph", ",", "pmid", ",", "self", ".", "globaltt", "[", "'journal article'", "]", ")", "r", ".", "addRefToGraph", "(", ")", "assoc", ".", "add_source", "(", "pmid", ")", "assoc", ".", "add_association_to_graph", "(", ")", "else", ":", "# these are unmapped to a disease id.", "# note that some match OMIM disease labels", "# but the identifiers are just not included.", "# TODO consider mapping to OMIM or DOIDs in other ways", "LOG", ".", "warning", "(", "\"No omim id on line %d\\n%s\"", ",", "line_counter", ",", "str", "(", "row", ")", ")", "unmapped_omim_counter", "+=", "1", "# TODO hpo phenotypes", "# since the DDG2P file is not documented,", "# I don't know what the HPO annotations are actually about", "# are they about the gene? the omim disease? something else?", "# So, we wont create associations until this is clarified", "if", "not", "self", ".", "test_mode", "and", "limit", "is", "not", "None", "and", "line_counter", ">", "limit", ":", "break", "myzip", ".", "close", "(", ")", "LOG", ".", "warning", "(", "\"gene-disorder associations with no omim id: %d\"", ",", "unmapped_omim_counter", ")", "LOG", ".", "warning", "(", "\"unmapped gene count: %d\"", ",", "unmapped_gene_count", ")", "return" ]
The ddg2p annotations associate a gene symbol to an omim disease, along with some HPO ids and pubs. The gene symbols come from gencode, which in turn come from HGNC official gene symbols. Therefore, we use the HGNC source class to get the id/symbol mapping for use in our annotations here. According to http://www.gencodegenes.org/faq.html, "Gene names are usually HGNC or MGI-approved gene symbols mapped to the GENCODE genes by the Ensembl xref pipeline. Sometimes, when there is no official gene symbol, the Havana clone-based name is used." The kind of variation that is linked to a disease is indicated (LOF, GOF, CNV, etc) in the source data. Here, we create an anonymous variant of the specified gene of the indicated type (mapped to the sequence ontology (SO)). :param limit: :return:
[ "The", "ddg2p", "annotations", "associate", "a", "gene", "symbol", "to", "an", "omim", "disease", "along", "with", "some", "HPO", "ids", "and", "pubs", ".", "The", "gene", "symbols", "come", "from", "gencode", "which", "in", "turn", "come", "from", "HGNC", "official", "gene", "symbols", ".", "Therefore", "we", "use", "the", "HGNC", "source", "class", "to", "get", "the", "id", "/", "symbol", "mapping", "for", "use", "in", "our", "annotations", "here", "." ]
python
train
cosven/feeluown-core
fuocore/xiami/api.py
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/xiami/api.py#L54-L72
def _sign_payload(self, payload):
    """Sign the payload with the app key and return the new request parameters
    """
    app_key = self._app_key
    t = int(time.time() * 1000)
    requestStr = {
        'header': self._req_header,
        'model': payload
    }
    data = json.dumps({'requestStr': json.dumps(requestStr)})
    data_str = '{}&{}&{}&{}'.format(self._req_token, t, app_key, data)
    sign = hashlib.md5(data_str.encode('utf-8')).hexdigest()
    params = {
        't': t,
        'appKey': app_key,
        'sign': sign,
        'data': data,
    }
    return params
[ "def", "_sign_payload", "(", "self", ",", "payload", ")", ":", "app_key", "=", "self", ".", "_app_key", "t", "=", "int", "(", "time", ".", "time", "(", ")", "*", "1000", ")", "requestStr", "=", "{", "'header'", ":", "self", ".", "_req_header", ",", "'model'", ":", "payload", "}", "data", "=", "json", ".", "dumps", "(", "{", "'requestStr'", ":", "json", ".", "dumps", "(", "requestStr", ")", "}", ")", "data_str", "=", "'{}&{}&{}&{}'", ".", "format", "(", "self", ".", "_req_token", ",", "t", ",", "app_key", ",", "data", ")", "sign", "=", "hashlib", ".", "md5", "(", "data_str", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "params", "=", "{", "'t'", ":", "t", ",", "'appKey'", ":", "app_key", ",", "'sign'", ":", "sign", ",", "'data'", ":", "data", ",", "}", "return", "params" ]
Sign the payload with the app key and return the new request parameters
[ "使用", "appkey", "对", "payload", "进行签名,返回新的请求参数" ]
python
train
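The signing scheme of `_sign_payload`, reproduced as a standalone function; the token and app key below are dummies, not real Xiami credentials:

import hashlib
import json
import time

def sign_payload(payload, req_token='dummy-token', app_key='dummy-key', req_header=None):
    # Same steps as the record: wrap the payload, join token/timestamp/key/data, MD5 it.
    t = int(time.time() * 1000)
    request_str = {'header': req_header or {}, 'model': payload}
    data = json.dumps({'requestStr': json.dumps(request_str)})
    data_str = '{}&{}&{}&{}'.format(req_token, t, app_key, data)
    sign = hashlib.md5(data_str.encode('utf-8')).hexdigest()
    return {'t': t, 'appKey': app_key, 'sign': sign, 'data': data}

print(sign_payload({'songId': 1}))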
gem/oq-engine
openquake/baselib/datastore.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/datastore.py#L285-L301
def extend(self, key, array, **attrs):
    """
    Extend the dataset associated with the given key; create it if needed

    :param key: name of the dataset
    :param array: array to store
    :param attrs: a dictionary of attributes
    """
    try:
        dset = self.hdf5[key]
    except KeyError:
        dset = hdf5.create(self.hdf5, key, array.dtype,
                           shape=(None,) + array.shape[1:])
    hdf5.extend(dset, array)
    for k, v in attrs.items():
        dset.attrs[k] = v
    return dset
[ "def", "extend", "(", "self", ",", "key", ",", "array", ",", "*", "*", "attrs", ")", ":", "try", ":", "dset", "=", "self", ".", "hdf5", "[", "key", "]", "except", "KeyError", ":", "dset", "=", "hdf5", ".", "create", "(", "self", ".", "hdf5", ",", "key", ",", "array", ".", "dtype", ",", "shape", "=", "(", "None", ",", ")", "+", "array", ".", "shape", "[", "1", ":", "]", ")", "hdf5", ".", "extend", "(", "dset", ",", "array", ")", "for", "k", ",", "v", "in", "attrs", ".", "items", "(", ")", ":", "dset", ".", "attrs", "[", "k", "]", "=", "v", "return", "dset" ]
Extend the dataset associated with the given key; create it if needed :param key: name of the dataset :param array: array to store :param attrs: a dictionary of attributes
[ "Extend", "the", "dataset", "associated", "to", "the", "given", "key", ";", "create", "it", "if", "needed" ]
python
train
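`hdf5.create` and `hdf5.extend` are openquake's own helpers; the same create-then-grow pattern can be sketched directly with `h5py` (assumed installed), which is roughly what they wrap:

import h5py
import numpy as np

with h5py.File('/tmp/demo.hdf5', 'w') as f:
    arr = np.zeros((3, 2))
    # First call: create with an unlimited first axis, like hdf5.create above.
    dset = f.create_dataset('data', shape=arr.shape, dtype=arr.dtype,
                            maxshape=(None,) + arr.shape[1:])
    dset[:] = arr
    # Later calls: grow along axis 0 and append, like hdf5.extend above.
    more = np.ones((2, 2))
    dset.resize(dset.shape[0] + len(more), axis=0)
    dset[-len(more):] = more
    dset.attrs['units'] = 'g'   # attrs are set last, as in the record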
softlayer/softlayer-python
SoftLayer/CLI/order/package_locations.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/CLI/order/package_locations.py#L15-L32
def cli(env, package_keyname): """List Datacenters a package can be ordered in. Use the location Key Name to place orders """ manager = ordering.OrderingManager(env.client) table = formatting.Table(COLUMNS) locations = manager.package_locations(package_keyname) for region in locations: for datacenter in region['locations']: table.add_row([ datacenter['location']['id'], datacenter['location']['name'], region['description'], region['keyname'] ]) env.fout(table)
[ "def", "cli", "(", "env", ",", "package_keyname", ")", ":", "manager", "=", "ordering", ".", "OrderingManager", "(", "env", ".", "client", ")", "table", "=", "formatting", ".", "Table", "(", "COLUMNS", ")", "locations", "=", "manager", ".", "package_locations", "(", "package_keyname", ")", "for", "region", "in", "locations", ":", "for", "datacenter", "in", "region", "[", "'locations'", "]", ":", "table", ".", "add_row", "(", "[", "datacenter", "[", "'location'", "]", "[", "'id'", "]", ",", "datacenter", "[", "'location'", "]", "[", "'name'", "]", ",", "region", "[", "'description'", "]", ",", "region", "[", "'keyname'", "]", "]", ")", "env", ".", "fout", "(", "table", ")" ]
List Datacenters a package can be ordered in. Use the location Key Name to place orders
[ "List", "Datacenters", "a", "package", "can", "be", "ordered", "in", "." ]
python
train
m-weigand/sip_models
lib/sip_models/res/cc.py
https://github.com/m-weigand/sip_models/blob/917da5d956215d9df2bf65b24123ba020e3e17c0/lib/sip_models/res/cc.py#L306-L334
def dim_dc(self, pars):
    r"""
    :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} =
    \rho_0 \frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m
    (\omega \tau)^c \frac{\pi}{2} cos(\frac{c \pi}{2})}{1 + 2 (\omega \tau)^c
    cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{m (\omega
    \tau)^c sin(\frac{c \pi}{2}) \left[2 ln(\omega \tau) (\omega \tau)^c
    cos(\frac{c \pi}{2}) - 2 (\omega \tau)^c \frac{\pi}{2} sin(\frac{c
    \pi}{2}) + 2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2
    (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`

    """
    self._set_parameters(pars)

    # term1
    nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\
        np.sin(self.ang)
    nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang)
    term1 = (nom1a + nom1b) / self.denom

    # term2
    nom2 = (self.m * self.otc * np.sin(self.ang)) *\
        (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) -
         2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) +
         2 * np.log(self.w * self.tau) * self.otc2)
    term2 = nom2 / self.denom ** 2

    result = term1 + term2
    result *= self.rho0
    return result
[ "def", "dim_dc", "(", "self", ",", "pars", ")", ":", "self", ".", "_set_parameters", "(", "pars", ")", "# term1", "nom1a", "=", "-", "self", ".", "m", "*", "np", ".", "log", "(", "self", ".", "w", "*", "self", ".", "tau", ")", "*", "self", ".", "otc", "*", "np", ".", "sin", "(", "self", ".", "ang", ")", "nom1b", "=", "-", "self", ".", "m", "*", "self", ".", "otc", "*", "(", "np", ".", "pi", "/", "2.0", ")", "*", "np", ".", "cos", "(", "self", ".", "ang", ")", "term1", "=", "(", "nom1a", "+", "nom1b", ")", "/", "self", ".", "denom", "# term2", "nom2", "=", "(", "self", ".", "m", "*", "self", ".", "otc", "*", "np", ".", "sin", "(", "self", ".", "ang", ")", ")", "*", "(", "2", "*", "np", ".", "log", "(", "self", ".", "w", "*", "self", ".", "tau", ")", "*", "self", ".", "otc", "*", "np", ".", "cos", "(", "self", ".", "ang", ")", "-", "2", "*", "self", ".", "otc", "*", "(", "np", ".", "pi", "/", "2.0", ")", "*", "np", ".", "sin", "(", "self", ".", "ang", ")", "+", "2", "*", "np", ".", "log", "(", "self", ".", "w", "*", "self", ".", "tau", ")", "*", "self", ".", "otc2", ")", "term2", "=", "nom2", "/", "self", ".", "denom", "**", "2", "result", "=", "term1", "+", "term2", "result", "*=", "self", ".", "rho0", "return", "result" ]
r""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0 \frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m (\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
[ "r", ":", "math", ":", "\\", "frac", "{", "\\", "partial", "\\", "hat", "{", "\\", "rho", "}", "(", "\\", "omega", ")", "}", "{", "\\", "partial", "c", "}", "=", "\\", "rho_0", "\\", "frac", "{", "-", "m", "sin", "(", "\\", "frac", "{", "c", "\\", "pi", "}", "{", "2", "}", ")", "ln", "(", "\\", "omega", "\\", "tau", ")", "(", "\\", "omega", "\\", "tau", ")", "^c", "-", "m", "(", "\\", "omega", "\\", "tau", ")", "^c", "\\", "frac", "{", "\\", "pi", "}", "{", "2", "}", "cos", "(", "\\", "frac", "{", "\\", "pi", "}", "{", "2", "}}", "{", "1", "+", "2", "(", "\\", "omega", "\\", "tau", ")", "^c", "cos", "(", "\\", "frac", "{", "c", "\\", "pi", "}", "{", "2", "}", ")", "+", "(", "\\", "omega", "\\", "tau", ")", "^", "{", "2", "c", "}}", "+", "\\", "rho_0", "\\", "frac", "{", "\\", "left", "[", "-", "m", "(", "\\", "omega", "\\", "tau", ")", "^c", "cos", "(", "\\", "frac", "{", "c", "\\", "pi", "}", "{", "2", "}", ")", "\\", "right", "]", "\\", "cdot", "\\", "left", "[", "-", "2", "ln", "(", "\\", "omega", "\\", "tau", ")", "(", "\\", "omega", "\\", "tau", ")", "^c", "cos", "(", "\\", "frac", "{", "c", "\\", "pi", "}", "{", "2", "}", ")", "+", "2", "(", "\\", "omega", "\\", "tau", ")", "^c", "\\", "frac", "{", "\\", "pi", "}", "{", "2", "}", "cos", "(", "\\", "frac", "{", "c", "\\", "pi", "}", "{", "2", "}", ")", "\\", "right", "]", "+", "\\", "left", "[", "2", "ln", "(", "\\", "omega", "\\", "tau", ")", "(", "\\", "omega", "\\", "tau", ")", "^", "{", "2", "c", "}", "\\", "right", "]", "}", "{", "\\", "left", "[", "1", "+", "2", "(", "\\", "omega", "\\", "tau", ")", "^c", "cos", "(", "\\", "frac", "{", "c", "\\", "pi", "}", "{", "2", "}", ")", "+", "(", "\\", "omega", "\\", "tau", ")", "^", "{", "2", "c", "}", "\\", "right", "]", "^2", "}" ]
python
train
HumanCellAtlas/dcp-cli
hca/upload/upload_area.py
https://github.com/HumanCellAtlas/dcp-cli/blob/cc70817bc4e50944c709eaae160de0bf7a19f0f3/hca/upload/upload_area.py#L159-L168
def checksum_status(self, filename): """ Retrieve checksum status and values for a file :param str filename: The name of the file within the Upload Area :return: a dict with checksum information :rtype: dict :raises UploadApiException: if information could not be obtained """ return self.upload_service.api_client.checksum_status(area_uuid=self.uuid, filename=filename)
[ "def", "checksum_status", "(", "self", ",", "filename", ")", ":", "return", "self", ".", "upload_service", ".", "api_client", ".", "checksum_status", "(", "area_uuid", "=", "self", ".", "uuid", ",", "filename", "=", "filename", ")" ]
Retrieve checksum status and values for a file :param str filename: The name of the file within the Upload Area :return: a dict with checksum information :rtype: dict :raises UploadApiException: if information could not be obtained
[ "Retrieve", "checksum", "status", "and", "values", "for", "a", "file" ]
python
train
pkgw/pwkit
pwkit/msmt.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/msmt.py#L806-L818
def _lval_add_towards_polarity(x, polarity): """Compute the appropriate Lval "kind" for the limit of value `x` towards `polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and the infinity direction of polarity. """ if x < 0: if polarity < 0: return Lval('toinf', x) return Lval('pastzero', x) elif polarity > 0: return Lval('toinf', x) return Lval('pastzero', x)
[ "def", "_lval_add_towards_polarity", "(", "x", ",", "polarity", ")", ":", "if", "x", "<", "0", ":", "if", "polarity", "<", "0", ":", "return", "Lval", "(", "'toinf'", ",", "x", ")", "return", "Lval", "(", "'pastzero'", ",", "x", ")", "elif", "polarity", ">", "0", ":", "return", "Lval", "(", "'toinf'", ",", "x", ")", "return", "Lval", "(", "'pastzero'", ",", "x", ")" ]
Compute the appropriate Lval "kind" for the limit of value `x` towards `polarity`. Either 'toinf' or 'pastzero' depending on the sign of `x` and the infinity direction of polarity.
[ "Compute", "the", "appropriate", "Lval", "kind", "for", "the", "limit", "of", "value", "x", "towards", "polarity", ".", "Either", "toinf", "or", "pastzero", "depending", "on", "the", "sign", "of", "x", "and", "the", "infinity", "direction", "of", "polarity", "." ]
python
train
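The sign logic above, tabulated with a stand-in `Lval` so the example runs on its own (`Lval` here is a plain tuple factory, not pwkit's class):

def Lval(kind, x):   # hypothetical stand-in for pwkit.msmt.Lval
    return (kind, x)

def lval_add_towards_polarity(x, polarity):
    # 'toinf' when x already points in the limit direction, else 'pastzero'.
    if x < 0:
        return Lval('toinf', x) if polarity < 0 else Lval('pastzero', x)
    return Lval('toinf', x) if polarity > 0 else Lval('pastzero', x)

print(lval_add_towards_polarity(-2, -1))   # ('toinf', -2)
print(lval_add_towards_polarity(-2, +1))   # ('pastzero', -2)
print(lval_add_towards_polarity(3, +1))    # ('toinf', 3)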
materialsproject/pymatgen
pymatgen/analysis/graphs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/graphs.py#L1796-L1814
def set_node_attributes(self): """ Replicates molecule site properties (specie, coords, etc.) in the MoleculeGraph. :return: """ species = {} coords = {} properties = {} for node in self.graph.nodes(): species[node] = self.molecule[node].specie.symbol coords[node] = self.molecule[node].coords properties[node] = self.molecule[node].properties nx.set_node_attributes(self.graph, species, "specie") nx.set_node_attributes(self.graph, coords, "coords") nx.set_node_attributes(self.graph, properties, "properties")
[ "def", "set_node_attributes", "(", "self", ")", ":", "species", "=", "{", "}", "coords", "=", "{", "}", "properties", "=", "{", "}", "for", "node", "in", "self", ".", "graph", ".", "nodes", "(", ")", ":", "species", "[", "node", "]", "=", "self", ".", "molecule", "[", "node", "]", ".", "specie", ".", "symbol", "coords", "[", "node", "]", "=", "self", ".", "molecule", "[", "node", "]", ".", "coords", "properties", "[", "node", "]", "=", "self", ".", "molecule", "[", "node", "]", ".", "properties", "nx", ".", "set_node_attributes", "(", "self", ".", "graph", ",", "species", ",", "\"specie\"", ")", "nx", ".", "set_node_attributes", "(", "self", ".", "graph", ",", "coords", ",", "\"coords\"", ")", "nx", ".", "set_node_attributes", "(", "self", ".", "graph", ",", "properties", ",", "\"properties\"", ")" ]
Replicates molecule site properties (specie, coords, etc.) in the MoleculeGraph. :return:
[ "Replicates", "molecule", "site", "properties", "(", "specie", "coords", "etc", ".", ")", "in", "the", "MoleculeGraph", "." ]
python
train
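`nx.set_node_attributes` is plain networkx; the dict-per-attribute pattern used above works on any graph, shown here with a toy graph instead of a MoleculeGraph:

import networkx as nx

g = nx.Graph()
g.add_nodes_from([0, 1])
species = {0: 'C', 1: 'H'}           # one dict per attribute, keyed by node
nx.set_node_attributes(g, species, 'specie')
print(g.nodes[0]['specie'])          # 'C'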
secdev/scapy
scapy/contrib/http2.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/http2.py#L2312-L2326
def resize(self, ns):
    # type: (int) -> None
    """Resize the dynamic table. The new size (ns) must be between 0 and
    the cap size. If the new size is lower than the current size of the
    dynamic table, entries are evicted.
    @param int ns: the new size of the dynamic table
    @raise AssertionError
    """
    assert 0 <= ns <= self._dynamic_table_cap_size, \
        'EINVAL: ns: out-of-range value; expected value is in the range [0;{}['.format(self._dynamic_table_cap_size)  # noqa: E501
    old_size = self._dynamic_table_max_size
    self._dynamic_table_max_size = ns
    if old_size > self._dynamic_table_max_size:
        self._reduce_dynamic_table()
[ "def", "resize", "(", "self", ",", "ns", ")", ":", "# type: (int) -> None", "assert", "0", "<=", "ns", "<=", "self", ".", "_dynamic_table_cap_size", ",", "'EINVAL: ns: out-of-range value; expected value is in the range [0;{}['", ".", "format", "(", "self", ".", "_dynamic_table_cap_size", ")", "# noqa: E501", "old_size", "=", "self", ".", "_dynamic_table_max_size", "self", ".", "_dynamic_table_max_size", "=", "ns", "if", "old_size", ">", "self", ".", "_dynamic_table_max_size", ":", "self", ".", "_reduce_dynamic_table", "(", ")" ]
Resize the dynamic table. The new size (ns) must be between 0 and the cap size. If the new size is lower than the current size of the dynamic table, entries are evicted. @param int ns: the new size of the dynamic table @raise AssertionError
[ "Resize", "the", "dynamic", "table", ".", "If", "the", "new", "size", "(", "ns", ")", "must", "be", "between", "0", "and", "the", "cap", "size", ".", "If", "the", "new", "size", "is", "lower", "than", "the", "current", "size", "of", "the", "dynamic", "table", "entries", "are", "evicted", "." ]
python
train
calston/rhumba
rhumba/http_client.py
https://github.com/calston/rhumba/blob/05e3cbf4e531cc51b4777912eb98a4f006893f5e/rhumba/http_client.py#L69-L75
def abort_request(self, request): """Called to abort request on timeout""" self.timedout = True try: request.cancel() except error.AlreadyCancelled: return
[ "def", "abort_request", "(", "self", ",", "request", ")", ":", "self", ".", "timedout", "=", "True", "try", ":", "request", ".", "cancel", "(", ")", "except", "error", ".", "AlreadyCancelled", ":", "return" ]
Called to abort request on timeout
[ "Called", "to", "abort", "request", "on", "timeout" ]
python
train
EnergieID/smappy
smappy/smappy.py
https://github.com/EnergieID/smappy/blob/1ada3abc9a51c76205c072369258f6f4f4e8fd0f/smappy/smappy.py#L173-L213
def get_consumption(self, service_location_id, start, end, aggregation, raw=False):
    """
    Request Electricity consumption and Solar production
    for a given service location.

    Parameters
    ----------
    service_location_id : int
    start : int | dt.datetime | pd.Timestamp
    end : int | dt.datetime | pd.Timestamp
        start and end support epoch (in milliseconds),
        datetime and Pandas Timestamp
    aggregation : int
        1 = 5 min values (only available for the last 14 days)
        2 = hourly values
        3 = daily values
        4 = monthly values
        5 = quarterly values
    raw : bool
        default False
        if True: Return the data "as is" from the server
        if False: convert the 'alwaysOn' value to Wh.
        (the server returns this value as the sum of the power,
        measured in 5 minute blocks. This means that it is 12 times
        higher than the consumption in Wh.
        See https://github.com/EnergieID/smappy/issues/24)

    Returns
    -------
    dict
    """
    url = urljoin(URLS['servicelocation'], service_location_id, "consumption")
    d = self._get_consumption(url=url, start=start, end=end,
                              aggregation=aggregation)
    if not raw:
        for block in d['consumptions']:
            if 'alwaysOn' not in block.keys():
                break
            block.update({'alwaysOn': block['alwaysOn'] / 12})
    return d
[ "def", "get_consumption", "(", "self", ",", "service_location_id", ",", "start", ",", "end", ",", "aggregation", ",", "raw", "=", "False", ")", ":", "url", "=", "urljoin", "(", "URLS", "[", "'servicelocation'", "]", ",", "service_location_id", ",", "\"consumption\"", ")", "d", "=", "self", ".", "_get_consumption", "(", "url", "=", "url", ",", "start", "=", "start", ",", "end", "=", "end", ",", "aggregation", "=", "aggregation", ")", "if", "not", "raw", ":", "for", "block", "in", "d", "[", "'consumptions'", "]", ":", "if", "'alwaysOn'", "not", "in", "block", ".", "keys", "(", ")", ":", "break", "block", ".", "update", "(", "{", "'alwaysOn'", ":", "block", "[", "'alwaysOn'", "]", "/", "12", "}", ")", "return", "d" ]
Request Electricity consumption and Solar production for a given service location. Parameters ---------- service_location_id : int start : int | dt.datetime | pd.Timestamp end : int | dt.datetime | pd.Timestamp start and end support epoch (in milliseconds), datetime and Pandas Timestamp aggregation : int 1 = 5 min values (only available for the last 14 days) 2 = hourly values 3 = daily values 4 = monthly values 5 = quarterly values raw : bool default False if True: Return the data "as is" from the server if False: convert the 'alwaysOn' value to Wh. (the server returns this value as the sum of the power, measured in 5 minute blocks. This means that it is 12 times higher than the consumption in Wh. See https://github.com/EnergieID/smappy/issues/24) Returns ------- dict
[ "Request", "Elektricity", "consumption", "and", "Solar", "production", "for", "a", "given", "service", "location", "." ]
python
train
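Hypothetical call pattern for `get_consumption`; the fake client below only mimics the documented signature so the snippet runs without Smappee credentials:

import datetime as dt

class FakeClient:
    # Stand-in with the same signature as the record's method.
    def get_consumption(self, service_location_id, start, end, aggregation, raw=False):
        return {'consumptions': [{'alwaysOn': 1.5, 'timestamp': 0}]}

client = FakeClient()
d = client.get_consumption(12345,
                           start=dt.datetime(2019, 1, 1),
                           end=dt.datetime(2019, 1, 2),
                           aggregation=2)        # 2 = hourly values
print(d['consumptions'][0]['alwaysOn'])          # with raw=False this is Wh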
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L609-L636
def new(cls, pn, comment="", email=""): """ Create a new User ID or photo. :param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo. Otherwise, it will be used as the name field for a User ID. :type pn: ``bytearray``, ``str``, ``unicode`` :param comment: The comment field for a User ID. Ignored if this is a photo. :type comment: ``str``, ``unicode`` :param email: The email address field for a User ID. Ignored if this is a photo. :type email: ``str``, ``unicode`` :returns: :py:obj:`PGPUID` """ uid = PGPUID() if isinstance(pn, bytearray): uid._uid = UserAttribute() uid._uid.image.image = pn uid._uid.image.iencoding = ImageEncoding.encodingof(pn) uid._uid.update_hlen() else: uid._uid = UserID() uid._uid.name = pn uid._uid.comment = comment uid._uid.email = email uid._uid.update_hlen() return uid
[ "def", "new", "(", "cls", ",", "pn", ",", "comment", "=", "\"\"", ",", "email", "=", "\"\"", ")", ":", "uid", "=", "PGPUID", "(", ")", "if", "isinstance", "(", "pn", ",", "bytearray", ")", ":", "uid", ".", "_uid", "=", "UserAttribute", "(", ")", "uid", ".", "_uid", ".", "image", ".", "image", "=", "pn", "uid", ".", "_uid", ".", "image", ".", "iencoding", "=", "ImageEncoding", ".", "encodingof", "(", "pn", ")", "uid", ".", "_uid", ".", "update_hlen", "(", ")", "else", ":", "uid", ".", "_uid", "=", "UserID", "(", ")", "uid", ".", "_uid", ".", "name", "=", "pn", "uid", ".", "_uid", ".", "comment", "=", "comment", "uid", ".", "_uid", ".", "email", "=", "email", "uid", ".", "_uid", ".", "update_hlen", "(", ")", "return", "uid" ]
Create a new User ID or photo. :param pn: User ID name, or photo. If this is a ``bytearray``, it will be loaded as a photo. Otherwise, it will be used as the name field for a User ID. :type pn: ``bytearray``, ``str``, ``unicode`` :param comment: The comment field for a User ID. Ignored if this is a photo. :type comment: ``str``, ``unicode`` :param email: The email address field for a User ID. Ignored if this is a photo. :type email: ``str``, ``unicode`` :returns: :py:obj:`PGPUID`
[ "Create", "a", "new", "User", "ID", "or", "photo", "." ]
python
train
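Usage of `PGPUID.new` as documented above (PGPy assumed installed; the identity values are examples):

from pgpy import PGPUID

uid = PGPUID.new('Jane Doe', comment='work key', email='jane@example.com')
print(uid.name, uid.email)   # Jane Doe jane@example.com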
thiagopbueno/pyrddl
pyrddl/rddl.py
https://github.com/thiagopbueno/pyrddl/blob/3bcfa850b1a7532c7744358f3c6b9e0f8ab978c9/pyrddl/rddl.py#L130-L139
def action_fluent_variables(self) -> FluentParamsList: '''Returns the instantiated action fluents in canonical order. Returns: Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name and a list of instantiated fluents represented as strings. ''' fluents = self.domain.action_fluents ordering = self.domain.action_fluent_ordering return self._fluent_params(fluents, ordering)
[ "def", "action_fluent_variables", "(", "self", ")", "->", "FluentParamsList", ":", "fluents", "=", "self", ".", "domain", ".", "action_fluents", "ordering", "=", "self", ".", "domain", ".", "action_fluent_ordering", "return", "self", ".", "_fluent_params", "(", "fluents", ",", "ordering", ")" ]
Returns the instantiated action fluents in canonical order. Returns: Sequence[Tuple[str, List[str]]]: A tuple of pairs of fluent name and a list of instantiated fluents represented as strings.
[ "Returns", "the", "instantiated", "action", "fluents", "in", "canonical", "order", "." ]
python
train
Esri/ArcREST
src/arcrest/manageags/_data.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_data.py#L293-L310
def validateDataStore(self, dataStoreName, machineName): """ Checks the status of ArcGIS Data Store and provides a health check response. Inputs: dataStoreName - name of the datastore machineName - name of the machine """ url = self._url + "/items/enterpriseDatabases/%s/machines/%s/validate" % (dataStoreName, machineName) params = { "f" : "json" } return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "validateDataStore", "(", "self", ",", "dataStoreName", ",", "machineName", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/items/enterpriseDatabases/%s/machines/%s/validate\"", "%", "(", "dataStoreName", ",", "machineName", ")", "params", "=", "{", "\"f\"", ":", "\"json\"", "}", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Checks the status of ArcGIS Data Store and provides a health check response. Inputs: dataStoreName - name of the datastore machineName - name of the machine
[ "Checks", "the", "status", "of", "ArcGIS", "Data", "Store", "and", "provides", "a", "health", "check", "response", "." ]
python
train
biocore/burrito-fillings
bfillings/seqprep.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/seqprep.py#L185-L229
def _get_result_paths(self, data): """Captures SeqPrep output. """ result = {} # Always output: result['UnassembledReads1'] = ResultPath(Path= self._unassembled_reads1_out_file_name( ), IsWritten=True) result['UnassembledReads2'] = ResultPath(Path= self._unassembled_reads2_out_file_name( ), IsWritten=True) # optional output, so we check for each # check for assembled reads file if self.Parameters['-s'].isOn(): result['Assembled'] = ResultPath(Path= self._assembled_out_file_name(), IsWritten=True) # check for discarded (unassembled) reads1 file if self.Parameters['-3'].isOn(): result['Reads1Discarded'] = ResultPath(Path= self._discarded_reads1_out_file_name( ), IsWritten=True) # check for discarded (unassembled) reads2 file if self.Parameters['-4'].isOn(): result['Reads2Discarded'] = ResultPath(Path= self._discarded_reads2_out_file_name( ), IsWritten=True) # check for pretty-alignment file if self.Parameters['-E'].isOn(): result['PrettyAlignments'] = ResultPath(Path= self._pretty_alignment_out_file_name( ), IsWritten=True) return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "result", "=", "{", "}", "# Always output:", "result", "[", "'UnassembledReads1'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_unassembled_reads1_out_file_name", "(", ")", ",", "IsWritten", "=", "True", ")", "result", "[", "'UnassembledReads2'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_unassembled_reads2_out_file_name", "(", ")", ",", "IsWritten", "=", "True", ")", "# optional output, so we check for each", "# check for assembled reads file", "if", "self", ".", "Parameters", "[", "'-s'", "]", ".", "isOn", "(", ")", ":", "result", "[", "'Assembled'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_assembled_out_file_name", "(", ")", ",", "IsWritten", "=", "True", ")", "# check for discarded (unassembled) reads1 file", "if", "self", ".", "Parameters", "[", "'-3'", "]", ".", "isOn", "(", ")", ":", "result", "[", "'Reads1Discarded'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_discarded_reads1_out_file_name", "(", ")", ",", "IsWritten", "=", "True", ")", "# check for discarded (unassembled) reads2 file", "if", "self", ".", "Parameters", "[", "'-4'", "]", ".", "isOn", "(", ")", ":", "result", "[", "'Reads2Discarded'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_discarded_reads2_out_file_name", "(", ")", ",", "IsWritten", "=", "True", ")", "# check for pretty-alignment file", "if", "self", ".", "Parameters", "[", "'-E'", "]", ".", "isOn", "(", ")", ":", "result", "[", "'PrettyAlignments'", "]", "=", "ResultPath", "(", "Path", "=", "self", ".", "_pretty_alignment_out_file_name", "(", ")", ",", "IsWritten", "=", "True", ")", "return", "result" ]
Captures SeqPrep output.
[ "Captures", "SeqPrep", "output", "." ]
python
train
cjdrake/pyeda
pyeda/boolalg/bfarray.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bfarray.py#L899-L906
def _coord2offset(self, coord): """Convert a normalized coordinate to an item offset.""" size = self.size offset = 0 for dim, index in enumerate(coord): size //= self._normshape[dim] offset += size * index return offset
[ "def", "_coord2offset", "(", "self", ",", "coord", ")", ":", "size", "=", "self", ".", "size", "offset", "=", "0", "for", "dim", ",", "index", "in", "enumerate", "(", "coord", ")", ":", "size", "//=", "self", ".", "_normshape", "[", "dim", "]", "offset", "+=", "size", "*", "index", "return", "offset" ]
Convert a normalized coordinate to an item offset.
[ "Convert", "a", "normalized", "coordinate", "to", "an", "item", "offset", "." ]
python
train
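`_coord2offset` is row-major linearization; a standalone version taking the shape explicitly makes the stride arithmetic visible:

def coord2offset(coord, shape):
    # Same loop as the record: peel one dimension's stride off per index.
    size = 1
    for dim in shape:
        size *= dim
    offset = 0
    for dim, index in enumerate(coord):
        size //= shape[dim]
        offset += size * index
    return offset

print(coord2offset((1, 2), (3, 4)))   # 1*4 + 2 = 6
print(coord2offset((2, 3), (3, 4)))   # 2*4 + 3 = 11, the last cell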
ellmetha/neojsonrpc
neojsonrpc/client.py
https://github.com/ellmetha/neojsonrpc/blob/e369b633a727482d5f9e310f0c3337ae5f7265db/neojsonrpc/client.py#L74-L83
def get_account_state(self, address, **kwargs):
    """ Returns the account state information associated with a specific address.

    :param address: a 34-character address (eg. AJBENSwajTzQtwyJFkiJSv7MAaaMc7DsRz)
    :type address: str
    :return: dictionary containing the account state information
    :rtype: dict

    """
    return self._call(JSONRPCMethods.GET_ACCOUNT_STATE.value, params=[address, ], **kwargs)
[ "def", "get_account_state", "(", "self", ",", "address", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_call", "(", "JSONRPCMethods", ".", "GET_ACCOUNT_STATE", ".", "value", ",", "params", "=", "[", "address", ",", "]", ",", "*", "*", "kwargs", ")" ]
Returns the account state information associated with a specific address. :param address: a 34-character address (eg. AJBENSwajTzQtwyJFkiJSv7MAaaMc7DsRz) :type address: str :return: dictionary containing the account state information :rtype: dict
[ "Returns", "the", "account", "state", "information", "associated", "with", "a", "specific", "address", "." ]
python
test
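Call pattern for `get_account_state` above; the convenience constructor is assumed from the neojsonrpc README, and the address is the docstring's own example:

from neojsonrpc import Client

client = Client.for_mainnet()   # assumed constructor; a host/port form may also exist
state = client.get_account_state('AJBENSwajTzQtwyJFkiJSv7MAaaMc7DsRz')
print(state)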
bitesofcode/projexui
projexui/widgets/xcombobox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcombobox.py#L167-L176
def checkedItems( self ): """ Returns the checked items for this combobox. :return [<str>, ..] """ if not self.isCheckable(): return [] return [nativestring(self.itemText(i)) for i in self.checkedIndexes()]
[ "def", "checkedItems", "(", "self", ")", ":", "if", "not", "self", ".", "isCheckable", "(", ")", ":", "return", "[", "]", "return", "[", "nativestring", "(", "self", ".", "itemText", "(", "i", ")", ")", "for", "i", "in", "self", ".", "checkedIndexes", "(", ")", "]" ]
Returns the checked items for this combobox. :return [<str>, ..]
[ "Returns", "the", "checked", "items", "for", "this", "combobox", ".", ":", "return", "[", "<str", ">", "..", "]" ]
python
train
skibblenybbles/django-commando
commando/management/base.py
https://github.com/skibblenybbles/django-commando/blob/dd1dd6969fc0dd8231fc115fee3eeb690809585b/commando/management/base.py#L286-L307
def handle(self, *arguments, **options): """ Parses arguments and options, runs validate_<action> for each action named by self.get_actions(), then runs handle_<action> for each action named by self.get_actions(). """ self.arguments = arguments self.options = options self.arguments = self.parse_arguments(arguments) self.options = self.parse_options(options) for name in self.get_actions(): validate = getattr(self, "validate_{name:s}".format( name=name), None) if validate is not None and isinstance(validate, collections.Callable): validate(*arguments, **options) for name in self.get_actions(): handle = getattr(self, "handle_{name:s}".format( name=name), None) if handle is not None and isinstance(handle, collections.Callable): handle(*self.arguments, **self.options)
[ "def", "handle", "(", "self", ",", "*", "arguments", ",", "*", "*", "options", ")", ":", "self", ".", "arguments", "=", "arguments", "self", ".", "options", "=", "options", "self", ".", "arguments", "=", "self", ".", "parse_arguments", "(", "arguments", ")", "self", ".", "options", "=", "self", ".", "parse_options", "(", "options", ")", "for", "name", "in", "self", ".", "get_actions", "(", ")", ":", "validate", "=", "getattr", "(", "self", ",", "\"validate_{name:s}\"", ".", "format", "(", "name", "=", "name", ")", ",", "None", ")", "if", "validate", "is", "not", "None", "and", "isinstance", "(", "validate", ",", "collections", ".", "Callable", ")", ":", "validate", "(", "*", "arguments", ",", "*", "*", "options", ")", "for", "name", "in", "self", ".", "get_actions", "(", ")", ":", "handle", "=", "getattr", "(", "self", ",", "\"handle_{name:s}\"", ".", "format", "(", "name", "=", "name", ")", ",", "None", ")", "if", "handle", "is", "not", "None", "and", "isinstance", "(", "handle", ",", "collections", ".", "Callable", ")", ":", "handle", "(", "*", "self", ".", "arguments", ",", "*", "*", "self", ".", "options", ")" ]
Parses arguments and options, runs validate_<action> for each action named by self.get_actions(), then runs handle_<action> for each action named by self.get_actions().
[ "Parses", "arguments", "and", "options", "runs", "validate_<action", ">", "for", "each", "action", "named", "by", "self", ".", "get_actions", "()", "then", "runs", "handle_<action", ">", "for", "each", "action", "named", "by", "self", ".", "get_actions", "()", "." ]
python
train
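The validate_<action>/handle_<action> convention above, reduced to a runnable toy outside Django; `callable()` stands in for the `collections.Callable` check, which is gone in modern Python:

class Command:
    def get_actions(self):
        return ['build', 'deploy']

    def validate_build(self, *args, **opts):
        print('validate build')

    def handle_build(self, *args, **opts):
        print('handle build')

    def handle_deploy(self, *args, **opts):
        print('handle deploy')

    def handle(self, *args, **opts):
        # Two passes, as in the record: every validation first, then every handler.
        for name in self.get_actions():
            hook = getattr(self, 'validate_' + name, None)
            if callable(hook):
                hook(*args, **opts)
        for name in self.get_actions():
            hook = getattr(self, 'handle_' + name, None)
            if callable(hook):
                hook(*args, **opts)

Command().handle()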
soimort/you-get
src/you_get/util/log.py
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L72-L74
def print_log(text, *colors): """Print a log message to standard error.""" sys.stderr.write(sprint("{}: {}".format(script_name, text), *colors) + "\n")
[ "def", "print_log", "(", "text", ",", "*", "colors", ")", ":", "sys", ".", "stderr", ".", "write", "(", "sprint", "(", "\"{}: {}\"", ".", "format", "(", "script_name", ",", "text", ")", ",", "*", "colors", ")", "+", "\"\\n\"", ")" ]
Print a log message to standard error.
[ "Print", "a", "log", "message", "to", "standard", "error", "." ]
python
test
kubernetes-client/python
kubernetes/client/apis/admissionregistration_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/admissionregistration_v1beta1_api.py#L1185-L1209
def patch_validating_webhook_configuration(self, name, body, **kwargs):
    """
    partially update the specified ValidatingWebhookConfiguration
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.patch_validating_webhook_configuration(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the ValidatingWebhookConfiguration (required)
    :param object body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).
    :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.
    :return: V1beta1ValidatingWebhookConfiguration
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.patch_validating_webhook_configuration_with_http_info(name, body, **kwargs)
    else:
        (data) = self.patch_validating_webhook_configuration_with_http_info(name, body, **kwargs)
        return data
[ "def", "patch_validating_webhook_configuration", "(", "self", ",", "name", ",", "body", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "patch_validating_webhook_configuration_with_http_info", "(", "name", ",", "body", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "patch_validating_webhook_configuration_with_http_info", "(", "name", ",", "body", ",", "*", "*", "kwargs", ")", "return", "data" ]
partially update the specified ValidatingWebhookConfiguration This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_validating_webhook_configuration(name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the ValidatingWebhookConfiguration (required) :param object body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch). :param bool force: Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests. :return: V1beta1ValidatingWebhookConfiguration If the method is called asynchronously, returns the request thread.
[ "partially", "update", "the", "specified", "ValidatingWebhookConfiguration", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "patch_validating_webhook_configuration", "(", "name", "body", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
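A hedged usage sketch for the sync/async pattern above, assuming a configured kubernetes client; the webhook name and patch body are invented for illustration:

from kubernetes import client, config

config.load_kube_config()  # assumes a usable kubeconfig
api = client.AdmissionregistrationV1beta1Api()

patch = {"metadata": {"labels": {"audited": "true"}}}  # hypothetical patch

# Synchronous (default): returns the updated object directly.
result = api.patch_validating_webhook_configuration("my-webhook", patch)

# Asynchronous: returns a thread whose .get() blocks for the result.
thread = api.patch_validating_webhook_configuration("my-webhook", patch,
                                                    async_req=True)
result = thread.get()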
Netflix-Skunkworks/swag-client
swag_client/cli.py
https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L146-L154
def list_service(ctx, name): """Retrieve accounts pertaining to named service.""" swag = create_swag_from_ctx(ctx) accounts = swag.get_service_enabled(name) _table = [[result['name'], result.get('id')] for result in accounts] click.echo( tabulate(_table, headers=["Account Name", "Account Number"]) )
[ "def", "list_service", "(", "ctx", ",", "name", ")", ":", "swag", "=", "create_swag_from_ctx", "(", "ctx", ")", "accounts", "=", "swag", ".", "get_service_enabled", "(", "name", ")", "_table", "=", "[", "[", "result", "[", "'name'", "]", ",", "result", ".", "get", "(", "'id'", ")", "]", "for", "result", "in", "accounts", "]", "click", ".", "echo", "(", "tabulate", "(", "_table", ",", "headers", "=", "[", "\"Account Name\"", ",", "\"Account Number\"", "]", ")", ")" ]
Retrieve accounts pertaining to named service.
[ "Retrieve", "accounts", "pertaining", "to", "named", "service", "." ]
python
train
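The rendering in list_service reduces to tabulate over [name, id] rows; a standalone sketch with made-up account records shaped like the swag results:

from tabulate import tabulate

accounts = [  # hypothetical records
    {"name": "prod", "id": "111111111111"},
    {"name": "dev", "id": "222222222222"},
]
table = [[a["name"], a.get("id")] for a in accounts]
print(tabulate(table, headers=["Account Name", "Account Number"]))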
mlperf/training
rnn_translator/pytorch/seq2seq/data/tokenizer.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/rnn_translator/pytorch/seq2seq/data/tokenizer.py#L44-L56
def pad_vocabulary(self, vocab, pad): """ Pads vocabulary to a multiple of 'pad' tokens. :param vocab: list with vocabulary :param pad: integer """ vocab_size = len(vocab) padded_vocab_size = (vocab_size + pad - 1) // pad * pad for i in range(0, padded_vocab_size - vocab_size): token = f'madeupword{i:04d}' vocab.append(token) assert len(vocab) % pad == 0
[ "def", "pad_vocabulary", "(", "self", ",", "vocab", ",", "pad", ")", ":", "vocab_size", "=", "len", "(", "vocab", ")", "padded_vocab_size", "=", "(", "vocab_size", "+", "pad", "-", "1", ")", "//", "pad", "*", "pad", "for", "i", "in", "range", "(", "0", ",", "padded_vocab_size", "-", "vocab_size", ")", ":", "token", "=", "f'madeupword{i:04d}'", "vocab", ".", "append", "(", "token", ")", "assert", "len", "(", "vocab", ")", "%", "pad", "==", "0" ]
Pads vocabulary to a multiple of 'pad' tokens. :param vocab: list with vocabulary :param pad: integer
[ "Pads", "vocabulary", "to", "a", "multiple", "of", "pad", "tokens", "." ]
python
train
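The padding arithmetic is the usual round-up-to-a-multiple trick, (n + pad - 1) // pad * pad; a worked sketch of the same logic on a plain list:

def pad_vocabulary(vocab, pad):
    # e.g. len(vocab) == 5, pad == 8  ->  padded_vocab_size == 8
    padded_vocab_size = (len(vocab) + pad - 1) // pad * pad
    for i in range(padded_vocab_size - len(vocab)):
        vocab.append('madeupword{0:04d}'.format(i))
    assert len(vocab) % pad == 0

vocab = ['<pad>', '<s>', '</s>', 'hello', 'world']
pad_vocabulary(vocab, pad=8)
print(len(vocab), vocab[-3:])
# 8 ['madeupword0000', 'madeupword0001', 'madeupword0002']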
vaexio/vaex
packages/vaex-core/vaex/functions.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L770-L805
def str_get(x, i): """Extract a character from each sample at the specified position from a string column. Note that if the specified position is out of bound of the string sample, this method returns '', while pandas retunrs nan. :param int i: The index location, at which to extract the character. :returns: an expression containing the extracted characters. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.get(5) Expression = str_get(text, 5) Length: 5 dtype: str (expression) --------------------------------- 0 h 1 p 2 m 3 4 """ x = _to_string_sequence(x) if i == -1: sl = x.slice_string_end(-1) else: sl = x.slice_string(i, i+1) return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "def", "str_get", "(", "x", ",", "i", ")", ":", "x", "=", "_to_string_sequence", "(", "x", ")", "if", "i", "==", "-", "1", ":", "sl", "=", "x", ".", "slice_string_end", "(", "-", "1", ")", "else", ":", "sl", "=", "x", ".", "slice_string", "(", "i", ",", "i", "+", "1", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
Extract a character from each sample at the specified position from a string column. Note that if the specified position is out of bounds of the string sample, this method returns '', while pandas returns nan. :param int i: The index location, at which to extract the character. :returns: an expression containing the extracted characters. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.get(5) Expression = str_get(text, 5) Length: 5 dtype: str (expression) --------------------------------- 0 h 1 p 2 m 3 4
[ "Extract", "a", "character", "from", "each", "sample", "at", "the", "specified", "position", "from", "a", "string", "column", ".", "Note", "that", "if", "the", "specified", "position", "is", "out", "of", "bound", "of", "the", "string", "sample", "this", "method", "returns", "while", "pandas", "retunrs", "nan", "." ]
python
test
abusque/qng
qng/generator.py
https://github.com/abusque/qng/blob/93d2efd637b2a6bba7d3872fb9ff2bb3fc5c979d/qng/generator.py#L111-L119
def _get_surnames(self): """Get the list of surnames. :return: A list of surname entries. """ names = self._read_name_file('surnames.json') names = self._compute_weights(names) return names
[ "def", "_get_surnames", "(", "self", ")", ":", "names", "=", "self", ".", "_read_name_file", "(", "'surnames.json'", ")", "names", "=", "self", ".", "_compute_weights", "(", "names", ")", "return", "names" ]
Get the list of surnames. :return: A list of surname entries.
[ "Get", "the", "list", "of", "surnames", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/psl.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/psl.py#L105-L113
def _isProtein(self): """ check if blockSizes and scores are in the protein space or not """ last = self.blockCount - 1 return ((self.tEnd == self.tStarts[last] + 3 * self.blockSizes[last]) \ and self.strand == "+") or \ ((self.tStart == self.tSize - (self.tStarts[last] + 3 * self.blockSizes[last])\ and self.strand == "-"))
[ "def", "_isProtein", "(", "self", ")", ":", "last", "=", "self", ".", "blockCount", "-", "1", "return", "(", "(", "self", ".", "tEnd", "==", "self", ".", "tStarts", "[", "last", "]", "+", "3", "*", "self", ".", "blockSizes", "[", "last", "]", ")", "and", "self", ".", "strand", "==", "\"+\"", ")", "or", "(", "(", "self", ".", "tStart", "==", "self", ".", "tSize", "-", "(", "self", ".", "tStarts", "[", "last", "]", "+", "3", "*", "self", ".", "blockSizes", "[", "last", "]", ")", "and", "self", ".", "strand", "==", "\"-\"", ")", ")" ]
check if blockSizes and scores are in the protein space or not
[ "check", "if", "blockSizes", "and", "scores", "are", "in", "the", "protein", "space", "or", "not" ]
python
train
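The condition in _isProtein relies on protein-vs-nucleotide alignments advancing the target three bases per aligned unit, so the final block must end exactly 3 * blockSizes[last] past tStarts[last]. A hedged sketch of the plus-strand branch on a dummy record (all field values invented):

class PslStub(object):  # hypothetical stand-in for the PSL line object
    strand = "+"
    blockCount = 2
    blockSizes = [10, 20]
    tStarts = [100, 140]
    tEnd = 140 + 3 * 20  # 3 target bases per protein residue

p = PslStub()
last = p.blockCount - 1
is_protein = (p.tEnd == p.tStarts[last] + 3 * p.blockSizes[last]) and p.strand == "+"
print(is_protein)  # True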
ulule/django-badgify
badgify/recipe.py
https://github.com/ulule/django-badgify/blob/1bf233ffeb6293ee659454de7b3794682128b6ca/badgify/recipe.py#L174-L180
def get_current_user_ids(self, db_read=None): """ Returns current user ids and the count. """ db_read = db_read or self.db_read return self.user_ids.using(db_read)
[ "def", "get_current_user_ids", "(", "self", ",", "db_read", "=", "None", ")", ":", "db_read", "=", "db_read", "or", "self", ".", "db_read", "return", "self", ".", "user_ids", ".", "using", "(", "db_read", ")" ]
Returns current user ids.
[ "Returns", "current", "user", "ids", "and", "the", "count", "." ]
python
train
has2k1/plotnine
plotnine/facets/facet.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/facets/facet.py#L507-L553
def draw_strip_text(self, text_lines, location, pid): """ Create a background patch and put a label on it """ ax = self.axs[pid] themeable = self.figure._themeable dim = self.strip_dimensions(text_lines, location, pid) if location == 'right': rotation = -90 label = '\n'.join(reversed(text_lines)) else: rotation = 0 label = '\n'.join(text_lines) rect = mpatch.FancyBboxPatch((dim.box_x, dim.box_y), width=dim.box_width, height=dim.box_height, facecolor='lightgrey', edgecolor='None', transform=ax.transAxes, zorder=2.2, # > ax line & boundary boxstyle='square, pad=0', clip_on=False) text = mtext.Text(dim.x, dim.y, label, rotation=rotation, verticalalignment='center', horizontalalignment='center', transform=ax.transAxes, zorder=3.3, # > rect clip_on=False) ax.add_artist(rect) ax.add_artist(text) for key in ('strip_text_x', 'strip_text_y', 'strip_background_x', 'strip_background_y'): if key not in themeable: themeable[key] = [] if location == 'right': themeable['strip_background_y'].append(rect) themeable['strip_text_y'].append(text) else: themeable['strip_background_x'].append(rect) themeable['strip_text_x'].append(text)
[ "def", "draw_strip_text", "(", "self", ",", "text_lines", ",", "location", ",", "pid", ")", ":", "ax", "=", "self", ".", "axs", "[", "pid", "]", "themeable", "=", "self", ".", "figure", ".", "_themeable", "dim", "=", "self", ".", "strip_dimensions", "(", "text_lines", ",", "location", ",", "pid", ")", "if", "location", "==", "'right'", ":", "rotation", "=", "-", "90", "label", "=", "'\\n'", ".", "join", "(", "reversed", "(", "text_lines", ")", ")", "else", ":", "rotation", "=", "0", "label", "=", "'\\n'", ".", "join", "(", "text_lines", ")", "rect", "=", "mpatch", ".", "FancyBboxPatch", "(", "(", "dim", ".", "box_x", ",", "dim", ".", "box_y", ")", ",", "width", "=", "dim", ".", "box_width", ",", "height", "=", "dim", ".", "box_height", ",", "facecolor", "=", "'lightgrey'", ",", "edgecolor", "=", "'None'", ",", "transform", "=", "ax", ".", "transAxes", ",", "zorder", "=", "2.2", ",", "# > ax line & boundary", "boxstyle", "=", "'square, pad=0'", ",", "clip_on", "=", "False", ")", "text", "=", "mtext", ".", "Text", "(", "dim", ".", "x", ",", "dim", ".", "y", ",", "label", ",", "rotation", "=", "rotation", ",", "verticalalignment", "=", "'center'", ",", "horizontalalignment", "=", "'center'", ",", "transform", "=", "ax", ".", "transAxes", ",", "zorder", "=", "3.3", ",", "# > rect", "clip_on", "=", "False", ")", "ax", ".", "add_artist", "(", "rect", ")", "ax", ".", "add_artist", "(", "text", ")", "for", "key", "in", "(", "'strip_text_x'", ",", "'strip_text_y'", ",", "'strip_background_x'", ",", "'strip_background_y'", ")", ":", "if", "key", "not", "in", "themeable", ":", "themeable", "[", "key", "]", "=", "[", "]", "if", "location", "==", "'right'", ":", "themeable", "[", "'strip_background_y'", "]", ".", "append", "(", "rect", ")", "themeable", "[", "'strip_text_y'", "]", ".", "append", "(", "text", ")", "else", ":", "themeable", "[", "'strip_background_x'", "]", ".", "append", "(", "rect", ")", "themeable", "[", "'strip_text_x'", "]", ".", "append", "(", "text", ")" ]
Create a background patch and put a label on it
[ "Create", "a", "background", "patch", "and", "put", "a", "label", "on", "it" ]
python
train
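A standalone matplotlib sketch of the same strip-label construction, a FancyBboxPatch in axes coordinates with a centered Text drawn over it; the geometry below is invented, not plotnine's computed strip dimensions:

import matplotlib.pyplot as plt
import matplotlib.patches as mpatch
import matplotlib.text as mtext

fig, ax = plt.subplots()
rect = mpatch.FancyBboxPatch((0, 1.0), width=1, height=0.08,
                             facecolor='lightgrey', edgecolor='None',
                             transform=ax.transAxes,
                             boxstyle='square, pad=0', clip_on=False)
text = mtext.Text(0.5, 1.04, 'strip label',
                  verticalalignment='center', horizontalalignment='center',
                  transform=ax.transAxes, clip_on=False)
ax.add_artist(rect)
ax.add_artist(text)
fig.savefig('strip.png')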
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/mmax2.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/mmax2.py#L225-L227
def get_token_nodes_from_sentence(self, sentence_root_node): """returns a list of token node IDs belonging to the given sentence""" return spanstring2tokens(self, self.node[sentence_root_node][self.ns+':span'])
[ "def", "get_token_nodes_from_sentence", "(", "self", ",", "sentence_root_node", ")", ":", "return", "spanstring2tokens", "(", "self", ",", "self", ".", "node", "[", "sentence_root_node", "]", "[", "self", ".", "ns", "+", "':span'", "]", ")" ]
returns a list of token node IDs belonging to the given sentence
[ "returns", "a", "list", "of", "token", "node", "IDs", "belonging", "to", "the", "given", "sentence" ]
python
train
CI-WATER/mapkit
mapkit/__init__.py
https://github.com/CI-WATER/mapkit/blob/ce5fbded6af7adabdf1eec85631c6811ef8ecc34/mapkit/__init__.py#L22-L48
def lookupSpatialReferenceID(wellKnownText): """ This function can be used to look up the EPSG spatial reference system using the web service available at: http://prj2epsg.org Args: wellKnownText (str): The Well Known Text definition of the spatial reference system. Returns: int: Spatial Reference ID """ payload = {'mode': 'wkt', 'terms': wellKnownText} try: r = requests.get('http://prj2epsg.org/search.json', params=payload) except requests.exceptions.ConnectionError: print("SRID Lookup Error: Could not automatically determine spatial " "reference ID, because there is no internet connection. " "Please check connection and try again.") exit(1) if r.status_code == 200: json = r.json() for code in json['codes']: return code['code']
[ "def", "lookupSpatialReferenceID", "(", "wellKnownText", ")", ":", "payload", "=", "{", "'mode'", ":", "'wkt'", ",", "'terms'", ":", "wellKnownText", "}", "try", ":", "r", "=", "requests", ".", "get", "(", "'http://prj2epsg.org/search.json'", ",", "params", "=", "payload", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", ":", "print", "(", "\"SRID Lookup Error: Could not automatically determine spatial \"", "\"reference ID, because there is no internet connection. \"", "\"Please check connection and try again.\"", ")", "exit", "(", "1", ")", "if", "r", ".", "status_code", "==", "200", ":", "json", "=", "r", ".", "json", "(", ")", "for", "code", "in", "json", "[", "'codes'", "]", ":", "return", "code", "[", "'code'", "]" ]
This function can be used to look up the EPSG spatial reference system using the web service available at: http://prj2epsg.org Args: wellKnownText (str): The Well Known Text definition of the spatial reference system. Returns: int: Spatial Reference ID
[ "This", "function", "can", "be", "used", "to", "look", "up", "the", "EPSG", "spatial", "reference", "system", "using", "the", "web", "service", "available", "at", ":", "http", ":", "//", "prj2epsg", ".", "org" ]
python
train
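A hedged usage sketch, assuming network access to prj2epsg.org; the WKT below is deliberately abbreviated and hypothetical:

wkt = 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563]]]'
srid = lookupSpatialReferenceID(wkt)
print(srid)  # expected to be 4326 for a full WGS 84 definition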
gautammishra/lyft-rides-python-sdk
lyft_rides/client.py
https://github.com/gautammishra/lyft-rides-python-sdk/blob/b6d96a0fceaf7dc3425153c418a8e25c57803431/lyft_rides/client.py#L54-L77
def _api_call(self, method, target, args=None): """Create a Request object and execute the call to the API Server. Parameters method (str) HTTP request (e.g. 'POST'). target (str) The target URL with leading slash (e.g. '/v1/products'). args (dict) Optional dictionary of arguments to attach to the request. Returns (Response) The server's response to an HTTP request. """ self.refresh_oauth_credential() request = Request( auth_session=self.session, api_host=self.api_host, method=method, path=target, args=args, ) return request.execute()
[ "def", "_api_call", "(", "self", ",", "method", ",", "target", ",", "args", "=", "None", ")", ":", "self", ".", "refresh_oauth_credential", "(", ")", "request", "=", "Request", "(", "auth_session", "=", "self", ".", "session", ",", "api_host", "=", "self", ".", "api_host", ",", "method", "=", "method", ",", "path", "=", "target", ",", "args", "=", "args", ",", ")", "return", "request", ".", "execute", "(", ")" ]
Create a Request object and execute the call to the API Server. Parameters method (str) HTTP request (e.g. 'POST'). target (str) The target URL with leading slash (e.g. '/v1/products'). args (dict) Optional dictionary of arguments to attach to the request. Returns (Response) The server's response to an HTTP request.
[ "Create", "a", "Request", "object", "and", "execute", "the", "call", "to", "the", "API", "Server", ".", "Parameters", "method", "(", "str", ")", "HTTP", "request", "(", "e", ".", "g", ".", "POST", ")", ".", "target", "(", "str", ")", "The", "target", "URL", "with", "leading", "slash", "(", "e", ".", "g", ".", "/", "v1", "/", "products", ")", ".", "args", "(", "dict", ")", "Optional", "dictionary", "of", "arguments", "to", "attach", "to", "the", "request", ".", "Returns", "(", "Response", ")", "The", "server", "s", "response", "to", "an", "HTTP", "request", "." ]
python
train
cjdrake/pyeda
pyeda/boolalg/bdd.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/boolalg/bdd.py#L116-L131
def _expr2bddnode(expr): """Convert an expression into a BDD node.""" if expr.is_zero(): return BDDNODEZERO elif expr.is_one(): return BDDNODEONE else: top = expr.top # Register this variable _ = bddvar(top.names, top.indices) root = top.uniqid lo = _expr2bddnode(expr.restrict({top: 0})) hi = _expr2bddnode(expr.restrict({top: 1})) return _bddnode(root, lo, hi)
[ "def", "_expr2bddnode", "(", "expr", ")", ":", "if", "expr", ".", "is_zero", "(", ")", ":", "return", "BDDNODEZERO", "elif", "expr", ".", "is_one", "(", ")", ":", "return", "BDDNODEONE", "else", ":", "top", "=", "expr", ".", "top", "# Register this variable", "_", "=", "bddvar", "(", "top", ".", "names", ",", "top", ".", "indices", ")", "root", "=", "top", ".", "uniqid", "lo", "=", "_expr2bddnode", "(", "expr", ".", "restrict", "(", "{", "top", ":", "0", "}", ")", ")", "hi", "=", "_expr2bddnode", "(", "expr", ".", "restrict", "(", "{", "top", ":", "1", "}", ")", ")", "return", "_bddnode", "(", "root", ",", "lo", ",", "hi", ")" ]
Convert an expression into a BDD node.
[ "Convert", "an", "expression", "into", "a", "BDD", "node", "." ]
python
train
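The recursion above is a Shannon expansion: cofactor the expression on its top variable with 0 and 1, convert each cofactor, and join the two sub-BDDs under a node for that variable. The cofactoring step can be reproduced with pyeda's public expression API, which the record itself uses (expr.top, expr.restrict):

from pyeda.inter import expr

f = expr("a & b | ~a & c")
lo = f.restrict({f.top: 0})  # cofactor with the top variable set to 0
hi = f.restrict({f.top: 1})  # cofactor with the top variable set to 1
print(f.top, lo, hi)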
hodgesds/elasticsearch_tornado
elasticsearch_tornado/client.py
https://github.com/hodgesds/elasticsearch_tornado/blob/5acc1385589c92ffe3587ad05b7921c2cd1a30da/elasticsearch_tornado/client.py#L1260-L1272
def abort_benchmark(self, name=None, params={}, body='', callback=None, **kwargs): """ Aborts a running benchmark. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-benchmark.html>`_ :arg name: A benchmark name """ url = self.mk_url(*['_bench', 'abort', name]) self.client.fetch( self.mk_req(url, method='POST', body=body, **kwargs), callback = callback )
[ "def", "abort_benchmark", "(", "self", ",", "name", "=", "None", ",", "params", "=", "{", "}", ",", "body", "=", "''", ",", "callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "mk_url", "(", "*", "[", "'_bench'", ",", "'abort'", ",", "name", "]", ")", "self", ".", "client", ".", "fetch", "(", "self", ".", "mk_req", "(", "url", ",", "method", "=", "'POST'", ",", "body", "=", "body", ",", "*", "*", "kwargs", ")", ",", "callback", "=", "callback", ")" ]
Aborts a running benchmark. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/search-benchmark.html>`_ :arg name: A benchmark name
[ "Aborts", "a", "running", "benchmark", ".", "<http", ":", "//", "www", ".", "elasticsearch", ".", "org", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "master", "/", "search", "-", "benchmark", ".", "html", ">", "_", ":", "arg", "name", ":", "A", "benchmark", "name" ]
python
train
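The request above reduces to POSTing to /_bench/abort/<name>. A hedged sketch of the same call with Tornado's plain AsyncHTTPClient, using the pre-6.0 callback style that this record targets; host and benchmark name are invented:

from tornado import ioloop
from tornado.httpclient import AsyncHTTPClient

def on_response(response):
    print(response.code, response.body)
    ioloop.IOLoop.current().stop()

client = AsyncHTTPClient()
client.fetch("http://localhost:9200/_bench/abort/my_benchmark",
             method="POST", body="", callback=on_response)
ioloop.IOLoop.current().start()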
datadotworld/data.world-py
datadotworld/client/api.py
https://github.com/datadotworld/data.world-py/blob/ffaeb115f358731ab0b805b0c43b7ff2e3cf0a77/datadotworld/client/api.py#L162-L206
def update_dataset(self, dataset_key, **kwargs): """Update an existing dataset :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'}, optional :param files: File names and source URLs to add or update :type files: dict, optional :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_dataset( ... 'username/test-dataset', ... tags=['demo', 'datadotworld']) # doctest: +SKIP """ request = self.__build_dataset_obj( lambda: _swagger.DatasetPatchRequest(), lambda name, url, expand_archive, description, labels: _swagger.FileCreateOrUpdateRequest( name=name, source=_swagger.FileSourceCreateOrUpdateRequest( url=url, expand_archive=expand_archive) if url is not None else None, description=description, labels=labels), kwargs) owner_id, dataset_id = parse_dataset_key(dataset_key) try: self._datasets_api.patch_dataset(owner_id, dataset_id, request) except _swagger.rest.ApiException as e: raise RestApiError(cause=e)
[ "def", "update_dataset", "(", "self", ",", "dataset_key", ",", "*", "*", "kwargs", ")", ":", "request", "=", "self", ".", "__build_dataset_obj", "(", "lambda", ":", "_swagger", ".", "DatasetPatchRequest", "(", ")", ",", "lambda", "name", ",", "url", ",", "expand_archive", ",", "description", ",", "labels", ":", "_swagger", ".", "FileCreateOrUpdateRequest", "(", "name", "=", "name", ",", "source", "=", "_swagger", ".", "FileSourceCreateOrUpdateRequest", "(", "url", "=", "url", ",", "expand_archive", "=", "expand_archive", ")", "if", "url", "is", "not", "None", "else", "None", ",", "description", "=", "description", ",", "labels", "=", "labels", ")", ",", "kwargs", ")", "owner_id", ",", "dataset_id", "=", "parse_dataset_key", "(", "dataset_key", ")", "try", ":", "self", ".", "_datasets_api", ".", "patch_dataset", "(", "owner_id", ",", "dataset_id", ",", "request", ")", "except", "_swagger", ".", "rest", ".", "ApiException", "as", "e", ":", "raise", "RestApiError", "(", "cause", "=", "e", ")" ]
Update an existing dataset :param description: Dataset description :type description: str, optional :param summary: Dataset summary markdown :type summary: str, optional :param tags: Dataset tags :type tags: list, optional :param license: Dataset license :type license: {'Public Domain', 'PDDL', 'CC-0', 'CC-BY', 'ODC-BY', 'CC-BY-SA', 'ODC-ODbL', 'CC BY-NC', 'CC BY-NC-SA', 'Other'} :param visibility: Dataset visibility :type visibility: {'OPEN', 'PRIVATE'}, optional :param files: File names and source URLs to add or update :type files: dict, optional :param dataset_key: Dataset identifier, in the form of owner/id :type dataset_key: str :raises RestApiException: If a server error occurs Examples -------- >>> import datadotworld as dw >>> api_client = dw.api_client() >>> api_client.update_dataset( ... 'username/test-dataset', ... tags=['demo', 'datadotworld']) # doctest: +SKIP
[ "Update", "an", "existing", "dataset" ]
python
train
echonest/pyechonest
pyechonest/song.py
https://github.com/echonest/pyechonest/blob/d8c7af6c1da699b50b2f4b1bd3c0febe72e7f1ee/pyechonest/song.py#L120-L143
def get_song_hotttnesss(self, cache=True): """Get our numerical description of how hottt a song currently is Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: A float representing hotttnesss. Example: >>> s = song.Song('SOLUHKP129F0698D49') >>> s.get_song_hotttnesss() 0.57344379999999995 >>> s.song_hotttnesss 0.57344379999999995 >>> """ if not (cache and ('song_hotttnesss' in self.cache)): response = self.get_attribute('profile', bucket='song_hotttnesss') self.cache['song_hotttnesss'] = response['songs'][0]['song_hotttnesss'] return self.cache['song_hotttnesss']
[ "def", "get_song_hotttnesss", "(", "self", ",", "cache", "=", "True", ")", ":", "if", "not", "(", "cache", "and", "(", "'song_hotttnesss'", "in", "self", ".", "cache", ")", ")", ":", "response", "=", "self", ".", "get_attribute", "(", "'profile'", ",", "bucket", "=", "'song_hotttnesss'", ")", "self", ".", "cache", "[", "'song_hotttnesss'", "]", "=", "response", "[", "'songs'", "]", "[", "0", "]", "[", "'song_hotttnesss'", "]", "return", "self", ".", "cache", "[", "'song_hotttnesss'", "]" ]
Get our numerical description of how hottt a song currently is Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: A float representing hotttnesss. Example: >>> s = song.Song('SOLUHKP129F0698D49') >>> s.get_song_hotttnesss() 0.57344379999999995 >>> s.song_hotttnesss 0.57344379999999995 >>>
[ "Get", "our", "numerical", "description", "of", "how", "hottt", "a", "song", "currently", "is", "Args", ":", "Kwargs", ":", "cache", "(", "bool", ")", ":", "A", "boolean", "indicating", "whether", "or", "not", "the", "cached", "value", "should", "be", "used", "(", "if", "available", ")", ".", "Defaults", "to", "True", ".", "Returns", ":", "A", "float", "representing", "hotttnesss", ".", "Example", ":", ">>>", "s", "=", "song", ".", "Song", "(", "SOLUHKP129F0698D49", ")", ">>>", "s", ".", "get_song_hotttnesss", "()", "0", ".", "57344379999999995", ">>>", "s", ".", "song_hotttnesss", "0", ".", "57344379999999995", ">>>" ]
python
train
miso-belica/sumy
sumy/evaluation/content_based.py
https://github.com/miso-belica/sumy/blob/099ab4938e2c1b6a011297375586bac2953641b9/sumy/evaluation/content_based.py#L36-L57
def unit_overlap(evaluated_model, reference_model): """ Computes unit overlap of two text documents. Documents has to be represented as TF models of non-empty document. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same. """ if not (isinstance(evaluated_model, TfModel) and isinstance(reference_model, TfModel)): raise ValueError( "Arguments has to be instances of 'sumy.models.TfDocumentModel'") terms1 = frozenset(evaluated_model.terms) terms2 = frozenset(reference_model.terms) if not terms1 and not terms2: raise ValueError( "Documents can't be empty. Please pass the valid documents.") common_terms_count = len(terms1 & terms2) return common_terms_count / (len(terms1) + len(terms2) - common_terms_count)
[ "def", "unit_overlap", "(", "evaluated_model", ",", "reference_model", ")", ":", "if", "not", "(", "isinstance", "(", "evaluated_model", ",", "TfModel", ")", "and", "isinstance", "(", "reference_model", ",", "TfModel", ")", ")", ":", "raise", "ValueError", "(", "\"Arguments has to be instances of 'sumy.models.TfDocumentModel'\"", ")", "terms1", "=", "frozenset", "(", "evaluated_model", ".", "terms", ")", "terms2", "=", "frozenset", "(", "reference_model", ".", "terms", ")", "if", "not", "terms1", "and", "not", "terms2", ":", "raise", "ValueError", "(", "\"Documents can't be empty. Please pass the valid documents.\"", ")", "common_terms_count", "=", "len", "(", "terms1", "&", "terms2", ")", "return", "common_terms_count", "/", "(", "len", "(", "terms1", ")", "+", "len", "(", "terms2", ")", "-", "common_terms_count", ")" ]
Computes unit overlap of two text documents. Documents have to be represented as TF models of non-empty documents. :returns float: 0 <= overlap <= 1, where 0 means no match and 1 means exactly the same.
[ "Computes", "unit", "overlap", "of", "two", "text", "documents", ".", "Documents", "has", "to", "be", "represented", "as", "TF", "models", "of", "non", "-", "empty", "document", "." ]
python
train
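The returned value is the Jaccard index, since len(terms1) + len(terms2) - common equals the size of the union; a worked sketch on plain sets:

terms1 = frozenset(["cat", "sat", "mat"])
terms2 = frozenset(["cat", "mat", "hat"])
common = len(terms1 & terms2)                            # 2
overlap = common / (len(terms1) + len(terms2) - common)  # 2 / (3 + 3 - 2)
print(overlap)  # 0.5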
willkg/socorro-siggen
siggen/cmd_fetch_data.py
https://github.com/willkg/socorro-siggen/blob/db7e3233e665a458a961c48da22e93a69b1d08d6/siggen/cmd_fetch_data.py#L20-L39
def _fill_text(self, text, width, indent): """Wraps text like HelpFormatter, but doesn't squash lines This makes it easier to do lists and paragraphs. """ parts = text.split('\n\n') for i, part in enumerate(parts): # Check to see if it's a bulleted list--if so, then fill each line if part.startswith('* '): subparts = part.split('\n') for j, subpart in enumerate(subparts): subparts[j] = super(WrappedTextHelpFormatter, self)._fill_text( subpart, width, indent ) parts[i] = '\n'.join(subparts) else: parts[i] = super(WrappedTextHelpFormatter, self)._fill_text(part, width, indent) return '\n\n'.join(parts)
[ "def", "_fill_text", "(", "self", ",", "text", ",", "width", ",", "indent", ")", ":", "parts", "=", "text", ".", "split", "(", "'\\n\\n'", ")", "for", "i", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "# Check to see if it's a bulleted list--if so, then fill each line", "if", "part", ".", "startswith", "(", "'* '", ")", ":", "subparts", "=", "part", ".", "split", "(", "'\\n'", ")", "for", "j", ",", "subpart", "in", "enumerate", "(", "subparts", ")", ":", "subparts", "[", "j", "]", "=", "super", "(", "WrappedTextHelpFormatter", ",", "self", ")", ".", "_fill_text", "(", "subpart", ",", "width", ",", "indent", ")", "parts", "[", "i", "]", "=", "'\\n'", ".", "join", "(", "subparts", ")", "else", ":", "parts", "[", "i", "]", "=", "super", "(", "WrappedTextHelpFormatter", ",", "self", ")", ".", "_fill_text", "(", "part", ",", "width", ",", "indent", ")", "return", "'\\n\\n'", ".", "join", "(", "parts", ")" ]
Wraps text like HelpFormatter, but doesn't squash lines This makes it easier to do lists and paragraphs.
[ "Wraps", "text", "like", "HelpFormatter", "but", "doesn", "t", "squash", "lines" ]
python
train
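A hedged usage sketch with argparse, assuming the formatter is importable from the module path in this record; the description text is invented:

import argparse
from siggen.cmd_fetch_data import WrappedTextHelpFormatter  # assumed import path

parser = argparse.ArgumentParser(
    formatter_class=WrappedTextHelpFormatter,
    description=(
        "Fetch crash data.\n\n"
        "* first bullet, filled on its own line\n"
        "* second bullet\n\n"
        "A closing paragraph that wraps normally."
    ),
)
parser.print_help()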
sloria/environs
environs.py
https://github.com/sloria/environs/blob/cf0b5e865b0ce96ce77d459124a1dba84c9deda7/environs.py#L83-L94
def _dict2schema(dct): """Generate a `marshmallow.Schema` class given a dictionary of `Fields <marshmallow.fields.Field>`. """ attrs = dct.copy() if MARSHMALLOW_VERSION_INFO[0] < 3: class Meta(object): strict = True attrs["Meta"] = Meta return type(str(""), (ma.Schema,), attrs)
[ "def", "_dict2schema", "(", "dct", ")", ":", "attrs", "=", "dct", ".", "copy", "(", ")", "if", "MARSHMALLOW_VERSION_INFO", "[", "0", "]", "<", "3", ":", "class", "Meta", "(", "object", ")", ":", "strict", "=", "True", "attrs", "[", "\"Meta\"", "]", "=", "Meta", "return", "type", "(", "str", "(", "\"\"", ")", ",", "(", "ma", ".", "Schema", ",", ")", ",", "attrs", ")" ]
Generate a `marshmallow.Schema` class given a dictionary of `Fields <marshmallow.fields.Field>`.
[ "Generate", "a", "marshmallow", ".", "Schema", "class", "given", "a", "dictionary", "of", "Fields", "<marshmallow", ".", "fields", ".", "Field", ">", "." ]
python
train
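A hedged sketch of the same trick with plain marshmallow 3 — an anonymous Schema subclass built with type() from a dict of fields (field names invented):

import marshmallow as ma

def dict2schema(dct):
    # type() builds the Schema subclass dynamically; no class statement needed.
    return type(str(""), (ma.Schema,), dct.copy())

Schema = dict2schema({"port": ma.fields.Int(), "debug": ma.fields.Bool()})
print(Schema().load({"port": "8080", "debug": True}))
# {'port': 8080, 'debug': True}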
Rikanishu/static-bundle
static_bundle/bundles.py
https://github.com/Rikanishu/static-bundle/blob/2f6458cb9d9d9049b4fd829f7d6951a45d547c68/static_bundle/bundles.py#L94-L105
def add_prepare_handler(self, prepare_handlers): """ Add prepare handler to bundle :type: prepare_handler: static_bundle.handlers.AbstractPrepareHandler """ if not isinstance(prepare_handlers, static_bundle.BUNDLE_ITERABLE_TYPES): prepare_handlers = [prepare_handlers] if self.prepare_handlers_chain is None: self.prepare_handlers_chain = [] for handler in prepare_handlers: self.prepare_handlers_chain.append(handler)
[ "def", "add_prepare_handler", "(", "self", ",", "prepare_handlers", ")", ":", "if", "not", "isinstance", "(", "prepare_handlers", ",", "static_bundle", ".", "BUNDLE_ITERABLE_TYPES", ")", ":", "prepare_handlers", "=", "[", "prepare_handlers", "]", "if", "self", ".", "prepare_handlers_chain", "is", "None", ":", "self", ".", "prepare_handlers_chain", "=", "[", "]", "for", "handler", "in", "prepare_handlers", ":", "self", ".", "prepare_handlers_chain", ".", "append", "(", "handler", ")" ]
Add prepare handler to bundle :type prepare_handlers: static_bundle.handlers.AbstractPrepareHandler
[ "Add", "prepare", "handler", "to", "bundle" ]
python
valid
SmileyChris/easy-thumbnails
easy_thumbnails/files.py
https://github.com/SmileyChris/easy-thumbnails/blob/b08ab44883bf7b221a98dadb9b589cb95d35b0bf/easy_thumbnails/files.py#L749-L769
def save(self, name, content, *args, **kwargs): """ Save the image. The image will be resized down using a ``ThumbnailField`` if ``resize_source`` (a dictionary of thumbnail options) is provided by the field. """ options = getattr(self.field, 'resize_source', None) if options: if 'quality' not in options: options['quality'] = self.thumbnail_quality content = Thumbnailer(content, name).generate_thumbnail(options) # If the generated extension differs from the original, use it # instead. orig_name, ext = os.path.splitext(name) generated_ext = os.path.splitext(content.name)[1] if generated_ext.lower() != ext.lower(): name = orig_name + generated_ext super(ThumbnailerImageFieldFile, self).save(name, content, *args, **kwargs)
[ "def", "save", "(", "self", ",", "name", ",", "content", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "options", "=", "getattr", "(", "self", ".", "field", ",", "'resize_source'", ",", "None", ")", "if", "options", ":", "if", "'quality'", "not", "in", "options", ":", "options", "[", "'quality'", "]", "=", "self", ".", "thumbnail_quality", "content", "=", "Thumbnailer", "(", "content", ",", "name", ")", ".", "generate_thumbnail", "(", "options", ")", "# If the generated extension differs from the original, use it", "# instead.", "orig_name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "name", ")", "generated_ext", "=", "os", ".", "path", ".", "splitext", "(", "content", ".", "name", ")", "[", "1", "]", "if", "generated_ext", ".", "lower", "(", ")", "!=", "ext", ".", "lower", "(", ")", ":", "name", "=", "orig_name", "+", "generated_ext", "super", "(", "ThumbnailerImageFieldFile", ",", "self", ")", ".", "save", "(", "name", ",", "content", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Save the image. The image will be resized down using a ``ThumbnailField`` if ``resize_source`` (a dictionary of thumbnail options) is provided by the field.
[ "Save", "the", "image", "." ]
python
train
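A hedged sketch of the field definition that triggers this resize path, following easy-thumbnails' documented resize_source option; the model and option values are invented:

from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField

class Profile(models.Model):  # hypothetical model
    avatar = ThumbnailerImageField(
        upload_to='avatars',
        # Source images are resized down on save() with these thumbnail
        # options; 'quality' falls back to thumbnail_quality if omitted.
        resize_source=dict(size=(512, 512), sharpen=True),
    )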
tornadoweb/tornado
tornado/iostream.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/iostream.py#L159-L179
def append(self, data: Union[bytes, bytearray, memoryview]) -> None: """ Append the given piece of data (should be a buffer-compatible object). """ size = len(data) if size > self._large_buf_threshold: if not isinstance(data, memoryview): data = memoryview(data) self._buffers.append((True, data)) elif size > 0: if self._buffers: is_memview, b = self._buffers[-1] new_buf = is_memview or len(b) >= self._large_buf_threshold else: new_buf = True if new_buf: self._buffers.append((False, bytearray(data))) else: b += data # type: ignore self._size += size
[ "def", "append", "(", "self", ",", "data", ":", "Union", "[", "bytes", ",", "bytearray", ",", "memoryview", "]", ")", "->", "None", ":", "size", "=", "len", "(", "data", ")", "if", "size", ">", "self", ".", "_large_buf_threshold", ":", "if", "not", "isinstance", "(", "data", ",", "memoryview", ")", ":", "data", "=", "memoryview", "(", "data", ")", "self", ".", "_buffers", ".", "append", "(", "(", "True", ",", "data", ")", ")", "elif", "size", ">", "0", ":", "if", "self", ".", "_buffers", ":", "is_memview", ",", "b", "=", "self", ".", "_buffers", "[", "-", "1", "]", "new_buf", "=", "is_memview", "or", "len", "(", "b", ")", ">=", "self", ".", "_large_buf_threshold", "else", ":", "new_buf", "=", "True", "if", "new_buf", ":", "self", ".", "_buffers", ".", "append", "(", "(", "False", ",", "bytearray", "(", "data", ")", ")", ")", "else", ":", "b", "+=", "data", "# type: ignore", "self", ".", "_size", "+=", "size" ]
Append the given piece of data (should be a buffer-compatible object).
[ "Append", "the", "given", "piece", "of", "data", "(", "should", "be", "a", "buffer", "-", "compatible", "object", ")", "." ]
python
train
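The branch structure above keeps small writes amortized (extend a growing bytearray) and large writes zero-copy (store a memoryview); a standalone sketch of that policy with an invented threshold:

LARGE = 2048  # hypothetical threshold; Tornado's real value differs

buffers = []  # list of (is_memoryview, buffer) pairs

def append(data):
    if len(data) > LARGE:
        buffers.append((True, memoryview(data)))      # big: reference, no copy
    elif len(data) > 0:
        if buffers and not buffers[-1][0] and len(buffers[-1][1]) < LARGE:
            buffers[-1][1].extend(data)               # small: coalesce
        else:
            buffers.append((False, bytearray(data)))  # start a fresh bytearray

append(b"ab"); append(b"cd"); append(b"x" * 5000)
print([(mv, len(b)) for mv, b in buffers])  # [(False, 4), (True, 5000)]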
trolldbois/ctypeslib
ctypeslib/codegen/cursorhandler.py
https://github.com/trolldbois/ctypeslib/blob/2aeb1942a5a32a5cc798c287cd0d9e684a0181a8/ctypeslib/codegen/cursorhandler.py#L362-L405
def _get_var_decl_init_value_single(self, _ctype, child): """ Handling of a single child for initialization value. Accepted types are expressions and declarations """ init_value = None # FIXME: always return (child.kind, child.value) log.debug( '_get_var_decl_init_value_single: _ctype: %s Child.kind: %s', _ctype.kind, child.kind) # shorcuts. if not child.kind.is_expression() and not child.kind.is_declaration(): raise CursorKindException(child.kind) if child.kind == CursorKind.CALL_EXPR: raise CursorKindException(child.kind) # POD init values handling. # As of clang 3.3, int, double literals are exposed. # float, long double, char , char* are not exposed directly in level1. # but really it depends... if child.kind.is_unexposed(): # recurse until we find a literal kind init_value = self._get_var_decl_init_value(_ctype, child.get_children()) if len(init_value) == 0: init_value = None elif len(init_value) == 1: init_value = init_value[0] else: log.error('_get_var_decl_init_value_single: Unhandled case') assert len(init_value) <= 1 else: # literal or others _v = self.parse_cursor(child) if isinstance( _v, list) and child.kind not in [CursorKind.INIT_LIST_EXPR, CursorKind.STRING_LITERAL]: log.warning( '_get_var_decl_init_value_single: TOKENIZATION BUG CHECK: %s', _v) _v = _v[0] init_value = (child.kind, _v) log.debug( '_get_var_decl_init_value_single: returns %s', str(init_value)) return init_value
[ "def", "_get_var_decl_init_value_single", "(", "self", ",", "_ctype", ",", "child", ")", ":", "init_value", "=", "None", "# FIXME: always return (child.kind, child.value)", "log", ".", "debug", "(", "'_get_var_decl_init_value_single: _ctype: %s Child.kind: %s'", ",", "_ctype", ".", "kind", ",", "child", ".", "kind", ")", "# shorcuts.", "if", "not", "child", ".", "kind", ".", "is_expression", "(", ")", "and", "not", "child", ".", "kind", ".", "is_declaration", "(", ")", ":", "raise", "CursorKindException", "(", "child", ".", "kind", ")", "if", "child", ".", "kind", "==", "CursorKind", ".", "CALL_EXPR", ":", "raise", "CursorKindException", "(", "child", ".", "kind", ")", "# POD init values handling.", "# As of clang 3.3, int, double literals are exposed.", "# float, long double, char , char* are not exposed directly in level1.", "# but really it depends...", "if", "child", ".", "kind", ".", "is_unexposed", "(", ")", ":", "# recurse until we find a literal kind", "init_value", "=", "self", ".", "_get_var_decl_init_value", "(", "_ctype", ",", "child", ".", "get_children", "(", ")", ")", "if", "len", "(", "init_value", ")", "==", "0", ":", "init_value", "=", "None", "elif", "len", "(", "init_value", ")", "==", "1", ":", "init_value", "=", "init_value", "[", "0", "]", "else", ":", "log", ".", "error", "(", "'_get_var_decl_init_value_single: Unhandled case'", ")", "assert", "len", "(", "init_value", ")", "<=", "1", "else", ":", "# literal or others", "_v", "=", "self", ".", "parse_cursor", "(", "child", ")", "if", "isinstance", "(", "_v", ",", "list", ")", "and", "child", ".", "kind", "not", "in", "[", "CursorKind", ".", "INIT_LIST_EXPR", ",", "CursorKind", ".", "STRING_LITERAL", "]", ":", "log", ".", "warning", "(", "'_get_var_decl_init_value_single: TOKENIZATION BUG CHECK: %s'", ",", "_v", ")", "_v", "=", "_v", "[", "0", "]", "init_value", "=", "(", "child", ".", "kind", ",", "_v", ")", "log", ".", "debug", "(", "'_get_var_decl_init_value_single: returns %s'", ",", "str", "(", "init_value", ")", ")", "return", "init_value" ]
Handling of a single child for initialization value. Accepted types are expressions and declarations
[ "Handling", "of", "a", "single", "child", "for", "initialization", "value", ".", "Accepted", "types", "are", "expressions", "and", "declarations" ]
python
train
etcher-be/emiz
emiz/weather/custom_metar/custom_metar.py
https://github.com/etcher-be/emiz/blob/1c3e32711921d7e600e85558ffe5d337956372de/emiz/weather/custom_metar/custom_metar.py#L118-L143
def _handlePressure(self, d): """ Parse an altimeter-pressure group. The following attributes are set: press [int] """ press = d['press'] if press != '////': press = float(press.replace('O', '0')) if d['unit']: if d['unit'] == 'A' or (d['unit2'] and d['unit2'] == 'INS'): self.press = CustomPressure(press / 100, 'IN') elif d['unit'] == 'SLP': if press < 500: press = press / 10 + 1000 else: press = press / 10 + 900 self.press = CustomPressure(press) self._remarks.append("sea-level pressure %.1fhPa" % press) else: self.press = CustomPressure(press) elif press > 2500: self.press = CustomPressure(press / 100, 'IN') else: self.press = CustomPressure(press)
[ "def", "_handlePressure", "(", "self", ",", "d", ")", ":", "press", "=", "d", "[", "'press'", "]", "if", "press", "!=", "'////'", ":", "press", "=", "float", "(", "press", ".", "replace", "(", "'O'", ",", "'0'", ")", ")", "if", "d", "[", "'unit'", "]", ":", "if", "d", "[", "'unit'", "]", "==", "'A'", "or", "(", "d", "[", "'unit2'", "]", "and", "d", "[", "'unit2'", "]", "==", "'INS'", ")", ":", "self", ".", "press", "=", "CustomPressure", "(", "press", "/", "100", ",", "'IN'", ")", "elif", "d", "[", "'unit'", "]", "==", "'SLP'", ":", "if", "press", "<", "500", ":", "press", "=", "press", "/", "10", "+", "1000", "else", ":", "press", "=", "press", "/", "10", "+", "900", "self", ".", "press", "=", "CustomPressure", "(", "press", ")", "self", ".", "_remarks", ".", "append", "(", "\"sea-level pressure %.1fhPa\"", "%", "press", ")", "else", ":", "self", ".", "press", "=", "CustomPressure", "(", "press", ")", "elif", "press", ">", "2500", ":", "self", ".", "press", "=", "CustomPressure", "(", "press", "/", "100", ",", "'IN'", ")", "else", ":", "self", ".", "press", "=", "CustomPressure", "(", "press", ")" ]
Parse an altimeter-pressure group. The following attributes are set: press [int]
[ "Parse", "an", "altimeter", "-", "pressure", "group", "." ]
python
train
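The SLP branch decodes METAR sea-level pressure groups, which drop the leading 9 or 10 and report tenths of hPa; a worked sketch of just that arithmetic (values invented):

def decode_slp(press):
    # SLP132 -> 1013.2 hPa (values < 500 imply the 10xx range),
    # SLP982 ->  998.2 hPa (values >= 500 imply the 9xx range).
    if press < 500:
        return press / 10 + 1000
    return press / 10 + 900

print(decode_slp(132))  # 1013.2
print(decode_slp(982))  # 998.2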
MacHu-GWU/angora-project
angora/filesystem/filesystem.py
https://github.com/MacHu-GWU/angora-project/blob/689a60da51cd88680ddbe26e28dbe81e6b01d275/angora/filesystem/filesystem.py#L942-L958
def sort_by(self, attr_name, reverse=False): """Sort files by one of it's attributes. **中文文档** 对容器内的WinFile根据其某一个属性升序或者降序排序。 """ try: d = dict() for abspath, winfile in self.files.items(): d[abspath] = getattr(winfile, attr_name) self.order = [item[0] for item in sorted( list(d.items()), key=lambda t: t[1], reverse = reverse)] except AttributeError: raise ValueError("valid sortable attributes are: " "abspath, dirname, basename, fname, ext, " "size_on_disk, atime, ctime, mtime;")
[ "def", "sort_by", "(", "self", ",", "attr_name", ",", "reverse", "=", "False", ")", ":", "try", ":", "d", "=", "dict", "(", ")", "for", "abspath", ",", "winfile", "in", "self", ".", "files", ".", "items", "(", ")", ":", "d", "[", "abspath", "]", "=", "getattr", "(", "winfile", ",", "attr_name", ")", "self", ".", "order", "=", "[", "item", "[", "0", "]", "for", "item", "in", "sorted", "(", "list", "(", "d", ".", "items", "(", ")", ")", ",", "key", "=", "lambda", "t", ":", "t", "[", "1", "]", ",", "reverse", "=", "reverse", ")", "]", "except", "AttributeError", ":", "raise", "ValueError", "(", "\"valid sortable attributes are: \"", "\"abspath, dirname, basename, fname, ext, \"", "\"size_on_disk, atime, ctime, mtime;\"", ")" ]
Sort files by one of its attributes. **Chinese docs** Sorts the WinFile objects in the container in ascending or descending order by one of their attributes.
[ "Sort", "files", "by", "one", "of", "it", "s", "attributes", ".", "**", "中文文档", "**", "对容器内的WinFile根据其某一个属性升序或者降序排序。" ]
python
train
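The method above is a getattr-keyed sort over a dict of file objects; a standalone sketch with dummy records (class and field values are invented stand-ins):

class WinFileStub(object):
    def __init__(self, abspath, size_on_disk):
        self.abspath = abspath
        self.size_on_disk = size_on_disk

files = {
    "/a.txt": WinFileStub("/a.txt", 300),
    "/b.txt": WinFileStub("/b.txt", 100),
    "/c.txt": WinFileStub("/c.txt", 200),
}
attr_name = "size_on_disk"
d = {path: getattr(f, attr_name) for path, f in files.items()}
order = [path for path, _ in sorted(d.items(), key=lambda t: t[1], reverse=True)]
print(order)  # ['/a.txt', '/c.txt', '/b.txt']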
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/request_parser.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/request_parser.py#L114-L124
def _safe_read_meta_file(fn, error_protocol): """ Try to read MetadataFile. If the exception is raised, log the errors to the `error_protocol` and return None. """ try: return MetadataFile(fn) except Exception, e: error_protocol.append( "Can't read MetadataFile '%s':\n\t%s\n" % (fn, e.message) )
[ "def", "_safe_read_meta_file", "(", "fn", ",", "error_protocol", ")", ":", "try", ":", "return", "MetadataFile", "(", "fn", ")", "except", "Exception", ",", "e", ":", "error_protocol", ".", "append", "(", "\"Can't read MetadataFile '%s':\\n\\t%s\\n\"", "%", "(", "fn", ",", "e", ".", "message", ")", ")" ]
Try to read MetadataFile. If an exception is raised, log the error to the `error_protocol` and return None.
[ "Try", "to", "read", "MetadataFile", ".", "If", "the", "exception", "is", "raised", "log", "the", "errors", "to", "the", "error_protocol", "and", "return", "None", "." ]
python
train
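The record above is Python 2 (except Exception, e and e.message); a hedged Python 3 equivalent, assuming the same MetadataFile interface:

def _safe_read_meta_file(fn, error_protocol):
    # Try to read MetadataFile; on failure, append the error to
    # error_protocol and return None (implicitly).
    try:
        return MetadataFile(fn)
    except Exception as e:
        error_protocol.append(
            "Can't read MetadataFile '%s':\n\t%s\n" % (fn, e)
        )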