Dataset columns: text_prompt (string, 157 to 13.1k characters) and code_prompt (string, 7 to 19.8k characters).
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Bytes(self, node: ast.Bytes) -> bytes: """Recompute the value as the bytes at the node."""
result = node.s
self.recomputed_values[node] = result
return node.s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_List(self, node: ast.List) -> List[Any]: """Visit the elements and assemble the results into a list."""
if isinstance(node.ctx, ast.Store):
    raise NotImplementedError("Can not compute the value of a Store on a list")

result = [self.visit(node=elt) for elt in node.elts]

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Tuple(self, node: ast.Tuple) -> Tuple[Any, ...]: """Visit the elements and assemble the results into a tuple."""
if isinstance(node.ctx, ast.Store):
    raise NotImplementedError("Can not compute the value of a Store on a tuple")

result = tuple(self.visit(node=elt) for elt in node.elts)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Set(self, node: ast.Set) -> Set[Any]: """Visit the elements and assemble the results into a set."""
result = set(self.visit(node=elt) for elt in node.elts)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Dict(self, node: ast.Dict) -> Dict[Any, Any]: """Visit keys and values and assemble a dictionary with the results."""
recomputed_dict = dict()  # type: Dict[Any, Any]
for key, val in zip(node.keys, node.values):
    recomputed_dict[self.visit(node=key)] = self.visit(node=val)

self.recomputed_values[node] = recomputed_dict
return recomputed_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_NameConstant(self, node: ast.NameConstant) -> Any: """Forward the node value as a result."""
self.recomputed_values[node] = node.value
return node.value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Name(self, node: ast.Name) -> Any: """Load the variable by looking it up in the variable look-up and in the built-ins."""
if not isinstance(node.ctx, ast.Load):
    raise NotImplementedError("Can only compute a value of Load on a name {}, but got context: {}".format(
        node.id, node.ctx))

result = None  # type: Optional[Any]

if node.id in self._name_to_value:
    result = self._name_to_value[node.id]

if result is None and hasattr(builtins, node.id):
    result = getattr(builtins, node.id)

if result is None and node.id != "None":
    # The variable refers to a name local of the lambda (e.g., a target in the generator expression).
    # Since we evaluate generator expressions with runtime compilation, None is returned here as a placeholder.
    return PLACEHOLDER

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_UnaryOp(self, node: ast.UnaryOp) -> Any: """Visit the node operand and apply the operation on the result."""
if isinstance(node.op, ast.UAdd):
    result = +self.visit(node=node.operand)
elif isinstance(node.op, ast.USub):
    result = -self.visit(node=node.operand)
elif isinstance(node.op, ast.Not):
    result = not self.visit(node=node.operand)
elif isinstance(node.op, ast.Invert):
    result = ~self.visit(node=node.operand)
else:
    raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_BinOp(self, node: ast.BinOp) -> Any: """Recursively visit the left and right operand, respectively, and apply the operation on the results."""
# pylint: disable=too-many-branches
left = self.visit(node=node.left)
right = self.visit(node=node.right)

if isinstance(node.op, ast.Add):
    result = left + right
elif isinstance(node.op, ast.Sub):
    result = left - right
elif isinstance(node.op, ast.Mult):
    result = left * right
elif isinstance(node.op, ast.Div):
    result = left / right
elif isinstance(node.op, ast.FloorDiv):
    result = left // right
elif isinstance(node.op, ast.Mod):
    result = left % right
elif isinstance(node.op, ast.Pow):
    result = left**right
elif isinstance(node.op, ast.LShift):
    result = left << right
elif isinstance(node.op, ast.RShift):
    result = left >> right
elif isinstance(node.op, ast.BitOr):
    result = left | right
elif isinstance(node.op, ast.BitXor):
    result = left ^ right
elif isinstance(node.op, ast.BitAnd):
    result = left & right
elif isinstance(node.op, ast.MatMult):
    result = left @ right
else:
    raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_BoolOp(self, node: ast.BoolOp) -> Any: """Recursively visit the operands and apply the operation on them."""
values = [self.visit(value_node) for value_node in node.values]

if isinstance(node.op, ast.And):
    result = functools.reduce(lambda left, right: left and right, values, True)
elif isinstance(node.op, ast.Or):
    # The identity element for "or" must be False; starting from True would always yield True.
    result = functools.reduce(lambda left, right: left or right, values, False)
else:
    raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Compare(self, node: ast.Compare) -> Any: """Recursively visit the comparators and apply the operations on them."""
# pylint: disable=too-many-branches
left = self.visit(node=node.left)
comparators = [self.visit(node=comparator) for comparator in node.comparators]

result = None  # type: Optional[Any]
for comparator, op in zip(comparators, node.ops):
    if isinstance(op, ast.Eq):
        comparison = left == comparator
    elif isinstance(op, ast.NotEq):
        comparison = left != comparator
    elif isinstance(op, ast.Lt):
        comparison = left < comparator
    elif isinstance(op, ast.LtE):
        comparison = left <= comparator
    elif isinstance(op, ast.Gt):
        comparison = left > comparator
    elif isinstance(op, ast.GtE):
        comparison = left >= comparator
    elif isinstance(op, ast.Is):
        comparison = left is comparator
    elif isinstance(op, ast.IsNot):
        comparison = left is not comparator
    elif isinstance(op, ast.In):
        comparison = left in comparator
    elif isinstance(op, ast.NotIn):
        comparison = left not in comparator
    else:
        raise NotImplementedError("Unhandled op of {}: {}".format(node, op))

    if result is None:
        result = comparison
    else:
        result = result and comparison

    left = comparator

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Call(self, node: ast.Call) -> Any: """Visit the function and the arguments and finally make the function call with them."""
func = self.visit(node=node.func)

args = []  # type: List[Any]
for arg_node in node.args:
    if isinstance(arg_node, ast.Starred):
        args.extend(self.visit(node=arg_node))
    else:
        args.append(self.visit(node=arg_node))

kwargs = dict()  # type: Dict[str, Any]
for keyword in node.keywords:
    if keyword.arg is None:
        kw = self.visit(node=keyword.value)
        for key, val in kw.items():
            kwargs[key] = val
    else:
        kwargs[keyword.arg] = self.visit(node=keyword.value)

result = func(*args, **kwargs)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_IfExp(self, node: ast.IfExp) -> Any: """Visit the ``test``, and depending on its outcome, the ``body`` or ``orelse``."""
test = self.visit(node=node.test)

if test:
    result = self.visit(node=node.body)
else:
    result = self.visit(node=node.orelse)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Attribute(self, node: ast.Attribute) -> Any: """Visit the node's ``value`` and get the attribute from the result."""
value = self.visit(node=node.value)
if not isinstance(node.ctx, ast.Load):
    raise NotImplementedError(
        "Can only compute a value of Load on the attribute {}, but got context: {}".format(node.attr, node.ctx))

result = getattr(value, node.attr)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Slice(self, node: ast.Slice) -> slice: """Visit ``lower``, ``upper`` and ``step`` and recompute the node as a ``slice``."""
lower = None  # type: Optional[int]
if node.lower is not None:
    lower = self.visit(node=node.lower)

upper = None  # type: Optional[int]
if node.upper is not None:
    upper = self.visit(node=node.upper)

step = None  # type: Optional[int]
if node.step is not None:
    step = self.visit(node=node.step)

result = slice(lower, upper, step)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_ExtSlice(self, node: ast.ExtSlice) -> Tuple[Any, ...]: """Visit each dimension of the advanced slicing and assemble the dimensions in a tuple."""
result = tuple(self.visit(node=dim) for dim in node.dims)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Subscript(self, node: ast.Subscript) -> Any: """Visit the ``slice`` and a ``value`` and get the element."""
value = self.visit(node=node.value)
a_slice = self.visit(node=node.slice)

result = value[a_slice]

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _execute_comprehension(self, node: Union[ast.ListComp, ast.SetComp, ast.GeneratorExp, ast.DictComp]) -> Any: """Compile the generator or comprehension from the node and execute the compiled code."""
args = [ast.arg(arg=name) for name in sorted(self._name_to_value.keys())]

func_def_node = ast.FunctionDef(
    name="generator_expr",
    args=ast.arguments(args=args, kwonlyargs=[], kw_defaults=[], defaults=[]),
    decorator_list=[],
    body=[ast.Return(node)])

module_node = ast.Module(body=[func_def_node])

ast.fix_missing_locations(module_node)

code = compile(source=module_node, filename='<ast>', mode='exec')

module_locals = {}  # type: Dict[str, Any]
module_globals = {}  # type: Dict[str, Any]
exec(code, module_globals, module_locals)  # pylint: disable=exec-used

generator_expr_func = module_locals["generator_expr"]

return generator_expr_func(**self._name_to_value)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_GeneratorExp(self, node: ast.GeneratorExp) -> Any: """Compile the generator expression as a function and call it."""
result = self._execute_comprehension(node=node)

for generator in node.generators:
    self.visit(generator.iter)

# Do not set the computed value of the node since its representation would be non-informative.
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_ListComp(self, node: ast.ListComp) -> Any: """Compile the list comprehension as a function and call it."""
result = self._execute_comprehension(node=node)

for generator in node.generators:
    self.visit(generator.iter)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_SetComp(self, node: ast.SetComp) -> Any: """Compile the set comprehension as a function and call it."""
result = self._execute_comprehension(node=node)

for generator in node.generators:
    self.visit(generator.iter)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_DictComp(self, node: ast.DictComp) -> Any: """Compile the dictionary comprehension as a function and call it."""
result = self._execute_comprehension(node=node)

for generator in node.generators:
    self.visit(generator.iter)

self.recomputed_values[node] = result
return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def visit_Return(self, node: ast.Return) -> Any: # pylint: disable=no-self-use """Raise an exception that this node is unexpected."""
raise AssertionError("Unexpected return node during the re-computation: {}".format(ast.dump(node)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generic_visit(self, node: ast.AST) -> None: """Raise an exception that this node has not been handled."""
raise NotImplementedError("Unhandled recomputation of the node: {} {}".format(type(node), node))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize_words(string): """ Tokenize input text to words. :param string: Text to tokenize :type string: str or unicode :return: words :rtype: list of strings """
string = six.text_type(string)
return re.findall(WORD_TOKENIZATION_RULES, string)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize_text(string): """ Tokenize input text to paragraphs, sentences and words. Tokenization to paragraphs is done using simple Newline algorithm For sentences and words tokenizers above are used :param string: Text to tokenize :type string: str or unicode :return: text, tokenized into paragraphs, sentences and words :rtype: list of list of list of words """
string = six.text_type(string)

rez = []
for part in string.split('\n'):
    par = []
    for sent in tokenize_sents(part):
        par.append(tokenize_words(sent))
    if par:
        rez.append(par)

return rez
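For illustration, a minimal usage sketch of tokenize_text; the exact token boundaries depend on WORD_TOKENIZATION_RULES and tokenize_sents, so the output shown is an assumption:

# Hypothetical usage of tokenize_text; exact tokens depend on WORD_TOKENIZATION_RULES.
text = u"First sentence. Second one.\nNew paragraph."
paragraphs = tokenize_text(text)
# Expected shape: a list of paragraphs, each a list of sentences, each a list of words, e.g.
# [[['First', 'sentence', '.'], ['Second', 'one', '.']], [['New', 'paragraph', '.']]]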
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromlist(cls, rnglist, autostep=None): """Class method that returns a new RangeSet with ranges from provided list."""
inst = RangeSet(autostep=autostep)
inst.updaten(rnglist)
return inst
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contiguous(self): """Object-based iterator over contiguous range sets."""
pad = self.padding or 0
for sli in self._contiguous_slices():
    yield RangeSet.fromone(slice(sli.start, sli.stop, sli.step), pad)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _contiguous_slices(self): """Internal iterator over contiguous slices in RangeSet."""
k = j = None
for i in self._sorted():
    if k is None:
        k = j = i
    if i - j > 1:
        yield slice(k, j + 1, 1)
        k = i
    j = i
if k is not None:
    yield slice(k, j + 1, 1)
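To illustrate the slice-building logic above, a self-contained sketch with plain integers (not the actual RangeSet API):

# Standalone illustration of the contiguous-slice logic above.
def contiguous_slices(sorted_values):
    k = j = None
    for i in sorted_values:
        if k is None:
            k = j = i
        if i - j > 1:
            yield slice(k, j + 1, 1)
            k = i
        j = i
    if k is not None:
        yield slice(k, j + 1, 1)

print(list(contiguous_slices([1, 2, 3, 7, 8])))  # [slice(1, 4, 1), slice(7, 9, 1)]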
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy(self): """Return a shallow copy of a RangeSet."""
cpy = self.__class__()
cpy._autostep = self._autostep
cpy.padding = self.padding
cpy.update(self)
return cpy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def intersection(self, other): """Return the intersection of two RangeSets as a new RangeSet. (I.e. all elements that are in both sets.) """
# NOTE: This is a workaround.
# Python 3 returns a new set instance as the result of set.intersection.
# Python 2, however, returns a ClusterShell.RangeSet.RangeSet instance.
# ORIGINAL CODE: return self._wrap_set_op(set.intersection, other)
copy = self.copy()
copy.intersection_update(other)
return copy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def difference(self, other): """Return the difference of two RangeSets as a new RangeSet. (I.e. all elements that are in this set and not in the other.) """
# NOTE: This is a workaround.
# Python 3 returns a new set instance as the result of set.difference.
# Python 2, however, returns a ClusterShell.RangeSet.RangeSet instance.
# ORIGINAL CODE: return self._wrap_set_op(set.difference, other)
copy = self.copy()
copy.difference_update(other)
return copy
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def issubset(self, other): """Report whether another set contains this RangeSet."""
self._binary_sanity_check(other)
return set.issubset(self, other)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def issuperset(self, other): """Report whether this RangeSet contains another set."""
self._binary_sanity_check(other)
return set.issuperset(self, other)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def updaten(self, rangesets): """ Update a rangeset with the union of itself and several others. """
for rng in rangesets:
    if isinstance(rng, set):
        self.update(rng)
    else:
        self.update(RangeSet(rng))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, element, pad=0): """Add an element to a RangeSet. This has no effect if the element is already present. """
set.add(self, int(element))
if pad > 0 and self.padding is None:
    self.padding = pad
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def discard(self, element): """Remove element from the RangeSet if it is a member. If the element is not a member, do nothing. """
try:
    i = int(element)
    set.discard(self, i)
except ValueError:
    pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _open(filename, mode="r"): """ Universal open file facility. With normal files, this function behaves as the open builtin. With gzip-ed files, it decompress or compress according to the specified mode. In addition, when filename is '-', it opens the standard input or output according to the specified mode. Mode are expected to be either 'r' or 'w'. """
if filename.endswith(".gz"): return GzipFile(filename, mode, COMPRESSION_LEVEL) elif filename == "-": if mode == "r": return _stdin elif mode == "w": return _stdout else: # TODO: set encoding to UTF-8? return open(filename, mode=mode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _radixPass(a, b, r, n, K): """ Stable sort of the sequence a according to the keys given in r. [3, 1, 0, 2, 4] When n is less than the length of a, the end of b must be left unaltered. [1, 0, 5, 5, 5] [0, 1] [1, 1] [0, 0, 1, 1] """
c = _array("i", [0] * (K + 1))  # counter array
for i in range(n):  # count occurrences
    c[r[a[i]]] += 1
sum = 0
for i in range(K + 1):  # exclusive prefix sums
    t = c[i]
    c[i] = sum
    sum += t
for a_i in a[:n]:  # sort
    b[c[r[a_i]]] = a_i
    c[r[a_i]] += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _nbOperations(n): """ Exact number of atomic operations in _radixPass. """
if n < 2:
    return 0
else:
    n0 = (n + 2) // 3
    n02 = n0 + n // 3
    return 3 * (n02) + n0 + _nbOperations(n02)
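A worked trace of the recursion above for n = 10; the values follow directly from the formula:

# _nbOperations(2):  n0 = 1, n02 = 1 -> 3*1 + 1 + _nbOperations(1) =  4 +  0 =  4
# _nbOperations(3):  n0 = 1, n02 = 2 -> 3*2 + 1 + _nbOperations(2) =  7 +  4 = 11
# _nbOperations(5):  n0 = 2, n02 = 3 -> 3*3 + 2 + _nbOperations(3) = 11 + 11 = 22
# _nbOperations(7):  n0 = 3, n02 = 5 -> 3*5 + 3 + _nbOperations(5) = 18 + 22 = 40
# _nbOperations(10): n0 = 4, n02 = 7 -> 3*7 + 4 + _nbOperations(7) = 25 + 40 = 65
print(_nbOperations(10))  # 65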
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _longestCommonPrefix(seq1, seq2, start1=0, start2=0): """ Returns the length of the longest common prefix of seq1 starting at offset start1 and seq2 starting at offset start2. 3 3 0 0 128 3 3 2 2 """
len1 = len(seq1) - start1
len2 = len(seq2) - start2

# We set seq2 as the shortest sequence
if len1 < len2:
    seq1, seq2 = seq2, seq1
    start1, start2 = start2, start1
    len1, len2 = len2, len1

# if seq2 is empty returns 0
if len2 == 0:
    return 0

i = 0
pos2 = start2
for i in range(min(len1, len2)):
    # print seq1, seq2, start1, start2
    if seq1[start1 + i] != seq2[start2 + i]:
        return i

# we have reached the end of seq2 (need to increment i)
return i + 1
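A minimal standalone sketch of the same prefix comparison, without the offset handling (for illustration only):

def lcp_length(seq1, seq2):
    # Length of the longest common prefix of two sequences.
    n = min(len(seq1), len(seq2))
    for i in range(n):
        if seq1[i] != seq2[i]:
            return i
    return n

print(lcp_length("missus", "mississippi"))  # 4 ("miss")
print(lcp_length([0, 1, 2], [0, 1, 3]))     # 2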
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def LCP(SA): """ Compute the longest common prefix for every adjacent suffixes. The result is a list of same size as SA. Given two suffixes at positions i and i+1, their LCP is stored at position i+1. A zero is stored at position 0 of the output. array('i', [0, 1, 0, 1]) array('i') array('i') array('i') array('i', [0, 2, 0, 1]) """
string = SA.string
length = SA.length
lcps = _array("i", [0] * length)
SA = SA.SA
if _trace:
    delta = max(length // 100, 1)
    for i, pos in enumerate(SA):
        if i % delta == 0:
            percent = float((i + 1) * 100) / length
            print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (percent, i + 1, length),
        lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos)
else:
    for i, pos in enumerate(SA):
        lcps[i] = _longestCommonPrefix(string, string, SA[i - 1], pos)
if _trace:
    print >> _stderr, "Compute_LCP %.2f%% (%i/%i)\r" % (100.0, length, length)
if lcps:
    # Correct the case where string[0] == string[-1]
    lcps[0] = 0
return lcps
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parseArgv(): """ Command line option parser. """
parser = OptionParser()
parser.usage = r""" cat <TEXT> | %prog [--unit <UNIT>] [--output <SA_FILE>]

Create the suffix array of TEXT with the processing UNIT and optionally
store it in SA_FILE for subsequent use.

UNIT may be set to 'byte', 'character' (given an encoding with the
--encoding option) or 'word', which is the default.
"""
parser.add_option("-i", "--input",
                  action="store", type="string", dest="input", default=False,
                  help="Path of the file containing the input text. When '-' is given, read the standard input (default). If the path ends with '.gz', reads the decompressed file.")
parser.add_option("-o", "--output",
                  action="store", type="string", dest="output", default=False,
                  help="Store the suffix array of the input to the file OUTPUT. When '-' is given, writes to the standard output. If the filename ends with '.gz', the suffix array will be stored compressed.")
parser.add_option("", "--load",
                  action="store", type="string", dest="SAFile", default=False,
                  help="Load a suffix array from SAFILE, this option and --input are mutually exclusive.")
parser.add_option("-u", "--unit",
                  action="store", type="string", dest="unit", default=DEFAULT_UNIT_STR,
                  help="Processing unit used for the creation of the suffix array." + \
                       'Possible values are "byte", "character" and "word". Default is "%s".' % DEFAULT_UNIT_STR + \
                       "This option is ignored when the suffix array is loaded from SAFILE." + \
                       'For characters, the input is decoded according to the encoding set via the option --encoding.')
parser.add_option("-e", "--encoding",
                  action="store", type="string", dest="encoding", default=DEFAULT_ENCODING,
                  help="Encoding of the input. This information is required only when processing characters. Default is '%s'." % DEFAULT_ENCODING)
parser.add_option("-p", "--print",
                  action="store_true", dest="printSA", default=False,
                  help="Prints the suffix array in a human readable format to the standard error output.")
parser.add_option("", "--verbose",
                  action="store_true", dest="verbose", default=False,
                  help="Prints more information.")
parser.add_option("", "--no-lcps",
                  action="store_true", dest="noLCPs", default=False,
                  help="Switch off the computation of LCPs. By doing so, the find functions are unusable.")
(options, args) = parser.parse_args(_argv)
strings = args[1:]
return (options, strings)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ Entry point for the standalone script. """
(options, strings) = parseArgv()
global _suffixArray, _trace

#############
# Verbosity #
#############
_trace = options.verbose

###################
# Processing unit #
###################
if options.unit == "byte":
    options.unit = UNIT_BYTE
elif options.unit == "character":
    options.unit = UNIT_CHARACTER
elif options.unit == "word":
    options.unit = UNIT_WORD
else:
    print >> _stderr, "Please specify a valid unit type."
    exit(EXIT_BAD_OPTION)

######################
# Build suffix array #
######################
if not options.SAFile:  # Build the suffix array from INPUT
    if not options.input:  # default is standard input
        options.input = "-"
    try:
        string = _open(options.input, "r").read()
    except IOError:
        print >> _stderr, "File %s does not exist." % options.input
        exit(EXIT_ERROR_FILE)
    SA = SuffixArray(string, options.unit, options.encoding, options.noLCPs)

########################
# Or load suffix array #
########################
elif not options.input and options.SAFile:  # Load suffix array from SA_FILE
    try:
        SA = SuffixArray.fromFile(options.SAFile)
    except IOError:
        print >> _stderr, "SA_FILE %s does not exist." % options.SAFile
        exit(EXIT_ERROR_FILE)
else:
    print >> _stderr, "Please set only one option amongst --input and --load.\n" + \
                      "Type %s --help for more details." % _argv[0]
    exit(EXIT_BAD_OPTION)

######################
# Print suffix array #
######################
if options.printSA:
    # Buffered output
    deltaLength = 1000
    start = 0
    while start < SA.length:
        print >> _stderr, SA.__str__(start, start + deltaLength)
        start += deltaLength

####################################
# Look for every string in strings #
####################################
for string in strings:
    print >> _stderr, ""
    print >> _stderr, "Positions of %s:" % string
    print >> _stderr, "  %s" % list(SA.find(string))

#########################
# Save SAFILE if needed #
#########################
if options.output:
    SA.toFile(options.output)

if _trace:
    print >> _stderr, "Done\r\n"
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize(self, string): """ Tokenizer utility. When processing byte, outputs the string unaltered. The character unit type is used for unicode data, the string is decoded according to the encoding provided. In the case of word unit, EOL characters are detached from the preceding word, and outputs the list of words, i.e. the list of non-space strings separated by space strings. True 10 True 10 ['miss', 'issi', 'ppi'] ['miss', 'issi', '\\n', 'ppi'] """
if self.unit == UNIT_WORD:
    # the EOL character is treated as a word, hence a substitution
    # before split
    return [token for token in string.replace("\n", " \n ").split(self.tokSep) if token != ""]
elif self.unit == UNIT_CHARACTER:
    return string.decode(self.encoding)
else:
    return string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def toFile(self, filename): """ Save the suffix array instance including all features attached in filename. Accept any filename following the _open conventions, for example if it ends with .gz the file created will be a compressed GZip file. """
start = _time()
fd = _open(filename, "w")

savedData = [self.string, self.unit, self.voc, self.vocSize, self.SA, self.features]
for featureName in self.features:
    featureValues = getattr(self, "_%s_values" % featureName)
    featureDefault = getattr(self, "%s_default" % featureName)
    savedData.append((featureValues, featureDefault))

fd.write(_dumps(savedData, _HIGHEST_PROTOCOL))
fd.flush()

try:
    self.sizeOfSavedFile = getsize(fd.name)
except OSError:
    # if stdout is used
    self.sizeOfSavedFile = "-1"

self.toFileTime = _time() - start
if _trace: print >> _stderr, "toFileTime %.2fs" % self.toFileTime
if _trace: print >> _stderr, "sizeOfSavedFile %sb" % self.sizeOfSavedFile
fd.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromFile(cls, filename): """ Load a suffix array instance from filename, a file created by toFile. Accept any filename following the _open conventions. """
self = cls.__new__(cls)  # new instance which does not call __init__
start = _time()
savedData = _loads(_open(filename, "r").read())

# load common attributes
self.string, self.unit, self.voc, self.vocSize, self.SA, features = savedData[:6]
self.length = len(self.SA)

# determine token delimiter
if self.unit == UNIT_WORD:
    self.tokSep = " "
elif self.unit in (UNIT_CHARACTER, UNIT_BYTE):
    self.tokSep = ""
else:
    raise Exception("Unknown unit type identifier:", self.unit)

# recompute tokId based on voc
self.tokId = dict((char, iChar) for iChar, char in enumerate(self.voc))
self.nbSentences = self.string.count(self.tokId.get("\n", 0))

# Load features
self.features = []
for featureName, (featureValues, featureDefault) in zip(features, savedData[6:]):
    self.addFeatureSA((lambda _: featureValues), name=featureName, default=featureDefault)

self.fromFileTime = _time() - start
if _trace: print >> _stderr, "fromFileTime %.2fs" % self.fromFileTime

return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(self, subString, features=[]): """ Dichotomy search of subString in the suffix array. As soon as a suffix which starts with subString is found, it uses the LCPs in order to find the other matching suffixes. at position pos. Features are listed in the same order as requested in the input list of array('i', [5, 2]) array('i', [0]) array('i', [0, 3]) [(4, 1), (1, 4)] array('i') array('i', [8]) array('i') array('i') """
SA = self.SA
LCPs = self._LCP_values
string = self.string

middle = self._findOne(subString)
if middle is False:
    return _array('i')

subString = _array("i", [self.tokId[c] for c in self.tokenize(subString)])
lenSubString = len(subString)

###########################################
# Use LCPS to retrieve the other suffixes #
###########################################
lower = middle
upper = middle + 1
middleLCP = LCPs[middle]
while lower > 0 and LCPs[lower] >= lenSubString:
    lower -= 1
while upper < self.length and LCPs[upper] >= lenSubString:
    upper += 1

###############################################
# When features is empty, outputs a flat list #
###############################################
res = SA[lower:upper]
if len(features) == 0:
    return res

##############################################
# When features is non empty, outputs a list #
# of tuples (pos, feature_1, feature_2, ...) #
##############################################
else:
    features = [getattr(self, "_%s_values" % featureName) for featureName in features]
    features = [featureValues[lower:upper] for featureValues in features]
    return zip(res, *features)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate(self): """Return a generator that yields pieces of XML."""
# atom demands either an author element in every entry or a global one
if not self.author:
    if False in map(lambda e: bool(e.author), self.entries):
        self.author = ({'name': u'unbekannter Autor'},)

if not self.updated:
    dates = sorted([entry.updated for entry in self.entries])
    self.updated = dates and dates[-1] or datetime.utcnow()

yield u'<?xml version="1.0" encoding="utf-8"?>\n'
yield u'<feed xmlns="http://www.w3.org/2005/Atom">\n'
yield ' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
if self.url:
    yield u' <link href="%s" />\n' % escape(self.url, True)
if self.feed_url:
    yield u' <link href="%s" rel="self" />\n' % \
        escape(self.feed_url, True)
for link in self.links:
    yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
        (k, escape(link[k], True)) for k in link)
for author in self.author:
    yield u' <author>\n'
    yield u' <name>%s</name>\n' % escape(author['name'])
    if 'uri' in author:
        yield u' <uri>%s</uri>\n' % escape(author['uri'])
    if 'email' in author:
        yield ' <email>%s</email>\n' % escape(author['email'])
    yield ' </author>\n'
if self.subtitle:
    yield ' ' + _make_text_block('subtitle', self.subtitle, self.subtitle_type)
if self.icon:
    yield u' <icon>%s</icon>\n' % escape(self.icon)
if self.logo:
    yield u' <logo>%s</logo>\n' % escape(self.logo)
if self.rights:
    yield ' ' + _make_text_block('rights', self.rights, self.rights_type)
generator_name, generator_url, generator_version = self.generator
if generator_name or generator_url or generator_version:
    tmp = [u' <generator']
    if generator_url:
        tmp.append(u' uri="%s"' % escape(generator_url, True))
    if generator_version:
        tmp.append(u' version="%s"' % escape(generator_version, True))
    tmp.append(u'>%s</generator>\n' % escape(generator_name))
    yield u''.join(tmp)
for entry in self.entries:
    for line in entry.generate():
        yield u' ' + line
yield u'</feed>\n'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def generate(self): """Yields pieces of ATOM XML."""
base = ''
if self.xml_base:
    base = ' xml:base="%s"' % escape(self.xml_base, True)
yield u'<entry%s>\n' % base
yield u' ' + _make_text_block('title', self.title, self.title_type)
yield u' <id>%s</id>\n' % escape(self.id)
yield u' <updated>%s</updated>\n' % format_iso8601(self.updated, self.timezone)
if self.published:
    yield u' <published>%s</published>\n' % \
        format_iso8601(self.published, self.timezone)
if self.url:
    yield u' <link href="%s" />\n' % escape(self.url)
for author in self.author:
    yield u' <author>\n'
    yield u' <name>%s</name>\n' % escape(author['name'])
    if 'uri' in author:
        yield u' <uri>%s</uri>\n' % escape(author['uri'])
    if 'email' in author:
        yield u' <email>%s</email>\n' % escape(author['email'])
    yield u' </author>\n'
for link in self.links:
    yield u' <link %s/>\n' % ''.join('%s="%s" ' % \
        (k, escape(link[k], True)) for k in link)
if self.summary:
    yield u' ' + _make_text_block('summary', self.summary, self.summary_type)
if self.content:
    if issubclass(self.content.__class__, dict):
        if "content" in self.content:
            yield u' <content %s>%s</content>\n' % (' '.join('%s="%s"' % \
                (k, escape(self.content[k], True)) for k in self.content if k != "content"),
                escape(self.content["content"]))
        else:
            yield u' <content %s/>\n' % ' '.join('%s="%s" ' % \
                (k, escape(self.content[k], True)) for k in self.content)
    else:
        yield u' ' + _make_text_block('content', self.content, self.content_type)
yield u'</entry>\n'
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def override_djconfig(**new_cache_values): """ Temporarily override config values. This is similar to :py:func:`django.test.override_settings`,\ use it in testing. :param new_cache_values: Keyword arguments,\ the key should match one in the config,\ a new one is created otherwise,\ the value is overridden within\ the decorated function """
def decorator(func):
    @wraps(func)
    def func_wrapper(*args, **kw):
        old_cache_values = {
            key: getattr(conf.config, key)
            for key in new_cache_values}

        conf.config._set_many(new_cache_values)

        try:
            # todo: make a note about this in the docs:
            # don't populate the config within migrations
            # This works coz the config table is empty,
            # so even if the middleware gets called,
            # it won't update the config (_updated_at
            # will be None), this is assuming the table
            # is not populated by the user (ie: within
            # a migration), in which case it will load
            # all the default values
            return func(*args, **kw)
        finally:
            conf.config._set_many(old_cache_values)

    return func_wrapper

return decorator
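A minimal usage sketch in a test, assuming override_djconfig is importable from djconfig.utils and that a config key named site_name has been registered (both are assumptions here):

# Hypothetical test using the decorator above; `site_name` is an assumed config key.
from djconfig import config
from djconfig.utils import override_djconfig

@override_djconfig(site_name="Test Site")
def test_site_name_is_overridden():
    assert config.site_name == "Test Site"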
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def serialize(value, field): """ Form values serialization :param object value: A value to be serialized\ for saving it into the database and later\ loading it into the form as initial value """
assert isinstance(field, forms.Field)

if isinstance(field, forms.ModelMultipleChoiceField):
    return json.dumps([v.pk for v in value])

# todo: remove
if isinstance(value, models.Model):
    return value.pk

return value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_version(package): """Get version without importing the lib"""
with io.open(os.path.join(BASE_DIR, package, '__init__.py'), encoding='utf-8') as fh:
    return [
        l.split('=', 1)[1].strip().strip("'").strip('"')
        for l in fh.readlines()
        if '__version__' in l][0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _register(self, form_class, check_middleware=True): """ Register a config form into the registry :param object form_class: The form class to register.\ Must be an instance of :py:class:`djconfig.forms.ConfigForm` :param bool check_middleware: Check\ :py:class:`djconfig.middleware.DjConfigMiddleware`\ is registered into ``settings.MIDDLEWARE_CLASSES``. Default True """
if not issubclass(form_class, _ConfigFormBase):
    raise ValueError(
        "The form does not inherit from `forms.ConfigForm`")

self._registry.add(form_class)

if check_middleware:
    _check_backend()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _reload(self): """ Gets every registered form's field value.\ If a field name is found in the db, it will load it from there.\ Otherwise, the initial value from the field form is used """
ConfigModel = apps.get_model('djconfig.Config')
cache = {}
data = dict(
    ConfigModel.objects
    .all()
    .values_list('key', 'value'))

# populate cache with initial form values,
# then with cleaned database values,
# then with raw database file/image paths
for form_class in self._registry:
    empty_form = form_class()
    cache.update({
        name: field.initial
        for name, field in empty_form.fields.items()})
    form = form_class(data={
        name: _deserialize(data[name], field)
        for name, field in empty_form.fields.items()
        if name in data and not isinstance(field, forms.FileField)})
    form.is_valid()
    cache.update({
        name: _unlazify(value)
        for name, value in form.cleaned_data.items()
        if name in data})
    # files are special because they don't have an initial value
    # and the POSTED data must contain the file. So, we keep
    # the stored path as is
    # TODO: see if serialize/deserialize/unlazify can be used for this instead
    cache.update({
        name: data[name]
        for name, field in empty_form.fields.items()
        if name in data and isinstance(field, forms.FileField)})

cache['_updated_at'] = data.get('_updated_at')
self._cache = cache
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _reload_maybe(self): """ Reload the config if the config\ model has been updated. This is called\ once on every request by the middleware.\ Should not be called directly. """
ConfigModel = apps.get_model('djconfig.Config')
data = dict(
    ConfigModel.objects
    .filter(key='_updated_at')
    .values_list('key', 'value'))

if (not hasattr(self, '_updated_at') or
        self._updated_at != data.get('_updated_at')):
    self._reload()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(conf, conf_admin, **options): """ Register a new admin section. :param conf: A subclass of ``djconfig.admin.Config`` :param conf_admin: A subclass of ``djconfig.admin.ConfigAdmin`` :param options: Extra options passed to ``django.contrib.admin.site.register`` """
assert issubclass(conf_admin, ConfigAdmin), (
    'conf_admin is not a ConfigAdmin subclass')
assert issubclass(
    getattr(conf_admin, 'change_list_form', None),
    ConfigForm), 'No change_list_form set'
assert issubclass(conf, Config), (
    'conf is not a Config subclass')
assert conf.app_label, 'No app_label set'
assert conf.verbose_name_plural, 'No verbose_name_plural set'
assert not conf.name or re.match(r"^[a-zA-Z_]+$", conf.name), (
    'Not a valid name. Valid chars are [a-zA-Z_]')

config_class = type("Config", (), {})
config_class._meta = type("Meta", (_ConfigMeta,), {
    'app_label': conf.app_label,
    'verbose_name_plural': conf.verbose_name_plural,
    'object_name': 'Config',
    'model_name': conf.name,
    'module_name': conf.name})

admin.site.register([config_class], conf_admin, **options)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def one_of(*args): """ Validates that a field value matches one of the values given to this validator. """
if len(args) == 1 and isinstance(args[0], list):
    items = args[0]
else:
    items = list(args)

def validate(value):
    if not value in items:
        return e("{} is not in the list {}", value, items)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gte(min_value): """ Validates that a field value is greater than or equal to the value given to this validator. """
def validate(value):
    if value < min_value:
        return e("{} is not greater than or equal to {}", value, min_value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lte(max_value): """ Validates that a field value is less than or equal to the value given to this validator. """
def validate(value):
    if value > max_value:
        return e("{} is not less than or equal to {}", value, max_value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gt(gt_value): """ Validates that a field value is greater than the value given to this validator. """
def validate(value):
    if value <= gt_value:
        return e("{} is not greater than {}", value, gt_value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lt(lt_value): """ Validates that a field value is less than the value given to this validator. """
def validate(value):
    if value >= lt_value:
        return e("{} is not less than {}", value, lt_value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def between(min_value, max_value): """ Validates that a field value is between the two values given to this validator. """
def validate(value):
    if value < min_value:
        return e("{} is not greater than or equal to {}", value, min_value)
    if value > max_value:
        return e("{} is not less than or equal to {}", value, max_value)

return validate
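A brief usage sketch of these validator factories; as the code above shows, a validator returns None for valid input and an error message built by e(...) otherwise:

# Hypothetical usage of the validator factories above.
check_age = between(0, 130)
assert check_age(42) is None           # valid: nothing returned
assert check_age(200) is not None      # invalid: an error message is returned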
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def length(min=None, max=None): """ Validates that a field value's length is between the bounds given to this validator. """
def validate(value):
    if min and len(value) < min:
        return e("{} does not have a length of at least {}", value, min)
    if max and len(value) > max:
        return e("{} does not have a length of at most {}", value, max)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def match(pattern): """ Validates that a field value matches the regex given to this validator. """
regex = re.compile(pattern)

def validate(value):
    if not regex.match(value):
        return e("{} does not match the pattern {}", value, pattern)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_email(): """ Validates that a fields value is a valid email address. """
email = (
    ur'(?!^\.)'     # No dot at start
    ur'(?!.*\.@)'   # No dot before at sign
    ur'(?!.*@\.)'   # No dot after at sign
    ur'(?!.*\.$)'   # No dot at the end
    ur'(?!.*\.\.)'  # No double dots anywhere
    ur'^\S+'        # Starts with one or more non-whitespace characters
    ur'@'           # Contains an at sign
    ur'\S+$'        # Ends with one or more non-whitespace characters
)
regex = re.compile(email, re.IGNORECASE | re.UNICODE)

def validate(value):
    if not regex.match(value):
        return e("{} is not a valid email address", value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_url(): """ Validates that a fields value is a valid URL. """
# Stolen from Django
regex = re.compile(
    r'^(?:http|ftp)s?://'  # http:// or https://
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
    r'localhost|'  # localhost...
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
    r'(?::\d+)?'  # optional port
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)

def validate(value):
    if not regex.match(value):
        return e("{} is not a valid URL", value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def each_item(*validators): """ A wrapper which applies the given validators to each item in a field value of type `list`. Example usage in a Schema: "my_list_field": {"type": Array(int), "validates": each_item(lte(10))} """
def validate(value):
    for item in value:
        for validator in validators:
            error = validator(item)
            if error:
                return error
    return None

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distinct(): """ Validates that all items in the given field list value are distinct, i.e. that the list contains no duplicates. """
def validate(value):
    for i, item in enumerate(value):
        if item in value[i+1:]:
            return e("{} is not a distinct set of values", value)

return validate
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_defaults(self, instance): """Applies the defaults described by the this schema to the given document instance as appropriate. Defaults are only applied to fields which are currently unset."""
for field, spec in self.doc_spec.iteritems():
    field_type = spec['type']

    if field not in instance:
        if 'default' in spec:
            default = spec['default']
            if callable(default):
                instance[field] = default()
            else:
                instance[field] = copy.deepcopy(default)

    # Determine if a value already exists for the field
    if field in instance:
        value = instance[field]

        # recurse into nested docs
        if isinstance(field_type, Schema) and isinstance(value, dict):
            field_type.apply_defaults(value)
        elif isinstance(field_type, Array) and isinstance(field_type.contained_type, Schema) and isinstance(value, list):
            for item in value:
                field_type.contained_type.apply_defaults(item)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, instance): """Validates the given document against this schema. Raises a ValidationException if there are any failures."""
errors = {}
self._validate_instance(instance, errors)

if len(errors) > 0:
    raise ValidationException(errors)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify(self, path_prefix=None): """Verifies that this schema's doc spec is valid and makes sense."""
for field, spec in self.doc_spec.iteritems():
    path = self._append_path(path_prefix, field)

    # Standard dict-based spec
    if isinstance(spec, dict):
        self._verify_field_spec(spec, path)
    else:
        raise SchemaFormatException("Invalid field definition for {}", path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify_field_spec(self, spec, path): """Verifies a given field specification is valid, recursing into nested schemas if required."""
# Required should be a boolean
if 'required' in spec and not isinstance(spec['required'], bool):
    raise SchemaFormatException("{} required declaration should be True or False", path)

# Nullable should be a boolean
if 'nullable' in spec and not isinstance(spec['nullable'], bool):
    raise SchemaFormatException("{} nullable declaration should be True or False", path)

# Must have a type specified
if 'type' not in spec:
    raise SchemaFormatException("{} has no type declared.", path)

self._verify_type(spec, path)

# Validations should be either a single function or array of functions
if 'validates' in spec:
    self._verify_validates(spec, path)

# Defaults must be of the correct type or a function
if 'default' in spec:
    self._verify_default(spec, path)

# Only expected spec keys are supported
if not set(spec.keys()).issubset(set(['type', 'required', 'validates', 'default', 'nullable'])):
    raise SchemaFormatException("Unsupported field spec item at {}. Items: " + repr(spec.keys()), path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify_type(self, spec, path): """Verify that the 'type' in the spec is valid"""
field_type = spec['type']

if isinstance(field_type, Schema):
    # Nested documents cannot have validation
    if not set(spec.keys()).issubset(set(['type', 'required', 'nullable', 'default'])):
        raise SchemaFormatException("Unsupported field spec item at {}. Items: " + repr(spec.keys()), path)
    return

elif isinstance(field_type, Array):
    if not isinstance(field_type.contained_type, (type, Schema, Array, types.FunctionType)):
        raise SchemaFormatException("Unsupported field type contained by Array at {}.", path)

elif not isinstance(field_type, type) and not isinstance(field_type, types.FunctionType):
    raise SchemaFormatException("Unsupported field type at {}. Type must be a type, a function, an Array or another Schema", path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify_default(self, spec, path): """Verifies that the default specified in the given spec is valid."""
field_type = spec['type']
default = spec['default']

# If it's a function there's nothing we can really do except assume its valid
if callable(default):
    return

if isinstance(field_type, Array):
    # Verify we'd got a list as our default
    if not isinstance(default, list):
        raise SchemaFormatException("Default value for Array at {} is not a list of values.", path)

    # Ensure the contents are of the correct type
    for i, item in enumerate(default):
        if isinstance(field_type.contained_type, Schema):
            if not self._valid_schema_default(item):
                raise SchemaFormatException("Default value for Schema is not valid.", path)
        elif not isinstance(item, field_type.contained_type):
            raise SchemaFormatException("Not all items in the default list for the Array field at {} are of the correct type.", path)

elif isinstance(field_type, Schema):
    if not self._valid_schema_default(default):
        raise SchemaFormatException("Default value for Schema is not valid.", path)

else:
    if not isinstance(default, field_type):
        raise SchemaFormatException("Default value for {} is not of the nominated type.", path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify_validates(self, spec, path): """Verify thats the 'validates' argument is valid."""
validates = spec['validates']

if isinstance(validates, list):
    for validator in validates:
        self._verify_validator(validator, path)
else:
    self._verify_validator(validates, path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _verify_validator(self, validator, path): """Verifies that a given validator associated with the field at the given path is legitimate."""
# Validator should be a function
if not callable(validator):
    raise SchemaFormatException("Invalid validations for {}", path)

# Validator should accept a single argument
(args, varargs, keywords, defaults) = getargspec(validator)
if len(args) != 1:
    raise SchemaFormatException("Invalid validations for {}", path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _validate_instance(self, instance, errors, path_prefix=''): """Validates that the given instance of a document conforms to the given schema's structure and validations. Any validation errors are added to the given errors collection. The caller should assume the instance is considered valid if the errors collection is empty when this method returns."""
if not isinstance(instance, dict):
    errors[path_prefix] = "Expected instance of dict to validate against schema."
    return

# validate against the schema level validators
self._apply_validations(errors, path_prefix, self._validates, instance)

# Loop over each field in the schema and check the instance value conforms
# to its spec
for field, spec in self.doc_spec.iteritems():
    path = self._append_path(path_prefix, field)

    # If the field is present, validate it's value.
    if field in instance:
        self._validate_value(instance[field], spec, path, errors)
    else:
        # If not, add an error if it was a required key.
        if spec.get('required', False):
            errors[path] = "{} is required.".format(path)

# Now loop over each field in the given instance and make sure we don't
# have any fields not declared in the schema, unless strict mode has been
# explicitly disabled.
if self._strict:
    for field in instance:
        if field not in self.doc_spec:
            errors[self._append_path(path_prefix, field)] = "Unexpected document field not present in schema"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def _validate_value(self, value, field_spec, path, errors):
        """Validates the given field value against the associated field spec at the given path.
        Any validation failures are added to the given errors collection."""
# Check if the value is None and add an error if the field is not nullable.
# Note that for backward compatibility reasons, the default value of 'nullable'
# is the inverse of 'required' (which used to mean both that the key be present
# and not set to None).
if value is None:
    if not field_spec.get('nullable', not field_spec.get('required', False)):
        errors[path] = "{} is not nullable.".format(path)
    return

# All fields should have a type
field_type = field_spec['type']

if isinstance(field_type, types.FunctionType):
    try:
        field_type = field_type(value)
    except Exception as e:
        raise SchemaFormatException("Dynamic schema function raised exception: {}".format(str(e)), path)

    if not isinstance(field_type, (type, Schema, Array)):
        raise SchemaFormatException("Dynamic schema function did not return a type at path {}", path)

# If our field is an embedded document, recurse into it
if isinstance(field_type, Schema):
    if isinstance(value, dict):
        field_type._validate_instance(value, errors, path)
    else:
        errors[path] = "{} should be an embedded document".format(path)
    return

elif isinstance(field_type, Array):
    if isinstance(value, list):
        is_dynamic = isinstance(field_type.contained_type, types.FunctionType)
        for i, item in enumerate(value):
            contained_type = field_type.contained_type
            if is_dynamic:
                contained_type = contained_type(item)

            instance_path = self._append_path(path, i)
            if isinstance(contained_type, Schema):
                contained_type._validate_instance(item, errors, instance_path)
            elif not isinstance(item, contained_type):
                errors[instance_path] = "Array item at {} is of incorrect type".format(instance_path)
                continue
    else:
        errors[path] = "{} should be an embedded array".format(path)
    return

elif not isinstance(value, field_type):
    errors[path] = "Field should be of type {}".format(field_type)
    return

validations = field_spec.get('validates', None)
if validations is None:
    return

self._apply_validations(errors, path, validations, value)
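A brief standalone sketch of the backward-compatible 'nullable' default described in the first comment above; the field spec is made up.

field_spec = {'type': str, 'required': True}
# nullable defaults to the inverse of required, so a required field rejects None
# unless 'nullable': True is set explicitly.
nullable = field_spec.get('nullable', not field_spec.get('required', False))
print(nullable)  # False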
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def as_dict(config):
        """ Converts a ConfigParser object into a dictionary.
        The resulting dictionary has sections as keys which point to a dict of the
        section's options as key => value pairs.
        """
settings = defaultdict(lambda: {}) for section in config.sections(): for key, val in config.items(section): settings[section][key] = val return settings
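A usage sketch, assuming Python 3's configparser and that as_dict above is in scope along with collections.defaultdict; the INI content is made up.

from configparser import ConfigParser

config = ConfigParser()
config.read_string("[server]\nhost = localhost\nport = 8080\n")
settings = as_dict(config)
print(settings['server']['port'])  # '8080' -- option values remain strings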
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initialize(self, timeouts): """ Bind or connect the nanomsg socket to some address """
# Bind or connect to address if self.bind is True: self.socket.bind(self.address) else: self.socket.connect(self.address) # Set send and recv timeouts self._set_timeouts(timeouts)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_timeouts(self, timeouts): """ Set socket timeouts for send and receive respectively """
(send_timeout, recv_timeout) = (None, None) try: (send_timeout, recv_timeout) = timeouts except TypeError: raise EndpointError( '`timeouts` must be a pair of numbers (2, 3) which represent ' 'the timeout values for send and receive respectively') if send_timeout is not None: self.socket.set_int_option( nanomsg.SOL_SOCKET, nanomsg.SNDTIMEO, send_timeout) if recv_timeout is not None: self.socket.set_int_option( nanomsg.SOL_SOCKET, nanomsg.RCVTIMEO, recv_timeout)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def receive(self, decode=True): """ Receive from socket, authenticate and decode payload """
payload = self.socket.recv() payload = self.verify(payload) if decode: payload = self.decode(payload) return payload
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sign(self, payload): """ Sign payload using the supplied authenticator """
if self.authenticator: return self.authenticator.signed(payload) return payload
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify(self, payload): """ Verify payload authenticity via the supplied authenticator """
if not self.authenticator: return payload try: self.authenticator.auth(payload) return self.authenticator.unsigned(payload) except AuthenticatorInvalidSignature: raise except Exception as exception: raise AuthenticateError(str(exception))
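The authenticator object is not shown in this section; below is a minimal, hypothetical HMAC-based class satisfying the signed/auth/unsigned interface used by sign() and verify() above. It is a sketch only; the library's real implementation and exception types may differ.

import hashlib
import hmac

class HmacAuthenticator(object):
    """Hypothetical authenticator: a 32-byte SHA-256 HMAC prepended to the payload."""
    def __init__(self, key):
        self.key = key  # bytes

    def signed(self, payload):
        return hmac.new(self.key, payload, hashlib.sha256).digest() + payload

    def auth(self, payload):
        expected = hmac.new(self.key, payload[32:], hashlib.sha256).digest()
        if not hmac.compare_digest(expected, payload[:32]):
            raise ValueError('invalid signature')  # the real code raises AuthenticatorInvalidSignature

    def unsigned(self, payload):
        return payload[32:]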
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self): """ Start and listen for calls """
if threading.current_thread().name == 'MainThread': signal.signal(signal.SIGINT, self.stop) logging.info('Started on {}'.format(self.address)) while True: self.process()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_summary(list_all=[], **kwargs):
    '''
    summarize the report data
    @param list_all: a list which saves the report data
    @param kwargs: such as
        show_all: True/False, whether the report shows cases of all statuses
        proj_name: project name
        home_page: home page url
    '''
    all_summary = []
    for module in list_all:
        summary = {
            "module_name" : module['Name'],
            "show_all" : kwargs.get("show_all",True),
            "project_name" : kwargs.get("proj_name","TestProject"),
            "home_page" : kwargs.get("home_page",__about__.HOME_PAGE),
            "start_time" : "",
            "end_time" : "",
            "duration_seconds" : "",
            "total_case_num" : len(module["TestCases"]),
            "pass_cases_num" : 0,
            "fail_cases_num" : 0,
            "details" : []
        }

        for case in module["TestCases"]:
            case_detail = {}
            case_detail["linkurl"] = "./caselogs/%s_%s.log" %(case["case_name"],case["exec_date"])
            if case["status"].lower() == "pass":
                summary["pass_cases_num"] += 1
                case_detail["c_style"] = "tr_pass"
            else:
                summary["fail_cases_num"] += 1
                case_detail["c_style"] = "tr_fail"
            case_detail.update(case)
            summary["details"].append(case_detail)

        try:
            st = module["TestCases"][0].get("start_at")
            et = module["TestCases"][-1].get("end_at")
            summary["start_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(st))
            summary["end_time"] = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(et))
            summary["duration_seconds"] = float("%.2f" %(et - st))
        except Exception as _:
            logger.log_warning("Will set 'start_at' and 'end_at' to 'None'")
            (summary["start_time"], summary["end_time"], summary["duration_seconds"]) = (None,None,None)

        if summary["fail_cases_num"] > 0:
            summary["dict_report"] = {"result":0,"message":"failure","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]}
        else:
            summary["dict_report"] = {"result":1,"message":"success","pass":summary["pass_cases_num"],"fail":summary["fail_cases_num"]}

        all_summary.append(summary)
    return all_summary
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_report_data(list_all=[], module_name="TestModule", **kwargs):
    '''
    add report data to a list
    @param list_all: a list which saves the report data
    @param module_name: test set name or test module name
    @param kwargs: such as
        case_name: testcase name
        status: test result, Pass or Fail
        resp_tester: responsible tester who wrote this case
        tester: tester who executed the test
        start_at: time when the tester started this case
        end_at: time when the tester finished this case
    '''
    start_at = kwargs.get("start_at")
    case_name = kwargs.get("case_name","TestCase")
    raw_case_name = kwargs.get("raw_case_name","TestCase")
    exec_date_time = time.localtime(start_at)
    execdate = time.strftime("%Y-%m-%d",exec_date_time)
    exectime = time.strftime("%H:%M:%S",exec_date_time)

    _case_report = {
        'resp_tester': kwargs.get("resp_tester","administrator"),
        'tester': kwargs.get("tester","administrator"),
        'case_name': case_name,
        'raw_case_name': raw_case_name,
        'status': kwargs.get("status","Pass"),
        'exec_date': execdate,
        'exec_time': exectime,
        'start_at': start_at,
        'end_at': kwargs.get("end_at"),
    }

    for module in list_all:
        if module_name != module["Name"]:
            continue
        for case in module["TestCases"]:
            if raw_case_name == case["raw_case_name"]:
                case.update(_case_report)
                return list_all
        module["TestCases"].append(_case_report)
        return list_all

    list_all.append({"Name": module_name, "TestCases": [_case_report]})
    return list_all
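A usage sketch: two calls for different cases in the same module append to one module entry. The module name, case names, and timings are made up.

import time

report = []
report = add_report_data(report, "LoginModule", case_name="test_login",
                         raw_case_name="test_login", status="Pass",
                         start_at=time.time(), end_at=time.time() + 1.2)
report = add_report_data(report, "LoginModule", case_name="test_logout",
                         raw_case_name="test_logout", status="Fail",
                         start_at=time.time(), end_at=time.time() + 0.8)
print(len(report), len(report[0]["TestCases"]))  # 1 2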
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, subscription): """ Fetch the function registered for a certain subscription """
for name in self.methods: tag = bytes(name.encode('utf-8')) if subscription.startswith(tag): fun = self.methods.get(name) message = subscription[len(tag):] return tag, message, fun return None, None, None
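A standalone illustration of the prefix match this method performs: the incoming frame starts with the UTF-8 encoded tag, and everything after it is the message body. The tag and payload are made up.

methods = {'weather': lambda msg: msg}        # tag -> handler, as in self.methods
subscription = b'weather{"temp": 21}'
tag = b'weather'
assert subscription.startswith(tag)
message = subscription[len(tag):]
print(message)  # b'{"temp": 21}'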
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def subscribe(self, tag, fun, description=None):
        """ Subscribe to a tag and register a function to handle messages published under it """
self.methods[tag] = fun self.descriptions[tag] = description self.socket.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, tag)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(self): """ Receive a subscription from the socket and process it """
subscription = None result = None try: subscription = self.socket.recv() except AuthenticateError as exception: logging.error( 'Subscriber error while authenticating request: {}' .format(exception), exc_info=1) except AuthenticatorInvalidSignature as exception: logging.error( 'Subscriber error while authenticating request: {}' .format(exception), exc_info=1) except DecodeError as exception: logging.error( 'Subscriber error while decoding request: {}' .format(exception), exc_info=1) except RequestParseError as exception: logging.error( 'Subscriber error while parsing request: {}' .format(exception), exc_info=1) else: logging.debug( 'Subscriber received payload: {}' .format(subscription)) _tag, message, fun = self.parse(subscription) message = self.verify(message) message = self.decode(message) try: result = fun(message) except Exception as exception: logging.error(exception, exc_info=1) # Return result to check successful execution of `fun` when testing return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def publish(self, tag, message): """ Publish a message down the socket """
payload = self.build_payload(tag, message) self.socket.send(payload)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_webpack(request, name='DEFAULT'): """ Get the Webpack object for a given webpack config. Called at most once per request per config name. """
if not hasattr(request, '_webpack_map'): request._webpack_map = {} wp = request._webpack_map.get(name) if wp is None: wp = request._webpack_map[name] = Webpack(request, name) return wp
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def includeme(config): """ Add pyramid_webpack methods and config to the app """
settings = config.registry.settings root_package_name = config.root_package.__name__ config.registry.webpack = { 'DEFAULT': WebpackState(settings, root_package_name) } for extra_config in aslist(settings.get('webpack.configs', [])): state = WebpackState(settings, root_package_name, name=extra_config) config.registry.webpack[extra_config] = state # Set up any static views for state in six.itervalues(config.registry.webpack): if state.static_view: config.add_static_view(name=state.static_view_name, path=state.static_view_path, cache_max_age=state.cache_max_age) config.add_request_method(get_webpack, 'webpack')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_setting(self, setting, default=None, name=None, inherit=True): """ Helper function to fetch settings, inheriting from the base """
if name is None: name = self.name if name == 'DEFAULT': return self._settings.get('webpack.{0}'.format(setting), default) else: val = self._settings.get('webpack.{0}.{1}'.format(name, setting), SENTINEL) if val is SENTINEL: if inherit: return self._get_setting(setting, default, 'DEFAULT') else: return default else: return val
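A quick illustration of the inheritance rule, using the webpack.<name>.<setting> key pattern from the lookup above; the concrete settings values are made up.

settings = {
    'webpack.debug': 'true',
    'webpack.other.stats_file': 'other-stats.json',
}
# Looking up 'debug' for the 'other' config misses 'webpack.other.debug'
# and falls back to the DEFAULT key 'webpack.debug'.
value = settings.get('webpack.other.debug', settings.get('webpack.debug'))
print(value)  # 'true'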
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_stats(self, cache=None, wait=None): """ Load and cache the webpack-stats file """
if cache is None: cache = not self.debug if wait is None: wait = self.debug if not cache or self._stats is None: self._stats = self._load_stats() start = time.time() while wait and self._stats.get('status') == 'compiling': if self.timeout and (time.time() - start > self.timeout): raise RuntimeError("Webpack {0!r} timed out while compiling" .format(self.stats_file.path)) time.sleep(0.1) self._stats = self._load_stats() return self._stats
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _load_stats(self): """ Load the webpack-stats file """
for attempt in range(0, 3): try: with self.stats_file.open() as f: return json.load(f) except ValueError: # If we failed to parse the JSON, it's possible that the # webpack process is writing to it concurrently and it's in a # bad state. Sleep and retry. if attempt < 2: time.sleep(attempt * 0.2) else: raise except IOError: raise IOError( "Could not read stats file {0}. Make sure you are using the " "webpack-bundle-tracker plugin" .format(self.stats_file))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _chunk_filter(self, extensions): """ Create a filter from the extensions and ignore files """
if isinstance(extensions, six.string_types): extensions = extensions.split() def _filter(chunk): """ Exclusion filter """ name = chunk['name'] if extensions is not None: if not any(name.endswith(e) for e in extensions): return False for pattern in self.state.ignore_re: if pattern.match(name): return False for pattern in self.state.ignore: if fnmatch.fnmatchcase(name, pattern): return False return True return _filter
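A self-contained sketch of the same filtering rules, with chunk dicts shaped like webpack-bundle-tracker output; the sample names and ignore pattern are made up, and regex ignores are omitted.

import fnmatch

chunks = [{'name': 'main.js'}, {'name': 'vendor.js'}, {'name': 'styles.css'}]
extensions = ['.js']
ignore = ['vendor*']

def keep(chunk):
    name = chunk['name']
    if not any(name.endswith(e) for e in extensions):
        return False
    return not any(fnmatch.fnmatchcase(name, p) for p in ignore)

print([c['name'] for c in chunks if keep(c)])  # ['main.js']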
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_url(self, chunk): """ Add a 'url' property to a chunk and return it """
if 'url' in chunk: return chunk public_path = chunk.get('publicPath') if public_path: chunk['url'] = public_path else: fullpath = posixpath.join(self.state.static_view_path, chunk['name']) chunk['url'] = self._request.static_url(fullpath) return chunk
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_bundle(self, bundle_name, extensions=None): """ Get all the chunks contained in a bundle """
if self.stats.get('status') == 'done': bundle = self.stats.get('chunks', {}).get(bundle_name, None) if bundle is None: raise KeyError('No such bundle {0!r}.'.format(bundle_name)) test = self._chunk_filter(extensions) return [self._add_url(c) for c in bundle if test(c)] elif self.stats.get('status') == 'error': raise RuntimeError("{error}: {message}".format(**self.stats)) else: raise RuntimeError( "Bad webpack stats file {0} status: {1!r}" .format(self.state.stats_file, self.stats.get('status')))
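A usage sketch of the returned chunk list; here chunks stands in for a real call such as request.webpack.get_bundle('main', '.js'), and the bundle name and URL are made up.

chunks = [{'name': 'main.js', 'url': '/static/bundles/main.js'}]
tags = ['<script src="{0}"></script>'.format(c['url']) for c in chunks]
print('\n'.join(tags))  # <script src="/static/bundles/main.js"></script>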