<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_func_def(self, node, scope, ctxt, stream):
    """Handle FuncDef nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling function definition")
    func = self._handle_node(node.decl, scope, ctxt, stream)
    func.body = node.body
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_param_list(self, node, scope, ctxt, stream):
    """Handle ParamList nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling param list")
    # params should be a list of tuples:
    # [(<name>, <field_class>), ...]
    params = []
    for param in node.params:
        self._mark_id_as_lazy(param)
        param_info = self._handle_node(param, scope, ctxt, stream)
        params.append(param_info)

    param_list = functions.ParamListDef(params, node.coord)
    return param_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_func_decl(self, node, scope, ctxt, stream):
    """Handle FuncDecl nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling func decl")

    if node.args is not None:
        # could just call _handle_param_list directly...
        for param in node.args.params:
            # see the check in _handle_decl for how this is kept from
            # being added to the local context/scope
            param.is_func_param = True
        params = self._handle_node(node.args, scope, ctxt, stream)
    else:
        params = functions.ParamListDef([], node.coord)

    func_type = self._handle_node(node.type, scope, ctxt, stream)
    func = functions.Function(func_type, params, scope)
    return func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_func_call(self, node, scope, ctxt, stream):
    """Handle FuncCall nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling function call to '{}'".format(node.name.name))
    if node.args is None:
        func_args = []
    else:
        func_args = self._handle_node(node.args, scope, ctxt, stream)
    func = self._handle_node(node.name, scope, ctxt, stream)
    return func.call(func_args, ctxt, scope, stream, self, node.coord)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_expr_list(self, node, scope, ctxt, stream):
    """Handle ExprList nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling expression list")
    exprs = [
        self._handle_node(expr, scope, ctxt, stream) for expr in node.exprs
    ]
    return exprs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_compound(self, node, scope, ctxt, stream):
    """Handle Compound nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling compound statement")
    # scope.push()

    try:
        for child in node.children():
            self._handle_node(child, scope, ctxt, stream)

    # in case a return occurs, be sure to pop the scope
    # (returns are implemented by raising an exception)
    finally:
        # scope.pop()
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_return(self, node, scope, ctxt, stream):
    """Handle Return nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling return")
    if node.expr is None:
        ret_val = None
    else:
        ret_val = self._handle_node(node.expr, scope, ctxt, stream)
    self._dlog("return value = {}".format(ret_val))
    raise errors.InterpReturn(ret_val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_enum(self, node, scope, ctxt, stream):
    """Handle enum nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling enum")
    if node.type is None:
        enum_cls = fields.Int
    else:
        enum_cls = self._handle_node(node.type, scope, ctxt, stream)

    enum_vals = {}
    curr_val = enum_cls()
    curr_val._pfp__value = -1
    for enumerator in node.values.enumerators:
        if enumerator.value is not None:
            curr_val = self._handle_node(enumerator.value, scope, ctxt, stream)
        else:
            curr_val = curr_val + 1
        curr_val._pfp__freeze()
        enum_vals[enumerator.name] = curr_val
        enum_vals[fields.PYVAL(curr_val)] = enumerator.name
        scope.add_local(enumerator.name, curr_val)

    if node.name is not None:
        enum_cls = EnumDef(node.name, enum_cls, enum_vals)
        scope.add_type_class(node.name, enum_cls)
    else:
        enum_cls = EnumDef("enum_" + enum_cls.__name__, enum_cls, enum_vals)
        # don't add to scope if we don't have a name

    return enum_cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_array_decl(self, node, scope, ctxt, stream):
    """Handle ArrayDecl nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling array declaration '{}'".format(node.type.declname))

    if node.dim is None:
        # will be used
        array_size = None
    else:
        array_size = self._handle_node(node.dim, scope, ctxt, stream)
    self._dlog("array size = {}".format(array_size))

    # TODO node.dim_quals
    # node.type
    field_cls = self._handle_node(node.type, scope, ctxt, stream)
    self._dlog("field class = {}".format(field_cls))
    array = ArrayDecl(field_cls, array_size)
    # array = fields.Array(array_size, field_cls)
    array._pfp__name = node.type.declname
    # array._pfp__parse(stream)
    return array
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_array_ref(self, node, scope, ctxt, stream):
    """Handle ArrayRef nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    ary = self._handle_node(node.name, scope, ctxt, stream)
    subscript = self._handle_node(node.subscript, scope, ctxt, stream)
    return ary[fields.PYVAL(subscript)]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_if(self, node, scope, ctxt, stream):
    """Handle If nodes

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling if/ternary_op")
    cond = self._handle_node(node.cond, scope, ctxt, stream)
    if cond:
        # there should always be an iftrue
        return self._handle_node(node.iftrue, scope, ctxt, stream)
    else:
        if node.iffalse is not None:
            return self._handle_node(node.iffalse, scope, ctxt, stream)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _handle_continue(self, node, scope, ctxt, stream):
    """Handle continue node

    :node: TODO
    :scope: TODO
    :ctxt: TODO
    :stream: TODO
    :returns: TODO
    """
    self._dlog("handling continue")
    raise errors.InterpContinue()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _get_value(self, node, scope, ctxt, stream):
    """Return the value of the node. It is expected to be
    either an AST.ID instance or a constant

    :node: TODO
    :returns: TODO
    """
    res = self._handle_node(node, scope, ctxt, stream)

    if isinstance(res, fields.Field):
        return res._pfp__value
    else:
        # assume it's a constant
        return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _resolve_to_field_class(self, names, scope):
    """Resolve the names to a class in fields.py, resolving past
    typedefs, etc

    :names: TODO
    :scope: TODO
    :ctxt: TODO
    :returns: TODO
    """
    switch = {
        "char":    "Char",
        "int":     "Int",
        "long":    "Int",
        "int64":   "Int64",
        "uint64":  "UInt64",
        "short":   "Short",
        "double":  "Double",
        "float":   "Float",
        "void":    "Void",
        "string":  "String",
        "wstring": "WString",
    }

    core = names[-1]

    if core not in switch:
        # will return a list of resolved names
        type_info = scope.get_type(core)
        if type(type_info) is type and issubclass(type_info, fields.Field):
            return type_info
        resolved_names = type_info
        if resolved_names is None:
            raise errors.UnresolvedType(self._coord, " ".join(names), " ")
        if resolved_names[-1] not in switch:
            raise errors.UnresolvedType(
                self._coord, " ".join(names), " ".join(resolved_names)
            )
        names = copy.copy(names)
        names.pop()
        names += resolved_names

    if len(names) >= 2 and names[-1] == names[-2] and names[-1] == "long":
        res = "Int64"
    else:
        res = switch[names[-1]]

    if names[-1] in ["char", "short", "int", "long"] and "unsigned" in names[:-1]:
        res = "U" + res

    cls = getattr(fields, res)
    return cls
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def bytes_to_bits(bytes_):
    """Convert bytes to a list of bits
    """
    res = []
    for x in bytes_:
        if not isinstance(x, int):
            x = ord(x)
        res += byte_to_bits(x)
    return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_eof(self):
    """Return if the stream has reached EOF or not
    without discarding any unflushed bits

    :returns: True/False
    """
    pos = self._stream.tell()
    byte = self._stream.read(1)
    self._stream.seek(pos, 0)

    return utils.binary(byte) == utils.binary("")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def close(self):
    """Close the stream
    """
    self.closed = True
    self._flush_bits_to_stream()
    self._stream.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_bits(self, num):
    """Read ``num`` number of bits from the stream

    :num: number of bits to read
    :returns: a list of ``num`` bits, or an empty list if EOF has been reached
    """
    if num > len(self._bits):
        needed = num - len(self._bits)
        num_bytes = int(math.ceil(needed / 8.0))
        read_bytes = self._stream.read(num_bytes)

        for bit in bytes_to_bits(read_bytes):
            self._bits.append(bit)

    res = []
    while len(res) < num and len(self._bits) > 0:
        res.append(self._bits.popleft())

    return res
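A minimal, standalone sketch of the same bit-buffering idea: bits are pulled from an underlying byte stream into a deque and popped off as requested, so reads need not be byte-aligned. Names here are illustrative, not the library's API.

import io
import math
from collections import deque

def byte_to_bits(b):
    """MSB-first list of the 8 bits in a byte."""
    return [(b >> (7 - i)) & 1 for i in range(8)]

class BitReader(object):
    def __init__(self, stream):
        self._stream = stream
        self._bits = deque()

    def read_bits(self, num):
        if num > len(self._bits):
            needed = num - len(self._bits)
            num_bytes = int(math.ceil(needed / 8.0))
            for byte in bytearray(self._stream.read(num_bytes)):
                self._bits.extend(byte_to_bits(byte))
        return [self._bits.popleft()
                for _ in range(min(num, len(self._bits)))]

reader = BitReader(io.BytesIO(b"\xf0"))
print(reader.read_bits(4))  # [1, 1, 1, 1]
print(reader.read_bits(4))  # [0, 0, 0, 0]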
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write(self, data):
    """Write data to the stream

    :data: the data to write to the stream
    :returns: None
    """
    if self.padded:
        # flush out any remaining bits first
        if len(self._bits) > 0:
            self._flush_bits_to_stream()
        self._stream.write(data)
    else:
        # nothing to do here
        if len(data) == 0:
            return

        bits = bytes_to_bits(data)
        self.write_bits(bits)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write_bits(self, bits):
    """Write the bits to the stream.

    Add the bits to the existing unflushed bits and write
    complete bytes to the stream.
    """
    for bit in bits:
        self._bits.append(bit)

    while len(self._bits) >= 8:
        byte_bits = [self._bits.popleft() for x in six.moves.range(8)]
        byte = bits_to_bytes(byte_bits)
        self._stream.write(byte)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def seek(self, pos, seek_type=0):
    """Seek to the specified position in the stream with seek_type.
    Unflushed bits will be discarded in the case of a seek.

    The stream will also keep track of which bytes have and have
    not been consumed so that the dom will capture all of the bytes
    in the stream.

    :pos: offset
    :seek_type: direction
    :returns: TODO
    """
    self._bits.clear()
    return self._stream.seek(pos, seek_type)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def size(self):
    """Return the size of the stream, or -1 if it cannot
    be determined.
    """
    pos = self._stream.tell()
    # seek to the end of the stream
    self._stream.seek(0, 2)
    size = self._stream.tell()
    self._stream.seek(pos, 0)

    return size
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def unconsumed_ranges(self):
    """Return an IntervalTree of unconsumed ranges, of the format
    [start, end) with the end value not being included
    """
    res = IntervalTree()

    prev = None

    # normal iteration is not in a predictable order
    ranges = sorted([x for x in self.range_set], key=lambda x: x.begin)

    for rng in ranges:
        if prev is None:
            prev = rng
            continue
        res.add(Interval(prev.end, rng.begin))
        prev = rng

    # means we've seeked past the end
    if len(self.range_set[self.tell()]) != 1:
        res.add(Interval(prev.end, self.tell()))

    return res
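The gap computation above can be demonstrated standalone with the intervaltree package, which provides the Interval and IntervalTree types used here (the consumed ranges below are hypothetical):

from intervaltree import Interval, IntervalTree

# bytes 0-3 and 8-11 were consumed; the gap between them is unconsumed
consumed = IntervalTree([Interval(0, 4), Interval(8, 12)])
ranges = sorted(consumed, key=lambda r: r.begin)
gaps = IntervalTree(
    Interval(a.end, b.begin)
    for a, b in zip(ranges, ranges[1:])
    if a.end < b.begin
)
print(sorted(gaps))  # [Interval(4, 8)] - bytes 4..7 were never consumed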
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _update_consumed_ranges(self, start_pos, end_pos):
    """Update the ``self.consumed_ranges`` array with which
    byte ranges have been consecutively consumed.
    """
    self.range_set.add(Interval(start_pos, end_pos + 1))
    self.range_set.merge_overlaps()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _validate_markdown(self, expfile):
    '''ensure that fields are present in markdown file'''

    try:
        import yaml
    except ImportError:
        bot.error('Python yaml is required for testing yml/markdown files.')
        sys.exit(1)

    self.metadata = {}
    # use replace, not strip: strip('.md') removes characters, not a suffix
    uid = os.path.basename(expfile).replace('.md', '')

    if os.path.exists(expfile):
        with open(expfile, "r") as stream:
            docs = yaml.load_all(stream, Loader=yaml.SafeLoader)
            for doc in docs:
                if isinstance(doc, dict):
                    for k, v in doc.items():
                        print('%s: %s' % (k, v))
                        self.metadata[k] = v
        self.metadata['uid'] = uid

        fields = ['github', 'preview', 'name', 'layout',
                  'tags', 'uid', 'maintainer']

        # Tests for all fields
        for field in fields:
            if field not in self.metadata:
                return False
            if self.metadata[field] in ['', None]:
                return False

        if 'github' not in self.metadata['github']:
            return notvalid('%s: not a valid github repository' % uid)
        if not isinstance(self.metadata['tags'], list):
            return notvalid('%s: tags must be a list' % uid)
        if not re.search("(\w+://)(.+@)*([\w\d\.]+)(:[\d]+){0,1}/*(.*)",
                         self.metadata['github']):
            return notvalid('%s is not a valid URL.' % (self.metadata['github']))

    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def perform_checks(template, do_redirect=False, context=None,
                   next=None, quiet=False):
    '''return all checks for required variables before returning to
       desired view

       Parameters
       ==========
       template: the html template to render
       do_redirect: if True, perform a redirect and not render
       context: dictionary of context variables to pass to render_template
       next: a pre-defined next experiment, will calculate if None
       quiet: decrease verbosity
    '''
    from expfactory.server import app
    username = session.get('username')
    subid = session.get('subid')

    # If redirect, "last" is currently active (about to start)
    # If render, "last" is last completed / active experiment (just finished)
    last = session.get('exp_id')
    if next is None:
        next = app.get_next(session)
    session['exp_id'] = next

    # Headless mode requires token
    if "token" not in session and app.headless is True:
        flash('A token is required for these experiments.')
        return redirect('/')

    # Update the user / log
    if quiet is False:
        app.logger.info("[router] %s --> %s [subid] %s [user] %s"
                        % (last, next, subid, username))

    if username is None and app.headless is False:
        flash('You must start a session before doing experiments.')
        return redirect('/')

    if subid is None:
        flash('You must have a participant identifier before doing experiments')
        return redirect('/')

    if next is None:
        flash('Congratulations, you have finished the battery!')
        return redirect('/finish')

    if do_redirect is True:
        app.logger.debug('Redirecting to %s' % template)
        return redirect(template)

    if context is not None and isinstance(context, dict):
        app.logger.debug('Rendering %s' % template)
        return render_template(template, **context)

    return render_template(template)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def pack_gzip(params, ctxt, scope, stream, coord):
    """``PackGZip`` - Concatenates the build output of all params and
    gzips the resulting data, returning a char array.

    Example: ::
    """
    if len(params) == 0:
        raise errors.InvalidArguments(
            coord, "{} args".format(len(params)), "at least one argument"
        )

    built = utils.binary("")
    for param in params:
        if isinstance(param, pfp.fields.Field):
            built += param._pfp__build()
        else:
            built += param

    return zlib.compress(built)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _validate_folder(self, folder=None):
    ''' validate folder takes a cloned github repo, ensures
        the existence of the config.json, and validates it.
    '''
    from expfactory.experiment import load_experiment

    if folder is None:
        folder = os.path.abspath(os.getcwd())

    config = load_experiment(folder, return_path=True)
    if not config:
        return notvalid("%s is not an experiment." % (folder))

    return self._validate_config(folder)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _validate_config(self, folder, validate_folder=True):
    ''' validate config is the primary validation function that checks
        for presence and format of required fields.

        Parameters
        ==========
        :folder: full path to folder with config.json
        :name: if provided, the folder name to check against exp_id
    '''
    config = "%s/config.json" % folder
    name = os.path.basename(folder)
    if not os.path.exists(config):
        return notvalid("%s: config.json not found." % (folder))

    # Load the config
    try:
        config = read_json(config)
    except:
        return notvalid("%s: cannot load json, invalid." % (name))

    # Config.json should be single dict
    if isinstance(config, list):
        return notvalid("%s: config.json is a list, not valid." % (name))

    # Check over required fields
    fields = self.get_validation_fields()
    for field, value, ftype in fields:
        bot.verbose('field: %s, required: %s' % (field, value))

        # Field must be in the keys if required
        if field not in config.keys():
            if value == 1:
                return notvalid("%s: config.json is missing required field %s"
                                % (name, field))

        # Field is present, check type
        else:
            if not isinstance(config[field], ftype):
                return notvalid("%s: invalid type, must be %s."
                                % (name, str(ftype)))

            # Expid gets special treatment
            if field == "exp_id" and validate_folder is True:
                if config[field] != name:
                    return notvalid("%s: exp_id parameter %s does not match folder name."
                                    % (name, config[field]))

                # name cannot have special characters, only _ and letters/numbers
                if not re.match("^[a-z0-9_-]*$", config[field]):
                    message = "%s: exp_id parameter %s has invalid characters; "
                    message += "only lowercase [a-z],[0-9], -, and _ allowed."
                    return notvalid(message % (name, config[field]))

    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _read_runtime_vars(variable_file, sep=','):
    '''read the entire runtime variable file, and return a
       list of lists, each corresponding to a row. We also check
       the header, and exit if anything is missing or malformed.

       Parameters
       ==========
       variable_file: full path to the tabular file with token, exp_id, etc.
       sep: the default delimiter to use, if not set in environment.

       Returns
       =======
       valid_rows: a list of lists, each a valid row

           [['test-parse-url', 'globalname', 'globalvalue', '*'],
            ['test-parse-url', 'color', 'red', '123'],
            ['test-parse-url', 'color', 'blue', '456'],
            ['test-parse-url', 'words', 'at the thing', '123'],
            ['test-parse-url', 'words', 'omg tacos', '456']]
    '''
    rows = [x for x in read_file(variable_file).split('\n') if x.strip()]
    valid_rows = []

    if len(rows) > 0:

        # Validate header and rows, exit if not valid
        header = rows.pop(0).split(sep)
        validate_header(header)
        for row in rows:
            row = _validate_row(row, sep=sep, required_length=4)

            # If the row is returned, it is valid
            if row:
                valid_rows.append(row)

    return valid_rows
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _parse_row(row, sep=','):
    '''parse row is a helper function to simply clean up a string, and
       parse into a row based on a delimiter. If a required length is
       provided, we check for this too.
    '''
    parsed = row.split(sep)
    parsed = [x for x in parsed if x.strip()]
    return parsed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def superuser_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a
    staff member, displaying the login page if necessary.
    """
    @wraps(view_func)
    def _checklogin(request, *args, **kwargs):
        if request.user.is_active and request.user.is_superuser:
            # The user is valid. Continue to the admin page.
            return view_func(request, *args, **kwargs)

        assert hasattr(request, 'session'), (
            "The Django admin requires session middleware to be installed. "
            "Edit your MIDDLEWARE_CLASSES setting to insert "
            "'django.contrib.sessions.middleware.SessionMiddleware'."
        )
        defaults = {
            'template_name': 'admin/login.html',
            'redirect_field_name': request.get_full_path(),
            'authentication_form': AdminAuthenticationForm,
            'extra_context': {
                'title': _('Log in'),
                'app_path': request.get_full_path(),
            },
        }
        # LoginView is a class-based view, so it must be instantiated
        # through as_view() rather than called directly
        return LoginView.as_view(**defaults)(request)
    return _checklogin
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def from_lines(cls, pattern_factory, lines):
    """
    Compiles the pattern lines.

    *pattern_factory* can be either the name of a registered pattern
    factory (:class:`str`), or a :class:`~collections.abc.Callable` used
    to compile patterns. It must accept an uncompiled pattern
    (:class:`str`) and return the compiled pattern (:class:`.Pattern`).

    *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
    pattern (:class:`str`). This simply has to yield each line so it can
    be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
    or the result from :meth:`str.splitlines`.

    Returns the :class:`PathSpec` instance.
    """
if isinstance(pattern_factory, string_types):
    pattern_factory = util.lookup_pattern(pattern_factory)
if not callable(pattern_factory):
    raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))

if isinstance(lines, (bytes, unicode)):
    raise TypeError("lines:{!r} is not an iterable.".format(lines))

lines = [pattern_factory(line) for line in lines if line]
return cls(lines)
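A brief usage sketch, assuming this is pathspec's PathSpec.from_lines with the registered 'gitwildmatch' factory:

import pathspec

spec = pathspec.PathSpec.from_lines('gitwildmatch', [
    '*.tmp',
    '!important.tmp',
])
print(spec.match_file('scratch.tmp'))    # True
print(spec.match_file('important.tmp'))  # False - negated by the '!' pattern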
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def match_file(self, file, separators=None):
    """
    Matches the file to this path-spec.

    *file* (:class:`str`) is the file path to be matched against
    :attr:`self.patterns <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`)
    optionally contains the path separators to normalize. See
    :func:`~pathspec.util.normalize_file` for more information.

    Returns :data:`True` if *file* matched; otherwise, :data:`False`.
    """
    norm_file = util.normalize_file(file, separators=separators)
    return util.match_file(self.patterns, norm_file)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def match_files(self, files, separators=None):
    """
    Matches the files to this path-spec.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`)
    contains the file paths to be matched against :attr:`self.patterns
    <PathSpec.patterns>`.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. See :func:`~pathspec.util.normalize_file` for more
    information.

    Returns the matched files (:class:`~collections.abc.Iterable` of
    :class:`str`).
    """
    if isinstance(files, (bytes, unicode)):
        raise TypeError("files:{!r} is not an iterable.".format(files))

    file_map = util.normalize_files(files, separators=separators)
    matched_files = util.match_files(self.patterns, iterkeys(file_map))
    for path in matched_files:
        yield file_map[path]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def match_tree(self, root, on_error=None, follow_links=None):
    """
    Walks the specified root path for all files and matches them to this
    path-spec.

    *root* (:class:`str`) is the root directory to search for files.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions. See
    :func:`~pathspec.util.iter_tree` for more information.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether
    to walk symbolic links that resolve to directories. See
    :func:`~pathspec.util.iter_tree` for more information.

    Returns the matched files (:class:`~collections.abc.Iterable` of
    :class:`str`).
    """
    files = util.iter_tree(root, on_error=on_error, follow_links=follow_links)
    return self.match_files(files)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def pattern_to_regex(cls, pattern):
    """
    Convert the pattern into a regular expression.

    *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
    convert into a regular expression.

    Returns the uncompiled regular expression (:class:`unicode`,
    :class:`bytes`, or :data:`None`), and whether matched files should
    be included (:data:`True`), excluded (:data:`False`), or if it is a
    null-operation (:data:`None`).
    """
    if isinstance(pattern, unicode):
        return_type = unicode
    elif isinstance(pattern, bytes):
        return_type = bytes
        pattern = pattern.decode(_BYTES_ENCODING)
    else:
        raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))

    pattern = pattern.strip()

    if pattern.startswith('#'):
        # A pattern starting with a hash ('#') serves as a comment
        # (neither includes nor excludes files). Escape the hash with a
        # back-slash to match a literal hash (i.e., '\#').
        regex = None
        include = None

    elif pattern == '/':
        # EDGE CASE: According to `git check-ignore` (v2.4.1), a single
        # '/' does not match any file.
        regex = None
        include = None

    elif pattern:
        if pattern.startswith('!'):
            # A pattern starting with an exclamation mark ('!') negates
            # the pattern (exclude instead of include). Escape the
            # exclamation mark with a back-slash to match a literal
            # exclamation mark (i.e., '\!').
            include = False
            # Remove leading exclamation mark.
            pattern = pattern[1:]
        else:
            include = True

        if pattern.startswith('\\'):
            # Remove leading back-slash escape for escaped hash ('#') or
            # exclamation mark ('!').
            pattern = pattern[1:]

        # Split pattern into segments.
        pattern_segs = pattern.split('/')

        # Normalize pattern to make processing easier.

        if not pattern_segs[0]:
            # A pattern beginning with a slash ('/') will only match
            # paths directly on the root directory instead of any
            # descendant paths. So, remove empty first segment to make
            # pattern relative to root.
            del pattern_segs[0]

        elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
            # A single pattern without a beginning slash ('/') will
            # match any descendant path. This is equivalent to
            # "**/{pattern}". So, prepend with double-asterisks to make
            # pattern relative to root.
            # EDGE CASE: This also holds for a single pattern with a
            # trailing slash (e.g. dir/).
            if pattern_segs[0] != '**':
                pattern_segs.insert(0, '**')

        else:
            # EDGE CASE: A pattern without a beginning slash ('/') but
            # contains at least one prepended directory (e.g.
            # "dir/{pattern}") should not match "**/dir/{pattern}",
            # according to `git check-ignore` (v2.4.1).
            pass

        if not pattern_segs[-1] and len(pattern_segs) > 1:
            # A pattern ending with a slash ('/') will match all
            # descendant paths if it is a directory but not if it is a
            # regular file. This is equivalent to "{pattern}/**". So,
            # set last segment to double asterisks to include all
            # descendants.
            pattern_segs[-1] = '**'

        # Build regular expression from pattern.
        output = ['^']
        need_slash = False
        end = len(pattern_segs) - 1
        for i, seg in enumerate(pattern_segs):
            if seg == '**':
                if i == 0 and i == end:
                    # A pattern consisting solely of double-asterisks
                    # ('**') will match every path.
                    output.append('.+')
                elif i == 0:
                    # A normalized pattern beginning with
                    # double-asterisks ('**') will match any leading
                    # path segments.
                    output.append('(?:.+/)?')
                    need_slash = False
                elif i == end:
                    # A normalized pattern ending with double-asterisks
                    # ('**') will match any trailing path segments.
                    output.append('/.*')
                else:
                    # A pattern with inner double-asterisks ('**') will
                    # match multiple (or zero) inner path segments.
                    output.append('(?:/.+)?')
                    need_slash = True

            elif seg == '*':
                # Match single path segment.
                if need_slash:
                    output.append('/')
                output.append('[^/]+')
                need_slash = True

            else:
                # Match segment glob pattern.
                if need_slash:
                    output.append('/')
                output.append(cls._translate_segment_glob(seg))
                if i == end and include is True:
                    # A pattern ending without a slash ('/') will match
                    # a file or a directory (with paths underneath it).
                    # E.g., "foo" matches "foo", "foo/bar",
                    # "foo/bar/baz", etc.
                    # EDGE CASE: However, this does not hold for
                    # exclusion cases according to `git check-ignore`
                    # (v2.4.1).
                    output.append('(?:/.*)?')
                need_slash = True

        output.append('$')
        regex = ''.join(output)

    else:
        # A blank pattern is a null-operation (neither includes nor
        # excludes files).
        regex = None
        include = None

    if regex is not None and return_type is bytes:
        regex = regex.encode(_BYTES_ENCODING)

    return regex, include
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _translate_segment_glob(pattern):
    """
    Translates the glob pattern to a regular expression. This is used in
    the constructor to translate a path segment glob pattern to its
    corresponding regular expression.

    *pattern* (:class:`str`) is the glob pattern.

    Returns the regular expression (:class:`str`).
    """
    # NOTE: This is derived from `fnmatch.translate()` and is similar to
    # the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.

    escape = False
    regex = ''
    i, end = 0, len(pattern)
    while i < end:
        # Get next character.
        char = pattern[i]
        i += 1

        if escape:
            # Escape the character.
            escape = False
            regex += re.escape(char)

        elif char == '\\':
            # Escape character, escape next character.
            escape = True

        elif char == '*':
            # Multi-character wildcard. Match any string (except
            # slashes), including an empty string.
            regex += '[^/]*'

        elif char == '?':
            # Single-character wildcard. Match any single character
            # (except a slash).
            regex += '[^/]'

        elif char == '[':
            # Bracket expression wildcard. Except for the beginning
            # exclamation mark, the whole bracket expression can be used
            # directly as regex, but we have to find where the
            # expression ends.
            # - "[][!]" matches ']', '[' and '!'.
            # - "[]-]" matches ']' and '-'.
            # - "[!]a-]" matches any character except ']', 'a' and '-'.
            j = i
            # Pass bracket expression negation.
            if j < end and pattern[j] == '!':
                j += 1
            # Pass first closing bracket if it is at the beginning of
            # the expression.
            if j < end and pattern[j] == ']':
                j += 1
            # Find closing bracket. Stop once we reach the end or find
            # it.
            while j < end and pattern[j] != ']':
                j += 1

            if j < end:
                # Found end of bracket expression. Increment j to be one
                # past the closing bracket:
                #
                #  [...]
                #   ^   ^
                #   i   j
                #
                j += 1
                expr = '['

                if pattern[i] == '!':
                    # Bracket expression needs to be negated.
                    expr += '^'
                    i += 1
                elif pattern[i] == '^':
                    # POSIX declares that the regex bracket expression
                    # negation "[^...]" is undefined in a glob pattern.
                    # Python's `fnmatch.translate()` escapes the caret
                    # ('^') as a literal. To maintain consistency with
                    # undefined behavior, I am escaping the '^' as well.
                    expr += '\\^'
                    i += 1

                # Build regex bracket expression. Escape slashes so they
                # are treated as literal slashes by regex as defined by
                # POSIX.
                expr += pattern[i:j].replace('\\', '\\\\')

                # Add regex bracket expression to regex result.
                regex += expr

                # Set i to one past the closing bracket.
                i = j

            else:
                # Failed to find closing bracket, treat opening bracket
                # as a bracket literal instead of as an expression.
                regex += '\\['

        else:
            # Regular character, escape it for regex.
            regex += re.escape(char)

    return regex
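The expected translations can be spot-checked directly against the rules above; the regex strings below are hand-derived from those rules, not produced by running the library:

import re

# '*' becomes '[^/]*', '?' becomes '[^/]', bracket expressions pass
# through, and any other character is regex-escaped.
examples = [
    ('*.py',   r'[^/]*\.py',  'script.py'),
    ('file?',  r'file[^/]',   'file1'),
    ('ba[rz]', r'ba[rz]',     'baz'),
]
for glob, regex, sample in examples:
    assert re.fullmatch(regex, sample), (glob, sample)
    assert not re.fullmatch(regex, 'a/b')  # a slash never matches
print("segment translations behave as described")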
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def pattern_to_regex(cls, *args, **kw):
    """
    Warn about deprecation.
    """
    cls._deprecated()
    return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def iter_tree(root, on_error=None, follow_links=None):
    """
    Walks the specified directory for all files.

    *root* (:class:`str`) is the root directory to search for files.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions. It will
    be called with the exception (:exc:`OSError`). Reraise the exception
    to abort the walk. Default is :data:`None` to ignore file-system
    exceptions.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether
    to walk symbolic links that resolve to directories. Default is
    :data:`None` for :data:`True`.

    Raises :exc:`RecursionError` if recursion is detected.

    Returns an :class:`~collections.abc.Iterable` yielding the path to
    each file (:class:`str`) relative to *root*.
    """
    if on_error is not None and not callable(on_error):
        raise TypeError("on_error:{!r} is not callable.".format(on_error))

    if follow_links is None:
        follow_links = True

    for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
        yield file_rel
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links):
    """
    Scan the directory for all descendant files.

    *root_full* (:class:`str`) the absolute path to the root directory.

    *dir_rel* (:class:`str`) the path to the directory to scan relative
    to *root_full*.

    *memo* (:class:`dict`) keeps track of ancestor directories
    encountered. Maps each ancestor real path (:class:`str`) to relative
    path (:class:`str`).

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
    optionally is the error handler for file-system exceptions.

    *follow_links* (:class:`bool`) is whether to walk symbolic links
    that resolve to directories.
    """
    dir_full = os.path.join(root_full, dir_rel)
    dir_real = os.path.realpath(dir_full)

    # Remember each encountered ancestor directory and its canonical
    # (real) path. If a canonical path is encountered more than once,
    # recursion has occurred.
    if dir_real not in memo:
        memo[dir_real] = dir_rel
    else:
        raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)

    for node in os.listdir(dir_full):
        node_rel = os.path.join(dir_rel, node)
        node_full = os.path.join(root_full, node_rel)

        # Inspect child node.
        try:
            node_stat = os.lstat(node_full)
        except OSError as e:
            if on_error is not None:
                on_error(e)
            continue

        if stat.S_ISLNK(node_stat.st_mode):
            # Child node is a link, inspect the target node.
            is_link = True
            try:
                node_stat = os.stat(node_full)
            except OSError as e:
                if on_error is not None:
                    on_error(e)
                continue
        else:
            is_link = False

        if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
            # Child node is a directory, recurse into it and yield its
            # descendant files.
            for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
                yield file_rel

        elif stat.S_ISREG(node_stat.st_mode):
            # Child node is a file, yield it.
            yield node_rel

    # NOTE: Make sure to remove the canonical (real) path of the
    # directory from the ancestors memo once we are done with it. This
    # allows the same directory to appear multiple times. If this is not
    # done, the second occurrence of the directory will be incorrectly
    # interpreted as a recursion. See
    # <https://github.com/cpburnz/python-path-specification/pull/7>.
    del memo[dir_real]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def match_file(patterns, file):
    """
    Matches the file to the patterns.

    *patterns* (:class:`~collections.abc.Iterable` of
    :class:`~pathspec.pattern.Pattern`) contains the patterns to use.

    *file* (:class:`str`) is the normalized file path to be matched
    against *patterns*.

    Returns :data:`True` if *file* matched; otherwise, :data:`False`.
    """
    matched = False
    for pattern in patterns:
        if pattern.include is not None:
            if file in pattern.match((file,)):
                matched = pattern.include
    return matched
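The semantics above are last-match-wins: every pattern with a non-None include is consulted in order, and the final match decides. A hedged illustration using the same Pattern.match API as the snippet above (assuming pathspec's gitwildmatch module path):

from pathspec.patterns.gitwildmatch import GitWildMatchPattern

patterns = [GitWildMatchPattern('*.log'), GitWildMatchPattern('!keep.log')]
for f in ('debug.log', 'keep.log'):
    matched = False
    for pattern in patterns:
        if pattern.include is not None and f in pattern.match((f,)):
            matched = pattern.include
    print(f, matched)  # debug.log True, keep.log False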
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def match_files(patterns, files):
    """
    Matches the files to the patterns.

    *patterns* (:class:`~collections.abc.Iterable` of
    :class:`~pathspec.pattern.Pattern`) contains the patterns to use.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`)
    contains the normalized file paths to be matched against *patterns*.

    Returns the matched files (:class:`set` of :class:`str`).
    """
    all_files = files if isinstance(files, collection_type) else list(files)
    return_files = set()
    for pattern in patterns:
        if pattern.include is not None:
            result_files = pattern.match(all_files)
            if pattern.include:
                return_files.update(result_files)
            else:
                return_files.difference_update(result_files)
    return return_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def normalize_files(files, separators=None):
    """
    Normalizes the file paths to use the POSIX path separator.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`)
    contains the file paths to be normalized.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`;
    or :data:`None`) optionally contains the path separators to
    normalize. See :func:`normalize_file` for more information.

    Returns a :class:`dict` mapping each normalized file path
    (:class:`str`) to the original file path (:class:`str`).
    """
    norm_files = {}
    for path in files:
        norm_files[normalize_file(path, separators=separators)] = path
    return norm_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def register_pattern(name, pattern_factory, override=None):
    """
    Registers the specified pattern factory.

    *name* (:class:`str`) is the name to register the pattern factory
    under.

    *pattern_factory* (:class:`~collections.abc.Callable`) is used to
    compile patterns. It must accept an uncompiled pattern
    (:class:`str`) and return the compiled pattern (:class:`.Pattern`).

    *override* (:class:`bool` or :data:`None`) optionally is whether to
    allow overriding an already registered pattern under the same name
    (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
    (:data:`False`). Default is :data:`None` for :data:`False`.
    """
    if not isinstance(name, string_types):
        raise TypeError("name:{!r} is not a string.".format(name))

    if not callable(pattern_factory):
        raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))

    if name in _registered_patterns and not override:
        raise AlreadyRegisteredError(name, _registered_patterns[name])

    _registered_patterns[name] = pattern_factory
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def user_default_serializer(self, obj):
    """Convert a User to a cached instance representation."""
    if not obj:
        return None
    self.user_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('username', obj.username),
        self.field_to_json('DateTime', 'date_joined', obj.date_joined),
        self.field_to_json(
            'PKList', 'votes', model=Choice, pks=obj._votes_pks),
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def user_default_loader(self, pk):
    """Load a User from the database."""
    try:
        obj = User.objects.get(pk=pk)
    except User.DoesNotExist:
        return None
    else:
        self.user_default_add_related_pks(obj)
        return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def user_default_add_related_pks(self, obj):
    """Add related primary keys to a User instance."""
    if not hasattr(obj, '_votes_pks'):
        obj._votes_pks = list(obj.votes.values_list('pk', flat=True))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def group_default_invalidator(self, obj):
    """Invalidated cached items when the Group changes."""
    user_pks = User.objects.values_list('pk', flat=True)
    return [('User', pk, False) for pk in user_pks]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def question_default_serializer(self, obj):
    """Convert a Question to a cached instance representation."""
    if not obj:
        return None
    self.question_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('question_text', obj.question_text),
        self.field_to_json('DateTime', 'pub_date', obj.pub_date),
        self.field_to_json(
            'PKList', 'choices', model=Choice, pks=obj._choice_pks),
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def question_default_loader(self, pk):
    """Load a Question from the database."""
    try:
        obj = Question.objects.get(pk=pk)
    except Question.DoesNotExist:
        return None
    else:
        self.question_default_add_related_pks(obj)
        return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def question_default_add_related_pks(self, obj):
    """Add related primary keys to a Question instance."""
    if not hasattr(obj, '_choice_pks'):
        obj._choice_pks = list(obj.choices.values_list('pk', flat=True))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def choice_default_serializer(self, obj):
    """Convert a Choice to a cached instance representation."""
    if not obj:
        return None
    self.choice_default_add_related_pks(obj)
    return dict((
        ('id', obj.id),
        ('choice_text', obj.choice_text),
        self.field_to_json(
            'PK', 'question', model=Question, pk=obj.question_id),
        self.field_to_json(
            'PKList', 'voters', model=User, pks=obj._voter_pks),
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def choice_default_loader(self, pk):
    """Load a Choice from the database."""
    try:
        obj = Choice.objects.get(pk=pk)
    except Choice.DoesNotExist:
        return None
    else:
        self.choice_default_add_related_pks(obj)
        return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def choice_default_add_related_pks(self, obj):
    """Add related primary keys to a Choice instance."""
    if not hasattr(obj, '_voter_pks'):
        obj._voter_pks = obj.voters.values_list('pk', flat=True)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def choice_default_invalidator(self, obj):
    """Invalidated cached items when the Choice changes."""
    invalid = [('Question', obj.question_id, True)]
    for pk in obj.voters.values_list('pk', flat=True):
        invalid.append(('User', pk, False))
    return invalid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def cache(self):
    """Get the Django cache interface.

    This allows disabling the cache with
    settings.USE_DRF_INSTANCE_CACHE=False. It also delays import so that
    Django Debug Toolbar will record cache requests.
    """
    if not self._cache:
        use_cache = getattr(settings, 'USE_DRF_INSTANCE_CACHE', True)
        if use_cache:
            from django.core.cache import cache
            self._cache = cache
    return self._cache
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def delete_all_versions(self, model_name, obj_pk):
    """Delete all versions of a cached instance."""
    if self.cache:
        for version in self.versions:
            key = self.key_for(version, model_name, obj_pk)
            self.cache.delete(key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def model_function(self, model_name, version, func_name):
    """Return the model-specific caching function."""
    assert func_name in ('serializer', 'loader', 'invalidator')
    name = "%s_%s_%s" % (model_name.lower(), version, func_name)
    return getattr(self, name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_function(self, type_code, func_name):
    """Return the field function."""
    assert func_name in ('to_json', 'from_json')
    name = "field_%s_%s" % (type_code.lower(), func_name)
    return getattr(self, name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_to_json(self, type_code, key, *args, **kwargs):
    """Convert a field to a JSON-serializable representation."""
    assert ':' not in key
    to_json = self.field_function(type_code, 'to_json')
    key_and_type = "%s:%s" % (key, type_code)
    json_value = to_json(*args, **kwargs)
    return key_and_type, json_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_from_json(self, key_and_type, json_value):
    """Convert a JSON-serializable representation back to a field."""
    assert ':' in key_and_type
    key, type_code = key_and_type.split(':', 1)
    from_json = self.field_function(type_code, 'from_json')
    value = from_json(json_value)
    return key, value
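A toy, self-contained illustration of the "name:TypeCode" key scheme used by the pair of methods above: the type code rides along in the cached key so the matching from_json converter can be found on the way back out. The Date codec here is a stand-in, not the library's full set of field functions.

from datetime import date

class FieldCodec(object):
    def field_date_to_json(self, day):
        return [day.year, day.month, day.day]

    def field_date_from_json(self, triple):
        return date(*triple)

    def field_to_json(self, type_code, key, value):
        to_json = getattr(self, 'field_%s_to_json' % type_code.lower())
        return "%s:%s" % (key, type_code), to_json(value)

    def field_from_json(self, key_and_type, json_value):
        key, type_code = key_and_type.split(':', 1)
        from_json = getattr(self, 'field_%s_from_json' % type_code.lower())
        return key, from_json(json_value)

codec = FieldCodec()
k, v = codec.field_to_json('Date', 'pub_date', date(2024, 1, 2))
print(k, v)                         # pub_date:Date [2024, 1, 2]
print(codec.field_from_json(k, v))  # ('pub_date', datetime.date(2024, 1, 2))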
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_instances(self, object_specs, version=None):
    """Get the cached native representation for one or more objects.

    Keyword arguments:
    object_specs - A sequence of triples (model name, pk, obj):
        - model name - the name of the model
        - pk - the primary key of the instance
        - obj - the instance, or None to load it
    version - The cache version to use, or None for default

    To get the 'new object' representation, set pk and obj to None.

    Return is a dictionary:
    key - (model name, pk)
    value - (native representation, pk, object or None)
    """
    ret = dict()
    spec_keys = set()
    cache_keys = []
    version = version or self.default_version

    # Construct all the cache keys to fetch
    for model_name, obj_pk, obj in object_specs:
        assert model_name
        assert obj_pk

        # Get cache keys to fetch
        obj_key = self.key_for(version, model_name, obj_pk)
        spec_keys.add((model_name, obj_pk, obj, obj_key))
        cache_keys.append(obj_key)

    # Fetch the cache keys
    if cache_keys and self.cache:
        cache_vals = self.cache.get_many(cache_keys)
    else:
        cache_vals = {}

    # Use cached representations, or recreate
    cache_to_set = {}
    for model_name, obj_pk, obj, obj_key in spec_keys:

        # Load cached objects
        obj_val = cache_vals.get(obj_key)
        obj_native = json.loads(obj_val) if obj_val else None

        # Invalid or not set - load from database
        if not obj_native:
            if not obj:
                loader = self.model_function(model_name, version, 'loader')
                obj = loader(obj_pk)
            serializer = self.model_function(
                model_name, version, 'serializer')
            obj_native = serializer(obj) or {}
            if obj_native:
                cache_to_set[obj_key] = json.dumps(obj_native)

        # Get fields to convert
        keys = [key for key in obj_native.keys() if ':' in key]
        for key in keys:
            json_value = obj_native.pop(key)
            name, value = self.field_from_json(key, json_value)
            assert name not in obj_native
            obj_native[name] = value

        if obj_native:
            ret[(model_name, obj_pk)] = (obj_native, obj_key, obj)

    # Save any new cached representations
    if cache_to_set and self.cache:
        self.cache.set_many(cache_to_set)

    return ret
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def update_instance(
        self, model_name, pk, instance=None, version=None,
        update_only=False):
    """Create or update a cached instance.

    Keyword arguments are:
    model_name - The name of the model
    pk - The primary key of the instance
    instance - The Django model instance, or None to load it
    version - Version to update, or None for all
    update_only - If False (default), then missing cache entries will be
        populated and will cause follow-on invalidation. If True, then
        only entries already in the cache will be updated and cause
        follow-on invalidation.

    Return is a list of tuples (model name, pk, immediate) that also
    needs to be updated.
    """
    versions = [version] if version else self.versions
    invalid = []
    for version in versions:
        serializer = self.model_function(model_name, version, 'serializer')
        loader = self.model_function(model_name, version, 'loader')
        invalidator = self.model_function(
            model_name, version, 'invalidator')
        if serializer is None and loader is None and invalidator is None:
            continue

        if self.cache is None:
            continue

        # Try to load the instance
        if not instance:
            instance = loader(pk)

        if serializer:
            # Get current value, if in cache
            key = self.key_for(version, model_name, pk)
            current_raw = self.cache.get(key)
            current = json.loads(current_raw) if current_raw else None

            # Get new value
            if update_only and current_raw is None:
                new = None
            else:
                new = serializer(instance)
            deleted = not instance

            # If cache is invalid, update cache
            invalidate = (current != new) or deleted
            if invalidate:
                if deleted:
                    self.cache.delete(key)
                else:
                    self.cache.set(key, json.dumps(new))
        else:
            invalidate = True

        # Invalidate upstream caches
        if instance and invalidate:
            for upstream in invalidator(instance):
                if isinstance(upstream, str):
                    self.cache.delete(upstream)
                else:
                    m, i, immediate = upstream
                    if immediate:
                        invalidate_key = self.key_for(version, m, i)
                        self.cache.delete(invalidate_key)
                    invalid.append((m, i, version))
    return invalid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_date_to_json(self, day):
    """Convert a date to a date triple."""
    if isinstance(day, six.string_types):
        day = parse_date(day)
    return [day.year, day.month, day.day] if day else None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_datetime_from_json(self, json_val):
    """Convert a UTC timestamp to a UTC datetime."""
    if type(json_val) == int:
        seconds = int(json_val)
        dt = datetime.fromtimestamp(seconds, utc)
    elif json_val is None:
        dt = None
    else:
        seconds, microseconds = [int(x) for x in json_val.split('.')]
        dt = datetime.fromtimestamp(seconds, utc)
        dt += timedelta(microseconds=microseconds)
    return dt
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_timedelta_from_json(self, json_val):
    """Convert json_val to a timedelta object.

    json_val contains total number of seconds in the timedelta. If
    json_val is a string it will be converted to a float.
    """
    if isinstance(json_val, str):
        return timedelta(seconds=float(json_val))
    elif json_val is None:
        return None
    else:
        return timedelta(seconds=json_val)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_timedelta_to_json(self, td):
    """Convert timedelta to value containing total number of seconds.

    If there are fractions of a second the return value will be a
    string, otherwise it will be an int.
    """
    if isinstance(td, six.string_types):
        td = parse_duration(td)
    if not td:
        return None
    if td.microseconds > 0:
        return str(td.total_seconds())
    else:
        return int(td.total_seconds())
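A quick round-trip check of the convention used by the two timedelta converters above: whole seconds serialize as an int, fractional seconds as a str (toy stand-in, mirroring the logic rather than calling the methods):

from datetime import timedelta

def td_to_json(td):
    if td.microseconds > 0:
        return str(td.total_seconds())
    return int(td.total_seconds())

print(td_to_json(timedelta(minutes=2)))                       # 120
print(td_to_json(timedelta(seconds=1, microseconds=500000)))  # '1.5'
print(timedelta(seconds=float('1.5')))                        # 0:00:01.500000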
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_pklist_from_json(self, data):
    """Load a PkOnlyQueryset from a JSON dict.

    This uses the same format as cached_queryset_from_json
    """
    model = get_model(data['app'], data['model'])
    return PkOnlyQueryset(self, model, data['pks'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_pklist_to_json(self, model, pks):
    """Convert a list of primary keys to a JSON dict.

    This uses the same format as cached_queryset_to_json
    """
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    return {
        'app': app_label,
        'model': model_name,
        'pks': list(pks),
    }
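The serialized shape carries just enough to rebuild a lazy queryset later without touching the database (app/model names below assume the sample polls app used elsewhere in this section):

data = {'app': 'polls', 'model': 'choice', 'pks': [1, 2, 3]}
# field_pklist_from_json(data) would rebuild a PkOnlyQueryset over
# Choice holding exactly those primary keys.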
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_pk_from_json(self, data):
    """Load a PkOnlyModel from a JSON dict."""
    model = get_model(data['app'], data['model'])
    return PkOnlyModel(self, model, data['pk'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def field_pk_to_json(self, model, pk):
    """Convert a primary key to a JSON dict."""
    app_label = model._meta.app_label
    model_name = model._meta.model_name
    return {
        'app': app_label,
        'model': model_name,
        'pk': pk,
    }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def choice_voters_changed_update_cache(
        sender, instance, action, reverse, model, pk_set, **kwargs):
    """Update cache when choice.voters changes."""
    if action not in ('post_add', 'post_remove', 'post_clear'):
        # post_clear is not handled, because clear is called in
        # django.db.models.fields.related.ReverseManyRelatedObjects.__set__
        # before setting the new order
        return

    if model == User:
        assert type(instance) == Choice
        choices = [instance]
        if pk_set:
            users = list(User.objects.filter(pk__in=pk_set))
        else:
            users = []
    else:
        if pk_set:
            choices = list(Choice.objects.filter(pk__in=pk_set))
        else:
            choices = []
        users = [instance]

    from .tasks import update_cache_for_instance
    for choice in choices:
        update_cache_for_instance('Choice', choice.pk, choice)
    for user in users:
        update_cache_for_instance('User', user.pk, user)
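For a handler like this to fire, it has to be connected to the many-to-many through model. A hedged sketch of the wiring, assuming the models module path of the sample app (the signal API itself is standard Django):

from django.db.models.signals import m2m_changed
from .models import Choice

m2m_changed.connect(
    choice_voters_changed_update_cache,
    sender=Choice.voters.through,
)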
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def post_delete_update_cache(sender, instance, **kwargs):
    """Update the cache when an instance is deleted."""
    name = sender.__name__
    if name in cached_model_names:
        from .tasks import update_cache_for_instance
        update_cache_for_instance(name, instance.pk, instance)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def post_save_update_cache(sender, instance, created, raw, **kwargs): """Update the cache when an instance is created or modified."""
if raw: return name = sender.__name__ if name in cached_model_names: delay_cache = getattr(instance, '_delay_cache', False) if not delay_cache: from .tasks import update_cache_for_instance update_cache_for_instance(name, instance.pk, instance)
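These two receivers are presumably connected once per cached model; a sketch with Django's signal API (the Choice sender and dispatch_uid values are illustrative):

    from django.db.models.signals import post_delete, post_save

    post_save.connect(post_save_update_cache, sender=Choice,
                      dispatch_uid='choice_post_save_update_cache')
    post_delete.connect(post_delete_update_cache, sender=Choice,
                        dispatch_uid='choice_post_delete_update_cache')

Because both receivers check sender.__name__ against cached_model_names, they could also be connected without a sender argument and would simply ignore models that are not cached.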
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_queryset(self): """Get the queryset for the action. If the action is a read action (list, retrieve), return a CachedQueryset. Otherwise, return a plain Django queryset. """
queryset = super(CachedViewMixin, self).get_queryset() if self.action in ('list', 'retrieve'): return CachedQueryset(self.get_queryset_cache(), queryset=queryset) else: return queryset
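A viewset that mixes this in might look like the following sketch (the serializer name is hypothetical, and the mixin is assumed to supply or require a get_queryset_cache() implementation):

    class ChoiceViewSet(CachedViewMixin, viewsets.ModelViewSet):
        queryset = Choice.objects.all()
        serializer_class = ChoiceSerializer

With this in place, list and retrieve are served through a CachedQueryset, while create, update, and destroy fall through to the plain Django queryset.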
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_object(self, queryset=None): """ Return the object the view is displaying. Same as rest_framework.generics.GenericAPIView, but: - Raises assertion errors instead of deprecation warnings """
# Determine the base queryset to use. assert queryset is None, "Passing a queryset is disabled" queryset = self.filter_queryset(self.get_queryset()) # Perform the lookup filtering. lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field lookup = self.kwargs.get(lookup_url_kwarg, None) assert lookup is not None, "Other lookup methods are disabled" filter_kwargs = {self.lookup_field: lookup} obj = self.get_object_or_404(queryset, **filter_kwargs) # May raise a permission denied self.check_object_permissions(self.request, obj) return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_object_or_404(self, queryset, *filter_args, **filter_kwargs): """Return an object or raise a 404. Same as Django's standard shortcut, but make sure to raise 404 if the filter_kwargs don't match the required types. """
if isinstance(queryset, CachedQueryset): try: return queryset.get(*filter_args, **filter_kwargs) except queryset.model.DoesNotExist: raise Http404( 'No %s matches the given query.' % queryset.model) else: return get_object_or_404(queryset, *filter_args, **filter_kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_cache_for_instance( model_name, instance_pk, instance=None, version=None): """Update the cache for an instance, with cascading updates."""
cache = SampleCache() invalid = cache.update_instance(model_name, instance_pk, instance, version) for invalid_name, invalid_pk, invalid_version in invalid: update_cache_for_instance.delay( invalid_name, invalid_pk, version=invalid_version)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def values_list(self, *args, **kwargs): """Return the primary keys as a list. The only valid call is values_list('pk', flat=True) """
flat = kwargs.pop('flat', False) assert flat is True assert len(args) == 1 assert args[0] == self.model._meta.pk.name return self.pks
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pks(self): """Lazy-load the primary keys."""
if self._primary_keys is None: self._primary_keys = list( self.queryset.values_list('pk', flat=True)) return self._primary_keys
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count(self): """Return a count of instances."""
if self._primary_keys is None: return self.queryset.count() else: return len(self.pks)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, **kwargs): """Filter the base queryset."""
assert not self._primary_keys self.queryset = self.queryset.filter(**kwargs) return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, *args, **kwargs): """Return the single item from the filtered queryset."""
assert not args assert list(kwargs.keys()) == ['pk'] pk = kwargs['pk'] model_name = self.model.__name__ object_spec = (model_name, pk, None) instances = self.cache.get_instances((object_spec,)) try: model_data = instances[(model_name, pk)][0] except KeyError: raise self.model.DoesNotExist( "No match for %r with args %r, kwargs %r" % (self.model, args, kwargs)) else: return CachedModel(self.model, model_data)
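A usage sketch for the lazy queryset above (SampleCache and User are the names used elsewhere in this document; whether get() succeeds depends on what the cache holds):

    cached_users = CachedQueryset(SampleCache(), queryset=User.objects.all())
    cached_users = cached_users.filter(is_active=True)  # narrows before pks load
    user = cached_users.get(pk=42)   # a CachedModel on a cache hit...
    # ...or User.DoesNotExist if ('User', 42) is absent from the cache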
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def collect(cls): """ Load all constant processors from settings.WEBPACK_CONSTANT_PROCESSORS and merge their results into a single dict. """
constants = {} for method_path in WebpackConstants.get_constant_processors(): method = import_string(method_path) if not callable(method): raise ImproperlyConfigured('Constant processor "%s" is not callable' % method_path) result = method(constants) if isinstance(result, dict): constants.update(result) return constants
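A minimal processor might look like this sketch (the settings name comes from the docstring above; the module path and constant values are hypothetical):

    # settings.py
    WEBPACK_CONSTANT_PROCESSORS = [
        'myapp.constants.language_constants',
    ]

    # myapp/constants.py
    def language_constants(existing_constants):
        # receives the constants collected so far; returns a dict to merge in
        return {'LANGUAGE_CODES': ['en', 'et']}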
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_catalog(self, locale): """Create Django translation catalogue for `locale`."""
with translation.override(locale): translation_engine = DjangoTranslation(locale, domain=self.domain, localedirs=self.paths) trans_cat = translation_engine._catalog trans_fallback_cat = translation_engine._fallback._catalog if translation_engine._fallback else {} return trans_cat, trans_fallback_cat
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_paths(cls, packages): """Create list of matching packages for translation engine."""
allowable_packages = dict((app_config.name, app_config) for app_config in apps.get_app_configs()) app_configs = [allowable_packages[p] for p in packages if p in allowable_packages] # paths of requested packages return [os.path.join(app.path, 'locale') for app in app_configs]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_catalogue_header_value(cls, catalog, key): """Get `.po` header value."""
header_value = None if '' in catalog: for line in catalog[''].split('\n'): if line.startswith('%s:' % key): header_value = line.split(':', 1)[1].strip() return header_value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _num_plurals(self, catalogue): """ Return the number of plurals for this catalog language, or 2 if no plural string is available. """
match = re.search(r'nplurals=\s*(\d+)', self.get_plural(catalogue) or '') if match: return int(match.groups()[0]) return 2
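For example, a gettext Plural-Forms value such as 'nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : 1);' yields 3, while a catalogue with no Plural-Forms header falls back to 2:

    import re

    plural = 'nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : 1);'
    match = re.search(r'nplurals=\s*(\d+)', plural)
    assert int(match.groups()[0]) == 3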
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_header(self, locale, catalog): """Populate header with correct data from top-most locale file."""
return { "po-revision-date": self.get_catalogue_header_value(catalog, 'PO-Revision-Date'), "mime-version": self.get_catalogue_header_value(catalog, 'MIME-Version'), "last-translator": 'Automatic <[email protected]>', "x-generator": "Python", "language": self.get_catalogue_header_value(catalog, 'Language') or locale, "lang": locale, "content-transfer-encoding": self.get_catalogue_header_value(catalog, 'Content-Transfer-Encoding'), "project-id-version": self.get_catalogue_header_value(catalog, 'Project-Id-Version'), "pot-creation-date": self.get_catalogue_header_value(catalog, 'POT-Creation-Date'), "domain": self.domain, "report-msgid-bugs-to": self.get_catalogue_header_value(catalog, 'Report-Msgid-Bugs-To'), "content-type": self.get_catalogue_header_value(catalog, 'Content-Type'), "plural-forms": self.get_plural(catalog), "language-team": self.get_catalogue_header_value(catalog, 'Language-Team') }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_endpoint_obj(client, endpoint, object_id): ''' Tiny helper function that gets used all over the place to join the object ID to the endpoint and run a GET request, returning the result ''' endpoint = '/'.join([endpoint, str(object_id)]) return client.authenticated_request(endpoint).json()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _validate_response(self, method, response): ''' Helper method to validate that the response to a Wunderlist API request is as expected ''' # TODO Fill this out using the error codes here: https://developer.wunderlist.com/documentation/concepts/formats # The expected results can change based on API version, so validate this here if self.api_version: if response.status_code >= 400: raise ValueError('{} {}'.format(response.status_code, str(response.json()))) if method == 'GET': assert response.status_code == 200 elif method == 'POST': assert response.status_code == 201 elif method == 'PATCH': assert response.status_code == 200 elif method == 'DELETE': assert response.status_code == 204
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def request(self, endpoint, method='GET', headers=None, params=None, data=None): ''' Send a request to the given Wunderlist API endpoint Params: endpoint -- API endpoint to send request to Keyword Args: headers -- headers to add to the request method -- GET, PUT, PATCH, DELETE, etc. params -- parameters to encode in the request data -- data to send with the request ''' if not headers: headers = {} if method in ['POST', 'PATCH', 'PUT']: headers['Content-Type'] = 'application/json' url = '/'.join([self.api_url, 'v' + self.api_version, endpoint]) data = json.dumps(data) if data else None try: response = requests.request(method=method, url=url, params=params, headers=headers, data=data) # TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use except requests.exceptions.Timeout as e: raise wp_exceptions.TimeoutError(e) except requests.exceptions.ConnectionError as e: raise wp_exceptions.ConnectionError(e) self._validate_response(method, response) return response
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_access_token(self, code, client_id, client_secret): ''' Exchange a temporary code for an access token allowing access to a user's account See https://developer.wunderlist.com/documentation/concepts/authorization for more info ''' headers = { 'Content-Type' : 'application/json' } data = { 'client_id' : client_id, 'client_secret' : client_secret, 'code' : code, } str_data = json.dumps(data) response = requests.request(method='POST', url=ACCESS_TOKEN_URL, headers=headers, data=str_data) status_code = response.status_code if status_code != 200: raise ValueError("{} -- {}".format(status_code, response.json())) body = response.json() return body['access_token']
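A sketch of the OAuth exchange from the caller's side (the WunderApi class name and credential values are assumptions; the temporary code comes from Wunderlist's redirect back to your app):

    api = WunderApi()
    access_token = api.get_access_token(
        code='code-from-oauth-redirect',
        client_id='your-client-id',
        client_secret='your-client-secret')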
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def authenticated_request(self, endpoint, method='GET', params=None, data=None): ''' Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID' headers and ensure the response code is as expected given the request type Params: endpoint -- API endpoint to send request to Keyword Args: method -- GET, PUT, PATCH, DELETE, etc. params -- parameters to encode in the request data -- data to send with the request ''' headers = { 'X-Access-Token' : self.access_token, 'X-Client-ID' : self.client_id } return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def update_list(self, list_id, revision, title=None, public=None): ''' Updates the list with the given ID to have the given title and public flag ''' return lists_endpoint.update_list(self, list_id, revision, title=title, public=public)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get_tasks(self, list_id, completed=False): ''' Gets tasks for the list with the given ID, filtered by the given completion flag ''' return tasks_endpoint.get_tasks(self, list_id, completed=completed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def create_task(self, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None): ''' Creates a new task with the given information in the list with the given ID ''' return tasks_endpoint.create_task(self, list_id, title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def update_note(self, note_id, revision, content): ''' Updates the note with the given ID to have the given content ''' return notes_endpoint.update_note(self, note_id, revision, content)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def update_task_positions_obj(self, positions_obj_id, revision, values): ''' Updates the ordering of tasks in the positions object with the given ID to the ordering in the given values. See https://developer.wunderlist.com/documentation/endpoints/positions for more info Return: The updated TaskPositionsObj-mapped object defining the ordering of tasks within the list ''' return positions_endpoints.update_task_positions_obj(self, positions_obj_id, revision, values)