Dataset schema: text_prompt (string, lengths 100 to 17.7k) · code_prompt (string, lengths 7 to 9.86k)
<SYSTEM_TASK:> Report an error and exit. <END_TASK> <USER_TASK:> Description: def exit_with_error(self, error, **kwargs): """Report an error and exit. This raises a SystemExit exception to ask the interpreter to quit. Parameters ---------- error: string The error to report before quitting. """
self.error(error, **kwargs) raise SystemExit(error)
<SYSTEM_TASK:> Decorator defining that this test gets parameters from the given <END_TASK> <USER_TASK:> Description: def genty_dataprovider(builder_function): """Decorator defining that this test gets parameters from the given builder_function. :param builder_function: A callable that returns parameters that will be passed to the method decorated by this decorator. If the builder_function returns a tuple or list, then that will be passed as *args to the decorated method. If the builder_function returns a :class:`GentyArgs`, then that will be used to pass *args and **kwargs to the decorated method. Any other return value will be treated as a single parameter, and passed as such to the decorated method. :type builder_function: `callable` """
datasets = getattr(builder_function, 'genty_datasets', {None: ()}) def wrap(test_method): # Save the data providers in the test method. This data will be # consumed by the @genty decorator. if not hasattr(test_method, 'genty_dataproviders'): test_method.genty_dataproviders = [] test_method.genty_dataproviders.append( (builder_function, datasets), ) return test_method return wrap
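A minimal usage sketch, assuming the standard genty imports and a hypothetical test class; note the builder itself can be parameterized with @genty_dataset, which is what the getattr above reads back:

import unittest
from genty import genty, genty_dataset, genty_dataprovider

@genty_dataset((1, 2), (10, 20))
def build_sum_case(x, y):
    # The returned tuple becomes the *args of the decorated test method.
    return x, y, x + y

@genty
class SumTest(unittest.TestCase):
    @genty_dataprovider(build_sum_case)
    def test_sum(self, x, y, expected):
        self.assertEqual(expected, x + y)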
<SYSTEM_TASK:> Decorator defining data sets to provide to a test. <END_TASK> <USER_TASK:> Description: def genty_dataset(*args, **kwargs): """Decorator defining data sets to provide to a test. Inspired by http://sebastian-bergmann.de/archives/702-Data-Providers-in-PHPUnit-3.2.html The canonical way to call @genty_dataset, with each argument representing a data set to be injected in the test method call: @genty_dataset( ('a1', 'b1'), ('a2', 'b2'), ) def test_some_function(a, b) ... If the test function takes only one parameter, you can replace the tuples by a single value. So instead of the more verbose: @genty_dataset( ('c1',), ('c2',), ) def test_some_other_function(c) ... One can write: @genty_dataset('c1', 'c2') def test_some_other_function(c) ... For each set of arguments, a suffix identifying that argument set is built by concatenating the string representation of the arguments together. You can control the test names for each data set by passing the data sets as keyword args, where the keyword is the desired suffix. For example: @genty_dataset( ('a1', 'b1'), ) def test_function(a, b) ... produces a test named 'test_function_for_a1_and_b1', while @genty_dataset( happy_path=('a1', 'b1'), ) def test_function(a, b) ... produces a test named test_function_for_happy_path. These are just parameters to a method call, so one can have unnamed args first followed by keyword args: @genty_dataset( ('x', 'y'), ('p', 'q'), Monday=('a1', 'b1'), Tuesday=('t1', 't2'), ) def test_function(a, b) ... Finally, datasets can be chained. Useful for example if there are distinct sets of params that make sense (cleaner, more readable, or semantically nicer) if kept separate. A fabricated example: @genty_dataset( *([i for i in range(10)] + [(i, i) for i in range(10)]) ) def test_some_other_function(param1, param2=None) ... -- vs -- @genty_dataset(*[i for i in range(10)]) @genty_dataset(*[(i, i) for i in range(10)]) def test_some_other_function(param1, param2=None) ... If the names of datasets conflict across chained genty_datasets, the key/value pair from the outer (first) decorator will override the data from the inner. :param args: Tuple of unnamed data sets. :type args: `tuple` of varies :param kwargs: Dict of pre-named data sets. :type kwargs: `dict` of `unicode` to varies """
datasets = _build_datasets(*args, **kwargs) def wrap(test_method): # Save the datasets in the test method. This data will be consumed # by the @genty decorator. if not hasattr(test_method, 'genty_datasets'): test_method.genty_datasets = OrderedDict() test_method.genty_datasets.update(datasets) return test_method return wrap
<SYSTEM_TASK:> Build the datasets into a dict, where the keys are the name of the <END_TASK> <USER_TASK:> Description: def _build_datasets(*args, **kwargs): """Build the datasets into a dict, where the keys are the name of the data set and the values are the data sets themselves. :param args: Tuple of unnamed data sets. :type args: `tuple` of varies :param kwargs: Dict of pre-named data sets. :type kwargs: `dict` of `unicode` to varies :return: The dataset dict. :rtype: `dict` """
datasets = OrderedDict() _add_arg_datasets(datasets, args) _add_kwarg_datasets(datasets, kwargs) return datasets
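As a sketch of the resulting structure (the suffix strings are illustrative; the exact text depends on format_arg):

# _build_datasets(('a', 'b'), happy_path=('x', 'y')) returns an
# OrderedDict along the lines of:
#   {"'a', 'b'":   ('a', 'b'),   # key built from the formatted args
#    'happy_path': ('x', 'y')}   # key taken from the kwarg name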
<SYSTEM_TASK:> Add data sets of the given args. <END_TASK> <USER_TASK:> Description: def _add_arg_datasets(datasets, args): """Add data sets of the given args. :param datasets: The dict where to accumulate data sets. :type datasets: `dict` :param args: Tuple of unnamed data sets. :type args: `tuple` of varies """
for dataset in args: # turn a value into a 1-tuple. if not isinstance(dataset, (tuple, GentyArgs)): dataset = (dataset,) # Create a test_name_suffix - basically the parameter list if isinstance(dataset, GentyArgs): dataset_strings = dataset # GentyArgs supports iteration else: dataset_strings = [format_arg(data) for data in dataset] test_method_suffix = ", ".join(dataset_strings) datasets[test_method_suffix] = dataset
<SYSTEM_TASK:> Add data sets of the given kwargs. <END_TASK> <USER_TASK:> Description: def _add_kwarg_datasets(datasets, kwargs): """Add data sets of the given kwargs. :param datasets: The dict where to accumulate data sets. :type datasets: `dict` :param kwargs: Dict of pre-named data sets. :type kwargs: `dict` of `unicode` to varies """
for test_method_suffix, dataset in six.iteritems(kwargs): datasets[test_method_suffix] = dataset
<SYSTEM_TASK:> Removes the hanging indent from every line of a string after the first. <END_TASK> <USER_TASK:> Description: def dedent(s): """Removes the hanging indent from every line of a string after the first."""
head, _, tail = s.partition('\n') dedented_tail = textwrap.dedent(tail) result = "{head}\n{tail}".format( head=head, tail=dedented_tail) return result
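A quick illustration: the first line is kept verbatim while the remainder is run through textwrap.dedent:

s = "Summary line\n    detail one\n    detail two"
print(dedent(s))
# Summary line
# detail one
# detail two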
<SYSTEM_TASK:> The top-level documentation string for the program. <END_TASK> <USER_TASK:> Description: def top_level_doc(self): """The top-level documentation string for the program. """
return self._doc_template.format( available_commands='\n '.join(sorted(self._commands)), program=self.program)
<SYSTEM_TASK:> A decorator to add subcommands. <END_TASK> <USER_TASK:> Description: def command(self, name=None): """A decorator to add subcommands. """
def decorator(f): self.add_command(f, name) return f return decorator
<SYSTEM_TASK:> Add a subcommand `name` which invokes `handler`. <END_TASK> <USER_TASK:> Description: def add_command(self, handler, name=None): """Add a subcommand `name` which invokes `handler`. """
if name is None: name = docstring_to_subcommand(handler.__doc__) # TODO: Prevent overwriting 'help'? self._commands[name] = handler
<SYSTEM_TASK:> Convert a single word from Finglish to Persian. <END_TASK> <USER_TASK:> Description: def f2p_word(word, max_word_size=15, cutoff=3): """Convert a single word from Finglish to Persian. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. """
original_word = word word = word.lower() c = dictionary.get(word) if c: return [(c, 1.0)] if word == '': return [] elif len(word) > max_word_size: return [(original_word, 1.0)] results = [] for w in variations(word): results.extend(f2p_word_internal(w, original_word)) # sort results based on the confidence value results.sort(key=lambda r: r[1], reverse=True) # return the top three results in order to cut down on the number # of possibilities. return results[:cutoff]
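A usage sketch (the input word is hypothetical; actual candidates depend on the bundled dictionary):

# Returns up to `cutoff` (word, confidence) pairs, best match first.
for persian, confidence in f2p_word("salam"):
    print(persian, confidence)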
<SYSTEM_TASK:> Convert a phrase from Finglish to Persian. <END_TASK> <USER_TASK:> Description: def f2p_list(phrase, max_word_size=15, cutoff=3): """Convert a phrase from Finglish to Persian. phrase: The phrase to convert. max_word_size: Maximum size of the words to consider. Words larger than this will be kept unchanged. cutoff: The cut-off point. For each word, there could be many possibilities. By default 3 of these possibilities are considered for each word. This number can be changed by this argument. Returns a list of lists, each sub-list contains a number of possibilities for each word as a pair of (word, confidence) values. """
# split the phrase into words results = [w for w in sep_regex.split(phrase) if w] # return an empty list if no words if results == []: return [] # convert each word separately results = [f2p_word(w, max_word_size, cutoff) for w in results] return results
<SYSTEM_TASK:> Convert a Finglish phrase to the most probable Persian phrase. <END_TASK> <USER_TASK:> Description: def f2p(phrase, max_word_size=15, cutoff=3): """Convert a Finglish phrase to the most probable Persian phrase. """
results = f2p_list(phrase, max_word_size, cutoff) return ' '.join(i[0][0] for i in results)
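The relationship between the three functions, as a sketch (the input phrase is hypothetical):

phrase = "salam donya"            # hypothetical Finglish input
candidates = f2p_list(phrase)     # one (word, confidence) list per word
best = ' '.join(words[0][0] for words in candidates)
assert best == f2p(phrase)        # f2p() just joins the top candidates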
<SYSTEM_TASK:> try to get the version of the named distribution, <END_TASK> <USER_TASK:> Description: def distribution_version(name): """try to get the version of the named distribution, returns None on failure"""
from pkg_resources import get_distribution, DistributionNotFound try: dist = get_distribution(name) except DistributionNotFound: pass else: return dist.version
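Typical use, a sketch:

version = distribution_version("requests")  # hypothetical distribution name
if version is None:
    print("requests is not installed")
else:
    print("requests", version)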
<SYSTEM_TASK:> initialize given package from the export definitions. <END_TASK> <USER_TASK:> Description: def initpkg(pkgname, exportdefs, attr=None, eager=False): """ initialize given package from the export definitions. """
attr = attr or {}
oldmod = sys.modules.get(pkgname)
d = {}
f = getattr(oldmod, '__file__', None)
if f:
    f = _py_abspath(f)
d['__file__'] = f
if hasattr(oldmod, '__version__'):
    d['__version__'] = oldmod.__version__
if hasattr(oldmod, '__loader__'):
    d['__loader__'] = oldmod.__loader__
if hasattr(oldmod, '__path__'):
    d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
if hasattr(oldmod, '__package__'):
    d['__package__'] = oldmod.__package__
if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
    d['__doc__'] = oldmod.__doc__
d.update(attr)
if hasattr(oldmod, "__dict__"):
    oldmod.__dict__.update(d)
mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
sys.modules[pkgname] = mod
# eagerload in bpython to avoid their monkeypatching breaking packages
if 'bpython' in sys.modules or eager:
    for module in list(sys.modules.values()):
        if isinstance(module, ApiModule):
            module.__dict__  # touching __dict__ forces all lazy attributes to resolve
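A sketch of the export-definition format this consumes: attribute names mapped to "modpath:attrname" strings, resolved lazily by importobj on first access (the package names below are hypothetical):

initpkg('mypkg', {
    'Widget': 'mypkg._impl.widgets:Widget',
    'load':   'mypkg._impl.io:load',
})
# After this, `import mypkg; mypkg.Widget` imports mypkg._impl.widgets
# and resolves the Widget attribute on first access.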
<SYSTEM_TASK:> imports a module, then resolves the attrname on it <END_TASK> <USER_TASK:> Description: def importobj(modpath, attrname): """imports a module, then resolves the attrname on it"""
module = __import__(modpath, None, None, ['__doc__']) if not attrname: return module retval = module names = attrname.split(".") for x in names: retval = getattr(retval, x) return retval
<SYSTEM_TASK:> lazily compute value for name or raise AttributeError if unknown. <END_TASK> <USER_TASK:> Description: def __makeattr(self, name): """lazily compute value for name or raise AttributeError if unknown."""
# print "makeattr", self.__name__, name target = None if '__onfirstaccess__' in self.__map__: target = self.__map__.pop('__onfirstaccess__') importobj(*target)() try: modpath, attrname = self.__map__[name] except KeyError: if target is not None and name != '__onfirstaccess__': # retry, onfirstaccess might have set attrs return getattr(self, name) raise AttributeError(name) else: result = importobj(modpath, attrname) setattr(self, name, result) try: del self.__map__[name] except KeyError: pass # in a recursive-import situation a double-del can happen return result
<SYSTEM_TASK:> Create new Url which points to new url. <END_TASK> <USER_TASK:> Description: def _new_url(self, relative_url): """Create new Url which points to new url."""
return Url( urljoin(self._base_url, relative_url), **self._default_kwargs )
<SYSTEM_TASK:> deletes a group <END_TASK> <USER_TASK:> Description: def del_role(self, role): """ deletes a group """
target = AuthGroup.objects(role=role, creator=self.client).first() if target: target.delete() return True else: return False
<SYSTEM_TASK:> make user a member of a group <END_TASK> <USER_TASK:> Description: def add_membership(self, user, role): """ make user a member of a group """
targetGroup = AuthGroup.objects(role=role, creator=self.client).first()
if not targetGroup:
    return False
target = AuthMembership.objects(user=user, creator=self.client).first()
if not target:
    target = AuthMembership(user=user, creator=self.client)
if role not in [i.role for i in target.groups]:
    target.groups.append(targetGroup)
    target.save()
return True
<SYSTEM_TASK:> dismember user from a group <END_TASK> <USER_TASK:> Description: def del_membership(self, user, role): """ dismember user from a group """
if not self.has_membership(user, role):
    return True
targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
if not targetRecord:
    return True
# iterate over a copy: removing from the list being iterated would skip items
for group in list(targetRecord.groups):
    if group.role == role:
        targetRecord.groups.remove(group)
targetRecord.save()
return True
<SYSTEM_TASK:> checks if user is member of a group <END_TASK> <USER_TASK:> Description: def has_membership(self, user, role): """ checks if user is member of a group"""
targetRecord = AuthMembership.objects(creator=self.client, user=user).first() if targetRecord: return role in [i.role for i in targetRecord.groups] return False
<SYSTEM_TASK:> authorize a group for something <END_TASK> <USER_TASK:> Description: def add_permission(self, role, name): """ authorize a group for something """
if self.has_permission(role, name): return True targetGroup = AuthGroup.objects(role=role, creator=self.client).first() if not targetGroup: return False # Create or update permission = AuthPermission.objects(name=name).update( add_to_set__groups=[targetGroup], creator=self.client, upsert=True ) return True
<SYSTEM_TASK:> revoke authorization of a group <END_TASK> <USER_TASK:> Description: def del_permission(self, role, name): """ revoke authorization of a group """
if not self.has_permission(role, name): return True targetGroup = AuthGroup.objects(role=role, creator=self.client).first() target = AuthPermission.objects(groups=targetGroup, name=name, creator=self.client).first() if not target: return True target.delete() return True
<SYSTEM_TASK:> verify user has permission <END_TASK> <USER_TASK:> Description: def user_has_permission(self, user, name): """ verify user has permission """
targetRecord = AuthMembership.objects(creator=self.client, user=user).first() if not targetRecord: return False for group in targetRecord.groups: if self.has_permission(group.role, name): return True return False
<SYSTEM_TASK:> Signal decorator to allow use of callback functions as class decorators. <END_TASK> <USER_TASK:> Description: def handler(event): """Signal decorator to allow use of callback functions as class decorators."""
def decorator(fn): def apply(cls): event.connect(fn, sender=cls) return cls fn.apply = apply return fn return decorator
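A sketch of the decorator in use, assuming a blinker-style signal whose connect() accepts a sender argument:

from blinker import signal

before_save = signal('before-save')

@handler(before_save)
def audit(sender, **kwargs):
    print('saving', sender)

# fn.apply is the class decorator that wires the callback to the class:
@audit.apply
class Document(object):
    pass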
<SYSTEM_TASK:> Dump dict response bodies as JSON. <END_TASK> <USER_TASK:> Description: def stringify(req, resp): """ Dumps dict response bodies as JSON. This is the last after-hook to run. """
if isinstance(resp.body, dict):
    try:
        resp.body = json.dumps(resp.body)
    except (TypeError, ValueError):
        # json.dumps raises TypeError/ValueError on unserializable bodies
        resp.status = falcon.HTTP_500
<SYSTEM_TASK:> Create a ParamList instance for actual interpretation <END_TASK> <USER_TASK:> Description: def instantiate(self, scope, args, interp): """Create a ParamList instance for actual interpretation :args: TODO :returns: A ParamList object """
param_instances = [] BYREF = "byref" # TODO are default values for function parameters allowed in 010? for param_name, param_cls in self._params: # we don't instantiate a copy of byref params if getattr(param_cls, "byref", False): param_instances.append(BYREF) else: field = param_cls() field._pfp__name = param_name param_instances.append(field) if len(args) != len(param_instances): raise errors.InvalidArguments( self._coords, [x.__class__.__name__ for x in args], [x.__class__.__name__ for x in param_instances] ) # TODO type checking on provided types for x in six.moves.range(len(args)): param = param_instances[x] # arrays are simply passed through into the function. We shouldn't # have to worry about frozenness/unfrozenness at this point if param is BYREF or isinstance(param, pfp.fields.Array): param = args[x] param_instances[x] = param scope.add_local(self._params[x][0], param) else: param._pfp__set_value(args[x]) scope.add_local(param._pfp__name, param) param._pfp__interp = interp return ParamList(param_instances)
<SYSTEM_TASK:> Parse the data stream using the supplied template. The data stream <END_TASK> <USER_TASK:> Description: def parse( data = None, template = None, data_file = None, template_file = None, interp = None, debug = False, predefines = True, int3 = True, keep_successful = False, printf = True, ): """Parse the data stream using the supplied template. The data stream WILL NOT be automatically closed. :data: Input data, can be either a string or a file-like object (StringIO, file, etc) :template: template contents (str) :data_file: PATH to the data to be used as the input stream :template_file: template file path :interp: the interpreter to be used (a default one will be created if ``None``) :debug: if debug information should be printed while interpreting the template (false) :predefines: if built-in type information should be inserted (true) :int3: if debugger breaks are allowed while interpreting the template (true) :keep_successful: return any successfully parsed data instead of raising an error. If an error occurred and ``keep_successful`` is True, then ``_pfp__error`` will contain the exception object :printf: if ``False``, all calls to ``Printf`` (:any:`pfp.native.compat_interface.Printf`) will be noops. (default=``True``) :returns: pfp DOM """
if data is None and data_file is None: raise Exception("No input data was specified") if data is not None and data_file is not None: raise Exception("Only one input data may be specified") if isinstance(data, six.string_types): data = six.StringIO(data) if data_file is not None: data = open(os.path.expanduser(data_file), "rb") if template is None and template_file is None: raise Exception("No template specified!") if template is not None and template_file is not None: raise Exception("Only one template may be specified!") orig_filename = "string" if template_file is not None: orig_filename = template_file try: with open(os.path.expanduser(template_file), "r") as f: template = f.read() except Exception as e: raise Exception("Could not open template file '{}'".format(template_file)) # the user may specify their own instance of PfpInterp to be # used if interp is None: interp = pfp.interp.PfpInterp( debug = debug, parser = PARSER, int3 = int3, ) # so we can consume single bits at a time data = BitwrappedStream(data) dom = interp.parse( data, template, predefines = predefines, orig_filename = orig_filename, keep_successful = keep_successful, printf = printf, ) # close the data stream if a data_file was specified if data_file is not None: data.close() return dom
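A usage sketch (the file path and template fields are hypothetical; the template uses 010 Editor syntax):

import pfp

dom = pfp.parse(
    data_file="firmware.bin",  # hypothetical input file
    template="""
        struct HEADER {
            char magic[4];
            uint length;
        } header;
    """,
)
print(dom._pfp__show())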
<SYSTEM_TASK:> This function is identical to the FindAll function except that the <END_TASK> <USER_TASK:> Description: def FindFirst(params, ctxt, scope, stream, coord, interp): """ This function is identical to the FindAll function except that the return value is the position of the first occurrence of the target found. A negative number is returned if the value could not be found. """
global FIND_MATCHES_ITER FIND_MATCHES_ITER = _find_helper(params, ctxt, scope, stream, coord, interp) try: first = six.next(FIND_MATCHES_ITER) return first.start() + FIND_MATCHES_START_OFFSET except StopIteration as e: return -1
<SYSTEM_TASK:> This function returns the position of the next occurrence of the <END_TASK> <USER_TASK:> Description: def FindNext(params, ctxt, scope, stream, coord): """ This function returns the position of the next occurrence of the target value specified with the FindFirst function. If dir is 1, the find direction is down. If dir is 0, the find direction is up. The return value is the address of the found data, or -1 if the target is not found. """
if FIND_MATCHES_ITER is None: raise errors.InvalidState() direction = 1 if len(params) > 0: direction = PYVAL(params[0]) if direction != 1: # TODO maybe instead of storing the iterator in FIND_MATCHES_ITER, # we should go ahead and find _all the matches in the file and store them # in a list, keeping track of the idx of the current match. # # This would be highly inefficient on large files though. raise NotImplementedError("Reverse searching is not yet implemented") try: next_match = six.next(FIND_MATCHES_ITER) return next_match.start() + FIND_MATCHES_START_OFFSET except StopIteration as e: return -1
<SYSTEM_TASK:> Used as a decorator to add the decorated function to the <END_TASK> <USER_TASK:> Description: def native(name, ret, interp=None, send_interp=False): """Used as a decorator to add the decorated function to the pfp interpreter so that it can be used from within scripts. :param str name: The name of the function as it will be exposed in template scripts. :param pfp.fields.Field ret: The return type of the function (a class) :param pfp.interp.PfpInterp interp: The specific interpreter to add the function to :param bool send_interp: If the current interpreter should be passed to the function. Examples: The example below defines a ``Sum`` function that will return the sum of all parameters passed to the function: :: from pfp.fields import PYVAL @native(name="Sum", ret=pfp.fields.Int64) def sum_numbers(params, ctxt, scope, stream, coord): res = 0 for param in params: res += PYVAL(param) return res The code below is the code for the :any:`Int3 <pfp.native.dbg.int3>` function. Notice that it requires that the interpreter be sent as a parameter: :: @native(name="Int3", ret=pfp.fields.Void, send_interp=True) def int3(params, ctxt, scope, stream, coord, interp): if interp._no_debug: return if interp._int3: interp.debugger = PfpDbg(interp) interp.debugger.cmdloop() """
def native_decorator(func): @functools.wraps(func) def native_wrapper(*args, **kwargs): return func(*args, **kwargs) pfp.interp.PfpInterp.add_native(name, func, ret, interp=interp, send_interp=send_interp) return native_wrapper return native_decorator
<SYSTEM_TASK:> Eval the user-supplied statement. Note that you can do anything with <END_TASK> <USER_TASK:> Description: def do_eval(self, args): """Eval the user-supplied statement. Note that you can do anything with this command that you can do in a template. The resulting value of your statement will be displayed. """
try:
    res = self._interp.eval(args)
    if res is not None:
        if hasattr(res, "_pfp__show"):
            print(res._pfp__show())
        else:
            print(repr(res))
except errors.UnresolvedID as e:
    print("ERROR: " + str(e))
except Exception as e:
    print("ERROR: " + str(e))
return False
<SYSTEM_TASK:> Replace passwords with a secret in a dictionary. <END_TASK> <USER_TASK:> Description: def mask_dict_password(dictionary, secret='***'): """Replace passwords with a secret in a dictionary."""
d = dictionary.copy() for k in d: if 'password' in k: d[k] = secret return d
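For example, any key containing 'password' is masked (this follows directly from the substring check above):

creds = {'user': 'admin', 'password': 'hunter2', 'db_password': 's3cret'}
print(mask_dict_password(creds))
# {'user': 'admin', 'password': '***', 'db_password': '***'}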
<SYSTEM_TASK:> Return ``num_bits`` bits, taking into account endianness and <END_TASK> <USER_TASK:> Description: def read_bits(self, stream, num_bits, padded, left_right, endian): """Return ``num_bits`` bits, taking into account endianness and left-right bit directions """
if self._cls_bits is None and padded: raw_bits = stream.read_bits(self.cls.width*8) self._cls_bits = self._endian_transform(raw_bits, endian) if self._cls_bits is not None: if num_bits > len(self._cls_bits): raise errors.PfpError("BitfieldRW reached invalid state") if left_right: res = self._cls_bits[:num_bits] self._cls_bits = self._cls_bits[num_bits:] else: res = self._cls_bits[-num_bits:] self._cls_bits = self._cls_bits[:-num_bits] return res else: return stream.read_bits(num_bits)
<SYSTEM_TASK:> Write the bits. Once the size of the written bits is equal <END_TASK> <USER_TASK:> Description: def write_bits(self, stream, raw_bits, padded, left_right, endian): """Write the bits. Once the size of the written bits is equal to the number of the reserved bits, flush it to the stream """
if padded: if left_right: self._write_bits += raw_bits else: self._write_bits = raw_bits + self._write_bits if len(self._write_bits) == self.reserved_bits: bits = self._endian_transform(self._write_bits, endian) # if it's padded, and all of the bits in the field weren't used, # we need to flush out the unused bits # TODO should we save the value of the unused bits so the data that # is written out matches exactly what was read? if self.reserved_bits < self.cls.width * 8: filler = [0] * ((self.cls.width * 8) - self.reserved_bits) if left_right: bits += filler else: bits = filler + bits stream.write_bits(bits) self._write_bits = [] else: # if an unpadded field ended up using the same BitfieldRW and # as a previous padded field, there will be unwritten bits left in # self._write_bits. These need to be flushed out as well if len(self._write_bits) > 0: stream.write_bits(self._write_bits) self._write_bits = [] stream.write_bits(raw_bits)
<SYSTEM_TASK:> Process the metadata once the entire struct has been <END_TASK> <USER_TASK:> Description: def _pfp__process_metadata(self): """Process the metadata once the entire struct has been declared. """
if self._pfp__metadata_processor is None: return metadata_info = self._pfp__metadata_processor() if isinstance(metadata_info, list): for metadata in metadata_info: if metadata["type"] == "watch": self._pfp__set_watch( metadata["watch_fields"], metadata["update_func"], *metadata["func_call_info"] ) elif metadata["type"] == "packed": del metadata["type"] self._pfp__set_packer(**metadata) if self._pfp__can_unpack(): self._pfp__unpack_data(self.raw_data)
<SYSTEM_TASK:> Add the watcher to the list of fields that <END_TASK> <USER_TASK:> Description: def _pfp__watch(self, watcher): """Add the watcher to the list of fields that are watching this field """
if self._pfp__parent is not None and isinstance(self._pfp__parent, Union): self._pfp__parent._pfp__watch(watcher) else: self._pfp__watchers.append(watcher)
<SYSTEM_TASK:> Subscribe to update events on each field in ``watch_fields``, using <END_TASK> <USER_TASK:> Description: def _pfp__set_watch(self, watch_fields, update_func, *func_call_info): """Subscribe to update events on each field in ``watch_fields``, using ``update_func`` to update self's value when ``watch_field`` changes"""
self._pfp__watch_fields = watch_fields for watch_field in watch_fields: watch_field._pfp__watch(self) self._pfp__update_func = update_func self._pfp__update_func_call_info = func_call_info
<SYSTEM_TASK:> Pack the nested field <END_TASK> <USER_TASK:> Description: def _pfp__pack_data(self): """Pack the nested field """
if self._pfp__pack_type is None: return tmp_stream = six.BytesIO() self._._pfp__build(bitwrap.BitwrappedStream(tmp_stream)) raw_data = tmp_stream.getvalue() unpack_func = self._pfp__packer unpack_args = [] if self._pfp__packer is not None: unpack_func = self._pfp__packer unpack_args = [true(), raw_data] elif self._pfp__pack is not None: unpack_func = self._pfp__pack unpack_args = [raw_data] # does not need to be converted to a char array if not isinstance(unpack_func, functions.NativeFunction): io_stream = bitwrap.BitwrappedStream(six.BytesIO(raw_data)) unpack_args[-1] = Array(len(raw_data), Char, io_stream) res = unpack_func.call(unpack_args, *self._pfp__pack_func_call_info, no_cast=True) if isinstance(res, Array): res = res._pfp__build() io_stream = six.BytesIO(res) tmp_stream = bitwrap.BitwrappedStream(io_stream) self._pfp__no_unpack = True self._pfp__parse(tmp_stream) self._pfp__no_unpack = False
<SYSTEM_TASK:> Means that the field has already been parsed normally, <END_TASK> <USER_TASK:> Description: def _pfp__unpack_data(self, raw_data): """Means that the field has already been parsed normally, and that it now needs to be unpacked. :raw_data: A string of the data that the field consumed while parsing """
if self._pfp__pack_type is None: return if self._pfp__no_unpack: return unpack_func = self._pfp__packer unpack_args = [] if self._pfp__packer is not None: unpack_func = self._pfp__packer unpack_args = [false(), raw_data] elif self._pfp__unpack is not None: unpack_func = self._pfp__unpack unpack_args = [raw_data] # does not need to be converted to a char array if not isinstance(unpack_func, functions.NativeFunction): io_stream = bitwrap.BitwrappedStream(six.BytesIO(raw_data)) unpack_args[-1] = Array(len(raw_data), Char, io_stream) res = unpack_func.call(unpack_args, *self._pfp__pack_func_call_info, no_cast=True) if isinstance(res, Array): res = res._pfp__build() io_stream = six.BytesIO(res) tmp_stream = bitwrap.BitwrappedStream(io_stream) tmp_stream.padded = self._pfp__interp.get_bitfield_padded() self._ = self._pfp__parsed_packed = self._pfp__pack_type(tmp_stream) self._._pfp__watch(self)
<SYSTEM_TASK:> Handle the watched field that was updated <END_TASK> <USER_TASK:> Description: def _pfp__handle_updated(self, watched_field): """Handle the watched field that was updated """
self._pfp__no_notify = True # nested data has been changed, so rebuild the # nested data to update the field # TODO a global setting to determine this behavior? # could slow things down a bit for large nested structures # notice the use of _is_ here - 'is' != '=='. '==' uses # the __eq__ operator, while is compares id(object) results if watched_field is self._: self._pfp__pack_data() elif self._pfp__update_func is not None: self._pfp__update_func.call( [self] + self._pfp__watch_fields, *self._pfp__update_func_call_info ) self._pfp__no_notify = False
<SYSTEM_TASK:> Restore the snapshotted value without triggering any events <END_TASK> <USER_TASK:> Description: def _pfp__restore_snapshot(self, recurse=True): """Restore the snapshotted value without triggering any events """
super(Struct, self)._pfp__restore_snapshot(recurse=recurse) if recurse: for child in self._pfp__children: child._pfp__restore_snapshot(recurse=recurse)
<SYSTEM_TASK:> Initialize the struct. Value should be an array of <END_TASK> <USER_TASK:> Description: def _pfp__set_value(self, value): """Initialize the struct. Value should be an array of fields, one each for each struct member. :value: An array of fields to initialize the struct with :returns: None """
if self._pfp__frozen: raise errors.UnmodifiableConst() if len(value) != len(self._pfp__children): raise errors.PfpError("struct initialization has wrong number of members") for x in six.moves.range(len(self._pfp__children)): self._pfp__children[x]._pfp__set_value(value[x])
<SYSTEM_TASK:> Add a child to the Struct field. If multiple consecutive fields are <END_TASK> <USER_TASK:> Description: def _pfp__add_child(self, name, child, stream=None, overwrite=False): """Add a child to the Struct field. If multiple consecutive fields are added with the same name, an implicit array will be created to store all fields of that name. :param str name: The name of the child :param pfp.fields.Field child: The field to add :param bool overwrite: Overwrite existing fields (False) :param pfp.bitwrap.BitwrappedStream stream: unused, but here for compatibility with Union._pfp__add_child :returns: The resulting field added """
if not overwrite and self._pfp__is_non_consecutive_duplicate(name, child): return self._pfp__handle_non_consecutive_duplicate(name, child) elif not overwrite and name in self._pfp__children_map: return self._pfp__handle_implicit_array(name, child) else: child._pfp__parent = self self._pfp__children.append(child) child._pfp__name = name self._pfp__children_map[name] = child return child
<SYSTEM_TASK:> Handle inserting implicit array elements <END_TASK> <USER_TASK:> Description: def _pfp__handle_implicit_array(self, name, child): """Handle inserting implicit array elements """
existing_child = self._pfp__children_map[name] if isinstance(existing_child, Array): # I don't think we should check this # #if existing_child.field_cls != child.__class__: # raise errors.PfpError("implicit arrays must be sequential!") existing_child.append(child) return existing_child else: cls = child._pfp__class if hasattr(child, "_pfp__class") else child.__class__ ary = Array(0, cls) # since the array starts with the first item ary._pfp__offset = existing_child._pfp__offset ary._pfp__parent = self ary._pfp__name = name ary.implicit = True ary.append(existing_child) ary.append(child) exist_idx = -1 for idx,child in enumerate(self._pfp__children): if child is existing_child: exist_idx = idx break self._pfp__children[exist_idx] = ary self._pfp__children_map[name] = ary return ary
<SYSTEM_TASK:> Show the contents of the struct <END_TASK> <USER_TASK:> Description: def _pfp__show(self, level=0, include_offset=False): """Show the contents of the struct """
res = [] res.append("{}{} {{".format( "{:04x} ".format(self._pfp__offset) if include_offset else "", self._pfp__show_name )) for child in self._pfp__children: res.append("{}{}{:10s} = {}".format( " "*(level+1), "{:04x} ".format(child._pfp__offset) if include_offset else "", child._pfp__name, child._pfp__show(level+1, include_offset) )) res.append("{}}}".format(" "*level)) return "\n".join(res)
<SYSTEM_TASK:> Add a child to the Union field <END_TASK> <USER_TASK:> Description: def _pfp__add_child(self, name, child, stream=None): """Add a child to the Union field :name: The name of the child :child: A :class:`.Field` instance :returns: The resulting field """
res = super(Union, self)._pfp__add_child(name, child) self._pfp__buff.seek(0, 0) child._pfp__build(stream=self._pfp__buff) size = len(self._pfp__buff.getvalue()) self._pfp__buff.seek(0, 0) if stream is not None: curr_pos = stream.tell() stream.seek(curr_pos-size, 0) return res
<SYSTEM_TASK:> Build the union and write the result into the stream. <END_TASK> <USER_TASK:> Description: def _pfp__build(self, stream=None, save_offset=False): """Build the union and write the result into the stream. :stream: None :returns: None """
max_size = -1 if stream is None: core_stream = six.BytesIO() new_stream = bitwrap.BitwrappedStream(core_stream) else: new_stream = stream for child in self._pfp__children: curr_pos = new_stream.tell() child._pfp__build(new_stream, save_offset) size = new_stream.tell() - curr_pos new_stream.seek(-size, 1) if size > max_size: max_size = size new_stream.seek(max_size, 1) if stream is None: return core_stream.getvalue() else: return max_size
<SYSTEM_TASK:> Parse the IO stream for this numeric field <END_TASK> <USER_TASK:> Description: def _pfp__parse(self, stream, save_offset=False): """Parse the IO stream for this numeric field :stream: An IO stream that can be read from :returns: The number of bytes parsed """
if save_offset: self._pfp__offset = stream.tell() if self.bitsize is None: raw_data = stream.read(self.width) data = utils.binary(raw_data) else: bits = self.bitfield_rw.read_bits(stream, self.bitsize, self.bitfield_padded, self.bitfield_left_right, self.endian) width_diff = self.width - (len(bits)//8) - 1 bits_diff = 8 - (len(bits) % 8) padding = [0] * (width_diff * 8 + bits_diff) bits = padding + bits data = bitwrap.bits_to_bytes(bits) if self.endian == LITTLE_ENDIAN: # reverse the data data = data[::-1] if len(data) < self.width: raise errors.PrematureEOF() self._pfp__data = data self._pfp__value = struct.unpack( "{}{}".format(self.endian, self.format), data )[0] return self.width
<SYSTEM_TASK:> Return the dominating numeric class between the two <END_TASK> <USER_TASK:> Description: def _dom_class(self, obj1, obj2): """Return the dominating numeric class between the two :obj1: TODO :obj2: TODO :returns: TODO """
if isinstance(obj1, Double) or isinstance(obj2, Double): return Double if isinstance(obj1, Float) or isinstance(obj2, Float): return Float
<SYSTEM_TASK:> Parse the IO stream for this enum <END_TASK> <USER_TASK:> Description: def _pfp__parse(self, stream, save_offset=False): """Parse the IO stream for this enum :stream: An IO stream that can be read from :returns: The number of bytes parsed """
res = super(Enum, self)._pfp__parse(stream, save_offset) if self._pfp__value in self.enum_vals: self.enum_name = self.enum_vals[self._pfp__value] else: self.enum_name = "?? UNK_ENUM ??" return res
<SYSTEM_TASK:> Set the value of the String, taking into account <END_TASK> <USER_TASK:> Description: def _pfp__set_value(self, new_val): """Set the value of the String, taking into account escaping and such as well """
if not isinstance(new_val, Field): new_val = utils.binary(utils.string_escape(new_val)) super(String, self)._pfp__set_value(new_val)
<SYSTEM_TASK:> Read from the stream until the string is null-terminated <END_TASK> <USER_TASK:> Description: def _pfp__parse(self, stream, save_offset=False): """Read from the stream until the string is null-terminated :stream: The input stream :returns: None """
if save_offset: self._pfp__offset = stream.tell() res = utils.binary("") while True: byte = utils.binary(stream.read(self.read_size)) if len(byte) < self.read_size: raise errors.PrematureEOF() # note that the null terminator must be added back when # built again! if byte == self.terminator: break res += byte self._pfp__value = res
<SYSTEM_TASK:> Return the strategy identified by its name. If ``name_or_cls`` is a class, <END_TASK> <USER_TASK:> Description: def get_strategy(name_or_cls): """Return the strategy identified by its name. If ``name_or_cls`` is a class, it will be simply returned. """
if isinstance(name_or_cls, six.string_types): if name_or_cls not in STRATS: raise MutationError("strat is not defined") return STRATS[name_or_cls]() return name_or_cls()
<SYSTEM_TASK:> Mutate the given field, modifying it directly. This is not <END_TASK> <USER_TASK:> Description: def mutate(self, field): """Mutate the given field, modifying it directly. This is not intended to preserve the value of the field. :field: The pfp.fields.Field instance that will receive the new value """
new_val = self.next_val(field) field._pfp__set_value(new_val) return field
<SYSTEM_TASK:> Wrap the creation of the type so that we can provide <END_TASK> <USER_TASK:> Description: def _wrap_type_instantiation(self, type_cls): """Wrap the creation of the type so that we can provide a null-stream to initialize it"""
def wrapper(*args, **kwargs): # use args for struct arguments?? return type_cls(stream=self._null_stream) return wrapper
<SYSTEM_TASK:> Return the current scope level <END_TASK> <USER_TASK:> Description: def level(self): """Return the current scope level """
res = len(self._scope_stack) if self._parent is not None: res += self._parent.level() return res
<SYSTEM_TASK:> Add a local variable in the current scope <END_TASK> <USER_TASK:> Description: def add_local(self, field_name, field): """Add a local variable in the current scope :field_name: The field's name :field: The field :returns: None """
self._dlog("adding local '{}'".format(field_name)) field._pfp__name = field_name # TODO do we allow clobbering of locals??? self._curr_scope["vars"][field_name] = field
<SYSTEM_TASK:> Store the node with the name. When it is instantiated, <END_TASK> <USER_TASK:> Description: def add_type_struct_or_union(self, name, interp, node): """Store the node with the name. When it is instantiated, the node itself will be handled. :name: name of the typedefd struct/union :node: the union/struct node :interp: the 010 interpreter """
self.add_type_class(name, StructUnionDef(name, interp, node))
<SYSTEM_TASK:> Record the typedefd name for orig_names. Resolve orig_names <END_TASK> <USER_TASK:> Description: def add_type(self, new_name, orig_names): """Record the typedefd name for orig_names. Resolve orig_names to their core names and save those. :new_name: TODO :orig_names: TODO :returns: TODO """
self._dlog("adding a type '{}'".format(new_name)) # TODO do we allow clobbering of types??? res = copy.copy(orig_names) resolved_names = self._resolve_name(res[-1]) if resolved_names is not None: res.pop() res += resolved_names self._curr_scope["types"][new_name] = res
<SYSTEM_TASK:> Get the first id matching ``name``. Will either be a local <END_TASK> <USER_TASK:> Description: def get_id(self, name, recurse=True): """Get the first id matching ``name``. Will either be a local or a var. :name: TODO :returns: TODO """
self._dlog("getting id '{}'".format(name)) var = self._search("vars", name, recurse) return var
<SYSTEM_TASK:> Add the native python function ``func`` into the pfp interpreter with the <END_TASK> <USER_TASK:> Description: def add_native(cls, name, func, ret, interp=None, send_interp=False): """Add the native python function ``func`` into the pfp interpreter with the name ``name`` and return value ``ret`` so that it can be called from within a template script. .. note:: The :any:`@native <pfp.native.native>` decorator exists to simplify this. All native functions must have the signature ``def func(params, ctxt, scope, stream, coord [,interp])``, optionally allowing an interpreter param if ``send_interp`` is ``True``. Example: The example below defines a function ``Sum`` using the ``add_native`` method. :: import pfp.fields from pfp.fields import PYVAL def native_sum(params, ctxt, scope, stream, coord): return PYVAL(params[0]) + PYVAL(params[1]) pfp.interp.PfpInterp.add_native("Sum", native_sum, pfp.fields.Int64) :param basestring name: The name the function will be exposed as in the interpreter. :param function func: The native python function that will be referenced. :param type(pfp.fields.Field) ret: The field class that the return value should be cast to. :param pfp.interp.PfpInterp interp: The specific pfp interpreter the function should be defined in. :param bool send_interp: If true, the current pfp interpreter will be added as an argument to the function. """
if interp is None: natives = cls._natives else: # the instance's natives natives = interp._natives natives[name] = functions.NativeFunction( name, func, ret, send_interp )
<SYSTEM_TASK:> log the message to the log <END_TASK> <USER_TASK:> Description: def _dlog(self, msg, indent_increase=0): """log the message to the log"""
self._log.debug("interp", msg, indent_increase, filename=self._orig_filename, coord=self._coord)
<SYSTEM_TASK:> Return the current line number in the template, <END_TASK> <USER_TASK:> Description: def get_curr_lines(self): """Return the current line number in the template, as well as the surrounding source lines """
start = max(0, self._coord.line - 5) end = min(len(self._template_lines), self._coord.line + 4) lines = [(x, self._template_lines[x]) for x in six.moves.range(start, end, 1)] return self._coord.line, lines
<SYSTEM_TASK:> Get the name of the node - check for node.name and <END_TASK> <USER_TASK:> Description: def _get_node_name(self, node): """Get the name of the node - check for node.name and node.type.declname. Not sure why the second one occurs exactly - it happens with declaring a new struct field with parameters"""
res = getattr(node, "name", None) if res is None: return res if isinstance(res, AST.TypeDecl): return res.declname return res
<SYSTEM_TASK:> Handle metadata for the node <END_TASK> <USER_TASK:> Description: def _handle_metadata(self, node, scope, ctxt, stream): """Handle metadata for the node """
self._dlog("handling node metadata {}".format(node.metadata.keyvals)) keyvals = node.metadata.keyvals metadata_info = [] if "watch" in node.metadata.keyvals or "update" in keyvals: metadata_info.append( self._handle_watch_metadata(node, scope, ctxt, stream) ) if "packtype" in node.metadata.keyvals or "packer" in keyvals: metadata_info.append( self._handle_packed_metadata(node, scope, ctxt, stream) ) return metadata_info
<SYSTEM_TASK:> Handle watch vars for fields <END_TASK> <USER_TASK:> Description: def _handle_watch_metadata(self, node, scope, ctxt, stream): """Handle watch vars for fields """
keyvals = node.metadata.keyvals
if "watch" not in keyvals:
    raise errors.PfpError("watched fields require a watch target to be set")
if "update" not in keyvals:
    raise errors.PfpError("watched fields require an update function to be set")
watch_field_name = keyvals["watch"]
update_func_name = keyvals["update"]
watch_fields = list(map(lambda x: self.eval(x.strip()), watch_field_name.split(";")))
update_func = scope.get_id(update_func_name)
return {
    "type": "watch",
    "watch_fields": watch_fields,
    "update_func": update_func,
    "func_call_info": (ctxt, scope, stream, self, self._coord)
}
<SYSTEM_TASK:> Return the value of the node. It is expected to be <END_TASK> <USER_TASK:> Description: def _get_value(self, node, scope, ctxt, stream): """Return the value of the node. It is expected to be either an AST.ID instance or a constant :node: TODO :returns: TODO """
res = self._handle_node(node, scope, ctxt, stream) if isinstance(res, fields.Field): return res._pfp__value # assume it's a constant else: return res
<SYSTEM_TASK:> Resolve the names to a class in fields.py, resolving past <END_TASK> <USER_TASK:> Description: def _resolve_to_field_class(self, names, scope): """Resolve the names to a class in fields.py, resolving past typedefs, etc :names: TODO :scope: TODO :ctxt: TODO :returns: TODO """
switch = { "char" : "Char", "int" : "Int", "long" : "Int", "int64" : "Int64", "uint64" : "UInt64", "short" : "Short", "double" : "Double", "float" : "Float", "void" : "Void", "string" : "String", "wstring" : "WString" } core = names[-1] if core not in switch: # will return a list of resolved names type_info = scope.get_type(core) if type(type_info) is type and issubclass(type_info, fields.Field): return type_info resolved_names = type_info if resolved_names is None: raise errors.UnresolvedType(self._coord, " ".join(names), " ") if resolved_names[-1] not in switch: raise errors.UnresolvedType(self._coord, " ".join(names), " ".join(resolved_names)) names = copy.copy(names) names.pop() names += resolved_names if len(names) >= 2 and names[-1] == names[-2] and names[-1] == "long": res = "Int64" else: res = switch[names[-1]] if names[-1] in ["char", "short", "int", "long"] and "unsigned" in names[:-1]: res = "U" + res cls = getattr(fields, res) return cls
<SYSTEM_TASK:> Convert bytes to a list of bits <END_TASK> <USER_TASK:> Description: def bytes_to_bits(bytes_): """Convert bytes to a list of bits """
res = [] for x in bytes_: if not isinstance(x, int): x = ord(x) res += byte_to_bits(x) return res
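For reference, a compatible byte_to_bits helper, most-significant bit first (an assumption; the real helper lives elsewhere in this module):

def byte_to_bits(b):
    # 0x0b -> [0, 0, 0, 0, 1, 0, 1, 1]
    return [(b >> (7 - i)) & 1 for i in range(8)]

bytes_to_bits(b"\x0b")  # -> [0, 0, 0, 0, 1, 0, 1, 1]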
<SYSTEM_TASK:> Return if the stream has reached EOF or not <END_TASK> <USER_TASK:> Description: def is_eof(self): """Return if the stream has reached EOF or not without discarding any unflushed bits :returns: True/False """
pos = self._stream.tell() byte = self._stream.read(1) self._stream.seek(pos, 0) return utils.binary(byte) == utils.binary("")
<SYSTEM_TASK:> Read ``num`` number of bits from the stream <END_TASK> <USER_TASK:> Description: def read_bits(self, num): """Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached """
if num > len(self._bits): needed = num - len(self._bits) num_bytes = int(math.ceil(needed / 8.0)) read_bytes = self._stream.read(num_bytes) for bit in bytes_to_bits(read_bytes): self._bits.append(bit) res = [] while len(res) < num and len(self._bits) > 0: res.append(self._bits.popleft()) return res
<SYSTEM_TASK:> Write data to the stream <END_TASK> <USER_TASK:> Description: def write(self, data): """Write data to the stream :data: the data to write to the stream :returns: None """
if self.padded: # flush out any remaining bits first if len(self._bits) > 0: self._flush_bits_to_stream() self._stream.write(data) else: # nothing to do here if len(data) == 0: return bits = bytes_to_bits(data) self.write_bits(bits)
<SYSTEM_TASK:> Write the bits to the stream. <END_TASK> <USER_TASK:> Description: def write_bits(self, bits): """Write the bits to the stream. Add the bits to the existing unflushed bits and write complete bytes to the stream. """
for bit in bits: self._bits.append(bit) while len(self._bits) >= 8: byte_bits = [self._bits.popleft() for x in six.moves.range(8)] byte = bits_to_bytes(byte_bits) self._stream.write(byte)
<SYSTEM_TASK:> Seek to the specified position in the stream with seek_type. <END_TASK> <USER_TASK:> Description: def seek(self, pos, seek_type=0): """Seek to the specified position in the stream with seek_type. Unflushed bits will be discarded in the case of a seek. The stream will also keep track of which bytes have and have not been consumed so that the dom will capture all of the bytes in the stream. :pos: offset :seek_type: direction :returns: TODO """
self._bits.clear() return self._stream.seek(pos, seek_type)
<SYSTEM_TASK:> Return the size of the stream, or -1 if it cannot <END_TASK> <USER_TASK:> Description: def size(self): """Return the size of the stream, or -1 if it cannot be determined. """
pos = self._stream.tell() # seek to the end of the stream self._stream.seek(0,2) size = self._stream.tell() self._stream.seek(pos, 0) return size
<SYSTEM_TASK:> Return an IntervalTree of unconsumed ranges, of the format <END_TASK> <USER_TASK:> Description: def unconsumed_ranges(self): """Return an IntervalTree of unconsumed ranges, of the format (start, end] with the end value not being included """
res = IntervalTree() prev = None # normal iteration is not in a predictable order ranges = sorted([x for x in self.range_set], key=lambda x: x.begin) for rng in ranges: if prev is None: prev = rng continue res.add(Interval(prev.end, rng.begin)) prev = rng # means we've seeked past the end if len(self.range_set[self.tell()]) != 1: res.add(Interval(prev.end, self.tell())) return res
<SYSTEM_TASK:> Update the ``self.consumed_ranges`` array with which <END_TASK> <USER_TASK:> Description: def _update_consumed_ranges(self, start_pos, end_pos): """Update the ``self.consumed_ranges`` array with which byte ranges have been consecutively consumed. """
self.range_set.add(Interval(start_pos, end_pos+1)) self.range_set.merge_overlaps()
<SYSTEM_TASK:> ``PackGZip`` - Concats the build output of all params and gzips the <END_TASK> <USER_TASK:> Description: def pack_gzip(params, ctxt, scope, stream, coord): """``PackGZip`` - Concats the build output of all params and gzips the resulting data, returning a char array. Example: :: char data[0x100]<pack=PackGZip, ...>; """
if len(params) == 0: raise errors.InvalidArguments(coord, "{} args".format(len(params)), "at least one argument") built = utils.binary("") for param in params: if isinstance(param, pfp.fields.Field): built += param._pfp__build() else: built += param return zlib.compress(built)
<SYSTEM_TASK:> Decorator for views that checks that the user is logged in and is a staff <END_TASK> <USER_TASK:> Description: def superuser_required(view_func): """ Decorator for views that checks that the user is logged in and is a staff member, displaying the login page if necessary. """
@wraps(view_func)
def _checklogin(request, *args, **kwargs):
    if request.user.is_active and request.user.is_superuser:
        # The user is valid. Continue to the admin page.
        return view_func(request, *args, **kwargs)
    assert hasattr(request, 'session'), "The Django admin requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
    defaults = {
        'template_name': 'admin/login.html',
        'redirect_field_name': request.get_full_path(),
        'authentication_form': AdminAuthenticationForm,
        'extra_context': {
            'title': _('Log in'),
            'app_path': request.get_full_path()
        }
    }
    # LoginView is class-based; it must be instantiated via as_view()
    return LoginView.as_view(**defaults)(request)
return _checklogin
<SYSTEM_TASK:> Compiles the pattern lines. <END_TASK> <USER_TASK:> Description: def from_lines(cls, pattern_factory, lines): """ Compiles the pattern lines. *pattern_factory* can be either the name of a registered pattern factory (:class:`str`), or a :class:`~collections.abc.Callable` used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled pattern (:class:`str`). This simply has to yield each line so it can be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`) or the result from :meth:`str.splitlines`. Returns the :class:`PathSpec` instance. """
if isinstance(pattern_factory, string_types): pattern_factory = util.lookup_pattern(pattern_factory) if not callable(pattern_factory): raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory)) if isinstance(lines, (bytes, unicode)): raise TypeError("lines:{!r} is not an iterable.".format(lines)) lines = [pattern_factory(line) for line in lines if line] return cls(lines)
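A usage sketch with the registered 'gitwildmatch' factory:

from pathspec import PathSpec

spec = PathSpec.from_lines('gitwildmatch', [
    '*.pyc',
    'build/',
])
spec.match_file('module.pyc')      # True
spec.match_file('build/lib/x.py')  # True
spec.match_file('src/module.py')   # False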
<SYSTEM_TASK:> Matches the file to this path-spec. <END_TASK> <USER_TASK:> Description: def match_file(self, file, separators=None): """ Matches the file to this path-spec. *file* (:class:`str`) is the file path to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns :data:`True` if *file* matched; otherwise, :data:`False`. """
norm_file = util.normalize_file(file, separators=separators) return util.match_file(self.patterns, norm_file)
<SYSTEM_TASK:> Matches the files to this path-spec. <END_TASK> <USER_TASK:> Description: def match_files(self, files, separators=None): """ Matches the files to this path-spec. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be matched against :attr:`self.patterns <PathSpec.patterns>`. *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`~pathspec.util.normalize_file` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`). """
if isinstance(files, (bytes, unicode)): raise TypeError("files:{!r} is not an iterable.".format(files)) file_map = util.normalize_files(files, separators=separators) matched_files = util.match_files(self.patterns, iterkeys(file_map)) for path in matched_files: yield file_map[path]
<SYSTEM_TASK:> Walks the specified root path for all files and matches them to this <END_TASK> <USER_TASK:> Description: def match_tree(self, root, on_error=None, follow_links=None): """ Walks the specified root path for all files and matches them to this path-spec. *root* (:class:`str`) is the root directory to search for files. *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. See :func:`~pathspec.util.iter_tree` for more information. *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. See :func:`~pathspec.util.iter_tree` for more information. Returns the matched files (:class:`~collections.abc.Iterable` of :class:`str`). """
files = util.iter_tree(root, on_error=on_error, follow_links=follow_links) return self.match_files(files)
<SYSTEM_TASK:> Convert the pattern into a regular expression. <END_TASK> <USER_TASK:> Description: def pattern_to_regex(cls, pattern): """ Convert the pattern into a regular expression. *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to convert into a regular expression. Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`, or :data:`None`), and whether matched files should be included (:data:`True`), excluded (:data:`False`), or if it is a null-operation (:data:`None`). """
if isinstance(pattern, unicode):
    return_type = unicode
elif isinstance(pattern, bytes):
    return_type = bytes
    pattern = pattern.decode(_BYTES_ENCODING)
else:
    raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))

pattern = pattern.strip()

if pattern.startswith('#'):
    # A pattern starting with a hash ('#') serves as a comment
    # (neither includes nor excludes files). Escape the hash with a
    # back-slash to match a literal hash (i.e., '\#').
    regex = None
    include = None

elif pattern == '/':
    # EDGE CASE: According to `git check-ignore` (v2.4.1), a single
    # '/' does not match any file.
    regex = None
    include = None

elif pattern:
    if pattern.startswith('!'):
        # A pattern starting with an exclamation mark ('!') negates the
        # pattern (exclude instead of include). Escape the exclamation
        # mark with a back-slash to match a literal exclamation mark
        # (i.e., '\!').
        include = False
        # Remove leading exclamation mark.
        pattern = pattern[1:]
    else:
        include = True

    if pattern.startswith('\\'):
        # Remove leading back-slash escape for escaped hash ('#') or
        # exclamation mark ('!').
        pattern = pattern[1:]

    # Split pattern into segments.
    pattern_segs = pattern.split('/')

    # Normalize pattern to make processing easier.

    if not pattern_segs[0]:
        # A pattern beginning with a slash ('/') will only match paths
        # directly on the root directory instead of any descendant
        # paths. So, remove empty first segment to make pattern relative
        # to root.
        del pattern_segs[0]

    elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
        # A single pattern without a beginning slash ('/') will match
        # any descendant path. This is equivalent to "**/{pattern}". So,
        # prepend with double-asterisks to make pattern relative to
        # root.
        # EDGE CASE: This also holds for a single pattern with a
        # trailing slash (e.g. "dir/").
        if pattern_segs[0] != '**':
            pattern_segs.insert(0, '**')

    else:
        # EDGE CASE: A pattern without a beginning slash ('/') but
        # containing at least one prepended directory (e.g.
        # "dir/{pattern}") should not match "**/dir/{pattern}",
        # according to `git check-ignore` (v2.4.1).
        pass

    if not pattern_segs[-1] and len(pattern_segs) > 1:
        # A pattern ending with a slash ('/') will match all descendant
        # paths if it is a directory but not if it is a regular file.
        # This is equivalent to "{pattern}/**". So, set last segment to
        # double asterisks to include all descendants.
        pattern_segs[-1] = '**'

    # Build regular expression from pattern.
    output = ['^']
    need_slash = False
    end = len(pattern_segs) - 1
    for i, seg in enumerate(pattern_segs):
        if seg == '**':
            if i == 0 and i == end:
                # A pattern consisting solely of double-asterisks ('**')
                # will match every path.
                output.append('.+')
            elif i == 0:
                # A normalized pattern beginning with double-asterisks
                # ('**') will match any leading path segments.
                output.append('(?:.+/)?')
                need_slash = False
            elif i == end:
                # A normalized pattern ending with double-asterisks
                # ('**') will match any trailing path segments.
                output.append('/.*')
            else:
                # A pattern with inner double-asterisks ('**') will
                # match multiple (or zero) inner path segments.
                output.append('(?:/.+)?')
                need_slash = True

        elif seg == '*':
            # Match single path segment.
            if need_slash:
                output.append('/')
            output.append('[^/]+')
            need_slash = True

        else:
            # Match segment glob pattern.
            if need_slash:
                output.append('/')
            output.append(cls._translate_segment_glob(seg))

            if i == end and include is True:
                # A pattern ending without a slash ('/') will match a
                # file or a directory (with paths underneath it). E.g.,
                # "foo" matches "foo", "foo/bar", "foo/bar/baz", etc.
                # EDGE CASE: However, this does not hold for exclusion
                # cases according to `git check-ignore` (v2.4.1).
                output.append('(?:/.*)?')

            need_slash = True

    output.append('$')
    regex = ''.join(output)

else:
    # A blank pattern is a null-operation (neither includes nor
    # excludes files).
    regex = None
    include = None

if regex is not None and return_type is bytes:
    regex = regex.encode(_BYTES_ENCODING)

return regex, include
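For illustration, a minimal sketch of the conversion in action, assuming the method above is reachable as `GitWildMatchPattern.pattern_to_regex` via the pathspec library's `pathspec.patterns` module (which is where this class lives):

import re

from pathspec.patterns import GitWildMatchPattern

# An inclusion pattern with a leading directory segment is anchored at the root.
regex, include = GitWildMatchPattern.pattern_to_regex('docs/*.md')
assert include is True
assert re.match(regex, 'docs/intro.md')             # direct match
assert re.match(regex, 'docs/intro.md/attachment')  # descendant paths match too
assert not re.match(regex, 'src/docs/intro.md')     # not treated as '**/docs/*.md'

# Comments and blank patterns are null-operations.
assert GitWildMatchPattern.pattern_to_regex('# comment') == (None, None)
assert GitWildMatchPattern.pattern_to_regex('') == (None, None)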
<SYSTEM_TASK:> Translates the glob pattern to a regular expression. This is used in <END_TASK> <USER_TASK:> Description: def _translate_segment_glob(pattern): """ Translates the glob pattern to a regular expression. This is used in the constructor to translate a path segment glob pattern to its corresponding regular expression. *pattern* (:class:`str`) is the glob pattern. Returns the regular expression (:class:`str`). """
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.

escape = False
regex = ''
i, end = 0, len(pattern)
while i < end:
    # Get next character.
    char = pattern[i]
    i += 1

    if escape:
        # Escape the character.
        escape = False
        regex += re.escape(char)

    elif char == '\\':
        # Escape character, escape next character.
        escape = True

    elif char == '*':
        # Multi-character wildcard. Match any string (except slashes),
        # including an empty string.
        regex += '[^/]*'

    elif char == '?':
        # Single-character wildcard. Match any single character (except
        # a slash).
        regex += '[^/]'

    elif char == '[':
        # Bracket expression wildcard. Except for the beginning
        # exclamation mark, the whole bracket expression can be used
        # directly as a regex, but we have to find where the expression
        # ends.
        # - "[][!]" matches ']', '[' and '!'.
        # - "[]-]" matches ']' and '-'.
        # - "[!]a-]" matches any character except ']', 'a' and '-'.
        j = i
        # Pass bracket expression negation.
        if j < end and pattern[j] == '!':
            j += 1
        # Pass first closing bracket if it is at the beginning of the
        # expression.
        if j < end and pattern[j] == ']':
            j += 1
        # Find closing bracket. Stop once we reach the end or find it.
        while j < end and pattern[j] != ']':
            j += 1

        if j < end:
            # Found end of bracket expression. Increment j to be one
            # past the closing bracket:
            #
            #  [...]
            #   ^   ^
            #   i   j
            #
            j += 1
            expr = '['

            if pattern[i] == '!':
                # Bracket expression needs to be negated.
                expr += '^'
                i += 1
            elif pattern[i] == '^':
                # POSIX declares that the regex bracket expression
                # negation "[^...]" is undefined in a glob pattern.
                # Python's `fnmatch.translate()` escapes the caret ('^')
                # as a literal. To maintain consistency with undefined
                # behavior, I am escaping the '^' as well.
                expr += '\\^'
                i += 1

            # Build regex bracket expression. Escape slashes so they are
            # treated as literal slashes by regex as defined by POSIX.
            expr += pattern[i:j].replace('\\', '\\\\')

            # Add regex bracket expression to regex result.
            regex += expr

            # Set i to one past the closing bracket.
            i = j

        else:
            # Failed to find closing bracket, treat opening bracket as a
            # bracket literal instead of as an expression.
            regex += '\\['

    else:
        # Regular character, escape it for regex.
        regex += re.escape(char)

return regex
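A short sketch of the segment translation by itself (the method is private, so calling it directly is for illustration only; same assumed import as above):

import re

from pathspec.patterns import GitWildMatchPattern

# '*' and '?' never cross a slash; '[!...]' is a negated bracket expression.
seg_regex = GitWildMatchPattern._translate_segment_glob('[!a]?*.py')
assert re.fullmatch(seg_regex, 'bc_module.py')
assert not re.fullmatch(seg_regex, 'a_module.py')   # '[!a]' rejects a leading 'a'
assert not re.fullmatch(seg_regex, 'bc/module.py')  # wildcards stop at '/'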
<SYSTEM_TASK:> Warn about deprecation. <END_TASK> <USER_TASK:> Description: def pattern_to_regex(cls, *args, **kw): """ Warn about deprecation. """
cls._deprecated()
return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
<SYSTEM_TASK:> Walks the specified directory for all files. <END_TASK> <USER_TASK:> Description: def iter_tree(root, on_error=None, follow_links=None): """ Walks the specified directory for all files.

    *root* (:class:`str`) is the root directory to search for files.

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions. It will be called with the exception (:exc:`OSError`). Reraise the exception to abort the walk. Default is :data:`None` to ignore file-system exceptions.

    *follow_links* (:class:`bool` or :data:`None`) optionally is whether to walk symbolic links that resolve to directories. Default is :data:`None` for :data:`True`.

    Raises :exc:`RecursionError` if recursion is detected.

    Returns an :class:`~collections.abc.Iterable` yielding the path to each file (:class:`str`) relative to *root*.
    """
if on_error is not None and not callable(on_error):
    raise TypeError("on_error:{!r} is not callable.".format(on_error))

if follow_links is None:
    follow_links = True

for file_rel in _iter_tree_next(os.path.abspath(root), '', {}, on_error, follow_links):
    yield file_rel
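A usage sketch, assuming `iter_tree` is exposed from `pathspec.util` as it is in the pathspec library:

from pathspec.util import iter_tree

def warn(error):
    # Log and continue; re-raising here would abort the walk instead.
    print('skipping unreadable entry: {}'.format(error))

# Yields file paths relative to the root, e.g. 'setup.py', 'pathspec/util.py'.
for rel_path in iter_tree('.', on_error=warn, follow_links=False):
    print(rel_path)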
<SYSTEM_TASK:> Scan the directory for all descendant files. <END_TASK> <USER_TASK:> Description: def _iter_tree_next(root_full, dir_rel, memo, on_error, follow_links): """ Scan the directory for all descendant files.

    *root_full* (:class:`str`) the absolute path to the root directory.

    *dir_rel* (:class:`str`) the path to the directory to scan relative to *root_full*.

    *memo* (:class:`dict`) keeps track of ancestor directories encountered. Maps each ancestor real path (:class:`str`) to relative path (:class:`str`).

    *on_error* (:class:`~collections.abc.Callable` or :data:`None`) optionally is the error handler for file-system exceptions.

    *follow_links* (:class:`bool`) is whether to walk symbolic links that resolve to directories.
    """
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)

# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
    memo[dir_real] = dir_rel
else:
    raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)

for node in os.listdir(dir_full):
    node_rel = os.path.join(dir_rel, node)
    node_full = os.path.join(root_full, node_rel)

    # Inspect child node.
    try:
        node_stat = os.lstat(node_full)
    except OSError as e:
        if on_error is not None:
            on_error(e)
        continue

    if stat.S_ISLNK(node_stat.st_mode):
        # Child node is a link, inspect the target node.
        is_link = True
        try:
            node_stat = os.stat(node_full)
        except OSError as e:
            if on_error is not None:
                on_error(e)
            continue
    else:
        is_link = False

    if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
        # Child node is a directory, recurse into it and yield its
        # descendant files.
        for file_rel in _iter_tree_next(root_full, node_rel, memo, on_error, follow_links):
            yield file_rel

    elif stat.S_ISREG(node_stat.st_mode):
        # Child node is a file, yield it.
        yield node_rel

# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurrence of the directory will be incorrectly interpreted
# as a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
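The memo bookkeeping above is what turns a symlink cycle into an explicit error instead of an infinite walk. A POSIX-only sketch through the public `iter_tree` wrapper (the `loop` link name is made up for the demo; `RecursionError` here is pathspec's own class, not the builtin):

import os
import tempfile

from pathspec.util import RecursionError, iter_tree

root = tempfile.mkdtemp()
os.symlink(root, os.path.join(root, 'loop'))  # a directory that contains itself

try:
    list(iter_tree(root, follow_links=True))
except RecursionError as error:
    print('cycle detected at: {}'.format(error.real_path))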
<SYSTEM_TASK:> Matches the file to the patterns. <END_TASK> <USER_TASK:> Description: def match_file(patterns, file): """ Matches the file to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *file* (:class:`str`) is the normalized file path to be matched against *patterns*. Returns :data:`True` if *file* matched; otherwise, :data:`False`. """
matched = False
for pattern in patterns:
    if pattern.include is not None:
        if file in pattern.match((file,)):
            matched = pattern.include
return matched
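A sketch of the last-match-wins behavior this implements, using the gitwildmatch pattern class shown earlier (assumed imports as before):

from pathspec.patterns import GitWildMatchPattern
from pathspec.util import match_file

patterns = [
    GitWildMatchPattern('*.py'),       # include all Python files...
    GitWildMatchPattern('!setup.py'),  # ...then carve one back out
]
assert match_file(patterns, 'module.py') is True
assert match_file(patterns, 'setup.py') is False    # excluded by the later pattern
assert match_file(patterns, 'README.rst') is False  # never matched at all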
<SYSTEM_TASK:> Matches the files to the patterns. <END_TASK> <USER_TASK:> Description: def match_files(patterns, files): """ Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`). """
all_files = files if isinstance(files, collection_type) else list(files)
return_files = set()
for pattern in patterns:
    if pattern.include is not None:
        result_files = pattern.match(all_files)
        if pattern.include:
            return_files.update(result_files)
        else:
            return_files.difference_update(result_files)
return return_files
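The set-based variant composes the same include/exclude logic over many files at once; a quick sketch with the same patterns:

from pathspec.patterns import GitWildMatchPattern
from pathspec.util import match_files

patterns = [GitWildMatchPattern('*.py'), GitWildMatchPattern('!setup.py')]
files = {'module.py', 'setup.py', 'README.rst'}
assert match_files(patterns, files) == {'module.py'}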
<SYSTEM_TASK:> Normalizes the file paths to use the POSIX path separator. <END_TASK> <USER_TASK:> Description: def normalize_files(files, separators=None): """ Normalizes the file paths to use the POSIX path separator.

    *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the file paths to be normalized.

    *separators* (:class:`~collections.abc.Collection` of :class:`str`; or :data:`None`) optionally contains the path separators to normalize. See :func:`normalize_file` for more information.

    Returns a :class:`dict` mapping each normalized file path (:class:`str`) to the original file path (:class:`str`).
    """
norm_files = {}
for path in files:
    norm_files[normalize_file(path, separators=separators)] = path
return norm_files
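A sketch of the mapping this produces when normalizing Windows-style separators:

from pathspec.util import normalize_files

mapping = normalize_files(['src\\main.py'], separators=['\\'])
assert mapping == {'src/main.py': 'src\\main.py'}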
<SYSTEM_TASK:> Registers the specified pattern factory. <END_TASK> <USER_TASK:> Description: def register_pattern(name, pattern_factory, override=None): """ Registers the specified pattern factory. *name* (:class:`str`) is the name to register the pattern factory under. *pattern_factory* (:class:`~collections.abc.Callable`) is used to compile patterns. It must accept an uncompiled pattern (:class:`str`) and return the compiled pattern (:class:`.Pattern`). *override* (:class:`bool` or :data:`None`) optionally is whether to allow overriding an already registered pattern under the same name (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError` (:data:`False`). Default is :data:`None` for :data:`False`. """
if not isinstance(name, string_types):
    raise TypeError("name:{!r} is not a string.".format(name))
if not callable(pattern_factory):
    raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
if name in _registered_patterns and not override:
    raise AlreadyRegisteredError(name, _registered_patterns[name])
_registered_patterns[name] = pattern_factory
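A registration sketch. The 'plainregex' name is invented for this example; `RegexPattern` (pathspec's base class that treats each line as a raw, always-including regex) works as the factory because any callable taking a pattern string and returning a compiled `Pattern` qualifies:

import pathspec
from pathspec.pattern import RegexPattern
from pathspec.util import register_pattern

register_pattern('plainregex', RegexPattern)  # hypothetical style name

spec = pathspec.PathSpec.from_lines('plainregex', [r'.*\.tmp$'])
assert spec.match_file('build/cache.tmp')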
<SYSTEM_TASK:> Convert a User to a cached instance representation. <END_TASK> <USER_TASK:> Description: def user_default_serializer(self, obj): """Convert a User to a cached instance representation."""
if not obj:
    return None
self.user_default_add_related_pks(obj)
return dict((
    ('id', obj.id),
    ('username', obj.username),
    self.field_to_json('DateTime', 'date_joined', obj.date_joined),
    self.field_to_json(
        'PKList', 'votes', model=Choice, pks=obj._votes_pks),
))
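The serializer builds its dict from (key, value) pairs so each field helper can contribute one pair. A self-contained analogue of that construction (the `field_to_json` stand-in below is hypothetical and simplified; the real helper takes extra keyword arguments such as `model` and `pks`):

from datetime import datetime, timezone

def field_to_json(kind, name, value):
    # Hypothetical stand-in: return a (key, json_safe_value) pair per field.
    if kind == 'DateTime':
        return (name, value.isoformat())
    if kind == 'PKList':
        return (name, list(value))
    raise ValueError(kind)

payload = dict((
    ('id', 1),
    ('username', 'alice'),
    field_to_json('DateTime', 'date_joined', datetime(2020, 1, 1, tzinfo=timezone.utc)),
    field_to_json('PKList', 'votes', [3, 7]),
))
assert payload['username'] == 'alice'
assert payload['votes'] == [3, 7]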
<SYSTEM_TASK:> Load a User from the database. <END_TASK> <USER_TASK:> Description: def user_default_loader(self, pk): """Load a User from the database."""
try:
    obj = User.objects.get(pk=pk)
except User.DoesNotExist:
    return None
else:
    self.user_default_add_related_pks(obj)
    return obj
<SYSTEM_TASK:> Add related primary keys to a User instance. <END_TASK> <USER_TASK:> Description: def user_default_add_related_pks(self, obj): """Add related primary keys to a User instance."""
if not hasattr(obj, '_votes_pks'):
    obj._votes_pks = list(obj.votes.values_list('pk', flat=True))
<SYSTEM_TASK:> Invalidate cached items when the Group changes. <END_TASK> <USER_TASK:> Description: def group_default_invalidator(self, obj): """Invalidate cached items when the Group changes."""
user_pks = User.objects.values_list('pk', flat=True)
return [('User', pk, False) for pk in user_pks]
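When a Group changes, every cached User is marked stale: each triple names the cached model, the primary key, and an immediate-rebuild flag (`False` appears to defer regeneration to the next read; that reading is an inference from the surrounding code, not stated here). A minimal, Django-free illustration of the shape:

def invalidation_triples(user_pks):
    # Same contract as above: one ('User', pk, immediate) triple per user.
    return [('User', pk, False) for pk in user_pks]

assert invalidation_triples([1, 2]) == [('User', 1, False), ('User', 2, False)]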
<SYSTEM_TASK:> Convert a Question to a cached instance representation. <END_TASK> <USER_TASK:> Description: def question_default_serializer(self, obj): """Convert a Question to a cached instance representation."""
if not obj:
    return None
self.question_default_add_related_pks(obj)
return dict((
    ('id', obj.id),
    ('question_text', obj.question_text),
    self.field_to_json('DateTime', 'pub_date', obj.pub_date),
    self.field_to_json(
        'PKList', 'choices', model=Choice, pks=obj._choice_pks),
))
<SYSTEM_TASK:> Load a Question from the database. <END_TASK> <USER_TASK:> Description: def question_default_loader(self, pk): """Load a Question from the database."""
try:
    obj = Question.objects.get(pk=pk)
except Question.DoesNotExist:
    return None
else:
    self.question_default_add_related_pks(obj)
    return obj