<SYSTEM_TASK:> List directory entries in the repository. <END_TASK> <USER_TASK:> Description: def do_list(self, subcmd, opts, *args): """List directory entries in the repository. usage: list [TARGET...] List each TARGET file and the contents of each TARGET directory as they exist in the repository. If TARGET is a working copy path, the corresponding repository URL will be used. The default TARGET is '.', meaning the repository URL of the current working directory. With --verbose, the following fields show the status of the item: Revision number of the last commit Author of the last commit Size (in bytes) Date and time of the last commit ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Apply the differences between two sources to a working copy path. <END_TASK> <USER_TASK:> Description: def do_merge(self, subcmd, opts, *args): """Apply the differences between two sources to a working copy path. usage: 1. merge sourceURL1[@N] sourceURL2[@M] [WCPATH] 2. merge sourceWCPATH1@N sourceWCPATH2@M [WCPATH] 3. merge -r N:M SOURCE[@REV] [WCPATH] 1. In the first form, the source URLs are specified at revisions N and M. These are the two sources to be compared. The revisions default to HEAD if omitted. 2. In the second form, the URLs corresponding to the source working copy paths define the sources to be compared. The revisions must be specified. 3. In the third form, SOURCE can be a URL, or working copy item in which case the corresponding URL is used. This URL in revision REV is compared as it existed between revisions N and M. If REV is not specified, HEAD is assumed. WCPATH is the working copy path that will receive the changes. If WCPATH is omitted, a default value of '.' is assumed, unless the sources have identical basenames that match a file within '.': in which case, the differences will be applied to that file. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Create a new directory under version control. <END_TASK> <USER_TASK:> Description: def do_mkdir(self, subcmd, opts, *args): """Create a new directory under version control. usage: 1. mkdir PATH... 2. mkdir URL... Create version controlled directories. 1. Each directory specified by a working copy PATH is created locally and scheduled for addition upon the next commit. 2. Each directory specified by a URL is created in the repository via an immediate commit. In both cases, all the intermediate directories must already exist. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Remove PROPNAME from files, dirs, or revisions. <END_TASK> <USER_TASK:> Description: def do_propdel(self, subcmd, opts, *args): """Remove PROPNAME from files, dirs, or revisions. usage: 1. propdel PROPNAME [PATH...] 2. propdel PROPNAME --revprop -r REV [URL] 1. Removes versioned props in working copy. 2. Removes unversioned remote prop on repos revision. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Edit property PROPNAME with an external editor on targets. <END_TASK> <USER_TASK:> Description: def do_propedit(self, subcmd, opts, *args): """Edit property PROPNAME with an external editor on targets. usage: 1. propedit PROPNAME PATH... 2. propedit PROPNAME --revprop -r REV [URL] 1. Edits versioned props in working copy. 2. Edits unversioned remote prop on repos revision. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Print value of PROPNAME on files, dirs, or revisions. <END_TASK> <USER_TASK:> Description: def do_propget(self, subcmd, opts, *args): """Print value of PROPNAME on files, dirs, or revisions. usage: 1. propget PROPNAME [PATH...] 2. propget PROPNAME --revprop -r REV [URL] 1. Prints versioned prop in working copy. 2. Prints unversioned remote prop on repos revision. By default, this subcommand will add an extra newline to the end of the property values so that the output looks pretty. Also, whenever there are multiple paths involved, each property value is prefixed with the path with which it is associated. Use the --strict option to disable these beautifications (useful, for example, when redirecting binary property values to a file). ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> List all properties on files, dirs, or revisions. <END_TASK> <USER_TASK:> Description: def do_proplist(self, subcmd, opts, *args): """List all properties on files, dirs, or revisions. usage: 1. proplist [PATH...] 2. proplist --revprop -r REV [URL] 1. Lists versioned props in working copy. 2. Lists unversioned remote props on repos revision. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Set PROPNAME to PROPVAL on files, dirs, or revisions. <END_TASK> <USER_TASK:> Description: def do_propset(self, subcmd, opts, *args): """Set PROPNAME to PROPVAL on files, dirs, or revisions. usage: 1. propset PROPNAME [PROPVAL | -F VALFILE] PATH... 2. propset PROPNAME --revprop -r REV [PROPVAL | -F VALFILE] [URL] 1. Creates a versioned, local propchange in working copy. 2. Creates an unversioned, remote propchange on repos revision. Note: svn recognizes the following special versioned properties but will store any arbitrary properties set: svn:ignore - A newline separated list of file patterns to ignore. svn:keywords - Keywords to be expanded. Valid keywords are: URL, HeadURL - The URL for the head version of the object. Author, LastChangedBy - The last person to modify the file. Date, LastChangedDate - The date/time the object was last modified. Rev, Revision, - The last revision the object changed. LastChangedRevision Id - A compressed summary of the previous 4 keywords. svn:executable - If present, make the file executable. This property cannot be set on a directory. A non-recursive attempt will fail, and a recursive attempt will set the property only on the file children of the directory. svn:eol-style - One of 'native', 'LF', 'CR', 'CRLF'. svn:mime-type - The mimetype of the file. Used to determine whether to merge the file, and how to serve it from Apache. A mimetype beginning with 'text/' (or an absent mimetype) is treated as text. Anything else is treated as binary. svn:externals - A newline separated list of module specifiers, each of which consists of a relative directory path, optional revision flags, and an URL. For example foo http://example.com/repos/zig foo/bar -r 1234 http://example.com/repos/zag ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Remove 'conflicted' state on working copy files or directories. <END_TASK> <USER_TASK:> Description: def do_resolved(self, subcmd, opts, *args): """Remove 'conflicted' state on working copy files or directories. usage: resolved PATH... Note: this subcommand does not semantically resolve conflicts or remove conflict markers; it merely removes the conflict-related artifact files and allows PATH to be committed again. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Print the status of working copy files and directories. <END_TASK> <USER_TASK:> Description: def do_status(self, subcmd, opts, *args): """Print the status of working copy files and directories. usage: status [PATH...] With no args, print only locally modified items (no network access). With -u, add working revision and server out-of-date information. With -v, print full revision information on every item. The first five columns in the output are each one character wide: First column: Says if item was added, deleted, or otherwise changed ' ' no modifications 'A' Added 'C' Conflicted 'D' Deleted 'G' Merged 'I' Ignored 'M' Modified 'R' Replaced 'X' item is unversioned, but is used by an externals definition '?' item is not under version control '!' item is missing (removed by non-svn command) or incomplete '~' versioned item obstructed by some item of a different kind Second column: Modifications of a file's or directory's properties ' ' no modifications 'C' Conflicted 'M' Modified Third column: Whether the working copy directory is locked ' ' not locked 'L' locked Fourth column: Scheduled commit will contain addition-with-history ' ' no history scheduled with commit '+' history scheduled with commit Fifth column: Whether the item is switched relative to its parent ' ' normal 'S' switched The out-of-date information appears in the eighth column (with -u): '*' a newer revision exists on the server ' ' the working copy is up to date Remaining fields are variable width and delimited by spaces: The working revision (with -u or -v) The last committed revision and last committed author (with -v) The working copy path is always the final field, so it can include spaces. Example output: svn status wc M wc/bar.c A + wc/qax.c svn status -u wc M 965 wc/bar.c * 965 wc/foo.c A + 965 wc/qax.c Head revision: 981 svn status --show-updates --verbose wc M 965 938 kfogel wc/bar.c * 965 922 sussman wc/foo.c A + 965 687 joe wc/qax.c 965 687 joe wc/zig.c Head revision: 981 ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Update the working copy to a different URL. <END_TASK> <USER_TASK:> Description: def do_switch(self, subcmd, opts, *args): """Update the working copy to a different URL. usage: 1. switch URL [PATH] 2. switch --relocate FROM TO [PATH...] 1. Update the working copy to mirror a new URL within the repository. This behaviour is similar to 'svn update', and is the way to move a working copy to a branch or tag within the same repository. 2. Rewrite working copy URL metadata to reflect a syntactic change only. This is used when the repository's root URL changes (such as a scheme or hostname change) but your working copy still reflects the same directory within the same repository. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Bring changes from the repository into the working copy. <END_TASK> <USER_TASK:> Description: def do_update(self, subcmd, opts, *args): """Bring changes from the repository into the working copy. usage: update [PATH...] If no revision given, bring working copy up-to-date with HEAD rev. Else synchronize working copy to revision given by -r. For each updated item a line will start with a character reporting the action taken. These characters have the following meaning: A Added D Deleted U Updated C Conflict G Merged A character in the first column signifies an update to the actual file, while updates to the file's properties are shown in the second column. ${cmd_option_list} """
print "'svn %s' opts: %s" % (subcmd, opts) print "'svn %s' args: %s" % (subcmd, args)
<SYSTEM_TASK:> Evaluates a redirect url by consulting GET, POST and the session. <END_TASK> <USER_TASK:> Description: def default_redirect(request, fallback_url, **kwargs): """ Evaluates a redirect url by consulting GET, POST and the session. """
redirect_field_name = kwargs.get("redirect_field_name", "next")
next = request.POST.get(redirect_field_name,
                        request.GET.get(redirect_field_name, ''))
if not next:
    # try the session if available
    if hasattr(request, "session"):
        session_key_value = kwargs.get("session_key_value", "redirect_to")
        next = request.session.get(session_key_value)
is_safe = functools.partial(
    ensure_safe_url,
    allowed_protocols=kwargs.get("allowed_protocols"),
    allowed_host=request.get_host()
)
redirect_to = next if next and is_safe(next) else fallback_url
# perform one last check to ensure the URL is safe to redirect to. if it
# is not then we should bail here as it is likely developer error and
# they should be notified
is_safe(redirect_to, raise_on_fail=True)
return redirect_to
<SYSTEM_TASK:> Add error message for given field path. <END_TASK> <USER_TASK:> Description: def add_error(self, path, error): """Add error message for given field path. Example: :: builder = ValidationErrorBuilder() builder.add_error('foo.bar.baz', 'Some error') print builder.errors # => {'foo': {'bar': {'baz': 'Some error'}}} :param str path: '.'-separated list of field names :param str error: Error message """
self.errors = merge_errors(self.errors, self._make_error(path, error))
<SYSTEM_TASK:> Automatically find a suitable period to use. <END_TASK> <USER_TASK:> Description: def find_suitable_period(): """ Automatically find a suitable period to use. Factors are best, because they will have 1 left over when dividing SIZE+1. This only needs to be run once, on import. """
# The highest acceptable factor will be the square root of the size.
highest_acceptable_factor = int(math.sqrt(SIZE))

# Too high a factor (eg SIZE/2) and the interval is too small, too
# low (eg 2) and the period is too small.
# We would prefer it to be lower than the number of VALID_CHARS, but more
# than say 4.
starting_point = len(VALID_CHARS) > 14 and len(VALID_CHARS) / 2 or 13
for p in range(starting_point, 7, -1) \
        + range(highest_acceptable_factor, starting_point + 1, -1) \
        + [6, 5, 4, 3, 2]:
    if SIZE % p == 0:
        return p
raise Exception("No valid period could be found for SIZE=%d.\n"
                "Try avoiding prime numbers" % SIZE)
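The search order is easiest to see with concrete numbers. Below is a minimal, self-contained Python 3 re-sketch of the same search; SIZE and the alphabet size are made-up demo values, passed as parameters instead of being read from module globals:

import math

def pick_period(size, n_chars):
    # Mirror the search order above: mid-range candidates first,
    # then larger factors up to sqrt(size), then small fallbacks.
    highest = int(math.sqrt(size))
    start = n_chars // 2 if n_chars > 14 else 13
    candidates = (list(range(start, 7, -1))
                  + list(range(highest, start + 1, -1))
                  + [6, 5, 4, 3, 2])
    for p in candidates:
        if size % p == 0:
            return p
    raise ValueError("no valid period for size=%d" % size)

print(pick_period(10000, 24))  # 10 -- a factor of 10000 in the preferred range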
<SYSTEM_TASK:> Convert a base 10 number to a base X string. <END_TASK> <USER_TASK:> Description: def friendly_number(num): """ Convert a base 10 number to a base X string. Characters from VALID_CHARS are chosen, to convert the number to eg base 24, if there are 24 characters to choose from. Use valid chars to choose characters that are friendly, avoiding ones that could be confused in print or over the phone. """
# Convert to a (shorter) string for human consumption
string = ""
# The length of the string can be determined by STRING_LENGTH or by how many
# characters are necessary to present a base 30 representation of SIZE.
while STRING_LENGTH and len(string) <= STRING_LENGTH \
        or len(VALID_CHARS) ** len(string) <= SIZE:
    # PREpend string (to remove all obvious signs of order)
    string = VALID_CHARS[num % len(VALID_CHARS)] + string
    num = num / len(VALID_CHARS)
return string
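A runnable Python 3 re-sketch of the function with made-up module constants (the real VALID_CHARS, SIZE and STRING_LENGTH come from settings elsewhere):

# Hypothetical demo values -- the real module derives these from settings.
VALID_CHARS = "3456789ACDEFGHJKLQRSTUVWXY"  # 26 'friendly' characters
SIZE = 10000
STRING_LENGTH = 0  # let the length be driven by SIZE

def friendly_number(num):
    string = ""
    while STRING_LENGTH and len(string) <= STRING_LENGTH \
            or len(VALID_CHARS) ** len(string) <= SIZE:
        string = VALID_CHARS[num % len(VALID_CHARS)] + string
        num = num // len(VALID_CHARS)  # floor division, as in the Python 2 original
    return string

print(friendly_number(0))     # '333' -- always 3 chars, since 26**3 > 10000 >= 26**2
print(friendly_number(9999))  # 'JTK' (9999 = 14*26**2 + 20*26 + 15)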
<SYSTEM_TASK:> Returns a function that takes a dictionary and returns the value of <END_TASK> <USER_TASK:> Description: def dict_value_hint(key, mapper=None): """Returns a function that takes a dictionary and returns the value of a particular key. The returned value can be optionally processed by the `mapper` function. To be used as a type hint in :class:`OneOf`. """
if mapper is None:
    mapper = identity

def hinter(data):
    return mapper(data.get(key))

return hinter
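A small usage illustration, assuming the function above is in scope; the key name and mapper are invented for the demo:

# Hypothetical usage: route polymorphic dicts on their 'type' key,
# normalising case before the lookup.
get_type = dict_value_hint('type', mapper=lambda v: v.lower() if v else v)

print(get_type({'type': 'Admin', 'name': 'bob'}))  # 'admin'
print(get_type({'name': 'bob'}))                   # None -- key missing, mapper gets None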
<SYSTEM_TASK:> Convenient way to create a new type by adding validation to existing type. <END_TASK> <USER_TASK:> Description: def validated_type(base_type, name=None, validate=None): """Convenient way to create a new type by adding validation to existing type. Example: :: Ipv4Address = validated_type( String, 'Ipv4Address', # regexp simplified for demo purposes Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address') ) Percentage = validated_type(Integer, validate=Range(0, 100)) # The above is the same as class Ipv4Address(String): def __init__(self, *args, **kwargs): super(Ipv4Address, self).__init__(*args, **kwargs) self.validators.insert(0, Regexp('^\d+\.\d+\.\d+\.\d+$', error='Invalid IP address')) class Percentage(Integer): def __init__(self, *args, **kwargs): super(Percentage, self).__init__(*args, **kwargs) self.validators.insert(0, Range(0, 100)) :param Type base_type: Base type for a new type. :param name str: Optional class name for new type (will be shown in places like repr). :param validate: A validator or list of validators for this data type. See `Type.validate` for details. """
if validate is None:
    validate = []
if not is_sequence(validate):
    validate = [validate]

class ValidatedSubtype(base_type):
    if name is not None:
        __name__ = name

    def __init__(self, *args, **kwargs):
        super(ValidatedSubtype, self).__init__(*args, **kwargs)
        for validator in reversed(validate):
            self.validators.insert(0, validator)

return ValidatedSubtype
<SYSTEM_TASK:> Takes serialized data and returns validation errors or None. <END_TASK> <USER_TASK:> Description: def validate(self, data, context=None): """Takes serialized data and returns validation errors or None. :param data: Data to validate. :param context: Context data. :returns: validation errors or None """
try:
    self.load(data, context)
    return None
except ValidationError as ve:
    return ve.messages
<SYSTEM_TASK:> Load data and update existing object. <END_TASK> <USER_TASK:> Description: def load_into(self, obj, data, inplace=True, *args, **kwargs): """Load data and update existing object. :param obj: Object to update with deserialized data. :param data: Raw data to get value to deserialize from. :param bool inplace: If True update data inplace; otherwise - create new data. :param kwargs: Same keyword arguments as for :meth:`Type.load`. :returns: Updated object. :raises: :exc:`~lollipop.errors.ValidationError` """
if obj is None:
    raise ValueError('Load target should not be None')

if data is MISSING:
    return

if data is None:
    self._fail('required')

if not is_mapping(data):
    self._fail('invalid', data=data)

errors_builder = ValidationErrorBuilder()

data1 = {}
for name, field in iteritems(self.fields):
    try:
        if name in data:
            # Load new data
            value = field.load_into(
                obj, name, data,
                inplace=not self.immutable and inplace,
                *args, **kwargs
            )
        else:
            # Retrieve data from existing object
            value = field.load(name, {
                name: field.dump(name, obj, *args, **kwargs)
            })

        if value is not MISSING:
            data1[name] = value
    except ValidationError as ve:
        errors_builder.add_error(name, ve.messages)

if self.allow_extra_fields is False:
    field_names = [name for name, _ in iteritems(self.fields)]
    for name in data:
        if name not in field_names:
            errors_builder.add_error(name, self._error_messages['unknown'])
elif isinstance(self.allow_extra_fields, Field):
    field_names = [name for name, _ in iteritems(self.fields)]
    for name in data:
        if name not in field_names:
            try:
                loaded = self.allow_extra_fields.load_into(
                    obj, name, data,
                    inplace=not self.immutable and inplace,
                    *args, **kwargs
                )
                if loaded != MISSING:
                    data1[name] = loaded
            except ValidationError as ve:
                errors_builder.add_error(name, ve.messages)

errors_builder.raise_errors()

data2 = super(Object, self).load(data1, *args, **kwargs)

if self.immutable or not inplace:
    result = data2
    if self.constructor:
        result = self.constructor(**result)
else:
    for name, value in iteritems(data2):
        field = self.fields.get(name, self.allow_extra_fields)
        if not isinstance(field, Field):
            continue
        field.set_value(name, obj, value, *args, **kwargs)
    result = obj

return result
<SYSTEM_TASK:> Takes target object and serialized data, tries to update that object <END_TASK> <USER_TASK:> Description: def validate_for(self, obj, data, *args, **kwargs): """Takes target object and serialized data, tries to update that object with data and validate result. Returns validation errors or None. Object is not updated. :param obj: Object to check data validity against. In case the data is partial object is used to get the rest of data from. :param data: Data to validate. Can be partial (not all schema field data is present). :param kwargs: Same keyword arguments as for :meth:`Type.load`. :returns: validation errors or None """
try:
    self.load_into(obj, data, inplace=False, *args, **kwargs)
    return None
except ValidationError as ve:
    return ve.messages
<SYSTEM_TASK:> Filter the response to only include the elements that are countries. <END_TASK> <USER_TASK:> Description: def filter_country_locations(api_response, is_country=True): """ Filter the response to only include the elements that are countries. This uses the 'api_response' object as input. Plain `list`s are also valid, but they must contain the location elements, not the `items` wrapper. """
return [item for item in api_response if item[ISCOUNTRY] == is_country]
<SYSTEM_TASK:> Returns next invoice number - reset yearly. <END_TASK> <USER_TASK:> Description: def _get_next_number(self): """ Returns next invoice number - reset yearly. .. warning:: This is only used to prepopulate ``number`` field on saving new invoice. To get invoice number always use ``number`` field. .. note:: To get invoice full number use ``invoice_id`` field. :return: integer (generated next number) """
# Fetch the invoices for the current year
relative_invoices = Invoice.objects.filter(
    invoice_date__year=self.invoice_date.year)
# take the highest value of the 'number' field, or 0 if there is none
last_number = relative_invoices.aggregate(Max('number'))['number__max'] or 0
return last_number + 1
<SYSTEM_TASK:> Check if the given function has no more arguments than given. If so, wrap it <END_TASK> <USER_TASK:> Description: def make_context_aware(func, numargs): """ Check if the given function has no more arguments than given. If so, wrap it into another function that takes an extra argument and drops it. Used to support user-provided callback functions that are not context aware. """
try:
    if inspect.ismethod(func):
        arg_count = len(inspect.getargspec(func).args) - 1
    elif inspect.isfunction(func):
        arg_count = len(inspect.getargspec(func).args)
    elif inspect.isclass(func):
        arg_count = len(inspect.getargspec(func.__init__).args) - 1
    else:
        arg_count = len(inspect.getargspec(func.__call__).args) - 1
except TypeError:
    arg_count = numargs

if arg_count <= numargs:
    def normalized(*args):
        return func(*args[:-1])
    return normalized

return func
<SYSTEM_TASK:> Check if the given function has more arguments than given. Call it with context <END_TASK> <USER_TASK:> Description: def call_with_context(func, context, *args): """ Check if the given function has more arguments than given. Call it with context as the last argument or without it. """
return make_context_aware(func, len(args))(*args + (context,))
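A small illustration of the pair, assuming both helpers above are in scope (and an interpreter where `inspect.getargspec` still exists, as the Python 2 original assumes):

def plain(value):            # knows nothing about context
    return value * 2

def aware(value, context):   # wants the context as a trailing argument
    return value * context['factor']

ctx = {'factor': 10}
print(call_with_context(plain, ctx, 3))  # 6  -- context silently dropped
print(call_with_context(aware, ctx, 3))  # 30 -- context passed through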
<SYSTEM_TASK:> Ensure all user-supplied settings exist, or throw a useful error message. <END_TASK> <USER_TASK:> Description: def validate_settings(settings): """Ensure all user-supplied settings exist, or throw a useful error message. :param obj settings: The Django settings object. """
if not (settings.STORMPATH_ID and settings.STORMPATH_SECRET):
    raise ImproperlyConfigured('Both STORMPATH_ID and STORMPATH_SECRET must be specified in settings.py.')

if not settings.STORMPATH_APPLICATION:
    raise ImproperlyConfigured('STORMPATH_APPLICATION must be specified in settings.py.')
<SYSTEM_TASK:> Stormpath user is active by default if e-mail verification is <END_TASK> <USER_TASK:> Description: def get_default_is_active(): """ Stormpath user is active by default if e-mail verification is disabled. """
directory = APPLICATION.default_account_store_mapping.account_store
verif_email = directory.account_creation_policy.verification_email_status
return verif_email == AccountCreationPolicy.EMAIL_STATUS_DISABLED
<SYSTEM_TASK:> Check if Stormpath authentication works <END_TASK> <USER_TASK:> Description: def _stormpath_authenticate(self, username, password): """Check if Stormpath authentication works :param username: Can be actual username or email :param password: Account password Returns an account object if successful or None otherwise. """
APPLICATION = get_application()
try:
    result = APPLICATION.authenticate_account(username, password)
    return result.account
except Error as e:
    log.debug(e)
    return None
<SYSTEM_TASK:> Helper method for getting the groups that <END_TASK> <USER_TASK:> Description: def _get_group_difference(self, sp_groups): """Helper method for getting the groups that are present in the local db but not on Stormpath, and the other way around."""
db_groups = set(Group.objects.all().values_list('name', flat=True))
missing_from_db = set(sp_groups).difference(db_groups)
missing_from_sp = db_groups.difference(sp_groups)
return (missing_from_db, missing_from_sp)
<SYSTEM_TASK:> Helper method for saving to the local db the groups <END_TASK> <USER_TASK:> Description: def _mirror_groups_from_stormpath(self): """Helper method for saving to the local db the groups that are missing locally but exist on Stormpath"""
APPLICATION = get_application()
sp_groups = [g.name for g in APPLICATION.groups]
missing_from_db, missing_from_sp = self._get_group_difference(sp_groups)
if missing_from_db:
    groups_to_create = []
    for g_name in missing_from_db:
        groups_to_create.append(Group(name=g_name))
    Group.objects.bulk_create(groups_to_create)
<SYSTEM_TASK:> Build a format string that writes given data to given locations. Can be <END_TASK> <USER_TASK:> Description: def fmtstring(offset, writes, written=0, max_width=2, target=None): """ Build a format string that writes given data to given locations. Can be used to easily create format strings to exploit format string bugs. `writes` is a list of 2- or 3-item tuples. Each tuple represents a memory write starting with an absolute address, then the data to write as an integer and finally the width (1, 2, 4 or 8) of the write. :func:`fmtstring` will break up the writes and try to optimise the order to minimise the amount of dummy output generated. Args: offset(int): The parameter offset where the format string starts. writes(list): A list of 2 or 3 item tuples. written(int): How many bytes have already been written before the built format string starts. max_width(int): The maximum width of the writes (1, 2 or 4). target(:class:`pwnypack.target.Target`): The target architecture. Returns: bytes: The format string that will execute the specified memory writes. Example: The following example will (on a 32bit architecture) build a format string that writes 0xc0debabe to the address 0xdeadbeef and the byte 0x90 to 0xdeadbeef + 4 assuming that the input buffer is located at offset 3 on the stack. >>> from pwny import * >>> fmtstring(3, [(0xdeadbeef, 0xc0debabe), (0xdeadbeef + 4, 0x90, 1)]) """
if max_width not in (1, 2, 4):
    raise ValueError('max_width should be 1, 2 or 4')

if target is None:
    target = pwnypack.target.target

addrs = []
cmds = []
piece_writes = []

for write in writes:
    if len(write) == 2:
        addr, value = write
        width = target.bits // 8
    else:
        addr, value, width = write
        if width not in (1, 2, 4, 8):
            raise ValueError('Invalid write width')

    piece_width = min(max_width, width)
    piece_value = getattr(pwnypack.packing, 'P%d' % (8 * width))(value, target=target)
    piece_unpack = getattr(pwnypack.packing, 'U%d' % (piece_width * 8))

    for i in range(0, width, piece_width):
        piece_writes.append(
            (piece_width, addr, piece_unpack(piece_value[i:i + piece_width], target=target))
        )
        addr += piece_width

written += len(piece_writes) * int(target.bits) // 8

piece_writes.sort(key=lambda w_a_v: (w_a_v[2] - written) % (2 ** (max_width * 8)))

for piece_width, piece_addr, piece_value in piece_writes:
    addrs.append(pwnypack.packing.P(piece_addr, target=target))

    piece_modulo = 2 ** (piece_width * 8)
    padding = (piece_value - written) % piece_modulo
    if padding:
        cmds.append(b'%' + str(padding).encode('ascii') + b'c')
    written = piece_value

    cmds.append(b'%' + str(offset).encode('ascii') + b'$' + FMTSTRING_OPS[piece_width])
    offset += 1

return b''.join(addrs + cmds)
<SYSTEM_TASK:> Serve private files to users with read permission. <END_TASK> <USER_TASK:> Description: def serve_private_file(request, path): """ Serve private files to users with read permission. """
logger.debug('Serving {0} to {1}'.format(path, request.user))

if not permissions.has_read_permission(request, path):
    if settings.DEBUG:
        raise PermissionDenied
    else:
        raise Http404('File not found')

return server.serve(request, path=path)
<SYSTEM_TASK:> Extract a symbol from an ELF file. <END_TASK> <USER_TASK:> Description: def extract_symbol_app(parser, _, args): # pragma: no cover """ Extract a symbol from an ELF file. """
parser.add_argument('file', help='ELF file to extract a symbol from')
parser.add_argument('symbol', help='the symbol to extract')
args = parser.parse_args(args)
return ELF(args.file).get_symbol(args.symbol).content
<SYSTEM_TASK:> Parse the ELF header in ``data`` and populate the properties. <END_TASK> <USER_TASK:> Description: def _parse_header(self, data): """ Parse the ELF header in ``data`` and populate the properties. Args: data(bytes): The ELF header. """
(magic, word_size, byte_order, version, osabi, abi_version, _), data = \
    unpack('4sBBBBB7s', data[:16]), data[16:]

assert magic == self._ELF_MAGIC, 'Missing ELF magic'
assert word_size in (1, 2), 'Invalid word size'
assert byte_order in (1, 2), 'Invalid byte order'
assert version == 1, 'Invalid version'

self.osabi = self.OSABI(osabi)
self.abi_version = abi_version

endian = Target.Endian(byte_order - 1)

(type_, machine, version), data = unpack('HHI', data[:8], endian=endian), data[8:]

try:
    self.type = self.Type(type_)
except ValueError:
    self.type = self.Type.unknown

try:
    self.machine = ELF.Machine(machine)
except ValueError:
    self.machine = ELF.Machine.unknown

assert version == 1, 'Invalid version'

if self.machine is ELF.Machine.i386:
    arch = Target.Arch.x86
    assert word_size == 1, 'Unexpected ELF64 for machine type x86'
    assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'
elif self.machine is ELF.Machine.x86_64:
    arch = Target.Arch.x86
    assert word_size == 2, 'Unexpected ELF32 for machine type x86_64'
    assert endian is Target.Endian.little, 'Unexpected big-endian for machine type x86'
elif self.machine is ELF.Machine.arm:
    arch = Target.Arch.arm
    assert word_size == 1, 'Unexpected ELF64 for machine type arm'
elif self.machine is ELF.Machine.aarch64:
    arch = Target.Arch.arm
    assert word_size == 2, 'Unexpected ELF32 for machine type aarch64'
else:
    arch = Target.Arch.unknown

self.arch = arch
self.bits = 32 * word_size
self.endian = endian

if self.bits == 32:
    fmt = 'IIIIHHHHHH'
else:
    fmt = 'QQQIHHHHHH'

fmt_size = pack_size(fmt)
(self.entry, self.phoff, self.shoff, self.flags, self.hsize, self.phentsize,
 self.phnum, self.shentsize, self.shnum, self.shstrndx) = \
    unpack(fmt, data[:fmt_size], target=self)
<SYSTEM_TASK:> Parse an ELF file and fill the class' properties. <END_TASK> <USER_TASK:> Description: def parse_file(self, f): """ Parse an ELF file and fill the class' properties. Arguments: f(file or str): The (path to) the ELF file to read. """
if type(f) is str:
    self.f = open(f, 'rb')
else:
    self.f = f
self._parse_header(self.f.read(64))
<SYSTEM_TASK:> Get a specific section header by index or name. <END_TASK> <USER_TASK:> Description: def get_section_header(self, section): """ Get a specific section header by index or name. Args: section(int or str): The index or name of the section header to return. Returns: :class:`~ELF.SectionHeader`: The section header. Raises: KeyError: The requested section header does not exist. """
self._ensure_section_headers_loaded()
if type(section) is int:
    return self._section_headers_by_index[section]
else:
    return self._section_headers_by_name[section]
<SYSTEM_TASK:> Get a specific symbol by index or name. <END_TASK> <USER_TASK:> Description: def get_symbol(self, symbol): """ Get a specific symbol by index or name. Args: symbol(int or str): The index or name of the symbol to return. Returns: ELF.Symbol: The symbol. Raises: KeyError: The requested symbol does not exist. """
self._ensure_symbols_loaded()
if type(symbol) is int:
    return self._symbols_by_index[symbol]
else:
    return self._symbols_by_name[symbol]
<SYSTEM_TASK:> Load a .pyc file from a file-like object. <END_TASK> <USER_TASK:> Description: def pyc_load(fp): """ Load a .pyc file from a file-like object. Arguments: fp(file): The file-like object to read. Returns: PycFile: The parsed representation of the .pyc file. """
magic_1 = U16(fp.read(2), target=MARSHAL_TARGET)
magic_2 = U16(fp.read(2), target=MARSHAL_TARGET)

internals = MAGIC_MAP.get(magic_1)
if internals is None:
    raise ValueError('Invalid or unknown magic (%d).' % magic_1)

if magic_2 != 2573:
    raise ValueError('Invalid secondary magic (%d).' % magic_2)

timestamp = datetime.datetime.fromtimestamp(U32(fp.read(4), target=MARSHAL_TARGET))

if internals['version'] >= 33:
    file_size = U32(fp.read(4))
else:
    file_size = None

code_object = marshal_load(fp, internals)

return PycFile(magic_1, internals, timestamp, file_size, code_object)
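Typical usage just opens the file in binary mode. A sketch with a hypothetical path, assuming `PycFile` exposes its constructor arguments as attributes:

# Hypothetical usage: inspect a compiled module's header fields.
with open('example.cpython-27.pyc', 'rb') as fp:  # path is made up
    pyc = pyc_load(fp)

print(pyc.magic)        # the first magic word
print(pyc.timestamp)    # source mtime recorded at compile time
print(pyc.code_object)  # the unmarshalled top-level code object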
<SYSTEM_TASK:> Returns points on convex hull of an array of points in CCW order. <END_TASK> <USER_TASK:> Description: def convex_hull(features): """Returns points on convex hull of an array of points in CCW order."""
points = sorted([s.point() for s in features])
l = reduce(_keep_left, points, [])
u = reduce(_keep_left, reversed(points), [])
return l.extend(u[i] for i in xrange(1, len(u) - 1)) or l
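The `_keep_left` reducer is defined elsewhere in the module; a plausible sketch consistent with Andrew's monotone-chain algorithm (assuming `point()` returns 2-tuples) pops trailing points that do not make a counter-clockwise turn:

def _cross(o, a, b):
    # z-component of (a - o) x (b - o); > 0 means a counter-clockwise turn
    return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

def _keep_left(hull, p):
    # Drop trailing points while the turn hull[-2] -> hull[-1] -> p is not strictly CCW.
    while len(hull) > 1 and _cross(hull[-2], hull[-1], p) <= 0:
        hull.pop()
    hull.append(p)
    return hull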
<SYSTEM_TASK:> Disassemble the bytecode of this code object into a series of <END_TASK> <USER_TASK:> Description: def disassemble(self, annotate=False, blocks=False): """ Disassemble the bytecode of this code object into a series of opcodes and labels. Can also annotate the opcodes and group the opcodes into blocks based on the labels. Arguments: annotate(bool): Whether to annotate the operations. blocks(bool): Whether to group the operations into blocks. Returns: list: A list of :class:`Op` (or :class:`AnnotatedOp`) instances and labels. """
ops = disassemble(self.co_code, self.internals)

if annotate:
    ops = [self.annotate_op(op) for op in ops]

if blocks:
    return blocks_from_ops(ops)
else:
    return ops
<SYSTEM_TASK:> Convert this instance back into a native python code object. This <END_TASK> <USER_TASK:> Description: def to_code(self): """ Convert this instance back into a native python code object. This only works if the internals of the code object are compatible with those of the running python version. Returns: types.CodeType: The native python code object. """
if self.internals is not get_py_internals():
    raise ValueError('CodeObject is not compatible with the running python internals.')

if six.PY2:
    return types.CodeType(
        self.co_argcount, self.co_nlocals, self.co_stacksize, self.co_flags,
        self.co_code, self.co_consts, self.co_names, self.co_varnames,
        self.co_filename, self.co_name, self.co_firstlineno, self.co_lnotab,
        self.co_freevars, self.co_cellvars
    )
else:
    return types.CodeType(
        self.co_argcount, self.co_kwonlyargcount, self.co_nlocals,
        self.co_stacksize, self.co_flags, self.co_code, self.co_consts,
        self.co_names, self.co_varnames, self.co_filename, self.co_name,
        self.co_firstlineno, self.co_lnotab, self.co_freevars, self.co_cellvars
    )
<SYSTEM_TASK:> Read the index, and load the document list from it <END_TASK> <USER_TASK:> Description: def reload_index(self, progress_cb=dummy_progress_cb): """ Read the index, and load the document list from it Arguments: callback --- called during the indexation (may be called *often*). step : DocSearch.INDEX_STEP_READING or DocSearch.INDEX_STEP_SORTING progression : how many elements done yet total : number of elements to do document (only if step == DocSearch.INDEX_STEP_READING): file being read """
nb_results = self.index.start_reload_index()
progress = 0
while self.index.continue_reload_index():
    progress_cb(progress, nb_results, self.INDEX_STEP_LOADING)
    progress += 1
progress_cb(1, 1, self.INDEX_STEP_LOADING)
self.index.end_reload_index()
<SYSTEM_TASK:> Replace 'old_label' by 'new_label' on all the documents. Takes care of <END_TASK> <USER_TASK:> Description: def update_label(self, old_label, new_label, callback=dummy_progress_cb): """ Replace 'old_label' by 'new_label' on all the documents. Takes care of updating the index. """
current = 0
total = self.index.get_nb_docs()
self.index.start_update_label(old_label, new_label)
while True:
    (op, doc) = self.index.continue_update_label()
    if op == 'end':
        break
    callback(current, total, self.LABEL_STEP_UPDATING, doc)
    current += 1
self.index.end_update_label()
<SYSTEM_TASK:> Remove the label 'label' from all the documents. Takes care of updating <END_TASK> <USER_TASK:> Description: def destroy_label(self, label, callback=dummy_progress_cb): """ Remove the label 'label' from all the documents. Takes care of updating the index. """
current = 0
total = self.index.get_nb_docs()
self.index.start_destroy_label(label)
while True:
    (op, doc) = self.index.continue_destroy_label()
    if op == 'end':
        break
    callback(current, total, self.LABEL_STEP_DESTROYING, doc)
    current += 1
self.index.end_destroy_label()
<SYSTEM_TASK:> Write one item to the object stream <END_TASK> <USER_TASK:> Description: def serialize_object(self, obj): """ Write one item to the object stream """
self.start_object(obj)

for field in obj._meta.local_fields:
    if field.serialize and getattr(field, 'include_in_xml', True):
        if field.rel is None:
            if self.selected_fields is None or field.attname in self.selected_fields:
                self.handle_field(obj, field)
        else:
            if self.selected_fields is None or field.attname[:-3] in self.selected_fields:
                self.handle_fk_field(obj, field)

# recursively serialize all foreign key relations
for (foreign_key_descriptor_name, foreign_key_descriptor) in get_foreign_key_desciptors(obj):
    # don't follow foreign keys that have a 'nofollow' attribute
    if foreign_key_descriptor.related.field.serialize \
            and not hasattr(foreign_key_descriptor.related.field, 'nofollow'):
        bound_foreign_key_descriptor = foreign_key_descriptor.__get__(obj)
        s = RecursiveXmlSerializer()
        s.serialize(bound_foreign_key_descriptor.all(), xml=self.xml, stream=self.stream)

# recursively serialize all one to one relations
# TODO: make this work for non abstract inheritance but without infinite recursion
# for (one_to_one_descriptor_name, one_to_one_descriptor) in get_one_to_one_descriptors(obj):
#     related_objects = []
#     try:
#         related_object = one_to_one_descriptor.__get__(obj)
#         related_objects.append(related_object)
#     except Exception as e:
#         pass
#
#     s = RecursiveXmlSerializer()
#     s.serialize(related_objects, xml=self.xml, stream=self.stream)

# add generic relations
for (generic_relation_descriptor_name, generic_relation_descriptor) in get_generic_relation_descriptors(obj):
    # generic relations always have serialize set to False so we always include them.
    bound_generic_relation_descriptor = generic_relation_descriptor.__get__(obj)
    s = RecursiveXmlSerializer()
    s.serialize(bound_generic_relation_descriptor.all(), xml=self.xml, stream=self.stream)

# serialize the default field descriptors:
for (default_field_descriptor_name, default_field_descriptor) in get_default_field_descriptors(obj):
    if default_field_descriptor.serialize:
        self.handle_field(obj, default_field_descriptor)

for field in obj._meta.many_to_many:
    if field.serialize:
        if self.selected_fields is None or field.attname in self.selected_fields:
            self.handle_m2m_field(obj, field)

self.end_object(obj)
<SYSTEM_TASK:> While easymode follows inverse relations for foreign keys, <END_TASK> <USER_TASK:> Description: def handle_m2m_field(self, obj, field): """ While easymode follows inverse relations for foreign keys, for many-to-many fields it follows the forward relation. While easymode excludes all relations to "self", you could still create a loop if you add one extra level of indirection. """
if field.rel.through._meta.auto_created:  # and obj.__class__ is not field.rel.to:
    # keep approximate recursion level
    with recursion_depth('handle_m2m_field') as recursion_level:
        # a stack trace is better than python crashing.
        if recursion_level > getattr(settings, 'RECURSION_LIMIT', sys.getrecursionlimit() / 10):
            raise Exception(MANY_TO_MANY_RECURSION_LIMIT_ERROR %
                            (field.name, obj.__class__.__name__, field.rel.to.__name__))

        self._start_relational_field(field)

        s = RecursiveXmlSerializer()
        s.serialize(getattr(obj, field.name).iterator(), xml=self.xml, stream=self.stream)

        self.xml.endElement("field")
<SYSTEM_TASK:> Prepare a capstone disassembler instance for a given target and syntax. <END_TASK> <USER_TASK:> Description: def prepare_capstone(syntax=AsmSyntax.att, target=None): """ Prepare a capstone disassembler instance for a given target and syntax. Args: syntax(AsmSyntax): The assembler syntax (Intel or AT&T). target(~pwnypack.target.Target): The target to create a disassembler instance for. The global target is used if this argument is ``None``. Returns: An instance of the capstone disassembler. Raises: NotImplementedError: If the specified target isn't supported. """
if not HAVE_CAPSTONE:
    raise NotImplementedError('pwnypack requires capstone to disassemble to AT&T and Intel syntax')

if target is None:
    target = pwnypack.target.target

if target.arch == pwnypack.target.Target.Arch.x86:
    if target.bits is pwnypack.target.Target.Bits.bits_32:
        md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
    else:
        md = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_64)
elif target.arch == pwnypack.target.Target.Arch.arm:
    mode = 0

    if target.bits is pwnypack.target.Target.Bits.bits_32:
        arch = capstone.CS_ARCH_ARM

        # Note: these are bitwise flag tests; using 'and' here would treat
        # any set mode flag as thumb/m-class/v8.
        if target.mode & pwnypack.target.Target.Mode.arm_thumb:
            mode = capstone.CS_MODE_THUMB
        else:
            mode = capstone.CS_MODE_ARM

        if target.mode & pwnypack.target.Target.Mode.arm_m_class:
            mode |= capstone.CS_MODE_MCLASS

        if target.mode & pwnypack.target.Target.Mode.arm_v8:
            mode |= capstone.CS_MODE_V8
    else:
        arch = capstone.CS_ARCH_ARM64

    if target.endian is pwnypack.target.Target.Endian.little:
        mode |= capstone.CS_MODE_LITTLE_ENDIAN
    else:
        mode |= capstone.CS_MODE_BIG_ENDIAN

    md = capstone.Cs(arch, mode)
else:
    raise NotImplementedError('Only x86 and ARM are currently supported.')

md.skipdata = True

if syntax is AsmSyntax.att:
    md.syntax = capstone.CS_OPT_SYNTAX_ATT
elif syntax is AsmSyntax.intel:
    md.skipdata_setup(('db', None, None))
else:
    raise NotImplementedError('capstone engine only implements AT&T and Intel syntax.')

return md
<SYSTEM_TASK:> Disassemble machine readable code into human readable statements. <END_TASK> <USER_TASK:> Description: def disasm(code, addr=0, syntax=None, target=None): """ Disassemble machine readable code into human readable statements. Args: code(bytes): The machine code that is to be disassembled. addr(int): The memory address of the code (used for relative references). syntax(AsmSyntax): The output assembler syntax. This defaults to nasm on x86 architectures, AT&T on all other architectures. target(~pwnypack.target.Target): The architecture for which the code was written. The global target is used if this argument is ``None``. Returns: list of str: The disassembled machine code. Raises: NotImplementedError: If an unsupported target platform is specified. RuntimeError: If ndisasm encounters an error. Example: >>> from pwny import * >>> disasm(b'_\\xc3', target=Target(arch=Target.Arch.x86, bits=64)) ['pop rdi', 'ret'] """
if target is None:
    target = pwnypack.target.target

if syntax is None:
    if target.arch is pwnypack.target.Target.Arch.x86:
        syntax = AsmSyntax.nasm
    else:
        syntax = AsmSyntax.att

if syntax is AsmSyntax.nasm:
    if target.arch is not pwnypack.target.Target.Arch.x86:
        raise NotImplementedError('nasm only supports x86.')

    p = subprocess.Popen(
        [
            'ndisasm',
            '-b',
            str(target.bits.value),
            '-o',
            str(addr),
            '-',
        ],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = p.communicate(code)
    if p.returncode:
        raise RuntimeError(stderr.decode('utf-8'))

    return [
        line.split(None, 2)[2]
        for line in stdout.decode('utf-8').split('\n')
        if line and not line.startswith(' ')
    ]
elif syntax in (AsmSyntax.intel, AsmSyntax.att):
    md = prepare_capstone(syntax, target)
    statements = []
    total_size = 0
    for (_, size, mnemonic, op_str) in md.disasm_lite(code, addr):
        statements.append((mnemonic + ' ' + op_str).strip())
        total_size += size
    return statements
else:
    raise NotImplementedError('Unsupported syntax for host platform.')
<SYSTEM_TASK:> Assemble code from commandline or stdin. <END_TASK> <USER_TASK:> Description: def asm_app(parser, cmd, args): # pragma: no cover """ Assemble code from commandline or stdin. Please note that all semicolons are replaced with newlines unless source is read from stdin. """
parser.add_argument('source', help='the code to assemble, read from stdin if omitted', nargs='?')
pwnypack.main.add_target_arguments(parser)
parser.add_argument(
    '--syntax', '-s',
    choices=AsmSyntax.__members__.keys(),
    default=None,
)
parser.add_argument(
    '--address', '-o',
    type=lambda v: int(v, 0),
    default=0,
    help='the address where the code is expected to run',
)

args = parser.parse_args(args)
target = pwnypack.main.target_from_arguments(args)

if args.syntax is not None:
    syntax = AsmSyntax.__members__[args.syntax]
else:
    syntax = None

if args.source is None:
    args.source = sys.stdin.read()
else:
    args.source = args.source.replace(';', '\n')

return asm(
    args.source,
    syntax=syntax,
    target=target,
    addr=args.address,
)
<SYSTEM_TASK:> Disassemble code from commandline or stdin. <END_TASK> <USER_TASK:> Description: def disasm_app(_parser, cmd, args): # pragma: no cover """ Disassemble code from commandline or stdin. """
parser = argparse.ArgumentParser(
    prog=_parser.prog,
    description=_parser.description,
)
parser.add_argument('code', help='the code to disassemble, read from stdin if omitted', nargs='?')
pwnypack.main.add_target_arguments(parser)
parser.add_argument(
    '--syntax', '-s',
    choices=AsmSyntax.__members__.keys(),
    default=None,
)
parser.add_argument(
    '--address', '-o',
    type=lambda v: int(v, 0),
    default=0,
    help='the address of the disassembled code',
)
parser.add_argument(
    '--format', '-f',
    choices=['hex', 'bin'],
    help='the input format (defaults to hex for commandline, bin for stdin)',
)

args = parser.parse_args(args)
target = pwnypack.main.target_from_arguments(args)

if args.syntax is not None:
    syntax = AsmSyntax.__members__[args.syntax]
else:
    syntax = None

if args.format is None:
    if args.code is None:
        args.format = 'bin'
    else:
        args.format = 'hex'

if args.format == 'hex':
    code = pwnypack.codec.dehex(pwnypack.main.string_value_or_stdin(args.code))
else:
    code = pwnypack.main.binary_value_or_stdin(args.code)

print('\n'.join(disasm(code, args.address, syntax=syntax, target=target)))
<SYSTEM_TASK:> Disassemble a symbol from an ELF file. <END_TASK> <USER_TASK:> Description: def disasm_symbol_app(_parser, _, args): # pragma: no cover """ Disassemble a symbol from an ELF file. """
parser = argparse.ArgumentParser(
    prog=_parser.prog,
    description=_parser.description,
)
parser.add_argument(
    '--syntax', '-s',
    choices=AsmSyntax.__members__.keys(),
    default=None,
)
parser.add_argument('file', help='ELF file to extract a symbol from')
parser.add_argument('symbol', help='the symbol to disassemble')
args = parser.parse_args(args)

if args.syntax is not None:
    syntax = AsmSyntax.__members__[args.syntax]
else:
    syntax = None

elf = ELF(args.file)
symbol = elf.get_symbol(args.symbol)
print('\n'.join(disasm(symbol.content, symbol.value, syntax=syntax, target=elf)))
<SYSTEM_TASK:> Parses a string of settings. <END_TASK> <USER_TASK:> Description: def settingsAsFacts(self, settings): """ Parses a string of settings. :param settings: String of settings in the form: ``set(name1, val1), set(name2, val2)...`` """
# Note: the second group's character class was malformed ('(\[a-zA-Z0-9_]+)');
# fixed to match the value the same way as the name.
pattern = re.compile(r'set\(([a-zA-Z0-9_]+),([a-zA-Z0-9_]+)\)')
pairs = pattern.findall(settings)
for name, val in pairs:
    self.set(name, val)
<SYSTEM_TASK:> Generates the required scripts. <END_TASK> <USER_TASK:> Description: def __scripts(self, filestem): """ Generates the required scripts. """
script_construct = open('%s/%s' % (self.tmpdir, RSD.CONSTRUCT), 'w')
script_save = open('%s/%s' % (self.tmpdir, RSD.SAVE), 'w')
script_subgroups = open('%s/%s' % (self.tmpdir, RSD.SUBGROUPS), 'w')

# Permit the owner to execute and read this script
for fn in RSD.SCRIPTS:
    os.chmod('%s/%s' % (self.tmpdir, fn), S_IREAD | S_IEXEC)

# Writes one line of script
new_script = lambda script: lambda x: script.write(x + '\n')

#
# 'Construction' script
#
w = new_script(script_construct)
w(':- initialization(main).')
w('main :-')
w('[featurize],')
w('r(%s),' % filestem)
w('w.')
script_construct.close()

#
# 'Saving' script
#
w = new_script(script_save)
w(':- initialization(main).')
w('main :-')
w('[process],')
w('r(%s),' % filestem)
w('w,')
w('w(weka, %s),' % filestem)
w('w(rsd, %s).' % filestem)
script_save.close()

#
# 'Subgroups' script
#
w = new_script(script_subgroups)
w(':- initialization(main).')
w('main :-')
w('[rules],')
w('r(%s),' % filestem)
w('i,')
w('w.')
script_subgroups.close()
<SYSTEM_TASK:> An implementation of the FKM algorithm for generating the de Bruijn <END_TASK> <USER_TASK:> Description: def deBruijn(n, k): """ An implementation of the FKM algorithm for generating the de Bruijn sequence containing all k-ary strings of length n, as described in "Combinatorial Generation" by Frank Ruskey. """
a = [0] * (n + 1)

def gen(t, p):
    if t > n:
        for v in a[1:p + 1]:
            yield v
    else:
        a[t] = a[t - p]
        for v in gen(t + 1, p):
            yield v

        for j in range(a[t - p] + 1, k):
            a[t] = j
            for v in gen(t + 1, t):
                yield v

return gen(1, 1)
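The generator yields symbol indices rather than characters. A quick property check, assuming the function above is in scope (phrased as a coverage test rather than an exact-output claim, since the sequence layout is an implementation detail):

seq = ''.join(str(v) for v in deBruijn(3, 2))
wrapped = seq + seq[:2]  # read the sequence cyclically
windows = set(wrapped[i:i + 3] for i in range(len(seq)))
print(len(windows) == 2 ** 3)  # True: every 3-bit string appears as a window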
<SYSTEM_TASK:> Given an element of a de Bruijn sequence, find its index in that sequence. <END_TASK> <USER_TASK:> Description: def cycle_find(key, width=4): """ Given an element of a de Bruijn sequence, find its index in that sequence. Args: key(str): The piece of the de Bruijn sequence to find. width(int): The width of each element in the sequence. Returns: int: The index of ``key`` in the de Bruijn sequence. """
key_len = len(key)
buf = ''

it = deBruijn(width, 26)

for i in range(key_len):
    buf += chr(ord('A') + next(it))

if buf == key:
    return 0

for i, c in enumerate(it):
    buf = buf[1:] + chr(ord('A') + c)
    if buf == key:
        return i + 1

return -1
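Paired with the `cycle` helper used by the apps below (which renders the same sequence as uppercase letters), the round trip looks like this sketch, assuming both functions are in scope:

buf = cycle(200)               # 'AAAABAAAC...' -- the de Bruijn pattern as letters
probe = buf[100:104]           # pretend this window overwrote a saved register
offset = cycle_find(probe)
print(buf[offset:offset + 4] == probe)  # True: the offset locates the probe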
<SYSTEM_TASK:> Generate a de Bruijn sequence of a given length. <END_TASK> <USER_TASK:> Description: def cycle_app(parser, cmd, args): # pragma: no cover """ Generate a de Bruijn sequence of a given length. """
parser.add_argument('-w', '--width', type=int, default=4, help='the length of the cycled value')
parser.add_argument('length', type=int, help='the cycle length to generate')
args = parser.parse_args(args)
return cycle(args.length, args.width)
<SYSTEM_TASK:> Find the first position of a value in a de Bruijn sequence. <END_TASK> <USER_TASK:> Description: def cycle_find_app(_parser, cmd, args): # pragma: no cover """ Find the first position of a value in a de Bruijn sequence. """
parser = argparse.ArgumentParser(
    prog=_parser.prog,
    description=_parser.description,
)
parser.add_argument('-w', '--width', type=int, default=4, help='the length of the cycled value')
parser.add_argument('value', help='the value to determine the position of, read from stdin if missing', nargs='?')
args = parser.parse_args(args)

index = cycle_find(pwnypack.main.string_value_or_stdin(args.value), args.width)
if index == -1:
    print('Not found.')
    sys.exit(1)
else:
    print('Found at position: %d' % index)
<SYSTEM_TASK:> Resolve all html entities to their corresponding unicode character <END_TASK> <USER_TASK:> Description: def unescape_all(string): """Resolve all html entities to their corresponding unicode character"""
def escape_single(matchobj):
    return _unicode_for_entity_with_name(matchobj.group(1))
return entities.sub(escape_single, string)
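`entities` and `_unicode_for_entity_with_name` are defined elsewhere in the module; a plausible sketch using the stdlib entity table (`html.entities` here, `htmlentitydefs` on Python 2), with the final call assuming the function above is in scope:

import re
from html.entities import name2codepoint  # htmlentitydefs on Python 2

entities = re.compile(r'&(\w+?);')

def _unicode_for_entity_with_name(name):
    # Fall back to the raw entity text if the name is unknown.
    codepoint = name2codepoint.get(name)
    return chr(codepoint) if codepoint is not None else '&%s;' % name

print(unescape_all('caf&eacute; &amp; r&eacute;sum&eacute;'))  # 'café & résumé'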
<SYSTEM_TASK:> validates a unicode string containing xml <END_TASK> <USER_TASK:> Description: def is_valid(xml_string): """validates a unicode string containing xml"""
xml_file = StringIO.StringIO(xml_string.encode('utf-8'))
parser = XmlScanner()
parser.setContentHandler(ContentHandler())
try:
    parser.parse(xml_file)
except SAXParseException:
    return False
return True
<SYSTEM_TASK:> Instantiate a document based on its document id. <END_TASK> <USER_TASK:> Description: def __inst_doc(self, docid, doc_type_name=None): """ Instantiate a document based on its document id. The information is taken from the whoosh index. """
doc = None
docpath = self.fs.join(self.rootdir, docid)
if not self.fs.exists(docpath):
    return None
if doc_type_name is not None:
    # if we already know the doc type name
    for (is_doc_type, doc_type_name_b, doc_type) in DOC_TYPE_LIST:
        if doc_type_name_b == doc_type_name:
            doc = doc_type(self.fs, docpath, docid)
    if not doc:
        logger.warning(
            ("Warning: unknown doc type found in the index: %s") %
            doc_type_name
        )
# otherwise we guess the doc type
if not doc:
    for (is_doc_type, doc_type_name, doc_type) in DOC_TYPE_LIST:
        if is_doc_type(self.fs, docpath):
            doc = doc_type(self.fs, docpath, docid)
            break
if not doc:
    logger.warning("Warning: unknown doc type for doc '%s'" % docid)
return doc
<SYSTEM_TASK:> Try to find a document based on its document id. If inst=True and it <END_TASK> <USER_TASK:> Description: def get_doc_from_docid(self, docid, doc_type_name=None, inst=True): """ Try to find a document based on its document id. If inst=True and it hasn't been instantiated yet, it will be. """
assert(docid is not None)
if docid in self._docs_by_id:
    return self._docs_by_id[docid]
if not inst:
    return None
doc = self.__inst_doc(docid, doc_type_name)
if doc is None:
    return None
self._docs_by_id[docid] = doc
return doc
<SYSTEM_TASK:> Forget about the changes <END_TASK> <USER_TASK:> Description: def cancel(self): """ Forget about the changes """
logger.info("Index: Index update cancelled") if self.index_writer: self.index_writer.cancel() del self.index_writer self.index_writer = None if self.label_guesser_updater: self.label_guesser_updater.cancel() self.label_guesser_updater = None
<SYSTEM_TASK:> Get a document or a page using its ID <END_TASK> <USER_TASK:> Description: def get(self, obj_id): """ Get a document or a page using its ID Won't instantiate them if they are not yet available """
if BasicPage.PAGE_ID_SEPARATOR in obj_id:
    (docid, page_nb) = obj_id.split(BasicPage.PAGE_ID_SEPARATOR)
    page_nb = int(page_nb)
    return self._docs_by_id[docid].pages[page_nb]
return self._docs_by_id[obj_id]
<SYSTEM_TASK:> Returns all the documents matching the given keywords <END_TASK> <USER_TASK:> Description: def find_documents(self, sentence, limit=None, must_sort=True, search_type='fuzzy'): """ Returns all the documents matching the given keywords Arguments: sentence --- a sentence used as the query Returns: An array of documents (doc objects) """
sentence = sentence.strip()
sentence = strip_accents(sentence)

if sentence == u"":
    return self.get_all_docs()

result_list_list = []
total_results = 0

for query_parser in self.search_param_list[search_type]:
    query = query_parser["query_parser"].parse(sentence)

    sortedby = None
    if must_sort and "sortedby" in query_parser:
        sortedby = query_parser['sortedby']

    if sortedby:
        results = self.__searcher.search(
            query, limit=limit, sortedby=sortedby
        )
    else:
        results = self.__searcher.search(
            query, limit=limit
        )
    results = [
        (result['docid'], result['doctype'])
        for result in results
    ]

    result_list_list.append(results)
    total_results += len(results)

    if not must_sort and total_results >= limit:
        break

# merging results
docs = set()
for result_intermediate in result_list_list:
    for result in result_intermediate:
        doc = self._docs_by_id.get(result[0])
        if doc is None:
            continue
        docs.add(doc)

docs = [d for d in docs]

if not must_sort and limit is not None:
    docs = docs[:limit]

return docs
<SYSTEM_TASK:> Search all possible suggestions. Suggestions returned always have at <END_TASK> <USER_TASK:> Description: def find_suggestions(self, sentence): """ Search all possible suggestions. Suggestions returned always have at least one document matching. Arguments: sentence --- keywords (single strings) for which we want suggestions Return: An array of sets of keywords. Each set of keywords (-> one string) is a suggestion. """
if not isinstance(sentence, str):
    sentence = str(sentence)

keywords = sentence.split(" ")

query_parser = self.search_param_list['strict'][0]['query_parser']

base_search = u" ".join(keywords).strip()
final_suggestions = []

corrector = self.__searcher.corrector("content")
label_corrector = self.__searcher.corrector("label")
for (keyword_idx, keyword) in enumerate(keywords):
    if len(keyword) <= MIN_KEYWORD_LEN:
        continue

    keyword_suggestions = label_corrector.suggest(
        keyword, limit=2
    )[:]
    keyword_suggestions += corrector.suggest(keyword, limit=5)[:]

    for keyword_suggestion in keyword_suggestions:
        new_suggestion = keywords[:]
        new_suggestion[keyword_idx] = keyword_suggestion
        new_suggestion = u" ".join(new_suggestion).strip()

        if new_suggestion == base_search:
            continue

        # make sure it would return results
        query = query_parser.parse(new_suggestion)
        results = self.__searcher.search(query, limit=1)
        if len(results) <= 0:
            continue
        final_suggestions.append(new_suggestion)

final_suggestions.sort()
return final_suggestions
<SYSTEM_TASK:> Add a label on a document. <END_TASK> <USER_TASK:> Description: def add_label(self, doc, label, update_index=True): """ Add a label on a document. Arguments: label --- The new label (see labels.Label) doc --- The first document on which this label has been added """
label = copy.copy(label)
assert(label in self.labels.values())
doc.add_label(label)
if update_index:
    self.upd_doc(doc)
    self.commit()
<SYSTEM_TASK:> Destroy the index. Don't use this Index object anymore after this <END_TASK> <USER_TASK:> Description: def destroy_index(self): """ Destroy the index. Don't use this Index object anymore after this call. Index will have to be rebuilt from scratch """
self.close()
logger.info("Destroying the index ...")
rm_rf(self.indexdir)
rm_rf(self.label_guesser_dir)
logger.info("Done")
<SYSTEM_TASK:> Check if there is a document using this file hash <END_TASK> <USER_TASK:> Description: def is_hash_in_index(self, filehash): """ Check if there is a document using this file hash """
filehash = (u"%X" % filehash) results = self.__searcher.search( whoosh.query.Term('docfilehash', filehash)) return bool(results)
<SYSTEM_TASK:> Get the name of the localized field <END_TASK> <USER_TASK:> Description: def get_localized_field_name(context, field): """Get the name of the localized field"""
attrs = [
    translation.get_language(),
    translation.get_language()[:2],
    settings.LANGUAGE_CODE
]

def predicate(x):
    field_name = get_real_fieldname(field, x)
    if hasattr(context, field_name):
        return field_name
    return None

return first_match(predicate, attrs)
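The lookup tries the most specific language first, then its base language, then the site default. A self-contained sketch; the bodies of first_match and get_real_fieldname are assumptions standing in for the project's real helpers:

def get_real_fieldname(field, lang):
    # assumed naming convention: 'title' -> 'title_en', 'title_de', ...
    return "%s_%s" % (field, lang.replace("-", "_"))

def first_match(predicate, items):
    for item in items:
        result = predicate(item)
        if result is not None:
            return result
    return None

class Article(object):
    title_de = u"Hallo"
    title_en = u"Hello"

def predicate(lang):
    field_name = get_real_fieldname("title", lang)
    return field_name if hasattr(Article, field_name) else None

# active language, its base language, then settings.LANGUAGE_CODE
attrs = ["de-at", "de", "en"]
print(first_match(predicate, attrs))  # 'title_de': 'de-at' misses, 'de' matches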
<SYSTEM_TASK:> Get a field by name from a model class without messing with the app cache. <END_TASK> <USER_TASK:> Description: def get_field_from_model_by_name(model_class, field_name): """ Get a field by name from a model class without messing with the app cache. """
return first_match(lambda x: x if x.name == field_name else None, model_class._meta.fields)
<SYSTEM_TASK:> \ <END_TASK> <USER_TASK:> Description: def fit(self, data, labels, **kwargs): """\ Training the SOM on the the data and calibrate itself. After the training, `self.quant_error` and `self.topog_error` are respectively set. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param labels: the labels associated with data :type labels: iterable :param \**kwargs: optional parameters for :meth:`train` """
# train the network
self._som.train(data, **kwargs)
# retrieve first and second bmus and distances
bmus, q_error, t_error = self.bmus_with_errors(data)
# set errors measures of training data
self.quant_error = q_error
self.topog_error = t_error
# store training bmus
self._bmus = bmus
# calibrate
self._calibrate(data, labels)
<SYSTEM_TASK:> \ <END_TASK> <USER_TASK:> Description: def _calibrate(self, data, labels): """\ Calibrate the network using `self._bmus`. """
# network calibration
classifier = defaultdict(Counter)
for (i, j), label in zip(self._bmus, labels):
    classifier[i, j][label] += 1
self.classifier = {}
for ij, cnt in classifier.items():
    maxi = max(cnt.items(), key=itemgetter(1))
    nb = sum(cnt.values())
    # float() guards against integer truncation under Python 2
    self.classifier[ij] = maxi[0], maxi[1] / float(nb)
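Calibration is a per-cell majority vote: each cell keeps its most frequent label plus that label's share of the cell's hits. A runnable sketch with fabricated BMUs and labels:

from collections import Counter, defaultdict
from operator import itemgetter

bmus = [(0, 0), (0, 0), (0, 0), (1, 2)]  # fabricated best-matching units
labels = ["cat", "cat", "dog", "dog"]

classifier = defaultdict(Counter)
for (i, j), label in zip(bmus, labels):
    classifier[i, j][label] += 1

calibrated = {}
for ij, cnt in classifier.items():
    winner, hits = max(cnt.items(), key=itemgetter(1))
    calibrated[ij] = (winner, hits / float(sum(cnt.values())))

print(calibrated)  # {(0, 0): ('cat', 0.666...), (1, 2): ('dog', 1.0)}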
<SYSTEM_TASK:> \ <END_TASK> <USER_TASK:> Description: def predict(self, data, unknown=None): """\ Classify data according to previous calibration. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param unknown: the label to attribute if no label is known :returns: the labels guessed for data :rtype: `numpy.array` """
assert self.classifier is not None, 'not calibrated'
bmus = self._som.bmus(data)
return self._predict_from_bmus(bmus, unknown)
<SYSTEM_TASK:> \ <END_TASK> <USER_TASK:> Description: def fit_predict(self, data, labels, unknown=None): """\ Fit and classify data efficiently. :param data: sparse input matrix (ideal dtype is `numpy.float32`) :type data: :class:`scipy.sparse.csr_matrix` :param labels: the labels associated with data :type labels: iterable :param unknown: the label to attribute if no label is known :returns: the labels guessed for data :rtype: `numpy.array` """
self.fit(data, labels)
return self._predict_from_bmus(self._bmus, unknown)
<SYSTEM_TASK:> \ <END_TASK> <USER_TASK:> Description: def histogram(self, bmus=None): """\ Return a 2D histogram of bmus. :param bmus: the best-match units indexes for underlying data. :type bmus: :class:`numpy.ndarray` :returns: the computed 2D histogram of bmus. :rtype: :class:`numpy.ndarray` """
if bmus is None:
    assert self._bmus is not None, 'not trained'
    bmus = self._bmus
arr = np.zeros((self._som.nrows, self._som.ncols))
for i, j in bmus:
    arr[i, j] += 1
return arr
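The same accumulation, standalone, on a fabricated 2x3 map:

import numpy as np

nrows, ncols = 2, 3
bmus = [(0, 0), (0, 0), (1, 2)]  # fabricated best-matching units

arr = np.zeros((nrows, ncols))
for i, j in bmus:
    arr[i, j] += 1

print(arr)
# [[ 2.  0.  0.]
#  [ 0.  0.  1.]]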
<SYSTEM_TASK:> Return a suitable pickle protocol version for a given target. <END_TASK> <USER_TASK:> Description: def get_protocol_version(protocol=None, target=None): """ Return a suitable pickle protocol version for a given target. Arguments: target: The internals description of the targeted python version. If this is ``None`` the specification of the currently running python version will be used. protocol(None or int): The requested protocol version (or None for the default of the target python version). Returns: int: A suitable pickle protocol version. """
target = get_py_internals(target)

if protocol is None:
    protocol = target['pickle_default_protocol']

if protocol > cPickle.HIGHEST_PROTOCOL:
    warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)
    protocol = cPickle.HIGHEST_PROTOCOL

target_highest_protocol = target['pickle_highest_protocol']
if protocol > target_highest_protocol:
    warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest_protocol)
    protocol = target_highest_protocol

return protocol
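The clamping logic can be exercised with the standard library alone. Here the plain pickle module stands in for cPickle, and the target dict mimics only the two py_internals keys used above (its values are made up):

import pickle
import warnings

target = {'pickle_default_protocol': 2, 'pickle_highest_protocol': 2}

def clamp_protocol(protocol, target):
    if protocol is None:
        protocol = target['pickle_default_protocol']
    if protocol > pickle.HIGHEST_PROTOCOL:
        warnings.warn('Downgrading to %d.' % pickle.HIGHEST_PROTOCOL)
        protocol = pickle.HIGHEST_PROTOCOL
    if protocol > target['pickle_highest_protocol']:
        warnings.warn('Downgrading to %d.' % target['pickle_highest_protocol'])
        protocol = target['pickle_highest_protocol']
    return protocol

print(clamp_protocol(None, target))  # 2 (the target default)
print(clamp_protocol(4, target))     # 2, after a downgrade warning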
<SYSTEM_TASK:> Very crude inter-python version opcode translator. Raises SyntaxError when <END_TASK> <USER_TASK:> Description: def translate_opcodes(code_obj, target): """ Very crude inter-python version opcode translator. Raises SyntaxError when the opcode doesn't exist in the destination opmap. Used to transcribe python code objects between python versions. Arguments: code_obj(pwnypack.bytecode.CodeObject): The code object representation to translate. target(dict): The py_internals structure for the target python version. """
target = get_py_internals(target)

src_ops = code_obj.disassemble()

dst_opmap = target['opmap']
dst_ops = []

op_iter = enumerate(src_ops)
for i, op in op_iter:
    if isinstance(op, pwnypack.bytecode.Label):
        dst_ops.append(op)
        continue

    if op.name not in dst_opmap:
        if op.name == 'POP_JUMP_IF_FALSE' and 'JUMP_IF_TRUE' in dst_opmap:
            lbl = pwnypack.bytecode.Label()
            dst_ops.extend([
                pwnypack.bytecode.Op('JUMP_IF_TRUE', lbl),
                pwnypack.bytecode.Op('POP_TOP', None),
                pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
                lbl,
                pwnypack.bytecode.Op('POP_TOP', None),
            ])
        elif op.name == 'POP_JUMP_IF_TRUE' and 'JUMP_IF_FALSE' in dst_opmap:
            lbl = pwnypack.bytecode.Label()
            dst_ops.extend([
                pwnypack.bytecode.Op('JUMP_IF_FALSE', lbl),
                pwnypack.bytecode.Op('POP_TOP', None),
                pwnypack.bytecode.Op('JUMP_ABSOLUTE', op.arg),
                lbl,
                pwnypack.bytecode.Op('POP_TOP', None),
            ])
        elif op.name == 'JUMP_IF_FALSE' and 'JUMP_IF_FALSE_OR_POP' in dst_opmap and \
                src_ops[i + 1].name == 'POP_TOP':
            next(op_iter)
            dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_FALSE_OR_POP', op.arg))
        elif op.name == 'JUMP_IF_TRUE' and 'JUMP_IF_TRUE_OR_POP' in dst_opmap and \
                src_ops[i + 1].name == 'POP_TOP':
            next(op_iter)
            dst_ops.append(pwnypack.bytecode.Op('JUMP_IF_TRUE_OR_POP', op.arg))
        else:
            raise SyntaxError('Opcode %s not supported on target.' % op.name)
    else:
        dst_ops.append(op)

code_obj.assemble(dst_ops, target)
<SYSTEM_TASK:> Return stops served by this route. <END_TASK> <USER_TASK:> Description: def stops(self): """Return stops served by this route."""
serves = set()
for trip in self.trips():
    for stop_time in trip.stop_times():
        serves |= stop_time.stops()
return serves
<SYSTEM_TASK:> Pack an unsigned pointer for a given target. <END_TASK> <USER_TASK:> Description: def P(value, bits=None, endian=None, target=None): """ Pack an unsigned pointer for a given target. Args: value(int): The value to pack. bits(:class:`~pwnypack.target.Target.Bits`): Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`): Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`): Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. """
return globals()['P%d' % _get_bits(bits, target)](value, endian=endian, target=target)
<SYSTEM_TASK:> Pack a signed pointer for a given target. <END_TASK> <USER_TASK:> Description: def p(value, bits=None, endian=None, target=None): """ Pack a signed pointer for a given target. Args: value(int): The value to pack. bits(:class:`pwnypack.target.Target.Bits`): Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`): Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`): Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. """
return globals()['p%d' % _get_bits(bits, target)](value, endian=endian, target=target)
<SYSTEM_TASK:> Unpack an unsigned pointer for a given target. <END_TASK> <USER_TASK:> Description: def U(data, bits=None, endian=None, target=None): """ Unpack an unsigned pointer for a given target. Args: data(bytes): The data to unpack. bits(:class:`pwnypack.target.Target.Bits`): Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`): Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`): Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. Returns: int: The pointer value. """
return globals()['U%d' % _get_bits(bits, target)](data, endian=endian, target=target)
<SYSTEM_TASK:> Unpack a signed pointer for a given target. <END_TASK> <USER_TASK:> Description: def u(data, bits=None, endian=None, target=None): """ Unpack a signed pointer for a given target. Args: data(bytes): The data to unpack. bits(:class:`pwnypack.target.Target.Bits`): Override the default word size. If ``None`` it will look at the word size of ``target``. endian(:class:`~pwnypack.target.Target.Endian`): Override the default byte order. If ``None``, it will look at the byte order of the ``target`` argument. target(:class:`~pwnypack.target.Target`): Override the default byte order. If ``None``, it will look at the byte order of the global :data:`~pwnypack.target.target`. Returns: int: The pointer value. """
return globals()['u%d' % _get_bits(bits, target)](data, endian=endian, target=target)
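These four helpers dispatch to width-specific functions such as P32 or u64 assembled from the name. A rough standard-library analogue of one 32-bit round trip uses struct; this illustrates the semantics, not pwnypack's implementation:

import struct

value = 0xdeadbeef
packed = struct.pack('<I', value)           # unsigned 32-bit little-endian, like P32
print(packed)                               # b'\xef\xbe\xad\xde'
print(hex(struct.unpack('<I', packed)[0]))  # 0xdeadbeef -- U32 round trip

# signed variants (p32/u32) use 'i' instead of 'I'
print(struct.unpack('<i', packed)[0])       # -559038737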
<SYSTEM_TASK:> Return all trips for this agency. <END_TASK> <USER_TASK:> Description: def trips(self): """Return all trips for this agency."""
trips = set()
for route in self.routes():
    trips |= route.trips()
return trips
<SYSTEM_TASK:> Return all stops visited by trips for this agency. <END_TASK> <USER_TASK:> Description: def stops(self): """Return all stops visited by trips for this agency."""
stops = set()
for stop_time in self.stop_times():
    stops |= stop_time.stops()
return stops
<SYSTEM_TASK:> Return all stop_times for this agency. <END_TASK> <USER_TASK:> Description: def stop_times(self): """Return all stop_times for this agency."""
stop_times = set()
for trip in self.trips():
    stop_times |= trip.stop_times()
return stop_times
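All three accessors above share one pattern: walk the child collection and union the grandchildren into a set so duplicates collapse. A toy version with plain objects (class and attribute names are made up):

class Trip(object):
    def __init__(self, stop_times):
        self._stop_times = stop_times

    def stop_times(self):
        return set(self._stop_times)

class Agency(object):
    def __init__(self, trips):
        self._trips = trips

    def trips(self):
        return set(self._trips)

    def stop_times(self):
        stop_times = set()
        for trip in self.trips():
            stop_times |= trip.stop_times()
        return stop_times

agency = Agency([Trip(["st1", "st2"]), Trip(["st2", "st3"])])
print(sorted(agency.stop_times()))  # ['st1', 'st2', 'st3'] -- 'st2' appears once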
<SYSTEM_TASK:> For each field name in localized_fields, <END_TASK> <USER_TASK:> Description: def localize_fields(cls, localized_fields): """ For each field name in localized_fields, for each language in settings.LANGUAGES, add fields to cls, and remove the original field, instead replace it with a DefaultFieldDescriptor, which always returns the field in the current language. """
# never do this twice
if hasattr(cls, 'localized_fields'):
    return cls

# MSGID_LANGUAGE is the language that is used for the gettext message id's.
# If it is not available, because the site isn't using subsites, the
# LANGUAGE_CODE is good too. MSGID_LANGUAGE gives the opportunity to
# specify a language not available in the site but which is still used for
# the message id's.
msgid_language = getattr(settings, 'MSGID_LANGUAGE', settings.LANGUAGE_CODE)

# set the localized fields property
cls.localized_fields = localized_fields

for field in localized_fields:
    original_attr = get_field_from_model_by_name(cls, field)

    for cnt, language_code in enumerate(get_all_language_codes()):
        i18n_attr = copy.copy(original_attr)
        # add support for south introspection.
        i18n_attr._south_introspects = True
        i18n_attr.original_fieldname = field
        i18n_attr.include_in_xml = False
        lang_attr_name = get_real_fieldname(field, language_code)
        i18n_attr.name = lang_attr_name
        i18n_attr.creation_counter = i18n_attr.creation_counter + .01 * cnt

        # null must be allowed for the message id language because this
        # language might not be available at all in the backend
        if not i18n_attr.null and i18n_attr.default is NOT_PROVIDED:
            i18n_attr.null = True

        if language_code != msgid_language:
            # no validation for the fields that are language specific
            if not i18n_attr.blank:
                i18n_attr.blank = True

        if i18n_attr.verbose_name:
            i18n_attr.verbose_name = translation.string_concat(
                i18n_attr.verbose_name, u' (%s)' % language_code)
        cls.add_to_class(lang_attr_name, i18n_attr)

    # delete original field
    del cls._meta.local_fields[cls._meta.local_fields.index(original_attr)]

    # copy some values and functions from the original_attr
    # so the field can emulate the original_attr as good as possible
    kwargs = {
        'serialize': getattr(original_attr, 'serialize', True),
        'extra_attrs': getattr(original_attr, 'extra_attrs', None),
        'max_length': getattr(original_attr, 'max_length', None),
        'min_length': getattr(original_attr, 'min_length', None),
        'form_field': original_attr.formfield(
            **FORMFIELD_FOR_DBFIELD_DEFAULTS.get(
                original_attr.__class__, {})),
        'get_internal_type': original_attr.get_internal_type,
        'unique': getattr(original_attr, 'unique', False),
        'to_python': original_attr.to_python,
    }

    # copy __serialize__ if it was defined on the original attr
    if hasattr(original_attr, '__serialize__'):
        kwargs['__serialize__'] = original_attr.__serialize__

    # add the DefaultFieldDescriptor where the original_attr was.
    cls.add_to_class(field, DefaultFieldDescriptor(field, **kwargs))

# update fields cache
try:
    cls._meta._fill_fields_cache()
except AttributeError:
    # Django 1.8 removed _fill_fields_cache
    cls._meta._expire_cache()
    cls._meta._get_fields(reverse=False)

# return the finished product
return cls
<SYSTEM_TASK:> Read the label file of the documents and extract all the labels <END_TASK> <USER_TASK:> Description: def __get_labels(self): """ Read the label file of the documents and extract all the labels Returns: An array of labels.Label objects """
labels = []
try:
    with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'r') \
            as file_desc:
        for line in file_desc.readlines():
            line = line.strip()
            (label_name, label_color) = line.split(",", 1)
            labels.append(Label(name=label_name, color=label_color))
except IOError:
    pass
return labels
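The label file is one "name,color" pair per line; split(",", 1) keeps any commas inside the color value intact. A standalone parse of a fabricated file:

import io

# fabricated file content; the real file sits next to the document on disk
label_file = io.StringIO(u"invoices,#aa0000\nreceipts,#00aa00\n")

labels = []
for line in label_file.readlines():
    line = line.strip()
    (name, color) = line.split(",", 1)
    labels.append((name, color))

print(labels)  # [(u'invoices', u'#aa0000'), (u'receipts', u'#00aa00')]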
<SYSTEM_TASK:> This descriptor acts as a Field, as far as the serializer is concerned. <END_TASK> <USER_TASK:> Description: def value_to_string(self, obj): """This descriptor acts as a Field, as far as the serializer is concerned."""
try:
    return force_unicode(self.__get__(obj))
except TypeError:
    return str(self.__get__(obj))
<SYSTEM_TASK:> turn a django model into a po file. <END_TASK> <USER_TASK:> Description: def poify(self, model): """turn a django model into a po file."""
if not hasattr(model, 'localized_fields'):
    return None

# create po stream with header
po_stream = polibext.PoStream(StringIO.StringIO(self.po_header)).parse()

for (name, field) in easymode.tree.introspection.get_default_field_descriptors(model):
    occurrence = u"%s.%s.%s" % (model._meta.app_label, model.__class__.__name__, name)
    value = field.value_to_string(model)

    # only add non-empty strings
    if value != "":
        entry = polib.POEntry(msgid=value, occurrences=[(occurrence, model.pk)])

        # make sure no duplicate entries in the po_stream
        existing_entry = po_stream.find(entry.msgid)
        if existing_entry is None:
            po_stream.append(entry)
        else:
            # no really, existing_entry.merge does not merge the occurrences.
            existing_entry.occurrences += entry.occurrences

return po_stream
<SYSTEM_TASK:> Extracts to be translated strings from template and turns it into po format. <END_TASK> <USER_TASK:> Description: def xgettext(self, template): """Extracts to be translated strings from template and turns it into po format."""
cmd = 'xgettext -d django -L Python --keyword=gettext_noop \
    --keyword=gettext_lazy --keyword=ngettext_lazy:1,2 --from-code=UTF-8 \
    --output=- -'

p = subprocess.Popen(cmd, shell=True,
                     stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
(msg, err) = p.communicate(input=templatize(template))

if err:
    # don't raise an exception; some of what lands on stderr is just warnings
    logging.warning(err)

if XGETTEXT_REENCODES_UTF8:
    return msg.decode('utf-8').encode('iso-8859-1')

return msg
<SYSTEM_TASK:> Runs msgmerge on a locale_file and po_string <END_TASK> <USER_TASK:> Description: def msgmerge(self, locale_file, po_string): """ Runs msgmerge on a locale_file and po_string """
cmd = "msgmerge -q %s -" % locale_file p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (msg, err) = p.communicate(input=po_string) if err: # dont raise exception, some stuff in stderr are just warmings logging.warning("%s \nfile: %s\npostring: %s" % (err, locale_file, po_string)) return msg
<SYSTEM_TASK:> Walks a simple data structure, converting unicode to byte string. <END_TASK> <USER_TASK:> Description: def to_utf8(obj): """Walks a simple data structure, converting unicode to byte string. Supports lists, tuples, and dictionaries. """
if isinstance(obj, unicode_type):
    return _utf8(obj)
elif isinstance(obj, dict):
    return dict((to_utf8(k), to_utf8(v)) for (k, v) in obj.items())
elif isinstance(obj, list):
    return list(to_utf8(i) for i in obj)
elif isinstance(obj, tuple):
    return tuple(to_utf8(i) for i in obj)
return obj
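A quick check of the recursion, assuming the module defines unicode_type = unicode and _utf8 as essentially s.encode('utf-8') (Python 2 only; under Python 3 there is no separate unicode type):

data = {u'key': [u'value', (u'a', u'b')]}
print(to_utf8(data))
# {'key': ['value', ('a', 'b')]} -- every unicode string is now a byte string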
<SYSTEM_TASK:> After learning call the given script using 'goal'. <END_TASK> <USER_TASK:> Description: def setPostScript(self, goal, script): """ After learning call the given script using 'goal'. :param goal: goal name :param script: prolog script to call """
self.postGoal = goal
self.postScript = script
<SYSTEM_TASK:> Induce a theory or features in 'mode'. <END_TASK> <USER_TASK:> Description: def induce(self, mode, pos, neg, b, filestem='default', printOutput=False): """ Induce a theory or features in 'mode'. :param filestem: The base name of this experiment. :param mode: In which mode to induce rules/features. :param pos: String of positive examples. :param neg: String of negative examples. :param b: String of background knowledge. :return: The theory as a string or an arff dataset in induce_features mode. :rtype: str """
# Write the inputs to appropriate files.
self.__prepare(filestem, pos, neg, b)

# Make a script to run aleph (with appropriate settings).
self.__script(mode, filestem)

logger.info("Running aleph...")

dumpFile = None
if not printOutput:
    dumpFile = tempfile.TemporaryFile()

# Run the aleph script.
p = SafePopen(['yap', '-s50000', '-h200000', '-L', Aleph.SCRIPT],
              cwd=self.tmpdir,
              stdout=dumpFile,
              stderr=dumpFile
              ).safe_run()
stdout_str, stderr_str = p.communicate()
logger.info("Done.")

result = None
if mode != 'induce_features':
    # Return the rules written in the output file.
    rules_fn = filestem + Aleph.RULES_SUFFIX
    result = open('%s/%s' % (self.tmpdir, rules_fn)).read()
    features = None
else:
    features_fn = filestem + Aleph.FEATURES_SUFFIX
    features = open('%s/%s' % (self.tmpdir, features_fn)).read()
    dataset_fn = filestem + Aleph.PROP_DATASET_SUFFIX
    pl_dataset = open('%s/%s' % (self.tmpdir, dataset_fn)).read()
    result = self.__to_arff(features, pl_dataset, filestem)

# Cleanup.
self.__cleanup()

return (result, features)
<SYSTEM_TASK:> Makes the script file to be run by yap. <END_TASK> <USER_TASK:> Description: def __script(self, mode, filestem): """ Makes the script file to be run by yap. """
scriptPath = '%s/%s' % (self.tmpdir, Aleph.SCRIPT)
script = open(scriptPath, 'w')

# Permit the owner to execute and read this script
os.chmod(scriptPath, S_IREAD | S_IEXEC)

cat = lambda x: script.write(x + '\n')

cat(":- initialization(run_aleph).")
cat("run_aleph :- ")
cat("consult(aleph),")
cat("read_all('%s')," % filestem)

# Cat all the non-default settings
for setting, value in self.settings.items():
    cat("set(%s, %s)," % (setting, str(value)))

cat("%s," % mode)

eof = ',' if self.postScript else '.'
if mode == 'induce_features':
    cat("consult(features),")
    features_fn = filestem + Aleph.FEATURES_SUFFIX
    dataset_fn = filestem + Aleph.PROP_DATASET_SUFFIX
    cat('save_features(%s),' % features_fn)
    cat('save_dataset(%s)%s' % (dataset_fn, eof))
else:
    rules_fn = filestem + Aleph.RULES_SUFFIX
    cat("write_rules('%s')%s" % (rules_fn, eof))

if self.postScript:
    cat(self.postGoal + ".")
    cat(self.postScript)

script.close()
<SYSTEM_TASK:> shows untransformed hierarchical xml output <END_TASK> <USER_TASK:> Description: def raw(request): """shows untransformed hierarchical xml output"""
foos = foobar_models.Foo.objects.all()
return HttpResponse(tree.xml(foos), mimetype='text/xml')
<SYSTEM_TASK:> shows how the XmlQuerySetChain can be used instead of @toxml decorator <END_TASK> <USER_TASK:> Description: def chain(request): """shows how the XmlQuerySetChain can be used instead of @toxml decorator"""
bars = foobar_models.Bar.objects.all()
bazs = foobar_models.Baz.objects.all()

qsc = XmlQuerySetChain(bars, bazs)
return HttpResponse(tree.xml(qsc), mimetype='text/xml')
<SYSTEM_TASK:> Shows xml output transformed with standard xslt <END_TASK> <USER_TASK:> Description: def xslt(request): """Shows xml output transformed with standard xslt"""
foos = foobar_models.Foo.objects.all()
return render_xslt_to_response('xslt/model-to-xml.xsl', foos, mimetype='text/xml')
<SYSTEM_TASK:> In a very flowy contour, it is not trivial to say which pitch value corresponds <END_TASK> <USER_TASK:> Description: def label_contours(self, intervals, window=150, hop=30): """ In a very flowy contour, it is not trivial to say which pitch value corresponds to what interval. This function labels pitch contours with intervals by guessing from the characteristics of the contour and its melodic context. :param window: the size of window over which the context is gauged, in milliseconds. :param hop: hop size in milliseconds. """
window /= 1000.0
hop /= 1000.0
exposure = int(window / hop)
boundary = window - hop
final_index = utils.find_nearest_index(self.pitch_obj.timestamps,
                                       self.pitch_obj.timestamps[-1] - boundary)

interval = np.median(np.diff(self.pitch_obj.timestamps))

# cast to int so the values can be used as slice indexes
window_step = int(window / interval)
hop_step = int(hop / interval)
start_index = 0
end_index = window_step
contour_labels = {}
means = []
while end_index < final_index:
    temp = self.pitch_obj.pitch[start_index:end_index][self.pitch_obj.pitch[start_index:end_index] > -10000]
    means.append(np.mean(temp))
    start_index = start_index + hop_step
    end_index = start_index + window_step

for i in xrange(exposure, len(means) - exposure + 1):
    _median = np.median(means[i - exposure:i])
    if _median < -5000:
        continue
    ind = utils.find_nearest_index(_median, intervals)
    contour_end = (i - exposure) * hop_step + window_step
    contour_start = contour_end - hop_step
    if intervals[ind] in contour_labels.keys():
        contour_labels[intervals[ind]].append([contour_start, contour_end])
    else:
        contour_labels[intervals[ind]] = [[contour_start, contour_end]]

self.contour_labels = contour_labels
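A quick numeric check of the windowing arithmetic: with a 150 ms window, a 30 ms hop, and roughly 2.9 ms between pitch samples (an assumed median spacing), each slice spans about 51 samples and advances by about 10:

window, hop = 150 / 1000.0, 30 / 1000.0  # seconds
interval = 0.0029                        # assumed median timestamp spacing

exposure = int(window / hop)             # 5 hops fit in one window
window_step = int(window / interval)     # 51 samples per slice
hop_step = int(hop / interval)           # 10 samples between slice starts
print(exposure, window_step, hop_step)   # 5 51 10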
<SYSTEM_TASK:> Recursively constructs the 'wordification' document for the given example. <END_TASK> <USER_TASK:> Description: def wordify_example(name_to_table, connecting_tables, context, cached_sentences, index_by_value, target_table_name, word_att_length, data_name, ex, searched_connections): """ Recursively constructs the 'wordification' document for the given example. :param data: The given examples ExampleTable :param ex: Example for which the document is constructed """
debug = False

data_name = str(data_name)

if debug:
    print("======================================")
    print("example:", ex)
    print("table name:", data_name)
    print("searched_connections:", len(searched_connections), searched_connections)
    print("connecting_tables:", len(connecting_tables[data_name]), connecting_tables[data_name])

ex_pkey_value = data_name in context.pkeys and ex[str(context.pkeys[data_name])]

if not data_name in cached_sentences or not str(ex_pkey_value) in cached_sentences[data_name]:
    words = []  # word list for every example
    if debug:
        print("words:", len(words))

    # Construct words (tableName_attributeName_attributeValue) from the given table
    for att in name_to_table[data_name].domain.attributes:
        if not str(att.name) in context.pkeys[data_name] and not str(att.name) in context.fkeys[data_name]:
            words.append(att_to_s(data_name) + "_" + att_to_s(att.name) + "_" + att_to_s(ex[att]))

    # Words from pairs of attributes
    single_words = words[:]
    for comb_length in range(word_att_length + 1):
        if comb_length > 1:
            words.extend(["__".join(sorted(b)) for b in itertools.combinations(single_words, comb_length)])

    # Apply the wordification methodology recursively on all connecting tables
    for sec_t_name, sec_fkey, prim_fkey in connecting_tables[data_name]:
        sec_t = name_to_table[sec_t_name]
        if debug:
            print("------------------")
            print("(sec_t,sec_fkey,prim):", (sec_t_name, sec_fkey, prim_fkey))
            print("search this table:",
                  not (sec_t_name, sec_fkey) in searched_connections and sec_t_name != target_table_name)
            print("search this table:", not prim_fkey or not (data_name, sec_fkey) in searched_connections)

        # and sec_t != self.target_table
        if not (sec_t_name, sec_fkey) in searched_connections and sec_t_name != target_table_name and (
                not prim_fkey or not (data_name, sec_fkey) in searched_connections):
            example_indexes = index_by_value[sec_t_name][str(sec_fkey)][str(ex_pkey_value)] if not prim_fkey else \
                index_by_value[sec_t_name][str(prim_fkey)][str(ex[str(sec_fkey)])]

            for sec_ex_idx in example_indexes:
                words += wordify_example(name_to_table, connecting_tables, context, cached_sentences,
                                         index_by_value, target_table_name, word_att_length, sec_t_name,
                                         sec_t[sec_ex_idx],
                                         searched_connections | set(
                                             [(sec_t_name, sec_fkey), prim_fkey and (data_name, prim_fkey)]))

    cached_sentences[data_name][str(ex_pkey_value)] = words

return cached_sentences[data_name][str(ex_pkey_value)]