Dataset schema:
  _id               string  (lengths 2 to 7)
  title             string  (lengths 1 to 88)
  partition         string  (3 distinct values)
  text              string  (lengths 75 to 19.8k)
  language          string  (1 distinct value)
  meta_information  dict
q4400
get_permission_request
train
def get_permission_request(parser, token):
    """
    Performs a permission request check with the given signature, user and
    objects and assigns the result to a context variable.

    Syntax::

        {% get_permission_request PERMISSION_LABEL.CHECK_NAME for USER and *OBJS [as VARNAME] %}

        {% get_permission_request "poll_permission.change_poll" for request.user and poll as "asked_for_permission" %}
        {% get_permission_request "poll_permission.change_poll" for request.user and poll,second_poll as "asked_for_permission" %}

        {% if asked_for_permission %}
            Dude, you already asked for permission!
        {% else %}
            Oh, please fill out this 20 page form and sign here.
        {% endif %}
    """
    return PermissionForObjectNode.handle_token(
        parser, token, approved=False, name='"permission_request"')
python
{ "resource": "" }
q4401
permission_delete_link
train
def permission_delete_link(context, perm):
    """
    Renders an HTML link to the delete view of the given permission.
    Returns no content if the request-user has no permission to delete
    foreign permissions.
    """
    user = context['request'].user
    if user.is_authenticated():
        if (user.has_perm('authority.delete_foreign_permissions')
                or user.pk == perm.creator.pk):
            return base_link(context, perm, 'authority-delete-permission')
    return {'url': None}
python
{ "resource": "" }
q4402
permission_request_delete_link
train
def permission_request_delete_link(context, perm):
    """
    Renders an HTML link to the delete view of the given permission request.
    Returns no content if the request-user has no permission to delete
    foreign permissions.
    """
    user = context['request'].user
    if user.is_authenticated():
        link_kwargs = base_link(context, perm,
                                'authority-delete-permission-request')
        if user.has_perm('authority.delete_permission'):
            link_kwargs['is_requestor'] = False
            return link_kwargs
        if not perm.approved and perm.user == user:
            link_kwargs['is_requestor'] = True
            return link_kwargs
    return {'url': None}
python
{ "resource": "" }
q4403
permission_request_approve_link
train
def permission_request_approve_link(context, perm):
    """
    Renders an HTML link to the approve view of the given permission request.
    Returns no content if the request-user has no permission to approve
    permission requests.
    """
    user = context['request'].user
    if user.is_authenticated():
        if user.has_perm('authority.approve_permission_requests'):
            return base_link(context, perm,
                             'authority-approve-permission-request')
    return {'url': None}
python
{ "resource": "" }
q4404
ResolverNode.resolve
train
def resolve(self, var, context):
    """Resolves a variable out of context if it's not in quotes"""
    if var is None:
        return var
    if var[0] in ('"', "'") and var[-1] == var[0]:
        return var[1:-1]
    else:
        return template.Variable(var).resolve(context)
python
{ "resource": "" }
q4405
PermissionManager.group_permissions
train
def group_permissions(self, group, perm, obj, approved=True):
    """
    Get the permission entries granting the given group the permission
    ``perm`` on the given object's model.
    """
    return self.get_for_model(obj).select_related(
        'user', 'group', 'creator').filter(group=group, codename=perm,
                                           approved=approved)
python
{ "resource": "" }
q4406
PermissionManager.delete_user_permissions
train
def delete_user_permissions(self, user, perm, obj, check_groups=False):
    """
    Remove granular permission perm from user on an object instance
    """
    user_perms = self.user_permissions(user, perm, obj, check_groups=False)
    if not user_perms.filter(object_id=obj.id):
        return
    perms = self.user_permissions(user, perm, obj).filter(object_id=obj.id)
    perms.delete()
python
{ "resource": "" }
q4407
ParsedWhois.flattened
train
def flattened(self): """Returns a flattened version of the parsed whois data""" parsed = self['parsed_whois'] flat = OrderedDict() for key in ('domain', 'created_date', 'updated_date', 'expired_date', 'statuses', 'name_servers'): value = parsed[key] flat[key] = ' | '.join(value) if type(value) in (list, tuple) else value registrar = parsed.get('registrar', {}) for key in ('name', 'abuse_contact_phone', 'abuse_contact_email', 'iana_id', 'url', 'whois_server'): flat['registrar_{0}'.format(key)] = registrar[key] for contact_type in ('registrant', 'admin', 'tech', 'billing'): contact = parsed.get('contacts', {}).get(contact_type, {}) for key in ('name', 'email', 'org', 'street', 'city', 'state', 'postal', 'country', 'phone', 'fax'): value = contact[key] flat['{0}_{1}'.format(contact_type, key)] = ' '.join(value) if type(value) in (list, tuple) else value return flat
python
{ "resource": "" }
q4408
permission_denied
train
def permission_denied(request, template_name=None, extra_context=None):
    """
    Default 403 handler.

    Templates: `403.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    if template_name is None:
        template_name = ('403.html', 'authority/403.html')
    context = {
        'request_path': request.path,
    }
    if extra_context:
        context.update(extra_context)
    return HttpResponseForbidden(loader.render_to_string(
        template_name=template_name,
        context=context,
        request=request,
    ))
python
{ "resource": "" }
q4409
Permission.approve
train
def approve(self, creator):
    """
    Approve a granular permission request by setting the Permission entry to
    approved=True for a specific action requested by a user on an object
    instance.
    """
    self.approved = True
    self.creator = creator
    self.save()
python
{ "resource": "" }
q4410
autodiscover_modules
train
def autodiscover_modules(): """ Goes and imports the permissions submodule of every app in INSTALLED_APPS to make sure the permission set classes are registered correctly. """ import imp from django.conf import settings for app in settings.INSTALLED_APPS: try: __import__(app) app_path = sys.modules[app].__path__ except AttributeError: continue try: imp.find_module('permissions', app_path) except ImportError: continue __import__("%s.permissions" % app) app_path = sys.modules["%s.permissions" % app] LOADING = False
python
{ "resource": "" }
q4411
BasePermission._get_user_cached_perms
train
def _get_user_cached_perms(self): """ Set up both the user and group caches. """ if not self.user: return {}, {} group_pks = set(self.user.groups.values_list( 'pk', flat=True, )) perms = Permission.objects.filter( Q(user__pk=self.user.pk) | Q(group__pk__in=group_pks), ) user_permissions = {} group_permissions = {} for perm in perms: if perm.user_id == self.user.pk: user_permissions[( perm.object_id, perm.content_type_id, perm.codename, perm.approved, )] = True # If the user has the permission do for something, but perm.user != # self.user then by definition that permission came from the # group. else: group_permissions[( perm.object_id, perm.content_type_id, perm.codename, perm.approved, )] = True return user_permissions, group_permissions
python
{ "resource": "" }
q4412
BasePermission._get_group_cached_perms
train
def _get_group_cached_perms(self): """ Set group cache. """ if not self.group: return {} perms = Permission.objects.filter( group=self.group, ) group_permissions = {} for perm in perms: group_permissions[( perm.object_id, perm.content_type_id, perm.codename, perm.approved, )] = True return group_permissions
python
{ "resource": "" }
q4413
BasePermission._prime_user_perm_caches
train
def _prime_user_perm_caches(self): """ Prime both the user and group caches and put them on the ``self.user``. In addition add a cache filled flag on ``self.user``. """ perm_cache, group_perm_cache = self._get_user_cached_perms() self.user._authority_perm_cache = perm_cache self.user._authority_group_perm_cache = group_perm_cache self.user._authority_perm_cache_filled = True
python
{ "resource": "" }
q4414
BasePermission._prime_group_perm_caches
train
def _prime_group_perm_caches(self): """ Prime the group cache and put them on the ``self.group``. In addition add a cache filled flag on ``self.group``. """ perm_cache = self._get_group_cached_perms() self.group._authority_perm_cache = perm_cache self.group._authority_perm_cache_filled = True
python
{ "resource": "" }
q4415
BasePermission.invalidate_permissions_cache
train
def invalidate_permissions_cache(self): """ In the event that the Permission table is changed during the use of a permission the Permission cache will need to be invalidated and regenerated. By calling this method the invalidation will occur, and the next time the cached_permissions is used the cache will be re-primed. """ if self.user: self.user._authority_perm_cache_filled = False if self.group: self.group._authority_perm_cache_filled = False
python
{ "resource": "" }
q4416
BasePermission.has_group_perms
train
def has_group_perms(self, perm, obj, approved): """ Check if group has the permission for the given object """ if not self.group: return False if self.use_smart_cache: content_type_pk = Permission.objects.get_content_type(obj).pk def _group_has_perms(cached_perms): # Check to see if the permission is in the cache. return cached_perms.get(( obj.pk, content_type_pk, perm, approved, )) # Check to see if the permission is in the cache. return _group_has_perms(self._group_perm_cache) # Actually hit the DB, no smart cache used. return Permission.objects.group_permissions( self.group, perm, obj, approved, ).filter( object_id=obj.pk, ).exists()
python
{ "resource": "" }
q4417
BasePermission.has_perm
train
def has_perm(self, perm, obj, check_groups=True, approved=True):
    """
    Check if user has the permission for the given object
    """
    if self.user:
        if self.has_user_perms(perm, obj, approved, check_groups):
            return True
    if self.group:
        return self.has_group_perms(perm, obj, approved)
    return False
python
{ "resource": "" }
q4418
BasePermission.requested_perm
train
def requested_perm(self, perm, obj, check_groups=True):
    """
    Check if user requested a permission for the given object
    """
    return self.has_perm(perm, obj, check_groups, False)
python
{ "resource": "" }
q4419
BasePermission.assign
train
def assign(self, check=None, content_object=None, generic=False): """ Assign a permission to a user. To assign permission for all checks: let check=None. To assign permission for all objects: let content_object=None. If generic is True then "check" will be suffixed with _modelname. """ result = [] if not content_object: content_objects = (self.model,) elif not isinstance(content_object, (list, tuple)): content_objects = (content_object,) else: content_objects = content_object if not check: checks = self.generic_checks + getattr(self, 'checks', []) elif not isinstance(check, (list, tuple)): checks = (check,) else: checks = check for content_object in content_objects: # raise an exception before adding any permission # i think Django does not rollback by default if not isinstance(content_object, (Model, ModelBase)): raise NotAModel(content_object) elif isinstance(content_object, Model) and not content_object.pk: raise UnsavedModelInstance(content_object) content_type = ContentType.objects.get_for_model(content_object) for check in checks: if isinstance(content_object, Model): # make an authority per object permission codename = self.get_codename( check, content_object, generic, ) try: perm = Permission.objects.get( user=self.user, codename=codename, approved=True, content_type=content_type, object_id=content_object.pk, ) except Permission.DoesNotExist: perm = Permission.objects.create( user=self.user, content_object=content_object, codename=codename, approved=True, ) result.append(perm) elif isinstance(content_object, ModelBase): # make a Django permission codename = self.get_django_codename( check, content_object, generic, without_left=True, ) try: perm = DjangoPermission.objects.get(codename=codename) except DjangoPermission.DoesNotExist: name = check if '_' in name: name = name[0:name.find('_')] perm = DjangoPermission( name=name, codename=codename, content_type=content_type, ) perm.save() self.user.user_permissions.add(perm) result.append(perm) return result
python
{ "resource": "" }
q4420
delimited
train
def delimited(items, character='|'):
    """Returns a character delimited version of the provided list as a Python string"""
    return character.join(items) if type(items) in (list, tuple, set) else items
python
{ "resource": "" }
q4421
API._rate_limit
train
def _rate_limit(self):
    """Pulls in and enforces the latest rate limits for the specified user"""
    self.limits_set = True
    for product in self.account_information():
        self.limits[product['id']] = {
            'interval': timedelta(seconds=60 / float(product['per_minute_limit']))
        }
python
{ "resource": "" }
q4422
API.domain_search
train
def domain_search(self, query, exclude_query=[], max_length=25, min_length=2, has_hyphen=True, has_number=True, active_only=False, deleted_only=False, anchor_left=False, anchor_right=False, page=1, **kwargs): """Each term in the query string must be at least three characters long. Pass in a list or use spaces to separate multiple terms. """ return self._results('domain-search', '/v2/domain-search', query=delimited(query, ' '), exclude_query=delimited(exclude_query, ' '), max_length=max_length, min_length=min_length, has_hyphen=has_hyphen, has_number=has_number, active_only=active_only, deleted_only=deleted_only, anchor_left=anchor_left, anchor_right=anchor_right, page=page, items_path=('results', ), **kwargs)
python
{ "resource": "" }
q4423
API.domain_suggestions
train
def domain_suggestions(self, query, **kwargs): """Passed in name must be at least two characters long. Use a list or spaces to separate multiple terms.""" return self._results('domain-suggestions', '/v1/domain-suggestions', query=delimited(query, ' '), items_path=('suggestions', ), **kwargs)
python
{ "resource": "" }
q4424
API.hosting_history
train
def hosting_history(self, query, **kwargs): """Returns the hosting history from the given domain name""" return self._results('hosting-history', '/v1/{0}/hosting-history'.format(query), cls=GroupedIterable, **kwargs)
python
{ "resource": "" }
q4425
API.ip_registrant_monitor
train
def ip_registrant_monitor(self, query, days_back=0, search_type="all", server=None, country=None, org=None, page=1, include_total_count=False, **kwargs): """Query based on free text query terms""" return self._results('ip-registrant-monitor', '/v1/ip-registrant-monitor', query=query, days_back=days_back, search_type=search_type, server=server, country=country, org=org, page=page, include_total_count=include_total_count, **kwargs)
python
{ "resource": "" }
q4426
API.parsed_whois
train
def parsed_whois(self, query, **kwargs): """Pass in a domain name""" return self._results('parsed-whois', '/v1/{0}/whois/parsed'.format(query), cls=ParsedWhois, **kwargs)
python
{ "resource": "" }
q4427
API.reputation
train
def reputation(self, query, include_reasons=False, **kwargs): """Pass in a domain name to see its reputation score""" return self._results('reputation', '/v1/reputation', domain=query, include_reasons=include_reasons, cls=Reputation, **kwargs)
python
{ "resource": "" }
q4428
API.host_domains
train
def host_domains(self, ip=None, limit=None, **kwargs): """Pass in an IP address.""" return self._results('reverse-ip', '/v1/{0}/host-domains'.format(ip), limit=limit, **kwargs)
python
{ "resource": "" }
q4429
API.reverse_ip_whois
train
def reverse_ip_whois(self, query=None, ip=None, country=None, server=None, include_total_count=False, page=1, **kwargs): """Pass in an IP address or a list of free text query terms.""" if (ip and query) or not (ip or query): raise ValueError('Query or IP Address (but not both) must be defined') return self._results('reverse-ip-whois', '/v1/reverse-ip-whois', query=query, ip=ip, country=country, server=server, include_total_count=include_total_count, page=page, items_path=('records', ), **kwargs)
python
{ "resource": "" }
q4430
API.reverse_name_server
train
def reverse_name_server(self, query, limit=None, **kwargs): """Pass in a domain name or a name server.""" return self._results('reverse-name-server', '/v1/{0}/name-server-domains'.format(query), items_path=('primary_domains', ), limit=limit, **kwargs)
python
{ "resource": "" }
q4431
API.phisheye_term_list
train
def phisheye_term_list(self, include_inactive=False, **kwargs): """Provides a list of terms that are set up for this account. This call is not charged against your API usage limit. NOTE: The terms must be configured in the PhishEye web interface: https://research.domaintools.com/phisheye. There is no API call to set up the terms. """ return self._results('phisheye_term_list', '/v1/phisheye/term-list', include_inactive=include_inactive, items_path=('terms', ), **kwargs)
python
{ "resource": "" }
q4432
API.iris
train
def iris(self, domain=None, ip=None, email=None, nameserver=None, registrar=None, registrant=None, registrant_org=None, **kwargs): """Performs a search for the provided search terms ANDed together, returning the pivot engine row data for the resulting domains. """ if ((not domain and not ip and not email and not nameserver and not registrar and not registrant and not registrant_org and not kwargs)): raise ValueError('At least one search term must be specified') return self._results('iris', '/v1/iris', domain=domain, ip=ip, email=email, nameserver=nameserver, registrar=registrar, registrant=registrant, registrant_org=registrant_org, items_path=('results', ), **kwargs)
python
{ "resource": "" }
q4433
API.risk
train
def risk(self, domain, **kwargs): """Returns back the risk score for a given domain""" return self._results('risk', '/v1/risk', items_path=('components', ), domain=domain, cls=Reputation, **kwargs)
python
{ "resource": "" }
q4434
API.risk_evidence
train
def risk_evidence(self, domain, **kwargs): """Returns back the detailed risk evidence associated with a given domain""" return self._results('risk-evidence', '/v1/risk/evidence/', items_path=('components', ), domain=domain, **kwargs)
python
{ "resource": "" }
q4435
Fetcher.init
train
def init(self):
    """Returns a tuple pair of cookie and crumb used in the request"""
    url = 'https://finance.yahoo.com/quote/%s/history' % (self.ticker)
    r = requests.get(url)
    txt = r.content
    cookie = r.cookies['B']
    pattern = re.compile(r'.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}')
    for line in txt.splitlines():
        m = pattern.match(line.decode("utf-8"))
        if m is not None:
            crumb = m.groupdict()['crumb']
            crumb = crumb.replace(u'\\u002F', '/')
    return cookie, crumb
python
{ "resource": "" }
q4436
Fetcher.getData
train
def getData(self, events):
    """Returns a list of historical data from Yahoo Finance"""
    if self.interval not in ["1d", "1wk", "1mo"]:
        raise ValueError("Incorrect interval: valid intervals are 1d, 1wk, 1mo")
    url = self.api_url % (self.ticker, self.start, self.end, self.interval, events, self.crumb)
    data = requests.get(url, cookies={'B': self.cookie})
    content = StringIO(data.content.decode("utf-8"))
    return pd.read_csv(content, sep=',')
python
{ "resource": "" }
q4437
Disk._get_mount_methods
train
def _get_mount_methods(self, disk_type): """Finds which mount methods are suitable for the specified disk type. Returns a list of all suitable mount methods. """ if self.disk_mounter == 'auto': methods = [] def add_method_if_exists(method): if (method == 'avfs' and _util.command_exists('avfsd')) or \ _util.command_exists(method): methods.append(method) if self.read_write: add_method_if_exists('xmount') else: if disk_type == 'encase': add_method_if_exists('ewfmount') elif disk_type == 'vmdk': add_method_if_exists('vmware-mount') add_method_if_exists('affuse') elif disk_type == 'dd': add_method_if_exists('affuse') elif disk_type == 'compressed': add_method_if_exists('avfs') elif disk_type == 'qcow2': add_method_if_exists('qemu-nbd') elif disk_type == 'vdi': add_method_if_exists('qemu-nbd') add_method_if_exists('xmount') else: methods = [self.disk_mounter] return methods
python
{ "resource": "" }
q4438
Disk._mount_avfs
train
def _mount_avfs(self): """Mounts the AVFS filesystem.""" self._paths['avfs'] = tempfile.mkdtemp(prefix='image_mounter_avfs_') # start by calling the mountavfs command to initialize avfs _util.check_call_(['avfsd', self._paths['avfs'], '-o', 'allow_other'], stdout=subprocess.PIPE) # no multifile support for avfs avfspath = self._paths['avfs'] + '/' + os.path.abspath(self.paths[0]) + '#' targetraw = os.path.join(self.mountpoint, 'avfs') os.symlink(avfspath, targetraw) logger.debug("Symlinked {} with {}".format(avfspath, targetraw)) raw_path = self.get_raw_path() logger.debug("Raw path to avfs is {}".format(raw_path)) if raw_path is None: raise MountpointEmptyError()
python
{ "resource": "" }
q4439
Disk.init_volumes
train
def init_volumes(self, single=None, only_mount=None, skip_mount=None, swallow_exceptions=True):
    """Generator that detects and mounts all volumes in the disk.

    :param single: If *single* is :const:`True`, this method will call :func:`init_single_volume`.
                   If *single* is False, only :func:`init_multiple_volumes` is called. If *single*
                   is None, :func:`init_multiple_volumes` is always called, being followed by
                   :func:`init_single_volume` if no volumes were detected.
    :param list only_mount: If set, must be a list of volume indexes that are the only ones mounted.
    :param list skip_mount: If set, must be a list of volume indexes that should not be mounted.
    :param bool swallow_exceptions: If True, Exceptions are not raised but rather set on the instance.
    """
    for volume in self.detect_volumes(single=single):
        for vol in volume.init(only_mount=only_mount, skip_mount=skip_mount,
                               swallow_exceptions=swallow_exceptions):
            yield vol
python
{ "resource": "" }
q4440
Disk.get_volumes
train
def get_volumes(self): """Gets a list of all volumes in this disk, including volumes that are contained in other volumes.""" volumes = [] for v in self.volumes: volumes.extend(v.get_volumes()) return volumes
python
{ "resource": "" }
q4441
Disk.unmount
train
def unmount(self, remove_rw=False, allow_lazy=False): """Removes all ties of this disk to the filesystem, so the image can be unmounted successfully. :raises SubsystemError: when one of the underlying commands fails. Some are swallowed. :raises CleanupError: when actual cleanup fails. Some are swallowed. """ for m in list(sorted(self.volumes, key=lambda v: v.mountpoint or "", reverse=True)): try: m.unmount(allow_lazy=allow_lazy) except ImageMounterError: logger.warning("Error unmounting volume {0}".format(m.mountpoint)) if self._paths.get('nbd'): _util.clean_unmount(['qemu-nbd', '-d'], self._paths['nbd'], rmdir=False) if self.mountpoint: try: _util.clean_unmount(['fusermount', '-u'], self.mountpoint) except SubsystemError: if not allow_lazy: raise _util.clean_unmount(['fusermount', '-uz'], self.mountpoint) if self._paths.get('avfs'): try: _util.clean_unmount(['fusermount', '-u'], self._paths['avfs']) except SubsystemError: if not allow_lazy: raise _util.clean_unmount(['fusermount', '-uz'], self._paths['avfs']) if self.rw_active() and remove_rw: os.remove(self.rwpath) self.is_mounted = False
python
{ "resource": "" }
q4442
ArgumentParsedShell._make_argparser
train
def _make_argparser(self): """Makes a new argument parser.""" self.argparser = ShellArgumentParser(prog='') subparsers = self.argparser.add_subparsers() for name in self.get_names(): if name.startswith('parser_'): parser = subparsers.add_parser(name[7:]) parser.set_defaults(func=getattr(self, 'arg_' + name[7:])) getattr(self, name)(parser) self.argparser_completer = None try: import argcomplete except ImportError: pass else: os.environ.setdefault("_ARGCOMPLETE_COMP_WORDBREAKS", " \t\"'") self.argparser_completer = argcomplete.CompletionFinder(self.argparser)
python
{ "resource": "" }
q4443
ArgumentParsedShell.default
train
def default(self, line): """Overriding default to get access to any argparse commands we have specified.""" if any((line.startswith(x) for x in self.argparse_names())): try: args = self.argparser.parse_args(shlex.split(line)) except Exception: # intentionally catches also other errors in argparser pass else: args.func(args) else: cmd.Cmd.default(self, line)
python
{ "resource": "" }
q4444
ArgumentParsedShell.completedefault
train
def completedefault(self, text, line, begidx, endidx): """Accessing the argcompleter if available.""" if self.argparser_completer and any((line.startswith(x) for x in self.argparse_names())): self.argparser_completer.rl_complete(line, 0) return [x[begidx:] for x in self.argparser_completer._rl_matches] else: return []
python
{ "resource": "" }
q4445
ArgumentParsedShell.completenames
train
def completenames(self, text, *ignored):
    """Patched to also return argparse commands"""
    return sorted(cmd.Cmd.completenames(self, text, *ignored) + self.argparse_names(text))
python
{ "resource": "" }
q4446
ArgumentParsedShell.do_help
train
def do_help(self, arg):
    """Patched to show help for argparse commands"""
    if not arg or arg not in self.argparse_names():
        cmd.Cmd.do_help(self, arg)
    else:
        try:
            self.argparser.parse_args([arg, '--help'])
        except Exception:
            pass
python
{ "resource": "" }
q4447
ArgumentParsedShell.print_topics
train
def print_topics(self, header, cmds, cmdlen, maxcol): """Patched to show all argparse commands as being documented""" if header == self.doc_header: cmds.extend(self.argparse_names()) cmd.Cmd.print_topics(self, header, sorted(cmds), cmdlen, maxcol)
python
{ "resource": "" }
q4448
ImageMounterShell.preloop
train
def preloop(self): """if the parser is not already set, loads the parser.""" if not self.parser: self.stdout.write("Welcome to imagemounter {version}".format(version=__version__)) self.stdout.write("\n") self.parser = ImageParser() for p in self.args.paths: self.onecmd('disk "{}"'.format(p))
python
{ "resource": "" }
q4449
ImageMounterShell.onecmd
train
def onecmd(self, line): """Do not crash the entire program when a single command fails.""" try: return cmd.Cmd.onecmd(self, line) except Exception as e: print("Critical error.", e)
python
{ "resource": "" }
q4450
ImageMounterShell._get_all_indexes
train
def _get_all_indexes(self): """Returns all indexes available in the parser""" if self.parser: return [v.index for v in self.parser.get_volumes()] + [d.index for d in self.parser.disks] else: return None
python
{ "resource": "" }
q4451
ImageMounterShell._get_by_index
train
def _get_by_index(self, index):
    """Returns a (volume, disk) tuple for the specified index"""
    volume_or_disk = self.parser.get_by_index(index)
    volume, disk = (volume_or_disk, None) if not isinstance(volume_or_disk, Disk) \
        else (None, volume_or_disk)
    return volume, disk
python
{ "resource": "" }
q4452
ImageMounterShell.do_quit
train
def do_quit(self, arg): """Quits the program.""" if self.saved: self.save() else: self.parser.clean() return True
python
{ "resource": "" }
q4453
Unmounter.unmount
train
def unmount(self): """Calls all unmount methods in the correct order.""" self.unmount_bindmounts() self.unmount_mounts() self.unmount_volume_groups() self.unmount_loopbacks() self.unmount_base_images() self.clean_dirs()
python
{ "resource": "" }
q4454
ImageParser.add_disk
train
def add_disk(self, path, force_disk_indexes=True, **args): """Adds a disk specified by the path to the ImageParser. :param path: The path to the disk volume :param force_disk_indexes: If true, always uses disk indexes. If False, only uses disk indexes if this is the second volume you add. If you plan on using this method, always leave this True. If you add a second disk when the previous disk has no index, an error is raised. :param args: Arguments to pass to the constructor of the Disk. """ if self.disks and self.disks[0].index is None: raise DiskIndexError("First disk has no index.") if force_disk_indexes or self.disks: index = len(self.disks) + 1 else: index = None disk = Disk(self, path, index=str(index) if index else None, **args) self.disks.append(disk) return disk
python
{ "resource": "" }
q4455
ImageParser.rw_active
train
def rw_active(self): """Indicates whether a read-write cache is active in any of the disks. :rtype: bool""" result = False for disk in self.disks: result = disk.rw_active() or result return result
python
{ "resource": "" }
q4456
ImageParser.get_by_index
train
def get_by_index(self, index): """Returns a Volume or Disk by its index.""" try: return self[index] except KeyError: for v in self.get_volumes(): if v.index == str(index): return v raise KeyError(index)
python
{ "resource": "" }
q4457
ImageParser.force_clean
train
def force_clean(self, remove_rw=False, allow_lazy=False, retries=5, sleep_interval=0.5): """Attempts to call the clean method, but will retry automatically if an error is raised. When the attempts run out, it will raise the last error. Note that the method will only catch :class:`ImageMounterError` exceptions. :param bool remove_rw: indicates whether a read-write cache should be removed :param bool allow_lazy: indicates whether lazy unmounting is allowed :param retries: Maximum amount of retries while unmounting :param sleep_interval: The sleep interval between attempts. :raises SubsystemError: when one of the underlying commands fails. Some are swallowed. :raises CleanupError: when actual cleanup fails. Some are swallowed. """ while True: try: self.clean(remove_rw=remove_rw, allow_lazy=allow_lazy) except ImageMounterError: if retries == 0: raise retries -= 1 time.sleep(sleep_interval) else: return
python
{ "resource": "" }
q4458
ImageParser.reconstruct
train
def reconstruct(self): """Reconstructs the filesystem of all volumes mounted by the parser by inspecting the last mount point and bind mounting everything. :raises: NoRootFoundError if no root could be found :return: the root :class:`Volume` """ volumes = list(sorted((v for v in self.get_volumes() if v.mountpoint and v.info.get('lastmountpoint')), key=lambda v: v.numeric_index)) try: root = list(filter(lambda x: x.info.get('lastmountpoint') == '/', volumes))[0] except IndexError: logger.error("Could not find / while reconstructing, aborting!") raise NoRootFoundError() volumes.remove(root) for v in volumes: if v.info.get('lastmountpoint') == root.info.get('lastmountpoint'): logger.debug("Skipping volume %s as it has the same root as %s", v, root) continue v.bindmount(os.path.join(root.mountpoint, v.info.get('lastmountpoint')[1:])) return root
python
{ "resource": "" }
q4459
VolumeSystem._make_subvolume
train
def _make_subvolume(self, **args): """Creates a subvolume, adds it to this class and returns it.""" from imagemounter.volume import Volume v = Volume(disk=self.disk, parent=self.parent, volume_detector=self.volume_detector, **args) # vstype is not passed down, let it decide for itself. self.volumes.append(v) return v
python
{ "resource": "" }
q4460
VolumeSystem._make_single_subvolume
train
def _make_single_subvolume(self, only_one=True, **args): """Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it. :param bool only_one: if this volume system already has at least one volume, it is returned instead. """ if only_one and self.volumes: return self.volumes[0] if self.parent.index is None: index = '0' else: index = '{0}.0'.format(self.parent.index) volume = self._make_subvolume(index=index, **args) return volume
python
{ "resource": "" }
q4461
VolumeSystem.detect_volumes
train
def detect_volumes(self, vstype=None, method=None, force=False):
    """Iterator for detecting volumes within this volume system.

    :param str vstype: The volume system type to use. If None, uses :attr:`vstype`
    :param str method: The detection method to use. If None, uses :attr:`detection`
    :param bool force: Specify if you want to force running the detection even if
                       :attr:`has_detected` is True.
    """
    if self.has_detected and not force:
        logger.warning("Detection already ran.")
        return

    if vstype is None:
        vstype = self.vstype
    if method is None:
        method = self.volume_detector
    if method == 'auto':
        method = VolumeSystem._determine_auto_detection_method()

    if method in ALL_VOLUME_SYSTEM_DETECTORS:
        for v in ALL_VOLUME_SYSTEM_DETECTORS[method].detect(self, vstype):
            yield v
    else:
        logger.error("No viable detection method found")
        raise ArgumentError("No viable detection method found")

    self.has_detected = True
python
{ "resource": "" }
q4462
VolumeSystem._determine_auto_detection_method
train
def _determine_auto_detection_method():
    """Return the detection method to use when the detection method is 'auto'"""
    if dependencies.pytsk3.is_available:
        return 'pytsk3'
    elif dependencies.mmls.is_available:
        return 'mmls'
    elif dependencies.parted.is_available:
        return 'parted'
    else:
        raise PrerequisiteFailedError("No valid detection method is installed.")
python
{ "resource": "" }
q4463
VolumeSystem._assign_disktype_data
train
def _assign_disktype_data(self, volume, slot=None): """Assigns cached disktype data to a volume.""" if slot is None: slot = volume.slot if slot in self._disktype: data = self._disktype[slot] if not volume.info.get('guid') and 'guid' in data: volume.info['guid'] = data['guid'] if not volume.info.get('label') and 'label' in data: volume.info['label'] = data['label']
python
{ "resource": "" }
q4464
VolumeDetector._format_index
train
def _format_index(self, volume_system, idx): """Returns a formatted index given the disk index idx.""" if volume_system.parent.index is not None: return '{0}.{1}'.format(volume_system.parent.index, idx) else: return str(idx)
python
{ "resource": "" }
q4465
Pytsk3VolumeDetector._find_volumes
train
def _find_volumes(self, volume_system, vstype='detect'): """Finds all volumes based on the pytsk3 library.""" try: # noinspection PyUnresolvedReferences import pytsk3 except ImportError: logger.error("pytsk3 not installed, could not detect volumes") raise ModuleNotFoundError("pytsk3") baseimage = None try: # ewf raw image is now available on base mountpoint # either as ewf1 file or as .dd file raw_path = volume_system.parent.get_raw_path() # noinspection PyBroadException try: baseimage = pytsk3.Img_Info(raw_path) except Exception: logger.error("Failed retrieving image info (possible empty image).", exc_info=True) return [] try: volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_' + vstype.upper()), volume_system.parent.offset // volume_system.disk.block_size) volume_system.volume_source = 'multi' return volumes except Exception as e: # some bug in sleuthkit makes detection sometimes difficult, so we hack around it: if "(GPT or DOS at 0)" in str(e) and vstype != 'gpt': volume_system.vstype = 'gpt' # noinspection PyBroadException try: logger.warning("Error in retrieving volume info: TSK couldn't decide between GPT and DOS, " "choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True) volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_GPT')) volume_system.volume_source = 'multi' return volumes except Exception as e: logger.exception("Failed retrieving image info (possible empty image).") raise SubsystemError(e) else: logger.exception("Failed retrieving image info (possible empty image).") raise SubsystemError(e) finally: if baseimage: baseimage.close() del baseimage
python
{ "resource": "" }
q4466
Pytsk3VolumeDetector.detect
train
def detect(self, volume_system, vstype='detect'): """Generator that mounts every partition of this image and yields the mountpoint.""" # Loop over all volumes in image. for p in self._find_volumes(volume_system, vstype): import pytsk3 volume = volume_system._make_subvolume( index=self._format_index(volume_system, p.addr), offset=p.start * volume_system.disk.block_size, size=p.len * volume_system.disk.block_size ) # Fill volume with more information volume.info['fsdescription'] = p.desc.strip().decode('utf-8') if p.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC: volume.flag = 'alloc' volume.slot = _util.determine_slot(p.table_num, p.slot_num) volume_system._assign_disktype_data(volume) logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(p.start, p.len, volume.info['fsdescription'])) elif p.flags == pytsk3.TSK_VS_PART_FLAG_UNALLOC: volume.flag = 'unalloc' logger.info("Found unallocated space: block offset: {0}, length: {1} ".format(p.start, p.len)) elif p.flags == pytsk3.TSK_VS_PART_FLAG_META: volume.flag = 'meta' logger.info("Found meta volume: block offset: {0}, length: {1} ".format(p.start, p.len)) yield volume
python
{ "resource": "" }
q4467
PartedVolumeDetector.detect
train
def detect(self, volume_system, vstype='detect'): """Finds and mounts all volumes based on parted. :param VolumeSystem volume_system: The volume system. """ # for some reason, parted does not properly return extended volume types in its machine # output, so we need to execute it twice. meta_volumes = [] # noinspection PyBroadException try: output = _util.check_output_(['parted', volume_system.parent.get_raw_path(), 'print'], stdin=subprocess.PIPE) for line in output.splitlines(): if 'extended' in line: meta_volumes.append(int(line.split()[0])) except Exception: logger.exception("Failed executing parted command.") # skip detection of meta volumes # noinspection PyBroadException try: # parted does not support passing in the vstype. It either works, or it doesn't. cmd = ['parted', volume_system.parent.get_raw_path(), '-sm', 'unit s', 'print free'] output = _util.check_output_(cmd, stdin=subprocess.PIPE) volume_system.volume_source = 'multi' except Exception as e: logger.exception("Failed executing parted command") raise SubsystemError(e) num = 0 for line in output.splitlines(): if line.startswith("Warning") or not line or ':' not in line or line.startswith(self.parent.get_raw_path()): continue line = line[:-1] # remove last ; try: slot, start, end, length, description = line.split(':', 4) if ':' in description: description, label, flags = description.split(':', 2) else: description, label, flags = description, '', '' try: slot = int(slot) except ValueError: continue volume = volume_system._make_subvolume( index=self._format_index(volume_system, num), offset=int(start[:-1]) * volume_system.disk.block_size, # remove last s size=int(length[:-1]) * volume_system.disk.block_size) volume.info['fsdescription'] = description if label: volume.info['label'] = label if flags: volume.info['parted_flags'] = flags # TODO: detection of meta volumes if description == 'free': volume.flag = 'unalloc' logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start[:-1], length[:-1])) elif slot in meta_volumes: volume.flag = 'meta' volume.slot = slot logger.info("Found meta volume: block offset: {0}, length: {1}".format(start[:-1], length[:-1])) else: volume.flag = 'alloc' volume.slot = slot volume_system._assign_disktype_data(volume) logger.info("Found allocated {2}: block offset: {0}, length: {1} " .format(start[:-1], length[:-1], volume.info['fsdescription'])) except AttributeError: logger.exception("Error while parsing parted output") continue num += 1 yield volume
python
{ "resource": "" }
q4468
VssVolumeDetector.detect
train
def detect(self, volume_system, vstype='detect'): """Detect volume shadow copy volumes in the specified path.""" path = volume_system.parent._paths['vss'] try: volume_info = _util.check_output_(["vshadowinfo", "-o", str(volume_system.parent.offset), volume_system.parent.get_raw_path()]) except Exception as e: logger.exception("Failed obtaining info from the volume shadow copies.") raise SubsystemError(e) current_store = None for line in volume_info.splitlines(): line = line.strip() if line.startswith("Store:"): idx = line.split(":")[-1].strip() current_store = volume_system._make_subvolume( index=self._format_index(volume_system, idx), flag='alloc', offset=0 ) current_store._paths['vss_store'] = os.path.join(path, 'vss' + idx) current_store.info['fsdescription'] = 'VSS Store' elif line.startswith("Volume size"): current_store.size = int(line.split(":")[-1].strip().split()[0]) elif line.startswith("Creation time"): current_store.info['creation_time'] = line.split(":")[-1].strip() return volume_system.volumes
python
{ "resource": "" }
q4469
LvmVolumeDetector.detect
train
def detect(self, volume_system, vstype='detect'): """Gather information about lvolumes, gathering their label, size and raw path""" volume_group = volume_system.parent.info.get('volume_group') result = _util.check_output_(["lvm", "lvdisplay", volume_group]) cur_v = None for l in result.splitlines(): if "--- Logical volume ---" in l: cur_v = volume_system._make_subvolume( index=self._format_index(volume_system, len(volume_system)), flag='alloc' ) cur_v.info['fsdescription'] = 'Logical Volume' if "LV Name" in l: cur_v.info['label'] = l.replace("LV Name", "").strip() if "LV Size" in l: size, unit = l.replace("LV Size", "").strip().split(" ", 1) cur_v.size = int(float(size.replace(',', '.')) * {'KiB': 1024, 'MiB': 1024 ** 2, 'GiB': 1024 ** 3, 'TiB': 1024 ** 4}.get(unit, 1)) if "LV Path" in l: cur_v._paths['lv'] = l.replace("LV Path", "").strip() cur_v.offset = 0 logger.info("{0} volumes found".format(len(volume_system))) volume_system.volume_source = 'multi' return volume_system.volumes
python
{ "resource": "" }
q4470
Volume._get_fstype_from_parser
train
def _get_fstype_from_parser(self, fstype=None): """Load fstype information from the parser instance.""" if fstype: self.fstype = fstype elif self.index in self.disk.parser.fstypes: self.fstype = self.disk.parser.fstypes[self.index] elif '*' in self.disk.parser.fstypes: self.fstype = self.disk.parser.fstypes['*'] elif '?' in self.disk.parser.fstypes and self.disk.parser.fstypes['?'] is not None: self.fstype = "?" + self.disk.parser.fstypes['?'] else: self.fstype = "" if self.fstype in VOLUME_SYSTEM_TYPES: self.volumes.vstype = self.fstype self.fstype = 'volumesystem' # convert fstype from string to a FileSystemType object if not isinstance(self.fstype, filesystems.FileSystemType): if self.fstype.startswith("?"): fallback = FILE_SYSTEM_TYPES[self.fstype[1:]] self.fstype = filesystems.FallbackFileSystemType(fallback) else: self.fstype = FILE_SYSTEM_TYPES[self.fstype]
python
{ "resource": "" }
q4471
Volume._get_blkid_type
train
def _get_blkid_type(self): """Retrieves the FS type from the blkid command.""" try: result = _util.check_output_(['blkid', '-p', '-O', str(self.offset), self.get_raw_path()]) if not result: return None # noinspection PyTypeChecker blkid_result = dict(re.findall(r'([A-Z]+)="(.+?)"', result)) self.info['blkid_data'] = blkid_result if 'PTTYPE' in blkid_result and 'TYPE' not in blkid_result: return blkid_result.get('PTTYPE') else: return blkid_result.get('TYPE') except Exception: return None
python
{ "resource": "" }
q4472
Volume._get_magic_type
train
def _get_magic_type(self): """Checks the volume for its magic bytes and returns the magic.""" try: with io.open(self.disk.get_fs_path(), "rb") as file: file.seek(self.offset) fheader = file.read(min(self.size, 4096) if self.size else 4096) except IOError: logger.exception("Failed reading first 4K bytes from volume.") return None # TODO fallback to img-cat image -s blocknum | file - # if we were able to load the module magic try: # noinspection PyUnresolvedReferences import magic if hasattr(magic, 'from_buffer'): # using https://github.com/ahupp/python-magic logger.debug("Using python-magic Python package for file type magic") result = magic.from_buffer(fheader).decode() self.info['magic_data'] = result return result elif hasattr(magic, 'open'): # using Magic file extensions by Rueben Thomas (Ubuntu python-magic module) logger.debug("Using python-magic system package for file type magic") ms = magic.open(magic.NONE) ms.load() result = ms.buffer(fheader) ms.close() self.info['magic_data'] = result return result else: logger.warning("The python-magic module is not available, but another module named magic was found.") except ImportError: logger.warning("The python-magic module is not available.") except AttributeError: logger.warning("The python-magic module is not available, but another module named magic was found.") return None
python
{ "resource": "" }
q4473
Volume.get_safe_label
train
def get_safe_label(self): """Returns a label that is safe to add to a path in the mountpoint for this volume.""" if self.info.get('label') == '/': return 'root' suffix = re.sub(r"[/ \(\)]+", "_", self.info.get('label')) if self.info.get('label') else "" if suffix and suffix[0] == '_': suffix = suffix[1:] if len(suffix) > 2 and suffix[-1] == '_': suffix = suffix[:-1] return suffix
python
{ "resource": "" }
q4474
Volume.detect_volume_shadow_copies
train
def detect_volume_shadow_copies(self): """Method to call vshadowmount and mount NTFS volume shadow copies. :return: iterable with the :class:`Volume` objects of the VSS :raises CommandNotFoundError: if the underlying command does not exist :raises SubSystemError: if the underlying command fails :raises NoMountpointAvailableError: if there is no mountpoint available """ self._make_mountpoint(var_name='vss', suffix="vss", in_paths=True) try: _util.check_call_(["vshadowmount", "-o", str(self.offset), self.get_raw_path(), self._paths['vss']]) except Exception as e: logger.exception("Failed mounting the volume shadow copies.") raise SubsystemError(e) else: return self.volumes.detect_volumes(vstype='vss')
python
{ "resource": "" }
q4475
Volume._should_mount
train
def _should_mount(self, only_mount=None, skip_mount=None): """Indicates whether this volume should be mounted. Internal method, used by imount.py""" om = only_mount is None or \ self.index in only_mount or \ self.info.get('lastmountpoint') in only_mount or \ self.info.get('label') in only_mount sm = skip_mount is None or \ (self.index not in skip_mount and self.info.get('lastmountpoint') not in skip_mount and self.info.get('label') not in skip_mount) return om and sm
python
{ "resource": "" }
q4476
Volume.init
train
def init(self, only_mount=None, skip_mount=None, swallow_exceptions=True): """Generator that mounts this volume and either yields itself or recursively generates its subvolumes. More specifically, this function will call :func:`load_fsstat_data` (iff *no_stats* is False), followed by :func:`mount`, followed by a call to :func:`detect_mountpoint`, after which ``self`` is yielded, or the result of the :func:`init` call on each subvolume is yielded :param only_mount: if specified, only volume indexes in this list are mounted. Volume indexes are strings. :param skip_mount: if specified, volume indexes in this list are not mounted. :param swallow_exceptions: if True, any error occuring when mounting the volume is swallowed and added as an exception attribute to the yielded objects. """ if swallow_exceptions: self.exception = None try: if not self._should_mount(only_mount, skip_mount): yield self return if not self.init_volume(): yield self return except ImageMounterError as e: if swallow_exceptions: self.exception = e else: raise if not self.volumes: yield self else: for v in self.volumes: for s in v.init(only_mount, skip_mount, swallow_exceptions): yield s
python
{ "resource": "" }
q4477
Volume._clear_mountpoint
train
def _clear_mountpoint(self): """Clears a created mountpoint. Does not unmount it, merely deletes it.""" if self.mountpoint: os.rmdir(self.mountpoint) self.mountpoint = ""
python
{ "resource": "" }
q4478
Volume.bindmount
train
def bindmount(self, mountpoint): """Bind mounts the volume to another mountpoint. Only works if the volume is already mounted. :raises NotMountedError: when the volume is not yet mounted :raises SubsystemError: when the underlying command failed """ if not self.mountpoint: raise NotMountedError(self) try: _util.check_call_(['mount', '--bind', self.mountpoint, mountpoint], stdout=subprocess.PIPE) if 'bindmounts' in self._paths: self._paths['bindmounts'].append(mountpoint) else: self._paths['bindmounts'] = [mountpoint] return True except Exception as e: logger.exception("Error bind mounting {0}.".format(self)) raise SubsystemError(e)
python
{ "resource": "" }
q4479
Volume.get_volumes
train
def get_volumes(self): """Recursively gets a list of all subvolumes and the current volume.""" if self.volumes: volumes = [] for v in self.volumes: volumes.extend(v.get_volumes()) volumes.append(self) return volumes else: return [self]
python
{ "resource": "" }
q4480
require
train
def require(*requirements, **kwargs):
    """Decorator that can be used to require requirements.

    :param requirements: List of requirements that should be verified
    :param none_on_failure: If true, does not raise a PrerequisiteFailedError, but instead returns None
    """
    # TODO: require(*requirements, none_on_failure=False) is not supported by Python 2
    none_on_failure = kwargs.get('none_on_failure', False)

    def inner(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            for req in requirements:
                if none_on_failure:
                    if not getattr(req, 'is_available'):
                        return None
                else:
                    getattr(req, 'require')()
            return f(*args, **kwargs)
        return wrapper
    return inner
python
{ "resource": "" }
q4481
CommandDependency.status_message
train
def status_message(self): """Detailed message about whether the dependency is installed. :rtype: str """ if self.is_available: return "INSTALLED {0!s}" elif self.why and self.package: return "MISSING {0!s:<20}needed for {0.why}, part of the {0.package} package" elif self.why: return "MISSING {0!s:<20}needed for {0.why}" elif self.package: return "MISSING {0!s:<20}part of the {0.package} package" else: return "MISSING {0!s:<20}"
python
{ "resource": "" }
q4482
FileSystemType.mount
train
def mount(self, volume): """Mounts the given volume on the provided mountpoint. The default implementation simply calls mount. :param Volume volume: The volume to be mounted :param mountpoint: The file system path to mount the filesystem on. :raises UnsupportedFilesystemError: when the volume system type can not be mounted. """ volume._make_mountpoint() try: self._call_mount(volume, volume.mountpoint, self._mount_type or self.type, self._mount_opts) except Exception: # undo the creation of the mountpoint volume._clear_mountpoint() raise
python
{ "resource": "" }
q4483
FileSystemType._call_mount
train
def _call_mount(self, volume, mountpoint, type=None, opts=""): """Calls the mount command, specifying the mount type and mount options.""" # default arguments for calling mount if opts and not opts.endswith(','): opts += "," opts += 'loop,offset=' + str(volume.offset) + ',sizelimit=' + str(volume.size) # building the command cmd = ['mount', volume.get_raw_path(), mountpoint, '-o', opts] # add read-only if needed if not volume.disk.read_write: cmd[-1] += ',ro' # add the type if specified if type is not None: cmd += ['-t', type] _util.check_output_(cmd, stderr=subprocess.STDOUT)
python
{ "resource": "" }
q4484
Jffs2FileSystemType.mount
train
def mount(self, volume): """Perform specific operations to mount a JFFS2 image. This kind of image is sometimes used for things like bios images. so external tools are required but given this method you don't have to memorize anything and it works fast and easy. Note that this module might not yet work while mounting multiple images at the same time. """ # we have to make a ram-device to store the image, we keep 20% overhead size_in_kb = int((volume.size / 1024) * 1.2) _util.check_call_(['modprobe', '-v', 'mtd']) _util.check_call_(['modprobe', '-v', 'jffs2']) _util.check_call_(['modprobe', '-v', 'mtdram', 'total_size={}'.format(size_in_kb), 'erase_size=256']) _util.check_call_(['modprobe', '-v', 'mtdblock']) _util.check_call_(['dd', 'if=' + volume.get_raw_path(), 'of=/dev/mtd0']) _util.check_call_(['mount', '-t', 'jffs2', '/dev/mtdblock0', volume.mountpoint])
python
{ "resource": "" }
q4485
RaidFileSystemType.mount
train
def mount(self, volume): """Add the volume to a RAID system. The RAID array is activated as soon as the array can be activated. :raises NoLoopbackAvailableError: if no loopback device was found """ volume._find_loopback() raid_status = None try: # use mdadm to mount the loopback to a md device # incremental and run as soon as available output = _util.check_output_(['mdadm', '-IR', volume.loopback], stderr=subprocess.STDOUT) match = re.findall(r"attached to ([^ ,]+)", output) if match: volume._paths['md'] = os.path.realpath(match[0]) if 'which is already active' in output: logger.info("RAID is already active in other volume, using %s", volume._paths['md']) raid_status = 'active' elif 'not enough to start' in output: volume._paths['md'] = volume._paths['md'].replace("/dev/md/", "/dev/md") logger.info("RAID volume added, but not enough to start %s", volume._paths['md']) raid_status = 'waiting' else: logger.info("RAID started at {0}".format(volume._paths['md'])) raid_status = 'active' except Exception as e: logger.exception("Failed mounting RAID.") volume._free_loopback() raise SubsystemError(e) # search for the RAID volume for v in volume.disk.parser.get_volumes(): if v._paths.get("md") == volume._paths['md'] and v.volumes: logger.debug("Adding existing volume %s to volume %s", v.volumes[0], volume) v.volumes[0].info['raid_status'] = raid_status volume.volumes.volumes.append(v.volumes[0]) return v.volumes[0] else: logger.debug("Creating RAID volume for %s", self) container = volume.volumes._make_single_subvolume(flag='alloc', offset=0, size=volume.size) container.info['fsdescription'] = 'RAID Volume' container.info['raid_status'] = raid_status return container
python
{ "resource": "" }
q4486
escape
train
def escape(s, fold_newlines=True):
    """Escapes a string to make it usable in LaTeX text mode. Will replace special characters as
    well as newlines.

    Some problematic characters like ``[`` and ``]`` are escaped into groups (e.g. ``{[}``),
    because they tend to cause problems when mixed with ``\\`` newlines otherwise.

    :param s: The string to escape.
    :param fold_newlines: If true, multiple newlines will be reduced to just a single ``\\``.
                          Otherwise, whitespace is kept intact by adding multiple
                          ``[n\baselineskip]``.
    """
    def sub(m):
        c = m.group()

        if c in CHAR_ESCAPE:
            return CHAR_ESCAPE[c]

        if c.isspace():
            if fold_newlines:
                return r'\\'
            return r'\\[{}\baselineskip]'.format(len(c))

    return ESCAPE_RE.sub(sub, s)
python
{ "resource": "" }
q4487
build_pdf
train
def build_pdf(source, texinputs=[], builder=None):
    """Builds a LaTeX source to PDF.

    Will automatically instantiate an available builder (or raise a :class:`exceptions.RuntimeError`
    if none are available) and build the supplied source with it.

    Parameters are passed on to the builder's :meth:`~latex.build.LatexBuilder.build_pdf` function.

    :param builder: Specify which builder should be used - ``latexmk``, ``pdflatex`` or
                    ``xelatexmk``.
    """
    if builder is None:
        builders = PREFERRED_BUILDERS
    elif builder not in BUILDERS:
        raise RuntimeError('Invalid Builder specified')
    else:
        builders = (builder, )

    for bld in builders:
        bld_cls = BUILDERS[bld]
        builder = bld_cls()
        if not builder.is_available():
            continue
        return builder.build_pdf(source, texinputs)
    else:
        raise RuntimeError('No available builder could be instantiated. '
                           'Please make sure LaTeX is installed.')
python
{ "resource": "" }
q4488
parse_log
train
def parse_log(log, context_size=3):
    """Parses latex log output and tries to extract error messages.

    Requires ``-file-line-error`` to be active.

    :param log: The contents of the logfile as a string.
    :param context_size: Number of lines to keep as context, including the original error line.
    :return: A list of dicts, each containing ``line`` (line number, an int), ``error`` (the error
             message), ``filename`` (name of the temporary file used for building) and ``context``
             (list of lines, starting with the error line).
    """
    lines = log.splitlines()
    errors = []

    for n, line in enumerate(lines):
        m = LATEX_ERR_RE.match(line)
        if m:
            err = m.groupdict().copy()
            err['context'] = lines[n:n + context_size]
            try:
                err['line'] = int(err['line'])
            except TypeError:
                pass  # ignore invalid int conversion
            errors.append(err)

    return errors
python
{ "resource": "" }
q4489
split_metadata_params
train
def split_metadata_params(headers):
    """
    Given a dict of headers for s3, separates those that are boto3 parameters
    and those that must be metadata
    """
    params = {}
    metadata = {}
    for header_name in headers:
        if header_name.lower() in header_mapping:
            params[header_mapping[header_name.lower()]] = headers[header_name]
        else:
            metadata[header_name] = headers[header_name]
    return metadata, params
python
{ "resource": "" }
q4490
hash_file
train
def hash_file(filename):
    """
    Generate a hash for the contents of a file
    """
    hasher = hashlib.sha1()
    with open(filename, 'rb') as f:
        buf = f.read(65536)
        while len(buf) > 0:
            hasher.update(buf)
            buf = f.read(65536)
    return hasher.hexdigest()
python
{ "resource": "" }
q4491
_get_bucket_name
train
def _get_bucket_name(**values): """ Generates the bucket name for url_for. """ app = current_app # manage other special values, all have no meaning for static urls values.pop('_external', False) # external has no meaning here values.pop('_anchor', None) # anchor as well values.pop('_method', None) # method too url_style = get_setting('FLASKS3_URL_STYLE', app) if url_style == 'host': url_format = '{bucket_name}.{bucket_domain}' elif url_style == 'path': url_format = '{bucket_domain}/{bucket_name}' else: raise ValueError('Invalid S3 URL style: "{}"'.format(url_style)) if get_setting('FLASKS3_CDN_DOMAIN', app): bucket_path = '{}'.format(get_setting('FLASKS3_CDN_DOMAIN', app)) else: bucket_path = url_format.format( bucket_name=get_setting('FLASKS3_BUCKET_NAME', app), bucket_domain=get_setting('FLASKS3_BUCKET_DOMAIN', app), ) bucket_path += _get_statics_prefix(app).rstrip('/') return bucket_path, values
python
{ "resource": "" }
q4492
url_for
train
def url_for(endpoint, **values):
    """
    Generates a URL to the given endpoint.

    If the endpoint is for a static resource then an Amazon S3 URL is
    generated, otherwise the call is passed on to `flask.url_for`.

    Because this function is set as a jinja environment variable when
    `FlaskS3.init_app` is invoked, this function replaces `flask.url_for` in
    templates automatically. It is unlikely that this function will need to
    be directly called from within your application code, unless you need to
    refer to static assets outside of your templates.
    """
    app = current_app
    if app.config.get('TESTING', False) and not app.config.get('FLASKS3_OVERRIDE_TESTING', True):
        return flask_url_for(endpoint, **values)
    if 'FLASKS3_BUCKET_NAME' not in app.config:
        raise ValueError("FLASKS3_BUCKET_NAME not found in app configuration.")

    if endpoint == 'static' or endpoint.endswith('.static'):
        scheme = 'https'
        if not app.config.get("FLASKS3_USE_HTTPS", True):
            scheme = 'http'
        # allow per url override for scheme
        scheme = values.pop('_scheme', scheme)

        bucket_path, values = _get_bucket_name(**values)

        urls = app.url_map.bind(bucket_path, url_scheme=scheme)
        built = urls.build(endpoint, values=values, force_external=True)
        return built
    return flask_url_for(endpoint, **values)
python
{ "resource": "" }
q4493
_bp_static_url
train
def _bp_static_url(blueprint):
    """ builds the absolute url path for a blueprint's static folder """
    u = six.u('%s%s' % (blueprint.url_prefix or '', blueprint.static_url_path or ''))
    return u
python
{ "resource": "" }
q4494
_gather_files
train
def _gather_files(app, hidden, filepath_filter_regex=None):
    """ Gets all files in static folders and returns them in a dict."""
    dirs = [(six.text_type(app.static_folder), app.static_url_path)]
    if hasattr(app, 'blueprints'):
        blueprints = app.blueprints.values()
        bp_details = lambda x: (x.static_folder, _bp_static_url(x))
        dirs.extend([bp_details(x) for x in blueprints if x.static_folder])

    valid_files = defaultdict(list)
    for static_folder, static_url_loc in dirs:
        if not os.path.isdir(static_folder):
            logger.warning("WARNING - [%s does not exist]" % static_folder)
        else:
            logger.debug("Checking static folder: %s" % static_folder)
        for root, _, files in os.walk(static_folder):
            relative_folder = re.sub(r'^/', '', root.replace(static_folder, ''))

            files = [os.path.join(root, x)
                     for x in files if (
                         (hidden or x[0] != '.') and
                         # Skip this file if the filter regex is
                         # defined, and this file's path is a
                         # negative match.
                         (filepath_filter_regex is None or re.search(
                             filepath_filter_regex,
                             os.path.join(relative_folder, x))))]
            if files:
                valid_files[(static_folder, static_url_loc)].extend(files)
    return valid_files
python
{ "resource": "" }
q4495
_static_folder_path
train
def _static_folder_path(static_url, static_folder, static_asset):
    """
    Returns a path to a file based on the static folder, and not on the
    filesystem holding the file.

    Returns a path relative to static_url for static_asset
    """
    # first get the asset path relative to the static folder.
    # static_asset is not simply a filename because it could be
    # sub-directory then file etc.
    if not static_asset.startswith(static_folder):
        raise ValueError("%s static asset must be under %s static folder" %
                         (static_asset, static_folder))
    rel_asset = static_asset[len(static_folder):]
    # Now bolt the static url path and the relative asset location together
    return '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
python
{ "resource": "" }
q4496
_write_files
train
def _write_files(s3, app, static_url_loc, static_folder, files, bucket,
                 ex_keys=None, hashes=None):
    """ Writes all the files inside a static folder to S3. """
    should_gzip = app.config.get('FLASKS3_GZIP')
    add_mime = app.config.get('FLASKS3_FORCE_MIMETYPE')
    gzip_include_only = app.config.get('FLASKS3_GZIP_ONLY_EXTS')
    new_hashes = []
    static_folder_rel = _path_to_relative_url(static_folder)
    for file_path in files:
        per_file_should_gzip = should_gzip
        asset_loc = _path_to_relative_url(file_path)
        full_key_name = _static_folder_path(static_url_loc, static_folder_rel,
                                            asset_loc)
        key_name = full_key_name.lstrip("/")
        logger.debug("Uploading {} to {} as {}".format(file_path, bucket, key_name))

        exclude = False
        if app.config.get('FLASKS3_ONLY_MODIFIED', False):
            file_hash = hash_file(file_path)
            new_hashes.append((full_key_name, file_hash))

            if hashes and hashes.get(full_key_name, None) == file_hash:
                exclude = True

        if ex_keys and full_key_name in ex_keys or exclude:
            logger.debug("%s excluded from upload" % key_name)
        else:
            h = {}
            # Set more custom headers if the filepath matches certain
            # configured regular expressions.
            filepath_headers = app.config.get('FLASKS3_FILEPATH_HEADERS')
            if filepath_headers:
                for filepath_regex, headers in six.iteritems(filepath_headers):
                    if re.search(filepath_regex, file_path):
                        for header, value in six.iteritems(headers):
                            h[header] = value

            # check for extension, only if there are extensions provided
            if per_file_should_gzip and gzip_include_only:
                if os.path.splitext(file_path)[1] not in gzip_include_only:
                    per_file_should_gzip = False

            if per_file_should_gzip:
                h["content-encoding"] = "gzip"

            if (add_mime or per_file_should_gzip) and "content-type" not in h:
                # When we use GZIP we have to explicitly set the content type
                # or if the mime flag is True
                (mimetype, encoding) = mimetypes.guess_type(file_path, False)
                if mimetype:
                    h["content-type"] = mimetype
                else:
                    logger.warn("Unable to detect mimetype for %s" % file_path)

            file_mode = 'rb' if six.PY3 else 'r'
            with open(file_path, file_mode) as fp:
                merged_dicts = merge_two_dicts(get_setting('FLASKS3_HEADERS', app), h)
                metadata, params = split_metadata_params(merged_dicts)
                if per_file_should_gzip:
                    compressed = six.BytesIO()
                    z = gzip.GzipFile(os.path.basename(file_path), 'wb', 9,
                                      compressed)
                    z.write(fp.read())
                    z.close()

                    data = compressed.getvalue()
                else:
                    data = fp.read()

                s3.put_object(Bucket=bucket,
                              Key=key_name,
                              Body=data,
                              ACL="public-read",
                              Metadata=metadata,
                              **params)
    return new_hashes
python
{ "resource": "" }
q4497
create_all
train
def create_all(app, user=None, password=None, bucket_name=None,
               location=None, include_hidden=False,
               filepath_filter_regex=None, put_bucket_acl=True):
    """
    Uploads all of the static assets associated with a Flask application to
    Amazon S3.

    All static assets are identified on the local filesystem, including any
    static assets associated with *registered* blueprints. In turn, each
    asset is uploaded to the bucket described by `bucket_name`. If the
    bucket does not exist then it is created.

    Flask-S3 creates the same relative static asset folder structure on S3
    as can be found within your Flask application.

    Many of the optional arguments to `create_all` can be specified instead
    in your application's configuration using the Flask-S3
    `configuration`_ variables.

    :param app: a :class:`flask.Flask` application object.

    :param user: an AWS Access Key ID. You can find this key in the Security
                 Credentials section of your AWS account.
    :type user: `basestring` or None

    :param password: an AWS Secret Access Key. You can find this key in the
                     Security Credentials section of your AWS account.
    :type password: `basestring` or None

    :param bucket_name: the name of the bucket you wish to serve your static
                        assets from. **Note**: while a valid character, it is
                        recommended that you do not include periods in
                        bucket_name if you wish to serve over HTTPS. See
                        Amazon's `bucket restrictions`_ for more details.
    :type bucket_name: `basestring` or None

    :param location: the AWS region to host the bucket in; an empty string
                     indicates the default region should be used, which is
                     the US Standard region. Possible location values
                     include: `'DEFAULT'`, `'EU'`, `'us-east-1'`,
                     `'us-west-1'`, `'us-west-2'`, `'ap-south-1'`,
                     `'ap-northeast-2'`, `'ap-southeast-1'`,
                     `'ap-southeast-2'`, `'ap-northeast-1'`,
                     `'eu-central-1'`, `'eu-west-1'`, `'sa-east-1'`
    :type location: `basestring` or None

    :param include_hidden: by default Flask-S3 will not upload hidden files.
                           Set this to true to force the upload of hidden
                           files.
    :type include_hidden: `bool`

    :param filepath_filter_regex: if specified, then the upload of static
                                  assets is limited to only those files whose
                                  relative path matches this regular
                                  expression string. For example, to only
                                  upload files within the 'css' directory of
                                  your app's static store, set to r'^css'.
    :type filepath_filter_regex: `basestring` or None

    :param put_bucket_acl: by default Flask-S3 will set the bucket ACL to
                           public. Set this to false to leave the policy
                           unchanged.
    :type put_bucket_acl: `bool`

    .. _bucket restrictions: http://docs.amazonwebservices.com/AmazonS3\
    /latest/dev/BucketRestrictions.html
    """
    user = user or app.config.get('AWS_ACCESS_KEY_ID')
    password = password or app.config.get('AWS_SECRET_ACCESS_KEY')
    bucket_name = bucket_name or app.config.get('FLASKS3_BUCKET_NAME')
    if not bucket_name:
        raise ValueError("No bucket name provided.")
    location = location or app.config.get('FLASKS3_REGION')
    endpoint_url = app.config.get('FLASKS3_ENDPOINT_URL')

    # build list of static files
    all_files = _gather_files(app, include_hidden,
                              filepath_filter_regex=filepath_filter_regex)
    logger.debug("All valid files: %s" % all_files)

    # connect to s3
    s3 = boto3.client("s3",
                      endpoint_url=endpoint_url,
                      region_name=location or None,
                      aws_access_key_id=user,
                      aws_secret_access_key=password)

    # get_or_create bucket
    try:
        s3.head_bucket(Bucket=bucket_name)
    except ClientError as e:
        if int(e.response['Error']['Code']) == 404:
            # Create the bucket
            bucket = s3.create_bucket(Bucket=bucket_name)
        else:
            raise

    if put_bucket_acl:
        s3.put_bucket_acl(Bucket=bucket_name, ACL='public-read')

    if get_setting('FLASKS3_ONLY_MODIFIED', app):
        try:
            hashes_object = s3.get_object(Bucket=bucket_name, Key='.file-hashes')
            hashes = json.loads(str(hashes_object['Body'].read().decode()))
        except ClientError as e:
            logger.warn("No file hashes found: %s" % e)
            hashes = None

        new_hashes = _upload_files(s3, app, all_files, bucket_name, hashes=hashes)

        try:
            s3.put_object(Bucket=bucket_name,
                          Key='.file-hashes',
                          Body=json.dumps(dict(new_hashes)),
                          ACL='private')
        except boto3.exceptions.S3UploadFailedError as e:
            logger.warn("Unable to upload file hashes: %s" % e)
    else:
        _upload_files(s3, app, all_files, bucket_name)
python
{ "resource": "" }
q4498
_expand
train
def _expand(subsequence, sequence, max_l_dist):
    """Expand a partial match of a Levenshtein search.

    An expansion must begin at the beginning of the sequence, which makes
    this much simpler than a full search, and allows for greater optimization.
    """
    # If given a long sub-sequence and relatively small max distance,
    # use a more complex algorithm better optimized for such cases.
    if len(subsequence) > max(max_l_dist * 2, 10):
        return _expand_long(subsequence, sequence, max_l_dist)
    else:
        return _expand_short(subsequence, sequence, max_l_dist)
python
{ "resource": "" }
q4499
_py_expand_short
train
def _py_expand_short(subsequence, sequence, max_l_dist):
    """Straightforward implementation of partial match expansion."""
    # The following diagram shows the score calculation step.
    #
    # Each new score is the minimum of:
    #  * a OR a + 1 (substitution, if needed)
    #  * b + 1 (deletion, i.e. skipping a sequence character)
    #  * c + 1 (insertion, i.e. skipping a sub-sequence character)
    #
    # a -- +1 -> c
    #
    # |  \        |
    # |   \       |
    # +1   +1?    +1
    # |     \     |
    # v      ⌟    v
    #
    # b -- +1 -> scores[subseq_index]
    subseq_len = len(subsequence)
    if subseq_len == 0:
        return (0, 0)

    # Initialize the scores array with values for just skipping sub-sequence
    # chars.
    scores = list(range(1, subseq_len + 1))

    min_score = subseq_len
    min_score_idx = -1

    for seq_index, char in enumerate(sequence):
        # calculate scores, one for each character in the sub-sequence
        a = seq_index
        c = a + 1
        for subseq_index in range(subseq_len):
            b = scores[subseq_index]
            c = scores[subseq_index] = min(
                a + (char != subsequence[subseq_index]),
                b + 1,
                c + 1,
            )
            a = b

        # keep the minimum score found for matches of the entire sub-sequence
        if c <= min_score:
            min_score = c
            min_score_idx = seq_index

        # bail early when it is impossible to find a better expansion
        elif min(scores) >= min_score:
            break

    return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
python
{ "resource": "" }