text_prompt
stringlengths 100
17.7k
⌀ | code_prompt
stringlengths 7
9.86k
⌀ |
---|---|
<SYSTEM_TASK:>
Check whether the credentials have expired.
<END_TASK>
<USER_TASK:>
Description:
def are_credentials_still_valid(awsclient):
    """Check whether the AWS credentials have expired.

    :param awsclient: client wrapper exposing ``get_client``
    :return: exit_code (0 when the credentials still work, 1 otherwise)
    """
    lambda_client = awsclient.get_client('lambda')
    try:
        # Cheap read-only call; it fails fast when credentials expired.
        lambda_client.list_functions()
    except GracefulExit:
        raise
    except Exception as exc:
        log.debug(exc)
        log.error(exc)
        return 1
    return 0
<SYSTEM_TASK:>
Given a list, possibly nested to any level, return it flattened.
<END_TASK>
<USER_TASK:>
Description:
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened.

    :param lis: (possibly nested) sequence of items
    :return: new flat list with items in original order
    """
    # collections.Sequence was removed in Python 3.10 and basestring is
    # Python-2-only; use collections.abc.Sequence and (str, bytes) so the
    # function works on modern Python while keeping the same behavior.
    from collections.abc import Sequence
    new_lis = []
    for item in lis:
        if isinstance(item, Sequence) and not isinstance(item, (str, bytes)):
            new_lis.extend(flatten(item))
        else:
            new_lis.append(item)
    return new_lis
<SYSTEM_TASK:>
Create a random 6 character string.
<END_TASK>
<USER_TASK:>
Description:
def random_string(length=6):
    """Create a random lowercase string (6 characters by default).

    note: in case you use this function in a test together with an
    awsclient then this function is altered so you get reproducible
    results that will work with your recorded placebo json files
    (see helpers_aws.py).
    """
    chars = (random.choice(string.ascii_lowercase) for _ in range(length))
    return ''.join(chars)
<SYSTEM_TASK:>
Bail out if template is not found.
<END_TASK>
<USER_TASK:>
Description:
def load_template():
    """Load cloudformation.py; bail out (exit code 1) if not found.

    :return: the loaded cloudformation template module
    """
    template, was_found = load_cloudformation_template()
    if was_found:
        return template
    print(colored.red('could not load cloudformation.py, bailing out...'))
    sys.exit(1)
<SYSTEM_TASK:>
Run the haas test runner.
<END_TASK>
<USER_TASK:>
Description:
def run(self, plugin_manager=None):
    """Run the haas test runner.

    This will load and configure the selected plugins, set up the
    environment and begin test discovery, loading and running.

    Parameters
    ----------
    plugin_manager : haas.plugin_manager.PluginManager
        [Optional] Override the use of the default plugin manager.

    Returns
    -------
    int
        0 when the run succeeded, 1 (truthy) otherwise — suitable as a
        process exit code.
    """
    if plugin_manager is None:
        plugin_manager = PluginManager()
    # Let plugins contribute their own CLI options before parsing argv.
    plugin_manager.add_plugin_arguments(self.parser)
    args = self.parser.parse_args(self.argv[1:])
    environment_plugins = plugin_manager.get_enabled_hook_plugins(
        plugin_manager.ENVIRONMENT_HOOK, args)
    runner = plugin_manager.get_driver(
        plugin_manager.TEST_RUNNER, args)
    # Environment hook plugins stay active for discovery AND the run.
    with PluginContext(environment_plugins):
        loader = Loader()
        discoverer = plugin_manager.get_driver(
            plugin_manager.TEST_DISCOVERY, args, loader=loader)
        # One discovered suite per start location from the command line.
        suites = [
            discoverer.discover(
                start=start,
                top_level_directory=args.top_level_directory,
                pattern=args.pattern,
            )
            for start in args.start
        ]
        if len(suites) == 1:
            suite = suites[0]
        else:
            suite = loader.create_suite(suites)
        # Result handlers may need the total up front (e.g. progress bars).
        test_count = suite.countTestCases()
        result_handlers = plugin_manager.get_enabled_hook_plugins(
            plugin_manager.RESULT_HANDLERS, args, test_count=test_count)
        result_collector = ResultCollector(
            buffer=args.buffer, failfast=args.failfast)
        for result_handler in result_handlers:
            result_collector.add_result_handler(result_handler)
        result = runner.run(result_collector, suite)
        return not result.wasSuccessful()
<SYSTEM_TASK:>
Helper function to cache currently logged in user.
<END_TASK>
<USER_TASK:>
Description:
def cache_request_user(user_cls, request, user_id):
    """Cache the currently logged-in user at ``request._user``.

    Caching only happens when no user is cached yet, or when the cached
    user's primary key does not match ``user_id``.

    :param user_cls: User model class to use for user lookup.
    :param request: Pyramid Request instance.
    :param user_id: Current user primary key field value.
    """
    pk_name = user_cls.pk_field()
    cached = getattr(request, '_user', None)
    stale = cached is None or getattr(cached, pk_name, None) != user_id
    if stale:
        request._user = user_cls.get_item(**{pk_name: user_id})
<SYSTEM_TASK:>
Get user by ID.
<END_TASK>
<USER_TASK:>
Description:
def get_authuser_by_userid(cls, request):
    """Populate and return the authenticated user, looked up by ID.

    Used by Ticket-based auth. Added as a request method to populate
    ``request.user``.
    """
    user_id = authenticated_userid(request)
    if not user_id:
        return request._user
    cache_request_user(cls, request, user_id)
    return request._user
<SYSTEM_TASK:>
Get user by username
<END_TASK>
<USER_TASK:>
Description:
def get_authuser_by_name(cls, request):
    """Return the authenticated user, looked up by username.

    Used by Token-based auth. Added as a request method to populate
    ``request.user``. Returns ``None`` when nobody is authenticated.
    """
    username = authenticated_userid(request)
    if not username:
        return None
    return cls.get_item(username=username)
<SYSTEM_TASK:>
Set up event subscribers.
<END_TASK>
<USER_TASK:>
Description:
def includeme(config):
    """Set up field processors for the AuthUser mixin fields."""
    from .models import (
        AuthUserMixin,
        random_uuid,
        lower_strip,
        encrypt_password,
    )
    register = config.add_field_processors
    register([random_uuid, lower_strip],
             model=AuthUserMixin, field='username')
    register([lower_strip], model=AuthUserMixin, field='email')
    register([encrypt_password], model=AuthUserMixin, field='password')
<SYSTEM_TASK:>
channels.list
<END_TASK>
<USER_TASK:>
Description:
def channels_list(self, exclude_archived=True, **params):
    """channels.list

    This method returns a list of all channels in the team. This includes
    channels the caller is in, channels they are not currently in, and
    archived channels. The number of (non-deactivated) members in each
    channel is also returned.

    https://api.slack.com/methods/channels.list
    """
    # The Slack API expects 1/0 instead of a boolean flag.
    params['exclude_archived'] = 1 if exclude_archived else 0
    return self._make_request('channels.list', params)
<SYSTEM_TASK:>
Helper name for getting a channel's id from its name
<END_TASK>
<USER_TASK:>
Description:
def channel_name_to_id(self, channel_name, force_lookup=False):
    """Return a channel's id given its name (with or without '#')."""
    if force_lookup or not self.channel_name_id_map:
        # (Re)build the name -> id mapping from the live channel list.
        listing = self.channels_list()['channels']
        self.channel_name_id_map = {
            entry['name']: entry['id'] for entry in listing}
    lookup_key = channel_name
    if channel_name.startswith('#') and channel_name[1:]:
        lookup_key = channel_name[1:]
    return self.channel_name_id_map.get(lookup_key)
<SYSTEM_TASK:>
chat.postMessage
<END_TASK>
<USER_TASK:>
Description:
def chat_post_message(self, channel, text, **params):
    """chat.postMessage

    This method posts a message to a channel.

    https://api.slack.com/methods/chat.postMessage
    """
    payload = dict(params)
    payload['channel'] = channel
    payload['text'] = text
    return self._make_request('chat.postMessage', payload)
<SYSTEM_TASK:>
Connect view to route that catches all URIs like
<END_TASK>
<USER_TASK:>
Description:
def includeme(config):
    """Connect the polymorphic view to a route that catches all URIs
    of the form 'something,something,...'.
    """
    root_resource = config.get_root_resource()
    root_resource.add(
        'nef_polymorphic', '{collections:.+,.+}',
        view=PolymorphicESView,
        factory=PolymorphicACL)
<SYSTEM_TASK:>
Get names of collections from request matchdict.
<END_TASK>
<USER_TASK:>
Description:
def get_collections(self):
    """Get names of collections from the request matchdict.

    :return: Names of collections
    :rtype: set of str
    """
    raw = self.request.matchdict['collections'].split('/')[0]
    return {name.strip() for name in raw.split(',')}
<SYSTEM_TASK:>
Get ACEs with the least permissions that fit all resources.
<END_TASK>
<USER_TASK:>
Description:
def _get_least_permissions_aces(self, resources):
    """Get ACEs with the least permissions that fit all resources.

    To have access to polymorph on N collections, the user MUST have
    access to all of them. If so, ACEs allowing 'view' to the current
    request principals are returned. Otherwise ``None`` is returned,
    blocking all permissions except those defined in
    `nefertari.acl.BaseACL`.

    :param resources: list of Resource instances
    :return: Generated Pyramid ACEs or None
    """
    contexts = [res.view._factory(self.request) for res in resources]
    has_all = all(
        self.request.has_permission('view', ctx) for ctx in contexts)
    if not has_all:
        return None
    return [(Allow, principal, 'view')
            for principal in self.request.effective_principals]
<SYSTEM_TASK:>
Calculate and set ACL valid for requested collections.
<END_TASK>
<USER_TASK:>
Description:
def set_collections_acl(self):
    """Calculate and set the ACL valid for the requested collections.

    DENY_ALL is appended to the ACL to make sure no access rules are
    inherited.
    """
    acl = [(Allow, 'g:admin', ALL_PERMISSIONS)]
    requested = self.get_collections()
    aces = self._get_least_permissions_aces(self.get_resources(requested))
    if aces is not None:
        acl.extend(aces)
    acl.append(DENY_ALL)
    self.__acl__ = tuple(acl)
<SYSTEM_TASK:>
Determine ES type names from request data.
<END_TASK>
<USER_TASK:>
Description:
def determine_types(self):
    """Determine ES type names from request data.

    In particular ``request.matchdict['collections']`` is used to
    determine type names. Its value is a comma-separated sequence of
    collection names under which views have been registered.
    """
    from nefertari.elasticsearch import ES
    resources = self.get_resources(self.get_collections())
    models = {res.view.Model for res in resources}
    indexed = (mdl for mdl in models
               if mdl and getattr(mdl, '_index_enabled', False))
    return [ES.src2type(mdl.__name__) for mdl in indexed]
<SYSTEM_TASK:>
Returns a closure function that dispatches message to the WebSocket.
<END_TASK>
<USER_TASK:>
Description:
def get_command(domain_name, command_name):
    """Return a closure that dispatches the command over the WebSocket."""
    full_name = '{0}.{1}'.format(domain_name, command_name)

    def send_command(self, **kwargs):
        # Message name is 'Domain.command'; kwargs become the payload.
        return self.ws.send_message(full_name, kwargs)

    return send_command
<SYSTEM_TASK:>
Dynamically create Domain class and set it's methods.
<END_TASK>
<USER_TASK:>
Description:
def DomainFactory(domain_name, cmds):
    """Dynamically create a Domain class and attach its command methods."""
    domain_cls = type(str(domain_name), (BaseDomain,), {})
    for cmd in cmds:
        dispatcher = get_command(domain_name, cmd['name'])
        setattr(domain_cls, cmd['name'], classmethod(dispatcher))
    return domain_cls
<SYSTEM_TASK:>
The entrypoint for the hairball command installed via setup.py.
<END_TASK>
<USER_TASK:>
Description:
def main():
    """The entrypoint for the hairball command installed via setup.py.

    Parses the command line, validates that at least one plugin and one
    PATH were given, then drives the Hairball pipeline:
    initialize_plugins -> process -> finalize.
    """
    description = ('PATH can be either the path to a scratch file, or a '
                   'directory containing scratch files. Multiple PATH '
                   'arguments can be provided.')
    parser = OptionParser(usage='%prog -p PLUGIN_NAME [options] PATH...',
                          description=description,
                          version='%prog {}'.format(__version__))
    parser.add_option('-d', '--plugin-dir', metavar='DIR',
                      help=('Specify the path to a directory containing '
                            'plugins. Plugins in this directory take '
                            'precedence over similarly named plugins '
                            'included with Hairball.'))
    parser.add_option('-p', '--plugin', action='append',
                      help=('Use the named plugin to perform analysis. '
                            'This option can be provided multiple times.'))
    parser.add_option('-k', '--kurt-plugin', action='append',
                      help=('Provide either a python import path (e.g, '
                            'kelp.octopi) to a package/module, or the path'
                            ' to a python file, which will be loaded as a '
                            'Kurt plugin. This option can be provided '
                            'multiple times.'))
    parser.add_option('-q', '--quiet', action='store_true',
                      help=('Prevent output from Hairball. Plugins may still '
                            'produce output.'))
    parser.add_option('-C', '--no-cache', action='store_true',
                      help='Do not use Hairball\'s cache.', default=False)
    options, args = parser.parse_args(sys.argv[1:])
    # Both a plugin and at least one PATH are mandatory.
    if not options.plugin:
        parser.error('At least one plugin must be specified via -p.')
    if not args:
        parser.error('At least one PATH must be provided.')
    if options.plugin_dir:
        # User-supplied plugin dir takes import precedence via sys.path.
        if os.path.isdir(options.plugin_dir):
            sys.path.append(options.plugin_dir)
        else:
            parser.error('{} is not a directory'.format(options.plugin_dir))
    hairball = Hairball(options, args, cache=not options.no_cache)
    hairball.initialize_plugins()
    hairball.process()
    hairball.finalize()
<SYSTEM_TASK:>
Return the fullpath to the file with sha1sum key.
<END_TASK>
<USER_TASK:>
Description:
def key_to_path(self, key):
    """Return the full path to the cache file for sha1sum ``key``.

    The first four hex digits form two directory levels so no single
    directory grows too large.
    """
    first, second = key[:2], key[2:4]
    filename = key[4:] + '.pkl'
    return os.path.join(self.cache_dir, first, second, filename)
<SYSTEM_TASK:>
Optimized load and return the parsed version of filename.
<END_TASK>
<USER_TASK:>
Description:
def load(self, filename):
    """Optimized load and return the parsed version of filename.

    Uses the on-disk parse cache if the file is located in it.

    :param filename: path to the scratch file to load
    :return: the parsed kurt project
    """
    # Compute sha1 hash (key); read as bytes so hashing is byte-exact
    # and works on Python 3 (text mode would try to decode the file).
    with open(filename, 'rb') as fp:
        key = sha1(fp.read()).hexdigest()
    path = self.key_to_path(key)
    # Return the cached file if available; pickle data is binary.
    if key in self.hashes:
        try:
            with open(path, 'rb') as fp:
                return cPickle.load(fp)
        except EOFError:  # truncated cache entry: drop it
            os.unlink(path)
            self.hashes.remove(key)
        except IOError:  # cache entry disappeared
            self.hashes.remove(key)
    # Create the nested cache directory
    try:
        os.makedirs(os.path.dirname(path))
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    # Process the file and save in the cache
    scratch = kurt.Project.load(filename)  # can fail
    # open file for writing but make it immediately read-only (0o400);
    # the old `0400` literal is a syntax error on Python 3, and the
    # pickle stream must be written in binary mode.
    with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o400),
                   'wb') as fp:
        cPickle.dump(scratch, fp, cPickle.HIGHEST_PROTOCOL)
    self.hashes.add(key)
    return scratch
<SYSTEM_TASK:>
Yield filepath to files with the proper extension within paths.
<END_TASK>
<USER_TASK:>
Description:
def hairball_files(self, paths, extensions):
    """Yield paths to files whose extension is in ``extensions``.

    Directory arguments are walked recursively in sorted order;
    non-directory arguments are yielded directly when their extension
    matches. Diagnostics are printed unless ``options.quiet`` is set.
    """
    def matches(candidate):
        return os.path.splitext(candidate)[1] in extensions

    while paths:
        current = paths.pop(0)
        if os.path.isdir(current):
            found_any = False
            for dirpath, dirnames, filenames in os.walk(current):
                dirnames.sort()  # Traverse in sorted order
                for basename in sorted(filenames):
                    if matches(basename):
                        found_any = True
                        yield os.path.join(dirpath, basename)
            if not found_any and not self.options.quiet:
                print('No files found in {}'.format(current))
        elif matches(current):
            yield current
        elif not self.options.quiet:
            print('Invalid file {}'.format(current))
            print('Did you forget to load a Kurt plugin (-k)?')
<SYSTEM_TASK:>
Attempt to Load and initialize all the plugins.
<END_TASK>
<USER_TASK:>
Description:
def initialize_plugins(self):
    """Attempt to Load and initialize all the plugins.

    Any issues loading plugins will be output to stderr.

    Each ``-p`` value is either 'module.ClassName' or a bare module
    name (then the titlecased module name is used as the class name).
    Lookup tries the plugin dir first (package None), then the bundled
    ``hairball.plugins`` package. Exits with status 1 when no plugin
    could be loaded at all.
    """
    for plugin_name in self.options.plugin:
        parts = plugin_name.split('.')
        if len(parts) > 1:
            module_name = '.'.join(parts[:-1])
            class_name = parts[-1]
        else:
            # Use the titlecase format of the module name as the class name
            module_name = parts[0]
            class_name = parts[0].title()
        # First try to load plugins from the passed in plugins_dir and then
        # from the hairball.plugins package.
        plugin = None
        for package in (None, 'hairball.plugins'):
            if package:
                # NOTE: module_name is rebound here, so the second pass
                # prefixes the name derived on the first pass.
                module_name = '{}.{}'.format(package, module_name)
            try:
                module = __import__(module_name, fromlist=[class_name])
                # Initializes the plugin by calling its constructor
                plugin = getattr(module, class_name)()
                # Verify plugin is of the correct class
                if not isinstance(plugin, HairballPlugin):
                    sys.stderr.write('Invalid type for plugin {}: {}\n'
                                     .format(plugin_name, type(plugin)))
                    plugin = None
                else:
                    break
            except (ImportError, AttributeError):
                pass
        if plugin:
            self.plugins.append(plugin)
        else:
            sys.stderr.write('Cannot find plugin {}\n'.format(plugin_name))
    if not self.plugins:
        sys.stderr.write('No plugins loaded. Goodbye!\n')
        sys.exit(1)
<SYSTEM_TASK:>
Run the analysis across all files found in the given paths.
<END_TASK>
<USER_TASK:>
Description:
def process(self):
    """Run the analysis across all files found in the given paths.

    Each file is loaded once and all plugins are run against it before
    loading the next file.
    """
    for filename in self.hairball_files(self.paths, self.extensions):
        if not self.options.quiet:
            print(filename)
        try:
            loader = self.cache.load if self.cache else kurt.Project.load
            scratch = loader(filename)
        except Exception:  # pylint: disable=W0703
            # A broken project must not abort the whole batch.
            traceback.print_exc()
            continue
        for plugin in self.plugins:
            # pylint: disable=W0212
            plugin._process(scratch, filename=filename)
<SYSTEM_TASK:>
If ``module`` is a dotted string pointing to the module,
<END_TASK>
<USER_TASK:>
Description:
def maybe_dotted(module, throw=True):
    """Resolve ``module``: a dotted string is imported and the module
    object returned; anything else is returned unchanged.

    :param module: dotted path string or module object
    :param throw: re-raise import failures when True; otherwise log the
        error and return None
    """
    try:
        return Configurator().maybe_dotted(module)
    except ImportError as e:
        err = '%s not found. %s' % (module, e)
        if not throw:
            log.error(err)
            return None
        raise ImportError(err)
<SYSTEM_TASK:>
Return True if `arg` acts as a list and does not look like a string.
<END_TASK>
<USER_TASK:>
Description:
def issequence(arg):
    """Return True if `arg` acts as a list and does not look like a string."""
    if isinstance(arg, six.string_types) or isinstance(arg, six.text_type):
        return False
    return hasattr(arg, '__getitem__') or hasattr(arg, '__iter__')
<SYSTEM_TASK:>
Run and return the results from the Animation plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
    """Run and return the results from the Animation plugin.

    Walks every script in the project; whenever a block listed in
    ``self.ANIMATION`` is reached, the block stream is handed to
    ``_check_animation``, which classifies the attempt and returns the
    (possibly advanced) generator so scanning continues where it left
    off.

    :param scratch: the parsed project to analyze
    :return: dict mapping 'animation' to a Counter of classifications
    """
    results = Counter()
    for script in self.iter_scripts(scratch):
        gen = self.iter_blocks(script.blocks)
        name = 'start'  # sentinel so the loop body runs at least once
        level = None
        while name != '':
            if name in self.ANIMATION:
                gen, count = self._check_animation(name, level, gen)
                results.update(count)
            # ('', 0, '') marks stream exhaustion and ends the loop.
            name, level, _ = next(gen, ('', 0, ''))
    return {'animation': results}
<SYSTEM_TASK:>
Return a list of received events contained in script_list.
<END_TASK>
<USER_TASK:>
Description:
def get_receive(self, script_list):
    """Return a mapping of received events to the scripts handling them."""
    received = defaultdict(set)
    for candidate in script_list:
        if self.script_start_type(candidate) != self.HAT_WHEN_I_RECEIVE:
            continue
        # Event names are case-insensitive, normalize to lowercase.
        event_name = candidate.blocks[0].args[0].lower()
        received[event_name].add(candidate)
    return received
<SYSTEM_TASK:>
Run and return the results from the BroadcastReceive plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
    """Run and return the results from the BroadcastReceive plugin.

    Pairs broadcast events with their receivers and classifies the
    problems found: events never broadcast, never received, broadcast
    dynamically, broadcast in parallel, or received by multiple
    scripts where a receiver contains a delay ('stack') block.

    :param scratch: the parsed project to analyze
    :return: dict mapping 'broadcast' to a defaultdict(set) of results
    """
    all_scripts = list(self.iter_scripts(scratch))
    results = defaultdict(set)
    broadcast = dict((x, self.get_broadcast_events(x))  # Events by script
                     for x in all_scripts)
    correct = self.get_receive(all_scripts)
    results['never broadcast'] = set(correct.keys())
    for script, events in broadcast.items():
        # Iterate over a snapshot of the keys: entries are deleted
        # below, and mutating a dict mid-iteration raises on Python 3.
        for event in list(events.keys()):
            if event is True:  # Remove dynamic broadcasts
                results['dynamic broadcast'].add(script.morph.name)
                del events[event]
            elif event in correct:
                results['never broadcast'].discard(event)
            else:
                results['never received'].add(event)
    # remove events from correct dict that were never broadcast
    # (snapshot again: we delete from `correct` while scanning it)
    for event in list(correct.keys()):
        if event in results['never broadcast']:
            del correct[event]
    # Find scripts that have more than one broadcast event on any possible
    # execution path through the program
    # TODO: Permit mutually exclusive broadcasts
    for events in broadcast.values():
        if len(events) > 1:
            for event in events:
                if event in correct:
                    results['parallel broadcasts'].add(event)
                    del correct[event]
    # Find events that have two (or more) receivers in which one of the
    # receivers has a "delay" block
    for event, scripts in list(correct.items()):
        if len(scripts) > 1:
            for script in scripts:
                for _, _, block in self.iter_blocks(script.blocks):
                    if block.type.shape == 'stack':
                        results['multiple receivers with delay'].add(event)
                        if event in correct:
                            del correct[event]
    results['success'] = set(correct.keys())
    return {'broadcast': results}
<SYSTEM_TASK:>
Categorize instances of attempted say and sound synchronization.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
    """Categorize instances of attempted say and sound synchronization.

    Scans adjacent block pairs at the same nesting depth and classifies
    say/think vs. play-sound orderings into the plugin's error
    categories (CORRECT/INCORRECT/ERROR/HACKISH, plus the literal '1'
    bucket used below).

    :param scratch: the parsed project to analyze
    :return: dict mapping 'sound' to a Counter of classifications
    """
    errors = Counter()
    for script in self.iter_scripts(scratch):
        # Seed the "previous block" with the script's first block.
        prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
        gen = self.iter_blocks(script.blocks)
        for name, depth, block in gen:
            # Only classify blocks that are direct neighbors at the
            # same nesting depth.
            if prev_depth == depth:
                if prev_name in self.SAY_THINK:
                    if name == 'play sound %s until done':
                        if not self.is_blank(prev_block.args[0]):
                            # check() consumes the rest of the chain.
                            errors += self.check(gen)
                    # TODO: What about play sound?
                elif prev_name in self.SAY_THINK_DURATION and \
                        'play sound %s' in name:
                    errors['1'] += 1
                elif prev_name == 'play sound %s':
                    if name in self.SAY_THINK:
                        errors[self.INCORRECT] += 1
                    elif name in self.SAY_THINK_DURATION:
                        if self.is_blank(block.args[0]):
                            errors[self.ERROR] += 1
                        else:
                            errors[self.HACKISH] += 1
                elif prev_name == 'play sound %s until done' and \
                        name in self.ALL_SAY_THINK:
                    if not self.is_blank(block.args[0]):
                        errors[self.INCORRECT] += 1
                # TODO: Should there be an else clause here?
            prev_name, prev_depth, prev_block = name, depth, block
    return {'sound': errors}
<SYSTEM_TASK:>
Check that the last part of the chain matches.
<END_TASK>
<USER_TASK:>
Description:
def check(self, gen):
    """Check that the last part of the say/sound chain matches.

    Consumes blocks from ``gen`` (the shared block stream) and counts
    the chain as CORRECT when a say/think follows, either blank or
    followed by another 'play sound %s until done' (in which case the
    check recurses for the next chain segment).

    TODO: Fix to handle the following situation that appears to not work
    say 'message 1'
    play sound until done
    say 'message 2'
    say 'message 3'
    play sound until done
    say ''

    :param gen: generator yielding (name, depth, block) tuples
    :return: Counter of CORRECT/INCORRECT classifications
    """
    retval = Counter()
    # ('', 0, '') is the exhausted-stream sentinel.
    name, _, block = next(gen, ('', 0, ''))
    if name in self.SAY_THINK:
        if self.is_blank(block.args[0]):
            retval[self.CORRECT] += 1
        else:
            name, _, block = next(gen, ('', 0, ''))
            if name == 'play sound %s until done':
                # Increment the correct count because we have at least
                # one successful instance
                retval[self.CORRECT] += 1
                # This block represents the beginning of a second
                retval += self.check(gen)
            else:
                retval[self.INCORRECT] += 1
    else:
        retval[self.INCORRECT] += 1
    return retval
<SYSTEM_TASK:>
Hook stdout and stderr if buffering is enabled.
<END_TASK>
<USER_TASK:>
Description:
def _setup_stdout(self):
"""Hook stdout and stderr if buffering is enabled.
""" |
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer |
<SYSTEM_TASK:>
Unhook stdout and stderr if buffering is enabled.
<END_TASK>
<USER_TASK:>
Description:
def _restore_stdout(self):
"""Unhook stdout and stderr if buffering is enabled.
""" |
if self.buffer:
if self._mirror_output:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate() |
<SYSTEM_TASK:>
Register that a test ended in an error.
<END_TASK>
<USER_TASK:>
Description:
def addError(self, test, exception):
    """Register that a test ended in an error.

    Parameters
    ----------
    test : unittest.TestCase
        The test that has completed.
    exception : tuple
        ``exc_info`` tuple ``(type, value, traceback)``.
    """
    entry = self._handle_result(
        test, TestCompletionStatus.error, exception=exception)
    self.errors.append(entry)
    # Errored tests get their captured output mirrored to the console.
    self._mirror_output = True
<SYSTEM_TASK:>
Register that a test ended with a failure.
<END_TASK>
<USER_TASK:>
Description:
def addFailure(self, test, exception):
    """Register that a test ended with a failure.

    Parameters
    ----------
    test : unittest.TestCase
        The test that has completed.
    exception : tuple
        ``exc_info`` tuple ``(type, value, traceback)``.
    """
    entry = self._handle_result(
        test, TestCompletionStatus.failure, exception=exception)
    self.failures.append(entry)
    # Failed tests get their captured output mirrored to the console.
    self._mirror_output = True
<SYSTEM_TASK:>
Register that a test that was skipped.
<END_TASK>
<USER_TASK:>
Description:
def addSkip(self, test, reason):
    """Register a test that was skipped.

    Parameters
    ----------
    test : unittest.TestCase
        The test that has completed.
    reason : str
        The reason the test was skipped.
    """
    entry = self._handle_result(
        test, TestCompletionStatus.skipped, message=reason)
    self.skipped.append(entry)
<SYSTEM_TASK:>
Register that a test that failed and was expected to fail.
<END_TASK>
<USER_TASK:>
Description:
def addExpectedFailure(self, test, exception):
    """Register a test that failed and was expected to fail.

    Parameters
    ----------
    test : unittest.TestCase
        The test that has completed.
    exception : tuple
        ``exc_info`` tuple ``(type, value, traceback)``.
    """
    entry = self._handle_result(
        test, TestCompletionStatus.expected_failure, exception=exception)
    self.expectedFailures.append(entry)
<SYSTEM_TASK:>
Register a test that passed unexpectedly.
<END_TASK>
<USER_TASK:>
Description:
def addUnexpectedSuccess(self, test):
    """Register a test that passed unexpectedly.

    Parameters
    ----------
    test : unittest.TestCase
        The test that has completed.
    """
    entry = self._handle_result(
        test, TestCompletionStatus.unexpected_success)
    self.unexpectedSuccesses.append(entry)
<SYSTEM_TASK:>
Change the terminal key. The encrypted_key is a hex string.
<END_TASK>
<USER_TASK:>
Description:
def set_terminal_key(self, encrypted_key):
    """
    Change the terminal key. The encrypted_key is a hex string.
    encrypted_key is expected to be encrypted under master key.

    :param encrypted_key: hex string of the new key, encrypted under
        the terminal master key (TMK)
    :return: True on success, False on any validation/decode failure
    """
    if encrypted_key:
        try:
            # bytes.fromhex raises ValueError on malformed hex input.
            new_key = bytes.fromhex(encrypted_key)
            if len(self.terminal_key) != len(new_key):
                # The keys must have equal length
                return False
            # Decrypt under the TMK, persist, and rebuild the TPK cipher
            # so subsequent PIN encryption uses the new key.
            self.terminal_key = self.tmk_cipher.decrypt(new_key)
            self.store_terminal_key(raw2str(self.terminal_key))
            self.tpk_cipher = DES3.new(self.terminal_key, DES3.MODE_ECB)
            self.print_keys()
            return True
        except ValueError:
            return False
    return False
<SYSTEM_TASK:>
Get PIN block in ISO 0 format, encrypted with the terminal key
<END_TASK>
<USER_TASK:>
Description:
def get_encrypted_pin(self, clear_pin, card_number):
    """
    Get PIN block in ISO 0 format, encrypted with the terminal key.

    :param clear_pin: the PIN digits in the clear
    :param card_number: PAN used to build the PIN block
    :return: hex string of the encrypted PIN block, or '' on failure
    """
    if not self.terminal_key:
        print('Terminal key is not set')
        return ''
    # NOTE(review): the supported format code is '01' while the
    # docstring says ISO 0 — presumably '01' is this host's code for
    # ISO format 0; confirm against the terminal spec.
    if self.pinblock_format == '01':
        try:
            pinblock = bytes.fromhex(get_pinblock(clear_pin, card_number))
            #print('PIN block: {}'.format(raw2str(pinblock)))
        except TypeError:
            return ''
        encrypted_pinblock = self.tpk_cipher.encrypt(pinblock)
        return raw2str(encrypted_pinblock)
    else:
        print('Unsupported PIN Block format')
        return ''
<SYSTEM_TASK:>
Check if a string is a valid url.
<END_TASK>
<USER_TASK:>
Description:
def is_url(string, allowed_schemes=None):
    """
    Check if a string is a valid url.

    :param string: String to check.
    :param allowed_schemes: List of valid schemes ('http', 'https', 'ftp'...). Default to None (any scheme is valid).
    :return: True if url, false otherwise
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    if not URL_RE.search(string):
        return False
    if not allowed_schemes:
        return True
    return any(string.startswith(scheme) for scheme in allowed_schemes)
<SYSTEM_TASK:>
Checks if a string is a valid credit card number.
<END_TASK>
<USER_TASK:>
Description:
def is_credit_card(string, card_type=None):
    """
    Checks if a string is a valid credit card number.
    If card type is provided then it checks that specific type,
    otherwise any known credit card number will be accepted.

    :param string: String to check.
    :type string: str
    :param card_type: Card type (one of: VISA, MASTERCARD,
        AMERICAN_EXPRESS, DINERS_CLUB, DISCOVER, JCB) or None.
        Default to None (any card).
    :type card_type: str
    :return: True if credit card, false otherwise.
    :rtype: bool
    :raises KeyError: when ``card_type`` is not a known type.
    """
    if not is_full_string(string):
        return False
    if card_type:
        if card_type not in CREDIT_CARDS:
            raise KeyError(
                'Invalid card type "{}". Valid types are: {}'.format(
                    card_type, ', '.join(CREDIT_CARDS.keys()))
            )
        return bool(CREDIT_CARDS[card_type].search(string))
    return any(regex.search(string) for regex in CREDIT_CARDS.values())
<SYSTEM_TASK:>
Check if a string is a valid json.
<END_TASK>
<USER_TASK:>
Description:
def is_json(string):
    """
    Check if a string is a valid json (a JSON object at the top level).

    :param string: String to check.
    :type string: str
    :return: True if json, false otherwise
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    if not JSON_WRAPPER_RE.search(string):
        return False
    try:
        return isinstance(json.loads(string), dict)
    except (TypeError, ValueError, OverflowError):
        return False
<SYSTEM_TASK:>
Checks if a given string is a slug.
<END_TASK>
<USER_TASK:>
Description:
def is_slug(string, sign='-'):
    """
    Checks if a given string is a slug.

    :param string: String to check.
    :type string: str
    :param sign: Join sign used by the slug.
    :type sign: str
    :return: True if slug, false otherwise.
    """
    if not is_full_string(string):
        return False
    pattern = r'^([a-z\d]+{}?)*[a-z\d]$'.format(re.escape(sign))
    return re.match(pattern, string) is not None
<SYSTEM_TASK:>
Return a new string containing shuffled items.
<END_TASK>
<USER_TASK:>
Description:
def shuffle(string):
    """
    Return a new string containing shuffled items.

    :param string: String to shuffle
    :type string: str
    :return: Shuffled string
    :rtype: str
    """
    pool = sorted(string)  # materialize chars (order irrelevant pre-shuffle)
    random.shuffle(pool)
    return ''.join(pool)
<SYSTEM_TASK:>
Remove html code contained into the given string.
<END_TASK>
<USER_TASK:>
Description:
def strip_html(string, keep_tag_content=False):
    """
    Remove html code contained into the given string.

    :param string: String to manipulate.
    :type string: str
    :param keep_tag_content: True to preserve tag content, False to remove tag and its content too (default).
    :type keep_tag_content: bool
    :return: String with html removed.
    :rtype: str
    """
    if keep_tag_content:
        pattern = HTML_TAG_ONLY_RE
    else:
        pattern = HTML_RE
    return pattern.sub('', string)
<SYSTEM_TASK:>
Parse the nblink file.
<END_TASK>
<USER_TASK:>
Description:
def parse(self, inputstring, document):
    """Parse the nblink file.

    Adds the linked file as a dependency, reads the file, and passes
    the content to the nbsphinx.NotebookParser.

    :param inputstring: raw JSON content of the .nblink file (must
        contain a 'path' entry pointing at the notebook)
    :param document: the docutils document being built
    """
    link = json.loads(inputstring)
    env = document.settings.env
    # Resolve the linked notebook relative to the .nblink document.
    source_dir = os.path.dirname(env.doc2path(env.docname))
    abs_path = os.path.normpath(os.path.join(source_dir, link['path']))
    path = utils.relative_path(None, abs_path)
    path = nodes.reprunicode(path)
    # Rebuild this document whenever the linked notebook changes.
    document.settings.record_dependencies.add(path)
    env.note_dependency(path)
    target_root = env.config.nbsphinx_link_target_root
    target = utils.relative_path(target_root, abs_path)
    target = nodes.reprunicode(target).replace(os.path.sep, '/')
    env.metadata[env.docname]['nbsphinx-link-target'] = target
    # Copy parser from nbsphinx for our custom format
    try:
        formats = env.config.nbsphinx_custom_formats
    except AttributeError:
        pass
    else:
        formats.setdefault(
            '.nblink',
            lambda s: nbformat.reads(s, as_version=_ipynbversion))
    try:
        include_file = io.FileInput(source_path=path, encoding='utf8')
    except UnicodeEncodeError as error:
        raise NotebookError(u'Problems with linked notebook "%s" path:\n'
                            'Cannot encode input file path "%s" '
                            '(wrong locale?).' %
                            (env.docname, SafeString(path)))
    except IOError as error:
        raise NotebookError(u'Problems with linked notebook "%s" path:\n%s.' %
                            (env.docname, ErrorString(error)))
    try:
        rawtext = include_file.read()
    except UnicodeError as error:
        raise NotebookError(u'Problem with linked notebook "%s":\n%s' %
                            (env.docname, ErrorString(error)))
    # Hand the notebook source to nbsphinx's NotebookParser.
    return super(LinkedNotebookParser, self).parse(rawtext, document)
<SYSTEM_TASK:>
Run and return the results from the DuplicateScripts plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
    """Run the DuplicateScripts plugin over every script.

    Only scripts with more than 3 blocks are counted as duplicates;
    user-defined ("define %s") scripts are skipped entirely.
    """
    seen = set()
    for script in self.iter_scripts(scratch):
        if script[0].type.text == 'define %s':
            continue  # Ignore user defined scripts
        names = [name for name, _, _ in self.iter_blocks(script.blocks)]
        signature = tuple(names)
        if signature not in seen:
            seen.add(signature)
        elif len(names) > 3:
            self.total_duplicate += 1
            self.list_duplicate.append(names)
<SYSTEM_TASK:>
Set response content type
<END_TASK>
<USER_TASK:>
Description:
def _set_content_type(self, system):
""" Set response content type """ |
request = system.get('request')
if request:
response = request.response
ct = response.content_type
if ct == response.default_content_type:
response.content_type = 'application/json' |
<SYSTEM_TASK:>
Render a response
<END_TASK>
<USER_TASK:>
Description:
def _render_response(self, value, system):
""" Render a response """ |
view = system['view']
enc_class = getattr(view, '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return json.dumps(value, cls=enc_class) |
<SYSTEM_TASK:>
Get kwargs common for all methods.
<END_TASK>
<USER_TASK:>
Description:
def _get_common_kwargs(self, system):
""" Get kwargs common for all methods. """ |
enc_class = getattr(system['view'], '_json_encoder', None)
if enc_class is None:
enc_class = get_json_encoder()
return {
'request': system['request'],
'encoder': enc_class,
} |
<SYSTEM_TASK:>
Get kwargs common to create, update, replace.
<END_TASK>
<USER_TASK:>
Description:
def _get_create_update_kwargs(self, value, common_kw):
""" Get kwargs common to create, update, replace. """ |
kw = common_kw.copy()
kw['body'] = value
if '_self' in value:
kw['headers'] = [('Location', value['_self'])]
return kw |
<SYSTEM_TASK:>
Handle response rendering.
<END_TASK>
<USER_TASK:>
Description:
def _render_response(self, value, system):
    """Render a response, dispatching on ``request.action``.

    Looks up a ``render_<action>`` method on self; when found, it
    builds the response and assigns it to the request. Otherwise (or
    when the request/action is unavailable) falls back to the parent
    renderer.
    """
    fallback = super(DefaultResponseRendererMixin, self)._render_response
    try:
        method_name = 'render_{}'.format(system['request'].action)
    except (KeyError, AttributeError):
        return fallback(value, system)
    renderer = getattr(self, method_name, None)
    if renderer is None:
        return fallback(value, system)
    common_kw = self._get_common_kwargs(system)
    system['request'].response = renderer(value, system, common_kw)
<SYSTEM_TASK:>
Returns 'WWW-Authenticate' header with a value that should be used
<END_TASK>
<USER_TASK:>
Description:
def remember(self, request, username, **kw):
    """Build a 'WWW-Authenticate' header advertising the user's API key.

    The header value mirrors what clients should later send in their
    'Authorization' header. Returns None when no credentials callback
    is configured.
    """
    if not self.credentials_callback:
        return None
    token = self.credentials_callback(username, request)
    return [('WWW-Authenticate', 'ApiKey {}:{}'.format(username, token))]
<SYSTEM_TASK:>
Extract username and api key token from 'Authorization' header
<END_TASK>
<USER_TASK:>
Description:
def _get_credentials(self, request):
    """Extract (username, api_key) from the 'Authorization' header.

    Expects the header to look like ``ApiKey <username>:<token>``.
    Returns None when the header is missing or malformed.
    """
    header = request.headers.get('Authorization')
    if not header:
        return None
    parts = header.split(' ', 1)
    if len(parts) != 2:
        return None
    scheme, payload = parts
    if scheme.lower() != 'apikey':
        return None
    if six.PY2 or isinstance(payload, bytes):
        # Header arrived as bytes: try UTF-8 first, then latin-1.
        try:
            payload = payload.decode('utf-8')
        except UnicodeDecodeError:
            payload = payload.decode('latin-1')
    pieces = payload.split(':', 1)
    if len(pieces) != 2:
        return None
    return pieces[0], pieces[1]
<SYSTEM_TASK:>
Helper function to get event kwargs.
<END_TASK>
<USER_TASK:>
Description:
def _get_event_kwargs(view_obj):
    """Helper function to build kwargs for event construction.

    :param view_obj: Instance of View that processes the request.
    :returns dict: Event kwargs, or None when the view or its action
        method is marked as silent (``_silent``) and no events should
        be fired.
    """
    request = view_obj.request
    view_method = getattr(view_obj, request.action)
    silenced = (getattr(view_method, '_silent', False) or
                getattr(view_obj, '_silent', False))
    if silenced:
        return None
    event_kwargs = {
        'view': view_obj,
        'model': view_obj.Model,
        'fields': FieldData.from_dict(view_obj._json_params, view_obj.Model),
    }
    context = view_obj.context
    # Only pass an instance when the context is an actual document
    # (exposes a pk) or a DataProxy wrapper.
    if hasattr(context, 'pk_field') or isinstance(context, DataProxy):
        event_kwargs['instance'] = context
    return event_kwargs
<SYSTEM_TASK:>
Helper function to get event class.
<END_TASK>
<USER_TASK:>
Description:
def _get_event_cls(view_obj, events_map):
""" Helper function to get event class.
:param view_obj: Instance of View that processes the request.
:param events_map: Map of events from which event class should be
picked.
:returns: Found event class.
""" |
request = view_obj.request
view_method = getattr(view_obj, request.action)
event_action = (
getattr(view_method, '_event_action', None) or
request.action)
return events_map[event_action] |
<SYSTEM_TASK:>
Helper function to subscribe to group of events.
<END_TASK>
<USER_TASK:>
Description:
def subscribe_to_events(config, subscriber, events, model=None):
    """Subscribe one handler to a group of events.

    :param config: Pyramid config instance.
    :param subscriber: Event subscriber function.
    :param events: Sequence of events to subscribe to.
    :param model: Optional model predicate value.
    """
    predicate_kwargs = {} if model is None else {'model': model}
    for event in events:
        config.add_subscriber(subscriber, event, **predicate_kwargs)
<SYSTEM_TASK:>
Add processors for model field.
<END_TASK>
<USER_TASK:>
Description:
def add_field_processors(config, processors, model, field):
    """ Add processors for model field.

    Under the hood, a regular nefertari event subscriber is created
    which calls the field processors in the order passed to this
    function.

    Processors are passed the following params:

    * **new_value**: New value of the field.
    * **instance**: Instance affected by request. Is None when a set of
      items is updated in bulk and when an item is created.
    * **field**: Instance of nefertari.utils.data.FieldData
      containing data of the changed field.
    * **request**: Current Pyramid Request instance.
    * **model**: Model class affected by request.
    * **event**: Underlying event object.

    Each processor must return the processed value, which is passed to
    the next processor.

    :param config: Pyramid Configurator instance.
    :param processors: Sequence of processor functions.
    :param model: Model class for whose field processors are
        registered.
    :param field: Field name for which processors are registered.
    """
    # Subscribe to every "before write" event so processed values are
    # applied before the object is persisted.
    before_change_events = (
        BeforeCreate,
        BeforeUpdate,
        BeforeReplace,
        BeforeUpdateMany,
        BeforeRegister,
    )
    def wrapper(event, _processors=processors, _field=field):
        # _processors/_field are bound as default args so each wrapper
        # keeps the values from this registration (late-binding guard).
        proc_kw = {
            'new_value': event.field.new_value,
            'instance': event.instance,
            'field': event.field,
            'request': event.view.request,
            'model': event.model,
            'event': event,
        }
        # Chain the processors: each receives the previous result as
        # 'new_value' and returns the next value.
        for proc_func in _processors:
            proc_kw['new_value'] = proc_func(**proc_kw)
        event.field.new_value = proc_kw['new_value']
        event.set_field_value(_field, proc_kw['new_value'])
    for evt in before_change_events:
        config.add_subscriber(wrapper, evt, model=model, field=field)
<SYSTEM_TASK:>
Set value of request field named `field_name`.
<END_TASK>
<USER_TASK:>
Description:
def set_field_value(self, field_name, value):
    """Set the value of the request field named ``field_name``.

    The value is written to ``view._json_params`` so it affects the
    object being processed. When the field is not already part of the
    request, it is added to ``self.fields`` so that processors bound
    to it will still fire in handlers that run after this call.

    :param field_name: Name of the request field to set.
    :param value: Value to be set.
    """
    self.view._json_params[field_name] = value
    if field_name in self.fields:
        self.fields[field_name].new_value = value
    else:
        self.fields.update(
            FieldData.from_dict({field_name: value}, self.model))
<SYSTEM_TASK:>
Set value of response field named `field_name`.
<END_TASK>
<USER_TASK:>
Description:
def set_field_value(self, field_name, value):
    """Set ``field_name`` to ``value`` on every item in the response.

    A single-item response is edited directly; a collection response
    (one with a 'data' key) has each of its items edited. Response
    meta such as 'count' must be edited via ``event.response``
    directly. No-op when there is no response yet.

    :param field_name: Name of the response field to set.
    :param value: Value to be set.
    """
    if self.response is None:
        return
    items = (self.response['data'] if 'data' in self.response
             else [self.response])
    for item in items:
        item[field_name] = value
<SYSTEM_TASK:>
Process 'fields' ES param.
<END_TASK>
<USER_TASK:>
Description:
def process_fields_param(fields):
    """ Process 'fields' ES param.

    * Fields list is split if needed
    * '_type' field is added, if not present, so the actual value is
      displayed instead of 'None'

    :param fields: Comma-separated string or sequence of field names.
        Falsy values are returned unchanged.
    :returns: Dict of ES source-filtering params, or ``fields`` as-is
        when it is falsy.
    """
    if not fields:
        return fields
    if isinstance(fields, six.string_types):
        fields = split_strip(fields)
    # NOTE(review): when a list is passed in, '_type' is appended to
    # the caller's list in place -- confirm callers don't rely on the
    # original list staying unchanged.
    if '_type' not in fields:
        fields.append('_type')
    return {
        '_source_include': fields,
        '_source': True,
    }
<SYSTEM_TASK:>
Catch and raise index errors which are not critical and thus
<END_TASK>
<USER_TASK:>
Description:
def _catch_index_error(self, response):
    """Surface per-item indexing errors ES reports but doesn't raise.

    elasticsearch-py treats per-item bulk failures as non-critical, so
    inspect the raw bulk response body and raise an HTTP 400 with the
    first reported index error. Silently returns when the response is
    empty or error-free.
    """
    _, _, raw_data = response
    if not raw_data:
        return
    data = json.loads(raw_data)
    if not data or not data.get('errors'):
        return
    try:
        message = data['items'][0]['index']['error']
    except (KeyError, IndexError):
        return
    raise exception_response(400, detail=message)
<SYSTEM_TASK:>
Setup ES mappings for all existing models.
<END_TASK>
<USER_TASK:>
Description:
def setup_mappings(cls, force=False):
    """ Setup ES mappings for all existing models.

    This method is meant to be run once at application launch.
    The ES._mappings_setup flag is set so that mapping creation calls
    are not repeated on subsequent runs.

    Use `force=True` to make subsequent calls perform mapping
    creation calls to ES.
    """
    # Guard: only run once per process unless explicitly forced.
    if getattr(cls, '_mappings_setup', False) and not force:
        log.debug('ES mappings have been already set up for currently '
                  'running application. Call `setup_mappings` with '
                  '`force=True` to perform mappings set up again.')
        return
    log.info('Setting up ES mappings for all existing models')
    models = engine.get_document_classes()
    try:
        for model_name, model_cls in models.items():
            # Only models that opted into indexing get a mapping.
            if getattr(model_cls, '_index_enabled', False):
                es = cls(model_cls.__name__)
                es.put_mapping(body=model_cls.get_es_mapping())
    except JHTTPBadRequest as ex:
        raise Exception(ex.json['extra']['data'])
    cls._mappings_setup = True
<SYSTEM_TASK:>
Apply `operation` to chunks of `documents` of size
<END_TASK>
<USER_TASK:>
Description:
def process_chunks(self, documents, operation):
    """Apply ``operation`` to ``documents`` in chunks.

    ``operation`` is called once per slice of at most
    ``self.chunk_size`` documents, receiving it via the
    ``documents_actions`` keyword.
    """
    size = self.chunk_size
    for start in range(0, len(documents), size):
        operation(documents_actions=documents[start:start + size])
<SYSTEM_TASK:>
Index documents that are missing from ES index.
<END_TASK>
<USER_TASK:>
Description:
def index_missing_documents(self, documents, request=None):
    """ Index documents that are missing from ES index.

    Determines which documents are missing using ES `mget` call which
    returns a list of document IDs as `documents`. Then missing
    `documents` from that list are indexed.

    :param documents: Sequence of dicts, each carrying a '_pk' key
        with the document's primary key.
    :param request: Optional request, passed through to the bulk call.
    """
    log.info('Trying to index documents of type `{}` missing from '
             '`{}` index'.format(self.doc_type, self.index_name))
    if not documents:
        log.info('No documents to index')
        return
    query_kwargs = dict(
        index=self.index_name,
        doc_type=self.doc_type,
        fields=['_id'],
        body={'ids': [d['_pk'] for d in documents]},
    )
    try:
        response = self.api.mget(**query_kwargs)
    except IndexNotFoundException:
        # Index doesn't exist yet: everything counts as missing.
        indexed_ids = set()
    else:
        indexed_ids = set(
            d['_id'] for d in response['docs'] if d.get('found'))
    # Keep only documents whose pk is not already indexed.
    documents = [d for d in documents if str(d['_pk']) not in indexed_ids]
    if not documents:
        log.info('No documents of type `{}` are missing from '
                 'index `{}`'.format(self.doc_type, self.index_name))
        return
    self._bulk('index', documents, request)
<SYSTEM_TASK:>
Return the package version with a regex instead of importing it
<END_TASK>
<USER_TASK:>
Description:
def get_version(path="src/devpy/__init__.py"):
    """Return the package version by regexing the given file.

    Avoids importing the package just to read ``__version__``.

    :param path: Path of the file declaring ``__version__``.
    :returns: The version string.
    :raises AttributeError: when no ``__version__`` assignment is found.
    """
    # Bug fix: the original `open(path, "rt").read()` never closed the
    # file handle; use a context manager instead.
    with open(path, "rt") as init_file:
        init_content = init_file.read()
    pattern = r"^__version__ = ['\"]([^'\"]*)['\"]"
    return re.search(pattern, init_content, re.M).group(1)
<SYSTEM_TASK:>
Get enabled plugins for specified hook name.
<END_TASK>
<USER_TASK:>
Description:
def get_enabled_hook_plugins(self, hook, args, **kwargs):
    """Return the enabled plugins for the named hook.

    Each extension is instantiated via ``_create_hook_plugin``;
    extensions that come back as None (disabled) are filtered out.
    Returns an empty list when the hook's manager holds no extensions.
    """
    manager = self.hook_managers[hook]
    if not list(manager):
        return []
    created = manager.map(self._create_hook_plugin, args, **kwargs)
    return [plugin for plugin in created if plugin is not None]
<SYSTEM_TASK:>
Get mutually-exclusive plugin for plugin namespace.
<END_TASK>
<USER_TASK:>
Description:
def get_driver(self, namespace, parsed_args, **kwargs):
    """Instantiate the selected driver plugin for ``namespace``.

    The driver name is read from the parsed CLI arguments (falling
    back to 'default'), and the matching extension is constructed via
    its ``from_args`` factory with a ``<dest>_`` option prefix.
    """
    _, dest = self._namespace_to_option(namespace)
    driver_name = getattr(parsed_args, dest, 'default')
    extension = self.driver_managers[namespace][driver_name]
    return extension.plugin.from_args(
        parsed_args, '{0}_'.format(dest), **kwargs)
<SYSTEM_TASK:>
Output the aggregate block count results.
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
    """Print each block's aggregate count (ascending) plus the total."""
    total = 0
    for name, count in sorted(self.blocks.items(), key=lambda pair: pair[1]):
        total += count
        print('{:3} {}'.format(count, name))
    print('{:3} total'.format(total))
<SYSTEM_TASK:>
Run and return the results from the BlockCounts plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
    """Run and return the results from the BlockCounts plugin."""
    counts = Counter(
        name
        for script in self.iter_scripts(scratch)
        for name, _, _ in self.iter_blocks(script.blocks))
    self.blocks.update(counts)  # Fold this file into the overall count
    return {'types': counts}
<SYSTEM_TASK:>
Run and return the results from the DeadCode plugin.
<END_TASK>
<USER_TASK:>
Description:
def analyze(self, scratch, **kwargs):
    """Run and return the results from the DeadCode plugin.

    The variable_event flag indicates that the Scratch file contains
    at least one broadcast event based on a variable. When
    variable_event is True, dead code scripts reported by this plugin
    that begin with a "when I receive" block may not actually indicate
    dead code.
    """
    self.total_instances += 1
    sprites = {}
    # Group unreachable scripts by the sprite that owns them.
    for sprite, script in self.iter_sprite_scripts(scratch):
        if not script.reachable:
            sprites.setdefault(sprite, []).append(script)
    if sprites:
        self.dead_code_instances += 1
    # Bug fix: removed leftover debug output (inline `import pprint`
    # and `pprint.pprint(sprites)`) that spammed stdout on every run.
    variable_event = any(True in self.get_broadcast_events(x) for x in
                         self.iter_scripts(scratch))
    return {'dead_code': {'sprites': sprites,
                          'variable_event': variable_event}}
<SYSTEM_TASK:>
Output the number of instances that contained dead code.
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
    """Report how many analyzed instances contained dead code.

    Nothing is printed when fewer than two instances were analyzed.
    """
    if self.total_instances <= 1:
        return
    print('{} of {} instances contained dead code.'
          .format(self.dead_code_instances, self.total_instances))
<SYSTEM_TASK:>
Show help and basic usage
<END_TASK>
<USER_TASK:>
Description:
def show_help(name):
    """Print usage information for the ISO8583 message client.

    :param name: Program name shown in the usage line (typically
        ``sys.argv[0]``).
    """
    print('Usage: python3 {} [OPTIONS]... '.format(name))
    print('ISO8583 message client')
    print(' -v, --verbose\t\tRun transactions verbosely')
    print(' -p, --port=[PORT]\t\tTCP port to connect to, 1337 by default')
    print(' -s, --server=[IP]\t\tIP of the ISO host to connect to, 127.0.0.1 by default')
    print(' -t, --terminal=[ID]\t\tTerminal ID (used in DE 41 ISO field, 10001337 by default)')
    print(' -m, --merchant=[ID]\t\tMerchant ID (used in DE 42 ISO field, 999999999999001 by default)')
    print(' -k, --terminal-key=[KEY]\t\tTerminal key (\'DEADBEEF DEADBEEF DEADBEEF DEADBEEF\' by default)')
    # Bug fix: '\t\M' printed a literal backslash-M; '\t\t' was intended.
    print(' -K, --master-key=[KEY]\t\tMaster key (\'ABABABAB CDCDCDCD EFEFEFEF AEAEAEAE\' by default)')
    print(' -f, --file=[file.xml]\t\tUse transaction data from the given XML-file')
<SYSTEM_TASK:>
Finds the top-level directory of a project given a start directory
<END_TASK>
<USER_TASK:>
Description:
def find_top_level_directory(start_directory):
    """Find the top-level directory of a project.

    Walks upward from ``start_directory`` while the current directory
    is a package (contains ``__init__.py``); raises when the walk
    reaches the filesystem root.

    Parameters
    ----------
    start_directory : str
        The directory in which test discovery will start.
    """
    current = start_directory
    while True:
        if not os.path.isfile(os.path.join(current, '__init__.py')):
            return os.path.abspath(current)
        current = os.path.dirname(current)
        if current == os.path.dirname(current):
            raise ValueError("Can't find top level directory")
<SYSTEM_TASK:>
Do test case discovery.
<END_TASK>
<USER_TASK:>
Description:
def discover(self, start, top_level_directory=None, pattern='test*.py'):
    """Top-level entry point for test discovery.

    Dispatches on what ``start`` is: an existing directory (discover
    the package it contains), an existing file (load tests from that
    module file), or otherwise a dotted package/module/class/test
    name.

    Parameters
    ----------
    start : str
        The directory, package, module, class or test to load.
    top_level_directory : str
        The parent directory of the project's top-level Python
        package.
    pattern : str
        The glob pattern to match the filenames of modules to search
        for tests.
    """
    logger.debug('Starting test discovery')
    if os.path.isdir(start):
        return self.discover_by_directory(
            start, top_level_directory=top_level_directory,
            pattern=pattern)
    if os.path.isfile(start):
        return self.discover_by_file(
            start, top_level_directory=top_level_directory)
    return self.discover_by_module(
        start, top_level_directory=top_level_directory, pattern=pattern)
<SYSTEM_TASK:>
Find all tests in a package or module, or load a single test case if
<END_TASK>
<USER_TASK:>
Description:
def discover_by_module(self, module_name, top_level_directory=None,
                       pattern='test*.py'):
    """Find all tests in a package or module, or load a single test case if
    a class or test inside a module was specified.

    Parameters
    ----------
    module_name : str
        The dotted package name, module name or TestCase class and
        test method.
    top_level_directory : str
        The path to the top-level directory of the project. This is
        the parent directory of the project's top-level Python
        package.
    pattern : str
        The glob pattern to match the filenames of modules to search
        for tests.
    """
    # If the top level directory is given, the module may only be
    # importable with that in the path.
    if top_level_directory is not None and \
            top_level_directory not in sys.path:
        sys.path.insert(0, top_level_directory)
    logger.debug('Discovering tests by module: module_name=%r, '
                 'top_level_directory=%r, pattern=%r', module_name,
                 top_level_directory, pattern)
    try:
        module, case_attributes = find_module_by_name(module_name)
    except ImportError:
        # Not importable as-is: fall back to filtered discovery, which
        # treats the name as a filter over discovered tests.
        return self.discover_filtered_tests(
            module_name, top_level_directory=top_level_directory,
            pattern=pattern)
    dirname, basename = os.path.split(module.__file__)
    basename = os.path.splitext(basename)[0]
    if len(case_attributes) == 0 and basename == '__init__':
        # Discover in a package
        return self.discover_by_directory(
            dirname, top_level_directory, pattern=pattern)
    elif len(case_attributes) == 0:
        # Discover all in a module
        return self._loader.load_module(module)
    # A class (and possibly method) was named: load just that case.
    return self.discover_single_case(module, case_attributes)
<SYSTEM_TASK:>
Find and load a single TestCase or TestCase method from a module.
<END_TASK>
<USER_TASK:>
Description:
def discover_single_case(self, module, case_attributes):
    """Load a single TestCase class or TestCase method from a module.

    Parameters
    ----------
    module : module
        The imported Python module containing the TestCase to be
        loaded.
    case_attributes : list
        A list (length 1 or 2) of str: a TestCase subclass name,
        optionally followed by a test method name.
    """
    loader = self._loader
    target = module
    for index, attribute in enumerate(case_attributes):
        target = getattr(target, attribute, None)
        if target is None:
            # Attribute chain broke: nothing to load.
            return loader.create_suite()
        if loader.is_test_case(target):
            remaining = case_attributes[index + 1:]
            if len(remaining) > 1:
                raise ValueError('Too many components in module path')
            if remaining:
                return loader.create_suite(
                    [loader.load_test(target, remaining[0])])
            return loader.load_case(target)
    # No cases matched, return empty suite
    return loader.create_suite()
<SYSTEM_TASK:>
Run test discovery in a directory.
<END_TASK>
<USER_TASK:>
Description:
def discover_by_directory(self, start_directory, top_level_directory=None,
                          pattern='test*.py'):
    """Run test discovery in a directory.

    Parameters
    ----------
    start_directory : str
        The package directory in which to start test discovery.
    top_level_directory : str
        The parent directory of the project's top-level Python
        package; inferred from ``start_directory`` when omitted.
    pattern : str
        The glob pattern to match the filenames of modules to search
        for tests.
    """
    start_directory = os.path.abspath(start_directory)
    if top_level_directory is None:
        top_level_directory = find_top_level_directory(start_directory)
    logger.debug('Discovering tests in directory: start_directory=%r, '
                 'top_level_directory=%r, pattern=%r', start_directory,
                 top_level_directory, pattern)
    assert_start_importable(top_level_directory, start_directory)
    # Make project-relative imports resolvable during collection.
    if top_level_directory not in sys.path:
        sys.path.insert(0, top_level_directory)
    found = self._discover_tests(
        start_directory, top_level_directory, pattern)
    return self._loader.create_suite(list(found))
<SYSTEM_TASK:>
Run test discovery on a single file.
<END_TASK>
<USER_TASK:>
Description:
def discover_by_file(self, start_filepath, top_level_directory=None):
    """Run test discovery on a single file.

    Parameters
    ----------
    start_filepath : str
        The module file in which to start test discovery.
    top_level_directory : str
        The parent directory of the project's top-level Python
        package; inferred from the file's directory when omitted.
    """
    start_filepath = os.path.abspath(start_filepath)
    start_directory = os.path.dirname(start_filepath)
    if top_level_directory is None:
        top_level_directory = find_top_level_directory(start_directory)
    # Bug fix: the format string was missing the '%r' placeholder for
    # top_level_directory, so logging raised a formatting error
    # ("not all arguments converted") for this debug message.
    logger.debug('Discovering tests in file: start_filepath=%r, '
                 'top_level_directory=%r', start_filepath,
                 top_level_directory)
    assert_start_importable(top_level_directory, start_directory)
    if top_level_directory not in sys.path:
        sys.path.insert(0, top_level_directory)
    tests = self._load_from_file(
        start_filepath, top_level_directory)
    return self._loader.create_suite(list(tests))
<SYSTEM_TASK:>
Set proper headers.
<END_TASK>
<USER_TASK:>
Description:
def _set_options_headers(self, methods):
""" Set proper headers.
Sets following headers:
Allow
Access-Control-Allow-Methods
Access-Control-Allow-Headers
Arguments:
:methods: Sequence of HTTP method names that are value for
requested URI
""" |
request = self.request
response = request.response
response.headers['Allow'] = ', '.join(sorted(methods))
if 'Access-Control-Request-Method' in request.headers:
response.headers['Access-Control-Allow-Methods'] = \
', '.join(sorted(methods))
if 'Access-Control-Request-Headers' in request.headers:
response.headers['Access-Control-Allow-Headers'] = \
'origin, x-requested-with, content-type'
return response |
<SYSTEM_TASK:>
Get names of HTTP methods that can be used at requested URI.
<END_TASK>
<USER_TASK:>
Description:
def _get_handled_methods(self, actions_map):
""" Get names of HTTP methods that can be used at requested URI.
Arguments:
:actions_map: Map of actions. Must have the same structure as
self._item_actions and self._collection_actions
""" |
methods = ('OPTIONS',)
defined_actions = []
for action_name in actions_map.keys():
view_method = getattr(self, action_name, None)
method_exists = view_method is not None
method_defined = view_method != self.not_allowed_action
if method_exists and method_defined:
defined_actions.append(action_name)
for action in defined_actions:
methods += actions_map[action]
return methods |
<SYSTEM_TASK:>
Handle item OPTIONS request.
<END_TASK>
<USER_TASK:>
Description:
def item_options(self, **kwargs):
    """Handle an OPTIONS request on an item route.

    Singular resources are special-cased: although registered as item
    routes, they may also handle POST, so the 'create' action is added
    to the advertised set for them.
    """
    actions = dict(self._item_actions)
    if self._resource.is_singular:
        actions['create'] = ('POST',)
    return self._set_options_headers(self._get_handled_methods(actions))
<SYSTEM_TASK:>
Pop and return aggregation params from query string params.
<END_TASK>
<USER_TASK:>
Description:
def pop_aggregations_params(self):
    """Pop and return aggregation params from query string params.

    Aggregation params are expected to be nested under (prefixed by)
    any of ``self._aggregations_keys``; the first matching key wins.

    :raises KeyError: when none of the aggregation keys is present.
    """
    from nefertari.view import BaseView
    self._query_params = BaseView.convert_dotted(self.view._query_params)
    # Bug fix: the original raised KeyError as soon as the FIRST key
    # was absent, so later keys in _aggregations_keys were never
    # checked. Only raise after all candidates have been tried.
    for key in self._aggregations_keys:
        if key in self._query_params:
            return self._query_params.pop(key)
    raise KeyError('Missing aggregation params')
<SYSTEM_TASK:>
Recursively get values under the 'field' key.
<END_TASK>
<USER_TASK:>
Description:
def get_aggregations_fields(cls, params):
    """Recursively collect values stored under 'field' keys.

    Used to determine the names of the fields on which aggregations
    should be performed.
    """
    fields = []
    for key, value in params.items():
        if isinstance(value, dict):
            fields.extend(cls.get_aggregations_fields(value))
        if key == 'field':
            fields.append(value)
    return fields
<SYSTEM_TASK:>
Check per-field privacy rules in aggregations.
<END_TASK>
<USER_TASK:>
Description:
def check_aggregations_privacy(self, aggregations_params):
    """ Check per-field privacy rules in aggregations.

    Privacy is checked by making sure user has access to the fields
    used in aggregations.

    :param aggregations_params: Dict of aggregation params taken from
        the query string.
    :raises JHTTPForbidden: when the user may not read one or more of
        the aggregated fields.
    """
    fields = self.get_aggregations_fields(aggregations_params)
    # Privacy validation works on a document-shaped dict, so build one
    # containing just the aggregated field names plus the model type.
    fields_dict = dictset.fromkeys(fields)
    fields_dict['_type'] = self.view.Model.__name__
    try:
        validate_data_privacy(self.view.request, fields_dict)
    except wrappers.ValidationError as ex:
        raise JHTTPForbidden(
            'Not enough permissions to aggregate on '
            'fields: {}'.format(ex))
<SYSTEM_TASK:>
Perform aggregation and return response.
<END_TASK>
<USER_TASK:>
Description:
def aggregate(self):
    """Run the aggregation against Elasticsearch and return its response."""
    from nefertari.elasticsearch import ES
    aggregations_params = self.pop_aggregations_params()
    if self.view._auth_enabled:
        # Ensure the user may read every field being aggregated on.
        self.check_aggregations_privacy(aggregations_params)
    self.stub_wrappers()
    es = ES(self.view.Model.__name__)
    return es.aggregate(
        _aggregations_params=aggregations_params, **self._query_params)
<SYSTEM_TASK:>
Load a TestSuite containing all TestCase instances for all tests in
<END_TASK>
<USER_TASK:>
Description:
def load_case(self, testcase):
    """Build a TestSuite with one test per test method of ``testcase``.

    Parameters
    ----------
    testcase : type
        A subclass of :class:`unittest.TestCase`
    """
    names = self.find_test_method_names(testcase)
    return self.create_suite(
        [self.load_test(testcase, name) for name in names])
<SYSTEM_TASK:>
Create and return a test suite containing all cases loaded from the
<END_TASK>
<USER_TASK:>
Description:
def load_module(self, module):
    """Build a suite of all test cases found in ``module``.

    Parameters
    ----------
    module : module
        A module object containing ``TestCases``
    """
    suites = [self.load_case(case)
              for case in self.get_test_cases_from_module(module)]
    return self.create_suite(suites)
<SYSTEM_TASK:>
Helper function that can be used in ``db_key`` to support `self`
<END_TASK>
<USER_TASK:>
Description:
def authenticated_userid(request):
    """Return the pk value of the currently authenticated user.

    Helper function that can be used in ``db_key`` to support `self`
    as a collection key.

    :returns: The user's primary-key value, or None when the request
        has no authenticated user.
    """
    user = getattr(request, 'user', None)
    if user is None:
        # Bug fix: the original fell through to user.pk_field() and
        # raised AttributeError for unauthenticated requests.
        return None
    return getattr(user, user.pk_field())
<SYSTEM_TASK:>
A generator for blocks contained in a block list.
<END_TASK>
<USER_TASK:>
Description:
def iter_blocks(block_list):
    """A generator for blocks contained in a block list.

    Yields tuples containing the block name, the depth that the block
    was found at, and finally a handle to the block itself.
    """
    # queue the block and the depth of the block
    queue = [(block, 0) for block in block_list
             if isinstance(block, kurt.Block)]
    while queue:
        block, depth = queue.pop(0)
        assert block.type.text
        yield block.type.text, depth, block
        for arg in block.args:
            if hasattr(arg, '__iter__'):
                # Iterable argument (a nested list of blocks): prepend
                # its blocks at the queue head, one level deeper, so
                # they are yielded before remaining queued blocks.
                queue[0:0] = [(x, depth + 1) for x in arg
                              if isinstance(x, kurt.Block)]
            elif isinstance(arg, kurt.Block):
                # Single block argument: appended at the tail at the
                # same depth as the current block.
                queue.append((arg, depth))
<SYSTEM_TASK:>
Return the type of block the script begins with.
<END_TASK>
<USER_TASK:>
Description:
def script_start_type(script):
    """Return the hat-block type that ``script`` begins with."""
    hats = {
        'when @greenFlag clicked': HairballPlugin.HAT_GREEN_FLAG,
        'when I receive %s': HairballPlugin.HAT_WHEN_I_RECEIVE,
        'when this sprite clicked': HairballPlugin.HAT_MOUSE,
        'when %s key pressed': HairballPlugin.HAT_KEY,
    }
    return hats.get(script[0].type.text, HairballPlugin.NO_HAT)
<SYSTEM_TASK:>
Return a Counter of event-names that were broadcast.
<END_TASK>
<USER_TASK:>
Description:
def get_broadcast_events(cls, script):
    """Count the event names broadcast within ``script``.

    The returned Counter maps lowercased event names to broadcast
    counts; the key ``True`` counts broadcasts whose event name is a
    variable (a nested block) rather than a literal string.
    """
    events = Counter()
    for name, _, block in cls.iter_blocks(script):
        if 'broadcast %s' not in name:
            continue
        argument = block.args[0]
        if isinstance(argument, kurt.Block):
            events[True] += 1
        else:
            events[argument.lower()] += 1
    return events
<SYSTEM_TASK:>
Tag each script with attribute reachable.
<END_TASK>
<USER_TASK:>
Description:
def tag_reachable_scripts(cls, scratch):
    """Tag each script with attribute reachable.

    The reachable attribute will be set false for any script that does
    not begin with a hat block. Additionally, any script that begins
    with a 'when I receive' block whose event-name doesn't appear in a
    corresponding broadcast block is marked as unreachable.
    """
    if getattr(scratch, 'hairball_prepared', False):  # Only process once
        return
    reachable = set()
    untriggered_events = {}
    # Initial pass to find reachable and potentially reachable scripts
    for script in cls.iter_scripts(scratch):
        if not isinstance(script, kurt.Comment):
            starting_type = cls.script_start_type(script)
            if starting_type == cls.NO_HAT:
                script.reachable = False
            elif starting_type == cls.HAT_WHEN_I_RECEIVE:
                # Value will be updated if reachable
                script.reachable = False
                message = script[0].args[0].lower()
                untriggered_events.setdefault(message, set()).add(script)
            else:
                script.reachable = True
                reachable.add(script)
    # Expand reachable states based on broadcast events. This is a
    # fixpoint loop: a newly-reachable script may broadcast further
    # events, waking more 'when I receive' scripts.
    while reachable:
        for event in cls.get_broadcast_events(reachable.pop()):
            if event in untriggered_events:
                for script in untriggered_events.pop(event):
                    script.reachable = True
                    reachable.add(script)
    scratch.hairball_prepared = True
<SYSTEM_TASK:>
Attribute that returns the plugin description from its docstring.
<END_TASK>
<USER_TASK:>
Description:
def description(self):
    """Plugin description derived from the class docstring.

    Joins all non-empty docstring lines after the summary (the first
    two lines are skipped) into one space-separated string.
    """
    stripped = (line.strip() for line in self.__doc__.split('\n')[2:])
    return ' '.join(line for line in stripped if line)
<SYSTEM_TASK:>
Internal hook that marks reachable scripts before calling analyze.
<END_TASK>
<USER_TASK:>
Description:
def _process(self, scratch, filename, **kwargs):
    """Internal hook that marks reachable scripts before calling analyze.

    Returns data exactly as returned by the analyze method.

    :param scratch: Parsed Scratch project object.
    :param filename: Name of the file being analyzed; forwarded to
        ``analyze`` as a keyword argument.
    """
    self.tag_reachable_scripts(scratch)
    return self.analyze(scratch, filename=filename, **kwargs)
<SYSTEM_TASK:>
Decorator to handle formatting kwargs to the proper names expected by
<END_TASK>
<USER_TASK:>
Description:
def _format_kwargs(func):
"""Decorator to handle formatting kwargs to the proper names expected by
the associated function. The formats dictionary string keys will be used as
expected function kwargs and the value list of strings will be renamed to
the associated key string.""" |
formats = {}
formats['blk'] = ["blank"]
formats['dft'] = ["default"]
formats['hdr'] = ["header"]
formats['hlp'] = ["help"]
formats['msg'] = ["message"]
formats['shw'] = ["show"]
formats['vld'] = ["valid"]
@wraps(func)
def inner(*args, **kwargs):
for k in formats.keys():
for v in formats[k]:
if v in kwargs:
kwargs[k] = kwargs[v]
kwargs.pop(v)
return func(*args, **kwargs)
return inner |
<SYSTEM_TASK:>
Shows a menu with the given list of `MenuEntry` items.
<END_TASK>
<USER_TASK:>
Description:
def show_menu(entries, **kwargs):
    """Shows a menu with the given list of `MenuEntry` items.
    **Params**:
      - header (str) - String to show above menu.
      - note (str) - String to show as a note below menu.
      - msg (str) - String to show below menu.
      - dft (str) - Default value if input is left blank.
      - compact (bool) - If true, the menu items will not be displayed
        [default: False].
      - returns (str) - Controls what part of the menu entry is returned,
        'func' returns function result [default: name].
      - limit (int) - If set, limits the number of menu entries shown at a
        time [default: None].
      - fzf (bool) - If true, can enter FCHR at the menu prompt to search
        menu [default: True].
    """
    global _AUTO
    hdr = kwargs.get('hdr', "")
    note = kwargs.get('note', "")
    # Pop (rather than get) 'fzf' so it is not forwarded to show_limit().
    fzf = kwargs.pop('fzf', True)
    compact = kwargs.get('compact', False)
    returns = kwargs.get('returns', "name")
    limit = kwargs.get('limit', None)
    # Bug fix: 'dft' was previously fetched twice with conflicting defaults
    # ("" then None); only the final None-default fetch took effect, so the
    # redundant first fetch is removed.
    dft = kwargs.get('dft', None)
    msg = []
    if limit:
        return show_limit(entries, **kwargs)

    def show_banner():
        # Build the banner (and the entry listing, unless in auto mode)
        # into the shared prompt message.
        banner = "-- MENU"
        if hdr:
            banner += ": " + hdr
        banner += " --"
        msg.append(banner)
        if _AUTO:
            return
        for i in entries:
            msg.append(" (%s) %s" % (i.name, i.desc))

    valid = [i.name for i in entries]
    if isinstance(dft, int):  # was: type(dft) == int
        dft = str(dft)
    # NOTE(review): default is cleared whenever it does not name a menu
    # entry — confirm this matches the intended validation scope.
    if dft not in valid:
        dft = None
    if not compact:
        show_banner()
    if note and not _AUTO:
        msg.append("[!] " + note)
    if fzf:
        valid.append(FCHR)
    msg.append(QSTR + kwargs.get('msg', "Enter menu selection"))
    msg = os.linesep.join(msg)
    entry = None
    while entry not in entries:
        choice = ask(msg, vld=valid, dft=dft, qstr=False)
        if choice == FCHR and fzf:
            try:
                from iterfzf import iterfzf
                choice = iterfzf(reversed(["%s\t%s" % (i.name, i.desc) for i in entries])).strip("\0").split("\t", 1)[0]
            except Exception:  # was bare except; don't mask SystemExit/KeyboardInterrupt
                warn("Issue encountered during fzf search.")
        match = [i for i in entries if i.name == choice]
        if match:
            entry = match[0]
    if entry.func:
        fresult = run_func(entry)
        if "func" == returns:
            return fresult
    try:
        return getattr(entry, returns)
    except Exception:  # was bare except; fall back to the entry name
        return getattr(entry, "name")
<SYSTEM_TASK:>
Runs the function associated with the given MenuEntry.
<END_TASK>
<USER_TASK:>
Description:
def run_func(entry):
    """Runs the function associated with the given MenuEntry."""
    if not entry.func:
        return None
    # Normalize missing/empty argument containers so a single call site
    # covers the positional, keyword, mixed, and no-argument cases.
    args = entry.args or ()
    krgs = entry.krgs or {}
    return entry.func(*args, **krgs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.