<SYSTEM_TASK:>
This save method protects against two processes concurrently modifying
<END_TASK>
<USER_TASK:>
Description:
def save(self, *args, **kwargs):
"""
This save method protects against two processes concurrently modifying
the same object. Normally the second save would silently overwrite the
changes from the first. Instead we raise a ConcurrentModificationError.
""" |
cls = self.__class__
if self.pk:
rows = cls.objects.filter(
pk=self.pk, _change=self._change).update(
_change=self._change + 1)
if not rows:
raise ConcurrentModificationError(cls.__name__, self.pk)
self._change += 1
count = 0
max_retries = 3
while True:
try:
return super(BaseModel, self).save(*args, **kwargs)
except django.db.utils.OperationalError:
if count >= max_retries:
raise
count += 1 |
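A minimal usage sketch of the optimistic lock above; the Run model and its
status field are hypothetical stand-ins for any model inheriting BaseModel:

    run = Run.objects.get(pk=run_id)
    run.status = 'complete'
    try:
        run.save()
    except ConcurrentModificationError:
        # Another process bumped _change first; reload and reapply the edit.
        run = Run.objects.get(pk=run_id)
        run.status = 'complete'
        run.save()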
<SYSTEM_TASK:>
If the object is being edited by other processes,
<END_TASK>
<USER_TASK:>
Description:
def setattrs_and_save_with_retries(self, assignments, max_retries=5):
"""
If the object is being edited by other processes,
save may fail due to concurrent modification.
This method recovers and retries the edit.
assignments is a dict of {attribute: value}
""" |
count = 0
obj = self
while True:
for attribute, value in assignments.iteritems():
setattr(obj, attribute, value)
try:
obj.full_clean()
obj.save()
except ConcurrentModificationError:
if count >= max_retries:
raise SaveRetriesExceededError(
'Exceeded retries when saving "%s" of id "%s" '\
'with assigned values "%s"' %
(self.__class__, self.id, assignments))
count += 1
obj = self.__class__.objects.get(id=self.id)
continue
return obj |
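The same edit expressed through the retry helper; Run is again a hypothetical
model inheriting BaseModel:

    run = Run.objects.get(pk=run_id)
    # Retries on ConcurrentModificationError, reloading the object from the
    # database between attempts, up to max_retries times.
    run = run.setattrs_and_save_with_retries(
        {'status': 'complete'}, max_retries=5)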
<SYSTEM_TASK:>
Anywhere in "template" that refers to a data object but does not
<END_TASK>
<USER_TASK:>
Description:
def _substitute_file_uuids_throughout_template(self, template, file_dependencies):
"""Anywhere in "template" that refers to a data object but does not
give a specific UUID, if a matching file can be found in "file_dependencies",
the data object reference is changed to use that UUID. This gives templates
a preference for files nested under their ".dependencies" over files that
were previously imported to the server.
""" |
if not isinstance(template, dict):
# Nothing to do if this is a reference to a previously imported template.
return
for input in template.get('inputs', []):
self._substitute_file_uuids_in_input(input, file_dependencies)
for step in template.get('steps', []):
self._substitute_file_uuids_throughout_template(step, file_dependencies) |
<SYSTEM_TASK:>
This is a standard method called indirectly by calling
<END_TASK>
<USER_TASK:>
Description:
def create(self, validated_data):
""" This is a standard method called indirectly by calling
'save' on the serializer.
This method expects the 'parent_field' and 'parent_instance' to
be included in the Serializer context.
""" |
if self.context.get('parent_field') \
and self.context.get('parent_instance'):
validated_data.update({
self.context.get('parent_field'):
self.context.get('parent_instance')})
instance = self.Meta.model(**validated_data)
instance.full_clean()
instance.save()
return instance |
<SYSTEM_TASK:>
Retry sending request until timeout or until receiving a response.
<END_TASK>
<USER_TASK:>
Description:
def _make_request_to_server(self, query_function, raise_for_status=True,
time_limit_seconds=2, retry_delay_seconds=0.2):
"""Retry sending request until timeout or until receiving a response.
""" |
start_time = datetime.datetime.now()
error = None
while datetime.datetime.now() - start_time < datetime.timedelta(
0, time_limit_seconds):
error = None
response = None
try:
response = query_function()
except requests.exceptions.ConnectionError as e:
error = ServerConnectionError(
"No response from server.\n%s" % e)
except:
if response:
logger.info(response.text)
raise
if response is not None and raise_for_status:
# raises requests.exceptions.HTTPError
self._raise_for_status(response)
if error:
time.sleep(retry_delay_seconds)
continue
else:
return response
if error is None:
    error = ServerConnectionError(
        'Request timed out before any attempt was made.')
raise error |
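A usage sketch from inside a client method; query_function must be a
zero-argument callable that performs one request, and server_url is a
hypothetical attribute:

    import requests

    def query_function():
        return requests.get(self.server_url + '/api/status/')

    response = self._make_request_to_server(
        query_function, raise_for_status=True,
        time_limit_seconds=2, retry_delay_seconds=0.2)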
<SYSTEM_TASK:>
Returns the correct Input class for a given
<END_TASK>
<USER_TASK:>
Description:
def TaskAttemptInput(input, task_attempt):
"""Returns the correct Input class for a given
data type and gather mode
""" |
(data_type, mode) = _get_input_info(input)
if data_type != 'file':
return NoOpInput(None, task_attempt)
if mode == 'no_gather':
return FileInput(input['data']['contents'], task_attempt)
else:
assert mode.startswith('gather')
return FileListInput(input['data']['contents'], task_attempt) |
<SYSTEM_TASK:>
Run a task asynchronously after at least `delay` seconds
<END_TASK>
<USER_TASK:>
Description:
def execute_with_delay(task_function, *args, **kwargs):
"""Run a task asynchronously after at least delay_seconds
""" |
delay = kwargs.pop('delay', 0)
if get_setting('TEST_DISABLE_ASYNC_DELAY'):
# Delay disabled, run synchronously
logger.debug('Running function "%s" synchronously because '\
'TEST_DISABLE_ASYNC_DELAY is True'
% task_function.__name__)
return task_function(*args, **kwargs)
db.connections.close_all()
task_function.apply_async(args=args, kwargs=kwargs, countdown=delay) |
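A hedged usage sketch; task_function is assumed to be a Celery task (the code
relies on apply_async), and cleanup_task is hypothetical:

    # Run cleanup_task(task_attempt_id=attempt.id) no sooner than 60s from now.
    execute_with_delay(cleanup_task, delay=60, task_attempt_id=attempt.id)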
<SYSTEM_TASK:>
Check for tasks that are no longer sending a heartbeat
<END_TASK>
<USER_TASK:>
Description:
def check_for_stalled_tasks():
"""Check for tasks that are no longer sending a heartbeat
""" |
from api.models.tasks import Task
for task in Task.objects.filter(status_is_running=True):
if not task.is_responsive():
task.system_error()
if task.is_timed_out():
task.timeout_error() |
<SYSTEM_TASK:>
Check for TaskAttempts that were never cleaned up
<END_TASK>
<USER_TASK:>
Description:
def check_for_missed_cleanup():
"""Check for TaskAttempts that were never cleaned up
""" |
if get_setting('PRESERVE_ALL'):
return
from api.models.tasks import TaskAttempt
if get_setting('PRESERVE_ON_FAILURE'):
for task_attempt in TaskAttempt.objects.filter(
status_is_running=False).filter(
status_is_cleaned_up=False).exclude(
status_is_failed=True):
task_attempt.cleanup()
else:
for task_attempt in TaskAttempt.objects.filter(
status_is_running=False).filter(status_is_cleaned_up=False):
task_attempt.cleanup() |
<SYSTEM_TASK:>
This attempts to execute "retryable_function" with exponential backoff
<END_TASK>
<USER_TASK:>
Description:
def execute_with_retries(retryable_function,
retryable_errors,
logger,
human_readable_action_name='Action',
nonretryable_errors=None):
"""This attempts to execute "retryable_function" with exponential backoff
on delay time.
10 retries add up to about 34 minutes of total delay before the last attempt.
"human_readable_action_name" is an optional input to customize the retry message.
""" |
max_retries = 10
attempt = 0
if not nonretryable_errors:
nonretryable_errors = ()
while True:
try:
return retryable_function()
except tuple(nonretryable_errors):
raise
except tuple(retryable_errors) as e:
attempt += 1
if attempt > max_retries:
raise
# Exponential backoff on the retry delay, as suggested by
# https://cloud.google.com/storage/docs/exponential-backoff
delay = 2**attempt + random.random()
logger.info('"%s" failed with error "%s". '\
'Retry number %s of %s in %s seconds'
% (human_readable_action_name, str(e),
attempt, max_retries, delay))
time.sleep(delay) |
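A usage sketch; the download helper and error classes stand in for whatever
the caller considers retryable:

    import logging
    logger = logging.getLogger(__name__)

    content = execute_with_retries(
        lambda: download_blob('bucket', 'data.txt'),  # hypothetical helper
        retryable_errors=(IOError,),
        logger=logger,
        human_readable_action_name='Blob download',
        nonretryable_errors=(ValueError,))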
<SYSTEM_TASK:>
Export a file from Loom to some file storage location.
<END_TASK>
<USER_TASK:>
Description:
def export_file(self, data_object, destination_directory=None,
destination_filename=None, retry=False,
export_metadata=False, export_raw_file=True):
"""Export a file from Loom to some file storage location.
Default destination_directory is the cwd. Default destination_filename is
the filename from the given file data object.
""" |
if not destination_directory:
destination_directory = os.getcwd()
# Get the filename from the data object if none was given
if not destination_filename:
destination_filename = data_object['value']['filename']
destination_file_url = os.path.join(destination_directory,
destination_filename)
logger.info('Exporting file %s@%s ...' % (
data_object['value']['filename'],
data_object['uuid']))
if export_raw_file:
destination = File(
destination_file_url, self.storage_settings, retry=retry)
if destination.exists():
raise FileAlreadyExistsError(
'File already exists at %s' % destination_file_url)
logger.info('...copying file to %s' % (
destination.get_url()))
# Copy from the first file location
file_resource = data_object.get('value')
md5 = file_resource.get('md5')
source_url = data_object['value']['file_url']
File(source_url, self.storage_settings, retry=retry).copy_to(
destination, expected_md5=md5)
data_object['value'] = self._create_new_file_resource(
data_object['value'], destination.get_url())
else:
logger.info('...skipping raw file')
if export_metadata:
data_object['value'].pop('link', None)
data_object['value'].pop('upload_status', None)
destination_metadata_url = destination_file_url + '.metadata.yaml'
logger.info('...writing metadata to %s' % destination_metadata_url)
metadata = yaml.safe_dump(data_object, default_flow_style=False)
metadata_file = File(destination_metadata_url,
self.storage_settings, retry=retry)
metadata_file.write(metadata)
else:
logger.info('...skipping metadata')
logger.info('...finished file export') |
<SYSTEM_TASK:>
Factory method to select the right copier for a given source and destination.
<END_TASK>
<USER_TASK:>
Description:
def Copier(source, destination):
"""Factory method to select the right copier for a given source and destination.
""" |
if source.type == 'local' and destination.type == 'local':
return LocalCopier(source, destination)
elif source.type == 'local' and destination.type == 'google_storage':
return Local2GoogleStorageCopier(source, destination)
elif source.type == 'google_storage' and destination.type == 'local':
return GoogleStorage2LocalCopier(source, destination)
elif source.type == 'google_storage' and destination.type == 'google_storage':
return GoogleStorageCopier(source, destination)
else:
raise FileUtilsError('Could not find method to copy from source '\
'"%s" to destination "%s".' % (source, destination)) |
<SYSTEM_TASK:>
Scan the data tree on the given data_channel to create a corresponding
<END_TASK>
<USER_TASK:>
Description:
def create_from_data_channel(cls, data_channel):
"""Scan the data tree on the given data_channel to create a corresponding
InputSetGenerator tree.
""" |
gather_depth = cls._get_gather_depth(data_channel)
generator = InputSetGeneratorNode()
for (data_path, data_node) in data_channel.get_ready_data_nodes(
[], gather_depth):
flat_data_node = data_node.flattened_clone(save=False)
input_item = InputItem(
flat_data_node, data_channel.channel,
data_channel.as_channel, mode=data_channel.mode)
generator._add_input_item(data_path, input_item)
return generator |
<SYSTEM_TASK:>
Returns the correct Output class for a given
<END_TASK>
<USER_TASK:>
Description:
def TaskAttemptOutput(output, task_attempt):
"""Returns the correct Output class for a given
data type, source type, and scatter mode
""" |
(data_type, mode, source_type) = _get_output_info(output)
if data_type == 'file':
if mode == 'scatter':
assert source_type in ['filenames', 'glob'], \
'source type "%s" not allowed' % source_type
if source_type == 'filenames':
return FileListScatterOutput(output, task_attempt)
return GlobScatterOutput(output, task_attempt)
else:
assert mode == 'no_scatter'
assert source_type == 'filename', \
'source type "%s" not allowed' % source_type
return FileOutput(output, task_attempt)
else: # data_type is non-file
if mode == 'scatter':
assert source_type in [
'filename', 'filenames', 'glob', 'stream'], \
'source type "%s" not allowed' % source_type
if source_type == 'filename':
return FileContentsScatterOutput(output, task_attempt)
if source_type == 'filenames':
return FileListContentsScatterOutput(output, task_attempt)
if source_type == 'glob':
return GlobContentsScatterOutput(output, task_attempt)
assert source_type == 'stream'
return StreamScatterOutput(output, task_attempt)
else:
assert mode == 'no_scatter'
assert source_type in ['filename', 'stream'], \
'source type "%s" not allowed' % source_type
if source_type == 'filename':
return FileContentsOutput(output, task_attempt)
assert source_type == 'stream'
return StreamOutput(output, task_attempt) |
<SYSTEM_TASK:>
Adds a new leaf node at the given index with the given data_object
<END_TASK>
<USER_TASK:>
Description:
def add_leaf(self, index, data_object, save=False):
"""Adds a new leaf node at the given index with the given data_object
""" |
assert self.type == data_object.type, 'data type mismatch'
if self._get_child_by_index(index) is not None:
raise NodeAlreadyExistsError(
'Leaf data node already exists at this index')
else:
data_node = DataNode(
parent=self,
index=index,
data_object=data_object,
type=self.type)
if save:
data_node.full_clean()
data_node.save()
self._add_unsaved_child(data_node)
return data_node |
<SYSTEM_TASK:>
Verify that the given index is consistent with the degree of the node.
<END_TASK>
<USER_TASK:>
Description:
def _check_index(self, index):
"""Verify that the given index is consistent with the degree of the node.
""" |
if self.degree is None:
raise UnknownDegreeError(
'Cannot access child DataNode on a parent with degree of None. '\
'Set the degree on the parent first.')
if index < 0 or index >= self.degree:
raise IndexOutOfRangeError(
'Out of range index %s. DataNode parent has degree %s, so index '\
'should be in the range 0 to %s' % (
index, self.degree, self.degree-1)) |
<SYSTEM_TASK:>
Instance names must start with a lowercase letter.
<END_TASK>
<USER_TASK:>
Description:
def _sanitize_instance_name(name, max_length):
"""Instance names must start with a lowercase letter.
All following characters must be a dash, lowercase letter,
or digit.
""" |
name = str(name).lower() # make all letters lowercase
name = re.sub(r'[^-a-z0-9]', '', name) # remove invalid characters
# remove non-lowercase letters from the beginning
name = re.sub(r'^[^a-z]+', '', name)
name = name[:max_length]
name = re.sub(r'-+$', '', name) # remove hyphens from the end
return name |
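Worked examples, derived by applying the rules above by hand:

    _sanitize_instance_name('My_Instance.01!', 32)  # -> 'myinstance01'
    _sanitize_instance_name('123-worker-', 32)      # -> 'worker'
    _sanitize_instance_name('run-2019-01-02', 8)    # -> 'run-2019'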
<SYSTEM_TASK:>
Reads the settings from the gce.ini file.
<END_TASK>
<USER_TASK:>
Description:
def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the same
directory as this script.
""" |
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
# TODO(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config |
<SYSTEM_TASK:>
Determine inventory options. Environment variables always
<END_TASK>
<USER_TASK:>
Description:
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files.""" |
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type |
<SYSTEM_TASK:>
Determine the GCE authorization settings and return a
<END_TASK>
<USER_TASK:>
Description:
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
""" |
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found:
args = [
self.config.get('gce','gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
)
return gce |
<SYSTEM_TASK:>
Stream stdout and stderr from the task container to this
<END_TASK>
<USER_TASK:>
Description:
def _stream_docker_logs(self):
"""Stream stdout and stderr from the task container to this
process's stdout and stderr, respectively.
""" |
thread = threading.Thread(target=self._stderr_stream_worker)
thread.start()
for line in self.docker_client.logs(self.container, stdout=True,
stderr=False, stream=True):
sys.stdout.write(line)
thread.join() |
<SYSTEM_TASK:>
Because we allow template ID string values, where
<END_TASK>
<USER_TASK:>
Description:
def to_internal_value(self, data):
"""Because we allow template ID string values, where
serializers normally expect a dict, convert any string ID
to a dict before running the standard validation.
""" |
converted_data = _convert_template_id_to_dict(data)
return super(TemplateSerializer, self)\
.to_internal_value(converted_data) |
<SYSTEM_TASK:>
Coerce an arbitrary version string into a semver-compatible one.
<END_TASK>
<USER_TASK:>
Description:
def coerce(cls, version_string, partial=False):
"""Coerce an arbitrary version string into a semver-compatible one.
The rule is:
- If not enough components, fill minor/patch with zeroes; unless
partial=True
- If more than 3 dot-separated components, extra components are "build"
data. If some "build" data already appeared, append it to the
extra components
Examples:
>>> Version.coerce('0.1')
Version(0, 1, 0)
>>> Version.coerce('0.1.2.3')
Version(0, 1, 2, (), ('3',))
>>> Version.coerce('0.1.2.3+4')
Version(0, 1, 2, (), ('3', '4'))
>>> Version.coerce('0.1+2-3+4_5')
Version(0, 1, 0, (), ('2-3', '4-5'))
""" |
base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?')
match = base_re.match(version_string)
if not match:
raise ValueError(
"Version string lacks a numerical component: %r"
% version_string
)
version = version_string[:match.end()]
if not partial:
# We need a not-partial version.
while version.count('.') < 2:
version += '.0'
if match.end() == len(version_string):
return Version(version, partial=partial)
rest = version_string[match.end():]
# Cleanup the 'rest'
rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest)
if rest[0] == '+':
# A 'build' component
prerelease = ''
build = rest[1:]
elif rest[0] == '.':
# An extra version component, probably 'build'
prerelease = ''
build = rest[1:]
elif rest[0] == '-':
rest = rest[1:]
if '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
elif '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
build = build.replace('+', '.')
if prerelease:
version = '%s-%s' % (version, prerelease)
if build:
version = '%s+%s' % (version, build)
return cls(version, partial=partial) |
<SYSTEM_TASK:>
Retrieve comparison methods to apply on version components.
<END_TASK>
<USER_TASK:>
Description:
def _comparison_functions(cls, partial=False):
"""Retrieve comparison methods to apply on version components.
This is a private API.
Args:
partial (bool): whether to provide 'partial' or 'strict' matching.
Returns:
5-tuple of cmp-like functions.
""" |
def prerelease_cmp(a, b):
"""Compare prerelease components.
Special rule: a version without prerelease component has higher
precedence than one with a prerelease component.
"""
if a and b:
return identifier_list_cmp(a, b)
elif a:
# Versions with prerelease field have lower precedence
return -1
elif b:
return 1
else:
return 0
def build_cmp(a, b):
"""Compare build metadata.
Special rule: there is no ordering on build metadata.
"""
if a == b:
return 0
else:
return NotImplemented
def make_optional(orig_cmp_fun):
"""Convert a cmp-like function to consider 'None == *'."""
@functools.wraps(orig_cmp_fun)
def alt_cmp_fun(a, b):
if a is None or b is None:
return 0
return orig_cmp_fun(a, b)
return alt_cmp_fun
if partial:
return [
base_cmp, # Major is still mandatory
make_optional(base_cmp),
make_optional(base_cmp),
make_optional(prerelease_cmp),
make_optional(build_cmp),
]
else:
return [
base_cmp,
base_cmp,
base_cmp,
prerelease_cmp,
build_cmp,
] |
<SYSTEM_TASK:>
Helper for comparison.
<END_TASK>
<USER_TASK:>
Description:
def __compare_helper(self, other, condition, notimpl_target):
"""Helper for comparison.
Allows the caller to provide:
- The condition
- The return value if the comparison is meaningless (ie versions with
build metadata).
""" |
if not isinstance(other, self.__class__):
return NotImplemented
cmp_res = self.__cmp__(other)
if cmp_res is NotImplemented:
return notimpl_target
return condition(cmp_res) |
<SYSTEM_TASK:>
Check whether a Version satisfies the Spec.
<END_TASK>
<USER_TASK:>
Description:
def match(self, version):
"""Check whether a Version satisfies the Spec.""" |
return all(spec.match(version) for spec in self.specs) |
<SYSTEM_TASK:>
Select the best compatible version among an iterable of options.
<END_TASK>
<USER_TASK:>
Description:
def select(self, versions):
"""Select the best compatible version among an iterable of options.""" |
options = list(self.filter(versions))
if options:
return max(options)
return None |
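A doctest-style sketch in the spirit of the coerce examples above, using this
module's Spec and Version classes:

    >>> best = Spec('>=0.11,<0.12').select(
    ...     [Version('0.11.0'), Version('0.11.3'), Version('0.12.0')])
    >>> str(best)
    '0.11.3'
    >>> Spec('>=2.0').select([Version('1.0.0')]) is None
    True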
<SYSTEM_TASK:>
Converts any value to a base.Version field.
<END_TASK>
<USER_TASK:>
Description:
def to_python(self, value):
"""Converts any value to a base.Version field.""" |
if value is None or value == '':
return value
if isinstance(value, base.Version):
return value
if self.coerce:
return base.Version.coerce(value, partial=self.partial)
else:
return base.Version(value, partial=self.partial) |
<SYSTEM_TASK:>
Converts any value to a base.Spec field.
<END_TASK>
<USER_TASK:>
Description:
def to_python(self, value):
"""Converts any value to a base.Spec field.""" |
if value is None or value == '':
return value
if isinstance(value, base.Spec):
return value
return base.Spec(value) |
<SYSTEM_TASK:>
Make the drone move left.
<END_TASK>
<USER_TASK:>
Description:
def move_left(self):
"""Make the drone move left.""" |
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0) |
<SYSTEM_TASK:>
Make the drone descend.
<END_TASK>
<USER_TASK:>
Description:
def move_down(self):
"""Make the drone decent downwards.""" |
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0) |
<SYSTEM_TASK:>
Make the drone move forward.
<END_TASK>
<USER_TASK:>
Description:
def move_forward(self):
"""Make the drone move forward.""" |
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0) |
<SYSTEM_TASK:>
Make the drone rotate left.
<END_TASK>
<USER_TASK:>
Description:
def turn_left(self):
"""Make the drone rotate left.""" |
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed) |
<SYSTEM_TASK:>
Wrapper for the low level at commands.
<END_TASK>
<USER_TASK:>
Description:
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
""" |
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start() |
<SYSTEM_TASK:>
Shutdown the drone.
<END_TASK>
<USER_TASK:>
Description:
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
""" |
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join() |
<SYSTEM_TASK:>
Set configuration parameters of the drone.
<END_TASK>
<USER_TASK:>
Description:
def config(host, seq, option, value):
"""Set configuration parameters of the drone.""" |
at(host, 'CONFIG', seq, [str(option), str(value)]) |
<SYSTEM_TASK:>
Sends control values directly to the engines, overriding control loops.
<END_TASK>
<USER_TASK:>
Description:
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
""" |
at(host, 'PWM', seq, [m1, m2, m3, m4]) |
<SYSTEM_TASK:>
Control the drone's LED.
<END_TASK>
<USER_TASK:>
Description:
def led(host, seq, anim, f, d):
"""
Control the drone's LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in Hz of the animation
d -- Integer: total duration in seconds of the animation
""" |
at(host, 'LED', seq, [anim, float(f), d]) |
<SYSTEM_TASK:>
Simple helper to get the value of an instance's attribute if it exists.
<END_TASK>
<USER_TASK:>
Description:
def _get_attr_value(instance, attr, default=None):
"""
Simple helper to get the value of an instance's attribute if it exists.
If the instance attribute is callable it will be called and the result will
be returned.
Optionally accepts a default value to return if the attribute is missing.
Defaults to `None`
>>> class Foo(object):
... bar = 'baz'
... def hi(self):
... return 'hi'
>>> f = Foo()
>>> _get_attr_value(f, 'bar')
'baz'
>>> _get_attr_value(f, 'xyz')
>>> _get_attr_value(f, 'xyz', False)
False
>>> _get_attr_value(f, 'hi')
'hi'
""" |
value = default
if hasattr(instance, attr):
value = getattr(instance, attr)
if callable(value):
value = value()
return value |
<SYSTEM_TASK:>
Audits the provided customer's subscription against stripe and returns a pair
<END_TASK>
<USER_TASK:>
Description:
def audit_customer_subscription(customer, unknown=True):
"""
Audits the provided customer's subscription against stripe and returns a pair
that contains a boolean and a result type.
Default result types can be found in zebra.conf.defaults and can be
overridden in your project's settings.
""" |
if (hasattr(customer, 'suspended') and customer.suspended):
result = AUDIT_RESULTS['suspended']
else:
if hasattr(customer, 'subscription'):
try:
result = AUDIT_RESULTS[customer.subscription.status]
except KeyError, err:
# TODO should this be a more specific exception class?
raise Exception("Unable to locate a result set for \
subscription status %s in ZEBRA_AUDIT_RESULTS") % str(err)
else:
result = AUDIT_RESULTS['no_subscription']
return result |
<SYSTEM_TASK:>
Check if obj is number.
<END_TASK>
<USER_TASK:>
Description:
def is_number(obj):
"""Check if obj is number.""" |
return isinstance(obj, (int, float, np.int_, np.float_)) |
<SYSTEM_TASK:>
Return list of 3-tuples from
<END_TASK>
<USER_TASK:>
Description:
def get_3_tuple_list(self,obj,default=None):
"""Return list of 3-tuples from
a sequence of sequences - each item is mapped to a 3-tuple,
a flat sequence - it is split into consecutive 3-tuples, or
a number - it is mapped to a single 3-tuple
""" |
if is_sequence2(obj):
return [self.get_3_tuple(o,default) for o in obj]
elif is_sequence(obj):
return [self.get_3_tuple(obj[i:i+3],default) for i in range(0,len(obj),3)]
else:
return [self.get_3_tuple(obj,default)] |
<SYSTEM_TASK:>
Iterate through the application configuration and instantiate
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""Iterate through the application configuration and instantiate
the services.
""" |
requested_services = set(
svc.lower() for svc in current_app.config.get('BOTO3_SERVICES', [])
)
region = current_app.config.get('BOTO3_REGION')
sess_params = {
'aws_access_key_id': current_app.config.get('BOTO3_ACCESS_KEY'),
'aws_secret_access_key': current_app.config.get('BOTO3_SECRET_KEY'),
'profile_name': current_app.config.get('BOTO3_PROFILE'),
'region_name': region
}
sess = boto3.session.Session(**sess_params)
try:
cns = {}
for svc in requested_services:
# Check for optional parameters
params = current_app.config.get(
'BOTO3_OPTIONAL_PARAMS', {}
).get(svc, {})
# Get session params and override them with kwargs
# `profile_name` cannot be passed to clients and resources
kwargs = sess_params.copy()
kwargs.update(params.get('kwargs', {}))
del kwargs['profile_name']
# Override the region if one is defined as an argument
args = params.get('args', [])
if len(args) >= 1:
del kwargs['region_name']
if not(isinstance(args, list) or isinstance(args, tuple)):
args = [args]
# Create resource or client
if svc in sess.get_available_resources():
cns.update({svc: sess.resource(svc, *args, **kwargs)})
else:
cns.update({svc: sess.client(svc, *args, **kwargs)})
except UnknownServiceError:
raise
return cns |
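A sketch of the Flask configuration this method reads; the service names and
overrides are illustrative:

    app.config['BOTO3_SERVICES'] = ['s3', 'sqs']
    app.config['BOTO3_REGION'] = 'us-east-1'
    app.config['BOTO3_PROFILE'] = 'default'
    # Optional per-service overrides, keyed by service name; a positional
    # arg replaces the region for that service.
    app.config['BOTO3_OPTIONAL_PARAMS'] = {
        's3': {'args': ('eu-west-1',), 'kwargs': {'use_ssl': True}},
    }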
<SYSTEM_TASK:>
date to unix timestamp in milliseconds
<END_TASK>
<USER_TASK:>
Description:
def date_to_timestamp(date):
"""
date to unix timestamp in milliseconds
""" |
date_tuple = date.timetuple()
timestamp = calendar.timegm(date_tuple) * 1000
return timestamp |
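Doctest-style example (the timetuple is interpreted as UTC):

    >>> import datetime
    >>> date_to_timestamp(datetime.date(1970, 1, 2))
    86400000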
<SYSTEM_TASK:>
join base_url and some GET parameters into one URL; it can optionally be absolute
<END_TASK>
<USER_TASK:>
Description:
def url_path(request, base_url=None, is_full=False, *args, **kwargs):
"""
join base_url and some GET parameters into one URL; it can optionally be absolute
usage example:
c['current_url'] = url_path(request, is_full=False)
...
<a href="{{ current_url }}">Laboratory number</a>
""" |
if not base_url:
base_url = request.path
if is_full:
protocol = 'https' if request.is_secure() else 'http'
base_url = '%s://%s%s' % (protocol, request.get_host(), base_url)
params = url_params(request, *args, **kwargs)
url = '%s%s' % (base_url, params)
return url |
<SYSTEM_TASK:>
create a string with the GET parameters of the request
<END_TASK>
<USER_TASK:>
Description:
def url_params(request, except_params=None, as_is=False):
"""
create a string with the GET parameters of the request
usage example:
c['sort_url'] = url_params(request, except_params=('sort',))
...
<a href="{{ sort_url }}&sort=lab_number">Laboratory number</a>
""" |
if not request.GET:
return ''
params = []
for key, value in request.GET.items():
if except_params and key not in except_params:
for v in request.GET.getlist(key):
params.append('%s=%s' % (key, urlquote(v)))
str_params = '?' + '&'.join(params)
if not as_is:
    str_params = urlquote(str_params)
return mark_safe(str_params) |
<SYSTEM_TASK:>
Look up gender for a list of names.
<END_TASK>
<USER_TASK:>
Description:
def get(self, names, country_id=None, language_id=None, retheader=False):
"""
Look up gender for a list of names.
Can optionally refine search with locale info.
May make multiple requests if there are more names than
can be retrieved in one call.
:param names: List of names.
:type names: Iterable[str]
:param country_id: Optional ISO 3166-1 alpha-2 country code.
:type country_id: Optional[str]
:param language_id: Optional ISO 639-1 language code.
:type language_id: Optional[str]
:param retheader: Optional
:type retheader: Optional[boolean]
:return:
If retheader is False:
List of dicts containing 'name', 'gender',
'probability', 'count' keys. If 'gender' is None,
'probability' and 'count' will be omitted.
else:
A dict containing 'data' and 'headers' keys.
Data is the same as when retheader is False.
Headers are the response header
(a requests.structures.CaseInsensitiveDict).
If multiple requests were made,
the header will be from the last one.
:rtype: Union[dict, Sequence[dict]]
:raises GenderizeException: if API server returns HTTP error code.
""" |
responses = [
self._get_chunk(name_chunk, country_id, language_id)
for name_chunk
in _chunked(names, Genderize.BATCH_SIZE)
]
data = list(chain.from_iterable(
response.data for response in responses
))
if retheader:
return {
"data": data,
"headers": responses[-1].headers,
}
else:
return data |
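A usage sketch, assuming a Genderize client instance; the response values are
illustrative placeholders:

    client = Genderize()
    client.get(['James', 'Eva'], country_id='US')
    # -> [{'name': 'James', 'gender': 'male', 'probability': ..., 'count': ...},
    #     {'name': 'Eva', 'gender': 'female', ...}]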
<SYSTEM_TASK:>
Creates a proxy for a variable.
<END_TASK>
<USER_TASK:>
Description:
def _make_proxy(self, varname, parent=None, constructor=MlabObjectProxy):
"""Creates a proxy for a variable.
XXX create and cache nested proxies also here.
""" |
# FIXME why not just use gensym here?
proxy_val_name = "PROXY_VAL%d__" % self._proxy_count
self._proxy_count += 1
mlabraw.eval(self._session, "%s = %s;" % (proxy_val_name, varname))
res = constructor(self, proxy_val_name, parent)
self._proxies[proxy_val_name] = res
return res |
<SYSTEM_TASK:>
Semi-raw execution of a matlab command.
<END_TASK>
<USER_TASK:>
Description:
def _do(self, cmd, *args, **kwargs):
"""Semi-raw execution of a matlab command.
Smartly handle calls to matlab, figure out what to do with `args`,
and when to use function call syntax and not.
If no `args` are specified, the ``cmd`` not ``result = cmd()`` form is
used in Matlab -- this also makes literal Matlab commands legal
(eg. cmd=``get(gca, 'Children')``).
If ``nout=0`` is specified, the Matlab command is executed as
procedure, otherwise it is executed as function (default), nout
specifying how many values should be returned (default 1).
**Beware that if you don't specify ``nout=0`` for a `cmd` that
never returns a value, an error will be raised** (because assigning a
variable to a call that doesn't return a value is illegal in matlab).
``cast`` specifies which typecast should be applied to the result
(e.g. `int`), it defaults to none.
XXX: should we add ``parens`` parameter?
""" |
handle_out = kwargs.get('handle_out', _flush_write_stdout)
#self._session = self._session or mlabraw.open()
# HACK
if self._autosync_dirs:
mlabraw.eval(self._session, "cd('%s');" % os.getcwd().replace("'", "''"))
nout = kwargs.get('nout', 1)
#XXX what to do with matlab screen output
argnames = []
tempargs = []
try:
for count, arg in enumerate(args):
if isinstance(arg, MlabObjectProxy):
argnames.append(arg._name)
else:
nextName = 'arg%d__' % count
argnames.append(nextName)
tempargs.append(nextName)
# have to convert these by hand
## try:
## arg = self._as_mlabable_type(arg)
## except TypeError:
## raise TypeError("Illegal argument type (%s.:) for %d. argument" %
## (type(arg), type(count)))
mlabraw.put(self._session, argnames[-1], arg)
if args:
cmd = "%s(%s)%s" % (cmd, ", ".join(argnames),
('',';')[kwargs.get('show',0)])
# got three cases for nout:
# 0 -> None, 1 -> val, >1 -> [val1, val2, ...]
if nout == 0:
handle_out(mlabraw.eval(self._session, cmd))
return
# deal with matlab-style multiple value return
resSL = ((["RES%d__" % i for i in range(nout)]))
handle_out(mlabraw.eval(self._session, '[%s]=%s;' % (", ".join(resSL), cmd)))
res = self._get_values(resSL)
if nout == 1: res = res[0]
else: res = tuple(res)
if kwargs.has_key('cast'):
if nout == 0: raise TypeError("Can't cast: 0 nout")
return kwargs['cast'](res)
else:
return res
finally:
if len(tempargs) and self._clear_call_args:
mlabraw.eval(self._session, "clear('%s');" %
"','".join(tempargs)) |
<SYSTEM_TASK:>
r"""Directly access a variable in matlab space.
<END_TASK>
<USER_TASK:>
Description:
def _get(self, name, remove=False):
r"""Directly access a variable in matlab space.
This should normally not be used by user code.""" |
# FIXME should this really be needed in normal operation?
if name in self._proxies: return self._proxies[name]
varname = name
vartype = self._var_type(varname)
if vartype in self._mlabraw_can_convert:
var = mlabraw.get(self._session, varname)
if isinstance(var, ndarray):
if self._flatten_row_vecs and numpy.shape(var)[0] == 1:
var.shape = var.shape[1:2]
elif self._flatten_col_vecs and numpy.shape(var)[1] == 1:
var.shape = var.shape[0:1]
if self._array_cast:
var = self._array_cast(var)
else:
var = None
if self._dont_proxy.get(vartype):
# manual conversions may fail (e.g. for multidimensional
# cell arrays), in that case just fall back on proxying.
try:
var = self._manually_convert(varname, vartype)
except MlabConversionError: pass
if var is None:
# we can't convert this to a python object, so we just
# create a proxy, and don't delete the real matlab
# reference until the proxy is garbage collected
var = self._make_proxy(varname)
if remove:
mlabraw.eval(self._session, "clear('%s');" % varname)
return var |
<SYSTEM_TASK:>
r"""Directly set a variable `name` in matlab space to `value`.
<END_TASK>
<USER_TASK:>
Description:
def _set(self, name, value):
r"""Directly set a variable `name` in matlab space to `value`.
This should normally not be used in user code.""" |
if isinstance(value, MlabObjectProxy):
mlabraw.eval(self._session, "%s = %s;" % (name, value._name))
else:
## mlabraw.put(self._session, name, self._as_mlabable_type(value))
mlabraw.put(self._session, name, value) |
<SYSTEM_TASK:>
Dispatches the matlab COM client.
<END_TASK>
<USER_TASK:>
Description:
def open(self, visible=False):
""" Dispatches the matlab COM client.
Note: If this method fails, try running matlab with the -regserver flag.
""" |
if self.client:
raise MatlabConnectionError('Matlab(TM) COM client is still active. Use close to '
'close it')
self.client = win32com.client.Dispatch('matlab.application')
self.client.visible = visible |
<SYSTEM_TASK:>
Loads the requested variables from the matlab com client.
<END_TASK>
<USER_TASK:>
Description:
def get(self, names_to_get, convert_to_numpy=True):
""" Loads the requested variables from the matlab com client.
names_to_get can be either a variable name or a list of variable names.
If it is a variable name, the value is returned.
If it is a list, a dictionary of variable_name -> value is returned.
If convert_to_numpy is true, the method will convert all array values to numpy
arrays. Scalars are left as regular python objects.
""" |
self._check_open()
single_item = isinstance(names_to_get, (unicode, str))
if single_item:
names_to_get = [names_to_get]
ret = {}
for name in names_to_get:
ret[name] = self.client.GetWorkspaceData(name, 'base')
# TODO(daniv): Do we really want to reduce dimensions like that? what if this a row vector?
while isinstance(ret[name], (tuple, list)) and len(ret[name]) == 1:
ret[name] = ret[name][0]
if convert_to_numpy and isinstance(ret[name], (tuple, list)):
ret[name] = np.array(ret[name])
if single_item:
return ret.values()[0]
return ret |
<SYSTEM_TASK:>
Loads a dictionary of variable names into the matlab com client.
<END_TASK>
<USER_TASK:>
Description:
def put(self, name_to_val):
""" Loads a dictionary of variable names into the matlab com client.
""" |
self._check_open()
for name, val in name_to_val.iteritems():
# First try to put data as a matrix:
try:
self.client.PutFullMatrix(name, 'base', val, None)
except:
self.client.PutWorkspaceData(name, 'base', val) |
<SYSTEM_TASK:>
Tries to guess matlab's version according to its process path.
<END_TASK>
<USER_TASK:>
Description:
def find_matlab_version(process_path):
""" Tries to guess matlab's version according to its process path.
If we couldn't guess the version, None is returned.
""" |
bin_path = os.path.dirname(process_path)
matlab_path = os.path.dirname(bin_path)
matlab_dir_name = os.path.basename(matlab_path)
version = matlab_dir_name
if not is_linux():
version = matlab_dir_name.replace('MATLAB_', '').replace('.app', '')
if not is_valid_release_version(version):
return None
return version |
<SYSTEM_TASK:>
Loads a dictionary of variable names into the matlab shell.
<END_TASK>
<USER_TASK:>
Description:
def put(self, name_to_val, oned_as='row', on_new_output=None):
""" Loads a dictionary of variable names into the matlab shell.
oned_as is the same as in scipy.io.matlab.savemat function:
oned_as : {'column', 'row'}, optional
If 'column', write 1-D numpy arrays as column vectors.
If 'row', write 1D numpy arrays as row vectors.
""" |
self._check_open()
# We can't give stdin to mlabio.savemat because it needs random access :(
temp = StringIO()
mlabio.savemat(temp, name_to_val, oned_as=oned_as)
temp.seek(0)
temp_str = temp.read()
temp.close()
self.process.stdin.write('load stdio;\n')
self._read_until('ack load stdio\n', on_new_output=on_new_output)
self.process.stdin.write(temp_str)
#print 'sent %d kb' % (len(temp_str) / 1024)
self._read_until('ack load finished\n', on_new_output=on_new_output)
self._sync_output(on_new_output=on_new_output) |
<SYSTEM_TASK:>
Loads the requested variables from the matlab shell.
<END_TASK>
<USER_TASK:>
Description:
def get(self,
names_to_get,
extract_numpy_scalars=True,
on_new_output=None):
""" Loads the requested variables from the matlab shell.
names_to_get can be either a variable name, a list of variable names, or
None.
If it is a variable name, the value is returned.
If it is a list, a dictionary of variable_name -> value is returned.
If it is None, a dictionary with all variables is returned.
If extract_numpy_scalars is true, the method will convert numpy scalars
(0-dimension arrays) to a regular python variable.
""" |
self._check_open()
single_item = isinstance(names_to_get, (unicode, str))
if single_item:
names_to_get = [names_to_get]
if names_to_get is None:
self.process.stdin.write('save stdio;\n')
else:
# Make sure that we throw an exception if the names are not defined.
for name in names_to_get:
self.eval('%s;' % name, print_expression=False, on_new_output=on_new_output)
#print 'save(\'stdio\', \'%s\');\n' % '\', \''.join(names_to_get)
self.process.stdin.write(
"save('stdio', '%s', '-v7');\n" % '\', \''.join(names_to_get))
# We have to read to a temp buffer because mlabio.loadmat needs
# random access :(
self._read_until('start_binary\n', on_new_output=on_new_output)
#print 'got start_binary'
temp_str = self._sync_output(on_new_output=on_new_output)
#print 'got all outout'
# Remove expected output and "\n>>"
# TODO(dani): Get rid of the unecessary copy.
# MATLAB 2010a adds an extra >> so we need to remove more spaces.
if self.matlab_version == (2010, 'a'):
temp_str = temp_str[:-len(self.expected_output_end)-6]
else:
temp_str = temp_str[:-len(self.expected_output_end)-3]
temp = StringIO(temp_str)
#print ('____')
#print len(temp_str)
#print ('____')
ret = mlabio.loadmat(temp, chars_as_strings=True, squeeze_me=True)
#print '******'
#print ret
#print '******'
temp.close()
if single_item:
return ret.values()[0]
for key in ret.iterkeys():
while ret[key].shape and ret[key].shape[-1] == 1:
ret[key] = ret[key][0]
if extract_numpy_scalars:
if isinstance(ret[key], np.ndarray) and not ret[key].shape:
ret[key] = ret[key].tolist()
#print 'done'
return ret |
<SYSTEM_TASK:>
Pass `file` to `func` and ensure the file is closed afterwards. If
<END_TASK>
<USER_TASK:>
Description:
def withFile(file, func, mode='r', expand=False):
"""Pass `file` to `func` and ensure the file is closed afterwards. If
`file` is a string, open according to `mode`; if `expand` is true also
expand user and vars.
""" |
file = _normalizeToFile(file, mode=mode, expand=expand)
try: return func(file)
finally: file.close() |
<SYSTEM_TASK:>
r"""Return ``file`` a list of chomped lines. See `slurpLines`.
<END_TASK>
<USER_TASK:>
Description:
def slurpChompedLines(file, expand=False):
r"""Return ``file`` a list of chomped lines. See `slurpLines`.""" |
f=_normalizeToFile(file, "r", expand)
try: return list(chompLines(f))
finally: f.close() |
<SYSTEM_TASK:>
Create a new tempfile, write ``s`` to it and return the filename.
<END_TASK>
<USER_TASK:>
Description:
def strToTempfile(s, suffix=None, prefix=None, dir=None, binary=False):
"""Create a new tempfile, write ``s`` to it and return the filename.
`suffix`, `prefix` and `dir` are like in `tempfile.mkstemp`.
""" |
fd, filename = tempfile.mkstemp(**dict((k,v) for (k,v) in
[('suffix',suffix),('prefix',prefix),('dir', dir)]
if v is not None))
spitOut(s, fd, binary)
return filename |
<SYSTEM_TASK:>
Returns the elements in `iterable` that aren't unique; stops after it has found
<END_TASK>
<USER_TASK:>
Description:
def notUnique(iterable, reportMax=INF):
"""Returns the elements in `iterable` that aren't unique; stops after it found
`reportMax` non-unique elements.
Examples:
>>> list(notUnique([1,1,2,2,3,3]))
[1, 2, 3]
>>> list(notUnique([1,1,2,2,3,3], 1))
[1]
""" |
hash = {}
n=0
if reportMax < 1:
raise ValueError("`reportMax` must be >= 1 and is %r" % reportMax)
for item in iterable:
count = hash[item] = hash.get(item, 0) + 1
if count > 1:
yield item
n += 1
if n >= reportMax:
return |
<SYSTEM_TASK:>
r"""Divide `iterable` in `n` lists, so that every `n`th element belongs to
<END_TASK>
<USER_TASK:>
Description:
def unweave(iterable, n=2):
r"""Divide `iterable` in `n` lists, so that every `n`th element belongs to
list `n`.
Example:
>>> unweave((1,2,3,4,5), 3)
[[1, 4], [2, 5], [3]]
""" |
res = [[] for i in range(n)]
i = 0
for x in iterable:
res[i % n].append(x)
i += 1
return res |
<SYSTEM_TASK:>
r"""Return an inverted version of dict `d`, so that values become keys and
<END_TASK>
<USER_TASK:>
Description:
def invertDict(d, allowManyToOne=False):
r"""Return an inverted version of dict `d`, so that values become keys and
vice versa. If multiple keys in `d` have the same value an error is
raised, unless `allowManyToOne` is true, in which case one of those
key-value pairs is chosen at random for the inversion.
Examples:
>>> invertDict({1: 2, 3: 4}) == {2: 1, 4: 3}
True
>>> invertDict({1: 2, 3: 2})
Traceback (most recent call last):
File "<stdin>", line 1, in ?
ValueError: d can't be inverted!
>>> invertDict({1: 2, 3: 2}, allowManyToOne=True).keys()
[2]
""" |
res = dict(izip(d.itervalues(), d.iterkeys()))
if not allowManyToOne and len(res) != len(d):
raise ValueError("d can't be inverted!")
return res |
<SYSTEM_TASK:>
r"""Return the set union of `seq1` and `seqs`, duplicates removed, order random.
<END_TASK>
<USER_TASK:>
Description:
def union(seq1=(), *seqs):
r"""Return the set union of `seq1` and `seqs`, duplicates removed, order random.
Examples:
>>> union()
[]
>>> union([1,2,3])
[1, 2, 3]
>>> union([1,2,3], {1:2, 5:1})
[1, 2, 3, 5]
>>> union((1,2,3), ['a'], "bcd")
['a', 1, 2, 3, 'd', 'b', 'c']
>>> union([1,2,3], iter([0,1,1,1]))
[0, 1, 2, 3]
""" |
if not seqs: return list(seq1)
res = set(seq1)
for seq in seqs:
res.update(set(seq))
return list(res) |
<SYSTEM_TASK:>
r"""Return a list with all elements in `seq2` removed from `seq1`, order
<END_TASK>
<USER_TASK:>
Description:
def without(seq1, seq2):
r"""Return a list with all elements in `seq2` removed from `seq1`, order
preserved.
Examples:
>>> without([1,2,3,1,2], [1])
[2, 3, 2]
""" |
if isSet(seq2): d2 = seq2
else: d2 = set(seq2)
return [elt for elt in seq1 if elt not in d2] |
<SYSTEM_TASK:>
r"""Like `some`, but only returns `True` if all the elements of `iterables`
<END_TASK>
<USER_TASK:>
Description:
def every(predicate, *iterables):
r"""Like `some`, but only returns `True` if all the elements of `iterables`
satisfy `predicate`.
Examples:
>>> every(bool, [])
True
>>> every(bool, [0])
False
>>> every(bool, [1,1])
True
>>> every(operator.eq, [1,2,3],[1,2])
True
>>> every(operator.eq, [1,2,3],[0,2])
False
""" |
try:
if len(iterables) == 1: ifilterfalse(predicate, iterables[0]).next()
else: ifilterfalse(bool, starmap(predicate, izip(*iterables))).next()
except StopIteration: return True
else: return False |
<SYSTEM_TASK:>
Return the variables pickled into `filename` with `saveVars`
<END_TASK>
<USER_TASK:>
Description:
def loadDict(filename):
"""Return the variables pickled pickled into `filename` with `saveVars`
as a dict.""" |
filename = os.path.expanduser(filename)
if not splitext(filename)[1]: filename += ".bpickle"
f = None
try:
f = open(filename, "rb")
varH = cPickle.load(f)
finally:
if f: f.close()
return varH |
<SYSTEM_TASK:>
r"""Create a short info string detailing how a program was invoked. This is
<END_TASK>
<USER_TASK:>
Description:
def runInfo(prog=None,vers=None,date=None,user=None,dir=None,args=None):
r"""Create a short info string detailing how a program was invoked. This is
meant to be added to a history comment field of a data file where it is
important to keep track of what programs modified it and how.
!!!:`args` should be a **``list``** not a ``str``.""" |
return "%(prog)s %(vers)s;" \
" run %(date)s by %(usr)s in %(dir)s with: %(args)s'n" % \
mkDict(prog=prog or sys.argv[0],
vers=vers or magicGlobals().get("__version__", ""),
date=date or isoDateTimeStr(),
usr=user or getpass.getuser(),
dir=dir or os.getcwd(),
args=" ".join(args or sys.argv)) |
<SYSTEM_TASK:>
Compose `funcs` to a single function.
<END_TASK>
<USER_TASK:>
Description:
def compose(*funcs):
"""Compose `funcs` to a single function.
>>> compose(operator.abs, operator.add)(-2,-3)
5
>>> compose()('nada')
'nada'
>>> compose(sorted, set, partial(filter, None))(range(3)[::-1]*2)
[1, 2]
""" |
# slightly optimized for most common cases and hence verbose
if len(funcs) == 2: f0,f1=funcs; return lambda *a,**kw: f0(f1(*a,**kw))
elif len(funcs) == 3: f0,f1,f2=funcs; return lambda *a,**kw: f0(f1(f2(*a,**kw)))
elif len(funcs) == 0: return lambda x:x # XXX single kwarg
elif len(funcs) == 1: return funcs[0]
else:
def composed(*args,**kwargs):
y = funcs[-1](*args,**kwargs)
for f in funcs[:0:-1]: y = f(y)
return y
return composed |
<SYSTEM_TASK:>
Get a filename of a built-in library file.
<END_TASK>
<USER_TASK:>
Description:
def get_lib_filename(category, name):
""" Get a filename of a built-in library file. """ |
base_dir = os.path.dirname(os.path.abspath(__file__))
if category == 'js':
filename = os.path.join('js', '{0}.js'.format(name))
elif category == 'css':
filename = os.path.join('css', '{0}.css'.format(name))
elif category == 'html':
filename = os.path.join('html', '{0}.html'.format(name))
else:
raise ValueError("Unknown category")
return os.path.join(base_dir, 'lib', filename) |
<SYSTEM_TASK:>
Ethernet has flow control! The inter-frame pause can be adjusted, by
<END_TASK>
<USER_TASK:>
Description:
def set_pause_param(self, autoneg, rx_pause, tx_pause):
"""
Ethernet has flow control! The inter-frame pause can be adjusted, by
auto-negotiation through an ethernet frame type with a simple two-field
payload, and by setting it explicitly.
http://en.wikipedia.org/wiki/Ethernet_flow_control
""" |
# create a struct ethtool_pauseparm
# create a struct ifreq with its .ifr_data pointing at the above
ecmd = array.array('B', struct.pack('IIII',
ETHTOOL_SPAUSEPARAM, bool(autoneg), bool(rx_pause), bool(tx_pause)))
buf_addr, _buf_len = ecmd.buffer_info()
ifreq = struct.pack('16sP', self.name, buf_addr)
fcntl.ioctl(sockfd, SIOCETHTOOL, ifreq) |
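A hedged usage sketch; the wrapper class name is hypothetical, and the ioctl
requires CAP_NET_ADMIN:

    # Disable autonegotiated and explicit pause frames on eth0.
    iface = Interface('eth0')
    iface.set_pause_param(autoneg=False, rx_pause=False, tx_pause=False)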
<SYSTEM_TASK:>
Split a string with comma or space-separated elements into a list.
<END_TASK>
<USER_TASK:>
Description:
def split_elements(value):
"""Split a string with comma or space-separated elements into a list.""" |
l = [v.strip() for v in value.split(',')]
if len(l) == 1:
l = value.split()
return l |
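Doctest-style examples covering both separators:

    >>> split_elements('a, b, c')
    ['a', 'b', 'c']
    >>> split_elements('a b c')
    ['a', 'b', 'c']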
<SYSTEM_TASK:>
Create the module instance of the GraphiteClient.
<END_TASK>
<USER_TASK:>
Description:
def init(init_type='plaintext_tcp', *args, **kwargs):
"""
Create the module instance of the GraphiteClient.
""" |
global _module_instance
reset()
validate_init_types = ['plaintext_tcp', 'plaintext', 'pickle_tcp',
'pickle', 'plain']
if init_type not in validate_init_types:
raise GraphiteSendException(
"Invalid init_type '%s', must be one of: %s" %
(init_type, ", ".join(validate_init_types)))
# Use TCP to send data to the plain text receiver on the graphite server.
if init_type in ['plaintext_tcp', 'plaintext', 'plain']:
_module_instance = GraphiteClient(*args, **kwargs)
# Use TCP to send pickled data to the pickle receiver on the graphite
# server.
if init_type in ['pickle_tcp', 'pickle']:
_module_instance = GraphitePickleClient(*args, **kwargs)
return _module_instance |
<SYSTEM_TASK:>
Allow the module to be called from the cli.
<END_TASK>
<USER_TASK:>
Description:
def cli():
""" Allow the module to be called from the cli. """ |
import argparse
parser = argparse.ArgumentParser(description='Send data to graphite')
# Core of the application is to accept a metric and a value.
parser.add_argument('metric', metavar='metric', type=str,
help='name.of.metric')
parser.add_argument('value', metavar='value', type=int,
help='value of metric as int')
args = parser.parse_args()
metric = args.metric
value = args.value
graphitesend_instance = init()
graphitesend_instance.send(metric, value) |
<SYSTEM_TASK:>
Make a TCP connection to the graphite server on port self.port
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
"""
Make a TCP connection to the graphite server on port self.port
""" |
self.socket = socket.socket()
self.socket.settimeout(self.timeout_in_seconds)
try:
self.socket.connect(self.addr)
except socket.timeout:
raise GraphiteSendException(
"Took over %d second(s) to connect to %s" %
(self.timeout_in_seconds, self.addr))
except socket.gaierror:
raise GraphiteSendException(
"No address associated with hostname %s:%s" % self.addr)
except Exception as error:
raise GraphiteSendException(
"unknown exception while connecting to %s - %s" %
(self.addr, error)
)
return self.socket |
<SYSTEM_TASK:>
Close the TCP connection with the graphite server.
<END_TASK>
<USER_TASK:>
Description:
def disconnect(self):
"""
Close the TCP connection with the graphite server.
""" |
try:
self.socket.shutdown(1)
# If it's currently a socket, set it to None
except AttributeError:
self.socket = None
except Exception:
self.socket = None
# Set the self.socket to None, no matter what.
finally:
self.socket = None |
<SYSTEM_TASK:>
Dispatch the different steps of sending
<END_TASK>
<USER_TASK:>
Description:
def _dispatch_send(self, message):
"""
Dispatch the different steps of sending
""" |
if self.dryrun:
return message
if not self.socket:
raise GraphiteSendException(
"Socket was not created before send"
)
sending_function = self._send
if self._autoreconnect:
sending_function = self._send_and_reconnect
try:
if self.asynchronous and gevent:
gevent.spawn(sending_function, message)
else:
sending_function(message)
except Exception as e:
self._handle_send_error(e)
return "sent {0} long message: {1}".format(len(message), message[:75]) |
<SYSTEM_TASK:>
Send _message_ to Graphite Server and attempt reconnect on failure.
<END_TASK>
<USER_TASK:>
Description:
def _send_and_reconnect(self, message):
"""Send _message_ to Graphite Server and attempt reconnect on failure.
If _autoreconnect_ was specified, attempt to reconnect if first send
fails.
:raises AttributeError: When the socket has not been set.
:raises socket.error: When the socket connection is no longer valid.
""" |
try:
self.socket.sendall(message.encode("ascii"))
except (AttributeError, socket.error):
if not self.autoreconnect():
raise
else:
self.socket.sendall(message.encode("ascii")) |
<SYSTEM_TASK:>
Check if the socket has been monkey-patched by gevent
<END_TASK>
<USER_TASK:>
Description:
def enable_asynchronous(self):
"""Check if socket have been monkey patched by gevent""" |
def is_monkey_patched():
try:
from gevent import monkey, socket
except ImportError:
return False
if hasattr(monkey, "saved"):
return "socket" in monkey.saved
return gevent.socket.socket == socket.socket
if not is_monkey_patched():
raise Exception("To activate asynchonoucity, please monkey patch"
" the socket module with gevent")
return True |
<SYSTEM_TASK:>
Given a message send it to the graphite server.
<END_TASK>
<USER_TASK:>
Description:
def _send(self, message):
""" Given a message send it to the graphite server. """ |
# An option to lowercase the entire message
if self.lowercase_metric_names:
message = message.lower()
# convert the message into a pickled payload.
message = self.str2listtuple(message)
try:
self.socket.sendall(message)
# Capture missing socket.
except socket.gaierror as error:
raise GraphiteSendException(
"Failed to send data to %s, with error: %s" %
(self.addr, error)) # noqa
# Capture socket closure before send.
except socket.error as error:
raise GraphiteSendException(
"Socket closed before able to send data to %s, "
"with error: %s" %
(self.addr, error)) # noqa
except Exception as error:
raise GraphiteSendException(
"Unknown error while trying to send data down socket to %s, "
"error: %s" %
(self.addr, error)) # noqa
return "sent %d long pickled message" % len(message) |
<SYSTEM_TASK:>
Make sure the metric is free of control chars, spaces, tabs, etc.
<END_TASK>
<USER_TASK:>
Description:
def clean_metric_name(self, metric_name):
"""
Make sure the metric is free of control chars, spaces, tabs, etc.
""" |
if not self._clean_metric_name:
return metric_name
metric_name = str(metric_name)
for _from, _to in self.cleaning_replacement_list:
metric_name = metric_name.replace(_from, _to)
return metric_name |
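A quick illustration of the cleaning pass, assuming a cleaning_replacement_list like the one below (the library's actual list may differ):

cleaning_replacement_list = [
    ('(', '_'), (')', '_'), (' ', '_'), ('/', '_'), ('\\', '_'),
]

name = "my metric/with (chars)"
for _from, _to in cleaning_replacement_list:
    name = name.replace(_from, _to)
print(name)  # my_metric_with__chars_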
<SYSTEM_TASK:>
Patched method for PageAdmin.get_form.
<END_TASK>
<USER_TASK:>
Description:
def get_form(self, request, obj=None, **kwargs):
"""
Patched method for PageAdmin.get_form.
Returns a page form without the base field 'meta_description' which is
overridden in djangocms-page-meta.
This is triggered in the page add view and in the change view if
the meta description of the page is empty.
""" |
language = get_language_from_request(request, obj)
form = _BASE_PAGEADMIN__GET_FORM(self, request, obj, **kwargs)
if not obj or not obj.get_meta_description(language=language):
form.base_fields.pop('meta_description', None)
return form |
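The module-level _BASE_PAGEADMIN__GET_FORM is presumably a reference to the unpatched method, saved before the patch is installed. A sketch of that wiring, inferred from the name rather than taken from the source:

from cms.admin.pageadmin import PageAdmin

_BASE_PAGEADMIN__GET_FORM = PageAdmin.get_form  # keep the original
PageAdmin.get_form = get_form                   # install the patched version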
<SYSTEM_TASK:>
Initialize communication with the MPR121.
<END_TASK>
<USER_TASK:>
Description:
def begin(self, address=MPR121_I2CADDR_DEFAULT, i2c=None, **kwargs):
"""Initialize communication with the MPR121.
Can specify a custom I2C address for the device using the address
parameter (defaults to 0x5A). Optional i2c parameter allows specifying a
custom I2C bus source (defaults to platform's I2C bus).
Returns True if communication with the MPR121 was established, otherwise
returns False.
""" |
# Assume we're using platform's default I2C bus if none is specified.
if i2c is None:
import Adafruit_GPIO.I2C as I2C
i2c = I2C
# Require repeated start conditions for I2C register reads. Unfortunately
# the MPR121 is very sensitive and requires repeated starts to read all
# the registers.
i2c.require_repeated_start()
# Save a reference to the I2C device instance for later communication.
self._device = i2c.get_i2c_device(address, **kwargs)
return self._reset() |
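Typical initialization, following the Adafruit library's documented usage (module path assumed):

import Adafruit_MPR121.MPR121 as MPR121

cap = MPR121.MPR121()
if not cap.begin():
    raise RuntimeError('Failed to initialize MPR121, check wiring!')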
<SYSTEM_TASK:>
Return touch state of all pins as a 12-bit value where each bit
<END_TASK>
<USER_TASK:>
Description:
def touched(self):
"""Return touch state of all pins as a 12-bit value where each bit
represents a pin, with a value of 1 being touched and 0 not being touched.
""" |
t = self._i2c_retry(self._device.readU16LE, MPR121_TOUCHSTATUS_L)
return t & 0x0FFF |
<SYSTEM_TASK:>
Return True if the specified pin is being touched, otherwise returns
<END_TASK>
<USER_TASK:>
Description:
def is_touched(self, pin):
"""Return True if the specified pin is being touched, otherwise returns
False.
""" |
assert pin >= 0 and pin < 12, 'pin must be between 0-11 (inclusive)'
t = self.touched()
return (t & (1 << pin)) > 0 |
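A small polling loop built on touched(), detecting touch and release edges by comparing against the previous bitmask (continuing from the cap instance above):

import time

last_touched = cap.touched()
while True:
    current = cap.touched()
    for pin in range(12):
        bit = 1 << pin
        if (current & bit) and not (last_touched & bit):
            print('pin {0} touched'.format(pin))
        if not (current & bit) and (last_touched & bit):
            print('pin {0} released'.format(pin))
    last_touched = current
    time.sleep(0.1)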
<SYSTEM_TASK:>
Decorator straight up stolen from stackoverflow
<END_TASK>
<USER_TASK:>
Description:
def profileit(func):
"""
Decorator straight up stolen from stackoverflow
""" |
def wrapper(*args, **kwargs):
    datafn = func.__name__ + ".profile"  # Name the data file sensibly
    prof = cProfile.Profile()
    # runcall enables and disables the profiler itself, so explicit
    # enable()/disable() calls are unnecessary.
    retval = prof.runcall(func, *args, **kwargs)
    prof.dump_stats(datafn)  # actually write the data file named above
    stats = pstats.Stats(prof)
    try:
        stats.sort_stats('cumtime').print_stats()
    except KeyError:
        pass  # the 'cumtime' sort key breaks in python 2.6
    return retval
return wrapper |
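Applying the decorator is the usual one-liner; calling the wrapped function prints cumulative-time stats and writes <name>.profile next to the script:

@profileit
def busy_work(n):
    return sum(i * i for i in range(n))

busy_work(100000)  # prints stats and writes busy_work.profile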
<SYSTEM_TASK:>
Gets all of the fields on the model.
<END_TASK>
<USER_TASK:>
Description:
def _get_fields_for_model(model):
"""
Gets all of the fields on the model.
:param DeclarativeModel model: A SQLAlchemy ORM Model
:return: A tuple of the fields on the Model corresponding
to the columns on the Model.
:rtype: tuple
""" |
fields = []
for name in model._sa_class_manager:
prop = getattr(model, name)
if isinstance(prop.property, RelationshipProperty):
for pk in prop.property.mapper.primary_key:
fields.append('{0}.{1}'.format(name, pk.name))
else:
fields.append(name)
return tuple(fields) |
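For a concrete sense of the output, a toy pair of models (illustrative only; field order in the result is not guaranteed):

from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Author(Base):
    __tablename__ = 'author'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    books = relationship('Book')

class Book(Base):
    __tablename__ = 'book'
    id = Column(Integer, primary_key=True)
    title = Column(String)
    author_id = Column(Integer, ForeignKey('author.id'))

print(_get_fields_for_model(Author))
# e.g. ('id', 'name', 'books.id') -- relationship fields are flattened
# to '<relation>.<primary key>'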
<SYSTEM_TASK:>
Gets the necessary relationships for the resource
<END_TASK>
<USER_TASK:>
Description:
def _get_relationships(model):
"""
Gets the necessary relationships for the resource
by inspecting the sqlalchemy model for relationships.
:param DeclarativeMeta model: The SQLAlchemy ORM model.
:return: A tuple of Relationship/ListRelationship instances
corresponding to the relationships on the Model.
:rtype: tuple
""" |
relationships = []
for name, relationship in inspect(model).relationships.items():
class_ = relationship.mapper.class_
if relationship.uselist:
rel = ListRelationship(name, relation=class_.__name__)
else:
rel = Relationship(name, relation=class_.__name__)
relationships.append(rel)
return tuple(relationships) |
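Continuing the toy models above, _get_relationships(Author) would yield a ListRelationship named 'books' with relation='Book', since that relationship uses a list; a scalar relationship would produce a plain Relationship instead.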
<SYSTEM_TASK:>
Creates a ResourceBase subclass by inspecting a SQLAlchemy
<END_TASK>
<USER_TASK:>
Description:
def create_resource(model, session_handler, resource_bases=(CRUDL,),
relationships=None, links=None, preprocessors=None,
postprocessors=None, fields=None, paginate_by=100,
auto_relationships=True, pks=None, create_fields=None,
update_fields=None, list_fields=None, append_slash=False):
"""
Creates a ResourceBase subclass by inspecting a SQLAlchemy
Model. This is somewhat more restrictive than explicitly
creating managers and resources. However, if you only need
    any of the basic CRUD+L operations, this is the quickest way to get started.
:param sqlalchemy.Model model: This is the model that
will be inspected to create a Resource and Manager from.
    By default, all of its fields will be exposed, although
this can be overridden using the fields attribute.
:param tuple resource_bases: A tuple of ResourceBase subclasses.
Defaults to the restmixins.CRUDL class only. However if you only
wanted Update and Delete you could pass in
```(restmixins.Update, restmixins.Delete)``` which
would cause the resource to inherit from those two.
Additionally, you could create your own mixins and pass them in
as the resource_bases
:param tuple relationships: extra relationships to pass
into the ResourceBase constructor. If auto_relationships
is set to True, then they will be appended to these relationships.
:param tuple links: Extra links to pass into the ResourceBase as
the class _links attribute. Defaults to an empty tuple.
:param tuple preprocessors: Preprocessors for the resource class attribute.
:param tuple postprocessors: Postprocessors for the resource class attribute.
:param ripozo_sqlalchemy.SessionHandler|ripozo_sqlalchemy.ScopedSessionHandler session_handler: A session handler
to use when instantiating an instance of the Manager class created
from the model. This is responsible for getting and handling
sessions in both normal cases and exceptions.
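    :param tuple pks: The primary keys to use on the resource.
        Defaults to the primary keys found by inspecting the model.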
:param tuple fields: The fields to expose on the api. Defaults to
all of the fields on the model.
:param bool auto_relationships: If True, then the SQLAlchemy Model
will be inspected for relationships and they will be automatically
appended to the relationships on the resource class attribute.
:param list create_fields: A list of the fields that are valid when
creating a resource. By default this will be the fields without
any primary keys included
:param list update_fields: A list of the fields that are valid when
updating a resource. By default this will be the fields without
any primary keys included
:param list list_fields: A list of the fields that will be returned
when the list endpoint is requested. Defaults to the fields
attribute.
:param bool append_slash: A flag to forcibly append slashes to
the end of urls.
:return: A ResourceBase subclass and AlchemyManager subclass
:rtype: ResourceMetaClass
""" |
relationships = relationships or tuple()
if auto_relationships:
relationships += _get_relationships(model)
links = links or tuple()
preprocessors = preprocessors or tuple()
postprocessors = postprocessors or tuple()
pks = pks or _get_pks(model)
fields = fields or _get_fields_for_model(model)
list_fields = list_fields or fields
create_fields = create_fields or [x for x in fields if x not in set(pks)]
update_fields = update_fields or [x for x in fields if x not in set(pks)]
manager_cls_attrs = dict(paginate_by=paginate_by, fields=fields, model=model,
list_fields=list_fields, create_fields=create_fields,
update_fields=update_fields)
manager_class = type(str(model.__name__), (AlchemyManager,), manager_cls_attrs)
manager = manager_class(session_handler)
resource_cls_attrs = dict(preprocessors=preprocessors,
postprocessors=postprocessors,
_relationships=relationships, _links=links,
pks=pks, manager=manager, append_slash=append_slash)
res_class = ResourceMetaClass(str(model.__name__), resource_bases, resource_cls_attrs)
return res_class |
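An end-to-end sketch tying it together, reusing the toy Book model above. ScopedSessionHandler follows ripozo_sqlalchemy's documented API, but treat the constructor details as assumptions.

from ripozo_sqlalchemy import ScopedSessionHandler
from sqlalchemy import create_engine

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)

session_handler = ScopedSessionHandler(engine)
BookResource = create_resource(Book, session_handler)
# BookResource can now be registered with any ripozo dispatcher,
# e.g. flask-ripozo or django-ripozo.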
<SYSTEM_TASK:>
Logic to decide if the file should be processed or just needs to
<END_TASK>
<USER_TASK:>
Description:
def _is_pickle_valid(self):
"""Logic to decide if the file should be processed or just needs to
be loaded from its pickle data.
""" |
if not os.path.exists(self._pickle_file):
return False
else:
file_mtime = os.path.getmtime(self.logfile)
pickle_mtime = os.path.getmtime(self._pickle_file)
if file_mtime > pickle_mtime:
return False
return True |
<SYSTEM_TASK:>
Load data from a pickle file.
<END_TASK>
<USER_TASK:>
Description:
def _load(self):
"""Load data from a pickle file. """ |
with open(self._pickle_file, 'rb') as source:
pickler = pickle.Unpickler(source)
for attribute in self._pickle_attributes:
pickle_data = pickler.load()
setattr(self, attribute, pickle_data) |
<SYSTEM_TASK:>
Save the attributes defined on _pickle_attributes in a pickle file.
<END_TASK>
<USER_TASK:>
Description:
def _save(self):
"""Save the attributes defined on _pickle_attributes in a pickle file.
    This greatly speeds up subsequent runs, as the log file does not
    need to be processed again.
""" |
with open(self._pickle_file, 'wb') as source:
pickler = pickle.Pickler(source, pickle.HIGHEST_PROTOCOL)
for attribute in self._pickle_attributes:
attr = getattr(self, attribute, None)
pickler.dump(attr) |
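Note the symmetry between _save and _load: one dump() per attribute on the way out and one load() per attribute on the way back in, so both depend on _pickle_attributes keeping a stable order between runs.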
<SYSTEM_TASK:>
Parse data from data stream and replace object lines.
<END_TASK>
<USER_TASK:>
Description:
def parse_data(self, logfile):
"""Parse data from data stream and replace object lines.
:param logfile: [required] Log file data stream.
:type logfile: str
""" |
for line in logfile:
stripped_line = line.strip()
parsed_line = Line(stripped_line)
if parsed_line.valid:
self._valid_lines.append(parsed_line)
else:
self._invalid_lines.append(stripped_line)
self.total_lines = len(self._valid_lines) + len(self._invalid_lines) |
<SYSTEM_TASK:>
Filter current log lines by a given filter function.
<END_TASK>
<USER_TASK:>
Description:
def filter(self, filter_func, reverse=False):
"""Filter current log lines by a given filter function.
This allows to drill down data out of the log file by filtering the
relevant log lines to analyze.
For example, filter by a given IP so only log lines for that IP are
further processed with commands (top paths, http status counter...).
:param filter_func: [required] Filter method, see filters.py for all
available filters.
:type filter_func: function
:param reverse: negate the filter (so accept all log lines that return
``False``).
:type reverse: boolean
:returns: a new instance of Log containing only log lines
that passed the filter function.
:rtype: :class:`Log`
""" |
new_log_file = Log()
new_log_file.logfile = self.logfile
new_log_file.total_lines = 0
new_log_file._valid_lines = []
new_log_file._invalid_lines = self._invalid_lines[:]
# add the reverse conditional outside the loop to keep the loop as
# straightforward as possible
if not reverse:
for i in self._valid_lines:
if filter_func(i):
new_log_file.total_lines += 1
new_log_file._valid_lines.append(i)
else:
for i in self._valid_lines:
if not filter_func(i):
new_log_file.total_lines += 1
new_log_file._valid_lines.append(i)
return new_log_file |
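Because filter() returns a new Log, filters chain naturally. A sketch using plain lambdas; the Log constructor signature is an assumption.

log = Log('haproxy.log')
one_ip = log.filter(lambda line: line.get_ip() == '1.2.3.4')
everyone_else = log.filter(lambda line: line.get_ip() == '1.2.3.4',
                           reverse=True)
print(one_ip.cmd_ip_counter())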
<SYSTEM_TASK:>
Returns a list of all methods that start with ``cmd_``.
<END_TASK>
<USER_TASK:>
Description:
def commands(cls):
"""Returns a list of all methods that start with ``cmd_``.""" |
cmds = [cmd[4:] for cmd in dir(cls) if cmd.startswith('cmd_')]
return cmds |
<SYSTEM_TASK:>
Reports a breakdown of how many requests have been made per IP.
<END_TASK>
<USER_TASK:>
Description:
def cmd_ip_counter(self):
"""Reports a breakdown of how many requests have been made per IP.
.. note::
To enable this command requests need to provide a header with the
        forwarded IP (usually X-Forwarded-For), and it must be the only
        header being captured.
""" |
ip_counter = defaultdict(int)
for line in self._valid_lines:
ip = line.get_ip()
if ip is not None:
ip_counter[ip] += 1
return ip_counter |
<SYSTEM_TASK:>
List all requests that took a certain amount of time to be
<END_TASK>
<USER_TASK:>
Description:
def cmd_slow_requests(self):
"""List all requests that took a certain amount of time to be
processed.
.. warning::
        For now this is hardcoded to 1 second (1000 milliseconds); the
        command line interface should be improved to allow passing
        parameters to each command or globally.
""" |
slow_requests = [
line.time_wait_response
for line in self._valid_lines
if line.time_wait_response > 1000
]
return slow_requests |
<SYSTEM_TASK:>
Returns the average response time of all, non aborted, requests.
<END_TASK>
<USER_TASK:>
Description:
def cmd_average_response_time(self):
"""Returns the average response time of all, non aborted, requests.""" |
average = [
line.time_wait_response
for line in self._valid_lines
if line.time_wait_response >= 0
]
divisor = float(len(average))
if divisor > 0:
    return sum(average) / divisor
return 0 |
<SYSTEM_TASK:>
Returns the average queue time of all, non aborted, requests.
<END_TASK>
<USER_TASK:>
Description:
def cmd_average_waiting_time(self):
"""Returns the average queue time of all, non aborted, requests.""" |
average = [
line.time_wait_queues
for line in self._valid_lines
if line.time_wait_queues >= 0
]
divisor = float(len(average))
if divisor > 0:
    return sum(average) / divisor
return 0 |
<SYSTEM_TASK:>
Generate statistics regarding how many requests were processed by
<END_TASK>
<USER_TASK:>
Description:
def cmd_server_load(self):
"""Generate statistics regarding how many requests were processed by
each downstream server.
""" |
servers = defaultdict(int)
for line in self._valid_lines:
servers[line.server_name] += 1
return servers |
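Sorting the resulting mapping makes load imbalances easy to spot:

servers = log.cmd_server_load()
for name, hits in sorted(servers.items(), key=lambda item: item[1],
                         reverse=True):
    print('{0}: {1} requests'.format(name, hits))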