text_prompt | code_prompt
---|---
<SYSTEM_TASK:>
Get the minimum value
<END_TASK>
<USER_TASK:>
Description:
def get_min_value(self):
""" Get the minimum value """ |
value = self.get_default_value()
if self.attribute_type is str:
min_value = value[:self.min_length - 1]
elif self.attribute_type is int:
min_value = self.min_length - 1
else:
raise TypeError('Attribute %s can not have a minimum value' % self.local_name)
return min_value |
<SYSTEM_TASK:>
Attempt to automatically determine base name using `get_url_name`.
<END_TASK>
<USER_TASK:>
Description:
def get_default_base_name(self, viewset):
"""
Attempt to automatically determine base name using `get_url_name`.
""" |
queryset = getattr(viewset, 'queryset', None)
if queryset is not None:
get_url_name = getattr(queryset.model, 'get_url_name', None)
if get_url_name is not None:
return get_url_name()
return super(SortedDefaultRouter, self).get_default_base_name(viewset) |
<SYSTEM_TASK:>
Modify nc_user_count quota usage on structure role grant or revoke
<END_TASK>
<USER_TASK:>
Description:
def change_customer_nc_users_quota(sender, structure, user, role, signal, **kwargs):
""" Modify nc_user_count quota usage on structure role grant or revoke """ |
assert signal in (signals.structure_role_granted, signals.structure_role_revoked), \
'Handler "change_customer_nc_users_quota" has to be used only with structure_role signals'
assert sender in (Customer, Project), \
'Handler "change_customer_nc_users_quota" works only with Project and Customer models'
if sender == Customer:
customer = structure
elif sender == Project:
customer = structure.customer
customer_users = customer.get_users()
customer.set_quota_usage(Customer.Quotas.nc_user_count, customer_users.count()) |
<SYSTEM_TASK:>
Delete not shared service settings without services
<END_TASK>
<USER_TASK:>
Description:
def delete_service_settings_on_service_delete(sender, instance, **kwargs):
""" Delete not shared service settings without services """ |
service = instance
try:
service_settings = service.settings
except ServiceSettings.DoesNotExist:
# If this handler works together with delete_service_settings_on_scope_delete
# it tries to delete service settings that are already deleted.
return
if not service_settings.shared:
service_settings.delete() |
<SYSTEM_TASK:>
If VM that contains service settings was deleted - all settings
<END_TASK>
<USER_TASK:>
Description:
def delete_service_settings_on_scope_delete(sender, instance, **kwargs):
""" If VM that contains service settings were deleted - all settings
resources could be safely deleted from NC.
""" |
for service_settings in ServiceSettings.objects.filter(scope=instance):
service_settings.unlink_descendants()
service_settings.delete() |
<SYSTEM_TASK:>
Return authentication string to place in Authorization Header
<END_TASK>
<USER_TASK:>
Description:
def get_authentication_header(self, user=None, api_key=None, password=None, certificate=None):
""" Return authenication string to place in Authorization Header
If API Token is set, it'll be used. Otherwise, the clear
text password will be sent. Users of NURESTLoginController are responsible to
clean the password property.
Returns:
Returns the XREST Authentication string with API Key or user password encoded.
""" |
if not user:
user = self.user
if not api_key:
api_key = self.api_key
if not password:
password = self.password
if not certificate:
certificate = self._certificate
if certificate:
return "XREST %s" % urlsafe_b64encode("{}:".format(user).encode('utf-8')).decode('utf-8')
if api_key:
return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, api_key).encode('utf-8')).decode('utf-8')
return "XREST %s" % urlsafe_b64encode("{}:{}".format(user, password).encode('utf-8')).decode('utf-8') |
<SYSTEM_TASK:>
Impersonate a user in an enterprise
<END_TASK>
<USER_TASK:>
Description:
def impersonate(self, user, enterprise):
""" Impersonate a user in a enterprise
Args:
user: the name of the user to impersonate
enterprise: the name of the enterprise where to use impersonation
""" |
if not user or not enterprise:
raise ValueError('You must set a user name and an enterprise name to begin impersonation')
self._is_impersonating = True
self._impersonation = "%s@%s" % (user, enterprise) |
<SYSTEM_TASK:>
Verify if the controller corresponds
<END_TASK>
<USER_TASK:>
Description:
def equals(self, controller):
""" Verify if the controller corresponds
to the current one.
""" |
if controller is None:
return False
return self.user == controller.user and self.enterprise == controller.enterprise and self.url == controller.url |
<SYSTEM_TASK:>
Return the connection back to the pool.
<END_TASK>
<USER_TASK:>
Description:
def release(self, conn):
"""Revert back connection to pool.""" |
if conn.in_transaction:
raise InvalidRequestError(
"Cannot release a connection with "
"an unfinished transaction"
)
raw = conn.connection
res = yield from self._pool.release(raw)
return res |
<SYSTEM_TASK:>
Return how many consumables are used by the resource with its current configuration.
<END_TASK>
<USER_TASK:>
Description:
def get_configuration(cls, resource):
""" Return how much consumables are used by resource with current configuration.
Output example:
{
<ConsumableItem instance>: <usage>,
<ConsumableItem instance>: <usage>,
...
}
""" |
strategy = cls._get_strategy(resource.__class__)
return strategy.get_configuration(resource) |
<SYSTEM_TASK:>
Obtain the version number
<END_TASK>
<USER_TASK:>
Description:
def get_version():
"""Obtain the version number""" |
import imp
import os
mod = imp.load_source(
'version', os.path.join('skdata', '__init__.py')
)
return mod.__version__ |
<SYSTEM_TASK:>
Accept invitation for current user.
<END_TASK>
<USER_TASK:>
Description:
def accept(self, request, uuid=None):
""" Accept invitation for current user.
To replace user's email with email from invitation - add parameter
'replace_email' to request POST body.
""" |
invitation = self.get_object()
if invitation.state != models.Invitation.State.PENDING:
raise ValidationError(_('Only pending invitation can be accepted.'))
elif invitation.civil_number and invitation.civil_number != request.user.civil_number:
raise ValidationError(_('User has an invalid civil number.'))
if invitation.project:
if invitation.project.has_user(request.user):
raise ValidationError(_('User already has role within this project.'))
elif invitation.customer.has_user(request.user):
raise ValidationError(_('User already has role within this customer.'))
if settings.WALDUR_CORE['VALIDATE_INVITATION_EMAIL'] and invitation.email != request.user.email:
raise ValidationError(_('Invitation and user emails mismatch.'))
replace_email = bool(request.data.get('replace_email'))
invitation.accept(request.user, replace_email=replace_email)
return Response({'detail': _('Invitation has been successfully accepted.')},
status=status.HTTP_200_OK) |
<SYSTEM_TASK:>
Run different actions on price estimate scope deletion.
<END_TASK>
<USER_TASK:>
Description:
def scope_deletion(sender, instance, **kwargs):
""" Run different actions on price estimate scope deletion.
If scope is a customer - delete all customer estimates and their children.
If scope is a deleted resource - redefine consumption details, recalculate
ancestors estimates and update estimate details.
If scope is an unlinked resource - delete all resource price estimates and update ancestors.
In all other cases - update price estimate details.
""" |
is_resource = isinstance(instance, structure_models.ResourceMixin)
if is_resource and getattr(instance, 'PERFORM_UNLINK', False):
_resource_unlink(resource=instance)
elif is_resource and not getattr(instance, 'PERFORM_UNLINK', False):
_resource_deletion(resource=instance)
elif isinstance(instance, structure_models.Customer):
_customer_deletion(customer=instance)
else:
for price_estimate in models.PriceEstimate.objects.filter(scope=instance):
price_estimate.init_details() |
<SYSTEM_TASK:>
Recalculate consumption details and save resource details
<END_TASK>
<USER_TASK:>
Description:
def _resource_deletion(resource):
""" Recalculate consumption details and save resource details """ |
if resource.__class__ not in CostTrackingRegister.registered_resources:
return
new_configuration = {}
price_estimate = models.PriceEstimate.update_resource_estimate(resource, new_configuration)
price_estimate.init_details() |
<SYSTEM_TASK:>
Update resource consumption details and price estimate if its configuration has changed.
<END_TASK>
<USER_TASK:>
Description:
def resource_update(sender, instance, created=False, **kwargs):
""" Update resource consumption details and price estimate if its configuration has changed.
Create estimates for previous months if the resource was not created in the current month.
""" |
resource = instance
try:
new_configuration = CostTrackingRegister.get_configuration(resource)
except ResourceNotRegisteredError:
return
models.PriceEstimate.update_resource_estimate(
resource, new_configuration, raise_exception=not _is_in_celery_task())
# Try to create historical price estimates
if created:
_create_historical_estimates(resource, new_configuration) |
<SYSTEM_TASK:>
Update resource consumption details and price estimate if its configuration has changed
<END_TASK>
<USER_TASK:>
Description:
def resource_quota_update(sender, instance, **kwargs):
""" Update resource consumption details and price estimate if its configuration has changed """ |
quota = instance
resource = quota.scope
try:
new_configuration = CostTrackingRegister.get_configuration(resource)
except ResourceNotRegisteredError:
return
models.PriceEstimate.update_resource_estimate(
resource, new_configuration, raise_exception=not _is_in_celery_task()) |
<SYSTEM_TASK:>
Create consumption details and price estimates for past months.
<END_TASK>
<USER_TASK:>
Description:
def _create_historical_estimates(resource, configuration):
""" Create consumption details and price estimates for past months.
Usually we need to update historical values on resource import.
""" |
today = timezone.now()
month_start = core_utils.month_start(today)
while month_start > resource.created:
month_start -= relativedelta(months=1)
models.PriceEstimate.create_historical(resource, configuration, max(month_start, resource.created)) |
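The while loop above walks month boundaries backwards until it passes the resource creation date. A hedged, self-contained sketch of that walk (the dates are hypothetical, and core_utils.month_start is replaced by a plain replace() call):

from datetime import datetime
from dateutil.relativedelta import relativedelta

created = datetime(2017, 3, 14)
today = datetime(2017, 6, 2)
month_start = today.replace(day=1, hour=0, minute=0, second=0, microsecond=0)

while month_start > created:
    month_start -= relativedelta(months=1)
    # create_historical would be invoked with this date
    print(max(month_start, created))
# Visits 2017-05-01, 2017-04-01, and finally 2017-03-14 (the creation date).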
<SYSTEM_TASK:>
Decide if the IPython command line is running code.
<END_TASK>
<USER_TASK:>
Description:
def IPYTHON_MAIN():
"""Decide if the Ipython command line is running code.""" |
import pkg_resources
runner_frame = inspect.getouterframes(inspect.currentframe())[-2]
return (
getattr(runner_frame, "function", None)
== pkg_resources.load_entry_point("ipython", "console_scripts", "ipython").__name__
) |
<SYSTEM_TASK:>
Register a model class according to its remote name
<END_TASK>
<USER_TASK:>
Description:
def register_model(cls, model):
"""
Register a model class according to its remote name
Args:
model: the model to register
""" |
rest_name = model.rest_name
resource_name = model.resource_name
if rest_name not in cls._model_rest_name_registry:
cls._model_rest_name_registry[rest_name] = [model]
cls._model_resource_name_registry[resource_name] = [model]
elif model not in cls._model_rest_name_registry[rest_name]:
cls._model_rest_name_registry[rest_name].append(model)
cls._model_resource_name_registry[resource_name].append(model) |
<SYSTEM_TASK:>
Get the first model corresponding to a rest_name
<END_TASK>
<USER_TASK:>
Description:
def get_first_model_with_rest_name(cls, rest_name):
""" Get the first model corresponding to a rest_name
Args:
rest_name: the rest name
""" |
models = cls.get_models_with_rest_name(rest_name)
if len(models) > 0:
return models[0]
return None |
<SYSTEM_TASK:>
Get the first model corresponding to a resource_name
<END_TASK>
<USER_TASK:>
Description:
def get_first_model_with_resource_name(cls, resource_name):
""" Get the first model corresponding to a resource_name
Args:
resource_name: the resource name
""" |
models = cls.get_models_with_resource_name(resource_name)
if len(models) > 0:
return models[0]
return None |
<SYSTEM_TASK:>
Try to find the spec and if it cannot be found, use the underscore starring syntax
<END_TASK>
<USER_TASK:>
Description:
def find_spec(self, fullname, target=None):
"""Try to finder the spec and if it cannot be found, use the underscore starring syntax
to identify potential matches.
""" |
spec = super().find_spec(fullname, target=target)
if spec is None:
original = fullname
if "." in fullname:
original, fullname = fullname.rsplit(".", 1)
else:
original, fullname = "", original
if "_" in fullname:
files = fuzzy_file_search(self.path, fullname)
if files:
file = Path(sorted(files)[0])
spec = super().find_spec(
(original + "." + file.stem.split(".", 1)[0]).lstrip("."), target=target
)
fullname = (original + "." + fullname).lstrip(".")
if spec and fullname != spec.name:
spec = FuzzySpec(
spec.name,
spec.loader,
origin=spec.origin,
loader_state=spec.loader_state,
alias=fullname,
is_package=bool(spec.submodule_search_locations),
)
return spec |
<SYSTEM_TASK:>
Updates the user and perform the callback method
<END_TASK>
<USER_TASK:>
Description:
def save(self, async=False, callback=None, encrypted=True):
""" Updates the user and perform the callback method """ |
if self._new_password and encrypted:
self.password = Sha1.encrypt(self._new_password)
controller = NURESTSession.get_current_session().login_controller
controller.password = self._new_password
controller.api_key = None
data = json.dumps(self.to_dict())
request = NURESTRequest(method=HTTP_METHOD_PUT, url=self.get_resource_url(), data=data)
if async:
return self.send_request(request=request, async=async, local_callback=self._did_save, remote_callback=callback)
else:
connection = self.send_request(request=request)
return self._did_save(connection) |
<SYSTEM_TASK:>
Launched when save has been successfully executed
<END_TASK>
<USER_TASK:>
Description:
def _did_save(self, connection):
""" Launched when save has been successfully executed """ |
self._new_password = None
controller = NURESTSession.get_current_session().login_controller
controller.password = None
controller.api_key = self.api_key
if connection.async:
callback = connection.callbacks['remote']
if connection.user_info:
callback(connection.user_info, connection)
else:
callback(self, connection)
else:
return (self, connection) |
<SYSTEM_TASK:>
Fetch all information about the current object
<END_TASK>
<USER_TASK:>
Description:
def fetch(self, async=False, callback=None):
""" Fetch all information about the current object
Args:
async (bool): Boolean to make an asynchronous call. Default is False
callback (function): Callback method that will be triggered in case of asynchronous call
Returns:
tuple: (current_fetcher, callee_parent, fetched_objects, connection)
Example:
>>> entity = NUEntity(id="xxx-xxx-xxx-xxx")
>>> entity.fetch() # will get the entity with id "xxx-xxx-xxx-xxx"
>>> print entity.name
"My Entity"
""" |
request = NURESTRequest(method=HTTP_METHOD_GET, url=self.get_resource_url())
if async:
return self.send_request(request=request, async=async, local_callback=self._did_fetch, remote_callback=callback)
else:
connection = self.send_request(request=request)
return self._did_retrieve(connection) |
<SYSTEM_TASK:>
This function retrieves the ISO 639 and inverted names datasets as tsv files and returns them as lists.
<END_TASK>
<USER_TASK:>
Description:
def _fabtabular():
"""
This function retrieves the ISO 639 and inverted names datasets as tsv files and returns them as lists.
""" |
import csv
import sys
from pkg_resources import resource_filename
data = resource_filename(__package__, 'iso-639-3.tab')
inverted = resource_filename(__package__, 'iso-639-3_Name_Index.tab')
macro = resource_filename(__package__, 'iso-639-3-macrolanguages.tab')
part5 = resource_filename(__package__, 'iso639-5.tsv')
part2 = resource_filename(__package__, 'iso639-2.tsv')
part1 = resource_filename(__package__, 'iso639-1.tsv')
# if sys.version_info[0] == 2:
# from urllib2 import urlopen
# from contextlib import closing
# data_fo = closing(urlopen('http://www-01.sil.org/iso639-3/iso-639-3.tab'))
# inverted_fo = closing(urlopen('http://www-01.sil.org/iso639-3/iso-639-3_Name_Index.tab'))
# else:
# from urllib.request import urlopen
# import io
# data_fo = io.StringIO(urlopen('http://www-01.sil.org/iso639-3/iso-639-3.tab').read().decode())
# inverted_fo = io.StringIO(urlopen('http://www-01.sil.org/iso639-3/iso-639-3_Name_Index.tab').read().decode())
if sys.version_info[0] == 3:
from functools import partial
global open
open = partial(open, encoding='utf-8')
data_fo = open(data)
inverted_fo = open(inverted)
macro_fo = open(macro)
part5_fo = open(part5)
part2_fo = open(part2)
part1_fo = open(part1)
with data_fo as u:
with inverted_fo as i:
with macro_fo as m:
with part5_fo as p5:
with part2_fo as p2:
with part1_fo as p1:
return (list(csv.reader(u, delimiter='\t'))[1:],
list(csv.reader(i, delimiter='\t'))[1:],
list(csv.reader(m, delimiter='\t'))[1:],
list(csv.reader(p5, delimiter='\t'))[1:],
list(csv.reader(p2, delimiter='\t'))[1:],
list(csv.reader(p1, delimiter='\t'))[1:]) |
<SYSTEM_TASK:>
A list of users connected to the project
<END_TASK>
<USER_TASK:>
Description:
def users(self, request, uuid=None):
""" A list of users connected to the project """ |
project = self.get_object()
queryset = project.get_users()
# we need to handle filtering manually because we want to filter only project users, not projects.
filter_backend = filters.UserConcatenatedNameOrderingBackend()
queryset = filter_backend.filter_queryset(request, queryset, self)
queryset = self.paginate_queryset(queryset)
serializer = self.get_serializer(queryset, many=True)
return self.get_paginated_response(serializer.data) |
<SYSTEM_TASK:>
Only staff can update shared settings, otherwise user has to be an owner of the settings.
<END_TASK>
<USER_TASK:>
Description:
def can_user_update_settings(request, view, obj=None):
""" Only staff can update shared settings, otherwise user has to be an owner of the settings.""" |
if obj is None:
return
# TODO [TM:3/21/17] clean it up after WAL-634. Clean up service settings update tests as well.
if obj.customer and not obj.shared:
return permissions.is_owner(request, view, obj)
else:
return permissions.is_staff(request, view, obj) |
<SYSTEM_TASK:>
Filter services by type
<END_TASK>
<USER_TASK:>
Description:
def list(self, request, *args, **kwargs):
"""
Filter services by type
^^^^^^^^^^^^^^^^^^^^^^^
It is possible to filter services by their types. Example:
/api/services/?service_type=DigitalOcean&service_type=OpenStack
""" |
return super(ServicesViewSet, self).list(request, *args, **kwargs) |
<SYSTEM_TASK:>
Allow the action to be executed only if service settings are not shared or the user is staff
<END_TASK>
<USER_TASK:>
Description:
def _require_staff_for_shared_settings(request, view, obj=None):
""" Allow to execute action only if service settings are not shared or user is staff """ |
if obj is None:
return
if obj.settings.shared and not request.user.is_staff:
raise PermissionDenied(_('Only staff users are allowed to import resources from shared services.')) |
<SYSTEM_TASK:>
Unlink all related resources, service project link and service itself.
<END_TASK>
<USER_TASK:>
Description:
def unlink(self, request, uuid=None):
"""
Unlink all related resources, service project link and service itself.
""" |
service = self.get_object()
service.unlink_descendants()
self.perform_destroy(service)
return Response(status=status.HTTP_204_NO_CONTENT) |
<SYSTEM_TASK:>
Fetch ancestor quotas that have the same name and are registered as aggregator quotas.
<END_TASK>
<USER_TASK:>
Description:
def get_aggregator_quotas(self, quota):
""" Fetch ancestors quotas that have the same name and are registered as aggregator quotas. """ |
ancestors = quota.scope.get_quota_ancestors()
aggregator_quotas = []
for ancestor in ancestors:
for ancestor_quota_field in ancestor.get_quotas_fields(field_class=AggregatorQuotaField):
if ancestor_quota_field.get_child_quota_name() == quota.name:
aggregator_quotas.append(ancestor.quotas.get(name=ancestor_quota_field))
return aggregator_quotas |
<SYSTEM_TASK:>
Attaches the handler to the specified event.
<END_TASK>
<USER_TASK:>
Description:
def on(self, event, handler):
"""Attaches the handler to the specified event.
@param event: event to attach the handler to. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param handler: event handler.
@return: self, so calls like this can be chained together.
""" |
event_hook = self.get_or_create(event)
event_hook.subscribe(handler)
return self |
<SYSTEM_TASK:>
Detaches the handler from the specified event.
<END_TASK>
<USER_TASK:>
Description:
def off(self, event, handler):
"""Detaches the handler from the specified event.
@param event: event to detach the handler from. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param handler: event handler.
@return: self, so calls like this can be chained together.
""" |
event_hook = self.get_or_create(event)
event_hook.unsubscribe(handler)
return self |
<SYSTEM_TASK:>
Triggers the specified event by invoking EventHook.trigger under the hood.
<END_TASK>
<USER_TASK:>
Description:
def trigger(self, event, *args):
"""Triggers the specified event by invoking EventHook.trigger under the hood.
@param event: event to trigger. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param args: event arguments.
@return: self, so calls like this can be chained together.
""" |
event_hook = self.get_or_create(event)
event_hook.trigger(*args)
return self |
<SYSTEM_TASK:>
Safely triggers the specified event by invoking
<END_TASK>
<USER_TASK:>
Description:
def safe_trigger(self, event, *args):
"""Safely triggers the specified event by invoking
EventHook.safe_trigger under the hood.
@param event: event to trigger. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param args: event arguments.
@return: self, so calls like this can be chained together.
""" |
event_hook = self.get_or_create(event)
event_hook.safe_trigger(*args)
return self |
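A short usage sketch of the chainable on/off/trigger API above; this assumes the methods belong to qcore's EventHub (an assumption based on the qcore-style docstrings):

from qcore.events import EventHub

hub = EventHub()
seen = []
handler = seen.append

hub.on('saved', handler).trigger('saved', 42)   # chained: attach, then fire
assert seen == [42]
hub.off('saved', handler).trigger('saved', 43)  # handler detached, no effect
assert seen == [42]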
<SYSTEM_TASK:>
Invitation lifetime must be specified in Waldur Core settings with parameter
<END_TASK>
<USER_TASK:>
Description:
def cancel_expired_invitations(invitations=None):
"""
Invitation lifetime must be specified in Waldur Core settings with parameter
"INVITATION_LIFETIME". If invitation creation time is less than expiration time, the invitation will set as expired.
""" |
expiration_date = timezone.now() - settings.WALDUR_CORE['INVITATION_LIFETIME']
if not invitations:
invitations = models.Invitation.objects.filter(state=models.Invitation.State.PENDING)
invitations = invitations.filter(created__lte=expiration_date)
invitations.update(state=models.Invitation.State.EXPIRED) |
<SYSTEM_TASK:>
Prepare the contract json abi for sighash lookups and fast access
<END_TASK>
<USER_TASK:>
Description:
def _prepare_abi(self, jsonabi):
"""
Prepare the contract json abi for sighash lookups and fast access
:param jsonabi: contracts abi in json format
:return:
""" |
self.signatures = {}
for element_description in jsonabi:
abi_e = AbiMethod(element_description)
if abi_e["type"] == "constructor":
self.signatures[b"__constructor__"] = abi_e
elif abi_e["type"] == "fallback":
abi_e.setdefault("inputs", [])
self.signatures[b"__fallback__"] = abi_e
elif abi_e["type"] == "function":
# function and signature present
# todo: we could generate the sighash ourselves? requires keccak256
if abi_e.get("signature"):
self.signatures[Utils.str_to_bytes(abi_e["signature"])] = abi_e
elif abi_e["type"] == "event":
self.signatures[b"__event__"] = abi_e
else:
raise Exception("Invalid abi type: %s - %s - %s" % (abi_e.get("type"),
element_description, abi_e)) |
<SYSTEM_TASK:>
Describe the input bytesequence s based on the loaded contract abi definition
<END_TASK>
<USER_TASK:>
Description:
def describe_input(self, s):
"""
Describe the input bytesequence s based on the loaded contract abi definition
:param s: bytes input
:return: AbiMethod instance
""" |
signatures = self.signatures.items()
for sighash, method in signatures:
if sighash is None or sighash.startswith(b"__"):
continue # skip constructor
if s.startswith(sighash):
s = s[len(sighash):]
types_def = self.signatures.get(sighash)["inputs"]
types = [t["type"] for t in types_def]
names = [t["name"] for t in types_def]
if not len(s):
values = len(types) * ["<nA>"]
else:
values = decode_abi(types, s)
# (type, name, data)
method.inputs = [{"type": t, "name": n, "data": v} for t, n, v in list(
zip(types, names, values))]
return method
else:
method = AbiMethod({"type": "fallback",
"name": "__fallback__",
"inputs": [], "outputs": []})
types_def = self.signatures.get(b"__fallback__", {"inputs": []})["inputs"]
types = [t["type"] for t in types_def]
names = [t["name"] for t in types_def]
values = decode_abi(types, s)
# (type, name, data)
method.inputs = [{"type": t, "name": n, "data": v} for t, n, v in list(
zip(types, names, values))]
return method |
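The dispatch above hinges on the calldata starting with a four-byte signature hash. A minimal, self-contained sketch of that matching step; the signatures dict is hypothetical, though 0xa9059cbb really is the well-known selector for transfer(address,uint256):

signatures = {b"\xa9\x05\x9c\xbb": "transfer(address,uint256)"}

calldata = b"\xa9\x05\x9c\xbb" + b"\x00" * 64  # selector + two 32-byte words

for sighash, name in signatures.items():
    if calldata.startswith(sighash):
        args = calldata[len(sighash):]  # strip the selector, as above
        print(name, "with", len(args), "bytes of arguments")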
<SYSTEM_TASK:>
Raises an AssertionError if expected is not actual.
<END_TASK>
<USER_TASK:>
Description:
def assert_is(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is not actual.""" |
assert expected is actual, _assert_fail_message(
message, expected, actual, "is not", extra
) |
<SYSTEM_TASK:>
Raises an AssertionError if expected is actual.
<END_TASK>
<USER_TASK:>
Description:
def assert_is_not(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is actual.""" |
assert expected is not actual, _assert_fail_message(
message, expected, actual, "is", extra
) |
<SYSTEM_TASK:>
Raises an AssertionError if expected != actual.
<END_TASK>
<USER_TASK:>
Description:
def assert_eq(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected != actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is larger than the tolerance.
""" |
if tolerance is None:
assert expected == actual, _assert_fail_message(
message, expected, actual, "!=", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %r" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), (
"parameters must be numbers when tolerance is specified: %r, %r"
% (expected, actual)
)
diff = abs(expected - actual)
assert diff <= tolerance, _assert_fail_message(
message, expected, actual, "is more than %r away from" % tolerance, extra
) |
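A usage sketch for the tolerance branch, assuming the function above is in scope (the module matches qcore.asserts):

assert_eq(1.0, 1.004, tolerance=0.01)  # passes: |1.0 - 1.004| <= 0.01

try:
    assert_eq(1.0, 1.2, tolerance=0.01)
except AssertionError as e:
    print(e)  # something like: 1.0 is more than 0.01 away from 1.2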
<SYSTEM_TASK:>
Asserts that two dictionaries are equal, producing a custom message if they are not.
<END_TASK>
<USER_TASK:>
Description:
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not.""" |
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %r" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %r" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
assert_is_instance(
expected[k],
type(actual[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
tolerance=number_tolerance,
)
else:
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
) |
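A usage sketch of the recursive comparison above, assuming assert_dict_eq is in scope; the dictionaries are hypothetical:

expected = {'a': 1, 'b': {'c': 2.0}}
actual = {'a': 1, 'b': {'c': 2.01}}

assert_dict_eq(expected, actual, number_tolerance=0.1)  # passes

# Without a tolerance the nested mismatch raises, naming the key path
# (something like: Value doesn't match for ...['b']['c']).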
<SYSTEM_TASK:>
Raises an AssertionError if left_hand <= right_hand.
<END_TASK>
<USER_TASK:>
Description:
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand.""" |
assert left > right, _assert_fail_message(message, left, right, "<=", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if left_hand < right_hand.
<END_TASK>
<USER_TASK:>
Description:
def assert_ge(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand < right_hand.""" |
assert left >= right, _assert_fail_message(message, left, right, "<", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if left_hand >= right_hand.
<END_TASK>
<USER_TASK:>
Description:
def assert_lt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand >= right_hand.""" |
assert left < right, _assert_fail_message(message, left, right, ">=", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if left_hand > right_hand.
<END_TASK>
<USER_TASK:>
Description:
def assert_le(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand > right_hand.""" |
assert left <= right, _assert_fail_message(message, left, right, ">", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if obj is not in seq.
<END_TASK>
<USER_TASK:>
Description:
def assert_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq.""" |
assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if obj is in iter.
<END_TASK>
<USER_TASK:>
Description:
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter.""" |
# for very long strings, provide a truncated error
if isinstance(seq, six.string_types) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if obj is not in seq using assert_eq cmp.
<END_TASK>
<USER_TASK:>
Description:
def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq using assert_eq cmp.""" |
for i in seq:
try:
assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra)
return
except AssertionError:
pass
assert False, _assert_fail_message(message, obj, seq, "is not in", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if substring is not a substring of subject.
<END_TASK>
<USER_TASK:>
Description:
def assert_is_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is not a substring of subject.""" |
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) != -1)
), _assert_fail_message(message, substring, subject, "is not in", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if substring is a substring of subject.
<END_TASK>
<USER_TASK:>
Description:
def assert_is_not_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is a substring of subject.""" |
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) == -1)
), _assert_fail_message(message, substring, subject, "is in", extra) |
<SYSTEM_TASK:>
Raises an AssertionError if the objects contained
<END_TASK>
<USER_TASK:>
Description:
def assert_unordered_list_eq(expected, actual, message=None):
"""Raises an AssertionError if the objects contained
in expected are not equal to the objects contained
in actual without regard to their order.
This takes quadratic time in the number of elements in actual; don't use it for very long lists.
""" |
missing_in_actual = []
missing_in_expected = list(actual)
for x in expected:
try:
missing_in_expected.remove(x)
except ValueError:
missing_in_actual.append(x)
if missing_in_actual or missing_in_expected:
if not message:
message = (
"%r not equal to %r; missing items: %r in expected, %r in actual."
% (expected, actual, missing_in_expected, missing_in_actual)
)
assert False, message |
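A usage sketch, assuming the function above is in scope; the lists are hypothetical:

assert_unordered_list_eq([1, 2, 2, 3], [3, 2, 1, 2])  # passes: same multiset

try:
    assert_unordered_list_eq([1, 2], [2, 2])
except AssertionError as e:
    print(e)  # [1, 2] not equal to [2, 2]; missing items: [2] in expected, [1] in actual.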
<SYSTEM_TASK:>
Execute function only if one of input parameters is not empty
<END_TASK>
<USER_TASK:>
Description:
def _execute_if_not_empty(func):
""" Execute function only if one of input parameters is not empty """ |
def wrapper(*args, **kwargs):
if any(args[1:]) or any(kwargs.values()):  # check the values, not (key, value) pairs
return func(*args, **kwargs)
return wrapper |
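A usage sketch of the guard above; the Logger class is hypothetical:

class Logger(object):
    @_execute_if_not_empty
    def log(self, message=''):
        print('logged: %s' % message)

Logger().log('')       # skipped: the only argument besides self is empty
Logger().log('hello')  # executes and prints: logged: hello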
<SYSTEM_TASK:>
Prepare body for elasticsearch query
<END_TASK>
<USER_TASK:>
Description:
def prepare_search_body(self, should_terms=None, must_terms=None, must_not_terms=None, search_text='', start=None, end=None):
"""
Prepare body for elasticsearch query
Search parameters
^^^^^^^^^^^^^^^^^
These parameters are dictionaries and have format: <term>: [<value 1>, <value 2> ...]
should_terms: it resembles logical OR
must_terms: it resembles logical AND
must_not_terms: it resembles logical NOT
search_text : string
Text for FTS(full text search)
start, end : datetime
Filter for event creation time
""" |
self.body = self.SearchBody()
self.body.set_should_terms(should_terms)
self.body.set_must_terms(must_terms)
self.body.set_must_not_terms(must_not_terms)
self.body.set_search_text(search_text)
self.body.set_timestamp_filter(start, end)
self.body.prepare() |
<SYSTEM_TASK:>
Execute a high-level operation
<END_TASK>
<USER_TASK:>
Description:
def execute(cls, instance, async=True, countdown=2, is_heavy_task=False, **kwargs):
""" Execute high level-operation """ |
cls.pre_apply(instance, async=async, **kwargs)
result = cls.apply_signature(instance, async=async, countdown=countdown,
is_heavy_task=is_heavy_task, **kwargs)
cls.post_apply(instance, async=async, **kwargs)
return result |
<SYSTEM_TASK:>
Serialize input data and apply signature
<END_TASK>
<USER_TASK:>
Description:
def apply_signature(cls, instance, async=True, countdown=None, is_heavy_task=False, **kwargs):
""" Serialize input data and apply signature """ |
serialized_instance = utils.serialize_instance(instance)
signature = cls.get_task_signature(instance, serialized_instance, **kwargs)
link = cls.get_success_signature(instance, serialized_instance, **kwargs)
link_error = cls.get_failure_signature(instance, serialized_instance, **kwargs)
if async:
return signature.apply_async(link=link, link_error=link_error, countdown=countdown,
queue=is_heavy_task and 'heavy' or None)
else:
result = signature.apply()
callback = link if not result.failed() else link_error
if callback is not None:
cls._apply_callback(callback, result)
return result.get() |
<SYSTEM_TASK:>
Checks whether Link action is disabled.
<END_TASK>
<USER_TASK:>
Description:
def is_disabled_action(view):
"""
Checks whether Link action is disabled.
""" |
if not isinstance(view, core_views.ActionsViewSet):
return False
action = getattr(view, 'action', None)
return action in view.disabled_actions if action is not None else False |
<SYSTEM_TASK:>
Return a list of the valid HTTP methods for this endpoint.
<END_TASK>
<USER_TASK:>
Description:
def get_allowed_methods(self, callback):
"""
Return a list of the valid HTTP methods for this endpoint.
""" |
if hasattr(callback, 'actions'):
return [method.upper() for method in callback.actions.keys() if method != 'head']
return [
method for method in
callback.cls().allowed_methods if method not in ('OPTIONS', 'HEAD')
] |
<SYSTEM_TASK:>
Determine a link description.
<END_TASK>
<USER_TASK:>
Description:
def get_description(self, path, method, view):
"""
Determine a link description.
This will be based on the method docstring if one exists,
or else the class docstring.
""" |
description = super(WaldurSchemaGenerator, self).get_description(path, method, view)
permissions_description = get_permissions_description(view, method)
if permissions_description:
description += '\n\n' + permissions_description if description else permissions_description
if isinstance(view, core_views.ActionsViewSet):
validators_description = get_validators_description(view)
if validators_description:
description += '\n\n' + validators_description if description else validators_description
validation_description = get_validation_description(view, method)
if validation_description:
description += '\n\n' + validation_description if description else validation_description
return description |
<SYSTEM_TASK:>
Delete error message if instance state changed from erred
<END_TASK>
<USER_TASK:>
Description:
def delete_error_message(sender, instance, name, source, target, **kwargs):
""" Delete error message if instance state changed from erred """ |
if source != StateMixin.States.ERRED:
return
instance.error_message = ''
instance.save(update_fields=['error_message']) |
<SYSTEM_TASK:>
Instantiates an enum with an arbitrary value.
<END_TASK>
<USER_TASK:>
Description:
def _make_value(self, value):
"""Instantiates an enum with an arbitrary value.""" |
member = self.__new__(self, value)
member.__init__(value)
return member |
<SYSTEM_TASK:>
Parses an enum member name or value into an enum member.
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, value, default=_no_default):
"""Parses an enum member name or value into an enum member.
Accepts the following types:
- Members of this enum class. These are returned directly.
- Integers. If there is an enum member with the integer as a value, that member is returned.
- Strings. If there is an enum member with the string as its name, that member is returned.
For integers and strings that don't correspond to an enum member, default is returned; if
no default is given the function raises KeyError instead.
Examples:
>>> class Color(Enum):
... red = 1
... blue = 2
>>> Color.parse(Color.red)
Color.red
>>> Color.parse(1)
Color.red
>>> Color.parse('blue')
Color.blue
""" |
if isinstance(value, cls):
return value
elif isinstance(value, six.integer_types) and not isinstance(value, EnumBase):
e = cls._value_to_member.get(value, _no_default)
else:
e = cls._name_to_member.get(value, _no_default)
if e is _no_default or not e.is_valid():
if default is _no_default:
raise _create_invalid_value_error(cls, value)
return default
return e |
<SYSTEM_TASK:>
Parses a flag integer or string into a Flags instance.
<END_TASK>
<USER_TASK:>
Description:
def parse(cls, value, default=_no_default):
"""Parses a flag integer or string into a Flags instance.
Accepts the following types:
- Members of this enum class. These are returned directly.
- Integers. These are converted directly into a Flags instance with the given name.
- Strings. The function accepts a comma-delimited list of flag names, corresponding to
members of the enum. These are all ORed together.
Examples:
>>> class Car(Flags):
... is_big = 1
... has_wheels = 2
>>> Car.parse(1)
Car.is_big
>>> Car.parse(3)
Car.parse('has_wheels,is_big')
>>> Car.parse('is_big,has_wheels')
Car.parse('has_wheels,is_big')
""" |
if isinstance(value, cls):
return value
elif isinstance(value, int):
e = cls._make_value(value)
else:
if not value:
e = cls._make_value(0)
else:
r = 0
for k in value.split(","):
v = cls._name_to_member.get(k, _no_default)
if v is _no_default:
if default is _no_default:
raise _create_invalid_value_error(cls, value)
else:
return default
r |= v.value
e = cls._make_value(r)
if not e.is_valid():
if default is _no_default:
raise _create_invalid_value_error(cls, value)
return default
return e |
<SYSTEM_TASK:>
Get permission checks that will be executed for current action.
<END_TASK>
<USER_TASK:>
Description:
def get_permission_checks(self, request, view):
"""
Get permission checks that will be executed for current action.
""" |
if view.action is None:
return []
# if permissions are defined for view directly - use them.
if hasattr(view, view.action + '_permissions'):
return getattr(view, view.action + '_permissions')
# otherwise return view-level permissions + extra view permissions
extra_permissions = getattr(view, view.action + '_extra_permissions', [])
if request.method in SAFE_METHODS:
return getattr(view, 'safe_methods_permissions', []) + extra_permissions
else:
return getattr(view, 'unsafe_methods_permissions', []) + extra_permissions |
<SYSTEM_TASK:>
Registers the function to the server's default fixed function manager.
<END_TASK>
<USER_TASK:>
Description:
def add_function(self, function):
"""
Registers the function to the server's default fixed function manager.
""" |
#noinspection PyTypeChecker
if not len(self.settings.FUNCTION_MANAGERS):
raise ConfigurationError(
'Where has the default function manager gone?!')
self.settings.FUNCTION_MANAGERS[0].add_function(function) |
<SYSTEM_TASK:>
When ElasticSearch analyzes string, it breaks it into parts.
<END_TASK>
<USER_TASK:>
Description:
def format_raw_field(key):
"""
When ElasticSearch analyzes string, it breaks it into parts.
In order to query for not-analyzed exact string values, we should use a subfield instead.
The index template for Elasticsearch 5.0 has been changed.
The subfield for string multi-fields has changed from .raw to .keyword
Thus workaround for backward compatibility during migration is required.
See also: https://github.com/elastic/logstash/blob/v5.4.1/docs/static/breaking-changes.asciidoc
""" |
subfield = django_settings.WALDUR_CORE.get('ELASTICSEARCH', {}).get('raw_subfield', 'keyword')
return '%s.%s' % (camel_case_to_underscore(key), subfield) |
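A worked sketch of the resulting field name; camel_case_to_underscore is assumed to behave as its name suggests, so a minimal stand-in is included:

import re

def camel_case_to_underscore(name):
    # minimal stand-in for the helper assumed above
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

subfield = 'keyword'  # or 'raw' for pre-5.0 Elasticsearch index templates
print('%s.%s' % (camel_case_to_underscore('customerName'), subfield))
# -> customer_name.keyword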
<SYSTEM_TASK:>
Creates a decorator function that applies the decorator_cls that was passed in.
<END_TASK>
<USER_TASK:>
Description:
def decorate(decorator_cls, *args, **kwargs):
"""Creates a decorator function that applies the decorator_cls that was passed in.""" |
global _wrappers
wrapper_cls = _wrappers.get(decorator_cls, None)
if wrapper_cls is None:
class PythonWrapper(decorator_cls):
pass
wrapper_cls = PythonWrapper
wrapper_cls.__name__ = decorator_cls.__name__ + "PythonWrapper"
_wrappers[decorator_cls] = wrapper_cls
def decorator(fn):
wrapped = wrapper_cls(fn, *args, **kwargs)
_update_wrapper(wrapped, fn)
return wrapped
return decorator |
<SYSTEM_TASK:>
States that method is deprecated.
<END_TASK>
<USER_TASK:>
Description:
def deprecated(replacement_description):
"""States that method is deprecated.
:param replacement_description: Describes what must be used instead.
:return: the original method with modified docstring.
""" |
def decorate(fn_or_class):
if isinstance(fn_or_class, type):
pass # Can't change __doc__ of type objects
else:
try:
fn_or_class.__doc__ = "This API point is obsolete. %s\n\n%s" % (
replacement_description,
fn_or_class.__doc__,
)
except AttributeError:
pass # For Cython method descriptors, etc.
return fn_or_class
return decorate |
<SYSTEM_TASK:>
Decorator that can convert the result of a function call.
<END_TASK>
<USER_TASK:>
Description:
def convert_result(converter):
"""Decorator that can convert the result of a function call.""" |
def decorate(fn):
@inspection.wraps(fn)
def new_fn(*args, **kwargs):
return converter(fn(*args, **kwargs))
return new_fn
return decorate |
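A usage sketch, assuming the decorator above is in scope (it matches qcore.decorators.convert_result):

@convert_result(list)
def pairs():
    return zip([1, 2], ['a', 'b'])  # a lazy iterator on Python 3

print(pairs())  # [(1, 'a'), (2, 'b')] -- the result was converted to a list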
<SYSTEM_TASK:>
Decorator for retrying a function if it throws an exception.
<END_TASK>
<USER_TASK:>
Description:
def retry(exception_cls, max_tries=10, sleep=0.05):
"""Decorator for retrying a function if it throws an exception.
:param exception_cls: an exception type or a parenthesized tuple of exception types
:param max_tries: maximum number of times this function can be executed. Must be at least 1.
:param sleep: number of seconds to sleep between function retries
""" |
assert max_tries > 0
def with_max_retries_call(delegate):
for i in xrange(0, max_tries):
try:
return delegate()
except exception_cls:
if i + 1 == max_tries:
raise
time.sleep(sleep)
def outer(fn):
is_generator = inspect.isgeneratorfunction(fn)
@functools.wraps(fn)
def retry_fun(*args, **kwargs):
return with_max_retries_call(lambda: fn(*args, **kwargs))
@functools.wraps(fn)
def retry_generator_fun(*args, **kwargs):
def get_first_item():
results = fn(*args, **kwargs)
for first_result in results:
return [first_result], results
return [], results
cache, generator = with_max_retries_call(get_first_item)
for item in cache:
yield item
for item in generator:
yield item
if not is_generator:
# so that qcore.inspection.get_original_fn can retrieve the original function
retry_fun.fn = fn
# Necessary for pickling of Cythonized functions to work. Cython's __reduce__
# method always returns the original name of the function.
retry_fun.__reduce__ = lambda: fn.__name__
return retry_fun
else:
retry_generator_fun.fn = fn
retry_generator_fun.__reduce__ = lambda: fn.__name__
return retry_generator_fun
return outer |
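A usage sketch for the decorator above; the flaky function is hypothetical:

calls = {'n': 0}

@retry(ValueError, max_tries=3, sleep=0)
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ValueError('transient failure')
    return 'ok'

assert flaky() == 'ok'
assert calls['n'] == 3  # failed twice, succeeded on the third and final try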
<SYSTEM_TASK:>
Converts a context manager into a decorator.
<END_TASK>
<USER_TASK:>
Description:
def decorator_of_context_manager(ctxt):
"""Converts a context manager into a decorator.
This decorator will run the decorated function in the context of the
manager.
:param ctxt: Context to run the function in.
:return: Wrapper around the original function.
""" |
def decorator_fn(*outer_args, **outer_kwargs):
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with ctxt(*outer_args, **outer_kwargs):
return fn(*args, **kwargs)
return wrapper
return decorator
if getattr(ctxt, "__doc__", None) is None:
msg = "Decorator that runs the inner function in the context of %s"
decorator_fn.__doc__ = msg % ctxt
else:
decorator_fn.__doc__ = ctxt.__doc__
return decorator_fn |
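A usage sketch; the Timer context manager is hypothetical:

import time

class Timer(object):
    """Times the wrapped call."""
    def __enter__(self):
        self.start = time.time()
    def __exit__(self, *exc_info):
        print('took %.3fs' % (time.time() - self.start))

timed = decorator_of_context_manager(Timer)

@timed()  # the outer call supplies the (empty) context-manager arguments
def work():
    time.sleep(0.01)

work()  # prints something like: took 0.010s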
<SYSTEM_TASK:>
A helper function, gets standard information from the error.
<END_TASK>
<USER_TASK:>
Description:
def get_error(self, error):
"""
A helper function, gets standard information from the error.
""" |
error_type = type(error)
if error.error_type == ET_CLIENT:
error_type_name = 'Client'
else:
error_type_name = 'Server'
return {
'type': error_type_name,
'name': error_type.__name__,
'prefix': getattr(error_type, '__module__', ''),
'message': unicode(error),
'params': error.args,
} |
<SYSTEM_TASK:>
Get error messages about the object and its ancestor quotas that will be exceeded if quota_delta is added.
<END_TASK>
<USER_TASK:>
Description:
def validate_quota_change(self, quota_deltas, raise_exception=False):
"""
Get error messages about the object and its ancestor quotas that will be exceeded if quota_delta is added.
raise_exception - if True QuotaExceededException will be raised if validation fails
quota_deltas - dictionary of quotas deltas, example:
{
'ram': 1024,
'storage': 2048,
...
}
Example of output:
['ram quota limit: 1024, requires: 2048(instance#1)', ...]
""" |
errors = []
for name, delta in six.iteritems(quota_deltas):
quota = self.quotas.get(name=name)
if quota.is_exceeded(delta):
errors.append('%s quota limit: %s, requires %s (%s)\n' % (
quota.name, quota.limit, quota.usage + delta, quota.scope))
if not raise_exception:
return errors
else:
if errors:
raise exceptions.QuotaExceededException(_('One or more quotas were exceeded: %s') % ';'.join(errors)) |
<SYSTEM_TASK:>
Return dictionary with sum of all scopes' quotas.
<END_TASK>
<USER_TASK:>
Description:
def get_sum_of_quotas_as_dict(cls, scopes, quota_names=None, fields=['usage', 'limit']):
"""
Return dictionary with sum of all scopes' quotas.
Dictionary format:
{
'quota_name1': 'sum of limits for quotas with such quota_name1',
'quota_name1_usage': 'sum of usages for quotas with such quota_name1',
...
}
All `scopes` have to be instances of the same model.
`fields` keyword argument defines which quota fields' sums will be present in the result.
""" |
if not scopes:
return {}
if quota_names is None:
quota_names = cls.get_quotas_names()
scope_models = set([scope._meta.model for scope in scopes])
if len(scope_models) > 1:
raise exceptions.QuotaError(_('All scopes have to be instances of the same model.'))
filter_kwargs = {
'content_type': ct_models.ContentType.objects.get_for_model(scopes[0]),
'object_id__in': [scope.id for scope in scopes],
'name__in': quota_names
}
result = {}
if 'usage' in fields:
items = Quota.objects.filter(**filter_kwargs)\
.values('name').annotate(usage=Sum('usage'))
for item in items:
result[item['name'] + '_usage'] = item['usage']
if 'limit' in fields:
unlimited_quotas = Quota.objects.filter(limit=-1, **filter_kwargs)
unlimited_quotas = list(unlimited_quotas.values_list('name', flat=True))
for quota_name in unlimited_quotas:
result[quota_name] = -1
items = Quota.objects\
.filter(**filter_kwargs)\
.exclude(name__in=unlimited_quotas)\
.values('name')\
.annotate(limit=Sum('limit'))
for item in items:
result[item['name']] = item['limit']
return result |
<SYSTEM_TASK:>
Returns a list of scope types acceptable by events filter.
<END_TASK>
<USER_TASK:>
Description:
def scope_types(self, request, *args, **kwargs):
""" Returns a list of scope types acceptable by events filter. """ |
return response.Response(utils.get_scope_types_mapping().keys()) |
<SYSTEM_TASK:>
Import this instrument's settings from the given file. Will
<END_TASK>
<USER_TASK:>
Description:
def import_from_file(self, index, filename):
"""Import this instrument's settings from the given file. Will
automatically add the instrument's synth and table to the song's
synths and tables if needed.
Note that this may invalidate existing instrument accessor objects.
:param index: the index into which to import
:param filename: the file from which to load
:raises ImportException: if importing failed, usually because the song
doesn't have enough synth or table slots left for the instrument's
synth or table
""" |
with open(filename, 'r') as fp:
self._import_from_struct(index, json.load(fp)) |
<SYSTEM_TASK:>
Load a Project from a ``.lsdsng`` file.
<END_TASK>
<USER_TASK:>
Description:
def load_lsdsng(filename):
"""Load a Project from a ``.lsdsng`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
""" |
# Load preamble data so that we know the name and version of the song
with open(filename, 'rb') as fp:
preamble_data = bread.parse(fp, spec.lsdsng_preamble)
with open(filename, 'rb') as fp:
# Skip the preamble this time around
fp.seek(int(len(preamble_data) / 8))
# Load compressed data into a block map and use BlockReader to
# decompress it
factory = BlockFactory()
while True:
block_data = bytearray(fp.read(blockutils.BLOCK_SIZE))
if len(block_data) == 0:
break
block = factory.new_block()
block.data = block_data
remapped_blocks = filepack.renumber_block_keys(factory.blocks)
reader = BlockReader()
compressed_data = reader.read(remapped_blocks)
# Now, decompress the raw data and use it and the preamble to construct
# a Project
raw_data = filepack.decompress(compressed_data)
name = preamble_data.name
version = preamble_data.version
size_blks = int(math.ceil(
float(len(compressed_data)) / blockutils.BLOCK_SIZE))
return Project(name, version, size_blks, raw_data) |
<SYSTEM_TASK:>
Load a Project from an ``.srm`` file.
<END_TASK>
<USER_TASK:>
Description:
def load_srm(filename):
"""Load a Project from an ``.srm`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
""" |
# .srm files are just decompressed projects without headers
# In order to determine the file's size in compressed blocks, we have to
# compress it first
with open(filename, 'rb') as fp:
raw_data = fp.read()
compressed_data = filepack.compress(raw_data)
factory = BlockFactory()
writer = BlockWriter()
writer.write(compressed_data, factory)
size_in_blocks = len(factory.blocks)
# We'll give the file a dummy name ("SRMLOAD") and version, since we know
# neither
name = "SRMLOAD"
version = 0
return Project(name, version, size_in_blocks, raw_data) |
<SYSTEM_TASK:>
the song associated with the project
<END_TASK>
<USER_TASK:>
Description:
def song(self):
"""the song associated with the project""" |
if self._song is None:
self._song = Song(self._song_data)
return self._song |
<SYSTEM_TASK:>
Save a project in .lsdsng format to the target file.
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename):
"""Save a project in .lsdsng format to the target file.
:param filename: the name of the file to which to save
:deprecated: use ``save_lsdsng(filename)`` instead
""" |
with open(filename, 'wb') as fp:
writer = BlockWriter()
factory = BlockFactory()
preamble_dummy_bytes = bytearray([0] * 9)
preamble = bread.parse(
preamble_dummy_bytes, spec.lsdsng_preamble)
preamble.name = self.name
preamble.version = self.version
preamble_data = bread.write(preamble)
raw_data = self.get_raw_data()
compressed_data = filepack.compress(raw_data)
writer.write(compressed_data, factory)
fp.write(preamble_data)
for key in sorted(factory.blocks.keys()):
fp.write(bytearray(factory.blocks[key].data)) |
<SYSTEM_TASK:>
Save a project in .srm format to the target file.
<END_TASK>
<USER_TASK:>
Description:
def save_srm(self, filename):
"""Save a project in .srm format to the target file.
:param filename: the name of the file to which to save
""" |
with open(filename, 'wb') as fp:
raw_data = bread.write(self._song_data, spec.song)
fp.write(raw_data) |
<SYSTEM_TASK:>
Save this file.
<END_TASK>
<USER_TASK:>
Description:
def save(self, filename, callback=_noop_callback):
"""Save this file.
:param filename: the file to which to save the .sav file
:type filename: str
:param callback: a progress callback function
:type callback: function
""" |
with open(filename, 'wb') as fp:
self._save(fp, callback) |
<SYSTEM_TASK:>
Splits compressed data into blocks.
<END_TASK>
<USER_TASK:>
Description:
def split(compressed_data, segment_size, block_factory):
"""Splits compressed data into blocks.
:param compressed_data: the compressed data to split
:param segment_size: the size of a block in bytes
:param block_factory: a BlockFactory used to construct the blocks
:rtype: a list of block IDs of blocks that the block factory created while
splitting
""" |
# Split compressed data into blocks
segments = []
current_segment_start = 0
index = 0
data_size = len(compressed_data)
while index < data_size:
current_byte = compressed_data[index]
if index < data_size - 1:
next_byte = compressed_data[index + 1]
else:
next_byte = None
jump_size = 1
if current_byte == RLE_BYTE:
assert next_byte is not None, "Expected a command to follow " \
"RLE byte"
if next_byte == RLE_BYTE:
jump_size = 2
else:
jump_size = 3
elif current_byte == SPECIAL_BYTE:
assert next_byte is not None, "Expected a command to follow " \
"special byte"
if next_byte == SPECIAL_BYTE:
jump_size = 2
elif next_byte == DEFAULT_INSTR_BYTE or \
next_byte == DEFAULT_WAVE_BYTE:
jump_size = 3
else:
assert False, "Encountered unexpected EOF or block " \
"switch while segmenting"
# Need two bytes for the jump or EOF
if index - current_segment_start + jump_size > segment_size - 2:
segments.append(compressed_data[
current_segment_start:index])
current_segment_start = index
else:
index += jump_size
# Append the last segment, if any
if current_segment_start != index:
segments.append(compressed_data[
current_segment_start:current_segment_start + index])
# Make sure that no data was lost while segmenting
total_segment_length = sum(map(len, segments))
assert total_segment_length == len(compressed_data), "Lost %d bytes of " \
"data while segmenting" % (len(compressed_data) - total_segment_length)
block_ids = []
for segment in segments:
block = block_factory.new_block()
block_ids.append(block.id)
for (i, segment) in enumerate(segments):
block = block_factory.blocks[block_ids[i]]
assert len(block.data) == 0, "Encountered a block with " \
"pre-existing data while writing"
if i == len(segments) - 1:
# Write EOF to the end of the segment
add_eof(segment)
else:
# Write a pointer to the next segment
add_block_switch(segment, block_ids[i + 1])
# Pad segment with zeroes until it's large enough
pad(segment, segment_size)
block.data = segment
return block_ids |
<SYSTEM_TASK:>
Renumber a block map's indices so that they match the blocks' block
<END_TASK>
<USER_TASK:>
Description:
def renumber_block_keys(blocks):
"""Renumber a block map's indices so that tehy match the blocks' block
switch statements.
:param blocks a block map to renumber
:rtype: a renumbered copy of the block map
""" |
# There is an implicit block switch to the 0th block at the start of the
# file
byte_switch_keys = [0]
block_keys = list(blocks.keys())
# Scan the blocks, recording every block switch statement
for block in list(blocks.values()):
i = 0
while i < len(block.data) - 1:
current_byte = block.data[i]
next_byte = block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
if next_byte != EOF_BYTE:
byte_switch_keys.append(next_byte)
break
else:
i += 1
byte_switch_keys.sort()
block_keys.sort()
assert len(byte_switch_keys) == len(block_keys), (
"Number of blocks that are target of block switches (%d) "
% (len(byte_switch_keys)) +
"does not equal number of blocks in the song (%d)"
% (len(block_keys)) +
"; possible corruption")
if byte_switch_keys == block_keys:
# No remapping necessary
return blocks
new_block_map = {}
for block_key, byte_switch_key in zip(
block_keys, byte_switch_keys):
new_block_map[byte_switch_key] = blocks[block_key]
return new_block_map |
<SYSTEM_TASK:>
Merge the given blocks into a contiguous block of compressed data.
<END_TASK>
<USER_TASK:>
Description:
def merge(blocks):
"""Merge the given blocks into a contiguous block of compressed data.
:param blocks: the block map to merge
:rtype: a list of compressed bytes
""" |
current_block = blocks[sorted(blocks.keys())[0]]
compressed_data = []
eof = False
while not eof:
data_size_to_append = None
next_block = None
i = 0
while i < len(current_block.data) - 1:
current_byte = current_block.data[i]
next_byte = current_block.data[i + 1]
if current_byte == RLE_BYTE:
if next_byte == RLE_BYTE:
i += 2
else:
i += 3
elif current_byte == SPECIAL_BYTE:
if next_byte in SPECIAL_DEFAULTS:
i += 3
elif next_byte == SPECIAL_BYTE:
i += 2
else:
data_size_to_append = i
# hit end of file
if next_byte == EOF_BYTE:
eof = True
else:
next_block = blocks[next_byte]
break
else:
i += 1
assert data_size_to_append is not None, "Ran off the end of a "\
"block without encountering a block switch or EOF"
compressed_data.extend(current_block.data[0:data_size_to_append])
if not eof:
assert next_block is not None, "Switched blocks, but did " \
"not provide the next block to switch to"
current_block = next_block
return compressed_data |
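Given the sketch above, merge is the inverse of split: following the chain of block switches and stripping the EOF marker should reproduce the original compressed stream. A sanity check one might run:

blocks = renumber_block_keys(factory.blocks)
assert merge(blocks) == list(compressed)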
<SYSTEM_TASK:>
Add zeroes to a segment until it reaches a certain size.
<END_TASK>
<USER_TASK:>
Description:
def pad(segment, size):
"""Add zeroes to a segment until it reaches a certain size.
:param segment: the segment to pad
:param size: the size to which to pad the segment
""" |
for i in range(size - len(segment)):
segment.append(0)
assert len(segment) == size |
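Note that pad mutates the segment in place rather than returning a new list:

segment = [1, 2, 3]
pad(segment, 8)
assert segment == [1, 2, 3, 0, 0, 0, 0, 0]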
<SYSTEM_TASK:>
Decompress data that has been compressed by the filepack algorithm.
<END_TASK>
<USER_TASK:>
Description:
def decompress(compressed_data):
"""Decompress data that has been compressed by the filepack algorithm.
:param compressed_data: an array of compressed data bytes to decompress
:rtype: an array of decompressed bytes""" |
raw_data = []
index = 0
while index < len(compressed_data):
current = compressed_data[index]
index += 1
if current == RLE_BYTE:
directive = compressed_data[index]
index += 1
if directive == RLE_BYTE:
raw_data.append(RLE_BYTE)
else:
count = compressed_data[index]
index += 1
raw_data.extend([directive] * count)
elif current == SPECIAL_BYTE:
directive = compressed_data[index]
index += 1
if directive == SPECIAL_BYTE:
raw_data.append(SPECIAL_BYTE)
elif directive == DEFAULT_WAVE_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_WAVE * count)
elif directive == DEFAULT_INSTR_BYTE:
count = compressed_data[index]
index += 1
raw_data.extend(DEFAULT_INSTRUMENT_FILEPACK * count)
elif directive == EOF_BYTE:
assert False, ("Unexpected EOF command encountered while "
"decompressing")
else:
assert False, "Countered unexpected sequence 0x%02x 0x%02x" % (
current, directive)
else:
raw_data.append(current)
return raw_data |
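A worked example of the two escape conventions handled above, using the module's constants (their concrete values are defined elsewhere in the source):

assert decompress([RLE_BYTE, 0x11, 4]) == [0x11] * 4   # RLE triple: byte repeated count times
assert decompress([RLE_BYTE, RLE_BYTE]) == [RLE_BYTE]  # doubled marker decodes to one literal
assert decompress([SPECIAL_BYTE, SPECIAL_BYTE]) == [SPECIAL_BYTE]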
<SYSTEM_TASK:>
Compress raw bytes with the filepack algorithm.
<END_TASK>
<USER_TASK:>
Description:
def compress(raw_data):
"""Compress raw bytes with the filepack algorithm.
:param raw_data: an array of raw data bytes to compress
:rtype: a list of compressed bytes
""" |
raw_data = bytearray(raw_data)
compressed_data = []
data_size = len(raw_data)
index = 0
next_bytes = [-1, -1, -1]
def is_default_instrument(index):
if index + len(DEFAULT_INSTRUMENT_FILEPACK) > len(raw_data):
return False
instr_bytes = raw_data[index:index + len(DEFAULT_INSTRUMENT_FILEPACK)]
if instr_bytes[0] != 0xa8 or instr_bytes[1] != 0:
return False
return instr_bytes == DEFAULT_INSTRUMENT_FILEPACK
def is_default_wave(index):
return (index + len(DEFAULT_WAVE) <= len(raw_data) and
raw_data[index:index + len(DEFAULT_WAVE)] == DEFAULT_WAVE)
while index < data_size:
current_byte = raw_data[index]
for i in range(3):
if index < data_size - (i + 1):
next_bytes[i] = raw_data[index + (i + 1)]
else:
next_bytes[i] = -1
if current_byte == RLE_BYTE:
compressed_data.append(RLE_BYTE)
compressed_data.append(RLE_BYTE)
index += 1
elif current_byte == SPECIAL_BYTE:
compressed_data.append(SPECIAL_BYTE)
compressed_data.append(SPECIAL_BYTE)
index += 1
elif is_default_instrument(index):
counter = 1
index += len(DEFAULT_INSTRUMENT_FILEPACK)
while (is_default_instrument(index) and
counter < 0xff):  # the count is stored in a single byte, so cap at 0xff as in the wave branch
counter += 1
index += len(DEFAULT_INSTRUMENT_FILEPACK)
compressed_data.append(SPECIAL_BYTE)
compressed_data.append(DEFAULT_INSTR_BYTE)
compressed_data.append(counter)
elif is_default_wave(index):
counter = 1
index += len(DEFAULT_WAVE)
while is_default_wave(index) and counter < 0xff:
counter += 1
index += len(DEFAULT_WAVE)
compressed_data.append(SPECIAL_BYTE)
compressed_data.append(DEFAULT_WAVE_BYTE)
compressed_data.append(counter)
elif (current_byte == next_bytes[0] and
next_bytes[0] == next_bytes[1] and
next_bytes[1] == next_bytes[2]):
# Do RLE compression
compressed_data.append(RLE_BYTE)
compressed_data.append(current_byte)
counter = 0
while (index < data_size and
raw_data[index] == current_byte and
counter < 0xff):
index += 1
counter += 1
compressed_data.append(counter)
else:
compressed_data.append(current_byte)
index += 1
return compressed_data |
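compress and decompress are intended to be inverses, which suggests a quick round-trip property check (a sketch; DEFAULT_WAVE is the module-level constant used above):

data = [0x55] * 300 + list(DEFAULT_WAVE) * 2 + [RLE_BYTE, SPECIAL_BYTE]
assert decompress(compress(data)) == data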
<SYSTEM_TASK:>
Return a human-readable name without LSDJ's trailing zeroes.
<END_TASK>
<USER_TASK:>
Description:
def name_without_zeroes(name):
"""
Return a human-readable name without LSDJ's trailing zeroes.
:param name: the name from which to strip zeroes
:rtype: the name, without trailing zeroes
""" |
first_zero = name.find(b'\0')
if first_zero == -1:
# decode so Python 3 callers get a str rather than a bytes repr
return name.decode('ascii', errors='replace')
else:
return name[:first_zero].decode('ascii', errors='replace') |
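A quick example of the intended behavior, assuming the Python 3 decode fix above:

assert name_without_zeroes(b'PIANO\x00\x00\x00') == 'PIANO'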
<SYSTEM_TASK:>
a ```pylsdj.Table``` referencing the instrument's table, or None
<END_TASK>
<USER_TASK:>
Description:
def table(self):
"""a ```pylsdj.Table``` referencing the instrument's table, or None
if the instrument doesn't have a table""" |
if hasattr(self.data, 'table_on') and self.data.table_on:
assert_index_sane(self.data.table, len(self.song.tables))
return self.song.tables[self.data.table]
# make the documented None return explicit when the instrument has no table
return None |
<SYSTEM_TASK:>
import from an lsdinst struct
<END_TASK>
<USER_TASK:>
Description:
def import_lsdinst(self, struct_data):
"""import from an lsdinst struct""" |
self.name = struct_data['name']
self.automate = struct_data['data']['automate']
self.pan = struct_data['data']['pan']
if self.table is not None:
self.table.import_lsdinst(struct_data) |
<SYSTEM_TASK:>
Export this instrument's settings to a file.
<END_TASK>
<USER_TASK:>
Description:
def export_to_file(self, filename):
"""Export this instrument's settings to a file.
:param filename: the name of the file
""" |
instr_json = self.export_struct()
with open(filename, 'w') as fp:
json.dump(instr_json, fp, indent=2) |
<SYSTEM_TASK:>
Write this sample to a WAV file.
<END_TASK>
<USER_TASK:>
Description:
def write_wav(self, filename):
"""Write this sample to a WAV file.
:param filename: the file to which to write
""" |
wave_output = None
try:
wave_output = wave.open(filename, 'w')
wave_output.setparams(WAVE_PARAMS)
frames = bytearray([x << 4 for x in self.sample_data])
wave_output.writeframes(frames)
finally:
if wave_output is not None:
wave_output.close() |
<SYSTEM_TASK:>
Read sample data for this sample from a WAV file.
<END_TASK>
<USER_TASK:>
Description:
def read_wav(self, filename):
"""Read sample data for this sample from a WAV file.
:param filename: the file from which to read
""" |
wave_input = None
try:
wave_input = wave.open(filename, 'r')
wave_frames = bytearray(
wave_input.readframes(wave_input.getnframes()))
self.sample_data = [x >> 4 for x in wave_frames]
finally:
if wave_input is not None:
wave_input.close() |
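The mirrored shifts in write_wav and read_wav reflect LSDJ's 4-bit samples: writing scales each nibble up to 8-bit PCM, reading scales back down, so a write/read round trip preserves sample_data exactly:

nibbles = [0x0, 0x7, 0xF]
assert [(x << 4) >> 4 for x in nibbles] == nibbles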
<SYSTEM_TASK:>
find the local ip address on the given device
<END_TASK>
<USER_TASK:>
Description:
def get_device_address(device):
""" find the local ip address on the given device """ |
if device is None:
return None
command = ['ip', 'route', 'list', 'dev', device]
ip_routes = subprocess.check_output(command).decode('utf-8').strip()
for line in ip_routes.split('\n'):
seen = ''
for a in line.split():
if seen == 'src':
return a
seen = a
return None |
<SYSTEM_TASK:>
Find the device where the default route is.
<END_TASK>
<USER_TASK:>
Description:
def get_default_net_device():
""" Find the device where the default route is. """ |
with open('/proc/net/route') as fh:
for line in fh:
iface, dest, _ = line.split(None, 2)
if dest == '00000000':
return iface
return None |
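The two helpers compose to answer "what is my address on the default route?"; both are Linux-specific, since they shell out to ip and parse /proc/net/route:

device = get_default_net_device()      # e.g. 'eth0' (illustrative)
if device is not None:
    print(get_device_address(device))  # e.g. '192.168.1.10' (illustrative)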
<SYSTEM_TASK:>
Return true if it looks like a migration already exists.
<END_TASK>
<USER_TASK:>
Description:
def migration_exists(self, app, fixture_path):
"""
Return true if it looks like a migration already exists.
""" |
base_name = os.path.basename(fixture_path)
# Loop through all migrations
for migration_path in glob.glob(os.path.join(app.path, 'migrations', '*.py')):
with open(migration_path) as migration_file:
if base_name in migration_file.read():
return True
return False |
<SYSTEM_TASK:>
Create a data migration for app that uses fixture_path.
<END_TASK>
<USER_TASK:>
Description:
def create_migration(self, app, fixture_path):
"""
Create a data migration for app that uses fixture_path.
""" |
self.monkey_patch_migration_template(app, fixture_path)
out = StringIO()
management.call_command('makemigrations', app.label, empty=True, stdout=out)
self.restore_migration_template()
self.stdout.write(out.getvalue()) |
<SYSTEM_TASK:>
Try showing the most desirable GUI
<END_TASK>
<USER_TASK:>
Description:
def show():
"""Try showing the most desirable GUI
This function cycles through the currently registered
graphical user interfaces, if any, and presents the first
one found to the user.
""" |
parent = None
current = QtWidgets.QApplication.activeWindow()
while current:
parent = current
current = parent.parent()
window = (_discover_gui() or _show_no_gui)(parent)
return window |