Please provide a description of the function:def validate_api_call(schema, raw_request, raw_response):
request = normalize_request(raw_request)
with ErrorDict() as errors:
try:
validate_request(
request=request,
schema=schema,
)
except ValidationError as err:
errors['request'].add_error(err.messages or getattr(err, 'detail'))
return
response = normalize_response(raw_response, raw_request)
try:
validate_response(
response=response,
request_method=request.method,
schema=schema
)
except ValidationError as err:
errors['response'].add_error(err.messages or getattr(err, 'detail')) | [
"\n Validate the request/response cycle of an api call against a swagger\n schema. Request/Response objects from the `requests` and `urllib` library\n are supported.\n "
] |
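A minimal usage sketch for the validator above, assuming the `flex` package; the schema path and URL are illustrative, not taken from the source:

import requests
from flex.core import load, validate_api_call

schema = load('path/to/swagger.yaml')
response = requests.get('http://www.example.com/api/pets/1')
# Raises a ValidationError if either side of the call violates the schema.
validate_api_call(schema, raw_request=response.request, raw_response=response)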
Please provide a description of the function:def validate_email(email, check_mx=False, verify=False, debug=False, smtp_timeout=10):
if debug:
logger = logging.getLogger('validate_email')
logger.setLevel(logging.DEBUG)
else:
logger = None
try:
assert re.match(VALID_ADDRESS_REGEXP, email) is not None
check_mx |= verify
if check_mx:
if not DNS:
raise Exception('To check the MX records or to check if the email exists you must '
'have the pyDNS python package installed')
hostname = email[email.find('@') + 1:]
mx_hosts = get_mx_ip(hostname)
if mx_hosts is None:
return False
for mx in mx_hosts:
try:
if not verify and mx[1] in MX_CHECK_CACHE:
return MX_CHECK_CACHE[mx[1]]
smtp = smtplib.SMTP(timeout=smtp_timeout)
smtp.connect(mx[1])
MX_CHECK_CACHE[mx[1]] = True
if not verify:
try:
smtp.quit()
except smtplib.SMTPServerDisconnected:
pass
return True
status, _ = smtp.helo()
if status != 250:
smtp.quit()
if debug:
logger.debug(u'%s answer: %s - %s', mx[1], status, _)
continue
smtp.mail('')
status, _ = smtp.rcpt(email)
if status == 250:
smtp.quit()
return True
if debug:
logger.debug(u'%s answer: %s - %s', mx[1], status, _)
smtp.quit()
except smtplib.SMTPServerDisconnected: # Server does not permit verifying the user
if debug:
logger.debug(u'%s disconnected.', mx[1])
except smtplib.SMTPConnectError:
if debug:
logger.debug(u'Unable to connect to %s.', mx[1])
return None
except AssertionError:
return False
except (ServerError, socket.error) as e:
if debug:
logger.debug('ServerError or socket.error exception raised (%s).', e)
return None
return True | [
"Indicate whether the given string is a valid email address\n according to the 'addr-spec' portion of RFC 2822 (see section\n 3.4.1). Parts of the spec that are marked obsolete are *not*\n included in this test, and certain arcane constructions that\n depend on circular definitions in the spec may not pass, but in\n general this should correctly identify any email address likely\n to be in use as of 2011."
] |
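A hedged usage sketch (the address and import path are illustrative; the MX and verify checks additionally require the pyDNS package):

from validate_email import validate_email

validate_email('example@example.com')                 # syntax check only -> True/False
validate_email('example@example.com', check_mx=True)  # also resolve the domain's MX records
validate_email('example@example.com', verify=True)    # ask the SMTP server; may return None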
Please provide a description of the function:def find_parameter(parameters, **kwargs):
matching_parameters = filter_parameters(parameters, **kwargs)
if len(matching_parameters) == 1:
return matching_parameters[0]
elif len(matching_parameters) > 1:
raise MultipleParametersFound()
raise NoParameterFound() | [
"\n Given a list of parameters, find the one with the given name.\n "
] |
Please provide a description of the function:def merge_parameter_lists(*parameter_definitions):
merged_parameters = {}
for parameter_list in parameter_definitions:
for parameter in parameter_list:
key = (parameter['name'], parameter['in'])
merged_parameters[key] = parameter
return merged_parameters.values() | [
"\n Merge multiple lists of parameters into a single list. If there are any\n duplicate definitions, the last write wins.\n "
] |
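A small example of the last-write-wins behavior described above (the parameter values are illustrative):

path_level = [{'name': 'id', 'in': 'path', 'type': 'string'}]
operation_level = [{'name': 'id', 'in': 'path', 'type': 'integer'}]
merged = list(merge_parameter_lists(path_level, operation_level))
# The later definition wins: [{'name': 'id', 'in': 'path', 'type': 'integer'}]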
Please provide a description of the function:def validate_status_code_to_response_definition(response, operation_definition):
status_code = response.status_code
operation_responses = {str(code): val for code, val
in operation_definition['responses'].items()}
key = status_code
if key not in operation_responses:
key = 'default'
try:
response_definition = operation_responses[key]
except KeyError:
raise ValidationError(
MESSAGES['response']['invalid_status_code'].format(
status_code, ', '.join(operation_responses.keys()),
),
)
return response_definition | [
"\n Given a response, validate that the response status code is in the accepted\n status codes defined by this endpoint.\n\n If so, return the response definition that corresponds to the status code.\n "
] |
Please provide a description of the function:def generate_path_validator(api_path, path_definition, parameters,
context, **kwargs):
path_level_parameters = dereference_parameter_list(
path_definition.get('parameters', []),
context,
)
operation_level_parameters = dereference_parameter_list(
parameters,
context,
)
all_parameters = merge_parameter_lists(
path_level_parameters,
operation_level_parameters,
)
# PATH
in_path_parameters = filter_parameters(all_parameters, in_=PATH)
return chain_reduce_partial(
attrgetter('path'),
generate_path_parameters_validator(api_path, in_path_parameters, context),
) | [
"\n Generates a callable for validating the parameters in a response object.\n "
] |
Please provide a description of the function:def validate_response(response, request_method, schema):
with ErrorDict() as errors:
# 1
# TODO: tests
try:
api_path = validate_path_to_api_path(
path=response.path,
context=schema,
**schema
)
except ValidationError as err:
errors['path'].extend(list(err.messages))
return # this causes an exception to be raised since errors is no longer falsy.
path_definition = schema['paths'][api_path] or {}
# TODO: tests
try:
operation_definition = validate_request_method_to_operation(
request_method=request_method,
path_definition=path_definition,
)
except ValidationError as err:
errors['method'].add_error(err.detail)
return
# 4
try:
response_definition = validate_status_code_to_response_definition(
response=response,
operation_definition=operation_definition,
)
except ValidationError as err:
errors['status_code'].add_error(err.detail)
else:
# 5
response_validator = generate_response_validator(
api_path,
operation_definition=operation_definition,
path_definition=path_definition,
response_definition=response_definition,
context=schema,
)
try:
response_validator(response, context=schema)
except ValidationError as err:
errors['body'].add_error(err.detail) | [
"\n Response validation involves the following steps.\n 4. validate that the response status_code is in the allowed responses for\n the request method.\n 5. validate that the response content validates against any provided\n schemas for the responses.\n 6. headers, content-types, etc..., ???\n "
] |
Please provide a description of the function:def construct_schema_validators(schema, context):
validators = ValidationDict()
if '$ref' in schema:
validators.add_validator(
'$ref', SchemaReferenceValidator(schema['$ref'], context),
)
if 'properties' in schema:
for property_, property_schema in schema['properties'].items():
property_validator = generate_object_validator(
schema=property_schema,
context=context,
)
validators.add_property_validator(property_, property_validator)
if schema.get('additionalProperties') is False:
validators.add_validator(
'additionalProperties',
generate_additional_properties_validator(context=context, **schema),
)
assert 'context' not in schema
for key in schema:
if key in validator_mapping:
validators.add_validator(key, validator_mapping[key](context=context, **schema))
return validators | [
"\n Given a schema object, construct a dictionary of validators needed to\n validate a response matching the given schema.\n\n Special Cases:\n - $ref:\n These validators need to be Lazily evaluating so that circular\n validation dependencies do not result in an infinitely deep\n validation chain.\n - properties:\n These validators are meant to apply to properties of the object\n being validated rather than the object itself. In this case, we\n need recurse back into this function to generate a dictionary of\n validators for the property.\n "
] |
Please provide a description of the function:def validate_type(value, types, **kwargs):
if not is_value_of_any_type(value, types):
raise ValidationError(MESSAGES['type']['invalid'].format(
repr(value), get_type_for_value(value), types,
)) | [
"\n Validate that the value is one of the provided primative types.\n "
] |
Please provide a description of the function:def generate_type_validator(type_, **kwargs):
if is_non_string_iterable(type_):
types = tuple(type_)
else:
types = (type_,)
# support x-nullable since Swagger 2.0 doesn't support null type
# (see https://github.com/OAI/OpenAPI-Specification/issues/229)
if kwargs.get('x-nullable', False) and NULL not in types:
types = types + (NULL,)
return functools.partial(validate_type, types=types) | [
"\n Generates a callable validator for the given type or iterable of types.\n "
] |
Please provide a description of the function:def validate_multiple_of(value, divisor, **kwargs):
if not decimal.Decimal(str(value)) % decimal.Decimal(str(divisor)) == 0:
raise ValidationError(
MESSAGES['multiple_of']['invalid'].format(divisor, value),
) | [
"\n Given a value and a divisor, validate that the value is divisible by the\n divisor.\n "
] |
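A sketch of the intended behavior; the Decimal round-trip through str() is what keeps the float cases exact:

validate_multiple_of(10, 5)     # passes silently
validate_multiple_of(0.3, 0.1)  # passes: Decimal('0.3') % Decimal('0.1') == 0
validate_multiple_of(10, 3)     # raises ValidationError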
Please provide a description of the function:def validate_minimum(value, minimum, is_exclusive, **kwargs):
if is_exclusive:
comparison_text = "greater than"
compare_fn = operator.gt
else:
comparison_text = "greater than or equal to"
compare_fn = operator.ge
if not compare_fn(value, minimum):
raise ValidationError(
MESSAGES['minimum']['invalid'].format(value, comparison_text, minimum),
) | [
"\n Validator function for validating that a value does not violate it's\n minimum allowed value. This validation can be inclusive, or exclusive of\n the minimum depending on the value of `is_exclusive`.\n "
] |
Please provide a description of the function:def generate_minimum_validator(minimum, exclusiveMinimum=False, **kwargs):
return functools.partial(validate_minimum, minimum=minimum, is_exclusive=exclusiveMinimum) | [
"\n Generator function returning a callable for minimum value validation.\n "
] |
Please provide a description of the function:def validate_maximum(value, maximum, is_exclusive, **kwargs):
if is_exclusive:
comparison_text = "less than"
compare_fn = operator.lt
else:
comparison_text = "less than or equal to"
compare_fn = operator.le
if not compare_fn(value, maximum):
raise ValidationError(
MESSAGES['maximum']['invalid'].format(value, comparison_text, maximum),
) | [
"\n Validator function for validating that a value does not violate it's\n maximum allowed value. This validation can be inclusive, or exclusive of\n the maximum depending on the value of `is_exclusive`.\n "
] |
Please provide a description of the function:def generate_maximum_validator(maximum, exclusiveMaximum=False, **kwargs):
return functools.partial(validate_maximum, maximum=maximum, is_exclusive=exclusiveMaximum) | [
"\n Generator function returning a callable for maximum value validation.\n "
] |
Please provide a description of the function:def validate_min_items(value, minimum, **kwargs):
if len(value) < minimum:
raise ValidationError(
MESSAGES['min_items']['invalid'].format(
minimum, len(value),
),
) | [
"\n Validator for ARRAY types to enforce a minimum number of items allowed for\n the ARRAY to be valid.\n "
] |
Please provide a description of the function:def validate_max_items(value, maximum, **kwargs):
if len(value) > maximum:
raise ValidationError(
MESSAGES['max_items']['invalid'].format(
maximum, len(value),
),
) | [
"\n Validator for ARRAY types to enforce a maximum number of items allowed for\n the ARRAY to be valid.\n "
] |
Please provide a description of the function:def validate_unique_items(value, **kwargs):
# we can't just look at the items themselves since 0 and False are treated
# the same as dictionary keys, and objects aren't hashable.
counter = collections.Counter((
json.dumps(v, sort_keys=True) for v in value
))
dupes = [json.loads(v) for v, count in counter.items() if count > 1]
if dupes:
raise ValidationError(
MESSAGES['unique_items']['invalid'].format(
repr(dupes),
),
) | [
"\n Validator for ARRAY types to enforce that all array items must be unique.\n "
] |
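A sketch of why the JSON round-trip matters here (values illustrative):

validate_unique_items([0, False])            # passes: serialized as '0' and 'false'
validate_unique_items([{'a': 1}, {'a': 1}])  # raises ValidationError for the duplicate
                                             # dicts, even though dicts are unhashable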
Please provide a description of the function:def validate_object(obj, field_validators=None, non_field_validators=None,
schema=None, context=None):
if schema is None:
schema = {}
if context is None:
context = {}
if field_validators is None:
field_validators = ValidationDict()
if non_field_validators is None:
non_field_validators = ValidationList()
from flex.validation.schema import (
construct_schema_validators,
)
schema_validators = construct_schema_validators(schema, context)
if '$ref' in schema_validators and hasattr(schema_validators['$ref'], 'validators'):
ref_ = field_validators.pop('$ref')
for k, v in ref_.validators.items():
if k not in schema_validators:
schema_validators.add_validator(k, v)
if 'discriminator' in schema:
schema_validators = add_polymorphism_requirements(obj, schema, context, schema_validators)
# delete resolved discriminator to avoid infinite recursion
del schema['discriminator']
schema_validators.update(field_validators)
schema_validators.validate_object(obj, context=context)
non_field_validators.validate_object(obj, context=context)
return obj | [
"\n Takes a mapping and applies a mapping of validator functions to it\n collecting and reraising any validation errors that occur.\n "
] |
Please provide a description of the function:def generate_value_processor(type_, collectionFormat=None, items=None, **kwargs):
processors = []
if is_non_string_iterable(type_):
assert False, "This should not be possible"
else:
if type_ == ARRAY and collectionFormat:
if collectionFormat in DELIMETERS:
delimiter = DELIMETERS[collectionFormat]
# split the string based on the delimiter specified by the
# `collectionFormat`
processors.append(operator.methodcaller('split', delimiter))
else:
if collectionFormat != MULTI:
raise TypeError("collectionFormat not implemented")
processors.append(add_string_into_list)
# remove any Falsy values like empty strings.
processors.append(functools.partial(filter, bool))
# strip off any whitespace
processors.append(functools.partial(map, operator.methodcaller('strip')))
if items is not None:
if isinstance(items, collections.Mapping):
items_processors = itertools.repeat(
generate_value_processor(**items)
)
elif isinstance(items, collections.Sequence):
items_processors = itertools.chain(
(generate_value_processor(**item) for item in items),
itertools.repeat(lambda v: v),
)
elif isinstance(items, six.string_types):
raise NotImplementedError("Not implemented")
else:
assert False, "Should not be possible"
# 1. zip the processor and the array items together
# 2. apply the processor to each array item.
# 3. cast the starmap generator to a list.
processors.append(
chain_reduce_partial(
functools.partial(zip, items_processors),
functools.partial(itertools.starmap, lambda fn, v: fn(v)),
list,
)
)
else:
processors.append(
functools.partial(cast_value_to_type, type_=type_)
)
def processor(value, **kwargs):
try:
return chain_reduce_partial(*processors)(value)
except (ValueError, TypeError):
return value
return processor | [
"\n Create a callable that will take the string value of a header and cast it\n to the appropriate type. This can involve:\n\n - splitting a header of type 'array' by its delimeters.\n - type casting the internal elements of the array.\n "
] |
Please provide a description of the function:def validate_request_method_to_operation(request_method, path_definition):
try:
operation_definition = path_definition[request_method]
except KeyError:
allowed_methods = set(REQUEST_METHODS).intersection(path_definition.keys())
raise ValidationError(
MESSAGES['request']['invalid_method'].format(
request_method, allowed_methods,
),
)
return operation_definition | [
"\n Given a request method, validate that the request method is valid for the\n api path.\n\n If so, return the operation definition related to this request method.\n "
] |
Please provide a description of the function:def validate_path_to_api_path(path, paths, basePath='', context=None, **kwargs):
if context is None:
context = {}
try:
api_path = match_path_to_api_path(
path_definitions=paths,
target_path=path,
base_path=basePath,
context=context,
)
except LookupError as err:
raise ValidationError(str(err))
except MultiplePathsFound as err:
raise ValidationError(str(err))
return api_path | [
"\n Given a path, find the api_path it matches.\n "
] |
Please provide a description of the function:def validate_path_parameters(target_path, api_path, path_parameters, context):
base_path = context.get('basePath', '')
full_api_path = re.sub(NORMALIZE_SLASH_REGEX, '/', base_path + api_path)
parameter_values = get_path_parameter_values(
target_path, full_api_path, path_parameters, context,
)
validate_parameters(parameter_values, path_parameters, context=context) | [
"\n Helper function for validating a request path\n "
] |
Please provide a description of the function:def construct_parameter_validators(parameter, context):
validators = ValidationDict()
if '$ref' in parameter:
validators.add_validator(
'$ref', ParameterReferenceValidator(parameter['$ref'], context),
)
for key in parameter:
if key in validator_mapping:
validators.add_validator(
key,
validator_mapping[key](context=context, **parameter),
)
if 'schema' in parameter:
schema_validators = construct_schema_validators(parameter['schema'], context=context)
for key, value in schema_validators.items():
validators.setdefault(key, value)
return validators | [
"\n Constructs a dictionary of validator functions for the provided parameter\n definition.\n "
] |
Please provide a description of the function:def construct_multi_parameter_validators(parameters, context):
validators = ValidationDict()
for parameter in parameters:
key = parameter['name']
if key in validators:
raise ValueError("Duplicate parameter name {0}".format(key))
parameter_validators = construct_parameter_validators(parameter, context=context)
validators.add_validator(
key,
generate_object_validator(field_validators=parameter_validators),
)
return validators | [
"\n Given an iterable of parameters, returns a dictionary of validator\n functions for each parameter. Note that this expects the parameters to be\n unique in their name value, and throws an error if this is not the case.\n "
] |
Please provide a description of the function:def generate_path_parameters_validator(api_path, path_parameters, context):
path_parameter_validator = functools.partial(
validate_path_parameters,
api_path=api_path,
path_parameters=path_parameters,
context=context,
)
return path_parameter_validator | [
"\n Generates a validator function that given a path, validates that it against\n the path parameters\n "
] |
Please provide a description of the function:def escape_regex_special_chars(api_path):
def substitute(string, replacements):
pattern, repl = replacements
return re.sub(pattern, repl, string)
return functools.reduce(substitute, REGEX_REPLACEMENTS, api_path) | [
"\n Turns the non prametrized path components into strings subtable for using\n as a regex pattern. This primarily involves escaping special characters so\n that the actual character is matched in the regex.\n "
] |
Please provide a description of the function:def construct_parameter_pattern(parameter):
name = parameter['name']
type = parameter['type']
repeated = '[^/]'
if type == 'integer':
repeated = r'\d'  # raw string avoids an invalid escape sequence warning
return "(?P<{name}>{repeated}+)".format(name=name, repeated=repeated) | [
"\n Given a parameter definition returns a regex pattern that will match that\n part of the path.\n "
] |
Please provide a description of the function:def process_path_part(part, parameters):
if PARAMETER_REGEX.match(part):
parameter_name = part.strip('{}')
try:
parameter = find_parameter(
parameters,
name=parameter_name,
in_=PATH
)
except ValueError:
pass
else:
return construct_parameter_pattern(parameter)
return escape_regex_special_chars(part) | [
"\n Given a part of a path either:\n - If it is a parameter:\n parse it to a regex group\n - Otherwise:\n escape any special regex characters\n "
] |
Please provide a description of the function:def path_to_pattern(api_path, parameters):
parts = re.split(PARAMETER_REGEX, api_path)
pattern = ''.join((process_path_part(part, parameters) for part in parts))
if not pattern.startswith('^'):
pattern = "^{0}".format(pattern)
if not pattern.endswith('$'):
pattern = "{0}$".format(pattern)
return pattern | [
"\n Given an api path, possibly with parameter notation, return a pattern\n suitable for turing into a regular expression which will match request\n paths that conform to the parameter definitions and the api path.\n "
] |
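An illustrative call, assuming an integer path parameter named id:

import re

parameters = [{'name': 'id', 'in': 'path', 'type': 'integer', 'required': True}]
pattern = path_to_pattern('/pets/{id}', parameters)  # e.g. '^/pets/(?P<id>\\d+)$'
assert re.match(pattern, '/pets/42')
assert not re.match(pattern, '/pets/fido')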
Please provide a description of the function:def match_path_to_api_path(path_definitions, target_path, base_path='',
context=None):
if context is None:
context = {}
assert isinstance(context, collections.Mapping)
if target_path.startswith(base_path):
# Convert all of the api paths into Path instances for easier regex
# matching.
normalized_target_path = re.sub(NORMALIZE_SLASH_REGEX, '/',
target_path)
matching_api_paths = list()
matching_api_paths_regex = list()
for p, v in path_definitions.items():
# Doing this to help with case where we might have base_path
# being just /, and then the path starts with / as well.
full_path = re.sub(NORMALIZE_SLASH_REGEX, '/', base_path + p)
r = path_to_regex(
api_path=full_path,
path_parameters=extract_path_parameters(v),
operation_parameters=extract_operation_parameters(v),
context=context,
)
if full_path == normalized_target_path:
matching_api_paths.append(p)
elif r.match(normalized_target_path):
matching_api_paths_regex.\
append((p, r.match(normalized_target_path)))
# Keep it consistent with the previous behavior
target_path = target_path[len(base_path):]
else:
matching_api_paths = []
matching_api_paths_regex = []
if not matching_api_paths and not matching_api_paths_regex:
fstr = MESSAGES['path']['no_matching_paths_found'].format(target_path)
raise LookupError(fstr)
elif len(matching_api_paths) == 1:
return matching_api_paths[0]
elif len(matching_api_paths) > 1:
raise MultiplePathsFound(
MESSAGES['path']['multiple_paths_found'].format(
target_path, matching_api_paths,
)
)
elif len(matching_api_paths_regex) == 1:
return matching_api_paths_regex[0][0]
elif len(matching_api_paths_regex) > 1:
# TODO: This area needs improved logic.
# We check to see if any of the matched paths is longer than
# the others. If so, we *assume* it is the correct match. This is
# going to be prone to false positives in certain cases.
matches_by_path_size = collections.defaultdict(list)
for path, match in matching_api_paths_regex:
matches_by_path_size[len(path)].append(path)
longest_match = max(matches_by_path_size.keys())
if len(matches_by_path_size[longest_match]) == 1:
return matches_by_path_size[longest_match][0]
raise MultiplePathsFound(
MESSAGES['path']['multiple_paths_found'].format(
target_path, [v[0] for v in matching_api_paths_regex],
)
)
else:
return matching_api_paths_regex[0][0] | [
"\n Match a request or response path to one of the api paths.\n\n Anything other than exactly one match is an error condition.\n "
] |
Please provide a description of the function:def validate_request(request, schema):
with ErrorDict() as errors:
# 1
try:
api_path = validate_path_to_api_path(
path=request.path,
context=schema,
**schema
)
except ValidationError as err:
errors['path'].add_error(err.detail)
return # this causes an exception to be raised since errors is no longer falsy.
path_definition = schema['paths'][api_path] or {}
if not path_definition:
# TODO: is it valid to not have a definition for a path?
return
# 2
try:
operation_definition = validate_request_method_to_operation(
request_method=request.method,
path_definition=path_definition,
)
except ValidationError as err:
errors['method'].add_error(err.detail)
return
if operation_definition is None:
# TODO: is this compliant with swagger, can path operations have a null
# definition?
return
# 3
operation_validators = construct_operation_validators(
api_path=api_path,
path_definition=path_definition,
operation_definition=operation_definition,
context=schema,
)
try:
validate_operation(request, operation_validators, context=schema)
except ValidationError as err:
errors['method'].add_error(err.detail) | [
"\n Request validation does the following steps.\n\n 1. validate that the path matches one of the defined paths in the schema.\n 2. validate that the request method conforms to a supported methods for the given path.\n 3. validate that the request parameters conform to the parameter\n definitions for the operation definition.\n "
] |
Please provide a description of the function:def normalize_request(request):
if isinstance(request, Request):
return request
for normalizer in REQUEST_NORMALIZERS:
try:
return normalizer(request)
except TypeError:
continue
raise ValueError("Unable to normalize the provided request") | [
"\n Given a request, normalize it to the internal Request class.\n "
] |
Please provide a description of the function:def normalize_response(response, request=None):
if isinstance(response, Response):
return response
if request is not None and not isinstance(request, Request):
request = normalize_request(request)
for normalizer in RESPONSE_NORMALIZERS:
try:
return normalizer(response, request=request)
except TypeError:
continue
raise ValueError("Unable to normalize the provided response") | [
"\n Given a response, normalize it to the internal Response class. This also\n involves normalizing the associated request object.\n "
] |
Please provide a description of the function:def data(self):
if not self.body:
return self.body
elif self.body is EMPTY:
return EMPTY
elif self.content_type and self.content_type.startswith('application/json'):
try:
if isinstance(self.body, six.binary_type):
return json.loads(self.body.decode('utf-8'))
else:
return json.loads(self.body)
except ValueError as e:
if isinstance(e, JSONDecodeError):
# this will only be True for Python3+
raise e
raise JSONDecodeError(str(e))
elif self.content_type == 'application/x-www-form-urlencoded':
return dict(urlparse.parse_qsl(self.body))
else:
raise NotImplementedError("No parser for content type") | [
"\n TODO: What is the right way to do this?\n "
] |
Please provide a description of the function:def add_error(self, error):
if is_non_string_iterable(error) and not isinstance(error, collections.Mapping):
for value in error:
self.add_error(value)
else:
self.append(error) | [
"\n In the case where a list/tuple is passed in this just extends the list\n rather than having nested lists.\n\n Otherwise, the value is appended.\n "
] |
Please provide a description of the function:def deep_equal(a, b):
if is_any_string_type(a) and is_any_string_type(b):
if isinstance(a, six.binary_type):
a = six.text_type(a, encoding='utf-8')
if isinstance(b, six.binary_type):
b = six.text_type(b, encoding='utf-8')
return a == b
return a == b and isinstance(a, type(b)) and isinstance(b, type(a)) | [
"\n Because of things in python like:\n >>> 1 == 1.0\n True\n >>> 1 == True\n True\n >>> b'test' == 'test' # python3\n False\n "
] |
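A few concrete cases that follow from the doctest above:

deep_equal(1, 1.0)           # False: equal values, different types
deep_equal(1, True)          # False: bool and int are kept distinct
deep_equal(b'test', 'test')  # True: bytes are decoded to text before comparing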
Please provide a description of the function:def format_errors(errors, indent=0, prefix='', suffix=''):
if is_single_item_iterable(errors):
errors = errors[0]
if isinstance(errors, SINGULAR_TYPES):
yield indent_message(repr(errors), indent, prefix=prefix, suffix=suffix)
elif isinstance(errors, collections.Mapping):
for key, value in errors.items():
assert isinstance(key, SINGULAR_TYPES), type(key)
if isinstance(value, SINGULAR_TYPES):
message = "{0}: {1}".format(repr(key), repr(value))
yield indent_message(message, indent, prefix=prefix, suffix=suffix)
else:
yield indent_message(repr(key), indent, prefix=prefix, suffix=':')
for message in format_errors(value, indent + 4, prefix='- '):
yield message
elif is_non_string_iterable(errors):
# for making the rhs of the numbers line up
extra_indent = int(math.ceil(math.log10(len(errors)))) + 2
for index, value in enumerate(errors):
list_prefix = "{0}. ".format(index)
messages = format_errors(
value,
indent=indent + extra_indent - len(list_prefix),
prefix=list_prefix,
)
for message in messages:
yield message
else:
assert False, "should not be possible" | [
"\n string: \"example\"\n\n \"example\"\n\n dict:\n \"example\":\n -\n\n "
] |
Please provide a description of the function:def generate_header_validator(headers, context, **kwargs):
validators = ValidationDict()
for header_definition in headers:
header_processor = generate_value_processor(
context=context,
**header_definition
)
header_validator = generate_object_validator(
field_validators=construct_header_validators(header_definition, context=context),
)
validators.add_property_validator(
header_definition['name'],
chain_reduce_partial(
header_processor,
header_validator,
),
)
return generate_object_validator(field_validators=validators) | [
"\n Generates a validation function that will validate a dictionary of headers.\n "
] |
Please provide a description of the function:def generate_parameters_validator(api_path, path_definition, parameters,
context, **kwargs):
# TODO: figure out how to merge this with the same code in response
# validation.
validators = ValidationDict()
path_level_parameters = dereference_parameter_list(
path_definition.get('parameters', []),
context,
)
operation_level_parameters = dereference_parameter_list(
parameters,
context,
)
all_parameters = merge_parameter_lists(
path_level_parameters,
operation_level_parameters,
)
# PATH
in_path_parameters = filter_parameters(all_parameters, in_=PATH)
validators.add_validator(
'path',
chain_reduce_partial(
attrgetter('path'),
generate_path_parameters_validator(api_path, in_path_parameters, context),
),
)
# QUERY
in_query_parameters = filter_parameters(all_parameters, in_=QUERY)
validators.add_validator(
'query',
chain_reduce_partial(
attrgetter('query_data'),
functools.partial(
validate_query_parameters,
query_parameters=in_query_parameters,
context=context,
),
),
)
# HEADERS
in_header_parameters = filter_parameters(all_parameters, in_=HEADER)
validators.add_validator(
'headers',
chain_reduce_partial(
attrgetter('headers'),
generate_header_validator(in_header_parameters, context),
),
)
# FORM_DATA
# in_form_data_parameters = filter_parameters(all_parameters, in_=FORM_DATA)
# validators.add_validator(
# 'form_data',
# chain_reduce_partial(
# attrgetter('data'),
# generate_form_data_validator(in_form_data_parameters, context),
# )
# )
# REQUEST_BODY
in_request_body_parameters = filter_parameters(all_parameters, in_=BODY)
validators.add_validator(
'request_body',
chain_reduce_partial(
attrgetter('data'),
generate_request_body_validator(in_request_body_parameters, context),
)
)
return generate_object_validator(field_validators=validators) | [
"\n Generates a validator function to validate.\n\n - request.path against the path parameters.\n - request.query against the query parameters.\n - request.headers against the header parameters.\n - TODO: request.body against the body parameters.\n - TODO: request.formData against any form data.\n "
] |
Please provide a description of the function:def construct_operation_validators(api_path, path_definition, operation_definition, context):
validators = {}
# sanity check
assert 'context' not in operation_definition
assert 'api_path' not in operation_definition
assert 'path_definition' not in operation_definition
for key in operation_definition.keys():
if key not in validator_mapping:
# TODO: is this the right thing to do?
continue
validators[key] = validator_mapping[key](
context=context,
api_path=api_path,
path_definition=path_definition,
**operation_definition
)
# Global defaults
if 'consumes' in context and 'consumes' not in validators:
validators['consumes'] = validator_mapping['consumes'](**context)
if 'parameters' in path_definition and 'parameters' not in validators:
validators['parameters'] = validator_mapping['parameters'](
context=context,
api_path=api_path,
path_definition=path_definition,
parameters=path_definition['parameters'],
**operation_definition
)
return validators | [
"\n - consumes (did the request conform to the content types this api consumes)\n - produces (did the response conform to the content types this endpoint produces)\n - parameters (did the parameters of this request validate)\n TODO: move path parameter validation to here, because each operation\n can override any of the path level parameters.\n - schemes (was the request scheme correct)\n - security: TODO since security isn't yet implemented.\n "
] |
Please provide a description of the function:def partial_safe_wraps(wrapped_func, *args, **kwargs):
if isinstance(wrapped_func, functools.partial):
return partial_safe_wraps(wrapped_func.func)
else:
return functools.wraps(wrapped_func) | [
"\n A version of `functools.wraps` that is safe to wrap a partial in.\n "
] |
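A hedged sketch of the difference this makes: on Python 2, functools.wraps applied directly to a partial raises AttributeError because partials have no __name__, while this helper unwraps to the inner function first.

import functools

def greet(name):
    return 'Hello, {0}!'.format(name)

bound = functools.partial(greet, 'world')

@partial_safe_wraps(bound)
def wrapper(*args, **kwargs):
    return bound()

assert wrapper.__name__ == 'greet'  # attributes come from the unwrapped function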
Please provide a description of the function:def skip_if_empty(func):
@partial_safe_wraps(func)
def inner(value, *args, **kwargs):
if value is EMPTY:
return
else:
return func(value, *args, **kwargs)
return inner | [
"\n Decorator for validation functions which makes them pass if the value\n passed in is the EMPTY sentinal value.\n "
] |
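An illustrative use, assuming flex's EMPTY sentinel and ValidationError; the validator name is hypothetical:

@skip_if_empty
def validate_positive(value, **kwargs):
    if value <= 0:
        raise ValidationError('not positive')

validate_positive(EMPTY)  # returns None without running the check
validate_positive(5)      # runs normally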
Please provide a description of the function:def rewrite_reserved_words(func):
@partial_safe_wraps(func)
def inner(*args, **kwargs):
for word in RESERVED_WORDS:
key = "{0}_".format(word)
if key in kwargs:
kwargs[word] = kwargs.pop(key)
return func(*args, **kwargs)
return inner | [
"\n Given a function whos kwargs need to contain a reserved word such as `in`,\n allow calling that function with the keyword as `in_`, such that function\n kwargs are rewritten to use the reserved word.\n "
] |
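A minimal sketch, assuming 'in' is among RESERVED_WORDS; the decorated function is hypothetical:

@rewrite_reserved_words
def describe(**kwargs):
    return kwargs

describe(in_='path')  # -> {'in': 'path'}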
Please provide a description of the function:def any_validator(obj, validators, **kwargs):
if not len(validators) > 1:
raise ValueError(
"any_validator requires at least 2 validator. Only got "
"{0}".format(len(validators))
)
errors = ErrorDict()
for key, validator in validators.items():
try:
validator(obj, **kwargs)
except ValidationError as err:
errors[key] = err.detail
else:
break
else:
if len(errors) == 1:
# Special case for a single error. Just raise it as if it was the
# only validator run.
error = list(errors.values())[0]  # list() so indexing works on Python 3
raise ValidationError(error)
else:
# Raise all of the errors with the key namespaces.
errors.raise_() | [
"\n Attempt multiple validators on an object.\n\n - If any pass, then all validation passes.\n - Otherwise, raise all of the errors.\n "
] |
Please provide a description of the function:def _extract_to_tempdir(archive_filename):
if not os.path.exists(archive_filename):
raise Exception("Archive '%s' does not exist" % (archive_filename))
tempdir = tempfile.mkdtemp(prefix="metaextract_")
current_cwd = os.getcwd()
try:
if tarfile.is_tarfile(archive_filename):
with tarfile.open(archive_filename) as f:
f.extractall(tempdir)
elif zipfile.is_zipfile(archive_filename):
with zipfile.ZipFile(archive_filename) as f:
f.extractall(tempdir)
else:
raise Exception("Can not extract '%s'. "
"Not a tar or zip file" % archive_filename)
os.chdir(tempdir)
yield tempdir
finally:
os.chdir(current_cwd)
shutil.rmtree(tempdir) | [
"extract the given tarball or zipfile to a tempdir and change\n the cwd to the new tempdir. Delete the tempdir at the end"
] |
Please provide a description of the function:def _enter_single_subdir(root_dir):
current_cwd = os.getcwd()
try:
dest_dir = root_dir
dir_list = os.listdir(root_dir)
if len(dir_list) == 1:
first = os.path.join(root_dir, dir_list[0])
if os.path.isdir(first):
dest_dir = first
else:
dest_dir = root_dir
os.chdir(dest_dir)
yield dest_dir
finally:
os.chdir(current_cwd) | [
"if the given directory has just a single subdir, enter that"
] |
Please provide a description of the function:def _set_file_encoding_utf8(filename):
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write("# -*- coding: utf-8 -*-\n" + content) | [
"set a encoding header as suggested in PEP-0263. This\n is not entirely correct because we don't know the encoding of the\n given file but it's at least a chance to get metadata from the setup.py"
] |
Please provide a description of the function:def _setup_py_run_from_dir(root_dir, py_interpreter):
data = {}
with _enter_single_subdir(root_dir) as single_subdir:
if not os.path.exists("setup.py"):
raise Exception("'setup.py' does not exist in '%s'" % (
single_subdir))
# generate a temporary json file which contains the metadata
output_json = tempfile.NamedTemporaryFile()
cmd = "%s setup.py -q --command-packages metaextract " \
"metaextract -o %s " % (py_interpreter, output_json.name)
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError:
# try again with a encoding in setup.py
_set_file_encoding_utf8("setup.py")
subprocess.check_output(cmd, shell=True)
# read json file and return data
with open(output_json.name, "r") as f:
data = json.loads(f.read())
# sort some of the keys if the dict values are lists
for key in ['data_files', 'entry_points', 'extras_require',
'install_requires', 'setup_requires', 'scripts',
'tests_require', 'tests_suite']:
if key in data['data'] and isinstance(data['data'][key], list):
data['data'][key] = sorted(data['data'][key])
return data | [
"run the extractmeta command via the setup.py in the given root_dir.\n the output of extractmeta is json and is stored in a tempfile\n which is then read in and returned as data"
] |
Please provide a description of the function:def from_archive(archive_filename, py_interpreter=sys.executable):
with _extract_to_tempdir(archive_filename) as root_dir:
data = _setup_py_run_from_dir(root_dir, py_interpreter)
return data | [
"extract metadata from a given sdist archive file\n\n :param archive_filename: a sdist archive file\n :param py_interpreter: The full path to the used python interpreter\n\n :returns: a json blob with metadata\n"
] |
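A hypothetical invocation; the archive name is illustrative, and metaextract must be importable by the target interpreter:

from metaextract import utils as meta_utils

data = meta_utils.from_archive('dist/mypackage-1.0.tar.gz')
print(data['data'].get('install_requires'))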
Please provide a description of the function:def cimread(source, packageMap=None, nsURI=None, start_dict=None):
# Start the clock.
t0 = time()
#logger.info('##########################################################################')
logger.info('START of parsing file \"%s\"', source)
logger_errors_grouped = {}
# A map of uuids to CIM objects to be returned.
d = start_dict if start_dict is not None else {}
# Obtain the namespaces from the input file
namespaces = xmlns(source)
ns_rdf = get_rdf_ns(namespaces)
if bool(nsURI) != bool(packageMap):
raise ValueError(
'Either pass "packageMap" AND "nsURI" or none of them.')
elif (nsURI is None) and (packageMap is None):
nsURI, packageMap = get_cim_ns(namespaces)
# CIM element tag base (e.g. {http://iec.ch/TC57/2009/CIM-schema-cim14#}).
base = "{%s#}" % nsURI
# Length of element tag base.
m = len(base)
# First pass instantiates the classes.
context = iterparse(source, ("start", "end"))
# Turn it into an iterator (required for cElementTree).
context = iter(context)
# Get the root element ({http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF).
_, root = next(context)
for event, elem in context:
# Process 'end' elements in the CIM namespace.
if event == "end" and elem.tag[:m] == base:
# Unique resource identifier for the CIM object.
uuid = elem.get("{%s}ID" % ns_rdf)
if uuid is not None: # class
# Element tag without namespace (e.g. VoltageLevel).
tag = elem.tag[m:]
try:
mname = packageMap[tag]
except KeyError:
logger.error("Unable to locate module for: %s (%s)",
tag, uuid)
root.clear()
continue
# Import the module for the CIM object.
module = __import__(mname, globals(), locals(), [tag], 0)
# Get the CIM class from the module.
klass = getattr(module, tag)
# Instantiate the class and map it to the uuid.
d[uuid] = klass(UUID=uuid)
# Clear children of the root element to minimise memory usage.
root.clear()
# Reset stream
if hasattr(source, "seek"):
source.seek(0)
## Second pass sets attributes and references.
context = iter( iterparse(source, ("start", "end")) )
# Get the root element ({http://www.w3.org/1999/02/22-rdf-syntax-ns#}RDF).
_, root = next(context)
for event, elem in context:
# Process 'start' elements in the CIM namespace.
if event == "start" and elem.tag[:m] == base:
uuid = elem.get("{%s}ID" % ns_rdf)
if uuid is None:
uuid = elem.get("{%s}about" % ns_rdf)
if uuid is not None:
uuid = uuid[1:]
if uuid is not None:
# Locate the CIM object using the uuid.
try:
obj = d[uuid]
except KeyError:
logger.error("Missing '%s' object with uuid: %s",
elem.tag[m:], uuid)
root.clear()
continue
# Iterate over attributes/references.
for event, elem in context:
# Process end events with elements in the CIM namespace.
if event == "end" and elem.tag[:m] == base:
# Break if class closing element (e.g. </cim:Terminal>).
if elem.get("{%s}ID" % ns_rdf) is None and \
elem.get("{%s}about" % ns_rdf) is None:
# Get the attribute/reference name.
attr = elem.tag[m:].rsplit(".")[-1]
if not hasattr(obj, attr):
error_msg = "'%s' has not attribute '%s'" %(obj.__class__.__name__, attr)
try:
logger_errors_grouped[error_msg] += 1
except KeyError:
logger_errors_grouped[error_msg] = 1
# logger.error("'%s' has not attribute '%s'",
# obj.__class__.__name__, attr)
continue
# Use the rdf:resource attribute to distinguish
# between attributes and references/enums.
uuid2 = elem.get("{%s}resource" % ns_rdf)
if uuid2 is None: # attribute
# Convert value type using the default value.
try:
typ = type( getattr(obj, attr) )
if typ == type(True): # KKG: Test if it is boolean value
# KKG: NB: The function bool("false") returns True, because it is called upon non-empty string!
# This means that it wrongly reads "false" value as boolean True and this is why this special case testing is necessary
if str.title(elem.text) == 'True':
setattr(obj, attr, True)
else:
setattr(obj, attr, False)
else:
setattr(obj, attr, typ(elem.text))
except TypeError:
pass
else: # reference or enum
# Use the '#' prefix to distinguish between
# references and enumerations.
if uuid2[0] == "#": # reference
try:
val = d[uuid2[1:]] # remove '#' prefix
except KeyError:
logger.error("Referenced '%s' [%s] "
"object missing.",
obj.__class__.__name__,
uuid2[1:])
continue
default = getattr(obj, attr)
if default == None: # 1..1 or 1..n
# Rely on properties to set any
# bi-directional references.
setattr(obj, attr, val)
elif isinstance(default, list): # many
# Use 'add*' method to set reference.
getattr(obj, ("add%s" % attr))(val)
# else:
# logger.error("Reference error [%s].",
# default)
else: # enum
val = uuid2.rsplit(".", 1)[1]
setattr(obj, attr, val)
else:
# Finished setting object attributes.
break
# Clear children of the root element to minimise memory usage.
root.clear()
if logger_errors_grouped:
for error, count in logger_errors_grouped.items():
logging_message = '%s : %d times' % (error, count)
logger.warning(logging_message)
logger.info('Created a total of %d CIM objects in %.2fs.', len(d), time() - t0)
logger.info('END of parsing file \"%s\"\n', source)
return d | [
" CIM RDF/XML parser.\n\n @type source: File-like object or a path to a file.\n @param source: CIM RDF/XML file.\n @type profile: dict\n @param packageMap: Map of class name to PyCIM package name. All CIM\n classes are under the one namespace, but are arranged into sub-packages\n so a map from class name to package name is required. Defaults to the\n latest CIM version, but may be set to a map from a profile to return\n a profile model.\n @type profile: string\n @param nsURI: CIM namespace URI used in the RDF/XML file. For example:\n http://iec.ch/TC57/2010/CIM-schema-cim15\n @rtype: dict\n @return: Map of UUID to CIM object.\n\n @author: Richard Lincoln <[email protected]>\n "
] |
Please provide a description of the function:def xmlns(source):
namespaces = {}
events=("end", "start-ns", "end-ns")
for (event, elem) in iterparse(source, events):
if event == "start-ns":
prefix, ns = elem
namespaces[prefix] = ns
elif event == "end":
break
# Reset stream
if hasattr(source, "seek"):
source.seek(0)
return namespaces | [
"\n Returns a map of prefix to namespace for the given XML file.\n\n "
] |
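An illustrative call (the file name is hypothetical):

namespaces = xmlns('my_model.xml')
# e.g. {'cim': 'http://iec.ch/TC57/2009/CIM-schema-cim14#',
#       'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'}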
Please provide a description of the function:def get_cim_ns(namespaces):
try:
ns = namespaces['cim']
if ns.endswith('#'):
ns = ns[:-1]
except KeyError:
ns = ''
logger.error('No CIM namespace defined in input file.')
CIM16nsURI = 'http://iec.ch/TC57/2013/CIM-schema-cim16'
nsuri = ns
import CIM14, CIM15
if ns == CIM14.nsURI:
ns = 'CIM14'
elif ns == CIM15.nsURI:
ns = 'CIM15'
elif ns == CIM16nsURI:
ns = 'CIM15'
else:
ns = 'CIM15'
logger.warning('Could not detect CIM version. Using %s.' % ns)
cim = __import__(ns, globals(), locals(), ['nsURI', 'packageMap'])
return nsuri, cim.packageMap | [
"\n Tries to obtain the CIM version from the given map of namespaces and\n returns the appropriate *nsURI* and *packageMap*.\n\n "
] |
Please provide a description of the function:def cimwrite(d, source, encoding="utf-8"):
# Start the clock
t0 = time()
w = XMLWriter(source, encoding)
# Write the XML declaration.
w.declaration()
# Add a '#' suffix to the CIM namespace URI if not present.
nsCIM = nsURI if nsURI[-1] == "#" else nsURI + "#"
# Start the root RDF element and declare namespaces.
xmlns = {u"xmlns:%s" % nsPrefixRDF: nsRDF, u"xmlns:%s" % nsPrefix: nsCIM}
rdf = w.start(u"%s:RDF" % nsPrefixRDF, xmlns)
# Iterate over all UUID, CIM object pairs in the given dictionary.
for uuid, obj in d.items():
w.start(u"%s:%s" % (nsPrefix, obj.__class__.__name__),
{u"%s:ID" % nsPrefixRDF: obj.UUID})
mro = obj.__class__.mro()
mro.reverse()
# Serialise attributes.
for klass in mro[2:]: # skip 'object' and 'Element'
attrs = [a for a in klass._attrs if a not in klass._enums]
for attr in attrs:
val = getattr(obj, attr)
if val != klass._defaults[attr]:
w.element(u"%s:%s.%s" % (nsPrefix, klass.__name__, attr),
str(val))
# Serialise enumeration data-types.
for klass in mro[2:]: # skip 'object' and 'Element'
enums = [a for a in klass._attrs if a in klass._enums]
for enum in enums:
val = getattr(obj, enum)
dt = klass._enums[enum]
w.element(u"%s:%s.%s" % (nsPrefix, klass.__name__, enum),
attrib={u"%s:resource" % nsPrefixRDF:
u"%s%s.%s" % (nsCIM, dt, val)})
# Serialise references.
for klass in mro[2:]: # skip 'object' and 'Element'
# FIXME: serialise 'many' references.
refs = [r for r in klass._refs if r not in klass._many_refs]
for ref in refs:
val = getattr(obj, ref)
if val is not None:
w.element(u"%s:%s.%s" % (nsPrefix, klass.__name__, ref),
attrib={u"%s:resource" % nsPrefixRDF:
u"#%s" % val.UUID})
w.end()
# Close the root RDF element.
w.close(rdf)
# Flush the output stream.
w.flush()
logger.info("%d CIM objects serialised in %.2fs.", len(d), time() - t0) | [
"CIM RDF/XML serializer.\n\n @type d: dict\n @param d: Map of URIs to CIM objects.\n @type source: File or file-like object.\n @param source: This object must implement a C{write} method\n that takes an 8-bit string.\n @type encoding: string\n @param encoding: Character encoding defaults to \"utf-8\", but can also\n be set to \"us-ascii\".\n @rtype: bool\n @return: Write success.\n "
] |
Please provide a description of the function:def create_block(mc, block_id, subtype=None):
# Get player tile position and real position.
ptx, pty, ptz = mc.player.getTilePos()
px, py, pz = mc.player.getPos()
# Create block at current player tile location.
if subtype is None:
mc.setBlock(ptx, pty, ptz, block_id)
else:
mc.setBlock(ptx, pty, ptz, block_id, subtype)
# Move the player's real positon up one block.
mc.player.setPos(px, py+1, pz) | [
"Build a block with the specified id and subtype under the player in the\n Minecraft world. Subtype is optional and can be specified as None to use\n the default subtype for the block.\n "
] |
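A hedged usage sketch, assuming the mcpi library and a running Minecraft: Pi Edition server:

from mcpi import minecraft, block

mc = minecraft.Minecraft.create()
create_block(mc, block.STONE.id)     # stone, default subtype
create_block(mc, block.WOOL.id, 14)  # red wool via the subtype argument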
Please provide a description of the function:def _busy_wait_ms(self, ms):
start = time.time()
delta = ms/1000.0
while (time.time() - start) <= delta:
pass | [
"Busy wait for the specified number of milliseconds."
] |
Please provide a description of the function:def _write_frame(self, data):
assert data is not None and 0 < len(data) < 255, 'Data must be array of 1 to 255 bytes.'
# Build frame to send as:
# - SPI data write (0x01)
# - Preamble (0x00)
# - Start code (0x00, 0xFF)
# - Command length (1 byte)
# - Command length checksum
# - Command bytes
# - Checksum
# - Postamble (0x00)
length = len(data)
frame = bytearray(length+8)
frame[0] = PN532_SPI_DATAWRITE
frame[1] = PN532_PREAMBLE
frame[2] = PN532_STARTCODE1
frame[3] = PN532_STARTCODE2
frame[4] = length & 0xFF
frame[5] = self._uint8_add(~length, 1)
frame[6:-2] = data
checksum = reduce(self._uint8_add, data, 0xFF)
frame[-2] = ~checksum & 0xFF
frame[-1] = PN532_POSTAMBLE
# Send frame.
logger.debug('Write frame: 0x{0}'.format(binascii.hexlify(frame)))
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
self._spi.write(frame)
self._gpio.set_high(self._cs) | [
"Write a frame to the PN532 with the specified data bytearray."
] |
Please provide a description of the function:def _read_data(self, count):
# Build a read request frame.
frame = bytearray(count)
frame[0] = PN532_SPI_DATAREAD
# Send the frame and return the response, ignoring the SPI header byte.
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer(frame)
self._gpio.set_high(self._cs)
return response | [
"Read a specified count of bytes from the PN532."
] |
Please provide a description of the function:def _read_frame(self, length):
# Read frame with expected length of data.
response = self._read_data(length+8)
logger.debug('Read frame: 0x{0}'.format(binascii.hexlify(response)))
# Check frame starts with 0x01 and then has 0x00FF (preceded by optional
# zeros).
if response[0] != 0x01:
raise RuntimeError('Response frame does not start with 0x01!')
# Swallow all the 0x00 values that precede 0xFF.
offset = 1
while response[offset] == 0x00:
offset += 1
if offset >= len(response):
raise RuntimeError('Response frame preamble does not contain 0x00FF!')
if response[offset] != 0xFF:
raise RuntimeError('Response frame preamble does not contain 0x00FF!')
offset += 1
if offset >= len(response):
raise RuntimeError('Response contains no data!')
# Check length & length checksum match.
frame_len = response[offset]
if (frame_len + response[offset+1]) & 0xFF != 0:
raise RuntimeError('Response length checksum did not match length!')
# Check frame checksum value matches bytes.
checksum = reduce(self._uint8_add, response[offset+2:offset+2+frame_len+1], 0)
if checksum != 0:
raise RuntimeError('Response checksum did not match expected value!')
# Return frame data.
return response[offset+2:offset+2+frame_len] | [
"Read a response frame from the PN532 of at most length bytes in size.\n Returns the data inside the frame if found, otherwise raises an exception\n if there is an error parsing the frame. Note that less than length bytes\n might be returned!\n "
] |
Please provide a description of the function:def _wait_ready(self, timeout_sec=1):
start = time.time()
# Send a SPI status read command and read response.
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
self._gpio.set_high(self._cs)
# Loop until a ready response is received.
while response[1] != PN532_SPI_READY:
# Check if the timeout has been exceeded.
if time.time() - start >= timeout_sec:
return False
# Wait a little while and try reading the status again.
time.sleep(0.01)
self._gpio.set_low(self._cs)
self._busy_wait_ms(2)
response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
self._gpio.set_high(self._cs)
return True | [
"Wait until the PN532 is ready to receive commands. At most wait\n timeout_sec seconds for the PN532 to be ready. If the PN532 is ready\n before the timeout is exceeded then True will be returned, otherwise\n False is returned when the timeout is exceeded.\n "
] |
Please provide a description of the function:def call_function(self, command, response_length=0, params=[], timeout_sec=1):
# Build frame data with command and parameters.
data = bytearray(2+len(params))
data[0] = PN532_HOSTTOPN532
data[1] = command & 0xFF
data[2:] = params
# Send frame and wait for response.
self._write_frame(data)
if not self._wait_ready(timeout_sec):
return None
# Verify ACK response and wait to be ready for function response.
response = self._read_data(len(PN532_ACK))
if response != PN532_ACK:
raise RuntimeError('Did not receive expected ACK from PN532!')
if not self._wait_ready(timeout_sec):
return None
# Read response bytes.
response = self._read_frame(response_length+2)
# Check that response is for the called function.
if not (response[0] == PN532_PN532TOHOST and response[1] == (command+1)):
raise RuntimeError('Received unexpected command response!')
# Return response data.
return response[2:] | [
"Send specified command to the PN532 and expect up to response_length\n bytes back in a response. Note that less than the expected bytes might\n be returned! Params can optionally specify an array of bytes to send as\n parameters to the function call. Will wait up to timeout_secs seconds\n for a response and return a bytearray of response bytes, or None if no\n response is available within the timeout.\n "
] |
Please provide a description of the function:def begin(self):
# Assert CS pin low for a second for PN532 to be ready.
self._gpio.set_low(self._cs)
time.sleep(1.0)
# Call GetFirmwareVersion to sync up with the PN532. This might not be
# required but is done in the Arduino library and kept for consistency.
self.get_firmware_version()
self._gpio.set_high(self._cs) | [
"Initialize communication with the PN532. Must be called before any\n other calls are made against the PN532.\n "
] |
Please provide a description of the function:def get_firmware_version(self):
response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)
if response is None:
raise RuntimeError('Failed to detect the PN532! Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')
return (response[0], response[1], response[2], response[3]) | [
"Call PN532 GetFirmwareVersion function and return a tuple with the IC,\n Ver, Rev, and Support values.\n "
] |
Please provide a description of the function:def read_passive_target(self, card_baud=PN532_MIFARE_ISO14443A, timeout_sec=1):
# Send passive read command for 1 card. Expect at most a 7 byte UUID.
response = self.call_function(PN532_COMMAND_INLISTPASSIVETARGET,
params=[0x01, card_baud],
response_length=17)
# If no response is available return None to indicate no card is present.
if response is None:
return None
# Check only 1 card with up to a 7 byte UID is present.
if response[0] != 0x01:
raise RuntimeError('More than one card detected!')
if response[5] > 7:
raise RuntimeError('Found card with unexpectedly long UID!')
# Return UID of card.
return response[6:6+response[5]] | [
"Wait for a MiFare card to be available and return its UID when found.\n Will wait up to timeout_sec seconds and return None if no card is found,\n otherwise a bytearray with the UID of the found card is returned.\n "
] |
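A hedged usage sketch; the pin numbers are illustrative and wiring-dependent:

import binascii

pn532 = PN532(cs=18, sclk=25, mosi=23, miso=24)
pn532.begin()
uid = pn532.read_passive_target()
if uid is None:
    print('No card found.')
else:
    print('Card UID: 0x{0}'.format(binascii.hexlify(uid)))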
Please provide a description of the function:def mifare_classic_authenticate_block(self, uid, block_number, key_number, key):
# Build parameters for InDataExchange command to authenticate MiFare card.
uidlen = len(uid)
keylen = len(key)
params = bytearray(3+uidlen+keylen)
params[0] = 0x01 # Max card numbers
params[1] = key_number & 0xFF
params[2] = block_number & 0xFF
params[3:3+keylen] = key
params[3+keylen:] = uid
# Send InDataExchange request and verify response is 0x00.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=params,
response_length=1)
return response[0] == 0x00 | [
"Authenticate specified block number for a MiFare classic card. Uid\n should be a byte array with the UID of the card, block number should be\n the block to authenticate, key number should be the key type (like\n MIFARE_CMD_AUTH_A or MIFARE_CMD_AUTH_B), and key should be a byte array\n with the key data. Returns True if the block was authenticated, or False\n if not authenticated.\n "
] |
Please provide a description of the function:def mifare_classic_read_block(self, block_number):
# Send InDataExchange request to read block of MiFare data.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=[0x01, MIFARE_CMD_READ, block_number & 0xFF],
response_length=17)
# Check first response is 0x00 to show success.
if response[0] != 0x00:
return None
# Return the 16 bytes of block data, skipping the leading status byte.
return response[1:] | [
"Read a block of data from the card. Block number should be the block\n to read. If the block is successfully read a bytearray of length 16 with\n data starting at the specified block will be returned. If the block is\n not read then None will be returned.\n "
] |
Please provide a description of the function:def mifare_classic_write_block(self, block_number, data):
assert data is not None and len(data) == 16, 'Data must be an array of 16 bytes!'
# Build parameters for InDataExchange command to do MiFare classic write.
params = bytearray(19)
params[0] = 0x01 # Max card numbers
params[1] = MIFARE_CMD_WRITE
params[2] = block_number & 0xFF
params[3:] = data
# Send InDataExchange request.
response = self.call_function(PN532_COMMAND_INDATAEXCHANGE,
params=params,
response_length=1)
return response[0] == 0x00 | [
"Write a block of data to the card. Block number should be the block\n to write and data should be a byte array of length 16 with the data to\n write. If the data is successfully written then True is returned,\n otherwise False is returned.\n "
] |
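The three MiFare helpers chain naturally: authenticate a block, write 16 bytes, read them back. A sketch assuming a factory-fresh card (the all-0xFF key and the MIFARE_CMD_AUTH_B constant are standard MiFare Classic defaults, not specific to this document):

# Factory-default MiFare Classic key; real deployments use provisioned keys.
key = [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF]
block = 4  # first block outside the manufacturer sector

if pn532.mifare_classic_authenticate_block(uid, block, PN532.MIFARE_CMD_AUTH_B, key):
    data = bytearray(16)
    data[0:6] = b'HELLO!'
    if pn532.mifare_classic_write_block(block, data):
        print('Read back:', pn532.mifare_classic_read_block(block))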
Please provide a description of the function:def _dirmatch(path, matchwith):
matchlen = len(matchwith)
if (path.startswith(matchwith)
and path[matchlen:matchlen + 1] in [os.sep, '']):
return True
return False | [
"Check if path is within matchwith's tree.\n\n >>> _dirmatch('/home/foo/bar', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')\n True\n >>> _dirmatch('/home/foo/bar2', '/home/foo/bar')\n False\n >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')\n False\n "
] |
Please provide a description of the function:def _virtualenv_sys(venv_path):
"obtain version and path info from a virtualenv."
executable = os.path.join(venv_path, env_bin_dir, 'python')
# Must use "executable" as the first argument rather than as the
# keyword argument "executable" to get correct value from sys.path
p = subprocess.Popen([executable,
'-c', 'import sys;'
'print (".".join(map(str, sys.version_info[:2])));'
'print ("\\n".join(sys.path));'],
env={},
stdout=subprocess.PIPE)
stdout, err = p.communicate()
assert not p.returncode and stdout
lines = stdout.decode('utf-8').splitlines()
return lines[0], list(filter(bool, lines[1:])) | [] |
Please provide a description of the function:def int_to_ef(n):
flags = {}
for name, value in libarchive.constants.archive_entry.FILETYPES.items():
flags[name] = (n & value) > 0
return ENTRY_FILETYPE(**flags) | [
"This is here for testing support but, in practice, this isn't very\n useful as many of the flags are just combinations of other flags. The\n relationships are defined by the OS in ways that aren't semantically\n intuitive to this project.\n "
] |
Please provide a description of the function:def _enumerator(opener, entry_cls, format_code=None, filter_code=None):
archive_res = _archive_read_new()
try:
r = _set_read_context(archive_res, format_code, filter_code)
opener(archive_res)
def it():
while 1:
with _archive_read_next_header(archive_res) as entry_res:
if entry_res is None:
break
e = entry_cls(archive_res, entry_res)
yield e
if e.is_consumed is False:
_archive_read_data_skip(archive_res)
yield it()
finally:
_archive_read_free(archive_res) | [
"Return an archive enumerator from a user-defined source, using a user-\n defined entry type.\n "
] |
Please provide a description of the function:def file_enumerator(filepath, block_size=10240, *args, **kwargs):
_LOGGER.debug("Enumerating through archive file: %s", filepath)
def opener(archive_res):
_LOGGER.debug("Opening from file (file_enumerator): %s", filepath)
_archive_read_open_filename(archive_res, filepath, block_size)
if 'entry_cls' not in kwargs:
kwargs['entry_cls'] = _ArchiveEntryItReadable
return _enumerator(opener,
*args,
**kwargs) | [
"Return an enumerator that knows how to read a physical file."
] |
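In use, the enumerator is a context manager that yields one readable entry at a time; a sketch assuming the public wrapper module re-exports it as in the PyEasyArchive examples:

import libarchive.public

with libarchive.public.file_enumerator('test.7z') as entries:
    for entry in entries:
        # entry.pathname and entry.size come from the entry adapter;
        # both attribute names are assumptions from the library docs.
        print('{0} ({1} bytes)'.format(entry.pathname, entry.size))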
Please provide a description of the function:def memory_enumerator(buffer_, *args, **kwargs):
_LOGGER.debug("Enumerating through (%d) bytes of archive data.",
len(buffer_))
def opener(archive_res):
_LOGGER.debug("Opening from (%d) bytes (memory_enumerator).",
len(buffer_))
_archive_read_open_memory(archive_res, buffer_)
if 'entry_cls' not in kwargs:
kwargs['entry_cls'] = _ArchiveEntryItReadable
return _enumerator(opener,
*args,
**kwargs) | [
"Return an enumerator that knows how to read raw memory."
] |
Please provide a description of the function:def _pour(opener, flags=0, *args, **kwargs):
with _enumerator(opener,
*args,
entry_cls=_ArchiveEntryItState,
**kwargs) as r:
ext = libarchive.calls.archive_write.c_archive_write_disk_new()
libarchive.calls.archive_write.c_archive_write_disk_set_options(
ext,
flags
)
for state in r:
yield state
if state.selected is False:
continue
r = libarchive.calls.archive_write.c_archive_write_header(
ext,
state.entry_res)
buff = ctypes.c_void_p()
size = ctypes.c_size_t()
offset = ctypes.c_longlong()
while 1:
r = libarchive.calls.archive_read.\
c_archive_read_data_block(
state.reader_res,
ctypes.byref(buff),
ctypes.byref(size),
ctypes.byref(offset))
if r == libarchive.constants.archive.ARCHIVE_EOF:
break
elif r != libarchive.constants.archive.ARCHIVE_OK:
message = c_archive_error_string(state.reader_res)
raise libarchive.exception.ArchiveError(
"Pour failed: (%d) [%s]" % (r, message))
r = libarchive.calls.archive_write.c_archive_write_data_block(
ext,
buff,
size,
offset)
r = libarchive.calls.archive_write.\
c_archive_write_finish_entry(ext) | [
"A flexible pouring facility that knows how to enumerate entry data."
] |
Please provide a description of the function:def file_pour(filepath, block_size=10240, *args, **kwargs):
def opener(archive_res):
_LOGGER.debug("Opening from file (file_pour): %s", filepath)
_archive_read_open_filename(archive_res, filepath, block_size)
return _pour(opener, *args, flags=0, **kwargs) | [
"Write physical files from entries."
] |
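Extraction with file_pour is also an iteration: each yielded state describes one entry as it is written beneath the current working directory (a sketch under the same public-wrapper assumption):

import os
import libarchive.public

os.chdir('/tmp/extracted')  # entries are poured relative to the CWD
for state in libarchive.public.file_pour('test.7z'):
    print('Poured:', state)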
Please provide a description of the function:def memory_pour(buffer_, *args, **kwargs):
def opener(archive_res):
_LOGGER.debug("Opening from (%d) bytes (memory_pour).", len(buffer_))
_archive_read_open_memory(archive_res, buffer_)
return _pour(opener, *args, flags=0, **kwargs) | [
"Yield data from entries."
] |
Please provide a description of the function:def _archive_write_data(archive, data):
n = libarchive.calls.archive_write.c_archive_write_data(
archive,
ctypes.cast(ctypes.c_char_p(data), ctypes.c_void_p),
len(data))
if n == 0:
message = c_archive_error_string(archive)
raise ValueError("No bytes were written. Error? [%s]" % (message)) | [
"Write data to archive. This will only be called with a non-empty string.\n "
] |
Please provide a description of the function:def _create(opener,
format_code,
files,
filter_code=None,
block_size=16384):
a = _archive_write_new()
_set_write_context(a, format_code, filter_code)
_LOGGER.debug("Opening archive (create).")
opener(a)
# Use the standard uid/gid lookup mechanisms.
# This was set on an instance of *disk* that wasn't used. Do we still need it?
#_archive_read_disk_set_standard_lookup(disk)
# We used to yield this, but that necessitated users always flattening the
# response. This means we don't have to, but we still have to return an
# enumerable in order to maintain compatibility.
added = []
for filepath in files:
filepath = filepath.encode('utf-8')
disk = libarchive.calls.archive_read.c_archive_read_disk_new()
libarchive.calls.archive_read.c_archive_read_disk_open(
disk,
filepath)
while 1:
entry = libarchive.calls.archive_entry.c_archive_entry_new()
r = libarchive.calls.archive_read.c_archive_read_next_header2(
disk,
entry)
if r == libarchive.constants.archive.ARCHIVE_EOF:
break
elif r != libarchive.constants.archive.ARCHIVE_OK:
message = c_archive_error_string(disk)
raise libarchive.exception.ArchiveError(
"Could not build header from physical source file "
"during create: (%d) [%s]" %
(r, message))
ae = libarchive.adapters.archive_entry.ArchiveEntry(
disk,
entry)
# print("WRITING: [{}] {}".format(ae, ae.filetype))
# Strip leading slash so it stores as a relative path.
if os.path.isabs(ae.pathname) is True:
ae.pathname = ae.pathname[1:]
added.append(ae)
libarchive.calls.archive_read.c_archive_read_disk_descend(disk)
# NOTE: There's a `archive_entry_set_size()` on the underlying
# entry type, but it doesn't appear to be necessary. The sizes
# report perfectly fine with the [probably automatic] counting that
# occurs just with `_archive_write_data()`.
r = _archive_write_header(a, entry)
if ae.filetype.IFLNK is True and os.path.islink(ae.sourcepath) is True:
target_path = os.readlink(ae.sourcepath)
ae.symlink_targetpath = target_path
else:
with open(ae.sourcepath, 'rb') as f:
while 1:
data = f.read(block_size)
if not data:
break
_archive_write_data(a, data)
libarchive.calls.archive_entry.c_archive_entry_free(entry)
libarchive.calls.archive_read.c_archive_read_close(disk)
libarchive.calls.archive_read.c_archive_read_free(disk)
_LOGGER.debug("Closing archive (create).")
_archive_write_close(a)
_archive_write_free(a)
return added | [
"Create an archive from a collection of files (not recursive)."
] |
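_create itself is private; callers normally go through a public wrapper that binds it to a file-backed writer. A hedged sketch of that usage (create_file and the format constant follow the PyEasyArchive examples and are assumptions here):

import libarchive.public
import libarchive.constants

added = libarchive.public.create_file(
    'archive.7z',
    libarchive.constants.ARCHIVE_FORMAT_7ZIP,
    ['README.rst', 'setup.py'])
for entry in added:
    print('Added:', entry.pathname)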
Please provide a description of the function:def _write_ctrl_meas(self):
self._write_register_byte(_BME280_REGISTER_CTRL_HUM, self.overscan_humidity)
self._write_register_byte(_BME280_REGISTER_CTRL_MEAS, self._ctrl_meas) | [
"\n Write the values to the ctrl_meas and ctrl_hum registers in the device\n ctrl_meas sets the pressure and temperature data acquistion options\n ctrl_hum sets the humidty oversampling and must be written to first\n "
] |
Please provide a description of the function:def _write_config(self):
normal_flag = False
if self._mode == MODE_NORMAL:
#Writes to the config register may be ignored while in Normal mode
normal_flag = True
self.mode = MODE_SLEEP #So we switch to Sleep mode first
self._write_register_byte(_BME280_REGISTER_CONFIG, self._config)
if normal_flag:
self.mode = MODE_NORMAL | [
"Write the value to the config register in the device "
] |
Please provide a description of the function:def _config(self):
config = 0
if self.mode == MODE_NORMAL:
config += (self._t_standby << 5)
if self._iir_filter:
config += (self._iir_filter << 2)
return config | [
"Value to be written to the device's config register "
] |
Please provide a description of the function:def _ctrl_meas(self):
ctrl_meas = (self.overscan_temperature << 5)
ctrl_meas += (self.overscan_pressure << 2)
ctrl_meas += self.mode
return ctrl_meas | [
"Value to be written to the device's ctrl_meas register "
] |
Please provide a description of the function:def measurement_time_typical(self):
meas_time_ms = 1.0
if self.overscan_temperature != OVERSCAN_DISABLE:
meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_temperature))
if self.overscan_pressure != OVERSCAN_DISABLE:
meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_pressure) + 0.5)
if self.overscan_humidity != OVERSCAN_DISABLE:
meas_time_ms += (2 * _BME280_OVERSCANS.get(self.overscan_humidity) + 0.5)
return meas_time_ms | [
"Typical time in milliseconds required to complete a measurement in normal mode"
] |
Please provide a description of the function:def pressure(self):
self._read_temperature()
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
adc = self._read24(_BME280_REGISTER_PRESSUREDATA) / 16 # lowest 4 bits get dropped
var1 = float(self._t_fine) / 2.0 - 64000.0
var2 = var1 * var1 * self._pressure_calib[5] / 32768.0
var2 = var2 + var1 * self._pressure_calib[4] * 2.0
var2 = var2 / 4.0 + self._pressure_calib[3] * 65536.0
var3 = self._pressure_calib[2] * var1 * var1 / 524288.0
var1 = (var3 + self._pressure_calib[1] * var1) / 524288.0
var1 = (1.0 + var1 / 32768.0) * self._pressure_calib[0]
if var1 == 0:
return 0  # avoid division by zero below
pressure = 1048576.0 - adc
pressure = ((pressure - var2 / 4096.0) * 6250.0) / var1
var1 = self._pressure_calib[8] * pressure * pressure / 2147483648.0
var2 = pressure * self._pressure_calib[7] / 32768.0
pressure = pressure + (var1 + var2 + self._pressure_calib[6]) / 16.0
pressure /= 100
# Clamp to the sensor's specified operating range.
if pressure < _BME280_PRESSURE_MIN_HPA:
return _BME280_PRESSURE_MIN_HPA
if pressure > _BME280_PRESSURE_MAX_HPA:
return _BME280_PRESSURE_MAX_HPA
return pressure | [
"\n The compensated pressure in hectoPascals.\n returns None if pressure measurement is disabled\n "
] |
Please provide a description of the function:def humidity(self):
self._read_temperature()
hum = self._read_register(_BME280_REGISTER_HUMIDDATA, 2)
#print("Humidity data: ", hum)
adc = float(hum[0] << 8 | hum[1])
#print("adc:", adc)
# Algorithm from the BME280 driver
# https://github.com/BoschSensortec/BME280_driver/blob/master/bme280.c
var1 = float(self._t_fine) - 76800.0
#print("var1 ", var1)
var2 = (self._humidity_calib[3] * 64.0 + (self._humidity_calib[4] / 16384.0) * var1)
#print("var2 ",var2)
var3 = adc - var2
#print("var3 ",var3)
var4 = self._humidity_calib[1] / 65536.0
#print("var4 ",var4)
var5 = (1.0 + (self._humidity_calib[2] / 67108864.0) * var1)
#print("var5 ",var5)
var6 = 1.0 + (self._humidity_calib[5] / 67108864.0) * var1 * var5
#print("var6 ",var6)
var6 = var3 * var4 * (var5 * var6)
humidity = var6 * (1.0 - self._humidity_calib[0] * var6 / 524288.0)
if humidity > _BME280_HUMIDITY_MAX:
return _BME280_HUMIDITY_MAX
if humidity < _BME280_HUMIDITY_MIN:
return _BME280_HUMIDITY_MIN
# else...
return humidity | [
"\n The relative humidity in RH %\n returns None if humidity measurement is disabled\n "
] |
Please provide a description of the function:def altitude(self):
pressure = self.pressure # in Si units for hPascal
return 44330 * (1.0 - math.pow(pressure / self.sea_level_pressure, 0.1903)) | [
"The altitude based on current ``pressure`` versus the sea level pressure\n (``sea_level_pressure``) - which you must enter ahead of time)"
] |
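Tying the BME280 properties together, a full reading cycle is plain attribute access once the driver is constructed (a sketch using the CircuitPython binding; the I2C wiring and class name are assumptions from the Adafruit examples):

import board
import busio
import adafruit_bme280

i2c = busio.I2C(board.SCL, board.SDA)
bme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c)
bme280.sea_level_pressure = 1013.25  # hPa at your location; required for altitude

print('Humidity: %0.1f %%' % bme280.humidity)
print('Pressure: %0.1f hPa' % bme280.pressure)
print('Altitude: %0.2f m' % bme280.altitude)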
Please provide a description of the function:def _read_coefficients(self):
coeff = self._read_register(_BME280_REGISTER_DIG_T1, 24)
coeff = list(struct.unpack('<HhhHhhhhhhhh', bytes(coeff)))
coeff = [float(i) for i in coeff]
self._temp_calib = coeff[:3]
self._pressure_calib = coeff[3:]
self._humidity_calib = [0]*6
self._humidity_calib[0] = self._read_byte(_BME280_REGISTER_DIG_H1)
coeff = self._read_register(_BME280_REGISTER_DIG_H2, 7)
coeff = list(struct.unpack('<hBBBBb', bytes(coeff)))
self._humidity_calib[1] = float(coeff[0])
self._humidity_calib[2] = float(coeff[1])
self._humidity_calib[3] = float((coeff[2] << 4) | (coeff[3] & 0xF))
self._humidity_calib[4] = float((coeff[4] << 4) | (coeff[3] >> 4))
self._humidity_calib[5] = float(coeff[5]) | [
"Read & save the calibration coefficients"
] |
Please provide a description of the function:def _read24(self, register):
ret = 0.0
for b in self._read_register(register, 3):
ret *= 256.0
ret += float(b & 0xFF)
return ret | [
"Read an unsigned 24-bit value as a floating point and return it."
] |
Please provide a description of the function:def _create(self, postData) :
if self.infos is None :
r = self.connection.session.post(self.indexesURL, params = {"collection" : self.collection.name}, data = json.dumps(postData, default=str))
data = r.json()
if (r.status_code >= 400) or data['error'] :
raise CreationError(data['errorMessage'], data)
self.infos = data | [
"Creates an index of any type according to postData"
] |
Please provide a description of the function:def createVertex(self, collectionName, docAttributes, waitForSync = False) :
url = "%s/vertex/%s" % (self.URL, collectionName)
store = DOC.DocumentStore(self.database[collectionName], validators=self.database[collectionName]._fields, initDct=docAttributes)
# self.database[collectionName].validateDct(docAttributes)
store.validate()
r = self.connection.session.post(url, data = json.dumps(docAttributes, default=str), params = {'waitForSync' : waitForSync})
data = r.json()
if r.status_code == 201 or r.status_code == 202 :
return self.database[collectionName][data["vertex"]["_key"]]
raise CreationError("Unable to create vertice, %s" % data["errorMessage"], data) | [
"adds a vertex to the graph and returns it"
] |
Please provide a description of the function:def deleteVertex(self, document, waitForSync = False) :
url = "%s/vertex/%s" % (self.URL, document._id)
r = self.connection.session.delete(url, params = {'waitForSync' : waitForSync})
data = r.json()
if r.status_code == 200 or r.status_code == 202 :
return True
raise DeletionError("Unable to delete vertice, %s" % document._id, data) | [
"deletes a vertex from the graph as well as al linked edges"
] |
Please provide a description of the function:def createEdge(self, collectionName, _fromId, _toId, edgeAttributes, waitForSync = False) :
if not _fromId :
raise ValueError("Invalid _fromId: %s" % _fromId)
if not _toId :
raise ValueError("Invalid _toId: %s" % _toId)
if collectionName not in self.definitions :
raise KeyError("'%s' is not among the edge definitions" % collectionName)
url = "%s/edge/%s" % (self.URL, collectionName)
self.database[collectionName].validatePrivate("_from", _fromId)
self.database[collectionName].validatePrivate("_to", _toId)
ed = self.database[collectionName].createEdge()
ed.set(edgeAttributes)
ed.validate()
payload = ed.getStore()
payload.update({'_from' : _fromId, '_to' : _toId})
r = self.connection.session.post(url, data = json.dumps(payload, default=str), params = {'waitForSync' : waitForSync})
data = r.json()
if r.status_code == 201 or r.status_code == 202 :
return self.database[collectionName][data["edge"]["_key"]]
# print "\ngraph 160, ", data, payload, _fromId
raise CreationError("Unable to create edge, %s" % r.json()["errorMessage"], data) | [
"creates an edge between two documents"
] |
Please provide a description of the function:def link(self, definition, doc1, doc2, edgeAttributes, waitForSync = False) :
"A shorthand for createEdge that takes two documents as input"
if type(doc1) is DOC.Document :
if not doc1._id :
doc1.save()
doc1_id = doc1._id
else :
doc1_id = doc1
if type(doc2) is DOC.Document :
if not doc2._id :
doc2.save()
doc2_id = doc2._id
else :
doc2_id = doc2
return self.createEdge(definition, doc1_id, doc2_id, edgeAttributes, waitForSync) | [] |
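A short pyArango workflow built on createVertex and link (a sketch; the 'social' graph with a 'persons' vertex collection and a 'knows' edge definition are assumed to already exist on the database handle db):

g = db.graphs['social']
alice = g.createVertex('persons', {'name': 'Alice'})
bob = g.createVertex('persons', {'name': 'Bob'})
# link saves unsaved documents first, then creates the edge.
g.link('knows', alice, bob, {'since': 2015})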
Please provide a description of the function:def unlink(self, definition, doc1, doc2) :
"deletes all links between doc1 and doc2"
links = self.database[definition].fetchByExample( {"_from": doc1._id,"_to" : doc2._id}, batchSize = 100)
for l in links :
self.deleteEdge(l) | [] |
Please provide a description of the function:def deleteEdge(self, edge, waitForSync = False) :
url = "%s/edge/%s" % (self.URL, edge._id)
r = self.connection.session.delete(url, params = {'waitForSync' : waitForSync})
if r.status_code == 200 or r.status_code == 202 :
return True
raise DeletionError("Unable to delete edge, %s" % edge._id, r.json()) | [
"removes an edge from the graph"
] |
Please provide a description of the function:def traverse(self, startVertex, **kwargs) :
url = "%s/traversal" % self.database.URL
if type(startVertex) is DOC.Document :
startVertex_id = startVertex._id
else :
startVertex_id = startVertex
payload = {"startVertex": startVertex_id, "graphName" : self.name}
if "expander" in kwargs :
if "direction" in kwargs :
raise ValueError("The function can't have both 'direction' and 'expander' as arguments")
elif "direction" not in kwargs :
raise ValueError("The function must have as argument either: direction = 'outbound'/'any'/'inbound' or expander = 'custom JS (see arangodb's doc)'")
payload.update(kwargs)
r = self.connection.session.post(url, data = json.dumps(payload, default=str))
data = r.json()
if r.status_code < 200 or r.status_code > 202 or data["error"] :
raise TraversalError(data["errorMessage"], data)
return data["result"] | [
"Traversal! see: https://docs.arangodb.com/HttpTraversal/README.html for a full list of the possible kwargs.\n The function must have as argument either: direction = \"outbout\"/\"any\"/\"inbound\" or expander = \"custom JS (see arangodb's doc)\".\n The function can't have both 'direction' and 'expander' as arguments.\n ",
"The function can't have both 'direction' and 'expander' as arguments",
"The function must have as argument either: direction = \"outbout\"/\"any\"/\"inbound\" or expander = \"custom JS (see arangodb's doc)\" "
] |
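traverse then walks the graph from a start vertex, requiring exactly one of direction or expander (a sketch continuing the example above; the response shape follows ArangoDB's /traversal endpoint and is an assumption here):

result = g.traverse(alice, direction='outbound', maxDepth=2)
for vertex in result['visited']['vertices']:
    print(vertex['name'])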
Please provide a description of the function:def delete(self, _key) :
"removes a document from the cache"
try :
doc = self.cacheStore[_key]
doc.prev.nextDoc = doc.nextDoc
doc.nextDoc.prev = doc.prev
del(self.cacheStore[_key])
except KeyError :
raise KeyError("Document with _key %s is not available in cache" % _key) | [] |
Please provide a description of the function:def getChain(self) :
"returns a list of keys representing the chain of documents"
l = []
h = self.head
while h :
l.append(h._key)
h = h.nextDoc
return l | [] |
Please provide a description of the function:def stringify(self) :
"a pretty str version of getChain()"
l = []
h = self.head
while h :
l.append(str(h._key))
h = h.nextDoc
return "<->".join(l) | [] |
Please provide a description of the function:def validate(self, value) :
for v in self.validators :
v.validate(value)
return True | [
"checks the validity of 'value' given the lits of validators"
] |
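A custom validator only needs a validate(value) method that raises on bad input; a minimal sketch (the Validator base class and ValidationError locations follow pyArango's module layout, an assumption here):

from pyArango.validation import Validator
from pyArango.theExceptions import ValidationError

class Positive(Validator):
    "hypothetical validator: accepts only values greater than zero"
    def validate(self, value):
        if not value > 0:
            raise ValidationError('%s is not positive' % value)
        return True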