code
stringlengths
52
7.75k
docs
stringlengths
1
5.85k
def build_request_to_validator_map(schema, resolver):
    """Build a mapping from :class:`RequestMatcher` to :class:`ValidatorMap`
    for each operation in the API spec.

    The resulting mapping may be used to retrieve the appropriate
    validators for a request.
    """
    model_defs = schema.get('models', {})
    return {
        RequestMatcher(api['path'], operation['method']):
            ValidatorMap.from_operation(operation, model_defs, resolver)
        for api in schema['apis']
        for operation in api['operations']
    }
Build a mapping from :class:`RequestMatcher` to :class:`ValidatorMap` for each operation in the API spec. This mapping may be used to retrieve the appropriate validators for a request.
def load_schema(schema_path):
    """Prepare the api specification for request and response validation.

    :returns: a mapping from :class:`RequestMatcher` to :class:`ValidatorMap`
        for every operation in the api specification.
    :rtype: dict
    """
    with open(schema_path, 'r') as fp:
        schema = simplejson.load(fp)
    ref_resolver = RefResolver('', '', schema.get('models', {}))
    return build_request_to_validator_map(schema, ref_resolver)
Prepare the api specification for request and response validation. :returns: a mapping from :class:`RequestMatcher` to :class:`ValidatorMap` for every operation in the api specification. :rtype: dict
def validate(self, values):
    """Validate a :class:`dict` of values.

    If `self.schema` is falsy this is a noop. A `None` value is also
    accepted without validation unless the schema marks it required.
    """
    if not self.schema:
        return
    if values is None and not self.schema.get('required', False):
        return
    self.validator.validate(values)
Validate a :class:`dict` of values. If `self.schema` is falsy this is a noop.
def matches(self, request):
    """:param request: a :class:`pyramid.request.Request`
    :returns: True if this matcher matches the request, False otherwise
    """
    if request.method != self.method:
        return False
    return partial_path_match(request.path_info, self.path)
:param request: a :class:`pyramid.request.Request` :returns: True if this matcher matches the request, False otherwise
def get_swagger_objects(settings, route_info, registry):
    """Returns appropriate swagger handler and swagger spec schema.

    Exception is made when `settings.prefer_20_routes` is non-empty and
    both '1.2' and '2.0' are enabled: '2.0' is chosen only for routes
    listed in `prefer_20_routes`, allowing incremental opt-in migration.

    :rtype: (:class:`SwaggerHandler`,
        :class:`pyramid_swagger.model.SwaggerSchema` OR
        :class:`bravado_core.spec.Spec`)
    """
    versions = get_swagger_versions(registry.settings)
    schema12 = registry.settings['pyramid_swagger.schema12']
    schema20 = registry.settings['pyramid_swagger.schema20']

    # Fall back to 1.2 for routes not explicitly opted into 2.0.
    prefer_12_for_route = (
        SWAGGER_20 in versions and
        SWAGGER_12 in versions and
        settings.prefer_20_routes and
        route_info.get('route') and
        route_info['route'].name not in settings.prefer_20_routes
    )
    if prefer_12_for_route:
        return settings.swagger12_handler, schema12
    if SWAGGER_20 in versions:
        return settings.swagger20_handler, schema20
    if SWAGGER_12 in versions:
        return settings.swagger12_handler, schema12
Returns appropriate swagger handler and swagger spec schema. Swagger Handler contains callables that isolate implementation differences in the tween to handle both Swagger 1.2 and Swagger 2.0. Exception is made when `settings.prefer_20_routes` are non-empty and ['1.2', '2.0'] both are present in available swagger versions. In this special scenario, '2.0' spec is chosen only for requests which are listed in the `prefer_20_routes`. This helps in incremental migration of routes from v1.2 to v2.0 by making moving to v2.0 opt-in. :rtype: (:class:`SwaggerHandler`, :class:`pyramid_swagger.model.SwaggerSchema` OR :class:`bravado_core.spec.Spec`)
def validation_tween_factory(handler, registry):
    """Pyramid tween for performing validation.

    Note this is very simple -- it validates requests, responses, and
    paths while delegating to the relevant matching view.
    """
    settings = load_settings(registry)
    route_mapper = registry.queryUtility(IRoutesMapper)
    validation_context = _get_validation_context(registry)

    def validator_tween(request):
        # We don't have access to this yet but let's go ahead and build the
        # matchdict so we can validate it and use it to exclude routes from
        # validation.
        route_info = route_mapper(request)
        swagger_handler, spec = get_swagger_objects(settings, route_info,
                                                    registry)
        if should_exclude_request(settings, request, route_info):
            return handler(request)
        try:
            op_or_validators_map = swagger_handler.op_for_request(
                request, route_info=route_info, spec=spec)
        except PathNotMatchedError as exc:
            if settings.validate_path:
                with validation_context(request):
                    raise PathNotFoundError(str(exc), child=exc)
            else:
                return handler(request)

        def operation(_):
            if isinstance(op_or_validators_map, Operation):
                return op_or_validators_map
            return None

        request.set_property(operation)
        if settings.validate_request:
            with validation_context(request, response=None):
                request_data = swagger_handler.handle_request(
                    PyramidSwaggerRequest(request, route_info),
                    op_or_validators_map,
                )

            def swagger_data(_):
                return request_data

            request.set_property(swagger_data)
        response = handler(request)
        if settings.validate_response:
            with validation_context(request, response=response):
                swagger_handler.handle_response(response,
                                                op_or_validators_map)
        return response

    return validator_tween
Pyramid tween for performing validation. Note this is very simple -- it validates requests, responses, and paths while delegating to the relevant matching view.
def handle_request(request, validator_map, **kwargs):
    """Validate the request against the swagger spec and return a dict with
    all parameter values available in the request, cast to the expected
    python type.

    :param request: a :class:`PyramidSwaggerRequest` to validate
    :param validator_map: a :class:`pyramid_swagger.load_schema.ValidatorMap`
        used to validate the request
    :returns: a :class:`dict` of request data for each parameter in the
        swagger spec
    :raises: RequestValidationError when the request is not valid for the
        swagger spec
    """
    request_data = {}
    validation_pairs = []
    sources = [
        (validator_map.query, request.query),
        (validator_map.path, request.path),
        (validator_map.form, request.form),
        (validator_map.headers, request.headers),
    ]
    for validator, raw_values in sources:
        cast_values = cast_params(validator.schema, raw_values)
        validation_pairs.append((validator, cast_values))
        request_data.update(cast_values)

    # Body is a special case because the key for the request_data comes
    # from the name in the schema, instead of keys in the values
    if validator_map.body.schema:
        validation_pairs.append((validator_map.body, request.body))
        request_data[validator_map.body.schema['name']] = request.body

    validate_request(validation_pairs)
    return request_data
Validate the request against the swagger spec and return a dict with all parameter values available in the request, cast to the expected python type. :param request: a :class:`PyramidSwaggerRequest` to validate :param validator_map: a :class:`pyramid_swagger.load_schema.ValidatorMap` used to validate the request :returns: a :class:`dict` of request data for each parameter in the swagger spec :raises: RequestValidationError when the request is not valid for the swagger spec
def build_swagger12_handler(schema):
    """Builds a swagger12 handler or returns None if no schema is present.

    :type schema: :class:`pyramid_swagger.model.SwaggerSchema`
    :rtype: :class:`SwaggerHandler` or None
    """
    if not schema:
        return None
    return SwaggerHandler(
        op_for_request=schema.validators_for_request,
        handle_request=handle_request,
        handle_response=validate_response,
    )
Builds a swagger12 handler or returns None if no schema is present. :type schema: :class:`pyramid_swagger.model.SwaggerSchema` :rtype: :class:`SwaggerHandler` or None
def get_exclude_paths(registry):
    """Compiles a list of paths that should not be validated against.

    :rtype: list of compiled validation regexes
    """
    # TODO(#63): remove deprecated `skip_validation` setting in v2.0.
    regexes = registry.settings.get(
        'pyramid_swagger.skip_validation',
        registry.settings.get(
            'pyramid_swagger.exclude_paths',
            DEFAULT_EXCLUDED_PATHS
        )
    )

    # being nice to users using strings :p
    # (idiomatic single isinstance check against a tuple of types)
    if not isinstance(regexes, (list, tuple)):
        regexes = [regexes]

    return [re.compile(r) for r in regexes]
Compiles a list of paths that should not be validated against. :rtype: list of compiled validation regexes
def cast_request_param(param_type, param_name, param_value):
    """Try to cast a request param (e.g. query arg, POST data) from a string
    to its specified type in the schema. This allows validating non-string
    params.

    :param param_type: name of the type to be cast to
    :type param_type: string
    :param param_name: param name
    :type param_name: string
    :param param_value: param value
    :type param_value: string
    """
    try:
        return CAST_TYPE_TO_FUNC.get(param_type, lambda x: x)(param_value)
    except ValueError:
        # `Logger.warn` is deprecated; `Logger.warning` is the supported API.
        log.warning("Failed to cast %s value of %s to %s",
                    param_name, param_value, param_type)
        # Ignore type error, let jsonschema validation handle incorrect types
        return param_value
Try to cast a request param (e.g. query arg, POST data) from a string to its specified type in the schema. This allows validating non-string params. :param param_type: name of the type to be casted to :type param_type: string :param param_name: param name :type param_name: string :param param_value: param value :type param_value: string
def validate_response(response, validator_map):
    """Validates response against our schemas.

    :param response: the response object to validate
    :type response: :class:`pyramid.response.Response`
    :type validator_map: :class:`pyramid_swagger.load_schema.ValidatorMap`
    """
    validator = validator_map.response

    # Short circuit if we are supposed to not validate anything.
    is_void = validator.schema.get('type') == 'void'
    has_empty_body = response.body in (None, b'', b'{}', b'null')
    if is_void and has_empty_body:
        return

    # Don't attempt to validate non-success responses in v1.2
    if not 200 <= response.status_code <= 203:
        return

    validator.validate(prepare_body(response))
Validates response against our schemas. :param response: the response object to validate :type response: :class:`pyramid.response.Response` :type validator_map: :class:`pyramid_swagger.load_schema.ValidatorMap`
def swaggerize_response(response, op):
    """Delegate handling the Swagger concerns of the response to
    bravado-core.

    :type response: :class:`pyramid.response.Response`
    :type op: :class:`bravado_core.operation.Operation`
    """
    response_spec = get_response_spec(response.status_int, op)
    wrapped = PyramidSwaggerResponse(response)
    bravado_core.response.validate_response(response_spec, op, wrapped)
Delegate handling the Swagger concerns of the response to bravado-core. :type response: :class:`pyramid.response.Response` :type op: :class:`bravado_core.operation.Operation`
def get_op_for_request(request, route_info, spec):
    """Find out which operation in the Swagger schema corresponds to the
    given pyramid request.

    :type request: :class:`pyramid.request.Request`
    :type route_info: dict (usually has 'match' and 'route' keys)
    :type spec: :class:`bravado_core.spec.Spec`
    :rtype: :class:`bravado_core.operation.Operation`
    :raises: PathNotMatchedError when a matching Swagger operation is not
        found.
    """
    # pyramid.urldispath.Route
    route = route_info['route']
    if not hasattr(route, 'path'):
        raise PathNotMatchedError(
            "Could not find a matching route for {0} request {1}. "
            "Have you registered this endpoint with Pyramid?"
            .format(request.method, request.url))

    route_path = route.path
    if route_path[0] != '/':
        route_path = '/' + route_path
    op = spec.get_op_for_request(request.method, route_path)
    if op is None:
        raise PathNotMatchedError(
            "Could not find a matching Swagger "
            "operation for {0} request {1}"
            .format(request.method, request.url))
    return op
Find out which operation in the Swagger schema corresponds to the given pyramid request. :type request: :class:`pyramid.request.Request` :type route_info: dict (usually has 'match' and 'route' keys) :type spec: :class:`bravado_core.spec.Spec` :rtype: :class:`bravado_core.operation.Operation` :raises: PathNotMatchedError when a matching Swagger operation is not found.
def get_swagger_versions(settings):
    """Validates and returns the versions of the Swagger Spec that this
    pyramid application supports.

    :type settings: dict
    :return: set of strings. eg {'1.2', '2.0'}
    :raises: ValueError when an unsupported Swagger version is encountered.
    """
    raw = settings.get('pyramid_swagger.swagger_versions',
                       DEFAULT_SWAGGER_VERSIONS)
    versions = set(aslist(raw))
    if not versions:
        raise ValueError('pyramid_swagger.swagger_versions is empty')
    unsupported = versions - set(SUPPORTED_SWAGGER_VERSIONS)
    for version in unsupported:
        raise ValueError('Swagger version {0} is not supported.'
                         .format(version))
    return versions
Validates and returns the versions of the Swagger Spec that this pyramid application supports. :type settings: dict :return: list of strings. eg ['1.2', '2.0'] :raises: ValueError when an unsupported Swagger version is encountered.
def form(self):
    """:rtype: dict"""
    # Don't read the POST dict unless the body is form encoded
    req = self.request
    if req.content_type not in self.FORM_TYPES:
        return {}
    return req.POST.mixed()
:rtype: dict
def register_api_doc_endpoints(config, endpoints, base_path='/api-docs'):
    """Create and register pyramid endpoints to service swagger api docs.

    Routes and views will be registered on the `config` at `path`.

    :param config: a pyramid configuration to register the new views/routes
    :type config: :class:`pyramid.config.Configurator`
    :param endpoints: endpoints to register as routes and views
    :type endpoints: a list of :class:`pyramid_swagger.model.PyramidEndpoint`
    :param base_path: the base path used to register api doc endpoints.
        Defaults to `/api-docs`.
    :type base_path: string
    """
    prefix = base_path.rstrip('/')
    for endpoint in endpoints:
        config.add_route(endpoint.route_name, prefix + endpoint.path)
        config.add_view(
            endpoint.view,
            route_name=endpoint.route_name,
            renderer=endpoint.renderer)
Create and register pyramid endpoints to service swagger api docs. Routes and views will be registered on the `config` at `path`. :param config: a pyramid configuration to register the new views and routes :type config: :class:`pyramid.config.Configurator` :param endpoints: a list of endpoints to register as routes and views :type endpoints: a list of :class:`pyramid_swagger.model.PyramidEndpoint` :param base_path: the base path used to register api doc endpoints. Defaults to `/api-docs`. :type base_path: string
def build_swagger_12_endpoints(resource_listing, api_declarations):
    """:param resource_listing: JSON representing a Swagger 1.2 resource
        listing
    :type resource_listing: dict
    :param api_declarations: JSON representing Swagger 1.2 api declarations
    :type api_declarations: dict
    :rtype: iterable of :class:`pyramid_swagger.model.PyramidEndpoint`
    """
    yield build_swagger_12_resource_listing(resource_listing)
    for name, filepath in api_declarations.items():
        with open(filepath) as fp:
            declaration = simplejson.load(fp)
        yield build_swagger_12_api_declaration(name, declaration)
:param resource_listing: JSON representing a Swagger 1.2 resource listing :type resource_listing: dict :param api_declarations: JSON representing Swagger 1.2 api declarations :type api_declarations: dict :rtype: iterable of :class:`pyramid_swagger.model.PyramidEndpoint`
def build_swagger_12_resource_listing(resource_listing):
    """:param resource_listing: JSON representing a Swagger 1.2 resource
        listing
    :type resource_listing: dict
    :rtype: :class:`pyramid_swagger.model.PyramidEndpoint`
    """
    def view_for_resource_listing(request):
        # Thanks to the magic of closures, this means we gracefully return
        # JSON without file IO at request time.
        return resource_listing

    return PyramidEndpoint(
        path='',
        route_name='pyramid_swagger.swagger12.api_docs',
        view=view_for_resource_listing,
        renderer='json')
:param resource_listing: JSON representing a Swagger 1.2 resource listing :type resource_listing: dict :rtype: :class:`pyramid_swagger.model.PyramidEndpoint`
def build_swagger_12_api_declaration(resource_name, api_declaration):
    """:param resource_name: The `path` parameter from the resource listing
        for this resource.
    :type resource_name: string
    :param api_declaration: JSON representing a Swagger 1.2 api declaration
    :type api_declaration: dict
    :rtype: :class:`pyramid_swagger.model.PyramidEndpoint`
    """
    # NOTE: This means our resource paths are currently constrained to be
    # valid pyramid routes! (minus the leading /)
    return PyramidEndpoint(
        path='/{0}'.format(resource_name),
        route_name='pyramid_swagger.swagger12.apidocs-{0}'.format(
            resource_name),
        view=build_swagger_12_api_declaration_view(api_declaration),
        renderer='json')
:param resource_name: The `path` parameter from the resource listing for this resource. :type resource_name: string :param api_declaration: JSON representing a Swagger 1.2 api declaration :type api_declaration: dict :rtype: :class:`pyramid_swagger.model.PyramidEndpoint`
def build_swagger_12_api_declaration_view(api_declaration_json):
    """Thanks to the magic of closures, this means we gracefully return JSON
    without file IO at request time.
    """
    def view_for_api_declaration(request):
        # Note that we rewrite basePath to always point at this server's
        # root.
        rewritten = dict(api_declaration_json)
        rewritten['basePath'] = str(request.application_url)
        return rewritten

    return view_for_api_declaration
Thanks to the magic of closures, this means we gracefully return JSON without file IO at request time.
def partial_path_match(path1, path2, kwarg_re=r'\{.*\}'):
    """Validates if path1 and path2 match, ignoring any kwargs in the
    string.

    We need this to ensure we can match Swagger patterns like:
        /foo/{id}
    against the observed pyramid path
        /foo/1

    :param path1: path of a url
    :type path1: string
    :param path2: path of a url
    :type path2: string
    :param kwarg_re: regex pattern to identify kwargs
    :type kwarg_re: regex string
    :returns: boolean
    """
    segments1 = path1.split('/')
    segments2 = path2.split('/')
    if len(segments1) != len(segments2):
        return False
    pattern = re.compile(kwarg_re)
    return all(
        seg1 == seg2 or pattern.match(seg1) or pattern.match(seg2)
        for seg1, seg2 in zip(segments1, segments2)
    )
Validates if path1 and path2 matches, ignoring any kwargs in the string. We need this to ensure we can match Swagger patterns like: /foo/{id} against the observed pyramid path /foo/1 :param path1: path of a url :type path1: string :param path2: path of a url :type path2: string :param kwarg_re: regex pattern to identify kwargs :type kwarg_re: regex string :returns: boolean
def validators_for_request(self, request, **kwargs):
    """Takes a request and returns a validator mapping for the request.

    :param request: A Pyramid request to fetch schemas for
    :type request: :class:`pyramid.request.Request`
    :returns: a :class:`pyramid_swagger.load_schema.ValidatorMap` which can
        be used to validate `request`
    """
    for validator_mapping in self.resource_validators:
        for matcher, vmap in validator_mapping.items():
            if matcher.matches(request):
                return vmap
    raise PathNotMatchedError(
        'Could not find the relevant path ({0}) in the Swagger schema. '
        'Perhaps you forgot to add it?'.format(request.path_info)
    )
Takes a request and returns a validator mapping for the request. :param request: A Pyramid request to fetch schemas for :type request: :class:`pyramid.request.Request` :returns: a :class:`pyramid_swagger.load_schema.ValidatorMap` which can be used to validate `request`
def find_resource_paths(schema_dir):
    """The inverse of :func:`find_resource_names` used to generate a
    resource listing from a directory of swagger api docs.
    """
    def not_api_doc_file(filename):
        return not filename.endswith(API_DOCS_FILENAME)

    def not_swagger_dot_json(filename):
        # Exclude a Swagger 2.0 schema file if it happens to exist.
        return os.path.basename(filename) != 'swagger.json'

    def filename_to_path(filename):
        name, _ext = os.path.splitext(os.path.basename(filename))
        return '/' + name

    candidates = sorted(glob.glob('{0}/*.json'.format(schema_dir)))
    return map(filename_to_path,
               filter(not_swagger_dot_json,
                      filter(not_api_doc_file, candidates)))
The inverse of :func:`find_resource_names` used to generate a resource listing from a directory of swagger api docs.
def build_schema_mapping(schema_dir, resource_listing):
    """Discovers schema file locations and relations.

    :param schema_dir: the directory schema files live inside
    :type schema_dir: string
    :param resource_listing: A swagger resource listing
    :type resource_listing: dict
    :returns: a mapping from resource name to file path
    :rtype: dict
    """
    return {
        name: os.path.join(schema_dir, '{0}.json'.format(name))
        for name in find_resource_names(resource_listing)
    }
Discovers schema file locations and relations. :param schema_dir: the directory schema files live inside :type schema_dir: string :param resource_listing: A swagger resource listing :type resource_listing: dict :returns: a mapping from resource name to file path :rtype: dict
def _load_resource_listing(resource_listing):
    """Load the resource listing from file, handling errors.

    :param resource_listing: path to the api-docs resource listing file
    :type resource_listing: string
    :returns: contents of the resource listing file
    :rtype: dict
    """
    try:
        with open(resource_listing) as fp:
            return simplejson.load(fp)
    except IOError:
        # If not found, raise a more user-friendly error.
        raise ResourceListingNotFoundError(
            'No resource listing found at {0}. Note that your json file '
            'must be named {1}'.format(resource_listing, API_DOCS_FILENAME)
        )
Load the resource listing from file, handling errors. :param resource_listing: path to the api-docs resource listing file :type resource_listing: string :returns: contents of the resource listing file :rtype: dict
def get_resource_listing(schema_dir, should_generate_resource_listing):
    """Return the resource listing document.

    :param schema_dir: the directory which contains swagger spec files
    :type schema_dir: string
    :param should_generate_resource_listing: when True a resource listing
        will be generated from the list of *.json files in the schema_dir.
        Otherwise return the contents of the resource listing file
    :type should_generate_resource_listing: boolean
    :returns: the contents of a resource listing document
    """
    listing = _load_resource_listing(
        os.path.join(schema_dir, API_DOCS_FILENAME))
    if should_generate_resource_listing:
        return generate_resource_listing(schema_dir, listing)
    return listing
Return the resource listing document. :param schema_dir: the directory which contains swagger spec files :type schema_dir: string :param should_generate_resource_listing: when True a resource listing will be generated from the list of *.json files in the schema_dir. Otherwise return the contents of the resource listing file :type should_generate_resource_listing: boolean :returns: the contents of a resource listing document
def compile_swagger_schema(schema_dir, resource_listing):
    """Build a SwaggerSchema from various files.

    :param schema_dir: the directory schema files live inside
    :type schema_dir: string
    :returns: a SwaggerSchema object
    """
    mapping = build_schema_mapping(schema_dir, resource_listing)
    return SwaggerSchema(
        list(build_swagger_12_endpoints(resource_listing, mapping)),
        ingest_resources(mapping, schema_dir),
    )
Build a SwaggerSchema from various files. :param schema_dir: the directory schema files live inside :type schema_dir: string :returns: a SwaggerSchema object
def get_swagger_schema(settings):
    """Return a :class:`pyramid_swagger.model.SwaggerSchema` constructed
    from the swagger specs in `pyramid_swagger.schema_directory`.

    If `pyramid_swagger.enable_swagger_spec_validation` is enabled the
    schema will be validated before returning it.

    :param settings: a pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :returns: a :class:`pyramid_swagger.model.SwaggerSchema`
    """
    schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs')
    generate_listing = settings.get(
        'pyramid_swagger.generate_resource_listing', False)
    resource_listing = get_resource_listing(schema_dir, generate_listing)

    if settings.get('pyramid_swagger.enable_swagger_spec_validation', True):
        validate_swagger_schema(schema_dir, resource_listing)

    return compile_swagger_schema(schema_dir, resource_listing)
Return a :class:`pyramid_swagger.model.SwaggerSchema` constructed from the swagger specs in `pyramid_swagger.schema_directory`. If `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema will be validated before returning it. :param settings: a pyramid registry settings with configuration for building a swagger schema :type settings: dict :returns: a :class:`pyramid_swagger.model.SwaggerSchema`
def get_swagger_spec(settings):
    """Return a :class:`bravado_core.spec.Spec` constructed from the swagger
    specs in `pyramid_swagger.schema_directory`.

    If `pyramid_swagger.enable_swagger_spec_validation` is enabled the
    schema will be validated before returning it.

    :param settings: a pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :rtype: :class:`bravado_core.spec.Spec`
    """
    schema_dir = settings.get('pyramid_swagger.schema_directory', 'api_docs/')
    schema_filename = settings.get('pyramid_swagger.schema_file',
                                   'swagger.json')
    schema_path = os.path.join(schema_dir, schema_filename)
    schema_url = urlparse.urljoin(
        'file:', pathname2url(os.path.abspath(schema_path)))

    # don't need http_client for file:
    file_handler = build_http_handlers(None)['file']
    spec_dict = file_handler(schema_url)

    return Spec.from_dict(
        spec_dict,
        config=create_bravado_core_config(settings),
        origin_url=schema_url)
Return a :class:`bravado_core.spec.Spec` constructed from the swagger specs in `pyramid_swagger.schema_directory`. If `pyramid_swagger.enable_swagger_spec_validation` is enabled the schema will be validated before returning it. :param settings: a pyramid registry settings with configuration for building a swagger schema :type settings: dict :rtype: :class:`bravado_core.spec.Spec`
def create_bravado_core_config(settings):
    """Create a configuration dict for bravado_core based on pyramid_swagger
    settings.

    :param settings: pyramid registry settings with configuration for
        building a swagger schema
    :type settings: dict
    :returns: config dict suitable for passing into
        bravado_core.spec.Spec.from_dict(..)
    :rtype: dict
    """
    # Map pyramid_swagger config key -> bravado_core config key
    config_keys = {
        'pyramid_swagger.enable_request_validation': 'validate_requests',
        'pyramid_swagger.enable_response_validation': 'validate_responses',
        'pyramid_swagger.enable_swagger_spec_validation':
            'validate_swagger_spec',
        'pyramid_swagger.use_models': 'use_models',
        'pyramid_swagger.user_formats': 'formats',
        'pyramid_swagger.include_missing_properties':
            'include_missing_properties',
    }

    configs = {'use_models': False}

    deprecated_configs = {
        bravado_core_key: settings[pyramid_swagger_key]
        for pyramid_swagger_key, bravado_core_key in iteritems(config_keys)
        if pyramid_swagger_key in settings
    }
    if deprecated_configs:
        warnings.warn(
            message='Configs {old_configs} are deprecated, please use {new_configs} instead.'.format(
                old_configs=', '.join(
                    k for k, v in sorted(iteritems(config_keys))),
                new_configs=', '.join(
                    '{}{}'.format(BRAVADO_CORE_CONFIG_PREFIX, v)
                    for k, v in sorted(iteritems(config_keys))
                ),
            ),
            category=DeprecationWarning,
        )
        configs.update(deprecated_configs)

    # Settings with the bravado_core prefix take precedence.
    configs.update({
        key.replace(BRAVADO_CORE_CONFIG_PREFIX, ''): value
        for key, value in iteritems(settings)
        if key.startswith(BRAVADO_CORE_CONFIG_PREFIX)
    })

    return configs
Create a configuration dict for bravado_core based on pyramid_swagger settings. :param settings: pyramid registry settings with configuration for building a swagger schema :type settings: dict :returns: config dict suitable for passing into bravado_core.spec.Spec.from_dict(..) :rtype: dict
def ingest_resources(mapping, schema_dir):
    """Consume the Swagger schemas and produce a queryable datastructure.

    :param mapping: Map from resource name to filepath of its api
        declaration
    :type mapping: dict
    :param schema_dir: the directory schema files live inside
    :type schema_dir: string
    :returns: A list of mapping from :class:`RequestMatcher` to
        :class:`ValidatorMap`
    """
    ingested = []
    for name, filepath in iteritems(mapping):
        try:
            ingested.append(load_schema(filepath))
        except IOError:
            # If we have trouble reading any files, raise a more
            # user-friendly error.
            raise ApiDeclarationNotFoundError(
                'No api declaration found at {0}. Attempted to load the `{1}` '
                'resource relative to the schema_directory `{2}`. Perhaps '
                'your resource name and API declaration file do not '
                'match?'.format(filepath, name, schema_dir)
            )
    return ingested
Consume the Swagger schemas and produce a queryable datastructure. :param mapping: Map from resource name to filepath of its api declaration :type mapping: dict :param schema_dir: the directory schema files live inside :type schema_dir: string :returns: A list of mapping from :class:`RequestMatcher` to :class:`ValidatorMap`
def export_obj(vertices, triangles, filename):
    """Exports a mesh in the (.obj) format."""
    with open(filename, 'w') as fh:
        for vertex in vertices:
            fh.write("v {} {} {}\n".format(*vertex))
        for triangle in triangles:
            # OBJ vertex indices are 1-based, hence the +1.
            fh.write("f {} {} {}\n".format(*(triangle + 1)))
Exports a mesh in the (.obj) format.
def export_off(vertices, triangles, filename):
    """Exports a mesh in the (.off) format."""
    with open(filename, 'w') as fh:
        # OFF header: counts of vertices, faces, and (unused) edges.
        fh.write('OFF\n')
        fh.write('{} {} 0\n'.format(len(vertices), len(triangles)))
        for vertex in vertices:
            fh.write("{} {} {}\n".format(*vertex))
        for face in triangles:
            # Leading 3 marks each face as a triangle.
            fh.write("3 {} {} {}\n".format(*face))
Exports a mesh in the (.off) format.
def export_mesh(vertices, triangles, filename, mesh_name="mcubes_mesh"):
    """Exports a mesh in the COLLADA (.dae) format.

    Needs PyCollada (https://github.com/pycollada/pycollada).
    """
    import collada

    mesh = collada.Collada()

    vert_src = collada.source.FloatSource(
        "verts-array", vertices, ('X', 'Y', 'Z'))
    geom = collada.geometry.Geometry(
        mesh, "geometry0", mesh_name, [vert_src])

    input_list = collada.source.InputList()
    input_list.addInput(0, 'VERTEX', "#verts-array")

    triset = geom.createTriangleSet(np.copy(triangles), input_list, "")
    geom.primitives.append(triset)
    mesh.geometries.append(geom)

    # Wrap the geometry in a scene graph node and a scene.
    geomnode = collada.scene.GeometryNode(geom, [])
    node = collada.scene.Node(mesh_name, children=[geomnode])
    myscene = collada.scene.Scene("mcubes_scene", [node])
    mesh.scenes.append(myscene)
    mesh.scene = myscene

    mesh.write(filename)
Exports a mesh in the COLLADA (.dae) format. Needs PyCollada (https://github.com/pycollada/pycollada).
def auto_doc(tool, nco_self):
    """Generate the __doc__ string of the decorated function by calling the
    nco help command.

    :param tool:
    :param nco_self:
    :return: decorator that installs the tool's --help output as __doc__
    """
    def desc(func):
        help_output = nco_self.call([tool, "--help"]).get("stdout")
        func.__doc__ = help_output
        return func

    return desc
Generate the __doc__ string of the decorated function by calling the nco help command :param tool: :param nco_self: :return:
def read_cdf(self, infile):
    """Return a cdf handle created by the available cdf library.

    python-netcdf4 and scipy supported (default:scipy).
    """
    if not self.return_cdf:
        self.load_cdf_module()

    if self.cdf_module == "scipy":
        # making it compatible to older scipy versions
        return self.cdf.netcdf_file(infile, mode="r")
    if self.cdf_module == "netcdf4":
        return self.cdf.Dataset(infile)
    raise ImportError(
        "Could not import data from file {0}".format(infile))
Return a cdf handle created by the available cdf library. python-netcdf4 and scipy supported (default:scipy)
def read_array(self, infile, var_name):
    """Directly return a numpy array for a given variable name.

    :param infile: path of the cdf file to read
    :param var_name: name of the variable to extract
    :raises: KeyError when `var_name` is not in the file
    """
    file_handle = self.read_cdf(infile)
    try:
        # return the data array
        return file_handle.variables[var_name][:]
    except KeyError:
        print("Cannot find variable: {0}".format(var_name))
        # Re-raise the original exception: `raise KeyError` constructed a
        # new, empty KeyError and discarded the original one.
        raise
Directly return a numpy array for a given variable name
def read_ma_array(self, infile, var_name):
    """Create a masked array based on cdf's FillValue."""
    file_obj = self.read_cdf(infile)

    # .data is not backwards compatible to old scipy versions, [:] is
    data = file_obj.variables[var_name][:]

    # load numpy if available
    try:
        import numpy as np
    except Exception:
        raise ImportError("numpy is required to return masked arrays.")

    variable = file_obj.variables[var_name]
    if not hasattr(variable, "_FillValue"):
        # generate dummy mask which is always valid
        return np.ma.array(data)

    # return masked array
    return np.ma.masked_where(data == variable._FillValue, data)
Create a masked array based on cdf's FillValue
def save_related(self, request, form, formsets, change):
    """Given the ``HttpRequest``, the parent ``ModelForm`` instance, the
    list of inline formsets and a boolean value based on whether the parent
    is being added or changed, save the related objects to the database.

    Note that at this point save_form() and save_model() have already been
    called.
    """
    # Persist many-to-many relations of the parent first.
    form.save_m2m()
    for fs in formsets:
        self.save_formset(request, form, fs, change=change)
Given the ``HttpRequest``, the parent ``ModelForm`` instance, the list of inline formsets and a boolean value based on whether the parent is being added or changed, save the related objects to the database. Note that at this point save_form() and save_model() have already been called.
def digest(self, data=None):
    """Method digest is redefined to return keyed MAC value instead of just
    digest.
    """
    if data is not None:
        self.update(data)
    buf = create_string_buffer(256)
    size = c_size_t(256)
    if libcrypto.EVP_DigestSignFinal(self.ctx, buf, pointer(size)) <= 0:
        raise DigestError('SignFinal')
    self.digest_finalized = True
    # Only the first `size` bytes of the buffer are the MAC value.
    return buf.raw[:size.value]
Method digest is redefined to return keyed MAC value instead of just digest.
def bytes(num, check_result=False):
    """Returns num bytes of cryptographically strong pseudo-random bytes.

    If check_result is True, raises error if PRNG is not seeded enough.
    """
    if num <= 0:
        raise ValueError("'num' should be > 0")
    buf = create_string_buffer(num)
    seeded = libcrypto.RAND_bytes(buf, num)
    if check_result and seeded == 0:
        raise RandError("Random Number Generator not seeded sufficiently")
    return buf.raw[:num]
Returns num bytes of cryptographically strong pseudo-random bytes. If check_result is True, raises error if PRNG is not seeded enough
def pseudo_bytes(num):
    """Returns num bytes of pseudo random data.

    Pseudo-random byte sequences generated by pseudo_bytes() will be unique
    if they are of sufficient length, but are not necessarily
    unpredictable. They can be used for non-cryptographic purposes and for
    certain purposes in cryptographic protocols, but usually not for key
    generation etc.
    """
    if num <= 0:
        raise ValueError("'num' should be > 0")
    buf = create_string_buffer(num)
    libcrypto.RAND_pseudo_bytes(buf, num)
    return buf.raw[:num]
Returns num bytes of pseudo random data. Pseudo-random byte sequences generated by pseudo_bytes() will be unique if they are of sufficient length, but are not necessarily unpredictable. They can be used for non-cryptographic purposes and for certain purposes in cryptographic protocols, but usually not for key generation etc.
def seed(data, entropy=None):
    """Seeds random generator with data.

    If entropy is not None, it should be a floating point (double) value
    estimating the amount of entropy in the data (in bytes).
    """
    if not isinstance(data, bintype):
        raise TypeError("A string is expected")
    ptr = c_char_p(data)
    size = len(data)
    if entropy is None:
        libcrypto.RAND_seed(ptr, size)
    else:
        # RAND_add lets the caller declare how much entropy the data holds.
        libcrypto.RAND_add(ptr, size, entropy)
Seeds random generator with data. If entropy is not None, it should be floating point(double) value estimating amount of entropy in the data (in bytes).
def create(dotted, shortname, longname):
    """Creates new OID in the database.

    @param dotted - dotted-decimal representation of new OID
    @param shortname - short name for new OID
    @param longname - long name for new OID
    @returns Oid object corresponding to new OID

    This function should be used with extreme care. Whenever possible, it
    is better to add new OIDs via the OpenSSL configuration file. Results
    of calling this function twice for the same OID, or for an Oid already
    in the database, are undefined.
    """
    if pyver > 2:
        # OBJ_create expects byte strings on Python 3.
        dotted = dotted.encode('ascii')
        shortname = shortname.encode('utf-8')
        longname = longname.encode('utf-8')
    nid = libcrypto.OBJ_create(dotted, shortname, longname)
    if nid == 0:
        raise LibCryptoError("Problem adding new OID to the database")
    return Oid(nid)
Creates new OID in the database @param dotted - dotted-decimal representation of new OID @param shortname - short name for new OID @param longname - long name for new OID @returns Oid object corresponding to new OID This function should be used with extreme care. Whenever possible, it is better to add new OIDs via OpenSSL configuration file Results of calling this function twice for same OID or for Oid already in database are undefined
def dotted(self):
    """Return the dotted-decimal representation of this OID.

    NOTE: the source line was corrupted (the function body was duplicated
    and spliced into the middle of a string literal); this is the
    reconstructed single definition.
    """
    obj = libcrypto.OBJ_nid2obj(self.nid)
    buf = create_string_buffer(256)
    libcrypto.OBJ_obj2txt(buf, 256, obj, 1)
    if pyver == 2:
        return buf.value
    else:
        return buf.value.decode('ascii')
Returns dotted-decimal representation
def fromobj(obj):
    """Creates an OID object from the pointer to ASN1_OBJECT c structure.

    This method is intended for internal use by submodules which deal with
    libcrypto ASN1 parsing functions, such as x509 or CMS.
    """
    nid = libcrypto.OBJ_obj2nid(obj)
    if nid != 0:
        return Oid(nid)
    # Unknown OID: register it in the database under its dotted form.
    buf = create_string_buffer(80)
    dotted_len = libcrypto.OBJ_obj2txt(buf, 80, obj, 1)
    dotted = buf[:dotted_len]
    return create(dotted, dotted, dotted)
Creates an OID object from the pointer to ASN1_OBJECT c structure. This method intended for internal use for submodules which deal with libcrypto ASN1 parsing functions, such as x509 or CMS
def _password_callback(c):
    """Converts given user function or string to C password callback
    function, passable to openssl.

    If a function is passed, it is called upon reading or writing a PEM
    format private key with one argument which is True if we are writing
    the key (and the passphrase should be verified) and False if we are
    reading it.
    """
    if c is None:
        return PW_CALLBACK_FUNC(0)

    if callable(c):
        if pyver == 2:
            def __cb(buf, length, rwflag, userdata):
                pwd = c(rwflag)
                cnt = min(len(pwd), length)
                memmove(buf, pwd, cnt)
                return cnt
        else:
            def __cb(buf, length, rwflag, userdata):
                # On Python 3 the callback returns str; encode for C.
                pwd = c(rwflag).encode("utf-8")
                cnt = min(len(pwd), length)
                memmove(buf, pwd, cnt)
                return cnt
    else:
        if pyver > 2:
            c = c.encode("utf-8")

        def __cb(buf, length, rwflag, userdata):
            cnt = min(len(c), length)
            memmove(buf, c, cnt)
            return cnt

    return PW_CALLBACK_FUNC(__cb)
Converts given user function or string to C password callback function, passable to openssl. IF function is passed, it would be called upon reading or writing PEM format private key with one argument which is True if we are writing key and should verify passphrase and false if we are reading
def sign(self, digest, **kwargs):
    """Signs given digest and returns signature.

    Keyword arguments allow setting various algorithm-specific parameters.
    See pkeyutl(1) manual.
    """
    ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
    if ctx is None:
        raise PKeyError("Initailizing sign context")
    if libcrypto.EVP_PKEY_sign_init(ctx) < 1:
        raise PKeyError("sign_init")
    self._configure_context(ctx, kwargs)

    # First call with NULL output buffer asks for the signature size.
    siglen = c_long(0)
    if libcrypto.EVP_PKEY_sign(ctx, None, byref(siglen), digest,
                               len(digest)) < 1:
        raise PKeyError("computing signature length")
    sig = create_string_buffer(siglen.value)
    if libcrypto.EVP_PKEY_sign(ctx, sig, byref(siglen), digest,
                               len(digest)) < 1:
        raise PKeyError("signing")
    libcrypto.EVP_PKEY_CTX_free(ctx)
    return sig.raw[:int(siglen.value)]
Signs given digest and returns signature Keyword arguments allow setting various algorithm-specific parameters. See pkeyutl(1) manual.
def verify(self, digest, signature, **kwargs):
    """Verifies given signature on given digest.

    Returns True if Ok, False if don't match.
    Keyword arguments allow setting algorithm-specific parameters.
    """
    ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
    if ctx is None:
        raise PKeyError("Initailizing verify context")
    if libcrypto.EVP_PKEY_verify_init(ctx) < 1:
        raise PKeyError("verify_init")
    self._configure_context(ctx, kwargs)
    result = libcrypto.EVP_PKEY_verify(ctx, signature, len(signature),
                                       digest, len(digest))
    # Negative return means an error, 0 means bad signature, 1 means match.
    if result < 0:
        raise PKeyError("Signature verification")
    libcrypto.EVP_PKEY_CTX_free(ctx)
    return result > 0
Verifies given signature on given digest Returns True if Ok, False if don't match Keyword arguments allows to set algorithm-specific parameters
def derive(self, peerkey, **kwargs):
    """Derives shared key (DH, ECDH, VKO 34.10). Requires private key
    available.

    @param peerkey - other key (may be public only)

    Keyword parameters are algorithm-specific.
    """
    if not self.cansign:
        raise ValueError("No private key available")
    ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
    if ctx is None:
        raise PKeyError("Initailizing derive context")
    if libcrypto.EVP_PKEY_derive_init(ctx) < 1:
        raise PKeyError("derive_init")

    # This is workaround around missing functionality in GOST engine
    # it provides only numeric control command to set UKM, not
    # string one.
    self._configure_context(ctx, kwargs, ["ukm"])
    if libcrypto.EVP_PKEY_derive_set_peer(ctx, peerkey.key) <= 0:
        raise PKeyError("Cannot set peer key")
    if "ukm" in kwargs:
        # We just hardcode numeric command to set UKM here
        if libcrypto.EVP_PKEY_CTX_ctrl(ctx, -1, 1 << 10, 8, 8,
                                       kwargs["ukm"]) <= 0:
            raise PKeyError("Cannot set UKM")

    # First call with NULL buffer queries the shared key length.
    keylen = c_long(0)
    if libcrypto.EVP_PKEY_derive(ctx, None, byref(keylen)) <= 0:
        raise PKeyError("computing shared key length")
    buf = create_string_buffer(keylen.value)
    if libcrypto.EVP_PKEY_derive(ctx, buf, byref(keylen)) <= 0:
        raise PKeyError("computing actual shared key")
    libcrypto.EVP_PKEY_CTX_free(ctx)
    return buf.raw[:int(keylen.value)]
Derives shared key (DH,ECDH,VKO 34.10). Requires private key available @param peerkey - other key (may be public only) Keyword parameters are algorithm-specific
def exportpub(self, format="PEM"):
    """
    Serializes the public key.

    @param format - either "PEM" (default) or "DER"
    @returns serialized public key
    @raises PKeyError if serialization fails
    """
    out = Membio()
    if format == "PEM":
        ok = libcrypto.PEM_write_bio_PUBKEY(out.bio, self.key)
    else:
        ok = libcrypto.i2d_PUBKEY_bio(out.bio, self.key)
    if not ok:
        raise PKeyError("error serializing public key")
    return str(out)
Returns public key as PEM or DER structure.
def exportpriv(self, format="PEM", password=None, cipher=None):
    """
    Returns private key as PEM or DER structure.

    If password and cipher are specified, encrypts the key on the
    given password, using the given algorithm. Cipher must be a
    ctypescrypto.cipher.CipherType object.

    Password can be either a string or a callable with one argument,
    which returns the password. It is called with argument True,
    meaning that we are encrypting the key and the password should be
    verified (requested twice from the user, for example).
    """
    bio = Membio()
    if cipher is None:
        evp_cipher = None
    else:
        evp_cipher = cipher.cipher
    if format == "PEM":
        ret = libcrypto.PEM_write_bio_PrivateKey(bio.bio, self.key,
                                                 evp_cipher, None, 0,
                                                 _password_callback(password),
                                                 None)
        if ret ==0:
            raise PKeyError("error serializing private key")
        # PEM is text, return as native string
        return str(bio)
    else:
        # DER (PKCS#8) is binary, return as bytes
        ret = libcrypto.i2d_PKCS8PrivateKey_bio(bio.bio, self.key,
                                                evp_cipher, None, 0,
                                                _password_callback(password),
                                                None)
        if ret ==0:
            raise PKeyError("error serializing private key")
        return bintype(bio)
Returns private key as PEM or DER Structure. If password and cipher are specified, encrypts key on given password, using given algorithm. Cipher must be an ctypescrypto.cipher.CipherType object Password can be either string or function with one argument, which returns password. It is called with argument True, which means, that we are encrypting key, and password should be verified (requested twice from user, for example).
def _configure_context(ctx, opts, skip=()):
    """
    Configures context of public key operations.

    @param ctx - EVP_PKEY_CTX to configure
    @param opts - dictionary of options (from kwargs of calling
        function)
    @param skip - iterable of option names which shouldn't be passed
        to the context
    """
    for oper in opts:
        if oper in skip:
            continue
        # Control-string names must be ASCII bytes for OpenSSL
        if isinstance(oper,chartype):
            op = oper.encode("ascii")
        else:
            op = oper
        # Values: text is ASCII-encoded, bytes pass through, anything
        # else is stringified (bytes on py3, str on py2)
        if isinstance(opts[oper],chartype):
            value = opts[oper].encode("ascii")
        elif isinstance(opts[oper],bintype):
            value = opts[oper]
        else:
            if pyver == 2:
                value = str(opts[oper])
            else:
                value = str(opts[oper]).encode('ascii')
        ret = libcrypto.EVP_PKEY_CTX_ctrl_str(ctx, op, value)
        # -2 is OpenSSL's "command not supported" return code
        if ret == -2:
            raise PKeyError("Parameter %s is not supported by key" % oper)
        if ret < 1:
            raise PKeyError("Error setting parameter %s" % oper)
Configures context of public key operations @param ctx - context to configure @param opts - dictionary of options (from kwargs of calling function) @param skip - list of options which shouldn't be passed to context
def read(self, length=None):
    """
    Reads data from readable BIO. For test purposes.

    @param length - if specified, limits amount of data read.
        If not, BIO is read until end of buffer
    @returns bytes read
    """
    if not length is None:
        if not isinstance(length, inttype) :
            raise TypeError("length to read should be number")
        buf = create_string_buffer(length)
        readbytes = libcrypto.BIO_read(self.bio, buf, length)
        # -2 means the operation is not supported by this BIO type
        if readbytes == -2:
            raise NotImplementedError("Function is not supported by" +
                                      "this BIO")
        if readbytes == -1:
            raise IOError
        if readbytes == 0:
            return b""
        return buf.raw[:readbytes]
    else:
        # Read in 1K chunks until the BIO reports end of data
        buf = create_string_buffer(1024)
        out = b""
        readbytes = 1
        while readbytes > 0:
            readbytes = libcrypto.BIO_read(self.bio, buf, 1024)
            if readbytes == -2:
                raise NotImplementedError("Function is not supported by " +
                                          "this BIO")
            if readbytes == -1:
                raise IOError
            if readbytes > 0:
                out += buf.raw[:readbytes]
        return out
Reads data from a readable BIO. For test purposes. @param length - if specified, limits the amount of data read. If not, the BIO is read until the end of the buffer.
def write(self, data):
    """
    Writes data to writable bio. For test purposes.

    @param data - str or bytes; text is encoded as UTF-8 before
        writing
    """
    if pyver == 2:
        if isinstance(data, unicode):
            data = data.encode("utf-8")
        else:
            data = str(data)
    else:
        if not isinstance(data, bytes):
            data=str(data).encode("utf-8")
    written = libcrypto.BIO_write(self.bio, data, len(data))
    # -2 means the operation is not supported by this BIO type
    if written == -2:
        raise NotImplementedError("Function not supported by this BIO")
    if written < len(data):
        raise IOError("Not all data were successfully written")
Writes data to writable bio. For test purposes
def CMS(data, format="PEM"):
    """
    Factory function to create CMS objects from received messages.
    Parses CMS data and returns either SignedData, EnvelopedData or
    EncryptedData object, determined from the content type of the
    received CMS structure.

    @param data - serialized CMS message
    @param format - either "PEM" or "DER"
    @raises CMSError if data cannot be parsed
    @raises NotImplementedError for content types not handled here
    """
    bio = Membio(data)
    if format == "PEM":
        ptr = libcrypto.PEM_read_bio_CMS(bio.bio, None, None, None)
    else:
        ptr = libcrypto.d2i_CMS_bio(bio.bio, None)
    if ptr is None:
        raise CMSError("Error parsing CMS data")
    # Dispatch on the OID of the outermost content type
    typeoid = Oid(libcrypto.OBJ_obj2nid(libcrypto.CMS_get0_type(ptr)))
    if typeoid.shortname() == "pkcs7-signedData":
        return SignedData(ptr)
    elif typeoid.shortname() == "pkcs7-envelopedData":
        return EnvelopedData(ptr)
    elif typeoid.shortname() == "pkcs7-encryptedData":
        return EncryptedData(ptr)
    else:
        raise NotImplementedError("cannot handle "+typeoid.shortname())
Factory function to create CMS objects from received messages. Parses CMS data and returns either SignedData or EnvelopedData object. format argument can be either "PEM" or "DER". It determines object type from the contents of received CMS structure.
def pem(self):
    """
    Serializes the CMS structure in PEM format.

    @returns PEM text as a string
    @raises CMSError if serialization fails
    """
    out = Membio()
    if not libcrypto.PEM_write_bio_CMS(out.bio, self.ptr):
        raise CMSError("writing CMS to PEM")
    return str(out)
Serialize in PEM format
def create(data, cert, pkey, flags=Flags.BINARY, certs=None):
    """
    Creates SignedData message by signing data with pkey and
    certificate.

    @param data - data to sign
    @param cert - signer's certificate
    @param pkey - pkey object with private key to sign
    @param flags - ORed combination of Flags constants
    @param certs - list of X509 objects to include into CMS
    """
    if not pkey.cansign:
        raise ValueError("Specified keypair has no private part")
    if cert.pubkey != pkey:
        raise ValueError("Certificate doesn't match public key")
    bio = Membio(data)
    if certs is not None and len(certs) > 0:
        certstack_obj = StackOfX509(certs) # keep reference to prevent immediate __del__ call
        certstack = certstack_obj.ptr
    else:
        certstack = None
    ptr = libcrypto.CMS_sign(cert.cert, pkey.key, certstack, bio.bio,
                             flags)
    if ptr is None:
        raise CMSError("signing message")
    return SignedData(ptr)
Creates SignedData message by signing data with pkey and certificate. @param data - data to sign @param cert - signer's certificate @param pkey - pkey object with private key to sign @param flags - ORed combination of Flags constants @param certs - list of X509 objects to include into CMS
def sign(self, cert, pkey, digest_type=None, data=None, flags=Flags.BINARY):
    """
    Adds another signer to already signed message.

    @param cert - signer's certificate
    @param pkey - signer's private key
    @param digest_type - message digest to use as DigestType object
        (if None - the key's default digest is used)
    @param data - data to sign (if detached and Flags.REUSE_DIGEST is
        not specified)
    @param flags - ORed combination of Flags constants
    """
    if not pkey.cansign:
        raise ValueError("Specified keypair has no private part")
    if cert.pubkey != pkey:
        raise ValueError("Certificate doesn't match public key")
    # The original dereferenced digest_type.digest unconditionally,
    # raising AttributeError when digest_type was None even though None
    # is documented as "use the key default". A NULL digest tells
    # CMS_add1_signer to pick the default.
    if digest_type is None:
        digest_ptr = None
    else:
        digest_ptr = digest_type.digest
    if libcrypto.CMS_add1_signer(self.ptr, cert.cert, pkey.key,
                                 digest_ptr, flags) is None:
        raise CMSError("adding signer")
    if flags & Flags.REUSE_DIGEST == 0:
        if data is not None:
            bio = Membio(data)
            biodata = bio.bio
        else:
            biodata = None
        res = libcrypto.CMS_final(self.ptr, biodata, None, flags)
        if res <= 0:
            raise CMSError("Cannot finalize CMS")
Adds another signer to already signed message @param cert - signer's certificate @param pkey - signer's private key @param digest_type - message digest to use as DigestType object (if None - default for key would be used) @param data - data to sign (if detached and Flags.REUSE_DIGEST is not specified) @param flags - ORed combination of Flags constants
def verify(self, store, flags, data=None, certs=None):
    """
    Verifies signature under CMS message using trusted cert store.

    @param store - X509Store object with trusted certs
    @param flags - OR-ed combination of flag constants
    @param data - message data, if message has detached signature
    @param certs - list of certificates to use during verification.
        If Flags.NOINTERN is specified, these are the only
        certificates searched for signing certificates
    @returns True if signature valid, False otherwise
    """
    bio = None
    if data != None:
        bio_obj = Membio(data)
        bio = bio_obj.bio
    if certs is not None and len(certs) > 0:
        certstack_obj = StackOfX509(certs) # keep reference to prevent immediate __del__ call
        certstack = certstack_obj.ptr
    else:
        certstack = None
    res = libcrypto.CMS_verify(self.ptr, certstack, store.store, bio,
                               None, flags)
    return res > 0
Verifies signature under CMS message using trusted cert store @param store - X509Store object with trusted certs @param flags - OR-ed combination of flag constants @param data - message data, if message has detached signature @param certs - list of certificates to use during verification. If Flags.NOINTERN is specified, these are the only certificates to search for signing certificates @returns True if signature valid, False otherwise
def signers(self):
    """
    Returns the signers' certificates as a read-only StackOfX509.

    @raises CMSError if the signer list cannot be obtained
    """
    stack = libcrypto.CMS_get0_signers(self.ptr)
    if stack is None:
        raise CMSError("Cannot get signers")
    # get0 => OpenSSL keeps ownership, so mark the stack non-disposable
    return StackOfX509(ptr=stack, disposable=False)
Return list of signer's certificates
def data(self):
    """
    Returns the signed content if it is embedded in the message,
    or None for detached signatures.
    """
    if self.detached:
        return None
    out = Membio()
    # NO_VERIFY extracts the content without checking signatures
    if not libcrypto.CMS_verify(self.ptr, None, None, None, out.bio,
                                Flags.NO_VERIFY):
        raise CMSError("extract data")
    return str(out)
Returns signed data if present in the message
def addcert(self, cert):
    """
    Adds a certificate (probably an intermediate CA) to the
    SignedData structure.

    @param cert - X509 object to add
    """
    ret = libcrypto.CMS_add1_cert(self.ptr, cert.cert)
    if ret <= 0:
        raise CMSError("Cannot add cert")
Adds a certificate (probably intermediate CA) to the SignedData structure
def certs(self):
    """
    Returns the certificates contained in the structure as an owned
    (disposable) StackOfX509.
    """
    stack = libcrypto.CMS_get1_certs(self.ptr)
    if stack is None:
        raise CMSError("getting certs")
    # get1 => we own the returned stack and must free it when done
    return StackOfX509(ptr=stack, disposable=True)
List of the certificates contained in the structure
def create(recipients, data, cipher, flags=0):
    """
    Creates and encrypts message.

    @param recipients - list of X509 objects
    @param data - contents of the message
    @param cipher - CipherType object
    @param flags - flags
    """
    recp = StackOfX509(recipients)
    bio = Membio(data)
    cms_ptr = libcrypto.CMS_encrypt(recp.ptr, bio.bio, cipher.cipher,
                                    flags)
    if cms_ptr is None:
        raise CMSError("encrypt EnvelopedData")
    return EnvelopedData(cms_ptr)
Creates and encrypts message @param recipients - list of X509 objects @param data - contents of the message @param cipher - CipherType object @param flags - flag
def decrypt(self, pkey, cert, flags=0):
    """
    Decrypts message.

    @param pkey - private key to decrypt
    @param cert - certificate of this private key (to find the
        necessary RecipientInfo)
    @param flags - flags
    @returns - decrypted data
    """
    if not pkey.cansign:
        raise ValueError("Specified keypair has no private part")
    if pkey != cert.pubkey:
        raise ValueError("Certificate doesn't match private key")
    bio = Membio()
    res = libcrypto.CMS_decrypt(self.ptr, pkey.key, cert.cert, None,
                                bio.bio, flags)
    if res <= 0:
        raise CMSError("decrypting CMS")
    return str(bio)
Decrypts message @param pkey - private key to decrypt @param cert - certificate of this private key (to find the necessary RecipientInfo) @param flags - flags @returns - decrypted data
def create(data, cipher, key, flags=0):
    """
    Creates an EncryptedData message.

    @param data - data to encrypt
    @param cipher - cipher.CipherType object representing required
        cipher type
    @param key - byte array used as symmetric key
    @param flags - OR-ed combination of Flags constants
    """
    bio = Membio(data)
    cms = libcrypto.CMS_EncryptedData_encrypt(bio.bio, cipher.cipher,
                                              key, len(key), flags)
    if cms is None:
        raise CMSError("encrypt data")
    return EncryptedData(cms)
Creates an EncryptedData message. @param data - data to encrypt @param cipher - cipher.CipherType object representing required cipher type @param key - byte array used as symmetric key @param flags - OR-ed combination of Flags constants
def decrypt(self, key, flags=0):
    """
    Decrypts the encrypted data message.

    @param key - symmetric key to decrypt with
    @param flags - OR-ed combination of Flags constants
    """
    out = Membio()
    ret = libcrypto.CMS_EncryptedData_decrypt(self.ptr, key, len(key),
                                              None, out.bio, flags)
    if ret <= 0:
        raise CMSError("decrypt data")
    return str(out)
Decrypts encrypted data message @param key - symmetric key to decrypt @param flags - OR-ed combination of Flags constants
def name(self):
    """
    Returns the name of the digest algorithm.
    """
    # Look up and cache the name on first access
    if not hasattr(self, 'digest_name'):
        nid = libcrypto.EVP_MD_type(self.digest)
        self.digest_name = Oid(nid).longname()
    return self.digest_name
Returns name of the digest
def update(self, data, length=None):
    """
    Hashes given byte string.

    @param data - string to hash
    @param length - if not specified, entire string is hashed,
        otherwise only first length bytes
    @raises DigestError if the digest has already been finalized
    """
    if self.digest_finalized:
        raise DigestError("No updates allowed")
    if not isinstance(data, bintype):
        raise TypeError("A byte string is expected")
    if length is None:
        length = len(data)
    elif length > len(data):
        raise ValueError("Specified length is greater than length of data")
    result = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length)
    if result != 1:
        raise DigestError("Unable to update digest")
Hashes given byte string @param data - string to hash @param length - if not specified, entire string is hashed, otherwise only the first length bytes
def digest(self, data=None):
    """
    Finalizes digest operation and returns the digest value.
    Optionally hashes more data before finalizing.

    Repeated calls return the cached value; no further updates are
    possible after the first call.
    """
    if self.digest_finalized:
        return self.digest_out.raw[:self.digest_size]
    if data is not None:
        self.update(data)
    # 256 bytes is larger than any digest size EVP produces
    self.digest_out = create_string_buffer(256)
    length = c_long(0)
    result = libcrypto.EVP_DigestFinal_ex(self.ctx, self.digest_out,
                                          byref(length))
    if result != 1:
        raise DigestError("Unable to finalize digest")
    self.digest_finalized = True
    return self.digest_out.raw[:self.digest_size]
Finalizes digest operation and return digest value Optionally hashes more data before finalizing
def copy(self):
    """
    Creates a copy of the digest CTX, so that a digest can be taken
    now while continuing to hash more data on the original.
    """
    clone = Digest(self.digest_type)
    libcrypto.EVP_MD_CTX_copy(clone.ctx, self.ctx)
    return clone
Creates copy of the digest CTX to allow to compute digest while being able to hash more data
def _clean_ctx(self):
    """
    Clears and deallocates the digest context.
    Safe to call more than once.
    """
    try:
        if self.ctx is not None:
            libcrypto.EVP_MD_CTX_free(self.ctx)
            del self.ctx
    except AttributeError:
        # self.ctx was already deleted by a previous call
        pass
    self.digest_out = None
    self.digest_finalized = False
Clears and deallocates context
def hexdigest(self, data=None):
    """
    Returns the digest in hexadecimal form, for compatibility with
    hashlib.

    @param data - optional additional data to hash before finalizing
    """
    from base64 import b16encode
    raw = b16encode(self.digest(data))
    # b16encode yields str on py2 but bytes on py3
    if pyver == 2:
        return raw
    return raw.decode('us-ascii')
Returns digest in the hexadecimal form. For compatibility with hashlib
def _X509__asn1date_to_datetime(asn1date):
    """
    Converts openssl ASN1_TIME object to python datetime.datetime.

    OpenSSL prints the time including a zone name (normally GMT),
    which strptime's %Z consumes; the result is marked as UTC.
    """
    bio = Membio()
    libcrypto.ASN1_TIME_print(bio.bio, asn1date)
    pydate = datetime.strptime(str(bio), "%b %d %H:%M:%S %Y %Z")
    return pydate.replace(tzinfo=utc)
Converts openssl ASN1_TIME object to python datetime.datetime
def find(self, oid):
    """
    Return list of extensions with given Oid.

    @param oid - ctypescrypto.oid.Oid to search for
    """
    if not isinstance(oid, Oid):
        raise TypeError("Need crytypescrypto.oid.Oid as argument")
    found = []
    index = -1
    end = len(self)
    while True:
        # X509_get_ext_by_NID resumes the search after the given index
        index = libcrypto.X509_get_ext_by_NID(self.cert.cert, oid.nid,
                                              index)
        if index >= end or index < 0:
            break
        found.append(self[index])
    return found
Return list of extensions with given Oid
def find_critical(self, crit=True):
    """
    Return list of critical extensions (or list of non-critical, if
    optional second argument is False).

    @param crit - if True (default) search critical extensions,
        otherwise non-critical ones
    """
    if crit:
        flag = 1
    else:
        flag = 0
    found = []
    end = len(self)
    index = -1
    while True:
        # Resumes the search after the previously found index
        index = libcrypto.X509_get_ext_by_critical(self.cert.cert, flag,
                                                   index)
        if index >= end or index < 0:
            break
        found.append(self[index])
    return found
Return list of critical extensions (or list of non-critical extensions, if the optional second argument is False).
def pem(self):
    """
    Returns PEM representation of the certificate.

    @raises X509Error if serialization fails
    """
    out = Membio()
    ok = libcrypto.PEM_write_bio_X509(out.bio, self.cert)
    if ok == 0:
        raise X509Error("error serializing certificate")
    return str(out)
Returns PEM representation of the certificate
def verify(self, store=None, chain=None, key=None):
    """
    Verify self. Supports verification against an X509 store object
    or just a public issuer key.

    @param store - X509Store object.
    @param chain - list of X509 objects to add into verification
        context. These objects are untrusted, but can be used to
        build certificate chain up to trusted object in the store
    @param key - PKey object with public key to validate signature

    Parameters store and key are mutually exclusive. If neither is
    specified, attempts to verify self as self-signed certificate.
    @returns True if verification succeeds, False otherwise
    """
    if store is not None and key is not None:
        raise X509Error("key and store cannot be specified simultaneously")
    if store is not None:
        ctx = libcrypto.X509_STORE_CTX_new()
        if ctx is None:
            raise X509Error("Error allocating X509_STORE_CTX")
        if chain is not None and len(chain) > 0:
            chain_ptr = StackOfX509(chain).ptr
        else:
            chain_ptr = None
        if libcrypto.X509_STORE_CTX_init(ctx, store.store, self.cert,
                                         chain_ptr) < 0:
            raise X509Error("Error allocating X509_STORE_CTX")
        res = libcrypto.X509_verify_cert(ctx)
        libcrypto.X509_STORE_CTX_free(ctx)
        return res > 0
    else:
        if key is None:
            if self.issuer != self.subject:
                # Not a self-signed certificate
                return False
            key = self.pubkey
        res = libcrypto.X509_verify(self.cert, key.key)
        if res < 0:
            raise X509Error("X509_verify failed")
        return res > 0
Verify self. Supports verification on both X509 store object or just public issuer key @param store X509Store object. @param chain - list of X509 objects to add into verification context.These objects are untrusted, but can be used to build certificate chain up to trusted object in the store @param key - PKey object with open key to validate signature parameters store and key are mutually exclusive. If neither is specified, attempts to verify self as self-signed certificate
def serial(self):
    """
    Returns the certificate serial number as an integer.
    """
    asnint = libcrypto.X509_get_serialNumber(self.cert)
    out = Membio()
    # i2a prints the ASN1 INTEGER as hexadecimal text into the BIO
    libcrypto.i2a_ASN1_INTEGER(out.bio, asnint)
    return int(str(out), 16)
Serial number of certificate as integer
def add_cert(self, cert):
    """
    Explicitly adds a certificate to the set of trusted certificates
    in the store.

    @param cert - X509 object to add
    """
    if not isinstance(cert, X509):
        raise TypeError("cert should be X509")
    libcrypto.X509_STORE_add_cert(self.store, cert.cert)
Explicitly adds certificate to the set of trusted certificates in the store @param cert - X509 object to add
def setpurpose(self, purpose):
    """
    Sets certificate purpose which verified certificate should match.

    @param purpose - number from 1 to 9 or standard string defined in
        OpenSSL. Possible strings - sslclient, sslserver, nssslserver,
        smimesign, smimeencrypt, crlsign, any, ocsphelper
    @raises TypeError if purpose is neither str nor int
    @raises X509Error if the purpose is unknown or cannot be set
    """
    if isinstance(purpose, str):
        # Translate symbolic name to numeric purpose index
        purp_no = libcrypto.X509_PURPOSE_get_by_sname(purpose)
        if purp_no <= 0:
            raise X509Error("Invalid certificate purpose '%s'" % purpose)
    elif isinstance(purpose, int):
        purp_no = purpose
    else:
        # The original fell through with purp_no unbound here,
        # producing a confusing NameError instead of a TypeError
        raise TypeError("purpose must be int or str")
    if libcrypto.X509_STORE_set_purpose(self.store, purp_no) <= 0:
        raise X509Error("cannot set purpose")
Sets certificate purpose which verified certificate should match @param purpose - number from 1 to 9 or standard string defined in OpenSSL. Possible strings - sslclient, sslserver, nssslserver, smimesign, smimeencrypt, crlsign, any, ocsphelper
def settime(self, time):
    """
    Set point in time used to check validity of certificates for.
    Time can be either python datetime object or number of seconds
    since epoch.

    NOTE(review): this method unconditionally raises
    NotImplementedError after validating its argument - the libcrypto
    call that would actually apply the check time is missing. Also,
    `datetime.date` here is an attribute lookup on the datetime class,
    not the datetime.date type - confirm the intended isinstance check.
    """
    if isinstance(time, datetime) or isinstance(time, datetime.date):
        # strftime("%s") is a platform-specific (glibc) extension
        seconds = int(time.strftime("%s"))
    elif isinstance(time, int):
        seconds = time
    else:
        raise TypeError("datetime.date, datetime.datetime or integer " +
                        "is required as time argument")
    raise NotImplementedError
Set point in time used to check validity of certificates. Time can be either a python datetime object or the number of seconds since the epoch.
def append(self, value):
    """
    Adds a certificate to the stack.

    @param value - X509 object to push
    @raises ValueError if this stack does not own its contents
        (read-only wrapper around an OpenSSL-owned stack)
    """
    if not self.need_free:
        raise ValueError("Stack is read-only")
    if not isinstance(value, X509):
        raise TypeError('StackOfX509 can contain only X509 objects')
    # Push a duplicate so the stack owns its own reference
    sk_push(self.ptr, libcrypto.X509_dup(value.cert))
Adds certificate to stack
def pbkdf2(password, salt, outlen, digesttype="sha1", iterations=2000):
    """
    Interface to PKCS5_PBKDF2_HMAC function.

    @param password - password to derive key from (str or bytes;
        text is encoded as UTF-8)
    @param salt - random salt to use for key derivation
    @param outlen - number of bytes to derive
    @param digesttype - name of digest to use (default sha1)
    @param iterations - number of iterations to use
    @returns outlen bytes of key material derived from password and
        salt
    """
    dgst = DigestType(digesttype)
    out = create_string_buffer(outlen)
    if isinstance(password,chartype):
        pwd = password.encode("utf-8")
    else:
        pwd = password
    res = libcrypto.PKCS5_PBKDF2_HMAC(pwd, len(pwd), salt, len(salt),
                                      iterations, dgst.digest, outlen,
                                      out)
    if res <= 0:
        raise LibCryptoError("error computing PBKDF2")
    return out.raw
Interface to PKCS5_PBKDF2_HMAC function Parameters: @param password - password to derive key from @param salt - random salt to use for key derivation @param outlen - number of bytes to derive @param digesttype - name of digest to use to use (default sha1) @param iterations - number of iterations to use @returns outlen bytes of key material derived from password and salt
def new(algname, key, encrypt=True, iv=None):
    """
    Returns a new cipher object ready to encrypt or decrypt data.

    @param algname - string algorithm name like in openssl command
        line
    @param key - binary string representing cipher key
    @param encrypt - if True (default) cipher would be initialized
        for encryption, otherwise - for decryption
    @param iv - initialization vector
    """
    return Cipher(CipherType(algname), key, iv, encrypt)
Returns new cipher object ready to encrypt or decrypt data @param algname - string algorithm name like in openssl command line @param key - binary string representing cipher key @param encrypt - if True (default) cipher would be initialized for encryption, otherwise - for decryption @param iv - initialization vector
def padding(self, padding=True):
    """
    Enables or disables the padding mode of the cipher.

    @param padding - True to enable padding, False to disable
    """
    flag = 1 if padding else 0
    libcrypto.EVP_CIPHER_CTX_set_padding(self.ctx, flag)
Sets padding mode of the cipher
def update(self, data):
    """
    Performs actual encryption/decryption.

    @param data - part of the plain text/ciphertext to process
    @returns - part of ciphertext/plain text

    Passed chunk of text doesn't need to contain full cipher blocks.
    If necessary, part of passed data would be kept internally until
    next data would be received or finish called.
    """
    if self.cipher_finalized:
        raise CipherError("No updates allowed")
    if not isinstance(data, bintype):
        raise TypeError("A byte string is expected")
    if len(data) == 0:
        # NOTE(review): returns str "" here while other paths return
        # bytes - possible py3 type inconsistency; confirm callers
        return ""
    # Output can be up to one block larger than the input
    outbuf = create_string_buffer(self.block_size+len(data))
    outlen = c_int(0)
    ret = libcrypto.EVP_CipherUpdate(self.ctx, outbuf, byref(outlen),
                                     data, len(data))
    if ret <= 0:
        # Context is unusable after a failure; tear it down
        self._clean_ctx()
        self.cipher_finalized = True
        raise CipherError("problem processing data")
    return outbuf.raw[:int(outlen.value)]
Performs actual encryption/decryption @param data - part of the plain text/ciphertext to process @returns - part of ciphertext/plain text. Passed chunk of text doesn't need to contain full cipher blocks. If necessary, part of the passed data is kept internally until more data is received or finish() is called.
def finish(self):
    """
    Finalizes processing. If some data are kept in the internal
    state, they would be processed and returned.
    """
    if self.cipher_finalized:
        raise CipherError("Cipher operation is already completed")
    outbuf = create_string_buffer(self.block_size)
    self.cipher_finalized = True
    outlen = c_int(0)
    result = libcrypto.EVP_CipherFinal_ex(self.ctx, outbuf,
                                          byref(outlen))
    if result == 0:
        self._clean_ctx()
        raise CipherError("Unable to finalize cipher")
    if outlen.value > 0:
        return outbuf.raw[:int(outlen.value)]
    else:
        return b""
Finalizes processing. If some data are kept in the internal state, they would be processed and returned.
def _clean_ctx(self):
    """
    Cleans up cipher ctx and deallocates it.
    Safe to call more than once.
    """
    try:
        if self.ctx is not None:
            # Reset the context before freeing it
            self.__ctxcleanup(self.ctx)
            libcrypto.EVP_CIPHER_CTX_free(self.ctx)
            del self.ctx
    except AttributeError:
        # self.ctx was already deleted by a previous call
        pass
    self.cipher_finalized = True
Cleans up cipher ctx and deallocates it
def set_default(eng, algorithms=0xFFFF):
    """
    Sets specified engine as default for all algorithms, supported by
    it.

    For compatibility with 0.2.x, if a string is passed instead of an
    engine, attempts to load an engine with this id.

    @param eng - Engine object or engine id string
    @param algorithms - bitmask of algorithm classes to register the
        engine for
    """
    if not isinstance(eng,Engine):
        eng=Engine(eng)
    global default
    libcrypto.ENGINE_set_default(eng.ptr, c_int(algorithms))
    # Keep a module-level reference; presumably so the engine object
    # stays alive while registered as default -- confirm
    default = eng
Sets specified engine as default for all algorithms, supported by it For compatibility with 0.2.x if string is passed instead of engine, attempts to load engine with this id
def from_keyed_iterable(iterable, key, filter_func=None):
    """
    Construct a dictionary out of an iterable, using an attribute name
    as the key. Optionally provide a filter function, to determine
    what should be kept in the dictionary.

    @raises RuntimeError if an element lacks the keyed attribute
    """
    result = {}
    for item in iterable:
        try:
            key_value = getattr(item, key)
        except AttributeError:
            raise RuntimeError(
                "{} does not have the keyed attribute: {}".format(item, key))
        if filter_func is not None and not filter_func(item):
            continue
        result.setdefault(key_value, []).append(item)
    return result
Construct a dictionary out of an iterable, using an attribute name as the key. Optionally provide a filter function, to determine what should be kept in the dictionary.
def subtract_by_key(dict_a, dict_b):
    """
    Given two dicts, a and b, this function returns c = a - b, where
    a - b is defined as the key difference between a and b.

    e.g., {1:None, 2:3, 3:"yellow", 4:True} - {2:4, 1:"green"} =
    {3:"yellow", 4:True}
    """
    return {key: value for key, value in dict_a.items()
            if key not in dict_b}
given two dicts, a and b, this function returns c = a - b, where a - b is defined as the key difference between a and b. e.g., {1:None, 2:3, 3:"yellow", 4:True} - {2:4, 1:"green"} = {3:"yellow", 4:True}
def subtract(dict_a, dict_b, strict=False):
    """
    A stricter form of subtract_by_key(): with strict=True an entry is
    removed from dict_a only if the key is in dict_b *and* the value
    at that key matches. With strict=False this is plain
    subtract_by_key().
    """
    if not strict:
        return subtract_by_key(dict_a, dict_b)
    return {key: value for key, value in dict_a.items()
            if key not in dict_b or dict_b[key] != value}
a stricter form of subtract_by_key(), this version will only remove an entry from dict_a if the key is in dict_b *and* the value at that key matches
def winnow_by_keys(dct, keys=None, filter_func=None):
    """
    Separates a dict into has-keys and not-has-keys pairs, using
    either a list of keys or a filtering function.

    If neither keys nor filter_func is given, everything lands in the
    "has not" half.
    """
    passed = {}
    failed = {}
    for key, value in dct.items():
        if keys is not None:
            accepted = key in keys
        elif filter_func is not None:
            accepted = filter_func(key)
        else:
            accepted = False
        (passed if accepted else failed)[key] = value
    return WinnowedResult(passed, failed)
separates a dict into has-keys and not-has-keys pairs, using either a list of keys or a filtering function.
def setdefaults(dct, defaults):
    """
    Given a target dct and a dict of {key: default value} pairs, calls
    setdefault for all of those pairs. Returns dct (mutated in place).
    """
    for key, value in defaults.items():
        dct.setdefault(key, value)
    return dct
Given a target dct and a dict of {key:default value} pairs, calls setdefault for all of those pairs.
def unlist(list_thing, complain=True):
    """
    Transforms [Something] -> Something. By default, raises a
    ValueError for any other list length; with complain=False, returns
    None instead.
    """
    count = len(list_thing)
    if complain:
        if count > 1:
            raise ValueError("More than one element in {}".format(list_thing))
        if count == 0:
            raise ValueError("Nothing in {}".format(list_thing))
    if count == 1:
        return list_thing[0]
    return None
transforms [Something] -> Something. By default, raises a ValueError for any other list values.
def flatten(iterable):
    """
    Fully flattens an iterable:
        In: flatten([1,2,3,4,[5,6,[7,8]]])
        Out: [1,2,3,4,5,6,7,8]

    The result has the same type as the outermost container. Strings
    and bytes are kept whole: the original recursed into them, and
    since iterating a 1-character string yields itself, any non-empty
    string element caused infinite recursion (RecursionError).
    """
    container = iterable.__class__
    flattened = []
    for item in iterable:
        if isinstance(item, (str, bytes)):
            # atoms, despite being iterable
            flattened.append(item)
        else:
            try:
                flattened.extend(flatten(item))
            except TypeError:
                # non-iterable leaf value
                flattened.append(item)
    return container(flattened)
Fully flattens an iterable: In: flatten([1,2,3,4,[5,6,[7,8]]]) Out: [1,2,3,4,5,6,7,8]
def flat_map(iterable, func):
    """
    Applies func to each element and concatenates the resulting
    sequences (classic flatMap). func must take an item and return a
    sequence that contains that item.
    """
    out = []
    for item in iterable:
        mapped = func(item)
        if len(mapped) > 0:
            out.extend(mapped)
    return out
func must take an item and return an iterable that contains that item. This is flatMap in the classic sense.
def product(sequence, initial=1):
    """
    Like the built-in sum, but for multiplication.

    @param sequence - iterable of values to multiply together
    @param initial - starting value (default 1)
    @raises TypeError if sequence is not iterable
    """
    # Local imports keep this self-contained: collections.Iterable was
    # removed in Python 3.10 (the original raised AttributeError there)
    # and reduce lives in functools on Python 3.
    from collections.abc import Iterable
    from functools import reduce
    import operator
    if not isinstance(sequence, Iterable):
        raise TypeError("'{}' object is not iterable".format(type(sequence).__name__))
    return reduce(operator.mul, sequence, initial)
like the built-in sum, but for multiplication.