Dataset columns (per-field min/max summary):

  file_path          string,  length 22 to 162
  content            string,  length 19 to 501k
  size               int64,   19 to 501k
  lang               string,  1 distinct value
  avg_line_length    float64, 6.33 to 100
  max_line_length    int64,   18 to 935
  alphanum_fraction  float64, 0.34 to 0.93
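The summary above is just per-column statistics for the rows that follow. As a minimal sketch only (assuming the rows have been exported somewhere pandas can read; the file name rows.parquet and the pandas dependency are assumptions, not part of this dump), filtering rows by these columns could look like:

import pandas as pd

# Hypothetical export of the rows shown below.
df = pd.read_parquet('rows.parquet')

# Keep Python files that are mostly alphanumeric and not minified.
subset = df[(df['lang'] == 'Python')
            & (df['alphanum_fraction'] > 0.5)
            & (df['max_line_length'] < 200)]
print(subset[['file_path', 'size', 'avg_line_length']].head())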
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/docstring.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from botocore.docs.method import document_model_driven_method from botocore.docs.waiter import document_wait_method from botocore.docs.paginator import document_paginate_method from botocore.docs.bcdoc.restdoc import DocumentStructure class LazyLoadedDocstring(str): """Used for lazily loading docstrings You can instantiate this class and assign it to a __doc__ value. The docstring will not be generated till accessed via __doc__ or help(). Note that all docstring classes **must** subclass from this class. It cannot be used directly as a docstring. """ def __init__(self, *args, **kwargs): """ The args and kwargs are the same as the underlying document generation function. These just get proxied to the underlying function. """ super(LazyLoadedDocstring, self).__init__() self._gen_args = args self._gen_kwargs = kwargs self._docstring = None def __new__(cls, *args, **kwargs): # Needed in order to sub class from str with args and kwargs return super(LazyLoadedDocstring, cls).__new__(cls) def _write_docstring(self, *args, **kwargs): raise NotImplementedError( '_write_docstring is not implemented. Please subclass from ' 'this class and provide your own _write_docstring method' ) def expandtabs(self, tabsize=8): """Expands tabs to spaces So this is a big hack in order to get lazy loaded docstring work for the ``help()``. In the ``help()`` function, ``pydoc`` and ``inspect`` are used. At some point the ``inspect.cleandoc`` method is called. To clean the docs ``expandtabs`` is called and that is where we override the method to generate and return the docstrings. """ if self._docstring is None: self._generate() return self._docstring.expandtabs(tabsize) def __str__(self): return self._generate() # __doc__ of target will use either __repr__ or __str__ of this class. __repr__ = __str__ def _generate(self): # Generate the docstring if it is not already cached. if self._docstring is None: self._docstring = self._create_docstring() return self._docstring def _create_docstring(self): docstring_structure = DocumentStructure('docstring', target='html') # Call the document method function with the args and kwargs # passed to the class. self._write_docstring( docstring_structure, *self._gen_args, **self._gen_kwargs) return docstring_structure.flush_structure().decode('utf-8') class ClientMethodDocstring(LazyLoadedDocstring): def _write_docstring(self, *args, **kwargs): document_model_driven_method(*args, **kwargs) class WaiterDocstring(LazyLoadedDocstring): def _write_docstring(self, *args, **kwargs): document_wait_method(*args, **kwargs) class PaginatorDocstring(LazyLoadedDocstring): def _write_docstring(self, *args, **kwargs): document_paginate_method(*args, **kwargs)
3,699
Python
37.14433
75
0.666126
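The file above defers docstring generation until help() forces the string. Below is a standalone sketch of that lazy-docstring pattern (an illustration of the idea only, not botocore's own classes; build_docs and some_method are made-up names):

class LazyDocstring(str):
    """Illustrative lazy docstring: generated on first access, then cached."""

    def __new__(cls, *args, **kwargs):
        # str.__new__ must not receive the generator arguments.
        return super().__new__(cls)

    def __init__(self, generate, *args, **kwargs):
        super().__init__()
        self._generate = generate
        self._args = args
        self._kwargs = kwargs
        self._cached = None

    def expandtabs(self, tabsize=8):
        # help()/inspect.cleandoc end up calling expandtabs(), which is
        # where generation is triggered, exactly as in the file above.
        return self._render().expandtabs(tabsize)

    def __str__(self):
        return self._render()

    def _render(self):
        if self._cached is None:
            self._cached = self._generate(*self._args, **self._kwargs)
        return self._cached


def build_docs(name):
    return 'Documentation for %s, generated only when first needed.' % name


def some_method():
    pass


some_method.__doc__ = LazyDocstring(build_docs, 'some_method')
help(some_method)   # generation happens here, not at assignment time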
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/utils.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import re from collections import namedtuple def py_type_name(type_name): """Get the Python type name for a given model type. >>> py_type_name('list') 'list' >>> py_type_name('structure') 'dict' :rtype: string """ return { 'blob': 'bytes', 'character': 'string', 'double': 'float', 'long': 'integer', 'map': 'dict', 'structure': 'dict', 'timestamp': 'datetime', }.get(type_name, type_name) def py_default(type_name): """Get the Python default value for a given model type. >>> py_default('string') '\'string\'' >>> py_default('list') '[...]' >>> py_default('unknown') '...' :rtype: string """ return { 'double': '123.0', 'long': '123', 'integer': '123', 'string': "'string'", 'blob': "b'bytes'", 'boolean': 'True|False', 'list': '[...]', 'map': '{...}', 'structure': '{...}', 'timestamp': 'datetime(2015, 1, 1)', }.get(type_name, '...') def get_official_service_name(service_model): """Generate the official name of an AWS Service :param service_model: The service model representing the service """ official_name = service_model.metadata.get('serviceFullName') short_name = service_model.metadata.get('serviceAbbreviation', '') if short_name.startswith('Amazon'): short_name = short_name[7:] if short_name.startswith('AWS'): short_name = short_name[4:] if short_name and short_name.lower() not in official_name.lower(): official_name += ' ({0})'.format(short_name) return official_name _DocumentedShape = namedtuple( 'DocumentedShape', ['name', 'type_name', 'documentation', 'metadata', 'members', 'required_members']) class DocumentedShape (_DocumentedShape): """Use this class to inject new shapes into a model for documentation""" def __new__(cls, name, type_name, documentation, metadata=None, members=None, required_members=None): if metadata is None: metadata = [] if members is None: members = [] if required_members is None: required_members = [] return super(DocumentedShape, cls).__new__( cls, name, type_name, documentation, metadata, members, required_members) class AutoPopulatedParam(object): def __init__(self, name, param_description=None): self.name = name self.param_description = param_description if param_description is None: self.param_description = ( 'Please note that this parameter is automatically populated ' 'if it is not provided. Including this parameter is not ' 'required\n') def document_auto_populated_param(self, event_name, section, **kwargs): """Documents auto populated parameters It will remove any required marks for the parameter, remove the parameter from the example, and add a snippet about the parameter being autopopulated in the description. 
""" if event_name.startswith('docs.request-params'): if self.name in section.available_sections: section = section.get_section(self.name) if 'is-required' in section.available_sections: section.delete_section('is-required') description_section = section.get_section( 'param-documentation') description_section.writeln(self.param_description) elif event_name.startswith('docs.request-example'): section = section.get_section('structure-value') if self.name in section.available_sections: section.delete_section(self.name) class HideParamFromOperations(object): """Hides a single parameter from multiple operations. This method will remove a parameter from documentation and from examples. This method is typically used for things that are automatically populated because a user would be unable to provide a value (e.g., a checksum of a serialized XML request body).""" def __init__(self, service_name, parameter_name, operation_names): """ :type service_name: str :param service_name: Name of the service to modify. :type parameter_name: str :param parameter_name: Name of the parameter to modify. :type operation_names: list :param operation_names: Operation names to modify. """ self._parameter_name = parameter_name self._params_events = set() self._example_events = set() # Build up the sets of relevant event names. param_template = 'docs.request-params.%s.%s.complete-section' example_template = 'docs.request-example.%s.%s.complete-section' for name in operation_names: self._params_events.add(param_template % (service_name, name)) self._example_events.add(example_template % (service_name, name)) def hide_param(self, event_name, section, **kwargs): if event_name in self._example_events: # Modify the structure value for example events. section = section.get_section('structure-value') elif event_name not in self._params_events: return if self._parameter_name in section.available_sections: section.delete_section(self._parameter_name) class AppendParamDocumentation(object): """Appends documentation to a specific parameter""" def __init__(self, parameter_name, doc_string): self._parameter_name = parameter_name self._doc_string = doc_string def append_documentation(self, event_name, section, **kwargs): if self._parameter_name in section.available_sections: section = section.get_section(self._parameter_name) description_section = section.get_section( 'param-documentation') description_section.writeln(self._doc_string) _CONTROLS = { '\n': '\\n', '\r': '\\r', '\t': '\\t', '\b': '\\b', '\f': '\\f', } # Combines all CONTROLS keys into a big or regular expression _ESCAPE_CONTROLS_RE = re.compile('|'.join(map(re.escape, _CONTROLS))) # Based on the match get the appropriate replacement from CONTROLS _CONTROLS_MATCH_HANDLER = lambda match: _CONTROLS[match.group(0)] def escape_controls(value): return _ESCAPE_CONTROLS_RE.sub(_CONTROLS_MATCH_HANDLER, value)
7,176
Python
35.247475
77
0.621098
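A short usage sketch of the helpers defined in utils.py above (assumes this botocore is importable; the expected results in the comments follow the mappings shown in the file):

from botocore.docs.utils import py_type_name, py_default, escape_controls

print(py_type_name('structure'))    # dict
print(py_type_name('timestamp'))    # datetime
print(py_default('list'))           # [...]
print(py_default('blob'))           # b'bytes'
print(escape_controls('a\tb\nc'))   # a\tb\nc  (control characters escaped)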
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/shape.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # NOTE: This class should not be instantiated and its # ``traverse_and_document_shape`` method called directly. It should be # inherited from a Documenter class with the appropriate methods # and attributes. from botocore.utils import is_json_value_header class ShapeDocumenter(object): EVENT_NAME = '' def __init__(self, service_name, operation_name, event_emitter, context=None): self._service_name = service_name self._operation_name = operation_name self._event_emitter = event_emitter self._context = context if context is None: self._context = { 'special_shape_types': {} } def traverse_and_document_shape(self, section, shape, history, include=None, exclude=None, name=None, is_required=False): """Traverses and documents a shape Will take a self class and call its appropriate methods as a shape is traversed. :param section: The section to document. :param history: A list of the names of the shapes that have been traversed. :type include: Dictionary where keys are parameter names and values are the shapes of the parameter names. :param include: The parameter shapes to include in the documentation. :type exclude: List of the names of the parameters to exclude. :param exclude: The names of the parameters to exclude from documentation. :param name: The name of the shape. :param is_required: If the shape is a required member. 
""" param_type = shape.type_name if getattr(shape, 'serialization', {}).get('eventstream'): param_type = 'event_stream' if shape.name in history: self.document_recursive_shape(section, shape, name=name) else: history.append(shape.name) is_top_level_param = (len(history) == 2) getattr(self, 'document_shape_type_%s' % param_type, self.document_shape_default)( section, shape, history=history, name=name, include=include, exclude=exclude, is_top_level_param=is_top_level_param, is_required=is_required) if is_top_level_param: self._event_emitter.emit( 'docs.%s.%s.%s.%s' % (self.EVENT_NAME, self._service_name, self._operation_name, name), section=section) at_overlying_method_section = (len(history) == 1) if at_overlying_method_section: self._event_emitter.emit( 'docs.%s.%s.%s.complete-section' % (self.EVENT_NAME, self._service_name, self._operation_name), section=section) history.pop() def _get_special_py_default(self, shape): special_defaults = { 'jsonvalue_header': '{...}|[...]|123|123.4|\'string\'|True|None', 'streaming_input_shape': 'b\'bytes\'|file', 'streaming_output_shape': 'StreamingBody()', 'eventstream_output_shape': 'EventStream()', } return self._get_value_for_special_type(shape, special_defaults) def _get_special_py_type_name(self, shape): special_type_names = { 'jsonvalue_header': 'JSON serializable', 'streaming_input_shape': 'bytes or seekable file-like object', 'streaming_output_shape': ':class:`.StreamingBody`', 'eventstream_output_shape': ':class:`.EventStream`', } return self._get_value_for_special_type(shape, special_type_names) def _get_value_for_special_type(self, shape, special_type_map): if is_json_value_header(shape): return special_type_map['jsonvalue_header'] for special_type, marked_shape in self._context[ 'special_shape_types'].items(): if special_type in special_type_map: if shape == marked_shape: return special_type_map[special_type] return None
4,994
Python
41.330508
78
0.572887
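The core of traverse_and_document_shape above is a getattr dispatch to document_shape_type_<type_name>, with document_shape_default as the fallback. A standalone sketch of that dispatch pattern (illustrative only, not botocore code):

class MiniShapeDocumenter:
    """Illustrates the getattr-based dispatch used in the file above."""

    def document(self, type_name, name):
        handler = getattr(self, 'document_shape_type_%s' % type_name,
                          self.document_shape_default)
        return handler(name)

    def document_shape_type_structure(self, name):
        return '%s (dict)' % name

    def document_shape_type_list(self, name):
        return '%s (list)' % name

    def document_shape_default(self, name):
        return '%s (scalar)' % name


d = MiniShapeDocumenter()
print(d.document('structure', 'Tags'))   # Tags (dict)
print(d.document('blob', 'Body'))        # Body (scalar)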
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/paginator.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from botocore import xform_name from botocore.compat import OrderedDict from botocore.docs.utils import DocumentedShape from botocore.utils import get_service_module_name from botocore.docs.method import document_model_driven_method class PaginatorDocumenter(object): def __init__(self, client, service_paginator_model): self._client = client self._service_name = self._client.meta.service_model.service_name self._service_paginator_model = service_paginator_model def document_paginators(self, section): """Documents the various paginators for a service param section: The section to write to. """ section.style.h2('Paginators') section.style.new_line() section.writeln('The available paginators are:') paginator_names = sorted( self._service_paginator_model._paginator_config) # List the available paginators and then document each paginator. for paginator_name in paginator_names: section.style.li( ':py:class:`%s.Paginator.%s`' % ( self._client.__class__.__name__, paginator_name)) self._add_paginator(section, paginator_name) def _add_paginator(self, section, paginator_name): section = section.add_new_section(paginator_name) # Docment the paginator class section.style.start_sphinx_py_class( class_name='%s.Paginator.%s' % ( self._client.__class__.__name__, paginator_name)) section.style.start_codeblock() section.style.new_line() # Document how to instantiate the paginator. section.write( 'paginator = client.get_paginator(\'%s\')' % xform_name( paginator_name) ) section.style.end_codeblock() section.style.new_line() # Get the pagination model for the particular paginator. paginator_config = self._service_paginator_model.get_paginator( paginator_name) document_paginate_method( section=section, paginator_name=paginator_name, event_emitter=self._client.meta.events, service_model=self._client.meta.service_model, paginator_config=paginator_config ) def document_paginate_method(section, paginator_name, event_emitter, service_model, paginator_config, include_signature=True): """Documents the paginate method of a paginator :param section: The section to write to :param paginator_name: The name of the paginator. It is snake cased. :param event_emitter: The event emitter to use to emit events :param service_model: The service model :param paginator_config: The paginator config associated to a particular paginator. :param include_signature: Whether or not to include the signature. It is useful for generating docstrings. """ # Retrieve the operation model of the underlying operation. operation_model = service_model.operation_model( paginator_name) # Add representations of the request and response parameters # we want to include in the description of the paginate method. # These are parameters we expose via the botocore interface. pagination_config_members = OrderedDict() pagination_config_members['MaxItems'] = DocumentedShape( name='MaxItems', type_name='integer', documentation=( '<p>The total number of items to return. 
If the total ' 'number of items available is more than the value ' 'specified in max-items then a <code>NextToken</code> ' 'will be provided in the output that you can use to ' 'resume pagination.</p>')) if paginator_config.get('limit_key', None): pagination_config_members['PageSize'] = DocumentedShape( name='PageSize', type_name='integer', documentation='<p>The size of each page.<p>') pagination_config_members['StartingToken'] = DocumentedShape( name='StartingToken', type_name='string', documentation=( '<p>A token to specify where to start paginating. ' 'This is the <code>NextToken</code> from a previous ' 'response.</p>')) botocore_pagination_params = [ DocumentedShape( name='PaginationConfig', type_name='structure', documentation=( '<p>A dictionary that provides parameters to control ' 'pagination.</p>'), members=pagination_config_members) ] botocore_pagination_response_params = [ DocumentedShape( name='NextToken', type_name='string', documentation=( '<p>A token to resume pagination.</p>')) ] service_pagination_params = [] # Add the normal input token of the method to a list # of input paramters that we wish to hide since we expose our own. if isinstance(paginator_config['input_token'], list): service_pagination_params += paginator_config['input_token'] else: service_pagination_params.append(paginator_config['input_token']) # Hide the limit key in the documentation. if paginator_config.get('limit_key', None): service_pagination_params.append(paginator_config['limit_key']) # Hide the output tokens in the documentation. service_pagination_response_params = [] if isinstance(paginator_config['output_token'], list): service_pagination_response_params += paginator_config[ 'output_token'] else: service_pagination_response_params.append(paginator_config[ 'output_token']) paginate_description = ( 'Creates an iterator that will paginate through responses ' 'from :py:meth:`{0}.Client.{1}`.'.format( get_service_module_name(service_model), xform_name(paginator_name)) ) document_model_driven_method( section, 'paginate', operation_model, event_emitter=event_emitter, method_description=paginate_description, example_prefix='response_iterator = paginator.paginate', include_input=botocore_pagination_params, include_output=botocore_pagination_response_params, exclude_input=service_pagination_params, exclude_output=service_pagination_response_params, include_signature=include_signature )
7,046
Python
38.589887
79
0.654272
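document_paginate_method above documents the paginate() call that botocore exposes on paginator objects. A usage sketch of that interface (assumes botocore is installed and AWS credentials plus a region are configured; 's3', 'list_objects_v2', and the bucket name are illustrative choices, not taken from the file):

import botocore.session

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1')

paginator = client.get_paginator('list_objects_v2')
response_iterator = paginator.paginate(
    Bucket='example-bucket',                          # hypothetical bucket
    PaginationConfig={'MaxItems': 50, 'PageSize': 10},
)
for page in response_iterator:
    for obj in page.get('Contents', []):
        print(obj['Key'])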
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/client.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import inspect from botocore.docs.utils import get_official_service_name from botocore.docs.method import document_custom_method from botocore.docs.method import document_model_driven_method from botocore.docs.method import get_instance_public_methods from botocore.docs.sharedexample import document_shared_examples from botocore.docs.example import ResponseExampleDocumenter from botocore.docs.params import ResponseParamsDocumenter from botocore.docs.utils import DocumentedShape from botocore.compat import OrderedDict class ClientDocumenter(object): def __init__(self, client, shared_examples=None): self._client = client self._shared_examples = shared_examples if self._shared_examples is None: self._shared_examples = {} self._service_name = self._client.meta.service_model.service_name def document_client(self, section): """Documents a client and its methods :param section: The section to write to. """ self._add_title(section) self._add_class_signature(section) client_methods = get_instance_public_methods(self._client) self._add_client_intro(section, client_methods) self._add_client_methods(section, client_methods) def _add_title(self, section): section.style.h2('Client') def _add_client_intro(self, section, client_methods): section = section.add_new_section('intro') # Write out the top level description for the client. official_service_name = get_official_service_name( self._client.meta.service_model) section.write( 'A low-level client representing %s' % official_service_name) section.style.new_line() section.include_doc_string(self._client.meta.service_model.documentation) # Write out the client example instantiation. self._add_client_creation_example(section) # List out all of the possible client methods. 
section.style.new_line() section.write('These are the available methods:') section.style.new_line() class_name = self._client.__class__.__name__ for method_name in sorted(client_methods): section.style.li(':py:meth:`~%s.Client.%s`' % ( class_name, method_name)) def _add_class_signature(self, section): section.style.start_sphinx_py_class( class_name='%s.Client' % self._client.__class__.__name__) def _add_client_creation_example(self, section): section.style.start_codeblock() section.style.new_line() section.write( 'client = session.create_client(\'{service}\')'.format( service=self._service_name) ) section.style.end_codeblock() def _add_client_methods(self, section, client_methods): section = section.add_new_section('methods') for method_name in sorted(client_methods): self._add_client_method( section, method_name, client_methods[method_name]) def _add_client_method(self, section, method_name, method): section = section.add_new_section(method_name) if self._is_custom_method(method_name): self._add_custom_method(section, method_name, method) else: self._add_model_driven_method(section, method_name) def _is_custom_method(self, method_name): return method_name not in self._client.meta.method_to_api_mapping def _add_custom_method(self, section, method_name, method): document_custom_method(section, method_name, method) def _add_method_exceptions_list(self, section, operation_model): error_section = section.add_new_section('exceptions') error_section.style.new_line() error_section.style.bold('Exceptions') error_section.style.new_line() client_name = self._client.__class__.__name__ for error in operation_model.error_shapes: class_name = '%s.Client.exceptions.%s' % (client_name, error.name) error_section.style.li(':py:class:`%s`' % class_name) def _add_model_driven_method(self, section, method_name): service_model = self._client.meta.service_model operation_name = self._client.meta.method_to_api_mapping[method_name] operation_model = service_model.operation_model(operation_name) example_prefix = 'response = client.%s' % method_name document_model_driven_method( section, method_name, operation_model, event_emitter=self._client.meta.events, method_description=operation_model.documentation, example_prefix=example_prefix, ) # Add any modeled exceptions if operation_model.error_shapes: self._add_method_exceptions_list(section, operation_model) # Add the shared examples shared_examples = self._shared_examples.get(operation_name) if shared_examples: document_shared_examples( section, operation_model, example_prefix, shared_examples) class ClientExceptionsDocumenter(object): _USER_GUIDE_LINK = ( 'https://boto3.amazonaws.com/' 'v1/documentation/api/latest/guide/error-handling.html' ) _GENERIC_ERROR_SHAPE = DocumentedShape( name='Error', type_name='structure', documentation=( 'Normalized access to common exception attributes.' ), members=OrderedDict([ ('Code', DocumentedShape( name='Code', type_name='string', documentation=( 'An identifier specifying the exception type.' ), )), ('Message', DocumentedShape( name='Message', type_name='string', documentation=( 'A descriptive message explaining why the exception ' 'occured.' 
), )), ]), ) def __init__(self, client): self._client = client self._service_name = self._client.meta.service_model.service_name def document_exceptions(self, section): self._add_title(section) self._add_overview(section) self._add_exceptions_list(section) self._add_exception_classes(section) def _add_title(self, section): section.style.h2('Client Exceptions') def _add_overview(self, section): section.style.new_line() section.write( 'Client exceptions are available on a client instance ' 'via the ``exceptions`` property. For more detailed instructions ' 'and examples on the exact usage of client exceptions, see the ' 'error handling ' ) section.style.external_link( title='user guide', link=self._USER_GUIDE_LINK, ) section.write('.') section.style.new_line() def _exception_class_name(self, shape): cls_name = self._client.__class__.__name__ return '%s.Client.exceptions.%s' % (cls_name, shape.name) def _add_exceptions_list(self, section): error_shapes = self._client.meta.service_model.error_shapes if not error_shapes: section.style.new_line() section.write('This client has no modeled exception classes.') section.style.new_line() return section.style.new_line() section.write('The available client exceptions are:') section.style.new_line() for shape in error_shapes: class_name = self._exception_class_name(shape) section.style.li(':py:class:`%s`' % class_name) def _add_exception_classes(self, section): for shape in self._client.meta.service_model.error_shapes: self._add_exception_class(section, shape) def _add_exception_class(self, section, shape): class_section = section.add_new_section(shape.name) class_name = self._exception_class_name(shape) class_section.style.start_sphinx_py_class(class_name=class_name) self._add_top_level_documentation(class_section, shape) self._add_exception_catch_example(class_section, shape) self._add_response_attr(class_section, shape) class_section.style.end_sphinx_py_class() def _add_top_level_documentation(self, section, shape): if shape.documentation: section.style.new_line() section.include_doc_string(shape.documentation) section.style.new_line() def _add_exception_catch_example(self, section, shape): section.style.new_line() section.style.bold('Example') section.style.start_codeblock() section.write('try:') section.style.indent() section.style.new_line() section.write('...') section.style.dedent() section.style.new_line() section.write('except client.exceptions.%s as e:' % shape.name) section.style.indent() section.style.new_line() section.write('print(e.response)') section.style.dedent() section.style.end_codeblock() def _add_response_attr(self, section, shape): response_section = section.add_new_section('response') response_section.style.start_sphinx_py_attr('response') self._add_response_attr_description(response_section) self._add_response_example(response_section, shape) self._add_response_params(response_section, shape) response_section.style.end_sphinx_py_attr() def _add_response_attr_description(self, section): section.style.new_line() section.include_doc_string( 'The parsed error response. All exceptions have a top level ' '``Error`` key that provides normalized access to common ' 'exception atrributes. All other keys are specific to this ' 'service or exception class.' 
) section.style.new_line() def _add_response_example(self, section, shape): example_section = section.add_new_section('syntax') example_section.style.new_line() example_section.style.bold('Syntax') example_section.style.new_paragraph() documenter = ResponseExampleDocumenter( service_name=self._service_name, operation_name=None, event_emitter=self._client.meta.events, ) documenter.document_example( example_section, shape, include=[self._GENERIC_ERROR_SHAPE], ) def _add_response_params(self, section, shape): params_section = section.add_new_section('Structure') params_section.style.new_line() params_section.style.bold('Structure') params_section.style.new_paragraph() documenter = ResponseParamsDocumenter( service_name=self._service_name, operation_name=None, event_emitter=self._client.meta.events, ) documenter.document_params( params_section, shape, include=[self._GENERIC_ERROR_SHAPE], )
11,726
Python
39.023891
81
0.634232
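ClientExceptionsDocumenter above documents the try/except pattern on client.exceptions. A usage sketch of that pattern (assumes botocore is installed with credentials configured; 's3' and the NoSuchBucket error shape are illustrative choices):

import botocore.session

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1')

try:
    client.get_object(Bucket='a-bucket-that-should-not-exist-123', Key='k')
except client.exceptions.NoSuchBucket as e:
    # The parsed error response carries the normalized 'Error' structure
    # described by _GENERIC_ERROR_SHAPE above.
    print(e.response['Error']['Code'], e.response['Error']['Message'])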
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/params.py
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from botocore.docs.shape import ShapeDocumenter from botocore.docs.utils import py_type_name class BaseParamsDocumenter(ShapeDocumenter): def document_params(self, section, shape, include=None, exclude=None): """Fills out the documentation for a section given a model shape. :param section: The section to write the documentation to. :param shape: The shape of the operation. :type include: Dictionary where keys are parameter names and values are the shapes of the parameter names. :param include: The parameter shapes to include in the documentation. :type exclude: List of the names of the parameters to exclude. :param exclude: The names of the parameters to exclude from documentation. """ history = [] self.traverse_and_document_shape( section=section, shape=shape, history=history, name=None, include=include, exclude=exclude) def document_recursive_shape(self, section, shape, **kwargs): self._add_member_documentation(section, shape, **kwargs) def document_shape_default(self, section, shape, history, include=None, exclude=None, **kwargs): self._add_member_documentation(section, shape, **kwargs) def document_shape_type_list(self, section, shape, history, include=None, exclude=None, **kwargs): self._add_member_documentation(section, shape, **kwargs) param_shape = shape.member param_section = section.add_new_section( param_shape.name, context={'shape': shape.member.name}) self._start_nested_param(param_section) self.traverse_and_document_shape( section=param_section, shape=param_shape, history=history, name=None) section = section.add_new_section('end-list') self._end_nested_param(section) def document_shape_type_map(self, section, shape, history, include=None, exclude=None, **kwargs): self._add_member_documentation(section, shape, **kwargs) key_section = section.add_new_section( 'key', context={'shape': shape.key.name}) self._start_nested_param(key_section) self._add_member_documentation(key_section, shape.key) param_section = section.add_new_section( shape.value.name, context={'shape': shape.value.name}) param_section.style.indent() self._start_nested_param(param_section) self.traverse_and_document_shape( section=param_section, shape=shape.value, history=history, name=None) end_section = section.add_new_section('end-map') self._end_nested_param(end_section) self._end_nested_param(end_section) def document_shape_type_structure(self, section, shape, history, include=None, exclude=None, name=None, **kwargs): members = self._add_members_to_shape(shape.members, include) self._add_member_documentation(section, shape, name=name) for param in members: if exclude and param in exclude: continue param_shape = members[param] param_section = section.add_new_section( param, context={'shape': param_shape.name}) self._start_nested_param(param_section) self.traverse_and_document_shape( section=param_section, shape=param_shape, history=history, name=param) section = section.add_new_section('end-structure') self._end_nested_param(section) def 
_add_member_documentation(self, section, shape, **kwargs): pass def _add_members_to_shape(self, members, include): if include: members = members.copy() for param in include: members[param.name] = param return members def _document_non_top_level_param_type(self, type_section, shape): special_py_type = self._get_special_py_type_name(shape) py_type = py_type_name(shape.type_name) type_format = '(%s) -- ' if special_py_type is not None: # Special type can reference a linked class. # Italicizing it blows away the link. type_section.write(type_format % special_py_type) else: type_section.style.italics(type_format % py_type) def _start_nested_param(self, section): section.style.indent() section.style.new_line() def _end_nested_param(self, section): section.style.dedent() section.style.new_line() class ResponseParamsDocumenter(BaseParamsDocumenter): """Generates the description for the response parameters""" EVENT_NAME = 'response-params' def _add_member_documentation(self, section, shape, name=None, **kwargs): name_section = section.add_new_section('param-name') name_section.write('- ') if name is not None: name_section.style.bold('%s ' % name) type_section = section.add_new_section('param-type') self._document_non_top_level_param_type(type_section, shape) documentation_section = section.add_new_section('param-documentation') if shape.documentation: documentation_section.style.indent() documentation_section.include_doc_string(shape.documentation) section.style.new_paragraph() def document_shape_type_event_stream(self, section, shape, history, **kwargs): self.document_shape_type_structure(section, shape, history, **kwargs) class RequestParamsDocumenter(BaseParamsDocumenter): """Generates the description for the request parameters""" EVENT_NAME = 'request-params' def document_shape_type_structure(self, section, shape, history, include=None, exclude=None, **kwargs): if len(history) > 1: self._add_member_documentation(section, shape, **kwargs) section.style.indent() members = self._add_members_to_shape(shape.members, include) for i, param in enumerate(members): if exclude and param in exclude: continue param_shape = members[param] param_section = section.add_new_section( param, context={'shape': param_shape.name}) param_section.style.new_line() is_required = param in shape.required_members self.traverse_and_document_shape( section=param_section, shape=param_shape, history=history, name=param, is_required=is_required) section = section.add_new_section('end-structure') if len(history) > 1: section.style.dedent() section.style.new_line() def _add_member_documentation(self, section, shape, name=None, is_top_level_param=False, is_required=False, **kwargs): py_type = self._get_special_py_type_name(shape) if py_type is None: py_type = py_type_name(shape.type_name) if is_top_level_param: type_section = section.add_new_section('param-type') type_section.write(':type %s: %s' % (name, py_type)) end_type_section = type_section.add_new_section('end-param-type') end_type_section.style.new_line() name_section = section.add_new_section('param-name') name_section.write(':param %s: ' % name) else: name_section = section.add_new_section('param-name') name_section.write('- ') if name is not None: name_section.style.bold('%s ' % name) type_section = section.add_new_section('param-type') self._document_non_top_level_param_type(type_section, shape) if is_required: is_required_section = section.add_new_section('is-required') is_required_section.style.indent() is_required_section.style.bold('[REQUIRED] ') if 
shape.documentation: documentation_section = section.add_new_section( 'param-documentation') documentation_section.style.indent() documentation_section.include_doc_string(shape.documentation) self._add_special_trait_documentation(documentation_section, shape) end_param_section = section.add_new_section('end-param') end_param_section.style.new_paragraph() def _add_special_trait_documentation(self, section, shape): if 'idempotencyToken' in shape.metadata: self._append_idempotency_documentation(section) def _append_idempotency_documentation(self, section): docstring = 'This field is autopopulated if not provided.' section.write(docstring)
9,561
Python
42.266968
79
0.622843
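A sketch of driving RequestParamsDocumenter by hand for a single operation's input shape, to see the reST it produces (assumes botocore is importable; 's3' and 'PutObject' are illustrative; this mirrors how the client documenter wires a section, a shape, and the event emitter together):

import botocore.session
from botocore.docs.bcdoc.restdoc import DocumentStructure
from botocore.docs.params import RequestParamsDocumenter

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1')
operation_model = client.meta.service_model.operation_model('PutObject')

section = DocumentStructure('request-params', target='html')
documenter = RequestParamsDocumenter(
    service_name='s3',
    operation_name='PutObject',
    event_emitter=client.meta.events,
)
documenter.document_params(section, operation_model.input_shape)
print(section.flush_structure().decode('utf-8')[:800])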
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/docstringparser.py
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from botocore.compat import six class DocStringParser(six.moves.html_parser.HTMLParser): """ A simple HTML parser. Focused on converting the subset of HTML that appears in the documentation strings of the JSON models into simple ReST format. """ def __init__(self, doc): self.tree = None self.doc = doc six.moves.html_parser.HTMLParser.__init__(self) def reset(self): six.moves.html_parser.HTMLParser.reset(self) self.tree = HTMLTree(self.doc) def feed(self, data): # HTMLParser is an old style class, so the super() method will not work. six.moves.html_parser.HTMLParser.feed(self, data) self.tree.write() self.tree = HTMLTree(self.doc) def close(self): six.moves.html_parser.HTMLParser.close(self) # Write if there is anything remaining. self.tree.write() self.tree = HTMLTree(self.doc) def handle_starttag(self, tag, attrs): self.tree.add_tag(tag, attrs=attrs) def handle_endtag(self, tag): self.tree.add_tag(tag, is_start=False) def handle_data(self, data): self.tree.add_data(data) class HTMLTree(object): """ A tree which handles HTML nodes. Designed to work with a python HTML parser, meaning that the current_node will be the most recently opened tag. When a tag is closed, the current_node moves up to the parent node. """ def __init__(self, doc): self.doc = doc self.head = StemNode() self.current_node = self.head self.unhandled_tags = [] def add_tag(self, tag, attrs=None, is_start=True): if not self._doc_has_handler(tag, is_start): self.unhandled_tags.append(tag) return if is_start: if tag == 'li': node = LineItemNode(attrs) else: node = TagNode(tag, attrs) self.current_node.add_child(node) self.current_node = node else: self.current_node = self.current_node.parent def _doc_has_handler(self, tag, is_start): if is_start: handler_name = 'start_%s' % tag else: handler_name = 'end_%s' % tag return hasattr(self.doc.style, handler_name) def add_data(self, data): self.current_node.add_child(DataNode(data)) def write(self): self.head.write(self.doc) class Node(object): def __init__(self, parent=None): self.parent = parent def write(self, doc): raise NotImplementedError class StemNode(Node): def __init__(self, parent=None): super(StemNode, self).__init__(parent) self.children = [] def add_child(self, child): child.parent = self self.children.append(child) def write(self, doc): self._write_children(doc) def _write_children(self, doc): for child in self.children: child.write(doc) class TagNode(StemNode): """ A generic Tag node. It will verify that handlers exist before writing. 
""" def __init__(self, tag, attrs=None, parent=None): super(TagNode, self).__init__(parent) self.attrs = attrs self.tag = tag def write(self, doc): self._write_start(doc) self._write_children(doc) self._write_end(doc) def _write_start(self, doc): handler_name = 'start_%s' % self.tag if hasattr(doc.style, handler_name): getattr(doc.style, handler_name)(self.attrs) def _write_end(self, doc): handler_name = 'end_%s' % self.tag if hasattr(doc.style, handler_name): getattr(doc.style, handler_name)() class LineItemNode(TagNode): def __init__(self, attrs=None, parent=None): super(LineItemNode, self).__init__('li', attrs, parent) def write(self, doc): self._lstrip(self) super(LineItemNode, self).write(doc) def _lstrip(self, node): """ Traverses the tree, stripping out whitespace until text data is found :param node: The node to strip :return: True if non-whitespace data was found, False otherwise """ for child in node.children: if isinstance(child, DataNode): child.lstrip() if child.data: return True else: found = self._lstrip(child) if found: return True return False class DataNode(Node): """ A Node that contains only string data. """ def __init__(self, data, parent=None): super(DataNode, self).__init__(parent) if not isinstance(data, six.string_types): raise ValueError("Expecting string type, %s given." % type(data)) self.data = data def lstrip(self): self.data = self.data.lstrip() def write(self, doc): if not self.data: return if self.data.isspace(): str_data = ' ' else: end_space = self.data[-1].isspace() words = self.data.split() words = doc.translate_words(words) str_data = ' '.join(words) if end_space: str_data += ' ' doc.handle_data(str_data)
5,889
Python
28.303482
80
0.591272
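DocStringParser above is what ReSTDocument.include_doc_string feeds service HTML through. A small sketch of that HTML-to-reST conversion (assumes botocore is importable; the output shown in the comment is approximate):

from botocore.docs.bcdoc.restdoc import ReSTDocument

doc = ReSTDocument(target='html')
doc.include_doc_string(
    '<p>Returns the list of <b>buckets</b> owned by the caller. '
    'See <code>ListBuckets</code>.</p>'
)
print(doc.getvalue().decode('utf-8'))
# Roughly: "Returns the list of **buckets** owned by the caller.
#           See ``ListBuckets`` ."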
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/style.py
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging logger = logging.getLogger('bcdocs') class BaseStyle(object): def __init__(self, doc, indent_width=2): self.doc = doc self.indent_width = indent_width self._indent = 0 self.keep_data = True @property def indentation(self): return self._indent @indentation.setter def indentation(self, value): self._indent = value def new_paragraph(self): return '\n%s' % self.spaces() def indent(self): self._indent += 1 def dedent(self): if self._indent > 0: self._indent -= 1 def spaces(self): return ' ' * (self._indent * self.indent_width) def bold(self, s): return s def ref(self, link, title=None): return link def h2(self, s): return s def h3(self, s): return s def underline(self, s): return s def italics(self, s): return s class ReSTStyle(BaseStyle): def __init__(self, doc, indent_width=2): BaseStyle.__init__(self, doc, indent_width) self.do_p = True self.a_href = None self.list_depth = 0 def new_paragraph(self): self.doc.write('\n\n%s' % self.spaces()) def new_line(self): self.doc.write('\n%s' % self.spaces()) def _start_inline(self, markup): self.doc.write(markup) def _end_inline(self, markup): # Sometimes the HTML markup has whitespace between the end # of the text inside the inline markup and the closing element # (e.g. <b>foobar </b>). This trailing space will cause # problems in the ReST inline markup so we remove it here # by popping the last item written off the stack, striping # the whitespace and then pushing it back on the stack. last_write = self.doc.pop_write().rstrip(' ') # Sometimes, for whatever reason, a tag like <b/> is present. This # is problematic because if we simply translate that directly then # we end up with something like ****, which rst will assume is a # heading instead of an empty bold. 
if last_write == markup: return self.doc.push_write(last_write) self.doc.write(markup + ' ') def start_bold(self, attrs=None): self._start_inline('**') def end_bold(self): self._end_inline('**') def start_b(self, attrs=None): self.doc.do_translation = True self.start_bold(attrs) def end_b(self): self.doc.do_translation = False self.end_bold() def bold(self, s): if s: self.start_bold() self.doc.write(s) self.end_bold() def ref(self, title, link=None): if link is None: link = title self.doc.write(':doc:`%s <%s>`' % (title, link)) def _heading(self, s, border_char): border = border_char * len(s) self.new_paragraph() self.doc.write('%s\n%s\n%s' % (border, s, border)) self.new_paragraph() def h1(self, s): self._heading(s, '*') def h2(self, s): self._heading(s, '=') def h3(self, s): self._heading(s, '-') def start_italics(self, attrs=None): self._start_inline('*') def end_italics(self): self._end_inline('*') def italics(self, s): if s: self.start_italics() self.doc.write(s) self.end_italics() def start_p(self, attrs=None): if self.do_p: self.doc.write('\n\n%s' % self.spaces()) def end_p(self): if self.do_p: self.doc.write('\n\n%s' % self.spaces()) def start_code(self, attrs=None): self.doc.do_translation = True self._start_inline('``') def end_code(self): self.doc.do_translation = False self._end_inline('``') def code(self, s): if s: self.start_code() self.doc.write(s) self.end_code() def start_note(self, attrs=None): self.new_paragraph() self.doc.write('.. note::') self.indent() self.new_paragraph() def end_note(self): self.dedent() self.new_paragraph() def start_important(self, attrs=None): self.new_paragraph() self.doc.write('.. warning::') self.indent() self.new_paragraph() def end_important(self): self.dedent() self.new_paragraph() def start_danger(self, attrs=None): self.new_paragraph() self.doc.write('.. danger::') self.indent() self.new_paragraph() def end_danger(self): self.dedent() self.new_paragraph() def start_a(self, attrs=None): if attrs: for attr_key, attr_value in attrs: if attr_key == 'href': self.a_href = attr_value self.doc.write('`') else: # There are some model documentation that # looks like this: <a>DescribeInstances</a>. # In this case we just write out an empty # string. self.doc.write(' ') self.doc.do_translation = True def link_target_definition(self, refname, link): self.doc.writeln('.. _%s: %s' % (refname, link)) def sphinx_reference_label(self, label, text=None): if text is None: text = label if self.doc.target == 'html': self.doc.write(':ref:`%s <%s>`' % (text, label)) else: self.doc.write(text) def end_a(self): self.doc.do_translation = False if self.a_href: last_write = self.doc.pop_write() last_write = last_write.rstrip(' ') if last_write and last_write != '`': if ':' in last_write: last_write = last_write.replace(':', r'\:') self.doc.push_write(last_write) self.doc.push_write(' <%s>`__' % self.a_href) elif last_write == '`': # Look at start_a(). It will do a self.doc.write('`') # which is the start of the link title. If that is the # case then there was no link text. We should just # use an inline link. 
The syntax of this is # `<http://url>`_ self.doc.push_write('`<%s>`__' % self.a_href) else: self.doc.push_write(self.a_href) self.doc.hrefs[self.a_href] = self.a_href self.doc.write('`__') self.a_href = None self.doc.write(' ') def start_i(self, attrs=None): self.doc.do_translation = True self.start_italics() def end_i(self): self.doc.do_translation = False self.end_italics() def start_li(self, attrs=None): self.new_line() self.do_p = False self.doc.write('* ') def end_li(self): self.do_p = True self.new_line() def li(self, s): if s: self.start_li() self.doc.writeln(s) self.end_li() def start_ul(self, attrs=None): if self.list_depth != 0: self.indent() self.list_depth += 1 self.new_paragraph() def end_ul(self): self.list_depth -= 1 if self.list_depth != 0: self.dedent() self.new_paragraph() def start_ol(self, attrs=None): # TODO: Need to control the bullets used for LI items if self.list_depth != 0: self.indent() self.list_depth += 1 self.new_paragraph() def end_ol(self): self.list_depth -= 1 if self.list_depth != 0: self.dedent() self.new_paragraph() def start_examples(self, attrs=None): self.doc.keep_data = False def end_examples(self): self.doc.keep_data = True def start_fullname(self, attrs=None): self.doc.keep_data = False def end_fullname(self): self.doc.keep_data = True def start_codeblock(self, attrs=None): self.doc.write('::') self.indent() self.new_paragraph() def end_codeblock(self): self.dedent() self.new_paragraph() def codeblock(self, code): """ Literal code blocks are introduced by ending a paragraph with the special marker ::. The literal block must be indented (and, like all paragraphs, separated from the surrounding ones by blank lines). """ self.start_codeblock() self.doc.writeln(code) self.end_codeblock() def toctree(self): if self.doc.target == 'html': self.doc.write('\n.. toctree::\n') self.doc.write(' :maxdepth: 1\n') self.doc.write(' :titlesonly:\n\n') else: self.start_ul() def tocitem(self, item, file_name=None): if self.doc.target == 'man': self.li(item) else: if file_name: self.doc.writeln(' %s' % file_name) else: self.doc.writeln(' %s' % item) def hidden_toctree(self): if self.doc.target == 'html': self.doc.write('\n.. toctree::\n') self.doc.write(' :maxdepth: 1\n') self.doc.write(' :hidden:\n\n') def hidden_tocitem(self, item): if self.doc.target == 'html': self.tocitem(item) def table_of_contents(self, title=None, depth=None): self.doc.write('.. contents:: ') if title is not None: self.doc.writeln(title) if depth is not None: self.doc.writeln(' :depth: %s' % depth) def start_sphinx_py_class(self, class_name): self.new_paragraph() self.doc.write('.. py:class:: %s' % class_name) self.indent() self.new_paragraph() def end_sphinx_py_class(self): self.dedent() self.new_paragraph() def start_sphinx_py_method(self, method_name, parameters=None): self.new_paragraph() content = '.. py:method:: %s' % method_name if parameters is not None: content += '(%s)' % parameters self.doc.write(content) self.indent() self.new_paragraph() def end_sphinx_py_method(self): self.dedent() self.new_paragraph() def start_sphinx_py_attr(self, attr_name): self.new_paragraph() self.doc.write('.. 
py:attribute:: %s' % attr_name) self.indent() self.new_paragraph() def end_sphinx_py_attr(self): self.dedent() self.new_paragraph() def write_py_doc_string(self, docstring): docstring_lines = docstring.splitlines() for docstring_line in docstring_lines: self.doc.writeln(docstring_line) def external_link(self, title, link): if self.doc.target == 'html': self.doc.write('`%s <%s>`_' % (title, link)) else: self.doc.write(title) def internal_link(self, title, page): if self.doc.target == 'html': self.doc.write(':doc:`%s <%s>`' % (title, page)) else: self.doc.write(title)
11,833
Python
27.243437
78
0.542381
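ReSTStyle above is normally driven through a document's style attribute. A short sketch emitting a heading, bold text, and a literal code block (assumes botocore is importable):

from botocore.docs.bcdoc.restdoc import ReSTDocument

doc = ReSTDocument(target='html')
doc.style.h2('Paginators')
doc.style.new_line()
doc.style.bold('Example')
doc.style.start_codeblock()
doc.style.new_line()
doc.write("paginator = client.get_paginator('list_objects_v2')")
doc.style.end_codeblock()
print(doc.getvalue().decode('utf-8'))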
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/__init__.py
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. __version__ = '0.16.0'
588
Python
41.071426
73
0.741497
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/botocore/docs/bcdoc/restdoc.py
# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import logging from botocore.compat import OrderedDict from botocore.docs.bcdoc.docstringparser import DocStringParser from botocore.docs.bcdoc.style import ReSTStyle LOG = logging.getLogger('bcdocs') class ReSTDocument(object): def __init__(self, target='man'): self.style = ReSTStyle(self) self.target = target self.parser = DocStringParser(self) self.keep_data = True self.do_translation = False self.translation_map = {} self.hrefs = {} self._writes = [] self._last_doc_string = None def _write(self, s): if self.keep_data and s is not None: self._writes.append(s) def write(self, content): """ Write content into the document. """ self._write(content) def writeln(self, content): """ Write content on a newline. """ self._write('%s%s\n' % (self.style.spaces(), content)) def peek_write(self): """ Returns the last content written to the document without removing it from the stack. """ return self._writes[-1] def pop_write(self): """ Removes and returns the last content written to the stack. """ return self._writes.pop() def push_write(self, s): """ Places new content on the stack. """ self._writes.append(s) def getvalue(self): """ Returns the current content of the document as a string. """ if self.hrefs: self.style.new_paragraph() for refname, link in self.hrefs.items(): self.style.link_target_definition(refname, link) return ''.join(self._writes).encode('utf-8') def translate_words(self, words): return [self.translation_map.get(w, w) for w in words] def handle_data(self, data): if data and self.keep_data: self._write(data) def include_doc_string(self, doc_string): if doc_string: try: start = len(self._writes) self.parser.feed(doc_string) self.parser.close() end = len(self._writes) self._last_doc_string = (start, end) except Exception: LOG.debug('Error parsing doc string', exc_info=True) LOG.debug(doc_string) def remove_last_doc_string(self): # Removes all writes inserted by last doc string if self._last_doc_string is not None: start, end = self._last_doc_string del self._writes[start:end] class DocumentStructure(ReSTDocument): def __init__(self, name, section_names=None, target='man', context=None): """Provides a Hierarichial structure to a ReSTDocument You can write to it similiar to as you can to a ReSTDocument but has an innate structure for more orginaztion and abstraction. :param name: The name of the document :param section_names: A list of sections to be included in the document. :param target: The target documentation of the Document structure :param context: A dictionary of data to store with the strucuture. These are only stored per section not the entire structure. 
""" super(DocumentStructure, self).__init__(target=target) self._name = name self._structure = OrderedDict() self._path = [self._name] self._context = {} if context is not None: self._context = context if section_names is not None: self._generate_structure(section_names) @property def name(self): """The name of the document structure""" return self._name @property def path(self): """ A list of where to find a particular document structure in the overlying document structure. """ return self._path @path.setter def path(self, value): self._path = value @property def available_sections(self): return list(self._structure) @property def context(self): return self._context def _generate_structure(self, section_names): for section_name in section_names: self.add_new_section(section_name) def add_new_section(self, name, context=None): """Adds a new section to the current document structure This document structure will be considered a section to the current document structure but will in itself be an entirely new document structure that can be written to and have sections as well :param name: The name of the section. :param context: A dictionary of data to store with the strucuture. These are only stored per section not the entire structure. :rtype: DocumentStructure :returns: A new document structure to add to but lives as a section to the document structure it was instantiated from. """ # Add a new section section = self.__class__(name=name, target=self.target, context=context) section.path = self.path + [name] # Indent the section apporpriately as well section.style.indentation = self.style.indentation section.translation_map = self.translation_map section.hrefs = self.hrefs self._structure[name] = section return section def get_section(self, name): """Retrieve a section""" return self._structure[name] def delete_section(self, name): """Delete a section""" del self._structure[name] def flush_structure(self): """Flushes a doc structure to a ReSTructed string The document is flushed out in a DFS style where sections and their subsections' values are added to the string as they are visited. """ # We are at the root flush the links at the beginning of the # document if len(self.path) == 1: if self.hrefs: self.style.new_paragraph() for refname, link in self.hrefs.items(): self.style.link_target_definition(refname, link) value = self.getvalue() for name, section in self._structure.items(): value += section.flush_structure() return value def getvalue(self): return ''.join(self._writes).encode('utf-8') def remove_all_sections(self): self._structure = OrderedDict() def clear_text(self): self._writes = []
7,226
Python
32
80
0.608912
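A minimal usage sketch for the bcdoc classes above (an editorial illustration, not part of botocore itself): the document name, section names, and strings are invented, and only methods visible in restdoc.py are used.

from botocore.docs.bcdoc.restdoc import DocumentStructure

# Create a document with two pre-declared sections.
doc = DocumentStructure('example', section_names=['intro', 'details'])

intro = doc.get_section('intro')
intro.writeln('Introductory text written directly into a section.')

# Sections are themselves DocumentStructure instances, so they nest.
sub = doc.get_section('details').add_new_section('sub-detail')
sub.style.new_paragraph()
sub.writeln('Nested content added via add_new_section().')

# flush_structure() walks the tree depth-first and returns UTF-8 bytes.
print(doc.flush_structure().decode('utf-8'))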
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/vengine_gen.py
# # DEPRECATED: implementation for ffi.verify() # import sys, os import types from . import model from .error import VerificationError class VGenericEngine(object): _class_key = 'g' _gen_python_module = False def __init__(self, verifier): self.verifier = verifier self.ffi = verifier.ffi self.export_symbols = [] self._struct_pending_verification = {} def patch_extension_kwds(self, kwds): # add 'export_symbols' to the dictionary. Note that we add the # list before filling it. When we fill it, it will thus also show # up in kwds['export_symbols']. kwds.setdefault('export_symbols', self.export_symbols) def find_module(self, module_name, path, so_suffixes): for so_suffix in so_suffixes: basename = module_name + so_suffix if path is None: path = sys.path for dirname in path: filename = os.path.join(dirname, basename) if os.path.isfile(filename): return filename def collect_types(self): pass # not needed in the generic engine def _prnt(self, what=''): self._f.write(what + '\n') def write_source_to_f(self): prnt = self._prnt # first paste some standard set of lines that are mostly '#include' prnt(cffimod_header) # then paste the C source given by the user, verbatim. prnt(self.verifier.preamble) # # call generate_gen_xxx_decl(), for every xxx found from # ffi._parser._declarations. This generates all the functions. self._generate('decl') # # on Windows, distutils insists on putting init_cffi_xyz in # 'export_symbols', so instead of fighting it, just give up and # give it one if sys.platform == 'win32': if sys.version_info >= (3,): prefix = 'PyInit_' else: prefix = 'init' modname = self.verifier.get_module_name() prnt("void %s%s(void) { }\n" % (prefix, modname)) def load_library(self, flags=0): # import it with the CFFI backend backend = self.ffi._backend # needs to make a path that contains '/', on Posix filename = os.path.join(os.curdir, self.verifier.modulefilename) module = backend.load_library(filename, flags) # # call loading_gen_struct() to get the struct layout inferred by # the C compiler self._load(module, 'loading') # build the FFILibrary class and instance, this is a module subclass # because modules are expected to have usually-constant-attributes and # in PyPy this means the JIT is able to treat attributes as constant, # which we want. class FFILibrary(types.ModuleType): _cffi_generic_module = module _cffi_ffi = self.ffi _cffi_dir = [] def __dir__(self): return FFILibrary._cffi_dir library = FFILibrary("") # # finally, call the loaded_gen_xxx() functions. This will set # up the 'library' object. 
self._load(module, 'loaded', library=library) return library def _get_declarations(self): lst = [(key, tp) for (key, (tp, qual)) in self.ffi._parser._declarations.items()] lst.sort() return lst def _generate(self, step_name): for name, tp in self._get_declarations(): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_gen_%s_%s' % (kind, step_name)) except AttributeError: raise VerificationError( "not implemented in verify(): %r" % name) try: method(tp, realname) except Exception as e: model.attach_exception_info(e, name) raise def _load(self, module, step_name, **kwds): for name, tp in self._get_declarations(): kind, realname = name.split(' ', 1) method = getattr(self, '_%s_gen_%s' % (step_name, kind)) try: method(tp, realname, module, **kwds) except Exception as e: model.attach_exception_info(e, name) raise def _generate_nothing(self, tp, name): pass def _loaded_noop(self, tp, name, module, **kwds): pass # ---------- # typedefs: generates no code so far _generate_gen_typedef_decl = _generate_nothing _loading_gen_typedef = _loaded_noop _loaded_gen_typedef = _loaded_noop # ---------- # function declarations def _generate_gen_function_decl(self, tp, name): assert isinstance(tp, model.FunctionPtrType) if tp.ellipsis: # cannot support vararg functions better than this: check for its # exact type (including the fixed arguments), and build it as a # constant function pointer (no _cffi_f_%s wrapper) self._generate_gen_const(False, name, tp) return prnt = self._prnt numargs = len(tp.args) argnames = [] for i, type in enumerate(tp.args): indirection = '' if isinstance(type, model.StructOrUnion): indirection = '*' argnames.append('%sx%d' % (indirection, i)) context = 'argument of %s' % name arglist = [type.get_c_name(' %s' % arg, context) for type, arg in zip(tp.args, argnames)] tpresult = tp.result if isinstance(tpresult, model.StructOrUnion): arglist.insert(0, tpresult.get_c_name(' *r', context)) tpresult = model.void_type arglist = ', '.join(arglist) or 'void' wrappername = '_cffi_f_%s' % name self.export_symbols.append(wrappername) if tp.abi: abi = tp.abi + ' ' else: abi = '' funcdecl = ' %s%s(%s)' % (abi, wrappername, arglist) context = 'result of %s' % name prnt(tpresult.get_c_name(funcdecl, context)) prnt('{') # if isinstance(tp.result, model.StructOrUnion): result_code = '*r = ' elif not isinstance(tp.result, model.VoidType): result_code = 'return ' else: result_code = '' prnt(' %s%s(%s);' % (result_code, name, ', '.join(argnames))) prnt('}') prnt() _loading_gen_function = _loaded_noop def _loaded_gen_function(self, tp, name, module, library): assert isinstance(tp, model.FunctionPtrType) if tp.ellipsis: newfunction = self._load_constant(False, tp, name, module) else: indirections = [] base_tp = tp if (any(isinstance(typ, model.StructOrUnion) for typ in tp.args) or isinstance(tp.result, model.StructOrUnion)): indirect_args = [] for i, typ in enumerate(tp.args): if isinstance(typ, model.StructOrUnion): typ = model.PointerType(typ) indirections.append((i, typ)) indirect_args.append(typ) indirect_result = tp.result if isinstance(indirect_result, model.StructOrUnion): if indirect_result.fldtypes is None: raise TypeError("'%s' is used as result type, " "but is opaque" % ( indirect_result._get_c_name(),)) indirect_result = model.PointerType(indirect_result) indirect_args.insert(0, indirect_result) indirections.insert(0, ("result", indirect_result)) indirect_result = model.void_type tp = model.FunctionPtrType(tuple(indirect_args), indirect_result, tp.ellipsis) BFunc = 
self.ffi._get_cached_btype(tp) wrappername = '_cffi_f_%s' % name newfunction = module.load_function(BFunc, wrappername) for i, typ in indirections: newfunction = self._make_struct_wrapper(newfunction, i, typ, base_tp) setattr(library, name, newfunction) type(library)._cffi_dir.append(name) def _make_struct_wrapper(self, oldfunc, i, tp, base_tp): backend = self.ffi._backend BType = self.ffi._get_cached_btype(tp) if i == "result": ffi = self.ffi def newfunc(*args): res = ffi.new(BType) oldfunc(res, *args) return res[0] else: def newfunc(*args): args = args[:i] + (backend.newp(BType, args[i]),) + args[i+1:] return oldfunc(*args) newfunc._cffi_base_type = base_tp return newfunc # ---------- # named structs def _generate_gen_struct_decl(self, tp, name): assert name == tp.name self._generate_struct_or_union_decl(tp, 'struct', name) def _loading_gen_struct(self, tp, name, module): self._loading_struct_or_union(tp, 'struct', name, module) def _loaded_gen_struct(self, tp, name, module, **kwds): self._loaded_struct_or_union(tp) def _generate_gen_union_decl(self, tp, name): assert name == tp.name self._generate_struct_or_union_decl(tp, 'union', name) def _loading_gen_union(self, tp, name, module): self._loading_struct_or_union(tp, 'union', name, module) def _loaded_gen_union(self, tp, name, module, **kwds): self._loaded_struct_or_union(tp) def _generate_struct_or_union_decl(self, tp, prefix, name): if tp.fldnames is None: return # nothing to do with opaque structs checkfuncname = '_cffi_check_%s_%s' % (prefix, name) layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) cname = ('%s %s' % (prefix, name)).strip() # prnt = self._prnt prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), fname)) except VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') self.export_symbols.append(layoutfuncname) prnt('intptr_t %s(intptr_t i)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) prnt(' static intptr_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) if isinstance(ftype, model.ArrayType) and ftype.length is None: prnt(' 0, /* %s */' % ftype._get_c_name()) else: prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' return nums[i];') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) prnt('}') prnt() def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # BFunc = self.ffi._typeof_locked("intptr_t(*)(intptr_t)")[0] function = module.load_function(BFunc, layoutfuncname) layout = [] num = 0 while True: x = function(num) if x < 0: break layout.append(x) num += 1 if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] totalalignment = layout[1] fieldofs = layout[2::2] fieldsize = layout[3::2] tp.force_flatten() assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment else: cname = ('%s %s' % (prefix, name)).strip() self._struct_pending_verification[tp] = layout, cname def _loaded_struct_or_union(self, tp): if tp.fldnames is None: return # nothing to do with opaque structs self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered if tp in self._struct_pending_verification: # check that the layout sizes and offsets match the real ones def check(realvalue, expectedvalue, msg): if realvalue != expectedvalue: raise VerificationError( "%s (we have %d, but C compiler says %d)" % (msg, expectedvalue, realvalue)) ffi = self.ffi BStruct = ffi._get_cached_btype(tp) layout, cname = self._struct_pending_verification.pop(tp) check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) if layout[i+1] != 0: BField = ffi._get_cached_btype(ftype) check(layout[i+1], ffi.sizeof(BField), "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) # ---------- # 'anonymous' declarations. These are produced for anonymous structs # or unions; the 'name' is obtained by a typedef. 
def _generate_gen_anonymous_decl(self, tp, name): if isinstance(tp, model.EnumType): self._generate_gen_enum_decl(tp, name, '') else: self._generate_struct_or_union_decl(tp, '', name) def _loading_gen_anonymous(self, tp, name, module): if isinstance(tp, model.EnumType): self._loading_gen_enum(tp, name, module, '') else: self._loading_struct_or_union(tp, '', name, module) def _loaded_gen_anonymous(self, tp, name, module, **kwds): if isinstance(tp, model.EnumType): self._loaded_gen_enum(tp, name, module, **kwds) else: self._loaded_struct_or_union(tp) # ---------- # constants, likely declared with '#define' def _generate_gen_const(self, is_int, name, tp=None, category='const', check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) self.export_symbols.append(funcname) if check_value is not None: assert is_int assert category == 'const' prnt('int %s(char *out_error)' % funcname) prnt('{') self._check_int_constant_value(name, check_value) prnt(' return 0;') prnt('}') elif is_int: assert category == 'const' prnt('int %s(long long *out_value)' % funcname) prnt('{') prnt(' *out_value = (long long)(%s);' % (name,)) prnt(' return (%s) <= 0;' % (name,)) prnt('}') else: assert tp is not None assert check_value is None if category == 'var': ampersand = '&' else: ampersand = '' extra = '' if category == 'const' and isinstance(tp, model.StructOrUnion): extra = 'const *' ampersand = '&' prnt(tp.get_c_name(' %s%s(void)' % (extra, funcname), name)) prnt('{') prnt(' return (%s%s);' % (ampersand, name)) prnt('}') prnt() def _generate_gen_constant_decl(self, tp, name): is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() self._generate_gen_const(is_int, name, tp) _loading_gen_constant = _loaded_noop def _load_constant(self, is_int, tp, name, module, check_value=None): funcname = '_cffi_const_%s' % name if check_value is not None: assert is_int self._load_known_int_constant(module, funcname) value = check_value elif is_int: BType = self.ffi._typeof_locked("long long*")[0] BFunc = self.ffi._typeof_locked("int(*)(long long*)")[0] function = module.load_function(BFunc, funcname) p = self.ffi.new(BType) negative = function(p) value = int(p[0]) if value < 0 and not negative: BLongLong = self.ffi._typeof_locked("long long")[0] value += (1 << (8*self.ffi.sizeof(BLongLong))) else: assert check_value is None fntypeextra = '(*)(void)' if isinstance(tp, model.StructOrUnion): fntypeextra = '*' + fntypeextra BFunc = self.ffi._typeof_locked(tp.get_c_name(fntypeextra, name))[0] function = module.load_function(BFunc, funcname) value = function() if isinstance(tp, model.StructOrUnion): value = value[0] return value def _loaded_gen_constant(self, tp, name, module, library): is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() value = self._load_constant(is_int, tp, name, module) setattr(library, name, value) type(library)._cffi_dir.append(name) # ---------- # enums def _check_int_constant_value(self, name, value): prnt = self._prnt if value <= 0: prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( name, name, value)) else: prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( name, name, value)) prnt(' char buf[64];') prnt(' if ((%s) <= 0)' % name) prnt(' sprintf(buf, "%%ld", (long)(%s));' % name) prnt(' else') prnt(' sprintf(buf, "%%lu", (unsigned long)(%s));' % name) prnt(' sprintf(out_error, "%s has the real value %s, not %s",') prnt(' "%s", buf, "%d");' % (name[:100], value)) prnt(' return -1;') prnt(' }') def _load_known_int_constant(self, module, funcname): 
BType = self.ffi._typeof_locked("char[]")[0] BFunc = self.ffi._typeof_locked("int(*)(char*)")[0] function = module.load_function(BFunc, funcname) p = self.ffi.new(BType, 256) if function(p) < 0: error = self.ffi.string(p) if sys.version_info >= (3,): error = str(error, 'utf-8') raise VerificationError(error) def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') return '_cffi_e_%s_%s' % (prefix, name) def _generate_gen_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_gen_const(True, enumerator) return # funcname = self._enum_funcname(prefix, name) self.export_symbols.append(funcname) prnt = self._prnt prnt('int %s(char *out_error)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._check_int_constant_value(enumerator, enumvalue) prnt(' return 0;') prnt('}') prnt() def _loading_gen_enum(self, tp, name, module, prefix='enum'): if tp.partial: enumvalues = [self._load_constant(True, tp, enumerator, module) for enumerator in tp.enumerators] tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True else: funcname = self._enum_funcname(prefix, name) self._load_known_int_constant(module, funcname) def _loaded_gen_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) type(library)._cffi_dir.append(enumerator) # ---------- # macros: for now only for integers def _generate_gen_macro_decl(self, tp, name): if tp == '...': check_value = None else: check_value = tp # an integer self._generate_gen_const(True, name, check_value=check_value) _loading_gen_macro = _loaded_noop def _loaded_gen_macro(self, tp, name, module, library): if tp == '...': check_value = None else: check_value = tp # an integer value = self._load_constant(True, tp, name, module, check_value=check_value) setattr(library, name, value) type(library)._cffi_dir.append(name) # ---------- # global variables def _generate_gen_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): if tp.length_is_unknown(): prnt = self._prnt funcname = '_cffi_sizeof_%s' % (name,) self.export_symbols.append(funcname) prnt("size_t %s(void)" % funcname) prnt("{") prnt(" return sizeof(%s);" % (name,)) prnt("}") tp_ptr = model.PointerType(tp.item) self._generate_gen_const(False, name, tp_ptr) else: tp_ptr = model.PointerType(tp) self._generate_gen_const(False, name, tp_ptr, category='var') _loading_gen_variable = _loaded_noop def _loaded_gen_variable(self, tp, name, module, library): if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." 
is forbidden if tp.length_is_unknown(): funcname = '_cffi_sizeof_%s' % (name,) BFunc = self.ffi._typeof_locked('size_t(*)(void)')[0] function = module.load_function(BFunc, funcname) size = function() BItemType = self.ffi._get_cached_btype(tp.item) length, rest = divmod(size, self.ffi.sizeof(BItemType)) if rest != 0: raise VerificationError( "bad size: %r does not seem to be an array of %s" % (name, tp.item)) tp = tp.resolve_length(length) tp_ptr = model.PointerType(tp.item) value = self._load_constant(False, tp_ptr, name, module) # 'value' is a <cdata 'type *'> which we have to replace with # a <cdata 'type[N]'> if the N is actually known if tp.length is not None: BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) type(library)._cffi_dir.append(name) return # remove ptr=<cdata 'int *'> from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. funcname = '_cffi_var_%s' % name BFunc = self.ffi._typeof_locked(tp.get_c_name('*(*)(void)', name))[0] function = module.load_function(BFunc, funcname) ptr = function() def getter(library): return ptr[0] def setter(library, value): ptr[0] = value setattr(type(library), name, property(getter, setter)) type(library)._cffi_dir.append(name) cffimod_header = r''' #include <stdio.h> #include <stddef.h> #include <stdarg.h> #include <errno.h> #include <sys/types.h> /* XXX for ssize_t on some platforms */ /* this block of #ifs should be kept exactly identical between c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py and cffi/_cffi_include.h */ #if defined(_MSC_VER) # include <malloc.h> /* for alloca() */ # if _MSC_VER < 1600 /* MSVC < 2010 */ typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; typedef __int8 int_least8_t; typedef __int16 int_least16_t; typedef __int32 int_least32_t; typedef __int64 int_least64_t; typedef unsigned __int8 uint_least8_t; typedef unsigned __int16 uint_least16_t; typedef unsigned __int32 uint_least32_t; typedef unsigned __int64 uint_least64_t; typedef __int8 int_fast8_t; typedef __int16 int_fast16_t; typedef __int32 int_fast32_t; typedef __int64 int_fast64_t; typedef unsigned __int8 uint_fast8_t; typedef unsigned __int16 uint_fast16_t; typedef unsigned __int32 uint_fast32_t; typedef unsigned __int64 uint_fast64_t; typedef __int64 intmax_t; typedef unsigned __int64 uintmax_t; # else # include <stdint.h> # endif # if _MSC_VER < 1800 /* MSVC < 2013 */ # ifndef __cplusplus typedef unsigned char _Bool; # endif # endif #else # include <stdint.h> # if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include <alloca.h> # endif #endif '''
26,684
Python
38.474852
80
0.532716
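A hedged sketch of the deprecated ffi.verify() flow that VGenericEngine backs: it assumes a working C compiler is available, and the add() function is invented for illustration. Note that on CPython the vengine_cpy engine may be chosen instead; the wrapper-export mechanism mentioned in the comments is the generic-engine path shown above.

import cffi

ffi = cffi.FFI()
ffi.cdef("int add(int x, int y);")

# Deprecated API: compiles the C snippet and loads the resulting library.
lib = ffi.verify("static int add(int x, int y) { return x + y; }")

# Under the generic engine above, a '_cffi_f_add' wrapper is emitted,
# listed in 'export_symbols', and looked up again by _loaded_gen_function().
assert lib.add(2, 3) == 5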
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/pkgconfig.py
# pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi
import sys, os, subprocess

from .error import PkgConfigError


def merge_flags(cfg1, cfg2):
    """Merge values from cffi config flags cfg2 into cfg1

    Example:
        merge_flags({"libraries": ["one"]}, {"libraries": ["two"]})
        {"libraries": ["one", "two"]}
    """
    for key, value in cfg2.items():
        if key not in cfg1:
            cfg1[key] = value
        else:
            if not isinstance(cfg1[key], list):
                raise TypeError("cfg1[%r] should be a list of strings" % (key,))
            if not isinstance(value, list):
                raise TypeError("cfg2[%r] should be a list of strings" % (key,))
            cfg1[key].extend(value)
    return cfg1


def call(libname, flag, encoding=sys.getfilesystemencoding()):
    """Calls pkg-config and returns the output if found
    """
    a = ["pkg-config", "--print-errors"]
    a.append(flag)
    a.append(libname)
    try:
        pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except EnvironmentError as e:
        raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),))

    bout, berr = pc.communicate()
    if pc.returncode != 0:
        try:
            berr = berr.decode(encoding)
        except Exception:
            pass
        raise PkgConfigError(berr.strip())

    if sys.version_info >= (3,) and not isinstance(bout, str):   # Python 3.x
        try:
            bout = bout.decode(encoding)
        except UnicodeDecodeError:
            raise PkgConfigError("pkg-config %s %s returned bytes that cannot "
                                 "be decoded with encoding %r:\n%r" %
                                 (flag, libname, encoding, bout))

    if os.altsep != '\\' and '\\' in bout:
        raise PkgConfigError("pkg-config %s %s returned an unsupported "
                             "backslash-escaped output:\n%r" %
                             (flag, libname, bout))
    return bout


def flags_from_pkgconfig(libs):
    r"""Return compiler line flags for FFI.set_source based on pkg-config output

    Usage
        ...
        ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"])

    If pkg-config is installed on the build machine, then the arguments
    include_dirs, library_dirs, libraries, define_macros, extra_compile_args
    and extra_link_args are extended with the output of pkg-config for libfoo
    and libbar.

    Raises PkgConfigError in case the pkg-config call fails.
    """

    def get_include_dirs(string):
        return [x[2:] for x in string.split() if x.startswith("-I")]

    def get_library_dirs(string):
        return [x[2:] for x in string.split() if x.startswith("-L")]

    def get_libraries(string):
        return [x[2:] for x in string.split() if x.startswith("-l")]

    # convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils
    def get_macros(string):
        def _macro(x):
            x = x[2:]    # drop "-D"
            if '=' in x:
                return tuple(x.split("=", 1))  # "-Dfoo=bar" => ("foo", "bar")
            else:
                return (x, None)               # "-Dfoo" => ("foo", None)
        return [_macro(x) for x in string.split() if x.startswith("-D")]

    def get_other_cflags(string):
        return [x for x in string.split() if not x.startswith("-I") and
                not x.startswith("-D")]

    def get_other_libs(string):
        return [x for x in string.split() if not x.startswith("-L") and
                not x.startswith("-l")]

    # return kwargs for given libname
    def kwargs(libname):
        fse = sys.getfilesystemencoding()
        all_cflags = call(libname, "--cflags")
        all_libs = call(libname, "--libs")
        return {
            "include_dirs": get_include_dirs(all_cflags),
            "library_dirs": get_library_dirs(all_libs),
            "libraries": get_libraries(all_libs),
            "define_macros": get_macros(all_cflags),
            "extra_compile_args": get_other_cflags(all_cflags),
            "extra_link_args": get_other_libs(all_libs),
        }

    # merge all arguments together
    ret = {}
    for libname in libs:
        lib_flags = kwargs(libname)
        merge_flags(ret, lib_flags)
    return ret
4,374
Python
34.860655
88
0.562414
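A hedged sketch of driving the helpers above directly (they are normally reached through set_source(..., pkgconfig=[...])): it assumes pkg-config and a libpng .pc file are installed on the build machine, and 'libpng' is only an example package name.

from cffi import pkgconfig

flags = pkgconfig.flags_from_pkgconfig(["libpng"])
# 'flags' maps distutils keyword arguments to lists parsed out of the
# pkg-config --cflags / --libs output, for example something like:
#   {'include_dirs': [...], 'libraries': ['png16', ...], ...}
# (the exact values depend on the system).

extra = {"libraries": ["m"]}
merged = pkgconfig.merge_flags(extra, flags)   # extends the lists in 'extra'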
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/cparser.py
from . import model from .commontypes import COMMON_TYPES, resolve_common_type from .error import FFIError, CDefError try: from . import _pycparser as pycparser except ImportError: import pycparser import weakref, re, sys try: if sys.version_info < (3,): import thread as _thread else: import _thread lock = _thread.allocate_lock() except ImportError: lock = None def _workaround_for_static_import_finders(): # Issue #392: packaging tools like cx_Freeze can not find these # because pycparser uses exec dynamic import. This is an obscure # workaround. This function is never called. import pycparser.yacctab import pycparser.lextab CDEF_SOURCE_STRING = "<cdef source string>" _r_comment = re.compile(r"/\*.*?\*/|//([^\n\\]|\\.)*?$", re.DOTALL | re.MULTILINE) _r_define = re.compile(r"^\s*#\s*define\s+([A-Za-z_][A-Za-z_0-9]*)" r"\b((?:[^\n\\]|\\.)*?)$", re.DOTALL | re.MULTILINE) _r_line_directive = re.compile(r"^[ \t]*#[ \t]*(?:line|\d+)\b.*$", re.MULTILINE) _r_partial_enum = re.compile(r"=\s*\.\.\.\s*[,}]|\.\.\.\s*\}") _r_enum_dotdotdot = re.compile(r"__dotdotdot\d+__$") _r_partial_array = re.compile(r"\[\s*\.\.\.\s*\]") _r_words = re.compile(r"\w+|\S") _parser_cache = None _r_int_literal = re.compile(r"-?0?x?[0-9a-f]+[lu]*$", re.IGNORECASE) _r_stdcall1 = re.compile(r"\b(__stdcall|WINAPI)\b") _r_stdcall2 = re.compile(r"[(]\s*(__stdcall|WINAPI)\b") _r_cdecl = re.compile(r"\b__cdecl\b") _r_extern_python = re.compile(r'\bextern\s*"' r'(Python|Python\s*\+\s*C|C\s*\+\s*Python)"\s*.') _r_star_const_space = re.compile( # matches "* const " r"[*]\s*((const|volatile|restrict)\b\s*)+") _r_int_dotdotdot = re.compile(r"(\b(int|long|short|signed|unsigned|char)\s*)+" r"\.\.\.") _r_float_dotdotdot = re.compile(r"\b(double|float)\s*\.\.\.") def _get_parser(): global _parser_cache if _parser_cache is None: _parser_cache = pycparser.CParser() return _parser_cache def _workaround_for_old_pycparser(csource): # Workaround for a pycparser issue (fixed between pycparser 2.10 and # 2.14): "char*const***" gives us a wrong syntax tree, the same as # for "char***(*const)". This means we can't tell the difference # afterwards. But "char(*const(***))" gives us the right syntax # tree. The issue only occurs if there are several stars in # sequence with no parenthesis inbetween, just possibly qualifiers. # Attempt to fix it by adding some parentheses in the source: each # time we see "* const" or "* const *", we add an opening # parenthesis before each star---the hard part is figuring out where # to close them. parts = [] while True: match = _r_star_const_space.search(csource) if not match: break #print repr(''.join(parts)+csource), '=>', parts.append(csource[:match.start()]) parts.append('('); closing = ')' parts.append(match.group()) # e.g. 
"* const " endpos = match.end() if csource.startswith('*', endpos): parts.append('('); closing += ')' level = 0 i = endpos while i < len(csource): c = csource[i] if c == '(': level += 1 elif c == ')': if level == 0: break level -= 1 elif c in ',;=': if level == 0: break i += 1 csource = csource[endpos:i] + closing + csource[i:] #print repr(''.join(parts)+csource) parts.append(csource) return ''.join(parts) def _preprocess_extern_python(csource): # input: `extern "Python" int foo(int);` or # `extern "Python" { int foo(int); }` # output: # void __cffi_extern_python_start; # int foo(int); # void __cffi_extern_python_stop; # # input: `extern "Python+C" int foo(int);` # output: # void __cffi_extern_python_plus_c_start; # int foo(int); # void __cffi_extern_python_stop; parts = [] while True: match = _r_extern_python.search(csource) if not match: break endpos = match.end() - 1 #print #print ''.join(parts)+csource #print '=>' parts.append(csource[:match.start()]) if 'C' in match.group(1): parts.append('void __cffi_extern_python_plus_c_start; ') else: parts.append('void __cffi_extern_python_start; ') if csource[endpos] == '{': # grouping variant closing = csource.find('}', endpos) if closing < 0: raise CDefError("'extern \"Python\" {': no '}' found") if csource.find('{', endpos + 1, closing) >= 0: raise NotImplementedError("cannot use { } inside a block " "'extern \"Python\" { ... }'") parts.append(csource[endpos+1:closing]) csource = csource[closing+1:] else: # non-grouping variant semicolon = csource.find(';', endpos) if semicolon < 0: raise CDefError("'extern \"Python\": no ';' found") parts.append(csource[endpos:semicolon+1]) csource = csource[semicolon+1:] parts.append(' void __cffi_extern_python_stop;') #print ''.join(parts)+csource #print parts.append(csource) return ''.join(parts) def _warn_for_string_literal(csource): if '"' not in csource: return for line in csource.splitlines(): if '"' in line and not line.lstrip().startswith('#'): import warnings warnings.warn("String literal found in cdef() or type source. " "String literals are ignored here, but you should " "remove them anyway because some character sequences " "confuse pre-parsing.") break def _warn_for_non_extern_non_static_global_variable(decl): if not decl.storage: import warnings warnings.warn("Global variable '%s' in cdef(): for consistency " "with C it should have a storage class specifier " "(usually 'extern')" % (decl.name,)) def _remove_line_directives(csource): # _r_line_directive matches whole lines, without the final \n, if they # start with '#line' with some spacing allowed, or '#NUMBER'. This # function stores them away and replaces them with exactly the string # '#line@N', where N is the index in the list 'line_directives'. line_directives = [] def replace(m): i = len(line_directives) line_directives.append(m.group()) return '#line@%d' % i csource = _r_line_directive.sub(replace, csource) return csource, line_directives def _put_back_line_directives(csource, line_directives): def replace(m): s = m.group() if not s.startswith('#line@'): raise AssertionError("unexpected #line directive " "(should have been processed and removed") return line_directives[int(s[6:])] return _r_line_directive.sub(replace, csource) def _preprocess(csource): # First, remove the lines of the form '#line N "filename"' because # the "filename" part could confuse the rest csource, line_directives = _remove_line_directives(csource) # Remove comments. 
NOTE: this only work because the cdef() section # should not contain any string literals (except in line directives)! def replace_keeping_newlines(m): return ' ' + m.group().count('\n') * '\n' csource = _r_comment.sub(replace_keeping_newlines, csource) # Remove the "#define FOO x" lines macros = {} for match in _r_define.finditer(csource): macroname, macrovalue = match.groups() macrovalue = macrovalue.replace('\\\n', '').strip() macros[macroname] = macrovalue csource = _r_define.sub('', csource) # if pycparser.__version__ < '2.14': csource = _workaround_for_old_pycparser(csource) # # BIG HACK: replace WINAPI or __stdcall with "volatile const". # It doesn't make sense for the return type of a function to be # "volatile volatile const", so we abuse it to detect __stdcall... # Hack number 2 is that "int(volatile *fptr)();" is not valid C # syntax, so we place the "volatile" before the opening parenthesis. csource = _r_stdcall2.sub(' volatile volatile const(', csource) csource = _r_stdcall1.sub(' volatile volatile const ', csource) csource = _r_cdecl.sub(' ', csource) # # Replace `extern "Python"` with start/end markers csource = _preprocess_extern_python(csource) # # Now there should not be any string literal left; warn if we get one _warn_for_string_literal(csource) # # Replace "[...]" with "[__dotdotdotarray__]" csource = _r_partial_array.sub('[__dotdotdotarray__]', csource) # # Replace "...}" with "__dotdotdotNUM__}". This construction should # occur only at the end of enums; at the end of structs we have "...;}" # and at the end of vararg functions "...);". Also replace "=...[,}]" # with ",__dotdotdotNUM__[,}]": this occurs in the enums too, when # giving an unknown value. matches = list(_r_partial_enum.finditer(csource)) for number, match in enumerate(reversed(matches)): p = match.start() if csource[p] == '=': p2 = csource.find('...', p, match.end()) assert p2 > p csource = '%s,__dotdotdot%d__ %s' % (csource[:p], number, csource[p2+3:]) else: assert csource[p:p+3] == '...' csource = '%s __dotdotdot%d__ %s' % (csource[:p], number, csource[p+3:]) # Replace "int ..." or "unsigned long int..." with "__dotdotdotint__" csource = _r_int_dotdotdot.sub(' __dotdotdotint__ ', csource) # Replace "float ..." or "double..." with "__dotdotdotfloat__" csource = _r_float_dotdotdot.sub(' __dotdotdotfloat__ ', csource) # Replace all remaining "..." with the same name, "__dotdotdot__", # which is declared with a typedef for the purpose of C parsing. csource = csource.replace('...', ' __dotdotdot__ ') # Finally, put back the line directives csource = _put_back_line_directives(csource, line_directives) return csource, macros def _common_type_names(csource): # Look in the source for what looks like usages of types from the # list of common types. A "usage" is approximated here as the # appearance of the word, minus a "definition" of the type, which # is the last word in a "typedef" statement. Approximative only # but should be fine for all the common types. 
look_for_words = set(COMMON_TYPES) look_for_words.add(';') look_for_words.add(',') look_for_words.add('(') look_for_words.add(')') look_for_words.add('typedef') words_used = set() is_typedef = False paren = 0 previous_word = '' for word in _r_words.findall(csource): if word in look_for_words: if word == ';': if is_typedef: words_used.discard(previous_word) look_for_words.discard(previous_word) is_typedef = False elif word == 'typedef': is_typedef = True paren = 0 elif word == '(': paren += 1 elif word == ')': paren -= 1 elif word == ',': if is_typedef and paren == 0: words_used.discard(previous_word) look_for_words.discard(previous_word) else: # word in COMMON_TYPES words_used.add(word) previous_word = word return words_used class Parser(object): def __init__(self): self._declarations = {} self._included_declarations = set() self._anonymous_counter = 0 self._structnode2type = weakref.WeakKeyDictionary() self._options = {} self._int_constants = {} self._recomplete = [] self._uses_new_feature = None def _parse(self, csource): csource, macros = _preprocess(csource) # XXX: for more efficiency we would need to poke into the # internals of CParser... the following registers the # typedefs, because their presence or absence influences the # parsing itself (but what they are typedef'ed to plays no role) ctn = _common_type_names(csource) typenames = [] for name in sorted(self._declarations): if name.startswith('typedef '): name = name[8:] typenames.append(name) ctn.discard(name) typenames += sorted(ctn) # csourcelines = [] csourcelines.append('# 1 "<cdef automatic initialization code>"') for typename in typenames: csourcelines.append('typedef int %s;' % typename) csourcelines.append('typedef int __dotdotdotint__, __dotdotdotfloat__,' ' __dotdotdot__;') # this forces pycparser to consider the following in the file # called <cdef source string> from line 1 csourcelines.append('# 1 "%s"' % (CDEF_SOURCE_STRING,)) csourcelines.append(csource) fullcsource = '\n'.join(csourcelines) if lock is not None: lock.acquire() # pycparser is not thread-safe... try: ast = _get_parser().parse(fullcsource) except pycparser.c_parser.ParseError as e: self.convert_pycparser_error(e, csource) finally: if lock is not None: lock.release() # csource will be used to find buggy source text return ast, macros, csource def _convert_pycparser_error(self, e, csource): # xxx look for "<cdef source string>:NUM:" at the start of str(e) # and interpret that as a line number. This will not work if # the user gives explicit ``# NUM "FILE"`` directives. 
line = None msg = str(e) match = re.match(r"%s:(\d+):" % (CDEF_SOURCE_STRING,), msg) if match: linenum = int(match.group(1), 10) csourcelines = csource.splitlines() if 1 <= linenum <= len(csourcelines): line = csourcelines[linenum-1] return line def convert_pycparser_error(self, e, csource): line = self._convert_pycparser_error(e, csource) msg = str(e) if line: msg = 'cannot parse "%s"\n%s' % (line.strip(), msg) else: msg = 'parse error\n%s' % (msg,) raise CDefError(msg) def parse(self, csource, override=False, packed=False, pack=None, dllexport=False): if packed: if packed != True: raise ValueError("'packed' should be False or True; use " "'pack' to give another value") if pack: raise ValueError("cannot give both 'pack' and 'packed'") pack = 1 elif pack: if pack & (pack - 1): raise ValueError("'pack' must be a power of two, not %r" % (pack,)) else: pack = 0 prev_options = self._options try: self._options = {'override': override, 'packed': pack, 'dllexport': dllexport} self._internal_parse(csource) finally: self._options = prev_options def _internal_parse(self, csource): ast, macros, csource = self._parse(csource) # add the macros self._process_macros(macros) # find the first "__dotdotdot__" and use that as a separator # between the repeated typedefs and the real csource iterator = iter(ast.ext) for decl in iterator: if decl.name == '__dotdotdot__': break else: assert 0 current_decl = None # try: self._inside_extern_python = '__cffi_extern_python_stop' for decl in iterator: current_decl = decl if isinstance(decl, pycparser.c_ast.Decl): self._parse_decl(decl) elif isinstance(decl, pycparser.c_ast.Typedef): if not decl.name: raise CDefError("typedef does not declare any name", decl) quals = 0 if (isinstance(decl.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.names[-1].startswith('__dotdotdot')): realtype = self._get_unknown_type(decl) elif (isinstance(decl.type, pycparser.c_ast.PtrDecl) and isinstance(decl.type.type, pycparser.c_ast.TypeDecl) and isinstance(decl.type.type.type, pycparser.c_ast.IdentifierType) and decl.type.type.type.names[-1].startswith('__dotdotdot')): realtype = self._get_unknown_ptr_type(decl) else: realtype, quals = self._get_type_and_quals( decl.type, name=decl.name, partial_length_ok=True, typedef_example="*(%s *)0" % (decl.name,)) self._declare('typedef ' + decl.name, realtype, quals=quals) elif decl.__class__.__name__ == 'Pragma': pass # skip pragma, only in pycparser 2.15 else: raise CDefError("unexpected <%s>: this construct is valid " "C but not valid in cdef()" % decl.__class__.__name__, decl) except CDefError as e: if len(e.args) == 1: e.args = e.args + (current_decl,) raise except FFIError as e: msg = self._convert_pycparser_error(e, csource) if msg: e.args = (e.args[0] + "\n *** Err: %s" % msg,) raise def _add_constants(self, key, val): if key in self._int_constants: if self._int_constants[key] == val: return # ignore identical double declarations raise FFIError( "multiple declarations of constant: %s" % (key,)) self._int_constants[key] = val def _add_integer_constant(self, name, int_str): int_str = int_str.lower().rstrip("ul") neg = int_str.startswith('-') if neg: int_str = int_str[1:] # "010" is not valid oct in py3 if (int_str.startswith("0") and int_str != '0' and not int_str.startswith("0x")): int_str = "0o" + int_str[1:] pyvalue = int(int_str, 0) if neg: pyvalue = -pyvalue self._add_constants(name, pyvalue) self._declare('macro ' + name, pyvalue) def _process_macros(self, macros): for key, value in macros.items(): value = value.strip() if 
_r_int_literal.match(value): self._add_integer_constant(key, value) elif value == '...': self._declare('macro ' + key, value) else: raise CDefError( 'only supports one of the following syntax:\n' ' #define %s ... (literally dot-dot-dot)\n' ' #define %s NUMBER (with NUMBER an integer' ' constant, decimal/hex/octal)\n' 'got:\n' ' #define %s %s' % (key, key, key, value)) def _declare_function(self, tp, quals, decl): tp = self._get_type_pointer(tp, quals) if self._options.get('dllexport'): tag = 'dllexport_python ' elif self._inside_extern_python == '__cffi_extern_python_start': tag = 'extern_python ' elif self._inside_extern_python == '__cffi_extern_python_plus_c_start': tag = 'extern_python_plus_c ' else: tag = 'function ' self._declare(tag + decl.name, tp) def _parse_decl(self, decl): node = decl.type if isinstance(node, pycparser.c_ast.FuncDecl): tp, quals = self._get_type_and_quals(node, name=decl.name) assert isinstance(tp, model.RawFunctionType) self._declare_function(tp, quals, decl) else: if isinstance(node, pycparser.c_ast.Struct): self._get_struct_union_enum_type('struct', node) elif isinstance(node, pycparser.c_ast.Union): self._get_struct_union_enum_type('union', node) elif isinstance(node, pycparser.c_ast.Enum): self._get_struct_union_enum_type('enum', node) elif not decl.name: raise CDefError("construct does not declare any variable", decl) # if decl.name: tp, quals = self._get_type_and_quals(node, partial_length_ok=True) if tp.is_raw_function: self._declare_function(tp, quals, decl) elif (tp.is_integer_type() and hasattr(decl, 'init') and hasattr(decl.init, 'value') and _r_int_literal.match(decl.init.value)): self._add_integer_constant(decl.name, decl.init.value) elif (tp.is_integer_type() and isinstance(decl.init, pycparser.c_ast.UnaryOp) and decl.init.op == '-' and hasattr(decl.init.expr, 'value') and _r_int_literal.match(decl.init.expr.value)): self._add_integer_constant(decl.name, '-' + decl.init.expr.value) elif (tp is model.void_type and decl.name.startswith('__cffi_extern_python_')): # hack: `extern "Python"` in the C source is replaced # with "void __cffi_extern_python_start;" and # "void __cffi_extern_python_stop;" self._inside_extern_python = decl.name else: if self._inside_extern_python !='__cffi_extern_python_stop': raise CDefError( "cannot declare constants or " "variables with 'extern \"Python\"'") if (quals & model.Q_CONST) and not tp.is_array_type: self._declare('constant ' + decl.name, tp, quals=quals) else: _warn_for_non_extern_non_static_global_variable(decl) self._declare('variable ' + decl.name, tp, quals=quals) def parse_type(self, cdecl): return self.parse_type_and_quals(cdecl)[0] def parse_type_and_quals(self, cdecl): ast, macros = self._parse('void __dummy(\n%s\n);' % cdecl)[:2] assert not macros exprnode = ast.ext[-1].type.args.params[0] if isinstance(exprnode, pycparser.c_ast.ID): raise CDefError("unknown identifier '%s'" % (exprnode.name,)) return self._get_type_and_quals(exprnode.type) def _declare(self, name, obj, included=False, quals=0): if name in self._declarations: prevobj, prevquals = self._declarations[name] if prevobj is obj and prevquals == quals: return if not self._options.get('override'): raise FFIError( "multiple declarations of %s (for interactive usage, " "try cdef(xx, override=True))" % (name,)) assert '__dotdotdot__' not in name.split() self._declarations[name] = (obj, quals) if included: self._included_declarations.add(obj) def _extract_quals(self, type): quals = 0 if isinstance(type, (pycparser.c_ast.TypeDecl, 
pycparser.c_ast.PtrDecl)): if 'const' in type.quals: quals |= model.Q_CONST if 'volatile' in type.quals: quals |= model.Q_VOLATILE if 'restrict' in type.quals: quals |= model.Q_RESTRICT return quals def _get_type_pointer(self, type, quals, declname=None): if isinstance(type, model.RawFunctionType): return type.as_function_pointer() if (isinstance(type, model.StructOrUnionOrEnum) and type.name.startswith('$') and type.name[1:].isdigit() and type.forcename is None and declname is not None): return model.NamedPointerType(type, declname, quals) return model.PointerType(type, quals) def _get_type_and_quals(self, typenode, name=None, partial_length_ok=False, typedef_example=None): # first, dereference typedefs, if we have it already parsed, we're good if (isinstance(typenode, pycparser.c_ast.TypeDecl) and isinstance(typenode.type, pycparser.c_ast.IdentifierType) and len(typenode.type.names) == 1 and ('typedef ' + typenode.type.names[0]) in self._declarations): tp, quals = self._declarations['typedef ' + typenode.type.names[0]] quals |= self._extract_quals(typenode) return tp, quals # if isinstance(typenode, pycparser.c_ast.ArrayDecl): # array type if typenode.dim is None: length = None else: length = self._parse_constant( typenode.dim, partial_length_ok=partial_length_ok) # a hack: in 'typedef int foo_t[...][...];', don't use '...' as # the length but use directly the C expression that would be # generated by recompiler.py. This lets the typedef be used in # many more places within recompiler.py if typedef_example is not None: if length == '...': length = '_cffi_array_len(%s)' % (typedef_example,) typedef_example = "*" + typedef_example # tp, quals = self._get_type_and_quals(typenode.type, partial_length_ok=partial_length_ok, typedef_example=typedef_example) return model.ArrayType(tp, length), quals # if isinstance(typenode, pycparser.c_ast.PtrDecl): # pointer type itemtype, itemquals = self._get_type_and_quals(typenode.type) tp = self._get_type_pointer(itemtype, itemquals, declname=name) quals = self._extract_quals(typenode) return tp, quals # if isinstance(typenode, pycparser.c_ast.TypeDecl): quals = self._extract_quals(typenode) type = typenode.type if isinstance(type, pycparser.c_ast.IdentifierType): # assume a primitive type. 
get it from .names, but reduce # synonyms to a single chosen combination names = list(type.names) if names != ['signed', 'char']: # keep this unmodified prefixes = {} while names: name = names[0] if name in ('short', 'long', 'signed', 'unsigned'): prefixes[name] = prefixes.get(name, 0) + 1 del names[0] else: break # ignore the 'signed' prefix below, and reorder the others newnames = [] for prefix in ('unsigned', 'short', 'long'): for i in range(prefixes.get(prefix, 0)): newnames.append(prefix) if not names: names = ['int'] # implicitly if names == ['int']: # but kill it if 'short' or 'long' if 'short' in prefixes or 'long' in prefixes: names = [] names = newnames + names ident = ' '.join(names) if ident == 'void': return model.void_type, quals if ident == '__dotdotdot__': raise FFIError(':%d: bad usage of "..."' % typenode.coord.line) tp0, quals0 = resolve_common_type(self, ident) return tp0, (quals | quals0) # if isinstance(type, pycparser.c_ast.Struct): # 'struct foobar' tp = self._get_struct_union_enum_type('struct', type, name) return tp, quals # if isinstance(type, pycparser.c_ast.Union): # 'union foobar' tp = self._get_struct_union_enum_type('union', type, name) return tp, quals # if isinstance(type, pycparser.c_ast.Enum): # 'enum foobar' tp = self._get_struct_union_enum_type('enum', type, name) return tp, quals # if isinstance(typenode, pycparser.c_ast.FuncDecl): # a function type return self._parse_function_type(typenode, name), 0 # # nested anonymous structs or unions end up here if isinstance(typenode, pycparser.c_ast.Struct): return self._get_struct_union_enum_type('struct', typenode, name, nested=True), 0 if isinstance(typenode, pycparser.c_ast.Union): return self._get_struct_union_enum_type('union', typenode, name, nested=True), 0 # raise FFIError(":%d: bad or unsupported type declaration" % typenode.coord.line) def _parse_function_type(self, typenode, funcname=None): params = list(getattr(typenode.args, 'params', [])) for i, arg in enumerate(params): if not hasattr(arg, 'type'): raise CDefError("%s arg %d: unknown type '%s'" " (if you meant to use the old C syntax of giving" " untyped arguments, it is not supported)" % (funcname or 'in expression', i + 1, getattr(arg, 'name', '?'))) ellipsis = ( len(params) > 0 and isinstance(params[-1].type, pycparser.c_ast.TypeDecl) and isinstance(params[-1].type.type, pycparser.c_ast.IdentifierType) and params[-1].type.type.names == ['__dotdotdot__']) if ellipsis: params.pop() if not params: raise CDefError( "%s: a function with only '(...)' as argument" " is not correct C" % (funcname or 'in expression')) args = [self._as_func_arg(*self._get_type_and_quals(argdeclnode.type)) for argdeclnode in params] if not ellipsis and args == [model.void_type]: args = [] result, quals = self._get_type_and_quals(typenode.type) # the 'quals' on the result type are ignored. HACK: we absure them # to detect __stdcall functions: we textually replace "__stdcall" # with "volatile volatile const" above. 
abi = None if hasattr(typenode.type, 'quals'): # else, probable syntax error anyway if typenode.type.quals[-3:] == ['volatile', 'volatile', 'const']: abi = '__stdcall' return model.RawFunctionType(tuple(args), result, ellipsis, abi) def _as_func_arg(self, type, quals): if isinstance(type, model.ArrayType): return model.PointerType(type.item, quals) elif isinstance(type, model.RawFunctionType): return type.as_function_pointer() else: return type def _get_struct_union_enum_type(self, kind, type, name=None, nested=False): # First, a level of caching on the exact 'type' node of the AST. # This is obscure, but needed because pycparser "unrolls" declarations # such as "typedef struct { } foo_t, *foo_p" and we end up with # an AST that is not a tree, but a DAG, with the "type" node of the # two branches foo_t and foo_p of the trees being the same node. # It's a bit silly but detecting "DAG-ness" in the AST tree seems # to be the only way to distinguish this case from two independent # structs. See test_struct_with_two_usages. try: return self._structnode2type[type] except KeyError: pass # # Note that this must handle parsing "struct foo" any number of # times and always return the same StructType object. Additionally, # one of these times (not necessarily the first), the fields of # the struct can be specified with "struct foo { ...fields... }". # If no name is given, then we have to create a new anonymous struct # with no caching; in this case, the fields are either specified # right now or never. # force_name = name name = type.name # # get the type or create it if needed if name is None: # 'force_name' is used to guess a more readable name for # anonymous structs, for the common case "typedef struct { } foo". if force_name is not None: explicit_name = '$%s' % force_name else: self._anonymous_counter += 1 explicit_name = '$%d' % self._anonymous_counter tp = None else: explicit_name = name key = '%s %s' % (kind, name) tp, _ = self._declarations.get(key, (None, None)) # if tp is None: if kind == 'struct': tp = model.StructType(explicit_name, None, None, None) elif kind == 'union': tp = model.UnionType(explicit_name, None, None, None) elif kind == 'enum': if explicit_name == '__dotdotdot__': raise CDefError("Enums cannot be declared with ...") tp = self._build_enum_type(explicit_name, type.values) else: raise AssertionError("kind = %r" % (kind,)) if name is not None: self._declare(key, tp) else: if kind == 'enum' and type.values is not None: raise NotImplementedError( "enum %s: the '{}' declaration should appear on the first " "time the enum is mentioned, not later" % explicit_name) if not tp.forcename: tp.force_the_name(force_name) if tp.forcename and '$' in tp.name: self._declare('anonymous %s' % tp.forcename, tp) # self._structnode2type[type] = tp # # enums: done here if kind == 'enum': return tp # # is there a 'type.decls'? If yes, then this is the place in the # C sources that declare the fields. If no, then just return the # existing type, possibly still incomplete. if type.decls is None: return tp # if tp.fldnames is not None: raise CDefError("duplicate declaration of struct %s" % name) fldnames = [] fldtypes = [] fldbitsize = [] fldquals = [] for decl in type.decls: if (isinstance(decl.type, pycparser.c_ast.IdentifierType) and ''.join(decl.type.names) == '__dotdotdot__'): # XXX pycparser is inconsistent: 'names' should be a list # of strings, but is sometimes just one string. Use # str.join() as a way to cope with both. 
self._make_partial(tp, nested) continue if decl.bitsize is None: bitsize = -1 else: bitsize = self._parse_constant(decl.bitsize) self._partial_length = False type, fqual = self._get_type_and_quals(decl.type, partial_length_ok=True) if self._partial_length: self._make_partial(tp, nested) if isinstance(type, model.StructType) and type.partial: self._make_partial(tp, nested) fldnames.append(decl.name or '') fldtypes.append(type) fldbitsize.append(bitsize) fldquals.append(fqual) tp.fldnames = tuple(fldnames) tp.fldtypes = tuple(fldtypes) tp.fldbitsize = tuple(fldbitsize) tp.fldquals = tuple(fldquals) if fldbitsize != [-1] * len(fldbitsize): if isinstance(tp, model.StructType) and tp.partial: raise NotImplementedError("%s: using both bitfields and '...;'" % (tp,)) tp.packed = self._options.get('packed') if tp.completed: # must be re-completed: it is not opaque any more tp.completed = 0 self._recomplete.append(tp) return tp def _make_partial(self, tp, nested): if not isinstance(tp, model.StructOrUnion): raise CDefError("%s cannot be partial" % (tp,)) if not tp.has_c_name() and not nested: raise NotImplementedError("%s is partial but has no C name" %(tp,)) tp.partial = True def _parse_constant(self, exprnode, partial_length_ok=False): # for now, limited to expressions that are an immediate number # or positive/negative number if isinstance(exprnode, pycparser.c_ast.Constant): s = exprnode.value if '0' <= s[0] <= '9': s = s.rstrip('uUlL') try: if s.startswith('0'): return int(s, 8) else: return int(s, 10) except ValueError: if len(s) > 1: if s.lower()[0:2] == '0x': return int(s, 16) elif s.lower()[0:2] == '0b': return int(s, 2) raise CDefError("invalid constant %r" % (s,)) elif s[0] == "'" and s[-1] == "'" and ( len(s) == 3 or (len(s) == 4 and s[1] == "\\")): return ord(s[-2]) else: raise CDefError("invalid constant %r" % (s,)) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '+'): return self._parse_constant(exprnode.expr) # if (isinstance(exprnode, pycparser.c_ast.UnaryOp) and exprnode.op == '-'): return -self._parse_constant(exprnode.expr) # load previously defined int constant if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name in self._int_constants): return self._int_constants[exprnode.name] # if (isinstance(exprnode, pycparser.c_ast.ID) and exprnode.name == '__dotdotdotarray__'): if partial_length_ok: self._partial_length = True return '...' 
raise FFIError(":%d: unsupported '[...]' here, cannot derive " "the actual array length in this context" % exprnode.coord.line) # if isinstance(exprnode, pycparser.c_ast.BinaryOp): left = self._parse_constant(exprnode.left) right = self._parse_constant(exprnode.right) if exprnode.op == '+': return left + right elif exprnode.op == '-': return left - right elif exprnode.op == '*': return left * right elif exprnode.op == '/': return self._c_div(left, right) elif exprnode.op == '%': return left - self._c_div(left, right) * right elif exprnode.op == '<<': return left << right elif exprnode.op == '>>': return left >> right elif exprnode.op == '&': return left & right elif exprnode.op == '|': return left | right elif exprnode.op == '^': return left ^ right # raise FFIError(":%d: unsupported expression: expected a " "simple numeric constant" % exprnode.coord.line) def _c_div(self, a, b): result = a // b if ((a < 0) ^ (b < 0)) and (a % b) != 0: result += 1 return result def _build_enum_type(self, explicit_name, decls): if decls is not None: partial = False enumerators = [] enumvalues = [] nextenumvalue = 0 for enum in decls.enumerators: if _r_enum_dotdotdot.match(enum.name): partial = True continue if enum.value is not None: nextenumvalue = self._parse_constant(enum.value) enumerators.append(enum.name) enumvalues.append(nextenumvalue) self._add_constants(enum.name, nextenumvalue) nextenumvalue += 1 enumerators = tuple(enumerators) enumvalues = tuple(enumvalues) tp = model.EnumType(explicit_name, enumerators, enumvalues) tp.partial = partial else: # opaque enum tp = model.EnumType(explicit_name, (), ()) return tp def include(self, other): for name, (tp, quals) in other._declarations.items(): if name.startswith('anonymous $enum_$'): continue # fix for test_anonymous_enum_include kind = name.split(' ', 1)[0] if kind in ('struct', 'union', 'enum', 'anonymous', 'typedef'): self._declare(name, tp, included=True, quals=quals) for k, v in other._int_constants.items(): self._add_constants(k, v) def _get_unknown_type(self, decl): typenames = decl.type.type.names if typenames == ['__dotdotdot__']: return model.unknown_type(decl.name) if typenames == ['__dotdotdotint__']: if self._uses_new_feature is None: self._uses_new_feature = "'typedef int... %s'" % decl.name return model.UnknownIntegerType(decl.name) if typenames == ['__dotdotdotfloat__']: # note: not for 'long double' so far if self._uses_new_feature is None: self._uses_new_feature = "'typedef float... %s'" % decl.name return model.UnknownFloatType(decl.name) raise FFIError(':%d: unsupported usage of "..." in typedef' % decl.coord.line) def _get_unknown_ptr_type(self, decl): if decl.type.type.type.names == ['__dotdotdot__']: return model.unknown_ptr_type(decl.name) raise FFIError(':%d: unsupported usage of "..." in typedef' % decl.coord.line)
44,231
Python
42.924528
86
0.525989
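The `_parse_constant` and `_c_div` code in the cffi/cparser.py content above folds C integer-constant expressions the way a C compiler would: `/` and `%` truncate toward zero rather than flooring, which matters when the operands have mixed signs. The following is a minimal standalone sketch of that rule; the helper names are invented for illustration and are not part of cffi.

# Hypothetical helpers mirroring cparser's _c_div and its '%' handling.
def c_div(a, b):
    """Integer division truncating toward zero, as C evaluates '/'."""
    result = a // b                      # Python floors toward -infinity
    if ((a < 0) ^ (b < 0)) and (a % b) != 0:
        result += 1                      # undo the extra step taken by floor
    return result

def c_mod(a, b):
    """C-style remainder, so that a == c_div(a, b) * b + c_mod(a, b)."""
    return a - c_div(a, b) * b

# Python floors: -7 // 2 == -4.  C truncates: -7 / 2 == -3 and -7 % 2 == -1.
assert c_div(-7, 2) == -3 and c_mod(-7, 2) == -1
assert c_div(7, -2) == -3 and c_mod(7, -2) == 1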
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/recompiler.py
import os, sys, io from . import ffiplatform, model from .error import VerificationError from .cffi_opcode import * VERSION_BASE = 0x2601 VERSION_EMBEDDED = 0x2701 VERSION_CHAR16CHAR32 = 0x2801 USE_LIMITED_API = (sys.platform != 'win32' or sys.version_info < (3, 0) or sys.version_info >= (3, 5)) class GlobalExpr: def __init__(self, name, address, type_op, size=0, check_value=0): self.name = name self.address = address self.type_op = type_op self.size = size self.check_value = check_value def as_c_expr(self): return ' { "%s", (void *)%s, %s, (void *)%s },' % ( self.name, self.address, self.type_op.as_c_expr(), self.size) def as_python_expr(self): return "b'%s%s',%d" % (self.type_op.as_python_bytes(), self.name, self.check_value) class FieldExpr: def __init__(self, name, field_offset, field_size, fbitsize, field_type_op): self.name = name self.field_offset = field_offset self.field_size = field_size self.fbitsize = fbitsize self.field_type_op = field_type_op def as_c_expr(self): spaces = " " * len(self.name) return (' { "%s", %s,\n' % (self.name, self.field_offset) + ' %s %s,\n' % (spaces, self.field_size) + ' %s %s },' % (spaces, self.field_type_op.as_c_expr())) def as_python_expr(self): raise NotImplementedError def as_field_python_expr(self): if self.field_type_op.op == OP_NOOP: size_expr = '' elif self.field_type_op.op == OP_BITFIELD: size_expr = format_four_bytes(self.fbitsize) else: raise NotImplementedError return "b'%s%s%s'" % (self.field_type_op.as_python_bytes(), size_expr, self.name) class StructUnionExpr: def __init__(self, name, type_index, flags, size, alignment, comment, first_field_index, c_fields): self.name = name self.type_index = type_index self.flags = flags self.size = size self.alignment = alignment self.comment = comment self.first_field_index = first_field_index self.c_fields = c_fields def as_c_expr(self): return (' { "%s", %d, %s,' % (self.name, self.type_index, self.flags) + '\n %s, %s, ' % (self.size, self.alignment) + '%d, %d ' % (self.first_field_index, len(self.c_fields)) + ('/* %s */ ' % self.comment if self.comment else '') + '},') def as_python_expr(self): flags = eval(self.flags, G_FLAGS) fields_expr = [c_field.as_field_python_expr() for c_field in self.c_fields] return "(b'%s%s%s',%s)" % ( format_four_bytes(self.type_index), format_four_bytes(flags), self.name, ','.join(fields_expr)) class EnumExpr: def __init__(self, name, type_index, size, signed, allenums): self.name = name self.type_index = type_index self.size = size self.signed = signed self.allenums = allenums def as_c_expr(self): return (' { "%s", %d, _cffi_prim_int(%s, %s),\n' ' "%s" },' % (self.name, self.type_index, self.size, self.signed, self.allenums)) def as_python_expr(self): prim_index = { (1, 0): PRIM_UINT8, (1, 1): PRIM_INT8, (2, 0): PRIM_UINT16, (2, 1): PRIM_INT16, (4, 0): PRIM_UINT32, (4, 1): PRIM_INT32, (8, 0): PRIM_UINT64, (8, 1): PRIM_INT64, }[self.size, self.signed] return "b'%s%s%s\\x00%s'" % (format_four_bytes(self.type_index), format_four_bytes(prim_index), self.name, self.allenums) class TypenameExpr: def __init__(self, name, type_index): self.name = name self.type_index = type_index def as_c_expr(self): return ' { "%s", %d },' % (self.name, self.type_index) def as_python_expr(self): return "b'%s%s'" % (format_four_bytes(self.type_index), self.name) # ____________________________________________________________ class Recompiler: _num_externpy = 0 def __init__(self, ffi, module_name, target_is_python=False): self.ffi = ffi self.module_name = module_name self.target_is_python = 
target_is_python self._version = VERSION_BASE def needs_version(self, ver): self._version = max(self._version, ver) def collect_type_table(self): self._typesdict = {} self._generate("collecttype") # all_decls = sorted(self._typesdict, key=str) # # prepare all FUNCTION bytecode sequences first self.cffi_types = [] for tp in all_decls: if tp.is_raw_function: assert self._typesdict[tp] is None self._typesdict[tp] = len(self.cffi_types) self.cffi_types.append(tp) # placeholder for tp1 in tp.args: assert isinstance(tp1, (model.VoidType, model.BasePrimitiveType, model.PointerType, model.StructOrUnionOrEnum, model.FunctionPtrType)) if self._typesdict[tp1] is None: self._typesdict[tp1] = len(self.cffi_types) self.cffi_types.append(tp1) # placeholder self.cffi_types.append('END') # placeholder # # prepare all OTHER bytecode sequences for tp in all_decls: if not tp.is_raw_function and self._typesdict[tp] is None: self._typesdict[tp] = len(self.cffi_types) self.cffi_types.append(tp) # placeholder if tp.is_array_type and tp.length is not None: self.cffi_types.append('LEN') # placeholder assert None not in self._typesdict.values() # # collect all structs and unions and enums self._struct_unions = {} self._enums = {} for tp in all_decls: if isinstance(tp, model.StructOrUnion): self._struct_unions[tp] = None elif isinstance(tp, model.EnumType): self._enums[tp] = None for i, tp in enumerate(sorted(self._struct_unions, key=lambda tp: tp.name)): self._struct_unions[tp] = i for i, tp in enumerate(sorted(self._enums, key=lambda tp: tp.name)): self._enums[tp] = i # # emit all bytecode sequences now for tp in all_decls: method = getattr(self, '_emit_bytecode_' + tp.__class__.__name__) method(tp, self._typesdict[tp]) # # consistency check for op in self.cffi_types: assert isinstance(op, CffiOp) self.cffi_types = tuple(self.cffi_types) # don't change any more def _enum_fields(self, tp): # When producing C, expand all anonymous struct/union fields. # That's necessary to have C code checking the offsets of the # individual fields contained in them. When producing Python, # don't do it and instead write it like it is, with the # corresponding fields having an empty name. Empty names are # recognized at runtime when we import the generated Python # file. 
expand_anonymous_struct_union = not self.target_is_python return tp.enumfields(expand_anonymous_struct_union) def _do_collect_type(self, tp): if not isinstance(tp, model.BaseTypeByIdentity): if isinstance(tp, tuple): for x in tp: self._do_collect_type(x) return if tp not in self._typesdict: self._typesdict[tp] = None if isinstance(tp, model.FunctionPtrType): self._do_collect_type(tp.as_raw_function()) elif isinstance(tp, model.StructOrUnion): if tp.fldtypes is not None and ( tp not in self.ffi._parser._included_declarations): for name1, tp1, _, _ in self._enum_fields(tp): self._do_collect_type(self._field_type(tp, name1, tp1)) else: for _, x in tp._get_items(): self._do_collect_type(x) def _generate(self, step_name): lst = self.ffi._parser._declarations.items() for name, (tp, quals) in sorted(lst): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, step_name)) except AttributeError: raise VerificationError( "not implemented in recompile(): %r" % name) try: self._current_quals = quals method(tp, realname) except Exception as e: model.attach_exception_info(e, name) raise # ---------- ALL_STEPS = ["global", "field", "struct_union", "enum", "typename"] def collect_step_tables(self): # collect the declarations for '_cffi_globals', '_cffi_typenames', etc. self._lsts = {} for step_name in self.ALL_STEPS: self._lsts[step_name] = [] self._seen_struct_unions = set() self._generate("ctx") self._add_missing_struct_unions() # for step_name in self.ALL_STEPS: lst = self._lsts[step_name] if step_name != "field": lst.sort(key=lambda entry: entry.name) self._lsts[step_name] = tuple(lst) # don't change any more # # check for a possible internal inconsistency: _cffi_struct_unions # should have been generated with exactly self._struct_unions lst = self._lsts["struct_union"] for tp, i in self._struct_unions.items(): assert i < len(lst) assert lst[i].name == tp.name assert len(lst) == len(self._struct_unions) # same with enums lst = self._lsts["enum"] for tp, i in self._enums.items(): assert i < len(lst) assert lst[i].name == tp.name assert len(lst) == len(self._enums) # ---------- def _prnt(self, what=''): self._f.write(what + '\n') def write_source_to_f(self, f, preamble): if self.target_is_python: assert preamble is None self.write_py_source_to_f(f) else: assert preamble is not None self.write_c_source_to_f(f, preamble) def _rel_readlines(self, filename): g = open(os.path.join(os.path.dirname(__file__), filename), 'r') lines = g.readlines() g.close() return lines def write_c_source_to_f(self, f, preamble): self._f = f prnt = self._prnt if self.ffi._embedding is not None: prnt('#define _CFFI_USE_EMBEDDING') if not USE_LIMITED_API: prnt('#define _CFFI_NO_LIMITED_API') # # first the '#include' (actually done by inlining the file's content) lines = self._rel_readlines('_cffi_include.h') i = lines.index('#include "parse_c_type.h"\n') lines[i:i+1] = self._rel_readlines('parse_c_type.h') prnt(''.join(lines)) # # if we have ffi._embedding != None, we give it here as a macro # and include an extra file base_module_name = self.module_name.split('.')[-1] if self.ffi._embedding is not None: prnt('#define _CFFI_MODULE_NAME "%s"' % (self.module_name,)) prnt('static const char _CFFI_PYTHON_STARTUP_CODE[] = {') self._print_string_literal_in_array(self.ffi._embedding) prnt('0 };') prnt('#ifdef PYPY_VERSION') prnt('# define _CFFI_PYTHON_STARTUP_FUNC _cffi_pypyinit_%s' % ( base_module_name,)) prnt('#elif PY_MAJOR_VERSION >= 3') prnt('# define _CFFI_PYTHON_STARTUP_FUNC PyInit_%s' % ( 
base_module_name,)) prnt('#else') prnt('# define _CFFI_PYTHON_STARTUP_FUNC init%s' % ( base_module_name,)) prnt('#endif') lines = self._rel_readlines('_embedding.h') i = lines.index('#include "_cffi_errors.h"\n') lines[i:i+1] = self._rel_readlines('_cffi_errors.h') prnt(''.join(lines)) self.needs_version(VERSION_EMBEDDED) # # then paste the C source given by the user, verbatim. prnt('/************************************************************/') prnt() prnt(preamble) prnt() prnt('/************************************************************/') prnt() # # the declaration of '_cffi_types' prnt('static void *_cffi_types[] = {') typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) for i, op in enumerate(self.cffi_types): comment = '' if i in typeindex2type: comment = ' // ' + typeindex2type[i]._get_c_name() prnt('/* %2d */ %s,%s' % (i, op.as_c_expr(), comment)) if not self.cffi_types: prnt(' 0') prnt('};') prnt() # # call generate_cpy_xxx_decl(), for every xxx found from # ffi._parser._declarations. This generates all the functions. self._seen_constants = set() self._generate("decl") # # the declaration of '_cffi_globals' and '_cffi_typenames' nums = {} for step_name in self.ALL_STEPS: lst = self._lsts[step_name] nums[step_name] = len(lst) if nums[step_name] > 0: prnt('static const struct _cffi_%s_s _cffi_%ss[] = {' % ( step_name, step_name)) for entry in lst: prnt(entry.as_c_expr()) prnt('};') prnt() # # the declaration of '_cffi_includes' if self.ffi._included_ffis: prnt('static const char * const _cffi_includes[] = {') for ffi_to_include in self.ffi._included_ffis: try: included_module_name, included_source = ( ffi_to_include._assigned_source[:2]) except AttributeError: raise VerificationError( "ffi object %r includes %r, but the latter has not " "been prepared with set_source()" % ( self.ffi, ffi_to_include,)) if included_source is None: raise VerificationError( "not implemented yet: ffi.include() of a Python-based " "ffi inside a C-based ffi") prnt(' "%s",' % (included_module_name,)) prnt(' NULL') prnt('};') prnt() # # the declaration of '_cffi_type_context' prnt('static const struct _cffi_type_context_s _cffi_type_context = {') prnt(' _cffi_types,') for step_name in self.ALL_STEPS: if nums[step_name] > 0: prnt(' _cffi_%ss,' % step_name) else: prnt(' NULL, /* no %ss */' % step_name) for step_name in self.ALL_STEPS: if step_name != "field": prnt(' %d, /* num_%ss */' % (nums[step_name], step_name)) if self.ffi._included_ffis: prnt(' _cffi_includes,') else: prnt(' NULL, /* no includes */') prnt(' %d, /* num_types */' % (len(self.cffi_types),)) flags = 0 if self._num_externpy > 0 or self.ffi._embedding is not None: flags |= 1 # set to mean that we use extern "Python" prnt(' %d, /* flags */' % flags) prnt('};') prnt() # # the init function prnt('#ifdef __GNUC__') prnt('# pragma GCC visibility push(default) /* for -fvisibility= */') prnt('#endif') prnt() prnt('#ifdef PYPY_VERSION') prnt('PyMODINIT_FUNC') prnt('_cffi_pypyinit_%s(const void *p[])' % (base_module_name,)) prnt('{') if flags & 1: prnt(' if (((intptr_t)p[0]) >= 0x0A03) {') prnt(' _cffi_call_python_org = ' '(void(*)(struct _cffi_externpy_s *, char *))p[1];') prnt(' }') prnt(' p[0] = (const void *)0x%x;' % self._version) prnt(' p[1] = &_cffi_type_context;') prnt('#if PY_MAJOR_VERSION >= 3') prnt(' return NULL;') prnt('#endif') prnt('}') # on Windows, distutils insists on putting init_cffi_xyz in # 'export_symbols', so instead of fighting it, just give up and # give it one prnt('# ifdef _MSC_VER') prnt(' 
PyMODINIT_FUNC') prnt('# if PY_MAJOR_VERSION >= 3') prnt(' PyInit_%s(void) { return NULL; }' % (base_module_name,)) prnt('# else') prnt(' init%s(void) { }' % (base_module_name,)) prnt('# endif') prnt('# endif') prnt('#elif PY_MAJOR_VERSION >= 3') prnt('PyMODINIT_FUNC') prnt('PyInit_%s(void)' % (base_module_name,)) prnt('{') prnt(' return _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( self.module_name, self._version)) prnt('}') prnt('#else') prnt('PyMODINIT_FUNC') prnt('init%s(void)' % (base_module_name,)) prnt('{') prnt(' _cffi_init("%s", 0x%x, &_cffi_type_context);' % ( self.module_name, self._version)) prnt('}') prnt('#endif') prnt() prnt('#ifdef __GNUC__') prnt('# pragma GCC visibility pop') prnt('#endif') self._version = None def _to_py(self, x): if isinstance(x, str): return "b'%s'" % (x,) if isinstance(x, (list, tuple)): rep = [self._to_py(item) for item in x] if len(rep) == 1: rep.append('') return "(%s)" % (','.join(rep),) return x.as_python_expr() # Py2: unicode unexpected; Py3: bytes unexp. def write_py_source_to_f(self, f): self._f = f prnt = self._prnt # # header prnt("# auto-generated file") prnt("import _cffi_backend") # # the 'import' of the included ffis num_includes = len(self.ffi._included_ffis or ()) for i in range(num_includes): ffi_to_include = self.ffi._included_ffis[i] try: included_module_name, included_source = ( ffi_to_include._assigned_source[:2]) except AttributeError: raise VerificationError( "ffi object %r includes %r, but the latter has not " "been prepared with set_source()" % ( self.ffi, ffi_to_include,)) if included_source is not None: raise VerificationError( "not implemented yet: ffi.include() of a C-based " "ffi inside a Python-based ffi") prnt('from %s import ffi as _ffi%d' % (included_module_name, i)) prnt() prnt("ffi = _cffi_backend.FFI('%s'," % (self.module_name,)) prnt(" _version = 0x%x," % (self._version,)) self._version = None # # the '_types' keyword argument self.cffi_types = tuple(self.cffi_types) # don't change any more types_lst = [op.as_python_bytes() for op in self.cffi_types] prnt(' _types = %s,' % (self._to_py(''.join(types_lst)),)) typeindex2type = dict([(i, tp) for (tp, i) in self._typesdict.items()]) # # the keyword arguments from ALL_STEPS for step_name in self.ALL_STEPS: lst = self._lsts[step_name] if len(lst) > 0 and step_name != "field": prnt(' _%ss = %s,' % (step_name, self._to_py(lst))) # # the '_includes' keyword argument if num_includes > 0: prnt(' _includes = (%s,),' % ( ', '.join(['_ffi%d' % i for i in range(num_includes)]),)) # # the footer prnt(')') # ---------- def _gettypenum(self, type): # a KeyError here is a bug. please report it! 
:-) return self._typesdict[type] def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): extraarg = '' if isinstance(tp, model.BasePrimitiveType) and not tp.is_complex_type(): if tp.is_integer_type() and tp.name != '_Bool': converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name elif isinstance(tp, model.UnknownFloatType): # don't check with is_float_type(): it may be a 'long # double' here, and _cffi_to_c_double would loose precision converter = '(%s)_cffi_to_c_double' % (tp.get_c_name(''),) else: cname = tp.get_c_name('') converter = '(%s)_cffi_to_c_%s' % (cname, tp.name.replace(' ', '_')) if cname in ('char16_t', 'char32_t'): self.needs_version(VERSION_CHAR16CHAR32) errvalue = '-1' # elif isinstance(tp, model.PointerType): self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, tovar, errcode) return # elif (isinstance(tp, model.StructOrUnionOrEnum) or isinstance(tp, model.BasePrimitiveType)): # a struct (not a struct pointer) as a function argument; # or, a complex (the same code works) self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' % (tovar, self._gettypenum(tp), fromvar)) self._prnt(' %s;' % errcode) return # elif isinstance(tp, model.FunctionPtrType): converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) errvalue = 'NULL' # else: raise NotImplementedError(tp) # self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( tovar, tp.get_c_name(''), errvalue)) self._prnt(' %s;' % errcode) def _extra_local_variables(self, tp, localvars, freelines): if isinstance(tp, model.PointerType): localvars.add('Py_ssize_t datasize') localvars.add('struct _cffi_freeme_s *large_args_free = NULL') freelines.add('if (large_args_free != NULL)' ' _cffi_free_array_arguments(large_args_free);') def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( self._gettypenum(tp), fromvar, tovar)) self._prnt(' if (datasize != 0) {') self._prnt(' %s = ((size_t)datasize) <= 640 ? 
' '(%s)alloca((size_t)datasize) : NULL;' % ( tovar, tp.get_c_name(''))) self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) self._prnt(' datasize, &large_args_free) < 0)') self._prnt(' %s;' % errcode) self._prnt(' }') def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.BasePrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif isinstance(tp, model.UnknownFloatType): return '_cffi_from_c_double(%s)' % (var,) elif tp.name != 'long double' and not tp.is_complex_type(): cname = tp.name.replace(' ', '_') if cname in ('char16_t', 'char32_t'): self.needs_version(VERSION_CHAR16CHAR32) return '_cffi_from_c_%s(%s)' % (cname, var) else: return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructOrUnion): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( tp._get_c_name(), context)) return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.EnumType): return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) else: raise NotImplementedError(tp) # ---------- # typedefs def _typedef_type(self, tp, name): return self._global_type(tp, "(*(%s *)0)" % (name,)) def _generate_cpy_typedef_collecttype(self, tp, name): self._do_collect_type(self._typedef_type(tp, name)) def _generate_cpy_typedef_decl(self, tp, name): pass def _typedef_ctx(self, tp, name): type_index = self._typesdict[tp] self._lsts["typename"].append(TypenameExpr(name, type_index)) def _generate_cpy_typedef_ctx(self, tp, name): tp = self._typedef_type(tp, name) self._typedef_ctx(tp, name) if getattr(tp, "origin", None) == "unknown_type": self._struct_ctx(tp, tp.name, approxname=None) elif isinstance(tp, model.NamedPointerType): self._struct_ctx(tp.totype, tp.totype.name, approxname=tp.name, named_ptr=tp) # ---------- # function declarations def _generate_cpy_function_collecttype(self, tp, name): self._do_collect_type(tp.as_raw_function()) if tp.ellipsis and not self.target_is_python: self._do_collect_type(tp) def _generate_cpy_function_decl(self, tp, name): assert not self.target_is_python assert isinstance(tp, model.FunctionPtrType) if tp.ellipsis: # cannot support vararg functions better than this: check for its # exact type (including the fixed arguments), and build it as a # constant function pointer (no CPython wrapper) self._generate_cpy_constant_decl(tp, name) return prnt = self._prnt numargs = len(tp.args) if numargs == 0: argname = 'noarg' elif numargs == 1: argname = 'arg0' else: argname = 'args' # # ------------------------------ # the 'd' version of the function, only for addressof(lib, 'func') arguments = [] call_arguments = [] context = 'argument of %s' % name for i, type in enumerate(tp.args): arguments.append(type.get_c_name(' x%d' % i, context)) call_arguments.append('x%d' % i) repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' if tp.abi: abi = tp.abi + ' ' else: abi = '' name_and_arguments = '%s_cffi_d_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % 
(tp.result.get_c_name(name_and_arguments),)) prnt('{') call_arguments = ', '.join(call_arguments) result_code = 'return ' if isinstance(tp.result, model.VoidType): result_code = '' prnt(' %s%s(%s);' % (result_code, name, call_arguments)) prnt('}') # prnt('#ifndef PYPY_VERSION') # ------------------------------ # prnt('static PyObject *') prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) prnt('{') # context = 'argument of %s' % name for i, type in enumerate(tp.args): arg = type.get_c_name(' x%d' % i, context) prnt(' %s;' % arg) # localvars = set() freelines = set() for type in tp.args: self._extra_local_variables(type, localvars, freelines) for decl in sorted(localvars): prnt(' %s;' % (decl,)) # if not isinstance(tp.result, model.VoidType): result_code = 'result = ' context = 'result of %s' % name result_decl = ' %s;' % tp.result.get_c_name(' result', context) prnt(result_decl) prnt(' PyObject *pyresult;') else: result_decl = None result_code = '' # if len(tp.args) > 1: rng = range(len(tp.args)) for i in rng: prnt(' PyObject *arg%d;' % i) prnt() prnt(' if (!PyArg_UnpackTuple(args, "%s", %d, %d, %s))' % ( name, len(rng), len(rng), ', '.join(['&arg%d' % i for i in rng]))) prnt(' return NULL;') prnt() # for i, type in enumerate(tp.args): self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, 'return NULL') prnt() # prnt(' Py_BEGIN_ALLOW_THREADS') prnt(' _cffi_restore_errno();') call_arguments = ['x%d' % i for i in range(len(tp.args))] call_arguments = ', '.join(call_arguments) prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) prnt(' _cffi_save_errno();') prnt(' Py_END_ALLOW_THREADS') prnt() # prnt(' (void)self; /* unused */') if numargs == 0: prnt(' (void)noarg; /* unused */') if result_code: prnt(' pyresult = %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) for freeline in freelines: prnt(' ' + freeline) prnt(' return pyresult;') else: for freeline in freelines: prnt(' ' + freeline) prnt(' Py_INCREF(Py_None);') prnt(' return Py_None;') prnt('}') # prnt('#else') # ------------------------------ # # the PyPy version: need to replace struct/union arguments with # pointers, and if the result is a struct/union, insert a first # arg that is a pointer to the result. We also do that for # complex args and return type. 
def need_indirection(type): return (isinstance(type, model.StructOrUnion) or (isinstance(type, model.PrimitiveType) and type.is_complex_type())) difference = False arguments = [] call_arguments = [] context = 'argument of %s' % name for i, type in enumerate(tp.args): indirection = '' if need_indirection(type): indirection = '*' difference = True arg = type.get_c_name(' %sx%d' % (indirection, i), context) arguments.append(arg) call_arguments.append('%sx%d' % (indirection, i)) tp_result = tp.result if need_indirection(tp_result): context = 'result of %s' % name arg = tp_result.get_c_name(' *result', context) arguments.insert(0, arg) tp_result = model.void_type result_decl = None result_code = '*result = ' difference = True if difference: repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' name_and_arguments = '%s_cffi_f_%s(%s)' % (abi, name, repr_arguments) prnt('static %s' % (tp_result.get_c_name(name_and_arguments),)) prnt('{') if result_decl: prnt(result_decl) call_arguments = ', '.join(call_arguments) prnt(' { %s%s(%s); }' % (result_code, name, call_arguments)) if result_decl: prnt(' return result;') prnt('}') else: prnt('# define _cffi_f_%s _cffi_d_%s' % (name, name)) # prnt('#endif') # ------------------------------ prnt() def _generate_cpy_function_ctx(self, tp, name): if tp.ellipsis and not self.target_is_python: self._generate_cpy_constant_ctx(tp, name) return type_index = self._typesdict[tp.as_raw_function()] numargs = len(tp.args) if self.target_is_python: meth_kind = OP_DLOPEN_FUNC elif numargs == 0: meth_kind = OP_CPYTHON_BLTN_N # 'METH_NOARGS' elif numargs == 1: meth_kind = OP_CPYTHON_BLTN_O # 'METH_O' else: meth_kind = OP_CPYTHON_BLTN_V # 'METH_VARARGS' self._lsts["global"].append( GlobalExpr(name, '_cffi_f_%s' % name, CffiOp(meth_kind, type_index), size='_cffi_d_%s' % name)) # ---------- # named structs or unions def _field_type(self, tp_struct, field_name, tp_field): if isinstance(tp_field, model.ArrayType): actual_length = tp_field.length if actual_length == '...': ptr_struct_name = tp_struct.get_c_name('*') actual_length = '_cffi_array_len(((%s)0)->%s)' % ( ptr_struct_name, field_name) tp_item = self._field_type(tp_struct, '%s[0]' % field_name, tp_field.item) tp_field = model.ArrayType(tp_item, actual_length) return tp_field def _struct_collecttype(self, tp): self._do_collect_type(tp) if self.target_is_python: # also requires nested anon struct/unions in ABI mode, recursively for fldtype in tp.anonymous_struct_fields(): self._struct_collecttype(fldtype) def _struct_decl(self, tp, cname, approxname): if tp.fldtypes is None: return prnt = self._prnt checkfuncname = '_cffi_checkfld_%s' % (approxname,) prnt('_CFFI_UNUSED_FN') prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') for fname, ftype, fbitsize, fqual in self._enum_fields(tp): try: if ftype.is_integer_type() or fbitsize >= 0: # accept all integers, but complain on float or double if fname != '': prnt(" (void)((p->%s) | 0); /* check that '%s.%s' is " "an integer */" % (fname, cname, fname)) continue # only accept exactly the type declared, except that '[]' # is interpreted as a '*' and so will match any array length. # (It would also match '*', but that's harder to detect...) 
while (isinstance(ftype, model.ArrayType) and (ftype.length is None or ftype.length == '...')): ftype = ftype.item fname = fname + '[0]' prnt(' { %s = &p->%s; (void)tmp; }' % ( ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), fname)) except VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') prnt('struct _cffi_align_%s { char x; %s y; };' % (approxname, cname)) prnt() def _struct_ctx(self, tp, cname, approxname, named_ptr=None): type_index = self._typesdict[tp] reason_for_not_expanding = None flags = [] if isinstance(tp, model.UnionType): flags.append("_CFFI_F_UNION") if tp.fldtypes is None: flags.append("_CFFI_F_OPAQUE") reason_for_not_expanding = "opaque" if (tp not in self.ffi._parser._included_declarations and (named_ptr is None or named_ptr not in self.ffi._parser._included_declarations)): if tp.fldtypes is None: pass # opaque elif tp.partial or any(tp.anonymous_struct_fields()): pass # field layout obtained silently from the C compiler else: flags.append("_CFFI_F_CHECK_FIELDS") if tp.packed: if tp.packed > 1: raise NotImplementedError( "%r is declared with 'pack=%r'; only 0 or 1 are " "supported in API mode (try to use \"...;\", which " "does not require a 'pack' declaration)" % (tp, tp.packed)) flags.append("_CFFI_F_PACKED") else: flags.append("_CFFI_F_EXTERNAL") reason_for_not_expanding = "external" flags = '|'.join(flags) or '0' c_fields = [] if reason_for_not_expanding is None: enumfields = list(self._enum_fields(tp)) for fldname, fldtype, fbitsize, fqual in enumfields: fldtype = self._field_type(tp, fldname, fldtype) self._check_not_opaque(fldtype, "field '%s.%s'" % (tp.name, fldname)) # cname is None for _add_missing_struct_unions() only op = OP_NOOP if fbitsize >= 0: op = OP_BITFIELD size = '%d /* bits */' % fbitsize elif cname is None or ( isinstance(fldtype, model.ArrayType) and fldtype.length is None): size = '(size_t)-1' else: size = 'sizeof(((%s)0)->%s)' % ( tp.get_c_name('*') if named_ptr is None else named_ptr.name, fldname) if cname is None or fbitsize >= 0: offset = '(size_t)-1' elif named_ptr is not None: offset = '((char *)&((%s)0)->%s) - (char *)0' % ( named_ptr.name, fldname) else: offset = 'offsetof(%s, %s)' % (tp.get_c_name(''), fldname) c_fields.append( FieldExpr(fldname, offset, size, fbitsize, CffiOp(op, self._typesdict[fldtype]))) first_field_index = len(self._lsts["field"]) self._lsts["field"].extend(c_fields) # if cname is None: # unknown name, for _add_missing_struct_unions size = '(size_t)-2' align = -2 comment = "unnamed" else: if named_ptr is not None: size = 'sizeof(*(%s)0)' % (named_ptr.name,) align = '-1 /* unknown alignment */' else: size = 'sizeof(%s)' % (cname,) align = 'offsetof(struct _cffi_align_%s, y)' % (approxname,) comment = None else: size = '(size_t)-1' align = -1 first_field_index = -1 comment = reason_for_not_expanding self._lsts["struct_union"].append( StructUnionExpr(tp.name, type_index, flags, size, align, comment, first_field_index, c_fields)) self._seen_struct_unions.add(tp) def _check_not_opaque(self, tp, location): while isinstance(tp, model.ArrayType): tp = tp.item if isinstance(tp, model.StructOrUnion) and tp.fldtypes is None: raise TypeError( "%s is of an opaque type (not declared in cdef())" % location) def _add_missing_struct_unions(self): # not very nice, but some struct declarations might be missing # because they don't have any known C name. Check that they are # not partial (we can't complete or verify them!) and emit them # anonymously. 
lst = list(self._struct_unions.items()) lst.sort(key=lambda tp_order: tp_order[1]) for tp, order in lst: if tp not in self._seen_struct_unions: if tp.partial: raise NotImplementedError("internal inconsistency: %r is " "partial but was not seen at " "this point" % (tp,)) if tp.name.startswith('$') and tp.name[1:].isdigit(): approxname = tp.name[1:] elif tp.name == '_IO_FILE' and tp.forcename == 'FILE': approxname = 'FILE' self._typedef_ctx(tp, 'FILE') else: raise NotImplementedError("internal inconsistency: %r" % (tp,)) self._struct_ctx(tp, None, approxname) def _generate_cpy_struct_collecttype(self, tp, name): self._struct_collecttype(tp) _generate_cpy_union_collecttype = _generate_cpy_struct_collecttype def _struct_names(self, tp): cname = tp.get_c_name('') if ' ' in cname: return cname, cname.replace(' ', '_') else: return cname, '_' + cname def _generate_cpy_struct_decl(self, tp, name): self._struct_decl(tp, *self._struct_names(tp)) _generate_cpy_union_decl = _generate_cpy_struct_decl def _generate_cpy_struct_ctx(self, tp, name): self._struct_ctx(tp, *self._struct_names(tp)) _generate_cpy_union_ctx = _generate_cpy_struct_ctx # ---------- # 'anonymous' declarations. These are produced for anonymous structs # or unions; the 'name' is obtained by a typedef. def _generate_cpy_anonymous_collecttype(self, tp, name): if isinstance(tp, model.EnumType): self._generate_cpy_enum_collecttype(tp, name) else: self._struct_collecttype(tp) def _generate_cpy_anonymous_decl(self, tp, name): if isinstance(tp, model.EnumType): self._generate_cpy_enum_decl(tp) else: self._struct_decl(tp, name, 'typedef_' + name) def _generate_cpy_anonymous_ctx(self, tp, name): if isinstance(tp, model.EnumType): self._enum_ctx(tp, name) else: self._struct_ctx(tp, name, 'typedef_' + name) # ---------- # constants, declared with "static const ..." 
def _generate_cpy_const(self, is_int, name, tp=None, category='const', check_value=None): if (category, name) in self._seen_constants: raise VerificationError( "duplicate declaration of %s '%s'" % (category, name)) self._seen_constants.add((category, name)) # prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) if is_int: prnt('static int %s(unsigned long long *o)' % funcname) prnt('{') prnt(' int n = (%s) <= 0;' % (name,)) prnt(' *o = (unsigned long long)((%s) | 0);' ' /* check that %s is an integer */' % (name, name)) if check_value is not None: if check_value > 0: check_value = '%dU' % (check_value,) prnt(' if (!_cffi_check_int(*o, n, %s))' % (check_value,)) prnt(' n |= 2;') prnt(' return n;') prnt('}') else: assert check_value is None prnt('static void %s(char *o)' % funcname) prnt('{') prnt(' *(%s)o = %s;' % (tp.get_c_name('*'), name)) prnt('}') prnt() def _generate_cpy_constant_collecttype(self, tp, name): is_int = tp.is_integer_type() if not is_int or self.target_is_python: self._do_collect_type(tp) def _generate_cpy_constant_decl(self, tp, name): is_int = tp.is_integer_type() self._generate_cpy_const(is_int, name, tp) def _generate_cpy_constant_ctx(self, tp, name): if not self.target_is_python and tp.is_integer_type(): type_op = CffiOp(OP_CONSTANT_INT, -1) else: if self.target_is_python: const_kind = OP_DLOPEN_CONST else: const_kind = OP_CONSTANT type_index = self._typesdict[tp] type_op = CffiOp(const_kind, type_index) self._lsts["global"].append( GlobalExpr(name, '_cffi_const_%s' % name, type_op)) # ---------- # enums def _generate_cpy_enum_collecttype(self, tp, name): self._do_collect_type(tp) def _generate_cpy_enum_decl(self, tp, name=None): for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator) def _enum_ctx(self, tp, cname): type_index = self._typesdict[tp] type_op = CffiOp(OP_ENUM, -1) if self.target_is_python: tp.check_not_partial() for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._lsts["global"].append( GlobalExpr(enumerator, '_cffi_const_%s' % enumerator, type_op, check_value=enumvalue)) # if cname is not None and '$' not in cname and not self.target_is_python: size = "sizeof(%s)" % cname signed = "((%s)-1) <= 0" % cname else: basetp = tp.build_baseinttype(self.ffi, []) size = self.ffi.sizeof(basetp) signed = int(int(self.ffi.cast(basetp, -1)) < 0) allenums = ",".join(tp.enumerators) self._lsts["enum"].append( EnumExpr(tp.name, type_index, size, signed, allenums)) def _generate_cpy_enum_ctx(self, tp, name): self._enum_ctx(tp, tp._get_c_name()) # ---------- # macros: for now only for integers def _generate_cpy_macro_collecttype(self, tp, name): pass def _generate_cpy_macro_decl(self, tp, name): if tp == '...': check_value = None else: check_value = tp # an integer self._generate_cpy_const(True, name, check_value=check_value) def _generate_cpy_macro_ctx(self, tp, name): if tp == '...': if self.target_is_python: raise VerificationError( "cannot use the syntax '...' in '#define %s ...' 
when " "using the ABI mode" % (name,)) check_value = None else: check_value = tp # an integer type_op = CffiOp(OP_CONSTANT_INT, -1) self._lsts["global"].append( GlobalExpr(name, '_cffi_const_%s' % name, type_op, check_value=check_value)) # ---------- # global variables def _global_type(self, tp, global_name): if isinstance(tp, model.ArrayType): actual_length = tp.length if actual_length == '...': actual_length = '_cffi_array_len(%s)' % (global_name,) tp_item = self._global_type(tp.item, '%s[0]' % global_name) tp = model.ArrayType(tp_item, actual_length) return tp def _generate_cpy_variable_collecttype(self, tp, name): self._do_collect_type(self._global_type(tp, name)) def _generate_cpy_variable_decl(self, tp, name): prnt = self._prnt tp = self._global_type(tp, name) if isinstance(tp, model.ArrayType) and tp.length is None: tp = tp.item ampersand = '' else: ampersand = '&' # This code assumes that casts from "tp *" to "void *" is a # no-op, i.e. a function that returns a "tp *" can be called # as if it returned a "void *". This should be generally true # on any modern machine. The only exception to that rule (on # uncommon architectures, and as far as I can tell) might be # if 'tp' were a function type, but that is not possible here. # (If 'tp' is a function _pointer_ type, then casts from "fn_t # **" to "void *" are again no-ops, as far as I can tell.) decl = '*_cffi_var_%s(void)' % (name,) prnt('static ' + tp.get_c_name(decl, quals=self._current_quals)) prnt('{') prnt(' return %s(%s);' % (ampersand, name)) prnt('}') prnt() def _generate_cpy_variable_ctx(self, tp, name): tp = self._global_type(tp, name) type_index = self._typesdict[tp] if self.target_is_python: op = OP_GLOBAL_VAR else: op = OP_GLOBAL_VAR_F self._lsts["global"].append( GlobalExpr(name, '_cffi_var_%s' % name, CffiOp(op, type_index))) # ---------- # extern "Python" def _generate_cpy_extern_python_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) self._do_collect_type(tp) _generate_cpy_dllexport_python_collecttype = \ _generate_cpy_extern_python_plus_c_collecttype = \ _generate_cpy_extern_python_collecttype def _extern_python_decl(self, tp, name, tag_and_space): prnt = self._prnt if isinstance(tp.result, model.VoidType): size_of_result = '0' else: context = 'result of %s' % name size_of_result = '(int)sizeof(%s)' % ( tp.result.get_c_name('', context),) prnt('static struct _cffi_externpy_s _cffi_externpy__%s =' % name) prnt(' { "%s.%s", %s, 0, 0 };' % ( self.module_name, name, size_of_result)) prnt() # arguments = [] context = 'argument of %s' % name for i, type in enumerate(tp.args): arg = type.get_c_name(' a%d' % i, context) arguments.append(arg) # repr_arguments = ', '.join(arguments) repr_arguments = repr_arguments or 'void' name_and_arguments = '%s(%s)' % (name, repr_arguments) if tp.abi == "__stdcall": name_and_arguments = '_cffi_stdcall ' + name_and_arguments # def may_need_128_bits(tp): return (isinstance(tp, model.PrimitiveType) and tp.name == 'long double') # size_of_a = max(len(tp.args)*8, 8) if may_need_128_bits(tp.result): size_of_a = max(size_of_a, 16) if isinstance(tp.result, model.StructOrUnion): size_of_a = 'sizeof(%s) > %d ? 
sizeof(%s) : %d' % ( tp.result.get_c_name(''), size_of_a, tp.result.get_c_name(''), size_of_a) prnt('%s%s' % (tag_and_space, tp.result.get_c_name(name_and_arguments))) prnt('{') prnt(' char a[%s];' % size_of_a) prnt(' char *p = a;') for i, type in enumerate(tp.args): arg = 'a%d' % i if (isinstance(type, model.StructOrUnion) or may_need_128_bits(type)): arg = '&' + arg type = model.PointerType(type) prnt(' *(%s)(p + %d) = %s;' % (type.get_c_name('*'), i*8, arg)) prnt(' _cffi_call_python(&_cffi_externpy__%s, p);' % name) if not isinstance(tp.result, model.VoidType): prnt(' return *(%s)p;' % (tp.result.get_c_name('*'),)) prnt('}') prnt() self._num_externpy += 1 def _generate_cpy_extern_python_decl(self, tp, name): self._extern_python_decl(tp, name, 'static ') def _generate_cpy_dllexport_python_decl(self, tp, name): self._extern_python_decl(tp, name, 'CFFI_DLLEXPORT ') def _generate_cpy_extern_python_plus_c_decl(self, tp, name): self._extern_python_decl(tp, name, '') def _generate_cpy_extern_python_ctx(self, tp, name): if self.target_is_python: raise VerificationError( "cannot use 'extern \"Python\"' in the ABI mode") if tp.ellipsis: raise NotImplementedError("a vararg function is extern \"Python\"") type_index = self._typesdict[tp] type_op = CffiOp(OP_EXTERN_PYTHON, type_index) self._lsts["global"].append( GlobalExpr(name, '&_cffi_externpy__%s' % name, type_op, name)) _generate_cpy_dllexport_python_ctx = \ _generate_cpy_extern_python_plus_c_ctx = \ _generate_cpy_extern_python_ctx def _print_string_literal_in_array(self, s): prnt = self._prnt prnt('// # NB. this is not a string because of a size limit in MSVC') if not isinstance(s, bytes): # unicode s = s.encode('utf-8') # -> bytes else: s.decode('utf-8') # got bytes, check for valid utf-8 try: s.decode('ascii') except UnicodeDecodeError: s = b'# -*- encoding: utf8 -*-\n' + s for line in s.splitlines(True): comment = line if type('//') is bytes: # python2 line = map(ord, line) # make a list of integers else: # python3 # type(line) is bytes, which enumerates like a list of integers comment = ascii(comment)[1:-1] prnt(('// ' + comment).rstrip()) printed_line = '' for c in line: if len(printed_line) >= 76: prnt(printed_line) printed_line = '' printed_line += '%d,' % (c,) prnt(printed_line) # ---------- # emitting the opcodes for individual types def _emit_bytecode_VoidType(self, tp, index): self.cffi_types[index] = CffiOp(OP_PRIMITIVE, PRIM_VOID) def _emit_bytecode_PrimitiveType(self, tp, index): prim_index = PRIMITIVE_TO_INDEX[tp.name] self.cffi_types[index] = CffiOp(OP_PRIMITIVE, prim_index) def _emit_bytecode_UnknownIntegerType(self, tp, index): s = ('_cffi_prim_int(sizeof(%s), (\n' ' ((%s)-1) | 0 /* check that %s is an integer type */\n' ' ) <= 0)' % (tp.name, tp.name, tp.name)) self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) def _emit_bytecode_UnknownFloatType(self, tp, index): s = ('_cffi_prim_float(sizeof(%s) *\n' ' (((%s)1) / 2) * 2 /* integer => 0, float => 1 */\n' ' )' % (tp.name, tp.name)) self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s) def _emit_bytecode_RawFunctionType(self, tp, index): self.cffi_types[index] = CffiOp(OP_FUNCTION, self._typesdict[tp.result]) index += 1 for tp1 in tp.args: realindex = self._typesdict[tp1] if index != realindex: if isinstance(tp1, model.PrimitiveType): self._emit_bytecode_PrimitiveType(tp1, index) else: self.cffi_types[index] = CffiOp(OP_NOOP, realindex) index += 1 flags = int(tp.ellipsis) if tp.abi is not None: if tp.abi == '__stdcall': flags |= 2 else: raise NotImplementedError("abi=%r" % 
(tp.abi,)) self.cffi_types[index] = CffiOp(OP_FUNCTION_END, flags) def _emit_bytecode_PointerType(self, tp, index): self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[tp.totype]) _emit_bytecode_ConstPointerType = _emit_bytecode_PointerType _emit_bytecode_NamedPointerType = _emit_bytecode_PointerType def _emit_bytecode_FunctionPtrType(self, tp, index): raw = tp.as_raw_function() self.cffi_types[index] = CffiOp(OP_POINTER, self._typesdict[raw]) def _emit_bytecode_ArrayType(self, tp, index): item_index = self._typesdict[tp.item] if tp.length is None: self.cffi_types[index] = CffiOp(OP_OPEN_ARRAY, item_index) elif tp.length == '...': raise VerificationError( "type %s badly placed: the '...' array length can only be " "used on global arrays or on fields of structures" % ( str(tp).replace('/*...*/', '...'),)) else: assert self.cffi_types[index + 1] == 'LEN' self.cffi_types[index] = CffiOp(OP_ARRAY, item_index) self.cffi_types[index + 1] = CffiOp(None, str(tp.length)) def _emit_bytecode_StructType(self, tp, index): struct_index = self._struct_unions[tp] self.cffi_types[index] = CffiOp(OP_STRUCT_UNION, struct_index) _emit_bytecode_UnionType = _emit_bytecode_StructType def _emit_bytecode_EnumType(self, tp, index): enum_index = self._enums[tp] self.cffi_types[index] = CffiOp(OP_ENUM, enum_index) if sys.version_info >= (3,): NativeIO = io.StringIO else: class NativeIO(io.BytesIO): def write(self, s): if isinstance(s, unicode): s = s.encode('ascii') super(NativeIO, self).write(s) def _make_c_or_py_source(ffi, module_name, preamble, target_file, verbose): if verbose: print("generating %s" % (target_file,)) recompiler = Recompiler(ffi, module_name, target_is_python=(preamble is None)) recompiler.collect_type_table() recompiler.collect_step_tables() f = NativeIO() recompiler.write_source_to_f(f, preamble) output = f.getvalue() try: with open(target_file, 'r') as f1: if f1.read(len(output) + 1) != output: raise IOError if verbose: print("(already up-to-date)") return False # already up-to-date except IOError: tmp_file = '%s.~%d' % (target_file, os.getpid()) with open(tmp_file, 'w') as f1: f1.write(output) try: os.rename(tmp_file, target_file) except OSError: os.unlink(target_file) os.rename(tmp_file, target_file) return True def make_c_source(ffi, module_name, preamble, target_c_file, verbose=False): assert preamble is not None return _make_c_or_py_source(ffi, module_name, preamble, target_c_file, verbose) def make_py_source(ffi, module_name, target_py_file, verbose=False): return _make_c_or_py_source(ffi, module_name, None, target_py_file, verbose) def _modname_to_file(outputdir, modname, extension): parts = modname.split('.') try: os.makedirs(os.path.join(outputdir, *parts[:-1])) except OSError: pass parts[-1] += extension return os.path.join(outputdir, *parts), parts # Aaargh. Distutils is not tested at all for the purpose of compiling # DLLs that are not extension modules. Here are some hacks to work # around that, in the _patch_for_*() functions... def _patch_meth(patchlist, cls, name, new_meth): old = getattr(cls, name) patchlist.append((cls, name, old)) setattr(cls, name, new_meth) return old def _unpatch_meths(patchlist): for cls, name, old_meth in reversed(patchlist): setattr(cls, name, old_meth) def _patch_for_embedding(patchlist): if sys.platform == 'win32': # we must not remove the manifest when building for embedding! 
from distutils.msvc9compiler import MSVCCompiler _patch_meth(patchlist, MSVCCompiler, '_remove_visual_c_ref', lambda self, manifest_file: manifest_file) if sys.platform == 'darwin': # we must not make a '-bundle', but a '-dynamiclib' instead from distutils.ccompiler import CCompiler def my_link_shared_object(self, *args, **kwds): if '-bundle' in self.linker_so: self.linker_so = list(self.linker_so) i = self.linker_so.index('-bundle') self.linker_so[i] = '-dynamiclib' return old_link_shared_object(self, *args, **kwds) old_link_shared_object = _patch_meth(patchlist, CCompiler, 'link_shared_object', my_link_shared_object) def _patch_for_target(patchlist, target): from distutils.command.build_ext import build_ext # if 'target' is different from '*', we need to patch some internal # method to just return this 'target' value, instead of having it # built from module_name if target.endswith('.*'): target = target[:-2] if sys.platform == 'win32': target += '.dll' elif sys.platform == 'darwin': target += '.dylib' else: target += '.so' _patch_meth(patchlist, build_ext, 'get_ext_filename', lambda self, ext_name: target) def recompile(ffi, module_name, preamble, tmpdir='.', call_c_compiler=True, c_file=None, source_extension='.c', extradir=None, compiler_verbose=1, target=None, debug=None, **kwds): if not isinstance(module_name, str): module_name = module_name.encode('ascii') if ffi._windows_unicode: ffi._apply_windows_unicode(kwds) if preamble is not None: embedding = (ffi._embedding is not None) if embedding: ffi._apply_embedding_fix(kwds) if c_file is None: c_file, parts = _modname_to_file(tmpdir, module_name, source_extension) if extradir: parts = [extradir] + parts ext_c_file = os.path.join(*parts) else: ext_c_file = c_file # if target is None: if embedding: target = '%s.*' % module_name else: target = '*' # ext = ffiplatform.get_extension(ext_c_file, module_name, **kwds) updated = make_c_source(ffi, module_name, preamble, c_file, verbose=compiler_verbose) if call_c_compiler: patchlist = [] cwd = os.getcwd() try: if embedding: _patch_for_embedding(patchlist) if target != '*': _patch_for_target(patchlist, target) if compiler_verbose: if tmpdir == '.': msg = 'the current directory is' else: msg = 'setting the current directory to' print('%s %r' % (msg, os.path.abspath(tmpdir))) os.chdir(tmpdir) outputfilename = ffiplatform.compile('.', ext, compiler_verbose, debug) finally: os.chdir(cwd) _unpatch_meths(patchlist) return outputfilename else: return ext, updated else: if c_file is None: c_file, _ = _modname_to_file(tmpdir, module_name, '.py') updated = make_py_source(ffi, module_name, c_file, verbose=compiler_verbose) if call_c_compiler: return c_file else: return None, updated
64,598
Python
39.833755
80
0.507601
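The Recompiler in cffi/recompiler.py above drives two output modes from the same small expression objects: `as_c_expr()` renders one line of a C array initializer for the generated C extension, while `as_python_expr()` packs the same information into a compact byte string for the pure-Python (ABI mode) output. Below is a toy sketch of that pattern with invented class and field names and a simplified 4-hex-digit index encoding; it is not the actual cffi encoding.

import sys

class ConstEntry:
    """Hypothetical analogue of the *Expr classes used by Recompiler."""
    def __init__(self, name, type_index):
        self.name = name
        self.type_index = type_index

    def as_c_expr(self):
        # one row of a C 'static const ... []' initializer
        return '  { "%s", (void *)&%s, %d },' % (
            self.name, self.name, self.type_index)

    def as_python_expr(self):
        # compact byte-string form for a generated .py module
        return "b'%04x%s'" % (self.type_index, self.name)

entries = [ConstEntry("FOO", 3), ConstEntry("BAR", 17)]
sys.stdout.write("\n".join(e.as_c_expr() for e in entries) + "\n")
sys.stdout.write(",".join(e.as_python_expr() for e in entries) + "\n")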
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/backend_ctypes.py
import ctypes, ctypes.util, operator, sys from . import model if sys.version_info < (3,): bytechr = chr else: unicode = str long = int xrange = range bytechr = lambda num: bytes([num]) class CTypesType(type): pass class CTypesData(object): __metaclass__ = CTypesType __slots__ = ['__weakref__'] __name__ = '<cdata>' def __init__(self, *args): raise TypeError("cannot instantiate %r" % (self.__class__,)) @classmethod def _newp(cls, init): raise TypeError("expected a pointer or array ctype, got '%s'" % (cls._get_c_name(),)) @staticmethod def _to_ctypes(value): raise TypeError @classmethod def _arg_to_ctypes(cls, *value): try: ctype = cls._ctype except AttributeError: raise TypeError("cannot create an instance of %r" % (cls,)) if value: res = cls._to_ctypes(*value) if not isinstance(res, ctype): res = cls._ctype(res) else: res = cls._ctype() return res @classmethod def _create_ctype_obj(cls, init): if init is None: return cls._arg_to_ctypes() else: return cls._arg_to_ctypes(init) @staticmethod def _from_ctypes(ctypes_value): raise TypeError @classmethod def _get_c_name(cls, replace_with=''): return cls._reftypename.replace(' &', replace_with) @classmethod def _fix_class(cls): cls.__name__ = 'CData<%s>' % (cls._get_c_name(),) cls.__qualname__ = 'CData<%s>' % (cls._get_c_name(),) cls.__module__ = 'ffi' def _get_own_repr(self): raise NotImplementedError def _addr_repr(self, address): if address == 0: return 'NULL' else: if address < 0: address += 1 << (8*ctypes.sizeof(ctypes.c_void_p)) return '0x%x' % address def __repr__(self, c_name=None): own = self._get_own_repr() return '<cdata %r %s>' % (c_name or self._get_c_name(), own) def _convert_to_address(self, BClass): if BClass is None: raise TypeError("cannot convert %r to an address" % ( self._get_c_name(),)) else: raise TypeError("cannot convert %r to %r" % ( self._get_c_name(), BClass._get_c_name())) @classmethod def _get_size(cls): return ctypes.sizeof(cls._ctype) def _get_size_of_instance(self): return ctypes.sizeof(self._ctype) @classmethod def _cast_from(cls, source): raise TypeError("cannot cast to %r" % (cls._get_c_name(),)) def _cast_to_integer(self): return self._convert_to_address(None) @classmethod def _alignment(cls): return ctypes.alignment(cls._ctype) def __iter__(self): raise TypeError("cdata %r does not support iteration" % ( self._get_c_name()),) def _make_cmp(name): cmpfunc = getattr(operator, name) def cmp(self, other): v_is_ptr = not isinstance(self, CTypesGenericPrimitive) w_is_ptr = (isinstance(other, CTypesData) and not isinstance(other, CTypesGenericPrimitive)) if v_is_ptr and w_is_ptr: return cmpfunc(self._convert_to_address(None), other._convert_to_address(None)) elif v_is_ptr or w_is_ptr: return NotImplemented else: if isinstance(self, CTypesGenericPrimitive): self = self._value if isinstance(other, CTypesGenericPrimitive): other = other._value return cmpfunc(self, other) cmp.func_name = name return cmp __eq__ = _make_cmp('__eq__') __ne__ = _make_cmp('__ne__') __lt__ = _make_cmp('__lt__') __le__ = _make_cmp('__le__') __gt__ = _make_cmp('__gt__') __ge__ = _make_cmp('__ge__') def __hash__(self): return hash(self._convert_to_address(None)) def _to_string(self, maxlen): raise TypeError("string(): %r" % (self,)) class CTypesGenericPrimitive(CTypesData): __slots__ = [] def __hash__(self): return hash(self._value) def _get_own_repr(self): return repr(self._from_ctypes(self._value)) class CTypesGenericArray(CTypesData): __slots__ = [] @classmethod def _newp(cls, init): return cls(init) def __iter__(self): for i in 
xrange(len(self)): yield self[i] def _get_own_repr(self): return self._addr_repr(ctypes.addressof(self._blob)) class CTypesGenericPtr(CTypesData): __slots__ = ['_address', '_as_ctype_ptr'] _automatic_casts = False kind = "pointer" @classmethod def _newp(cls, init): return cls(init) @classmethod def _cast_from(cls, source): if source is None: address = 0 elif isinstance(source, CTypesData): address = source._cast_to_integer() elif isinstance(source, (int, long)): address = source else: raise TypeError("bad type for cast to %r: %r" % (cls, type(source).__name__)) return cls._new_pointer_at(address) @classmethod def _new_pointer_at(cls, address): self = cls.__new__(cls) self._address = address self._as_ctype_ptr = ctypes.cast(address, cls._ctype) return self def _get_own_repr(self): try: return self._addr_repr(self._address) except AttributeError: return '???' def _cast_to_integer(self): return self._address def __nonzero__(self): return bool(self._address) __bool__ = __nonzero__ @classmethod def _to_ctypes(cls, value): if not isinstance(value, CTypesData): raise TypeError("unexpected %s object" % type(value).__name__) address = value._convert_to_address(cls) return ctypes.cast(address, cls._ctype) @classmethod def _from_ctypes(cls, ctypes_ptr): address = ctypes.cast(ctypes_ptr, ctypes.c_void_p).value or 0 return cls._new_pointer_at(address) @classmethod def _initialize(cls, ctypes_ptr, value): if value: ctypes_ptr.contents = cls._to_ctypes(value).contents def _convert_to_address(self, BClass): if (BClass in (self.__class__, None) or BClass._automatic_casts or self._automatic_casts): return self._address else: return CTypesData._convert_to_address(self, BClass) class CTypesBaseStructOrUnion(CTypesData): __slots__ = ['_blob'] @classmethod def _create_ctype_obj(cls, init): # may be overridden raise TypeError("cannot instantiate opaque type %s" % (cls,)) def _get_own_repr(self): return self._addr_repr(ctypes.addressof(self._blob)) @classmethod def _offsetof(cls, fieldname): return getattr(cls._ctype, fieldname).offset def _convert_to_address(self, BClass): if getattr(BClass, '_BItem', None) is self.__class__: return ctypes.addressof(self._blob) else: return CTypesData._convert_to_address(self, BClass) @classmethod def _from_ctypes(cls, ctypes_struct_or_union): self = cls.__new__(cls) self._blob = ctypes_struct_or_union return self @classmethod def _to_ctypes(cls, value): return value._blob def __repr__(self, c_name=None): return CTypesData.__repr__(self, c_name or self._get_c_name(' &')) class CTypesBackend(object): PRIMITIVE_TYPES = { 'char': ctypes.c_char, 'short': ctypes.c_short, 'int': ctypes.c_int, 'long': ctypes.c_long, 'long long': ctypes.c_longlong, 'signed char': ctypes.c_byte, 'unsigned char': ctypes.c_ubyte, 'unsigned short': ctypes.c_ushort, 'unsigned int': ctypes.c_uint, 'unsigned long': ctypes.c_ulong, 'unsigned long long': ctypes.c_ulonglong, 'float': ctypes.c_float, 'double': ctypes.c_double, '_Bool': ctypes.c_bool, } for _name in ['unsigned long long', 'unsigned long', 'unsigned int', 'unsigned short', 'unsigned char']: _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) PRIMITIVE_TYPES['uint%d_t' % (8*_size)] = PRIMITIVE_TYPES[_name] if _size == ctypes.sizeof(ctypes.c_void_p): PRIMITIVE_TYPES['uintptr_t'] = PRIMITIVE_TYPES[_name] if _size == ctypes.sizeof(ctypes.c_size_t): PRIMITIVE_TYPES['size_t'] = PRIMITIVE_TYPES[_name] for _name in ['long long', 'long', 'int', 'short', 'signed char']: _size = ctypes.sizeof(PRIMITIVE_TYPES[_name]) PRIMITIVE_TYPES['int%d_t' % (8*_size)] = 
PRIMITIVE_TYPES[_name] if _size == ctypes.sizeof(ctypes.c_void_p): PRIMITIVE_TYPES['intptr_t'] = PRIMITIVE_TYPES[_name] PRIMITIVE_TYPES['ptrdiff_t'] = PRIMITIVE_TYPES[_name] if _size == ctypes.sizeof(ctypes.c_size_t): PRIMITIVE_TYPES['ssize_t'] = PRIMITIVE_TYPES[_name] def __init__(self): self.RTLD_LAZY = 0 # not supported anyway by ctypes self.RTLD_NOW = 0 self.RTLD_GLOBAL = ctypes.RTLD_GLOBAL self.RTLD_LOCAL = ctypes.RTLD_LOCAL def set_ffi(self, ffi): self.ffi = ffi def _get_types(self): return CTypesData, CTypesType def load_library(self, path, flags=0): cdll = ctypes.CDLL(path, flags) return CTypesLibrary(self, cdll) def new_void_type(self): class CTypesVoid(CTypesData): __slots__ = [] _reftypename = 'void &' @staticmethod def _from_ctypes(novalue): return None @staticmethod def _to_ctypes(novalue): if novalue is not None: raise TypeError("None expected, got %s object" % (type(novalue).__name__,)) return None CTypesVoid._fix_class() return CTypesVoid def new_primitive_type(self, name): if name == 'wchar_t': raise NotImplementedError(name) ctype = self.PRIMITIVE_TYPES[name] if name == 'char': kind = 'char' elif name in ('float', 'double'): kind = 'float' else: if name in ('signed char', 'unsigned char'): kind = 'byte' elif name == '_Bool': kind = 'bool' else: kind = 'int' is_signed = (ctype(-1).value == -1) # def _cast_source_to_int(source): if isinstance(source, (int, long, float)): source = int(source) elif isinstance(source, CTypesData): source = source._cast_to_integer() elif isinstance(source, bytes): source = ord(source) elif source is None: source = 0 else: raise TypeError("bad type for cast to %r: %r" % (CTypesPrimitive, type(source).__name__)) return source # kind1 = kind class CTypesPrimitive(CTypesGenericPrimitive): __slots__ = ['_value'] _ctype = ctype _reftypename = '%s &' % name kind = kind1 def __init__(self, value): self._value = value @staticmethod def _create_ctype_obj(init): if init is None: return ctype() return ctype(CTypesPrimitive._to_ctypes(init)) if kind == 'int' or kind == 'byte': @classmethod def _cast_from(cls, source): source = _cast_source_to_int(source) source = ctype(source).value # cast within range return cls(source) def __int__(self): return self._value if kind == 'bool': @classmethod def _cast_from(cls, source): if not isinstance(source, (int, long, float)): source = _cast_source_to_int(source) return cls(bool(source)) def __int__(self): return int(self._value) if kind == 'char': @classmethod def _cast_from(cls, source): source = _cast_source_to_int(source) source = bytechr(source & 0xFF) return cls(source) def __int__(self): return ord(self._value) if kind == 'float': @classmethod def _cast_from(cls, source): if isinstance(source, float): pass elif isinstance(source, CTypesGenericPrimitive): if hasattr(source, '__float__'): source = float(source) else: source = int(source) else: source = _cast_source_to_int(source) source = ctype(source).value # fix precision return cls(source) def __int__(self): return int(self._value) def __float__(self): return self._value _cast_to_integer = __int__ if kind == 'int' or kind == 'byte' or kind == 'bool': @staticmethod def _to_ctypes(x): if not isinstance(x, (int, long)): if isinstance(x, CTypesData): x = int(x) else: raise TypeError("integer expected, got %s" % type(x).__name__) if ctype(x).value != x: if not is_signed and x < 0: raise OverflowError("%s: negative integer" % name) else: raise OverflowError("%s: integer out of bounds" % name) return x if kind == 'char': @staticmethod def _to_ctypes(x): if 
isinstance(x, bytes) and len(x) == 1: return x if isinstance(x, CTypesPrimitive): # <CData <char>> return x._value raise TypeError("character expected, got %s" % type(x).__name__) def __nonzero__(self): return ord(self._value) != 0 else: def __nonzero__(self): return self._value != 0 __bool__ = __nonzero__ if kind == 'float': @staticmethod def _to_ctypes(x): if not isinstance(x, (int, long, float, CTypesData)): raise TypeError("float expected, got %s" % type(x).__name__) return ctype(x).value @staticmethod def _from_ctypes(value): return getattr(value, 'value', value) @staticmethod def _initialize(blob, init): blob.value = CTypesPrimitive._to_ctypes(init) if kind == 'char': def _to_string(self, maxlen): return self._value if kind == 'byte': def _to_string(self, maxlen): return chr(self._value & 0xff) # CTypesPrimitive._fix_class() return CTypesPrimitive def new_pointer_type(self, BItem): getbtype = self.ffi._get_cached_btype if BItem is getbtype(model.PrimitiveType('char')): kind = 'charp' elif BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char'))): kind = 'bytep' elif BItem is getbtype(model.void_type): kind = 'voidp' else: kind = 'generic' # class CTypesPtr(CTypesGenericPtr): __slots__ = ['_own'] if kind == 'charp': __slots__ += ['__as_strbuf'] _BItem = BItem if hasattr(BItem, '_ctype'): _ctype = ctypes.POINTER(BItem._ctype) _bitem_size = ctypes.sizeof(BItem._ctype) else: _ctype = ctypes.c_void_p if issubclass(BItem, CTypesGenericArray): _reftypename = BItem._get_c_name('(* &)') else: _reftypename = BItem._get_c_name(' * &') def __init__(self, init): ctypeobj = BItem._create_ctype_obj(init) if kind == 'charp': self.__as_strbuf = ctypes.create_string_buffer( ctypeobj.value + b'\x00') self._as_ctype_ptr = ctypes.cast( self.__as_strbuf, self._ctype) else: self._as_ctype_ptr = ctypes.pointer(ctypeobj) self._address = ctypes.cast(self._as_ctype_ptr, ctypes.c_void_p).value self._own = True def __add__(self, other): if isinstance(other, (int, long)): return self._new_pointer_at(self._address + other * self._bitem_size) else: return NotImplemented def __sub__(self, other): if isinstance(other, (int, long)): return self._new_pointer_at(self._address - other * self._bitem_size) elif type(self) is type(other): return (self._address - other._address) // self._bitem_size else: return NotImplemented def __getitem__(self, index): if getattr(self, '_own', False) and index != 0: raise IndexError return BItem._from_ctypes(self._as_ctype_ptr[index]) def __setitem__(self, index, value): self._as_ctype_ptr[index] = BItem._to_ctypes(value) if kind == 'charp' or kind == 'voidp': @classmethod def _arg_to_ctypes(cls, *value): if value and isinstance(value[0], bytes): return ctypes.c_char_p(value[0]) else: return super(CTypesPtr, cls)._arg_to_ctypes(*value) if kind == 'charp' or kind == 'bytep': def _to_string(self, maxlen): if maxlen < 0: maxlen = sys.maxsize p = ctypes.cast(self._as_ctype_ptr, ctypes.POINTER(ctypes.c_char)) n = 0 while n < maxlen and p[n] != b'\x00': n += 1 return b''.join([p[i] for i in range(n)]) def _get_own_repr(self): if getattr(self, '_own', False): return 'owning %d bytes' % ( ctypes.sizeof(self._as_ctype_ptr.contents),) return super(CTypesPtr, self)._get_own_repr() # if (BItem is self.ffi._get_cached_btype(model.void_type) or BItem is self.ffi._get_cached_btype(model.PrimitiveType('char'))): CTypesPtr._automatic_casts = True # CTypesPtr._fix_class() return CTypesPtr def new_array_type(self, CTypesPtr, length): if length is None: brackets 
= ' &[]' else: brackets = ' &[%d]' % length BItem = CTypesPtr._BItem getbtype = self.ffi._get_cached_btype if BItem is getbtype(model.PrimitiveType('char')): kind = 'char' elif BItem in (getbtype(model.PrimitiveType('signed char')), getbtype(model.PrimitiveType('unsigned char'))): kind = 'byte' else: kind = 'generic' # class CTypesArray(CTypesGenericArray): __slots__ = ['_blob', '_own'] if length is not None: _ctype = BItem._ctype * length else: __slots__.append('_ctype') _reftypename = BItem._get_c_name(brackets) _declared_length = length _CTPtr = CTypesPtr def __init__(self, init): if length is None: if isinstance(init, (int, long)): len1 = init init = None elif kind == 'char' and isinstance(init, bytes): len1 = len(init) + 1 # extra null else: init = tuple(init) len1 = len(init) self._ctype = BItem._ctype * len1 self._blob = self._ctype() self._own = True if init is not None: self._initialize(self._blob, init) @staticmethod def _initialize(blob, init): if isinstance(init, bytes): init = [init[i:i+1] for i in range(len(init))] else: if isinstance(init, CTypesGenericArray): if (len(init) != len(blob) or not isinstance(init, CTypesArray)): raise TypeError("length/type mismatch: %s" % (init,)) init = tuple(init) if len(init) > len(blob): raise IndexError("too many initializers") addr = ctypes.cast(blob, ctypes.c_void_p).value PTR = ctypes.POINTER(BItem._ctype) itemsize = ctypes.sizeof(BItem._ctype) for i, value in enumerate(init): p = ctypes.cast(addr + i * itemsize, PTR) BItem._initialize(p.contents, value) def __len__(self): return len(self._blob) def __getitem__(self, index): if not (0 <= index < len(self._blob)): raise IndexError return BItem._from_ctypes(self._blob[index]) def __setitem__(self, index, value): if not (0 <= index < len(self._blob)): raise IndexError self._blob[index] = BItem._to_ctypes(value) if kind == 'char' or kind == 'byte': def _to_string(self, maxlen): if maxlen < 0: maxlen = len(self._blob) p = ctypes.cast(self._blob, ctypes.POINTER(ctypes.c_char)) n = 0 while n < maxlen and p[n] != b'\x00': n += 1 return b''.join([p[i] for i in range(n)]) def _get_own_repr(self): if getattr(self, '_own', False): return 'owning %d bytes' % (ctypes.sizeof(self._blob),) return super(CTypesArray, self)._get_own_repr() def _convert_to_address(self, BClass): if BClass in (CTypesPtr, None) or BClass._automatic_casts: return ctypes.addressof(self._blob) else: return CTypesData._convert_to_address(self, BClass) @staticmethod def _from_ctypes(ctypes_array): self = CTypesArray.__new__(CTypesArray) self._blob = ctypes_array return self @staticmethod def _arg_to_ctypes(value): return CTypesPtr._arg_to_ctypes(value) def __add__(self, other): if isinstance(other, (int, long)): return CTypesPtr._new_pointer_at( ctypes.addressof(self._blob) + other * ctypes.sizeof(BItem._ctype)) else: return NotImplemented @classmethod def _cast_from(cls, source): raise NotImplementedError("casting to %r" % ( cls._get_c_name(),)) # CTypesArray._fix_class() return CTypesArray def _new_struct_or_union(self, kind, name, base_ctypes_class): # class struct_or_union(base_ctypes_class): pass struct_or_union.__name__ = '%s_%s' % (kind, name) kind1 = kind # class CTypesStructOrUnion(CTypesBaseStructOrUnion): __slots__ = ['_blob'] _ctype = struct_or_union _reftypename = '%s &' % (name,) _kind = kind = kind1 # CTypesStructOrUnion._fix_class() return CTypesStructOrUnion def new_struct_type(self, name): return self._new_struct_or_union('struct', name, ctypes.Structure) def new_union_type(self, name): return 
self._new_struct_or_union('union', name, ctypes.Union) def complete_struct_or_union(self, CTypesStructOrUnion, fields, tp, totalsize=-1, totalalignment=-1, sflags=0, pack=0): if totalsize >= 0 or totalalignment >= 0: raise NotImplementedError("the ctypes backend of CFFI does not support " "structures completed by verify(); please " "compile and install the _cffi_backend module.") struct_or_union = CTypesStructOrUnion._ctype fnames = [fname for (fname, BField, bitsize) in fields] btypes = [BField for (fname, BField, bitsize) in fields] bitfields = [bitsize for (fname, BField, bitsize) in fields] # bfield_types = {} cfields = [] for (fname, BField, bitsize) in fields: if bitsize < 0: cfields.append((fname, BField._ctype)) bfield_types[fname] = BField else: cfields.append((fname, BField._ctype, bitsize)) bfield_types[fname] = Ellipsis if sflags & 8: struct_or_union._pack_ = 1 elif pack: struct_or_union._pack_ = pack struct_or_union._fields_ = cfields CTypesStructOrUnion._bfield_types = bfield_types # @staticmethod def _create_ctype_obj(init): result = struct_or_union() if init is not None: initialize(result, init) return result CTypesStructOrUnion._create_ctype_obj = _create_ctype_obj # def initialize(blob, init): if is_union: if len(init) > 1: raise ValueError("union initializer: %d items given, but " "only one supported (use a dict if needed)" % (len(init),)) if not isinstance(init, dict): if isinstance(init, (bytes, unicode)): raise TypeError("union initializer: got a str") init = tuple(init) if len(init) > len(fnames): raise ValueError("too many values for %s initializer" % CTypesStructOrUnion._get_c_name()) init = dict(zip(fnames, init)) addr = ctypes.addressof(blob) for fname, value in init.items(): BField, bitsize = name2fieldtype[fname] assert bitsize < 0, \ "not implemented: initializer with bit fields" offset = CTypesStructOrUnion._offsetof(fname) PTR = ctypes.POINTER(BField._ctype) p = ctypes.cast(addr + offset, PTR) BField._initialize(p.contents, value) is_union = CTypesStructOrUnion._kind == 'union' name2fieldtype = dict(zip(fnames, zip(btypes, bitfields))) # for fname, BField, bitsize in fields: if fname == '': raise NotImplementedError("nested anonymous structs/unions") if hasattr(CTypesStructOrUnion, fname): raise ValueError("the field name %r conflicts in " "the ctypes backend" % fname) if bitsize < 0: def getter(self, fname=fname, BField=BField, offset=CTypesStructOrUnion._offsetof(fname), PTR=ctypes.POINTER(BField._ctype)): addr = ctypes.addressof(self._blob) p = ctypes.cast(addr + offset, PTR) return BField._from_ctypes(p.contents) def setter(self, value, fname=fname, BField=BField): setattr(self._blob, fname, BField._to_ctypes(value)) # if issubclass(BField, CTypesGenericArray): setter = None if BField._declared_length == 0: def getter(self, fname=fname, BFieldPtr=BField._CTPtr, offset=CTypesStructOrUnion._offsetof(fname), PTR=ctypes.POINTER(BField._ctype)): addr = ctypes.addressof(self._blob) p = ctypes.cast(addr + offset, PTR) return BFieldPtr._from_ctypes(p) # else: def getter(self, fname=fname, BField=BField): return BField._from_ctypes(getattr(self._blob, fname)) def setter(self, value, fname=fname, BField=BField): # xxx obscure workaround value = BField._to_ctypes(value) oldvalue = getattr(self._blob, fname) setattr(self._blob, fname, value) if value != getattr(self._blob, fname): setattr(self._blob, fname, oldvalue) raise OverflowError("value too large for bitfield") setattr(CTypesStructOrUnion, fname, property(getter, setter)) # CTypesPtr = 
self.ffi._get_cached_btype(model.PointerType(tp)) for fname in fnames: if hasattr(CTypesPtr, fname): raise ValueError("the field name %r conflicts in " "the ctypes backend" % fname) def getter(self, fname=fname): return getattr(self[0], fname) def setter(self, value, fname=fname): setattr(self[0], fname, value) setattr(CTypesPtr, fname, property(getter, setter)) def new_function_type(self, BArgs, BResult, has_varargs): nameargs = [BArg._get_c_name() for BArg in BArgs] if has_varargs: nameargs.append('...') nameargs = ', '.join(nameargs) # class CTypesFunctionPtr(CTypesGenericPtr): __slots__ = ['_own_callback', '_name'] _ctype = ctypes.CFUNCTYPE(getattr(BResult, '_ctype', None), *[BArg._ctype for BArg in BArgs], use_errno=True) _reftypename = BResult._get_c_name('(* &)(%s)' % (nameargs,)) def __init__(self, init, error=None): # create a callback to the Python callable init() import traceback assert not has_varargs, "varargs not supported for callbacks" if getattr(BResult, '_ctype', None) is not None: error = BResult._from_ctypes( BResult._create_ctype_obj(error)) else: error = None def callback(*args): args2 = [] for arg, BArg in zip(args, BArgs): args2.append(BArg._from_ctypes(arg)) try: res2 = init(*args2) res2 = BResult._to_ctypes(res2) except: traceback.print_exc() res2 = error if issubclass(BResult, CTypesGenericPtr): if res2: res2 = ctypes.cast(res2, ctypes.c_void_p).value # .value: http://bugs.python.org/issue1574593 else: res2 = None #print repr(res2) return res2 if issubclass(BResult, CTypesGenericPtr): # The only pointers callbacks can return are void*s: # http://bugs.python.org/issue5710 callback_ctype = ctypes.CFUNCTYPE( ctypes.c_void_p, *[BArg._ctype for BArg in BArgs], use_errno=True) else: callback_ctype = CTypesFunctionPtr._ctype self._as_ctype_ptr = callback_ctype(callback) self._address = ctypes.cast(self._as_ctype_ptr, ctypes.c_void_p).value self._own_callback = init @staticmethod def _initialize(ctypes_ptr, value): if value: raise NotImplementedError("ctypes backend: not supported: " "initializers for function pointers") def __repr__(self): c_name = getattr(self, '_name', None) if c_name: i = self._reftypename.index('(* &)') if self._reftypename[i-1] not in ' )*': c_name = ' ' + c_name c_name = self._reftypename.replace('(* &)', c_name) return CTypesData.__repr__(self, c_name) def _get_own_repr(self): if getattr(self, '_own_callback', None) is not None: return 'calling %r' % (self._own_callback,) return super(CTypesFunctionPtr, self)._get_own_repr() def __call__(self, *args): if has_varargs: assert len(args) >= len(BArgs) extraargs = args[len(BArgs):] args = args[:len(BArgs)] else: assert len(args) == len(BArgs) ctypes_args = [] for arg, BArg in zip(args, BArgs): ctypes_args.append(BArg._arg_to_ctypes(arg)) if has_varargs: for i, arg in enumerate(extraargs): if arg is None: ctypes_args.append(ctypes.c_void_p(0)) # NULL continue if not isinstance(arg, CTypesData): raise TypeError( "argument %d passed in the variadic part " "needs to be a cdata object (got %s)" % (1 + len(BArgs) + i, type(arg).__name__)) ctypes_args.append(arg._arg_to_ctypes(arg)) result = self._as_ctype_ptr(*ctypes_args) return BResult._from_ctypes(result) # CTypesFunctionPtr._fix_class() return CTypesFunctionPtr def new_enum_type(self, name, enumerators, enumvalues, CTypesInt): assert isinstance(name, str) reverse_mapping = dict(zip(reversed(enumvalues), reversed(enumerators))) # class CTypesEnum(CTypesInt): __slots__ = [] _reftypename = '%s &' % name def _get_own_repr(self): value = self._value try: 
return '%d: %s' % (value, reverse_mapping[value]) except KeyError: return str(value) def _to_string(self, maxlen): value = self._value try: return reverse_mapping[value] except KeyError: return str(value) # CTypesEnum._fix_class() return CTypesEnum def get_errno(self): return ctypes.get_errno() def set_errno(self, value): ctypes.set_errno(value) def string(self, b, maxlen=-1): return b._to_string(maxlen) def buffer(self, bptr, size=-1): raise NotImplementedError("buffer() with ctypes backend") def sizeof(self, cdata_or_BType): if isinstance(cdata_or_BType, CTypesData): return cdata_or_BType._get_size_of_instance() else: assert issubclass(cdata_or_BType, CTypesData) return cdata_or_BType._get_size() def alignof(self, BType): assert issubclass(BType, CTypesData) return BType._alignment() def newp(self, BType, source): if not issubclass(BType, CTypesData): raise TypeError return BType._newp(source) def cast(self, BType, source): return BType._cast_from(source) def callback(self, BType, source, error, onerror): assert onerror is None # XXX not implemented return BType(source, error) _weakref_cache_ref = None def gcp(self, cdata, destructor, size=0): if self._weakref_cache_ref is None: import weakref class MyRef(weakref.ref): def __eq__(self, other): myref = self() return self is other or ( myref is not None and myref is other()) def __ne__(self, other): return not (self == other) def __hash__(self): try: return self._hash except AttributeError: self._hash = hash(self()) return self._hash self._weakref_cache_ref = {}, MyRef weak_cache, MyRef = self._weakref_cache_ref if destructor is None: try: del weak_cache[MyRef(cdata)] except KeyError: raise TypeError("Can remove destructor only on a object " "previously returned by ffi.gc()") return None def remove(k): cdata, destructor = weak_cache.pop(k, (None, None)) if destructor is not None: destructor(cdata) new_cdata = self.cast(self.typeof(cdata), cdata) assert new_cdata is not cdata weak_cache[MyRef(new_cdata, remove)] = (cdata, destructor) return new_cdata typeof = type def getcname(self, BType, replace_with): return BType._get_c_name(replace_with) def typeoffsetof(self, BType, fieldname, num=0): if isinstance(fieldname, str): if num == 0 and issubclass(BType, CTypesGenericPtr): BType = BType._BItem if not issubclass(BType, CTypesBaseStructOrUnion): raise TypeError("expected a struct or union ctype") BField = BType._bfield_types[fieldname] if BField is Ellipsis: raise TypeError("not supported for bitfields") return (BField, BType._offsetof(fieldname)) elif isinstance(fieldname, (int, long)): if issubclass(BType, CTypesGenericArray): BType = BType._CTPtr if not issubclass(BType, CTypesGenericPtr): raise TypeError("expected an array or ptr ctype") BItem = BType._BItem offset = BItem._get_size() * fieldname if offset > sys.maxsize: raise OverflowError return (BItem, offset) else: raise TypeError(type(fieldname)) def rawaddressof(self, BTypePtr, cdata, offset=None): if isinstance(cdata, CTypesBaseStructOrUnion): ptr = ctypes.pointer(type(cdata)._to_ctypes(cdata)) elif isinstance(cdata, CTypesGenericPtr): if offset is None or not issubclass(type(cdata)._BItem, CTypesBaseStructOrUnion): raise TypeError("unexpected cdata type") ptr = type(cdata)._to_ctypes(cdata) elif isinstance(cdata, CTypesGenericArray): ptr = type(cdata)._to_ctypes(cdata) else: raise TypeError("expected a <cdata 'struct-or-union'>") if offset: ptr = ctypes.cast( ctypes.c_void_p( ctypes.cast(ptr, ctypes.c_void_p).value + offset), type(ptr)) return BTypePtr._from_ctypes(ptr) class 
CTypesLibrary(object): def __init__(self, backend, cdll): self.backend = backend self.cdll = cdll def load_function(self, BType, name): c_func = getattr(self.cdll, name) funcobj = BType._from_ctypes(c_func) funcobj._name = name return funcobj def read_variable(self, BType, name): try: ctypes_obj = BType._ctype.in_dll(self.cdll, name) except AttributeError as e: raise NotImplementedError(e) return BType._from_ctypes(ctypes_obj) def write_variable(self, BType, name, value): new_ctypes_obj = BType._to_ctypes(value) ctypes_obj = BType._ctype.in_dll(self.cdll, name) ctypes.memmove(ctypes.addressof(ctypes_obj), ctypes.addressof(new_ctypes_obj), ctypes.sizeof(BType._ctype))
42,454
Python
36.838681
86
0.49046
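A minimal usage sketch for the ctypes backend defined in backend_ctypes.py above, assuming cffi's public FFI(backend=...) hook and a POSIX system where dlopen(None) exposes the C standard library; the atoi call is illustrative only.

from cffi import FFI
from cffi.backend_ctypes import CTypesBackend

ffi = FFI(backend=CTypesBackend())      # bypass _cffi_backend, use pure ctypes
ffi.cdef("int atoi(const char *s);")
lib = ffi.dlopen(None)                  # POSIX: load the C standard library
print(lib.atoi(b"42"))                  # -> 42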
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/vengine_cpy.py
# # DEPRECATED: implementation for ffi.verify() # import sys, imp from . import model from .error import VerificationError class VCPythonEngine(object): _class_key = 'x' _gen_python_module = True def __init__(self, verifier): self.verifier = verifier self.ffi = verifier.ffi self._struct_pending_verification = {} self._types_of_builtin_functions = {} def patch_extension_kwds(self, kwds): pass def find_module(self, module_name, path, so_suffixes): try: f, filename, descr = imp.find_module(module_name, path) except ImportError: return None if f is not None: f.close() # Note that after a setuptools installation, there are both .py # and .so files with the same basename. The code here relies on # imp.find_module() locating the .so in priority. if descr[0] not in so_suffixes: return None return filename def collect_types(self): self._typesdict = {} self._generate("collecttype") def _prnt(self, what=''): self._f.write(what + '\n') def _gettypenum(self, type): # a KeyError here is a bug. please report it! :-) return self._typesdict[type] def _do_collect_type(self, tp): if ((not isinstance(tp, model.PrimitiveType) or tp.name == 'long double') and tp not in self._typesdict): num = len(self._typesdict) self._typesdict[tp] = num def write_source_to_f(self): self.collect_types() # # The new module will have a _cffi_setup() function that receives # objects from the ffi world, and that calls some setup code in # the module. This setup code is split in several independent # functions, e.g. one per constant. The functions are "chained" # by ending in a tail call to each other. # # This is further split in two chained lists, depending on if we # can do it at import-time or if we must wait for _cffi_setup() to # provide us with the <ctype> objects. This is needed because we # need the values of the enum constants in order to build the # <ctype 'enum'> that we may have to pass to _cffi_setup(). # # The following two 'chained_list_constants' items contains # the head of these two chained lists, as a string that gives the # call to do, if any. self._chained_list_constants = ['((void)lib,0)', '((void)lib,0)'] # prnt = self._prnt # first paste some standard set of lines that are mostly '#define' prnt(cffimod_header) prnt() # then paste the C source given by the user, verbatim. prnt(self.verifier.preamble) prnt() # # call generate_cpy_xxx_decl(), for every xxx found from # ffi._parser._declarations. This generates all the functions. self._generate("decl") # # implement the function _cffi_setup_custom() as calling the # head of the chained list. self._generate_setup_custom() prnt() # # produce the method table, including the entries for the # generated Python->C function wrappers, which are done # by generate_cpy_function_method(). prnt('static PyMethodDef _cffi_methods[] = {') self._generate("method") prnt(' {"_cffi_setup", _cffi_setup, METH_VARARGS, NULL},') prnt(' {NULL, NULL, 0, NULL} /* Sentinel */') prnt('};') prnt() # # standard init. 
modname = self.verifier.get_module_name() constants = self._chained_list_constants[False] prnt('#if PY_MAJOR_VERSION >= 3') prnt() prnt('static struct PyModuleDef _cffi_module_def = {') prnt(' PyModuleDef_HEAD_INIT,') prnt(' "%s",' % modname) prnt(' NULL,') prnt(' -1,') prnt(' _cffi_methods,') prnt(' NULL, NULL, NULL, NULL') prnt('};') prnt() prnt('PyMODINIT_FUNC') prnt('PyInit_%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') prnt(' lib = PyModule_Create(&_cffi_module_def);') prnt(' if (lib == NULL)') prnt(' return NULL;') prnt(' if (%s < 0 || _cffi_init() < 0) {' % (constants,)) prnt(' Py_DECREF(lib);') prnt(' return NULL;') prnt(' }') prnt(' return lib;') prnt('}') prnt() prnt('#else') prnt() prnt('PyMODINIT_FUNC') prnt('init%s(void)' % modname) prnt('{') prnt(' PyObject *lib;') prnt(' lib = Py_InitModule("%s", _cffi_methods);' % modname) prnt(' if (lib == NULL)') prnt(' return;') prnt(' if (%s < 0 || _cffi_init() < 0)' % (constants,)) prnt(' return;') prnt(' return;') prnt('}') prnt() prnt('#endif') def load_library(self, flags=None): # XXX review all usages of 'self' here! # import it as a new extension module imp.acquire_lock() try: if hasattr(sys, "getdlopenflags"): previous_flags = sys.getdlopenflags() try: if hasattr(sys, "setdlopenflags") and flags is not None: sys.setdlopenflags(flags) module = imp.load_dynamic(self.verifier.get_module_name(), self.verifier.modulefilename) except ImportError as e: error = "importing %r: %s" % (self.verifier.modulefilename, e) raise VerificationError(error) finally: if hasattr(sys, "setdlopenflags"): sys.setdlopenflags(previous_flags) finally: imp.release_lock() # # call loading_cpy_struct() to get the struct layout inferred by # the C compiler self._load(module, 'loading') # # the C code will need the <ctype> objects. Collect them in # order in a list. revmapping = dict([(value, key) for (key, value) in self._typesdict.items()]) lst = [revmapping[i] for i in range(len(revmapping))] lst = list(map(self.ffi._get_cached_btype, lst)) # # build the FFILibrary class and instance and call _cffi_setup(). # this will set up some fields like '_cffi_types', and only then # it will invoke the chained list of functions that will really # build (notably) the constant objects, as <cdata> if they are # pointers, and store them as attributes on the 'library' object. class FFILibrary(object): _cffi_python_module = module _cffi_ffi = self.ffi _cffi_dir = [] def __dir__(self): return FFILibrary._cffi_dir + list(self.__dict__) library = FFILibrary() if module._cffi_setup(lst, VerificationError, library): import warnings warnings.warn("reimporting %r might overwrite older definitions" % (self.verifier.get_module_name())) # # finally, call the loaded_cpy_xxx() functions. This will perform # the final adjustments, like copying the Python->C wrapper # functions from the module to the 'library' object, and setting # up the FFILibrary class with properties for the global C variables. 
self._load(module, 'loaded', library=library) module._cffi_original_ffi = self.ffi module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions return library def _get_declarations(self): lst = [(key, tp) for (key, (tp, qual)) in self.ffi._parser._declarations.items()] lst.sort() return lst def _generate(self, step_name): for name, tp in self._get_declarations(): kind, realname = name.split(' ', 1) try: method = getattr(self, '_generate_cpy_%s_%s' % (kind, step_name)) except AttributeError: raise VerificationError( "not implemented in verify(): %r" % name) try: method(tp, realname) except Exception as e: model.attach_exception_info(e, name) raise def _load(self, module, step_name, **kwds): for name, tp in self._get_declarations(): kind, realname = name.split(' ', 1) method = getattr(self, '_%s_cpy_%s' % (step_name, kind)) try: method(tp, realname, module, **kwds) except Exception as e: model.attach_exception_info(e, name) raise def _generate_nothing(self, tp, name): pass def _loaded_noop(self, tp, name, module, **kwds): pass # ---------- def _convert_funcarg_to_c(self, tp, fromvar, tovar, errcode): extraarg = '' if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': converter = '_cffi_to_c_int' extraarg = ', %s' % tp.name else: converter = '(%s)_cffi_to_c_%s' % (tp.get_c_name(''), tp.name.replace(' ', '_')) errvalue = '-1' # elif isinstance(tp, model.PointerType): self._convert_funcarg_to_c_ptr_or_array(tp, fromvar, tovar, errcode) return # elif isinstance(tp, (model.StructOrUnion, model.EnumType)): # a struct (not a struct pointer) as a function argument self._prnt(' if (_cffi_to_c((char *)&%s, _cffi_type(%d), %s) < 0)' % (tovar, self._gettypenum(tp), fromvar)) self._prnt(' %s;' % errcode) return # elif isinstance(tp, model.FunctionPtrType): converter = '(%s)_cffi_to_c_pointer' % tp.get_c_name('') extraarg = ', _cffi_type(%d)' % self._gettypenum(tp) errvalue = 'NULL' # else: raise NotImplementedError(tp) # self._prnt(' %s = %s(%s%s);' % (tovar, converter, fromvar, extraarg)) self._prnt(' if (%s == (%s)%s && PyErr_Occurred())' % ( tovar, tp.get_c_name(''), errvalue)) self._prnt(' %s;' % errcode) def _extra_local_variables(self, tp, localvars, freelines): if isinstance(tp, model.PointerType): localvars.add('Py_ssize_t datasize') localvars.add('struct _cffi_freeme_s *large_args_free = NULL') freelines.add('if (large_args_free != NULL)' ' _cffi_free_array_arguments(large_args_free);') def _convert_funcarg_to_c_ptr_or_array(self, tp, fromvar, tovar, errcode): self._prnt(' datasize = _cffi_prepare_pointer_call_argument(') self._prnt(' _cffi_type(%d), %s, (char **)&%s);' % ( self._gettypenum(tp), fromvar, tovar)) self._prnt(' if (datasize != 0) {') self._prnt(' %s = ((size_t)datasize) <= 640 ? 
' 'alloca((size_t)datasize) : NULL;' % (tovar,)) self._prnt(' if (_cffi_convert_array_argument(_cffi_type(%d), %s, ' '(char **)&%s,' % (self._gettypenum(tp), fromvar, tovar)) self._prnt(' datasize, &large_args_free) < 0)') self._prnt(' %s;' % errcode) self._prnt(' }') def _convert_expr_from_c(self, tp, var, context): if isinstance(tp, model.PrimitiveType): if tp.is_integer_type() and tp.name != '_Bool': return '_cffi_from_c_int(%s, %s)' % (var, tp.name) elif tp.name != 'long double': return '_cffi_from_c_%s(%s)' % (tp.name.replace(' ', '_'), var) else: return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, (model.PointerType, model.FunctionPtrType)): return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.ArrayType): return '_cffi_from_c_pointer((char *)%s, _cffi_type(%d))' % ( var, self._gettypenum(model.PointerType(tp.item))) elif isinstance(tp, model.StructOrUnion): if tp.fldnames is None: raise TypeError("'%s' is used as %s, but is opaque" % ( tp._get_c_name(), context)) return '_cffi_from_c_struct((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) elif isinstance(tp, model.EnumType): return '_cffi_from_c_deref((char *)&%s, _cffi_type(%d))' % ( var, self._gettypenum(tp)) else: raise NotImplementedError(tp) # ---------- # typedefs: generates no code so far _generate_cpy_typedef_collecttype = _generate_nothing _generate_cpy_typedef_decl = _generate_nothing _generate_cpy_typedef_method = _generate_nothing _loading_cpy_typedef = _loaded_noop _loaded_cpy_typedef = _loaded_noop # ---------- # function declarations def _generate_cpy_function_collecttype(self, tp, name): assert isinstance(tp, model.FunctionPtrType) if tp.ellipsis: self._do_collect_type(tp) else: # don't call _do_collect_type(tp) in this common case, # otherwise test_autofilled_struct_as_argument fails for type in tp.args: self._do_collect_type(type) self._do_collect_type(tp.result) def _generate_cpy_function_decl(self, tp, name): assert isinstance(tp, model.FunctionPtrType) if tp.ellipsis: # cannot support vararg functions better than this: check for its # exact type (including the fixed arguments), and build it as a # constant function pointer (no CPython wrapper) self._generate_cpy_const(False, name, tp) return prnt = self._prnt numargs = len(tp.args) if numargs == 0: argname = 'noarg' elif numargs == 1: argname = 'arg0' else: argname = 'args' prnt('static PyObject *') prnt('_cffi_f_%s(PyObject *self, PyObject *%s)' % (name, argname)) prnt('{') # context = 'argument of %s' % name for i, type in enumerate(tp.args): prnt(' %s;' % type.get_c_name(' x%d' % i, context)) # localvars = set() freelines = set() for type in tp.args: self._extra_local_variables(type, localvars, freelines) for decl in sorted(localvars): prnt(' %s;' % (decl,)) # if not isinstance(tp.result, model.VoidType): result_code = 'result = ' context = 'result of %s' % name prnt(' %s;' % tp.result.get_c_name(' result', context)) prnt(' PyObject *pyresult;') else: result_code = '' # if len(tp.args) > 1: rng = range(len(tp.args)) for i in rng: prnt(' PyObject *arg%d;' % i) prnt() prnt(' if (!PyArg_ParseTuple(args, "%s:%s", %s))' % ( 'O' * numargs, name, ', '.join(['&arg%d' % i for i in rng]))) prnt(' return NULL;') prnt() # for i, type in enumerate(tp.args): self._convert_funcarg_to_c(type, 'arg%d' % i, 'x%d' % i, 'return NULL') prnt() # prnt(' Py_BEGIN_ALLOW_THREADS') prnt(' _cffi_restore_errno();') prnt(' { %s%s(%s); }' % ( result_code, name, 
', '.join(['x%d' % i for i in range(len(tp.args))]))) prnt(' _cffi_save_errno();') prnt(' Py_END_ALLOW_THREADS') prnt() # prnt(' (void)self; /* unused */') if numargs == 0: prnt(' (void)noarg; /* unused */') if result_code: prnt(' pyresult = %s;' % self._convert_expr_from_c(tp.result, 'result', 'result type')) for freeline in freelines: prnt(' ' + freeline) prnt(' return pyresult;') else: for freeline in freelines: prnt(' ' + freeline) prnt(' Py_INCREF(Py_None);') prnt(' return Py_None;') prnt('}') prnt() def _generate_cpy_function_method(self, tp, name): if tp.ellipsis: return numargs = len(tp.args) if numargs == 0: meth = 'METH_NOARGS' elif numargs == 1: meth = 'METH_O' else: meth = 'METH_VARARGS' self._prnt(' {"%s", _cffi_f_%s, %s, NULL},' % (name, name, meth)) _loading_cpy_function = _loaded_noop def _loaded_cpy_function(self, tp, name, module, library): if tp.ellipsis: return func = getattr(module, name) setattr(library, name, func) self._types_of_builtin_functions[func] = tp # ---------- # named structs _generate_cpy_struct_collecttype = _generate_nothing def _generate_cpy_struct_decl(self, tp, name): assert name == tp.name self._generate_struct_or_union_decl(tp, 'struct', name) def _generate_cpy_struct_method(self, tp, name): self._generate_struct_or_union_method(tp, 'struct', name) def _loading_cpy_struct(self, tp, name, module): self._loading_struct_or_union(tp, 'struct', name, module) def _loaded_cpy_struct(self, tp, name, module, **kwds): self._loaded_struct_or_union(tp) _generate_cpy_union_collecttype = _generate_nothing def _generate_cpy_union_decl(self, tp, name): assert name == tp.name self._generate_struct_or_union_decl(tp, 'union', name) def _generate_cpy_union_method(self, tp, name): self._generate_struct_or_union_method(tp, 'union', name) def _loading_cpy_union(self, tp, name, module): self._loading_struct_or_union(tp, 'union', name, module) def _loaded_cpy_union(self, tp, name, module, **kwds): self._loaded_struct_or_union(tp) def _generate_struct_or_union_decl(self, tp, prefix, name): if tp.fldnames is None: return # nothing to do with opaque structs checkfuncname = '_cffi_check_%s_%s' % (prefix, name) layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) cname = ('%s %s' % (prefix, name)).strip() # prnt = self._prnt prnt('static void %s(%s *p)' % (checkfuncname, cname)) prnt('{') prnt(' /* only to generate compile-time warnings or errors */') prnt(' (void)p;') for fname, ftype, fbitsize, fqual in tp.enumfields(): if (isinstance(ftype, model.PrimitiveType) and ftype.is_integer_type()) or fbitsize >= 0: # accept all integers, but complain on float or double prnt(' (void)((p->%s) << 1);' % fname) else: # only accept exactly the type declared. 
try: prnt(' { %s = &p->%s; (void)tmp; }' % ( ftype.get_c_name('*tmp', 'field %r'%fname, quals=fqual), fname)) except VerificationError as e: prnt(' /* %s */' % str(e)) # cannot verify it, ignore prnt('}') prnt('static PyObject *') prnt('%s(PyObject *self, PyObject *noarg)' % (layoutfuncname,)) prnt('{') prnt(' struct _cffi_aligncheck { char x; %s y; };' % cname) prnt(' static Py_ssize_t nums[] = {') prnt(' sizeof(%s),' % cname) prnt(' offsetof(struct _cffi_aligncheck, y),') for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now prnt(' offsetof(%s, %s),' % (cname, fname)) if isinstance(ftype, model.ArrayType) and ftype.length is None: prnt(' 0, /* %s */' % ftype._get_c_name()) else: prnt(' sizeof(((%s *)0)->%s),' % (cname, fname)) prnt(' -1') prnt(' };') prnt(' (void)self; /* unused */') prnt(' (void)noarg; /* unused */') prnt(' return _cffi_get_struct_layout(nums);') prnt(' /* the next line is not executed, but compiled */') prnt(' %s(0);' % (checkfuncname,)) prnt('}') prnt() def _generate_struct_or_union_method(self, tp, prefix, name): if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) self._prnt(' {"%s", %s, METH_NOARGS, NULL},' % (layoutfuncname, layoutfuncname)) def _loading_struct_or_union(self, tp, prefix, name, module): if tp.fldnames is None: return # nothing to do with opaque structs layoutfuncname = '_cffi_layout_%s_%s' % (prefix, name) # function = getattr(module, layoutfuncname) layout = function() if isinstance(tp, model.StructOrUnion) and tp.partial: # use the function()'s sizes and offsets to guide the # layout of the struct totalsize = layout[0] totalalignment = layout[1] fieldofs = layout[2::2] fieldsize = layout[3::2] tp.force_flatten() assert len(fieldofs) == len(fieldsize) == len(tp.fldnames) tp.fixedlayout = fieldofs, fieldsize, totalsize, totalalignment else: cname = ('%s %s' % (prefix, name)).strip() self._struct_pending_verification[tp] = layout, cname def _loaded_struct_or_union(self, tp): if tp.fldnames is None: return # nothing to do with opaque structs self.ffi._get_cached_btype(tp) # force 'fixedlayout' to be considered if tp in self._struct_pending_verification: # check that the layout sizes and offsets match the real ones def check(realvalue, expectedvalue, msg): if realvalue != expectedvalue: raise VerificationError( "%s (we have %d, but C compiler says %d)" % (msg, expectedvalue, realvalue)) ffi = self.ffi BStruct = ffi._get_cached_btype(tp) layout, cname = self._struct_pending_verification.pop(tp) check(layout[0], ffi.sizeof(BStruct), "wrong total size") check(layout[1], ffi.alignof(BStruct), "wrong total alignment") i = 2 for fname, ftype, fbitsize, fqual in tp.enumfields(): if fbitsize >= 0: continue # xxx ignore fbitsize for now check(layout[i], ffi.offsetof(BStruct, fname), "wrong offset for field %r" % (fname,)) if layout[i+1] != 0: BField = ffi._get_cached_btype(ftype) check(layout[i+1], ffi.sizeof(BField), "wrong size for field %r" % (fname,)) i += 2 assert i == len(layout) # ---------- # 'anonymous' declarations. These are produced for anonymous structs # or unions; the 'name' is obtained by a typedef. 
_generate_cpy_anonymous_collecttype = _generate_nothing def _generate_cpy_anonymous_decl(self, tp, name): if isinstance(tp, model.EnumType): self._generate_cpy_enum_decl(tp, name, '') else: self._generate_struct_or_union_decl(tp, '', name) def _generate_cpy_anonymous_method(self, tp, name): if not isinstance(tp, model.EnumType): self._generate_struct_or_union_method(tp, '', name) def _loading_cpy_anonymous(self, tp, name, module): if isinstance(tp, model.EnumType): self._loading_cpy_enum(tp, name, module) else: self._loading_struct_or_union(tp, '', name, module) def _loaded_cpy_anonymous(self, tp, name, module, **kwds): if isinstance(tp, model.EnumType): self._loaded_cpy_enum(tp, name, module, **kwds) else: self._loaded_struct_or_union(tp) # ---------- # constants, likely declared with '#define' def _generate_cpy_const(self, is_int, name, tp=None, category='const', vartp=None, delayed=True, size_too=False, check_value=None): prnt = self._prnt funcname = '_cffi_%s_%s' % (category, name) prnt('static int %s(PyObject *lib)' % funcname) prnt('{') prnt(' PyObject *o;') prnt(' int res;') if not is_int: prnt(' %s;' % (vartp or tp).get_c_name(' i', name)) else: assert category == 'const' # if check_value is not None: self._check_int_constant_value(name, check_value) # if not is_int: if category == 'var': realexpr = '&' + name else: realexpr = name prnt(' i = (%s);' % (realexpr,)) prnt(' o = %s;' % (self._convert_expr_from_c(tp, 'i', 'variable type'),)) assert delayed else: prnt(' o = _cffi_from_c_int_const(%s);' % name) prnt(' if (o == NULL)') prnt(' return -1;') if size_too: prnt(' {') prnt(' PyObject *o1 = o;') prnt(' o = Py_BuildValue("On", o1, (Py_ssize_t)sizeof(%s));' % (name,)) prnt(' Py_DECREF(o1);') prnt(' if (o == NULL)') prnt(' return -1;') prnt(' }') prnt(' res = PyObject_SetAttrString(lib, "%s", o);' % name) prnt(' Py_DECREF(o);') prnt(' if (res < 0)') prnt(' return -1;') prnt(' return %s;' % self._chained_list_constants[delayed]) self._chained_list_constants[delayed] = funcname + '(lib)' prnt('}') prnt() def _generate_cpy_constant_collecttype(self, tp, name): is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() if not is_int: self._do_collect_type(tp) def _generate_cpy_constant_decl(self, tp, name): is_int = isinstance(tp, model.PrimitiveType) and tp.is_integer_type() self._generate_cpy_const(is_int, name, tp) _generate_cpy_constant_method = _generate_nothing _loading_cpy_constant = _loaded_noop _loaded_cpy_constant = _loaded_noop # ---------- # enums def _check_int_constant_value(self, name, value, err_prefix=''): prnt = self._prnt if value <= 0: prnt(' if ((%s) > 0 || (long)(%s) != %dL) {' % ( name, name, value)) else: prnt(' if ((%s) <= 0 || (unsigned long)(%s) != %dUL) {' % ( name, name, value)) prnt(' char buf[64];') prnt(' if ((%s) <= 0)' % name) prnt(' snprintf(buf, 63, "%%ld", (long)(%s));' % name) prnt(' else') prnt(' snprintf(buf, 63, "%%lu", (unsigned long)(%s));' % name) prnt(' PyErr_Format(_cffi_VerificationError,') prnt(' "%s%s has the real value %s, not %s",') prnt(' "%s", "%s", buf, "%d");' % ( err_prefix, name, value)) prnt(' return -1;') prnt(' }') def _enum_funcname(self, prefix, name): # "$enum_$1" => "___D_enum____D_1" name = name.replace('$', '___D_') return '_cffi_e_%s_%s' % (prefix, name) def _generate_cpy_enum_decl(self, tp, name, prefix='enum'): if tp.partial: for enumerator in tp.enumerators: self._generate_cpy_const(True, enumerator, delayed=False) return # funcname = self._enum_funcname(prefix, name) prnt = self._prnt prnt('static int 
%s(PyObject *lib)' % funcname) prnt('{') for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): self._check_int_constant_value(enumerator, enumvalue, "enum %s: " % name) prnt(' return %s;' % self._chained_list_constants[True]) self._chained_list_constants[True] = funcname + '(lib)' prnt('}') prnt() _generate_cpy_enum_collecttype = _generate_nothing _generate_cpy_enum_method = _generate_nothing def _loading_cpy_enum(self, tp, name, module): if tp.partial: enumvalues = [getattr(module, enumerator) for enumerator in tp.enumerators] tp.enumvalues = tuple(enumvalues) tp.partial_resolved = True def _loaded_cpy_enum(self, tp, name, module, library): for enumerator, enumvalue in zip(tp.enumerators, tp.enumvalues): setattr(library, enumerator, enumvalue) # ---------- # macros: for now only for integers def _generate_cpy_macro_decl(self, tp, name): if tp == '...': check_value = None else: check_value = tp # an integer self._generate_cpy_const(True, name, check_value=check_value) _generate_cpy_macro_collecttype = _generate_nothing _generate_cpy_macro_method = _generate_nothing _loading_cpy_macro = _loaded_noop _loaded_cpy_macro = _loaded_noop # ---------- # global variables def _generate_cpy_variable_collecttype(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) else: tp_ptr = model.PointerType(tp) self._do_collect_type(tp_ptr) def _generate_cpy_variable_decl(self, tp, name): if isinstance(tp, model.ArrayType): tp_ptr = model.PointerType(tp.item) self._generate_cpy_const(False, name, tp, vartp=tp_ptr, size_too = tp.length_is_unknown()) else: tp_ptr = model.PointerType(tp) self._generate_cpy_const(False, name, tp_ptr, category='var') _generate_cpy_variable_method = _generate_nothing _loading_cpy_variable = _loaded_noop def _loaded_cpy_variable(self, tp, name, module, library): value = getattr(library, name) if isinstance(tp, model.ArrayType): # int a[5] is "constant" in the # sense that "a=..." is forbidden if tp.length_is_unknown(): assert isinstance(value, tuple) (value, size) = value BItemType = self.ffi._get_cached_btype(tp.item) length, rest = divmod(size, self.ffi.sizeof(BItemType)) if rest != 0: raise VerificationError( "bad size: %r does not seem to be an array of %s" % (name, tp.item)) tp = tp.resolve_length(length) # 'value' is a <cdata 'type *'> which we have to replace with # a <cdata 'type[N]'> if the N is actually known if tp.length is not None: BArray = self.ffi._get_cached_btype(tp) value = self.ffi.cast(BArray, value) setattr(library, name, value) return # remove ptr=<cdata 'int *'> from the library instance, and replace # it by a property on the class, which reads/writes into ptr[0]. 
ptr = value delattr(library, name) def getter(library): return ptr[0] def setter(library, value): ptr[0] = value setattr(type(library), name, property(getter, setter)) type(library)._cffi_dir.append(name) # ---------- def _generate_setup_custom(self): prnt = self._prnt prnt('static int _cffi_setup_custom(PyObject *lib)') prnt('{') prnt(' return %s;' % self._chained_list_constants[True]) prnt('}') cffimod_header = r''' #include <Python.h> #include <stddef.h> /* this block of #ifs should be kept exactly identical between c/_cffi_backend.c, cffi/vengine_cpy.py, cffi/vengine_gen.py and cffi/_cffi_include.h */ #if defined(_MSC_VER) # include <malloc.h> /* for alloca() */ # if _MSC_VER < 1600 /* MSVC < 2010 */ typedef __int8 int8_t; typedef __int16 int16_t; typedef __int32 int32_t; typedef __int64 int64_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; typedef unsigned __int64 uint64_t; typedef __int8 int_least8_t; typedef __int16 int_least16_t; typedef __int32 int_least32_t; typedef __int64 int_least64_t; typedef unsigned __int8 uint_least8_t; typedef unsigned __int16 uint_least16_t; typedef unsigned __int32 uint_least32_t; typedef unsigned __int64 uint_least64_t; typedef __int8 int_fast8_t; typedef __int16 int_fast16_t; typedef __int32 int_fast32_t; typedef __int64 int_fast64_t; typedef unsigned __int8 uint_fast8_t; typedef unsigned __int16 uint_fast16_t; typedef unsigned __int32 uint_fast32_t; typedef unsigned __int64 uint_fast64_t; typedef __int64 intmax_t; typedef unsigned __int64 uintmax_t; # else # include <stdint.h> # endif # if _MSC_VER < 1800 /* MSVC < 2013 */ # ifndef __cplusplus typedef unsigned char _Bool; # endif # endif #else # include <stdint.h> # if (defined (__SVR4) && defined (__sun)) || defined(_AIX) || defined(__hpux) # include <alloca.h> # endif #endif #if PY_MAJOR_VERSION < 3 # undef PyCapsule_CheckExact # undef PyCapsule_GetPointer # define PyCapsule_CheckExact(capsule) (PyCObject_Check(capsule)) # define PyCapsule_GetPointer(capsule, name) \ (PyCObject_AsVoidPtr(capsule)) #endif #if PY_MAJOR_VERSION >= 3 # define PyInt_FromLong PyLong_FromLong #endif #define _cffi_from_c_double PyFloat_FromDouble #define _cffi_from_c_float PyFloat_FromDouble #define _cffi_from_c_long PyInt_FromLong #define _cffi_from_c_ulong PyLong_FromUnsignedLong #define _cffi_from_c_longlong PyLong_FromLongLong #define _cffi_from_c_ulonglong PyLong_FromUnsignedLongLong #define _cffi_from_c__Bool PyBool_FromLong #define _cffi_to_c_double PyFloat_AsDouble #define _cffi_to_c_float PyFloat_AsDouble #define _cffi_from_c_int_const(x) \ (((x) > 0) ? \ ((unsigned long long)(x) <= (unsigned long long)LONG_MAX) ? \ PyInt_FromLong((long)(x)) : \ PyLong_FromUnsignedLongLong((unsigned long long)(x)) : \ ((long long)(x) >= (long long)LONG_MIN) ? \ PyInt_FromLong((long)(x)) : \ PyLong_FromLongLong((long long)(x))) #define _cffi_from_c_int(x, type) \ (((type)-1) > 0 ? /* unsigned */ \ (sizeof(type) < sizeof(long) ? \ PyInt_FromLong((long)x) : \ sizeof(type) == sizeof(long) ? \ PyLong_FromUnsignedLong((unsigned long)x) : \ PyLong_FromUnsignedLongLong((unsigned long long)x)) : \ (sizeof(type) <= sizeof(long) ? \ PyInt_FromLong((long)x) : \ PyLong_FromLongLong((long long)x))) #define _cffi_to_c_int(o, type) \ ((type)( \ sizeof(type) == 1 ? (((type)-1) > 0 ? (type)_cffi_to_c_u8(o) \ : (type)_cffi_to_c_i8(o)) : \ sizeof(type) == 2 ? (((type)-1) > 0 ? (type)_cffi_to_c_u16(o) \ : (type)_cffi_to_c_i16(o)) : \ sizeof(type) == 4 ? (((type)-1) > 0 ? 
(type)_cffi_to_c_u32(o) \ : (type)_cffi_to_c_i32(o)) : \ sizeof(type) == 8 ? (((type)-1) > 0 ? (type)_cffi_to_c_u64(o) \ : (type)_cffi_to_c_i64(o)) : \ (Py_FatalError("unsupported size for type " #type), (type)0))) #define _cffi_to_c_i8 \ ((int(*)(PyObject *))_cffi_exports[1]) #define _cffi_to_c_u8 \ ((int(*)(PyObject *))_cffi_exports[2]) #define _cffi_to_c_i16 \ ((int(*)(PyObject *))_cffi_exports[3]) #define _cffi_to_c_u16 \ ((int(*)(PyObject *))_cffi_exports[4]) #define _cffi_to_c_i32 \ ((int(*)(PyObject *))_cffi_exports[5]) #define _cffi_to_c_u32 \ ((unsigned int(*)(PyObject *))_cffi_exports[6]) #define _cffi_to_c_i64 \ ((long long(*)(PyObject *))_cffi_exports[7]) #define _cffi_to_c_u64 \ ((unsigned long long(*)(PyObject *))_cffi_exports[8]) #define _cffi_to_c_char \ ((int(*)(PyObject *))_cffi_exports[9]) #define _cffi_from_c_pointer \ ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[10]) #define _cffi_to_c_pointer \ ((char *(*)(PyObject *, CTypeDescrObject *))_cffi_exports[11]) #define _cffi_get_struct_layout \ ((PyObject *(*)(Py_ssize_t[]))_cffi_exports[12]) #define _cffi_restore_errno \ ((void(*)(void))_cffi_exports[13]) #define _cffi_save_errno \ ((void(*)(void))_cffi_exports[14]) #define _cffi_from_c_char \ ((PyObject *(*)(char))_cffi_exports[15]) #define _cffi_from_c_deref \ ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[16]) #define _cffi_to_c \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[17]) #define _cffi_from_c_struct \ ((PyObject *(*)(char *, CTypeDescrObject *))_cffi_exports[18]) #define _cffi_to_c_wchar_t \ ((wchar_t(*)(PyObject *))_cffi_exports[19]) #define _cffi_from_c_wchar_t \ ((PyObject *(*)(wchar_t))_cffi_exports[20]) #define _cffi_to_c_long_double \ ((long double(*)(PyObject *))_cffi_exports[21]) #define _cffi_to_c__Bool \ ((_Bool(*)(PyObject *))_cffi_exports[22]) #define _cffi_prepare_pointer_call_argument \ ((Py_ssize_t(*)(CTypeDescrObject *, PyObject *, char **))_cffi_exports[23]) #define _cffi_convert_array_from_object \ ((int(*)(char *, CTypeDescrObject *, PyObject *))_cffi_exports[24]) #define _CFFI_NUM_EXPORTS 25 typedef struct _ctypedescr CTypeDescrObject; static void *_cffi_exports[_CFFI_NUM_EXPORTS]; static PyObject *_cffi_types, *_cffi_VerificationError; static int _cffi_setup_custom(PyObject *lib); /* forward */ static PyObject *_cffi_setup(PyObject *self, PyObject *args) { PyObject *library; int was_alive = (_cffi_types != NULL); (void)self; /* unused */ if (!PyArg_ParseTuple(args, "OOO", &_cffi_types, &_cffi_VerificationError, &library)) return NULL; Py_INCREF(_cffi_types); Py_INCREF(_cffi_VerificationError); if (_cffi_setup_custom(library) < 0) return NULL; return PyBool_FromLong(was_alive); } union _cffi_union_alignment_u { unsigned char m_char; unsigned short m_short; unsigned int m_int; unsigned long m_long; unsigned long long m_longlong; float m_float; double m_double; long double m_longdouble; }; struct _cffi_freeme_s { struct _cffi_freeme_s *next; union _cffi_union_alignment_u alignment; }; #ifdef __GNUC__ __attribute__((unused)) #endif static int _cffi_convert_array_argument(CTypeDescrObject *ctptr, PyObject *arg, char **output_data, Py_ssize_t datasize, struct _cffi_freeme_s **freeme) { char *p; if (datasize < 0) return -1; p = *output_data; if (p == NULL) { struct _cffi_freeme_s *fp = (struct _cffi_freeme_s *)PyObject_Malloc( offsetof(struct _cffi_freeme_s, alignment) + (size_t)datasize); if (fp == NULL) return -1; fp->next = *freeme; *freeme = fp; p = *output_data = (char *)&fp->alignment; } memset((void 
*)p, 0, (size_t)datasize); return _cffi_convert_array_from_object(p, ctptr, arg); } #ifdef __GNUC__ __attribute__((unused)) #endif static void _cffi_free_array_arguments(struct _cffi_freeme_s *freeme) { do { void *p = (void *)freeme; freeme = freeme->next; PyObject_Free(p); } while (freeme != NULL); } static int _cffi_init(void) { PyObject *module, *c_api_object = NULL; module = PyImport_ImportModule("_cffi_backend"); if (module == NULL) goto failure; c_api_object = PyObject_GetAttrString(module, "_C_API"); if (c_api_object == NULL) goto failure; if (!PyCapsule_CheckExact(c_api_object)) { PyErr_SetNone(PyExc_ImportError); goto failure; } memcpy(_cffi_exports, PyCapsule_GetPointer(c_api_object, "cffi"), _CFFI_NUM_EXPORTS * sizeof(void *)); Py_DECREF(module); Py_DECREF(c_api_object); return 0; failure: Py_XDECREF(module); Py_XDECREF(c_api_object); return -1; } #define _cffi_type(num) ((CTypeDescrObject *)PyList_GET_ITEM(_cffi_types, num)) /**********/ '''
43,320
Python
39.22377
80
0.514358
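A minimal sketch of the deprecated ffi.verify() flow that vengine_cpy.py implements; it assumes CPython, a working C compiler, and a matching _cffi_backend extension so that the CPython engine (rather than the generic one) is selected.

from cffi import FFI

ffi = FFI()
ffi.cdef("double sqrt(double x);")
lib = ffi.verify("#include <math.h>", libraries=["m"])   # write, compile, import
print(lib.sqrt(2.0))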
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/verifier.py
# # DEPRECATED: implementation for ffi.verify() # import sys, os, binascii, shutil, io from . import __version_verifier_modules__ from . import ffiplatform from .error import VerificationError if sys.version_info >= (3, 3): import importlib.machinery def _extension_suffixes(): return importlib.machinery.EXTENSION_SUFFIXES[:] else: import imp def _extension_suffixes(): return [suffix for suffix, _, type in imp.get_suffixes() if type == imp.C_EXTENSION] if sys.version_info >= (3,): NativeIO = io.StringIO else: class NativeIO(io.BytesIO): def write(self, s): if isinstance(s, unicode): s = s.encode('ascii') super(NativeIO, self).write(s) class Verifier(object): def __init__(self, ffi, preamble, tmpdir=None, modulename=None, ext_package=None, tag='', force_generic_engine=False, source_extension='.c', flags=None, relative_to=None, **kwds): if ffi._parser._uses_new_feature: raise VerificationError( "feature not supported with ffi.verify(), but only " "with ffi.set_source(): %s" % (ffi._parser._uses_new_feature,)) self.ffi = ffi self.preamble = preamble if not modulename: flattened_kwds = ffiplatform.flatten(kwds) vengine_class = _locate_engine_class(ffi, force_generic_engine) self._vengine = vengine_class(self) self._vengine.patch_extension_kwds(kwds) self.flags = flags self.kwds = self.make_relative_to(kwds, relative_to) # if modulename: if tag: raise TypeError("can't specify both 'modulename' and 'tag'") else: key = '\x00'.join(['%d.%d' % sys.version_info[:2], __version_verifier_modules__, preamble, flattened_kwds] + ffi._cdefsources) if sys.version_info >= (3,): key = key.encode('utf-8') k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff) k1 = k1.lstrip('0x').rstrip('L') k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff) k2 = k2.lstrip('0').rstrip('L') modulename = '_cffi_%s_%s%s%s' % (tag, self._vengine._class_key, k1, k2) suffix = _get_so_suffixes()[0] self.tmpdir = tmpdir or _caller_dir_pycache() self.sourcefilename = os.path.join(self.tmpdir, modulename + source_extension) self.modulefilename = os.path.join(self.tmpdir, modulename + suffix) self.ext_package = ext_package self._has_source = False self._has_module = False def write_source(self, file=None): """Write the C source code. It is produced in 'self.sourcefilename', which can be tweaked beforehand.""" with self.ffi._lock: if self._has_source and file is None: raise VerificationError( "source code already written") self._write_source(file) def compile_module(self): """Write the C source code (if not done already) and compile it. This produces a dynamic link library in 'self.modulefilename'.""" with self.ffi._lock: if self._has_module: raise VerificationError("module already compiled") if not self._has_source: self._write_source() self._compile_module() def load_library(self): """Get a C module from this Verifier instance. Returns an instance of a FFILibrary class that behaves like the objects returned by ffi.dlopen(), but that delegates all operations to the C module. If necessary, the C code is written and compiled first. 
""" with self.ffi._lock: if not self._has_module: self._locate_module() if not self._has_module: if not self._has_source: self._write_source() self._compile_module() return self._load_library() def get_module_name(self): basename = os.path.basename(self.modulefilename) # kill both the .so extension and the other .'s, as introduced # by Python 3: 'basename.cpython-33m.so' basename = basename.split('.', 1)[0] # and the _d added in Python 2 debug builds --- but try to be # conservative and not kill a legitimate _d if basename.endswith('_d') and hasattr(sys, 'gettotalrefcount'): basename = basename[:-2] return basename def get_extension(self): ffiplatform._hack_at_distutils() # backward compatibility hack if not self._has_source: with self.ffi._lock: if not self._has_source: self._write_source() sourcename = ffiplatform.maybe_relative_path(self.sourcefilename) modname = self.get_module_name() return ffiplatform.get_extension(sourcename, modname, **self.kwds) def generates_python_module(self): return self._vengine._gen_python_module def make_relative_to(self, kwds, relative_to): if relative_to and os.path.dirname(relative_to): dirname = os.path.dirname(relative_to) kwds = kwds.copy() for key in ffiplatform.LIST_OF_FILE_NAMES: if key in kwds: lst = kwds[key] if not isinstance(lst, (list, tuple)): raise TypeError("keyword '%s' should be a list or tuple" % (key,)) lst = [os.path.join(dirname, fn) for fn in lst] kwds[key] = lst return kwds # ---------- def _locate_module(self): if not os.path.isfile(self.modulefilename): if self.ext_package: try: pkg = __import__(self.ext_package, None, None, ['__doc__']) except ImportError: return # cannot import the package itself, give up # (e.g. it might be called differently before installation) path = pkg.__path__ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, _get_so_suffixes()) if filename is None: return self.modulefilename = filename self._vengine.collect_types() self._has_module = True def _write_source_to(self, file): self._vengine._f = file try: self._vengine.write_source_to_f() finally: del self._vengine._f def _write_source(self, file=None): if file is not None: self._write_source_to(file) else: # Write our source file to an in memory file. 
f = NativeIO() self._write_source_to(f) source_data = f.getvalue() # Determine if this matches the current file if os.path.exists(self.sourcefilename): with open(self.sourcefilename, "r") as fp: needs_written = not (fp.read() == source_data) else: needs_written = True # Actually write the file out if it doesn't match if needs_written: _ensure_dir(self.sourcefilename) with open(self.sourcefilename, "w") as fp: fp.write(source_data) # Set this flag self._has_source = True def _compile_module(self): # compile this C source tmpdir = os.path.dirname(self.sourcefilename) outputfilename = ffiplatform.compile(tmpdir, self.get_extension()) try: same = ffiplatform.samefile(outputfilename, self.modulefilename) except OSError: same = False if not same: _ensure_dir(self.modulefilename) shutil.move(outputfilename, self.modulefilename) self._has_module = True def _load_library(self): assert self._has_module if self.flags is not None: return self._vengine.load_library(self.flags) else: return self._vengine.load_library() # ____________________________________________________________ _FORCE_GENERIC_ENGINE = False # for tests def _locate_engine_class(ffi, force_generic_engine): if _FORCE_GENERIC_ENGINE: force_generic_engine = True if not force_generic_engine: if '__pypy__' in sys.builtin_module_names: force_generic_engine = True else: try: import _cffi_backend except ImportError: _cffi_backend = '?' if ffi._backend is not _cffi_backend: force_generic_engine = True if force_generic_engine: from . import vengine_gen return vengine_gen.VGenericEngine else: from . import vengine_cpy return vengine_cpy.VCPythonEngine # ____________________________________________________________ _TMPDIR = None def _caller_dir_pycache(): if _TMPDIR: return _TMPDIR result = os.environ.get('CFFI_TMPDIR') if result: return result filename = sys._getframe(2).f_code.co_filename return os.path.abspath(os.path.join(os.path.dirname(filename), '__pycache__')) def set_tmpdir(dirname): """Set the temporary directory to use instead of __pycache__.""" global _TMPDIR _TMPDIR = dirname def cleanup_tmpdir(tmpdir=None, keep_so=False): """Clean up the temporary directory by removing all files in it called `_cffi_*.{c,so}` as well as the `build` subdirectory.""" tmpdir = tmpdir or _caller_dir_pycache() try: filelist = os.listdir(tmpdir) except OSError: return if keep_so: suffix = '.c' # only remove .c files else: suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): try: os.unlink(os.path.join(tmpdir, fn)) except OSError: pass clean_dir = [os.path.join(tmpdir, 'build')] for dir in clean_dir: try: for fn in os.listdir(dir): fn = os.path.join(dir, fn) if os.path.isdir(fn): clean_dir.append(fn) else: os.unlink(fn) except OSError: pass def _get_so_suffixes(): suffixes = _extension_suffixes() if not suffixes: # bah, no C_EXTENSION available. Occurs on pypy without cpyext if sys.platform == 'win32': suffixes = [".pyd"] else: suffixes = [".so"] return suffixes def _ensure_dir(filename): dirname = os.path.dirname(filename) if dirname and not os.path.isdir(dirname): os.makedirs(dirname)
11,253
Python
35.538961
86
0.545455
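Editorial aside, not part of the bundled sources: a minimal, hedged sketch of the legacy ffi.verify() workflow that drives the Verifier class above. It assumes a working C compiler and a POSIX math library; the scratch directory path is hypothetical.

# Sketch only: exercising cffi.verifier via the legacy ffi.verify() API.
from cffi import FFI
import cffi.verifier

cffi.verifier.set_tmpdir("/tmp/_cffi_demo")        # hypothetical scratch dir

ffi = FFI()
ffi.cdef("double sqrt(double x);")
lib = ffi.verify("#include <math.h>", libraries=["m"])   # writes + compiles C
print(lib.sqrt(2.0))                               # 1.4142135623730951

cffi.verifier.cleanup_tmpdir(keep_so=True)         # drop generated .c files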
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/__init__.py
__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
           'FFIError']

from .api import FFI
from .error import CDefError, FFIError, VerificationError, VerificationMissing
from .error import PkgConfigError

__version__ = "1.15.1"
__version_info__ = (1, 15, 1)

# The verifier module file names are based on the CRC32 of a string that
# contains the following version number.  It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"
513
Python
33.266664
78
0.71345
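Editorial aside: a small illustrative sketch (assuming the bundled cffi package is importable) of the names re-exported by this __init__.py.

# Sketch: exercising the public names re-exported by cffi/__init__.py.
import cffi

print(cffi.__version__)        # "1.15.1" for this bundled copy
print(cffi.__version_info__)   # (1, 15, 1)

ffi = cffi.FFI()               # main entry point, re-exported from .api
try:
    ffi.cdef("this is not C;")
except cffi.CDefError as exc:  # error classes re-exported from .error
    print("cdef rejected:", exc)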
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/commontypes.py
import sys
from . import model
from .error import FFIError


COMMON_TYPES = {}

try:
    # fetch "bool" and all simple Windows types
    from _cffi_backend import _get_common_types
    _get_common_types(COMMON_TYPES)
except ImportError:
    pass

COMMON_TYPES['FILE'] = model.unknown_type('FILE', '_IO_FILE')
COMMON_TYPES['bool'] = '_Bool'    # in case we got ImportError above

for _type in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
    if _type.endswith('_t'):
        COMMON_TYPES[_type] = _type
del _type

_CACHE = {}

def resolve_common_type(parser, commontype):
    try:
        return _CACHE[commontype]
    except KeyError:
        cdecl = COMMON_TYPES.get(commontype, commontype)
        if not isinstance(cdecl, str):
            result, quals = cdecl, 0    # cdecl is already a BaseType
        elif cdecl in model.PrimitiveType.ALL_PRIMITIVE_TYPES:
            result, quals = model.PrimitiveType(cdecl), 0
        elif cdecl == 'set-unicode-needed':
            raise FFIError("The Windows type %r is only available after "
                           "you call ffi.set_unicode()" % (commontype,))
        else:
            if commontype == cdecl:
                raise FFIError(
                    "Unsupported type: %r.  Please look at "
        "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
                    "and file an issue if you think this type should really "
                    "be supported." % (commontype,))
            result, quals = parser.parse_type_and_quals(cdecl)   # recursive

        assert isinstance(result, model.BaseTypeByIdentity)
        _CACHE[commontype] = result, quals
        return result, quals


# ____________________________________________________________
# extra types for Windows (most of them are in commontypes.c)


def win_common_types():
    return {
        "UNICODE_STRING": model.StructType(
            "_UNICODE_STRING",
            ["Length",
             "MaximumLength",
             "Buffer"],
            [model.PrimitiveType("unsigned short"),
             model.PrimitiveType("unsigned short"),
             model.PointerType(model.PrimitiveType("wchar_t"))],
            [-1, -1, -1]),
        "PUNICODE_STRING": "UNICODE_STRING *",
        "PCUNICODE_STRING": "const UNICODE_STRING *",

        "TBYTE": "set-unicode-needed",
        "TCHAR": "set-unicode-needed",
        "LPCTSTR": "set-unicode-needed",
        "PCTSTR": "set-unicode-needed",
        "LPTSTR": "set-unicode-needed",
        "PTSTR": "set-unicode-needed",
        "PTBYTE": "set-unicode-needed",
        "PTCHAR": "set-unicode-needed",
        }

if sys.platform == 'win32':
    COMMON_TYPES.update(win_common_types())
2,689
Python
32.209876
78
0.57791
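Editorial aside: a short usage sketch (assuming cffi is installed) of what resolve_common_type() enables — the standard integer typedefs and bool can appear in cdef() and type strings without an explicit typedef.

# Sketch: common types resolved by cffi/commontypes.py.
from cffi import FFI

ffi = FFI()
ffi.cdef("uint32_t add_u32(uint32_t a, uint32_t b);")  # no typedef needed
print(ffi.typeof("bool"))     # resolved to _Bool via COMMON_TYPES
print(ffi.sizeof("size_t"))   # size_t is a recognized common integer type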
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/model.py
import types import weakref from .lock import allocate_lock from .error import CDefError, VerificationError, VerificationMissing # type qualifiers Q_CONST = 0x01 Q_RESTRICT = 0x02 Q_VOLATILE = 0x04 def qualify(quals, replace_with): if quals & Q_CONST: replace_with = ' const ' + replace_with.lstrip() if quals & Q_VOLATILE: replace_with = ' volatile ' + replace_with.lstrip() if quals & Q_RESTRICT: # It seems that __restrict is supported by gcc and msvc. # If you hit some different compiler, add a #define in # _cffi_include.h for it (and in its copies, documented there) replace_with = ' __restrict ' + replace_with.lstrip() return replace_with class BaseTypeByIdentity(object): is_array_type = False is_raw_function = False def get_c_name(self, replace_with='', context='a C file', quals=0): result = self.c_name_with_marker assert result.count('&') == 1 # some logic duplication with ffi.getctype()... :-( replace_with = replace_with.strip() if replace_with: if replace_with.startswith('*') and '&[' in result: replace_with = '(%s)' % replace_with elif not replace_with[0] in '[(': replace_with = ' ' + replace_with replace_with = qualify(quals, replace_with) result = result.replace('&', replace_with) if '$' in result: raise VerificationError( "cannot generate '%s' in %s: unknown type name" % (self._get_c_name(), context)) return result def _get_c_name(self): return self.c_name_with_marker.replace('&', '') def has_c_name(self): return '$' not in self._get_c_name() def is_integer_type(self): return False def get_cached_btype(self, ffi, finishlist, can_delay=False): try: BType = ffi._cached_btypes[self] except KeyError: BType = self.build_backend_type(ffi, finishlist) BType2 = ffi._cached_btypes.setdefault(self, BType) assert BType2 is BType return BType def __repr__(self): return '<%s>' % (self._get_c_name(),) def _get_items(self): return [(name, getattr(self, name)) for name in self._attrs_] class BaseType(BaseTypeByIdentity): def __eq__(self, other): return (self.__class__ == other.__class__ and self._get_items() == other._get_items()) def __ne__(self, other): return not self == other def __hash__(self): return hash((self.__class__, tuple(self._get_items()))) class VoidType(BaseType): _attrs_ = () def __init__(self): self.c_name_with_marker = 'void&' def build_backend_type(self, ffi, finishlist): return global_cache(self, ffi, 'new_void_type') void_type = VoidType() class BasePrimitiveType(BaseType): def is_complex_type(self): return False class PrimitiveType(BasePrimitiveType): _attrs_ = ('name',) ALL_PRIMITIVE_TYPES = { 'char': 'c', 'short': 'i', 'int': 'i', 'long': 'i', 'long long': 'i', 'signed char': 'i', 'unsigned char': 'i', 'unsigned short': 'i', 'unsigned int': 'i', 'unsigned long': 'i', 'unsigned long long': 'i', 'float': 'f', 'double': 'f', 'long double': 'f', 'float _Complex': 'j', 'double _Complex': 'j', '_Bool': 'i', # the following types are not primitive in the C sense 'wchar_t': 'c', 'char16_t': 'c', 'char32_t': 'c', 'int8_t': 'i', 'uint8_t': 'i', 'int16_t': 'i', 'uint16_t': 'i', 'int32_t': 'i', 'uint32_t': 'i', 'int64_t': 'i', 'uint64_t': 'i', 'int_least8_t': 'i', 'uint_least8_t': 'i', 'int_least16_t': 'i', 'uint_least16_t': 'i', 'int_least32_t': 'i', 'uint_least32_t': 'i', 'int_least64_t': 'i', 'uint_least64_t': 'i', 'int_fast8_t': 'i', 'uint_fast8_t': 'i', 'int_fast16_t': 'i', 'uint_fast16_t': 'i', 'int_fast32_t': 'i', 'uint_fast32_t': 'i', 'int_fast64_t': 'i', 'uint_fast64_t': 'i', 'intptr_t': 'i', 'uintptr_t': 'i', 'intmax_t': 'i', 'uintmax_t': 'i', 'ptrdiff_t': 'i', 'size_t': 
'i', 'ssize_t': 'i', } def __init__(self, name): assert name in self.ALL_PRIMITIVE_TYPES self.name = name self.c_name_with_marker = name + '&' def is_char_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'c' def is_integer_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'i' def is_float_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'f' def is_complex_type(self): return self.ALL_PRIMITIVE_TYPES[self.name] == 'j' def build_backend_type(self, ffi, finishlist): return global_cache(self, ffi, 'new_primitive_type', self.name) class UnknownIntegerType(BasePrimitiveType): _attrs_ = ('name',) def __init__(self, name): self.name = name self.c_name_with_marker = name + '&' def is_integer_type(self): return True def build_backend_type(self, ffi, finishlist): raise NotImplementedError("integer type '%s' can only be used after " "compilation" % self.name) class UnknownFloatType(BasePrimitiveType): _attrs_ = ('name', ) def __init__(self, name): self.name = name self.c_name_with_marker = name + '&' def build_backend_type(self, ffi, finishlist): raise NotImplementedError("float type '%s' can only be used after " "compilation" % self.name) class BaseFunctionType(BaseType): _attrs_ = ('args', 'result', 'ellipsis', 'abi') def __init__(self, args, result, ellipsis, abi=None): self.args = args self.result = result self.ellipsis = ellipsis self.abi = abi # reprargs = [arg._get_c_name() for arg in self.args] if self.ellipsis: reprargs.append('...') reprargs = reprargs or ['void'] replace_with = self._base_pattern % (', '.join(reprargs),) if abi is not None: replace_with = replace_with[:1] + abi + ' ' + replace_with[1:] self.c_name_with_marker = ( self.result.c_name_with_marker.replace('&', replace_with)) class RawFunctionType(BaseFunctionType): # Corresponds to a C type like 'int(int)', which is the C type of # a function, but not a pointer-to-function. The backend has no # notion of such a type; it's used temporarily by parsing. 
_base_pattern = '(&)(%s)' is_raw_function = True def build_backend_type(self, ffi, finishlist): raise CDefError("cannot render the type %r: it is a function " "type, not a pointer-to-function type" % (self,)) def as_function_pointer(self): return FunctionPtrType(self.args, self.result, self.ellipsis, self.abi) class FunctionPtrType(BaseFunctionType): _base_pattern = '(*&)(%s)' def build_backend_type(self, ffi, finishlist): result = self.result.get_cached_btype(ffi, finishlist) args = [] for tp in self.args: args.append(tp.get_cached_btype(ffi, finishlist)) abi_args = () if self.abi == "__stdcall": if not self.ellipsis: # __stdcall ignored for variadic funcs try: abi_args = (ffi._backend.FFI_STDCALL,) except AttributeError: pass return global_cache(self, ffi, 'new_function_type', tuple(args), result, self.ellipsis, *abi_args) def as_raw_function(self): return RawFunctionType(self.args, self.result, self.ellipsis, self.abi) class PointerType(BaseType): _attrs_ = ('totype', 'quals') def __init__(self, totype, quals=0): self.totype = totype self.quals = quals extra = qualify(quals, " *&") if totype.is_array_type: extra = "(%s)" % (extra.lstrip(),) self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra) def build_backend_type(self, ffi, finishlist): BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True) return global_cache(self, ffi, 'new_pointer_type', BItem) voidp_type = PointerType(void_type) def ConstPointerType(totype): return PointerType(totype, Q_CONST) const_voidp_type = ConstPointerType(void_type) class NamedPointerType(PointerType): _attrs_ = ('totype', 'name') def __init__(self, totype, name, quals=0): PointerType.__init__(self, totype, quals) self.name = name self.c_name_with_marker = name + '&' class ArrayType(BaseType): _attrs_ = ('item', 'length') is_array_type = True def __init__(self, item, length): self.item = item self.length = length # if length is None: brackets = '&[]' elif length == '...': brackets = '&[/*...*/]' else: brackets = '&[%s]' % length self.c_name_with_marker = ( self.item.c_name_with_marker.replace('&', brackets)) def length_is_unknown(self): return isinstance(self.length, str) def resolve_length(self, newlength): return ArrayType(self.item, newlength) def build_backend_type(self, ffi, finishlist): if self.length_is_unknown(): raise CDefError("cannot render the type %r: unknown length" % (self,)) self.item.get_cached_btype(ffi, finishlist) # force the item BType BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist) return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length) char_array_type = ArrayType(PrimitiveType('char'), None) class StructOrUnionOrEnum(BaseTypeByIdentity): _attrs_ = ('name',) forcename = None def build_c_name_with_marker(self): name = self.forcename or '%s %s' % (self.kind, self.name) self.c_name_with_marker = name + '&' def force_the_name(self, forcename): self.forcename = forcename self.build_c_name_with_marker() def get_official_name(self): assert self.c_name_with_marker.endswith('&') return self.c_name_with_marker[:-1] class StructOrUnion(StructOrUnionOrEnum): fixedlayout = None completed = 0 partial = False packed = 0 def __init__(self, name, fldnames, fldtypes, fldbitsize, fldquals=None): self.name = name self.fldnames = fldnames self.fldtypes = fldtypes self.fldbitsize = fldbitsize self.fldquals = fldquals self.build_c_name_with_marker() def anonymous_struct_fields(self): if self.fldtypes is not None: for name, type in zip(self.fldnames, self.fldtypes): if name == '' and 
isinstance(type, StructOrUnion): yield type def enumfields(self, expand_anonymous_struct_union=True): fldquals = self.fldquals if fldquals is None: fldquals = (0,) * len(self.fldnames) for name, type, bitsize, quals in zip(self.fldnames, self.fldtypes, self.fldbitsize, fldquals): if (name == '' and isinstance(type, StructOrUnion) and expand_anonymous_struct_union): # nested anonymous struct/union for result in type.enumfields(): yield result else: yield (name, type, bitsize, quals) def force_flatten(self): # force the struct or union to have a declaration that lists # directly all fields returned by enumfields(), flattening # nested anonymous structs/unions. names = [] types = [] bitsizes = [] fldquals = [] for name, type, bitsize, quals in self.enumfields(): names.append(name) types.append(type) bitsizes.append(bitsize) fldquals.append(quals) self.fldnames = tuple(names) self.fldtypes = tuple(types) self.fldbitsize = tuple(bitsizes) self.fldquals = tuple(fldquals) def get_cached_btype(self, ffi, finishlist, can_delay=False): BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist, can_delay) if not can_delay: self.finish_backend_type(ffi, finishlist) return BType def finish_backend_type(self, ffi, finishlist): if self.completed: if self.completed != 2: raise NotImplementedError("recursive structure declaration " "for '%s'" % (self.name,)) return BType = ffi._cached_btypes[self] # self.completed = 1 # if self.fldtypes is None: pass # not completing it: it's an opaque struct # elif self.fixedlayout is None: fldtypes = [tp.get_cached_btype(ffi, finishlist) for tp in self.fldtypes] lst = list(zip(self.fldnames, fldtypes, self.fldbitsize)) extra_flags = () if self.packed: if self.packed == 1: extra_flags = (8,) # SF_PACKED else: extra_flags = (0, self.packed) ffi._backend.complete_struct_or_union(BType, lst, self, -1, -1, *extra_flags) # else: fldtypes = [] fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout for i in range(len(self.fldnames)): fsize = fieldsize[i] ftype = self.fldtypes[i] # if isinstance(ftype, ArrayType) and ftype.length_is_unknown(): # fix the length to match the total size BItemType = ftype.item.get_cached_btype(ffi, finishlist) nlen, nrest = divmod(fsize, ffi.sizeof(BItemType)) if nrest != 0: self._verification_error( "field '%s.%s' has a bogus size?" 
% ( self.name, self.fldnames[i] or '{}')) ftype = ftype.resolve_length(nlen) self.fldtypes = (self.fldtypes[:i] + (ftype,) + self.fldtypes[i+1:]) # BFieldType = ftype.get_cached_btype(ffi, finishlist) if isinstance(ftype, ArrayType) and ftype.length is None: assert fsize == 0 else: bitemsize = ffi.sizeof(BFieldType) if bitemsize != fsize: self._verification_error( "field '%s.%s' is declared as %d bytes, but is " "really %d bytes" % (self.name, self.fldnames[i] or '{}', bitemsize, fsize)) fldtypes.append(BFieldType) # lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs)) ffi._backend.complete_struct_or_union(BType, lst, self, totalsize, totalalignment) self.completed = 2 def _verification_error(self, msg): raise VerificationError(msg) def check_not_partial(self): if self.partial and self.fixedlayout is None: raise VerificationMissing(self._get_c_name()) def build_backend_type(self, ffi, finishlist): self.check_not_partial() finishlist.append(self) # return global_cache(self, ffi, 'new_%s_type' % self.kind, self.get_official_name(), key=self) class StructType(StructOrUnion): kind = 'struct' class UnionType(StructOrUnion): kind = 'union' class EnumType(StructOrUnionOrEnum): kind = 'enum' partial = False partial_resolved = False def __init__(self, name, enumerators, enumvalues, baseinttype=None): self.name = name self.enumerators = enumerators self.enumvalues = enumvalues self.baseinttype = baseinttype self.build_c_name_with_marker() def force_the_name(self, forcename): StructOrUnionOrEnum.force_the_name(self, forcename) if self.forcename is None: name = self.get_official_name() self.forcename = '$' + name.replace(' ', '_') def check_not_partial(self): if self.partial and not self.partial_resolved: raise VerificationMissing(self._get_c_name()) def build_backend_type(self, ffi, finishlist): self.check_not_partial() base_btype = self.build_baseinttype(ffi, finishlist) return global_cache(self, ffi, 'new_enum_type', self.get_official_name(), self.enumerators, self.enumvalues, base_btype, key=self) def build_baseinttype(self, ffi, finishlist): if self.baseinttype is not None: return self.baseinttype.get_cached_btype(ffi, finishlist) # if self.enumvalues: smallest_value = min(self.enumvalues) largest_value = max(self.enumvalues) else: import warnings try: # XXX! The goal is to ensure that the warnings.warn() # will not suppress the warning. We want to get it # several times if we reach this point several times. 
__warningregistry__.clear() except NameError: pass warnings.warn("%r has no values explicitly defined; " "guessing that it is equivalent to 'unsigned int'" % self._get_c_name()) smallest_value = largest_value = 0 if smallest_value < 0: # needs a signed type sign = 1 candidate1 = PrimitiveType("int") candidate2 = PrimitiveType("long") else: sign = 0 candidate1 = PrimitiveType("unsigned int") candidate2 = PrimitiveType("unsigned long") btype1 = candidate1.get_cached_btype(ffi, finishlist) btype2 = candidate2.get_cached_btype(ffi, finishlist) size1 = ffi.sizeof(btype1) size2 = ffi.sizeof(btype2) if (smallest_value >= ((-1) << (8*size1-1)) and largest_value < (1 << (8*size1-sign))): return btype1 if (smallest_value >= ((-1) << (8*size2-1)) and largest_value < (1 << (8*size2-sign))): return btype2 raise CDefError("%s values don't all fit into either 'long' " "or 'unsigned long'" % self._get_c_name()) def unknown_type(name, structname=None): if structname is None: structname = '$%s' % name tp = StructType(structname, None, None, None) tp.force_the_name(name) tp.origin = "unknown_type" return tp def unknown_ptr_type(name, structname=None): if structname is None: structname = '$$%s' % name tp = StructType(structname, None, None, None) return NamedPointerType(tp, name) global_lock = allocate_lock() _typecache_cffi_backend = weakref.WeakValueDictionary() def get_typecache(backend): # returns _typecache_cffi_backend if backend is the _cffi_backend # module, or type(backend).__typecache if backend is an instance of # CTypesBackend (or some FakeBackend class during tests) if isinstance(backend, types.ModuleType): return _typecache_cffi_backend with global_lock: if not hasattr(type(backend), '__typecache'): type(backend).__typecache = weakref.WeakValueDictionary() return type(backend).__typecache def global_cache(srctype, ffi, funcname, *args, **kwds): key = kwds.pop('key', (funcname, args)) assert not kwds try: return ffi._typecache[key] except KeyError: pass try: res = getattr(ffi._backend, funcname)(*args) except NotImplementedError as e: raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e)) # note that setdefault() on WeakValueDictionary is not atomic # and contains a rare bug (http://bugs.python.org/issue19542); # we have to use a lock and do it ourselves cache = ffi._typecache with global_lock: res1 = cache.get(key) if res1 is None: cache[key] = res return res else: return res1 def pointer_cache(ffi, BType): return global_cache('?', ffi, 'new_pointer_type', BType) def attach_exception_info(e, name): if e.args and type(e.args[0]) is str: e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
21,768
Python
34.224919
79
0.543275
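Editorial aside: the model classes above back ffi.typeof() and ffi.getctype(); a brief sketch (assuming cffi is installed) of how their name-rendering logic surfaces through the public API.

# Sketch: PointerType/ArrayType/FunctionPtrType rendering via the public API.
from cffi import FFI

ffi = FFI()
print(ffi.getctype("int", "*"))          # 'int *'       (PointerType)
print(ffi.getctype("int[5]", "*"))       # 'int(*)[5]'   (pointer to ArrayType)
print(ffi.typeof("void(*)(int, ...)"))   # a FunctionPtrType-backed ctype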
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/error.py
class FFIError(Exception):
    __module__ = 'cffi'

class CDefError(Exception):
    __module__ = 'cffi'
    def __str__(self):
        try:
            current_decl = self.args[1]
            filename = current_decl.coord.file
            linenum = current_decl.coord.line
            prefix = '%s:%d: ' % (filename, linenum)
        except (AttributeError, TypeError, IndexError):
            prefix = ''
        return '%s%s' % (prefix, self.args[0])

class VerificationError(Exception):
    """ An error raised when verification fails
    """
    __module__ = 'cffi'

class VerificationMissing(Exception):
    """ An error raised when incomplete structures are passed into
    cdef, but no verification has been done
    """
    __module__ = 'cffi'

class PkgConfigError(Exception):
    """ An error raised for missing modules in pkg-config
    """
    __module__ = 'cffi'
877
Python
26.437499
66
0.596351
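Editorial aside: a hedged example of when these exceptions typically surface; the struct declaration shown is hypothetical.

# Sketch: VerificationMissing is raised when a partially-declared struct
# ("...;") is used before any verification/compilation step.
from cffi import FFI, VerificationMissing

ffi = FFI()
ffi.cdef("struct point_s { int x; ...; };")   # partial: layout unknown so far
try:
    ffi.sizeof("struct point_s")
except VerificationMissing as exc:
    print("layout not known yet:", exc)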
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/cffi_opcode.py
from .error import VerificationError class CffiOp(object): def __init__(self, op, arg): self.op = op self.arg = arg def as_c_expr(self): if self.op is None: assert isinstance(self.arg, str) return '(_cffi_opcode_t)(%s)' % (self.arg,) classname = CLASS_NAME[self.op] return '_CFFI_OP(_CFFI_OP_%s, %s)' % (classname, self.arg) def as_python_bytes(self): if self.op is None and self.arg.isdigit(): value = int(self.arg) # non-negative: '-' not in self.arg if value >= 2**31: raise OverflowError("cannot emit %r: limited to 2**31-1" % (self.arg,)) return format_four_bytes(value) if isinstance(self.arg, str): raise VerificationError("cannot emit to Python: %r" % (self.arg,)) return format_four_bytes((self.arg << 8) | self.op) def __str__(self): classname = CLASS_NAME.get(self.op, self.op) return '(%s %s)' % (classname, self.arg) def format_four_bytes(num): return '\\x%02X\\x%02X\\x%02X\\x%02X' % ( (num >> 24) & 0xFF, (num >> 16) & 0xFF, (num >> 8) & 0xFF, (num ) & 0xFF) OP_PRIMITIVE = 1 OP_POINTER = 3 OP_ARRAY = 5 OP_OPEN_ARRAY = 7 OP_STRUCT_UNION = 9 OP_ENUM = 11 OP_FUNCTION = 13 OP_FUNCTION_END = 15 OP_NOOP = 17 OP_BITFIELD = 19 OP_TYPENAME = 21 OP_CPYTHON_BLTN_V = 23 # varargs OP_CPYTHON_BLTN_N = 25 # noargs OP_CPYTHON_BLTN_O = 27 # O (i.e. a single arg) OP_CONSTANT = 29 OP_CONSTANT_INT = 31 OP_GLOBAL_VAR = 33 OP_DLOPEN_FUNC = 35 OP_DLOPEN_CONST = 37 OP_GLOBAL_VAR_F = 39 OP_EXTERN_PYTHON = 41 PRIM_VOID = 0 PRIM_BOOL = 1 PRIM_CHAR = 2 PRIM_SCHAR = 3 PRIM_UCHAR = 4 PRIM_SHORT = 5 PRIM_USHORT = 6 PRIM_INT = 7 PRIM_UINT = 8 PRIM_LONG = 9 PRIM_ULONG = 10 PRIM_LONGLONG = 11 PRIM_ULONGLONG = 12 PRIM_FLOAT = 13 PRIM_DOUBLE = 14 PRIM_LONGDOUBLE = 15 PRIM_WCHAR = 16 PRIM_INT8 = 17 PRIM_UINT8 = 18 PRIM_INT16 = 19 PRIM_UINT16 = 20 PRIM_INT32 = 21 PRIM_UINT32 = 22 PRIM_INT64 = 23 PRIM_UINT64 = 24 PRIM_INTPTR = 25 PRIM_UINTPTR = 26 PRIM_PTRDIFF = 27 PRIM_SIZE = 28 PRIM_SSIZE = 29 PRIM_INT_LEAST8 = 30 PRIM_UINT_LEAST8 = 31 PRIM_INT_LEAST16 = 32 PRIM_UINT_LEAST16 = 33 PRIM_INT_LEAST32 = 34 PRIM_UINT_LEAST32 = 35 PRIM_INT_LEAST64 = 36 PRIM_UINT_LEAST64 = 37 PRIM_INT_FAST8 = 38 PRIM_UINT_FAST8 = 39 PRIM_INT_FAST16 = 40 PRIM_UINT_FAST16 = 41 PRIM_INT_FAST32 = 42 PRIM_UINT_FAST32 = 43 PRIM_INT_FAST64 = 44 PRIM_UINT_FAST64 = 45 PRIM_INTMAX = 46 PRIM_UINTMAX = 47 PRIM_FLOATCOMPLEX = 48 PRIM_DOUBLECOMPLEX = 49 PRIM_CHAR16 = 50 PRIM_CHAR32 = 51 _NUM_PRIM = 52 _UNKNOWN_PRIM = -1 _UNKNOWN_FLOAT_PRIM = -2 _UNKNOWN_LONG_DOUBLE = -3 _IO_FILE_STRUCT = -1 PRIMITIVE_TO_INDEX = { 'char': PRIM_CHAR, 'short': PRIM_SHORT, 'int': PRIM_INT, 'long': PRIM_LONG, 'long long': PRIM_LONGLONG, 'signed char': PRIM_SCHAR, 'unsigned char': PRIM_UCHAR, 'unsigned short': PRIM_USHORT, 'unsigned int': PRIM_UINT, 'unsigned long': PRIM_ULONG, 'unsigned long long': PRIM_ULONGLONG, 'float': PRIM_FLOAT, 'double': PRIM_DOUBLE, 'long double': PRIM_LONGDOUBLE, 'float _Complex': PRIM_FLOATCOMPLEX, 'double _Complex': PRIM_DOUBLECOMPLEX, '_Bool': PRIM_BOOL, 'wchar_t': PRIM_WCHAR, 'char16_t': PRIM_CHAR16, 'char32_t': PRIM_CHAR32, 'int8_t': PRIM_INT8, 'uint8_t': PRIM_UINT8, 'int16_t': PRIM_INT16, 'uint16_t': PRIM_UINT16, 'int32_t': PRIM_INT32, 'uint32_t': PRIM_UINT32, 'int64_t': PRIM_INT64, 'uint64_t': PRIM_UINT64, 'intptr_t': PRIM_INTPTR, 'uintptr_t': PRIM_UINTPTR, 'ptrdiff_t': PRIM_PTRDIFF, 'size_t': PRIM_SIZE, 'ssize_t': PRIM_SSIZE, 'int_least8_t': PRIM_INT_LEAST8, 'uint_least8_t': PRIM_UINT_LEAST8, 'int_least16_t': PRIM_INT_LEAST16, 'uint_least16_t': PRIM_UINT_LEAST16, 'int_least32_t': PRIM_INT_LEAST32, 'uint_least32_t': PRIM_UINT_LEAST32, 'int_least64_t': 
PRIM_INT_LEAST64, 'uint_least64_t': PRIM_UINT_LEAST64, 'int_fast8_t': PRIM_INT_FAST8, 'uint_fast8_t': PRIM_UINT_FAST8, 'int_fast16_t': PRIM_INT_FAST16, 'uint_fast16_t': PRIM_UINT_FAST16, 'int_fast32_t': PRIM_INT_FAST32, 'uint_fast32_t': PRIM_UINT_FAST32, 'int_fast64_t': PRIM_INT_FAST64, 'uint_fast64_t': PRIM_UINT_FAST64, 'intmax_t': PRIM_INTMAX, 'uintmax_t': PRIM_UINTMAX, } F_UNION = 0x01 F_CHECK_FIELDS = 0x02 F_PACKED = 0x04 F_EXTERNAL = 0x08 F_OPAQUE = 0x10 G_FLAGS = dict([('_CFFI_' + _key, globals()[_key]) for _key in ['F_UNION', 'F_CHECK_FIELDS', 'F_PACKED', 'F_EXTERNAL', 'F_OPAQUE']]) CLASS_NAME = {} for _name, _value in list(globals().items()): if _name.startswith('OP_') and isinstance(_value, int): CLASS_NAME[_value] = _name[3:]
5,724
Python
29.452128
78
0.513277
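Editorial aside: cffi_opcode.py is an internal helper of the recompiler; the following sketch (internal API, so subject to change) shows how its encoders render a single opcode.

# Sketch: encoding a "pointer to int" opcode both as C and as packed bytes.
from cffi.cffi_opcode import CffiOp, OP_POINTER, PRIMITIVE_TO_INDEX

op = CffiOp(OP_POINTER, PRIMITIVE_TO_INDEX['int'])   # arg 7 == PRIM_INT
print(op.as_c_expr())        # _CFFI_OP(_CFFI_OP_POINTER, 7)
print(op.as_python_bytes())  # \x00\x00\x07\x03   ((arg << 8) | op)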
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/api.py
import sys, types from .lock import allocate_lock from .error import CDefError from . import model try: callable except NameError: # Python 3.1 from collections import Callable callable = lambda x: isinstance(x, Callable) try: basestring except NameError: # Python 3.x basestring = str _unspecified = object() class FFI(object): r''' The main top-level class that you instantiate once, or once per module. Example usage: ffi = FFI() ffi.cdef(""" int printf(const char *, ...); """) C = ffi.dlopen(None) # standard library -or- C = ffi.verify() # use a C compiler: verify the decl above is right C.printf("hello, %s!\n", ffi.new("char[]", "world")) ''' def __init__(self, backend=None): """Create an FFI instance. The 'backend' argument is used to select a non-default backend, mostly for tests. """ if backend is None: # You need PyPy (>= 2.0 beta), or a CPython (>= 2.6) with # _cffi_backend.so compiled. import _cffi_backend as backend from . import __version__ if backend.__version__ != __version__: # bad version! Try to be as explicit as possible. if hasattr(backend, '__file__'): # CPython raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. When we import the top-level '_cffi_backend' extension module, we get version %s, located in %r. The two versions should be equal; check your installation." % ( __version__, __file__, backend.__version__, backend.__file__)) else: # PyPy raise Exception("Version mismatch: this is the 'cffi' package version %s, located in %r. This interpreter comes with a built-in '_cffi_backend' module, which is version %s. The two versions should be equal; check your installation." % ( __version__, __file__, backend.__version__)) # (If you insist you can also try to pass the option # 'backend=backend_ctypes.CTypesBackend()', but don't # rely on it! It's probably not going to work well.) from . import cparser self._backend = backend self._lock = allocate_lock() self._parser = cparser.Parser() self._cached_btypes = {} self._parsed_types = types.ModuleType('parsed_types').__dict__ self._new_types = types.ModuleType('new_types').__dict__ self._function_caches = [] self._libraries = [] self._cdefsources = [] self._included_ffis = [] self._windows_unicode = None self._init_once_cache = {} self._cdef_version = None self._embedding = None self._typecache = model.get_typecache(backend) if hasattr(backend, 'set_ffi'): backend.set_ffi(self) for name in list(backend.__dict__): if name.startswith('RTLD_'): setattr(self, name, getattr(backend, name)) # with self._lock: self.BVoidP = self._get_cached_btype(model.voidp_type) self.BCharA = self._get_cached_btype(model.char_array_type) if isinstance(backend, types.ModuleType): # _cffi_backend: attach these constants to the class if not hasattr(FFI, 'NULL'): FFI.NULL = self.cast(self.BVoidP, 0) FFI.CData, FFI.CType = backend._get_types() else: # ctypes backend: attach these constants to the instance self.NULL = self.cast(self.BVoidP, 0) self.CData, self.CType = backend._get_types() self.buffer = backend.buffer def cdef(self, csource, override=False, packed=False, pack=None): """Parse the given C source. This registers all declared functions, types, and global variables. The functions and global variables can then be accessed via either 'ffi.dlopen()' or 'ffi.verify()'. The types can be used in 'ffi.new()' and other functions. If 'packed' is specified as True, all structs declared inside this cdef are packed, i.e. laid out without any field alignment at all. 
Alternatively, 'pack' can be a small integer, and requests for alignment greater than that are ignored (pack=1 is equivalent to packed=True). """ self._cdef(csource, override=override, packed=packed, pack=pack) def embedding_api(self, csource, packed=False, pack=None): self._cdef(csource, packed=packed, pack=pack, dllexport=True) if self._embedding is None: self._embedding = '' def _cdef(self, csource, override=False, **options): if not isinstance(csource, str): # unicode, on Python 2 if not isinstance(csource, basestring): raise TypeError("cdef() argument must be a string") csource = csource.encode('ascii') with self._lock: self._cdef_version = object() self._parser.parse(csource, override=override, **options) self._cdefsources.append(csource) if override: for cache in self._function_caches: cache.clear() finishlist = self._parser._recomplete if finishlist: self._parser._recomplete = [] for tp in finishlist: tp.finish_backend_type(self, finishlist) def dlopen(self, name, flags=0): """Load and return a dynamic library identified by 'name'. The standard C library can be loaded by passing None. Note that functions and types declared by 'ffi.cdef()' are not linked to a particular library, just like C headers; in the library we only look for the actual (untyped) symbols. """ if not (isinstance(name, basestring) or name is None or isinstance(name, self.CData)): raise TypeError("dlopen(name): name must be a file name, None, " "or an already-opened 'void *' handle") with self._lock: lib, function_cache = _make_ffi_library(self, name, flags) self._function_caches.append(function_cache) self._libraries.append(lib) return lib def dlclose(self, lib): """Close a library obtained with ffi.dlopen(). After this call, access to functions or variables from the library will fail (possibly with a segmentation fault). """ type(lib).__cffi_close__(lib) def _typeof_locked(self, cdecl): # call me with the lock! key = cdecl if key in self._parsed_types: return self._parsed_types[key] # if not isinstance(cdecl, str): # unicode, on Python 2 cdecl = cdecl.encode('ascii') # type = self._parser.parse_type(cdecl) really_a_function_type = type.is_raw_function if really_a_function_type: type = type.as_function_pointer() btype = self._get_cached_btype(type) result = btype, really_a_function_type self._parsed_types[key] = result return result def _typeof(self, cdecl, consider_function_as_funcptr=False): # string -> ctype object try: result = self._parsed_types[cdecl] except KeyError: with self._lock: result = self._typeof_locked(cdecl) # btype, really_a_function_type = result if really_a_function_type and not consider_function_as_funcptr: raise CDefError("the type %r is a function type, not a " "pointer-to-function type" % (cdecl,)) return btype def typeof(self, cdecl): """Parse the C type given as a string and return the corresponding <ctype> object. It can also be used on 'cdata' instance to get its C type. """ if isinstance(cdecl, basestring): return self._typeof(cdecl) if isinstance(cdecl, self.CData): return self._backend.typeof(cdecl) if isinstance(cdecl, types.BuiltinFunctionType): res = _builtin_function_type(cdecl) if res is not None: return res if (isinstance(cdecl, types.FunctionType) and hasattr(cdecl, '_cffi_base_type')): with self._lock: return self._get_cached_btype(cdecl._cffi_base_type) raise TypeError(type(cdecl)) def sizeof(self, cdecl): """Return the size in bytes of the argument. It can be a string naming a C type, or a 'cdata' instance. 
""" if isinstance(cdecl, basestring): BType = self._typeof(cdecl) return self._backend.sizeof(BType) else: return self._backend.sizeof(cdecl) def alignof(self, cdecl): """Return the natural alignment size in bytes of the C type given as a string. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return self._backend.alignof(cdecl) def offsetof(self, cdecl, *fields_or_indexes): """Return the offset of the named field inside the given structure or array, which must be given as a C type name. You can give several field names in case of nested structures. You can also give numeric values which correspond to array items, in case of an array type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return self._typeoffsetof(cdecl, *fields_or_indexes)[1] def new(self, cdecl, init=None): """Allocate an instance according to the specified C type and return a pointer to it. The specified C type must be either a pointer or an array: ``new('X *')`` allocates an X and returns a pointer to it, whereas ``new('X[n]')`` allocates an array of n X'es and returns an array referencing it (which works mostly like a pointer, like in C). You can also use ``new('X[]', n)`` to allocate an array of a non-constant length n. The memory is initialized following the rules of declaring a global variable in C: by default it is zero-initialized, but an explicit initializer can be given which can be used to fill all or part of the memory. When the returned <cdata> object goes out of scope, the memory is freed. In other words the returned <cdata> object has ownership of the value of type 'cdecl' that it points to. This means that the raw data can be used as long as this object is kept alive, but must not be used for a longer time. Be careful about that when copying the pointer to the memory somewhere else, e.g. into another structure. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return self._backend.newp(cdecl, init) def new_allocator(self, alloc=None, free=None, should_clear_after_alloc=True): """Return a new allocator, i.e. a function that behaves like ffi.new() but uses the provided low-level 'alloc' and 'free' functions. 'alloc' is called with the size as argument. If it returns NULL, a MemoryError is raised. 'free' is called with the result of 'alloc' as argument. Both can be either Python function or directly C functions. If 'free' is None, then no free function is called. If both 'alloc' and 'free' are None, the default is used. If 'should_clear_after_alloc' is set to False, then the memory returned by 'alloc' is assumed to be already cleared (or you are fine with garbage); otherwise CFFI will clear it. """ compiled_ffi = self._backend.FFI() allocator = compiled_ffi.new_allocator(alloc, free, should_clear_after_alloc) def allocate(cdecl, init=None): if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return allocator(cdecl, init) return allocate def cast(self, cdecl, source): """Similar to a C cast: returns an instance of the named C type initialized with the given 'source'. The source is casted between integers or pointers of any type. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return self._backend.cast(cdecl, source) def string(self, cdata, maxlen=-1): """Return a Python string (or unicode string) from the 'cdata'. If 'cdata' is a pointer or array of characters or bytes, returns the null-terminated string. The returned string extends until the first null character, or at most 'maxlen' characters. 
If 'cdata' is an array then 'maxlen' defaults to its length. If 'cdata' is a pointer or array of wchar_t, returns a unicode string following the same rules. If 'cdata' is a single character or byte or a wchar_t, returns it as a string or unicode string. If 'cdata' is an enum, returns the value of the enumerator as a string, or 'NUMBER' if the value is out of range. """ return self._backend.string(cdata, maxlen) def unpack(self, cdata, length): """Unpack an array of C data of the given length, returning a Python string/unicode/list. If 'cdata' is a pointer to 'char', returns a byte string. It does not stop at the first null. This is equivalent to: ffi.buffer(cdata, length)[:] If 'cdata' is a pointer to 'wchar_t', returns a unicode string. 'length' is measured in wchar_t's; it is not the size in bytes. If 'cdata' is a pointer to anything else, returns a list of 'length' items. This is a faster equivalent to: [cdata[i] for i in range(length)] """ return self._backend.unpack(cdata, length) #def buffer(self, cdata, size=-1): # """Return a read-write buffer object that references the raw C data # pointed to by the given 'cdata'. The 'cdata' must be a pointer or # an array. Can be passed to functions expecting a buffer, or directly # manipulated with: # # buf[:] get a copy of it in a regular string, or # buf[idx] as a single character # buf[:] = ... # buf[idx] = ... change the content # """ # note that 'buffer' is a type, set on this instance by __init__ def from_buffer(self, cdecl, python_buffer=_unspecified, require_writable=False): """Return a cdata of the given type pointing to the data of the given Python object, which must support the buffer interface. Note that this is not meant to be used on the built-in types str or unicode (you can build 'char[]' arrays explicitly) but only on objects containing large quantities of raw data in some other format, like 'array.array' or numpy arrays. The first argument is optional and default to 'char[]'. """ if python_buffer is _unspecified: cdecl, python_buffer = self.BCharA, cdecl elif isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) return self._backend.from_buffer(cdecl, python_buffer, require_writable) def memmove(self, dest, src, n): """ffi.memmove(dest, src, n) copies n bytes of memory from src to dest. Like the C function memmove(), the memory areas may overlap; apart from that it behaves like the C function memcpy(). 'src' can be any cdata ptr or array, or any Python buffer object. 'dest' can be any cdata ptr or array, or a writable Python buffer object. The size to copy, 'n', is always measured in bytes. Unlike other methods, this one supports all Python buffer including byte strings and bytearrays---but it still does not support non-contiguous buffers. """ return self._backend.memmove(dest, src, n) def callback(self, cdecl, python_callable=None, error=None, onerror=None): """Return a callback object or a decorator making such a callback object. 'cdecl' must name a C function pointer type. The callback invokes the specified 'python_callable' (which may be provided either directly or via a decorator). Important: the callback object must be manually kept alive for as long as the callback may be invoked from the C level. 
""" def callback_decorator_wrap(python_callable): if not callable(python_callable): raise TypeError("the 'python_callable' argument " "is not callable") return self._backend.callback(cdecl, python_callable, error, onerror) if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl, consider_function_as_funcptr=True) if python_callable is None: return callback_decorator_wrap # decorator mode else: return callback_decorator_wrap(python_callable) # direct mode def getctype(self, cdecl, replace_with=''): """Return a string giving the C type 'cdecl', which may be itself a string or a <ctype> object. If 'replace_with' is given, it gives extra text to append (or insert for more complicated C types), like a variable name, or '*' to get actually the C type 'pointer-to-cdecl'. """ if isinstance(cdecl, basestring): cdecl = self._typeof(cdecl) replace_with = replace_with.strip() if (replace_with.startswith('*') and '&[' in self._backend.getcname(cdecl, '&')): replace_with = '(%s)' % replace_with elif replace_with and not replace_with[0] in '[(': replace_with = ' ' + replace_with return self._backend.getcname(cdecl, replace_with) def gc(self, cdata, destructor, size=0): """Return a new cdata object that points to the same data. Later, when this new cdata object is garbage-collected, 'destructor(old_cdata_object)' will be called. The optional 'size' gives an estimate of the size, used to trigger the garbage collection more eagerly. So far only used on PyPy. It tells the GC that the returned object keeps alive roughly 'size' bytes of external memory. """ return self._backend.gcp(cdata, destructor, size) def _get_cached_btype(self, type): assert self._lock.acquire(False) is False # call me with the lock! try: BType = self._cached_btypes[type] except KeyError: finishlist = [] BType = type.get_cached_btype(self, finishlist) for type in finishlist: type.finish_backend_type(self, finishlist) return BType def verify(self, source='', tmpdir=None, **kwargs): """Verify that the current ffi signatures compile on this machine, and return a dynamic library object. The dynamic library can be used to call functions and access global variables declared in this 'ffi'. The library is compiled by the C compiler: it gives you C-level API compatibility (including calling macros). This is unlike 'ffi.dlopen()', which requires binary compatibility in the signatures. """ from .verifier import Verifier, _caller_dir_pycache # # If set_unicode(True) was called, insert the UNICODE and # _UNICODE macro declarations if self._windows_unicode: self._apply_windows_unicode(kwargs) # # Set the tmpdir here, and not in Verifier.__init__: it picks # up the caller's directory, which we want to be the caller of # ffi.verify(), as opposed to the caller of Veritier(). tmpdir = tmpdir or _caller_dir_pycache() # # Make a Verifier() and use it to load the library. self.verifier = Verifier(self, source, tmpdir, **kwargs) lib = self.verifier.load_library() # # Save the loaded library for keep-alive purposes, even # if the caller doesn't keep it alive itself (it should). 
self._libraries.append(lib) return lib def _get_errno(self): return self._backend.get_errno() def _set_errno(self, errno): self._backend.set_errno(errno) errno = property(_get_errno, _set_errno, None, "the value of 'errno' from/to the C calls") def getwinerror(self, code=-1): return self._backend.getwinerror(code) def _pointer_to(self, ctype): with self._lock: return model.pointer_cache(self, ctype) def addressof(self, cdata, *fields_or_indexes): """Return the address of a <cdata 'struct-or-union'>. If 'fields_or_indexes' are given, returns the address of that field or array item in the structure or array, recursively in case of nested structures. """ try: ctype = self._backend.typeof(cdata) except TypeError: if '__addressof__' in type(cdata).__dict__: return type(cdata).__addressof__(cdata, *fields_or_indexes) raise if fields_or_indexes: ctype, offset = self._typeoffsetof(ctype, *fields_or_indexes) else: if ctype.kind == "pointer": raise TypeError("addressof(pointer)") offset = 0 ctypeptr = self._pointer_to(ctype) return self._backend.rawaddressof(ctypeptr, cdata, offset) def _typeoffsetof(self, ctype, field_or_index, *fields_or_indexes): ctype, offset = self._backend.typeoffsetof(ctype, field_or_index) for field1 in fields_or_indexes: ctype, offset1 = self._backend.typeoffsetof(ctype, field1, 1) offset += offset1 return ctype, offset def include(self, ffi_to_include): """Includes the typedefs, structs, unions and enums defined in another FFI instance. Usage is similar to a #include in C, where a part of the program might include types defined in another part for its own usage. Note that the include() method has no effect on functions, constants and global variables, which must anyway be accessed directly from the lib object returned by the original FFI instance. """ if not isinstance(ffi_to_include, FFI): raise TypeError("ffi.include() expects an argument that is also of" " type cffi.FFI, not %r" % ( type(ffi_to_include).__name__,)) if ffi_to_include is self: raise ValueError("self.include(self)") with ffi_to_include._lock: with self._lock: self._parser.include(ffi_to_include._parser) self._cdefsources.append('[') self._cdefsources.extend(ffi_to_include._cdefsources) self._cdefsources.append(']') self._included_ffis.append(ffi_to_include) def new_handle(self, x): return self._backend.newp_handle(self.BVoidP, x) def from_handle(self, x): return self._backend.from_handle(x) def release(self, x): self._backend.release(x) def set_unicode(self, enabled_flag): """Windows: if 'enabled_flag' is True, enable the UNICODE and _UNICODE defines in C, and declare the types like TCHAR and LPTCSTR to be (pointers to) wchar_t. If 'enabled_flag' is False, declare these types to be (pointers to) plain 8-bit characters. This is mostly for backward compatibility; you usually want True. 
""" if self._windows_unicode is not None: raise ValueError("set_unicode() can only be called once") enabled_flag = bool(enabled_flag) if enabled_flag: self.cdef("typedef wchar_t TBYTE;" "typedef wchar_t TCHAR;" "typedef const wchar_t *LPCTSTR;" "typedef const wchar_t *PCTSTR;" "typedef wchar_t *LPTSTR;" "typedef wchar_t *PTSTR;" "typedef TBYTE *PTBYTE;" "typedef TCHAR *PTCHAR;") else: self.cdef("typedef char TBYTE;" "typedef char TCHAR;" "typedef const char *LPCTSTR;" "typedef const char *PCTSTR;" "typedef char *LPTSTR;" "typedef char *PTSTR;" "typedef TBYTE *PTBYTE;" "typedef TCHAR *PTCHAR;") self._windows_unicode = enabled_flag def _apply_windows_unicode(self, kwds): defmacros = kwds.get('define_macros', ()) if not isinstance(defmacros, (list, tuple)): raise TypeError("'define_macros' must be a list or tuple") defmacros = list(defmacros) + [('UNICODE', '1'), ('_UNICODE', '1')] kwds['define_macros'] = defmacros def _apply_embedding_fix(self, kwds): # must include an argument like "-lpython2.7" for the compiler def ensure(key, value): lst = kwds.setdefault(key, []) if value not in lst: lst.append(value) # if '__pypy__' in sys.builtin_module_names: import os if sys.platform == "win32": # we need 'libpypy-c.lib'. Current distributions of # pypy (>= 4.1) contain it as 'libs/python27.lib'. pythonlib = "python{0[0]}{0[1]}".format(sys.version_info) if hasattr(sys, 'prefix'): ensure('library_dirs', os.path.join(sys.prefix, 'libs')) else: # we need 'libpypy-c.{so,dylib}', which should be by # default located in 'sys.prefix/bin' for installed # systems. if sys.version_info < (3,): pythonlib = "pypy-c" else: pythonlib = "pypy3-c" if hasattr(sys, 'prefix'): ensure('library_dirs', os.path.join(sys.prefix, 'bin')) # On uninstalled pypy's, the libpypy-c is typically found in # .../pypy/goal/. if hasattr(sys, 'prefix'): ensure('library_dirs', os.path.join(sys.prefix, 'pypy', 'goal')) else: if sys.platform == "win32": template = "python%d%d" if hasattr(sys, 'gettotalrefcount'): template += '_d' else: try: import sysconfig except ImportError: # 2.6 from distutils import sysconfig template = "python%d.%d" if sysconfig.get_config_var('DEBUG_EXT'): template += sysconfig.get_config_var('DEBUG_EXT') pythonlib = (template % (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff)) if hasattr(sys, 'abiflags'): pythonlib += sys.abiflags ensure('libraries', pythonlib) if sys.platform == "win32": ensure('extra_link_args', '/MANIFEST') def set_source(self, module_name, source, source_extension='.c', **kwds): import os if hasattr(self, '_assigned_source'): raise ValueError("set_source() cannot be called several times " "per ffi object") if not isinstance(module_name, basestring): raise TypeError("'module_name' must be a string") if os.sep in module_name or (os.altsep and os.altsep in module_name): raise ValueError("'module_name' must not contain '/': use a dotted " "name to make a 'package.module' location") self._assigned_source = (str(module_name), source, source_extension, kwds) def set_source_pkgconfig(self, module_name, pkgconfig_libs, source, source_extension='.c', **kwds): from . 
import pkgconfig if not isinstance(pkgconfig_libs, list): raise TypeError("the pkgconfig_libs argument must be a list " "of package names") kwds2 = pkgconfig.flags_from_pkgconfig(pkgconfig_libs) pkgconfig.merge_flags(kwds, kwds2) self.set_source(module_name, source, source_extension, **kwds) def distutils_extension(self, tmpdir='build', verbose=True): from distutils.dir_util import mkpath from .recompiler import recompile # if not hasattr(self, '_assigned_source'): if hasattr(self, 'verifier'): # fallback, 'tmpdir' ignored return self.verifier.get_extension() raise ValueError("set_source() must be called before" " distutils_extension()") module_name, source, source_extension, kwds = self._assigned_source if source is None: raise TypeError("distutils_extension() is only for C extension " "modules, not for dlopen()-style pure Python " "modules") mkpath(tmpdir) ext, updated = recompile(self, module_name, source, tmpdir=tmpdir, extradir=tmpdir, source_extension=source_extension, call_c_compiler=False, **kwds) if verbose: if updated: sys.stderr.write("regenerated: %r\n" % (ext.sources[0],)) else: sys.stderr.write("not modified: %r\n" % (ext.sources[0],)) return ext def emit_c_code(self, filename): from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before emit_c_code()") module_name, source, source_extension, kwds = self._assigned_source if source is None: raise TypeError("emit_c_code() is only for C extension modules, " "not for dlopen()-style pure Python modules") recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) def emit_python_code(self, filename): from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before emit_c_code()") module_name, source, source_extension, kwds = self._assigned_source if source is not None: raise TypeError("emit_python_code() is only for dlopen()-style " "pure Python modules, not for C extension modules") recompile(self, module_name, source, c_file=filename, call_c_compiler=False, **kwds) def compile(self, tmpdir='.', verbose=0, target=None, debug=None): """The 'target' argument gives the final file name of the compiled DLL. Use '*' to force distutils' choice, suitable for regular CPython C API modules. Use a file name ending in '.*' to ask for the system's default extension for dynamic libraries (.so/.dll/.dylib). The default is '*' when building a non-embedded C API extension, and (module_name + '.*') when building an embedded library. """ from .recompiler import recompile # if not hasattr(self, '_assigned_source'): raise ValueError("set_source() must be called before compile()") module_name, source, source_extension, kwds = self._assigned_source return recompile(self, module_name, source, tmpdir=tmpdir, target=target, source_extension=source_extension, compiler_verbose=verbose, debug=debug, **kwds) def init_once(self, func, tag): # Read _init_once_cache[tag], which is either (False, lock) if # we're calling the function now in some thread, or (True, result). # Don't call setdefault() in most cases, to avoid allocating and # immediately freeing a lock; but still use setdefaut() to avoid # races. try: x = self._init_once_cache[tag] except KeyError: x = self._init_once_cache.setdefault(tag, (False, allocate_lock())) # Common case: we got (True, result), so we return the result. if x[0]: return x[1] # Else, it's a lock. Acquire it to serialize the following tests. 
with x[1]: # Read again from _init_once_cache the current status. x = self._init_once_cache[tag] if x[0]: return x[1] # Call the function and store the result back. result = func() self._init_once_cache[tag] = (True, result) return result def embedding_init_code(self, pysource): if self._embedding: raise ValueError("embedding_init_code() can only be called once") # fix 'pysource' before it gets dumped into the C file: # - remove empty lines at the beginning, so it starts at "line 1" # - dedent, if all non-empty lines are indented # - check for SyntaxErrors import re match = re.match(r'\s*\n', pysource) if match: pysource = pysource[match.end():] lines = pysource.splitlines() or [''] prefix = re.match(r'\s*', lines[0]).group() for i in range(1, len(lines)): line = lines[i] if line.rstrip(): while not line.startswith(prefix): prefix = prefix[:-1] i = len(prefix) lines = [line[i:]+'\n' for line in lines] pysource = ''.join(lines) # compile(pysource, "cffi_init", "exec") # self._embedding = pysource def def_extern(self, *args, **kwds): raise ValueError("ffi.def_extern() is only available on API-mode FFI " "objects") def list_types(self): """Returns the user type names known to this FFI instance. This returns a tuple containing three lists of names: (typedef_names, names_of_structs, names_of_unions) """ typedefs = [] structs = [] unions = [] for key in self._parser._declarations: if key.startswith('typedef '): typedefs.append(key[8:]) elif key.startswith('struct '): structs.append(key[7:]) elif key.startswith('union '): unions.append(key[6:]) typedefs.sort() structs.sort() unions.sort() return (typedefs, structs, unions) def _load_backend_lib(backend, name, flags): import os if not isinstance(name, basestring): if sys.platform != "win32" or name is not None: return backend.load_library(name, flags) name = "c" # Windows: load_library(None) fails, but this works # on Python 2 (backward compatibility hack only) first_error = None if '.' in name or '/' in name or os.sep in name: try: return backend.load_library(name, flags) except OSError as e: first_error = e import ctypes.util path = ctypes.util.find_library(name) if path is None: if name == "c" and sys.platform == "win32" and sys.version_info >= (3,): raise OSError("dlopen(None) cannot work on Windows for Python 3 " "(see http://bugs.python.org/issue23606)") msg = ("ctypes.util.find_library() did not manage " "to locate a library called %r" % (name,)) if first_error is not None: msg = "%s. 
Additionally, %s" % (first_error, msg) raise OSError(msg) return backend.load_library(path, flags) def _make_ffi_library(ffi, libname, flags): backend = ffi._backend backendlib = _load_backend_lib(backend, libname, flags) # def accessor_function(name): key = 'function ' + name tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) value = backendlib.load_function(BType, name) library.__dict__[name] = value # def accessor_variable(name): key = 'variable ' + name tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) read_variable = backendlib.read_variable write_variable = backendlib.write_variable setattr(FFILibrary, name, property( lambda self: read_variable(BType, name), lambda self, value: write_variable(BType, name, value))) # def addressof_var(name): try: return addr_variables[name] except KeyError: with ffi._lock: if name not in addr_variables: key = 'variable ' + name tp, _ = ffi._parser._declarations[key] BType = ffi._get_cached_btype(tp) if BType.kind != 'array': BType = model.pointer_cache(ffi, BType) p = backendlib.load_function(BType, name) addr_variables[name] = p return addr_variables[name] # def accessor_constant(name): raise NotImplementedError("non-integer constant '%s' cannot be " "accessed from a dlopen() library" % (name,)) # def accessor_int_constant(name): library.__dict__[name] = ffi._parser._int_constants[name] # accessors = {} accessors_version = [False] addr_variables = {} # def update_accessors(): if accessors_version[0] is ffi._cdef_version: return # for key, (tp, _) in ffi._parser._declarations.items(): if not isinstance(tp, model.EnumType): tag, name = key.split(' ', 1) if tag == 'function': accessors[name] = accessor_function elif tag == 'variable': accessors[name] = accessor_variable elif tag == 'constant': accessors[name] = accessor_constant else: for i, enumname in enumerate(tp.enumerators): def accessor_enum(name, tp=tp, i=i): tp.check_not_partial() library.__dict__[name] = tp.enumvalues[i] accessors[enumname] = accessor_enum for name in ffi._parser._int_constants: accessors.setdefault(name, accessor_int_constant) accessors_version[0] = ffi._cdef_version # def make_accessor(name): with ffi._lock: if name in library.__dict__ or name in FFILibrary.__dict__: return # added by another thread while waiting for the lock if name not in accessors: update_accessors() if name not in accessors: raise AttributeError(name) accessors[name](name) # class FFILibrary(object): def __getattr__(self, name): make_accessor(name) return getattr(self, name) def __setattr__(self, name, value): try: property = getattr(self.__class__, name) except AttributeError: make_accessor(name) setattr(self, name, value) else: property.__set__(self, value) def __dir__(self): with ffi._lock: update_accessors() return accessors.keys() def __addressof__(self, name): if name in library.__dict__: return library.__dict__[name] if name in FFILibrary.__dict__: return addressof_var(name) make_accessor(name) if name in library.__dict__: return library.__dict__[name] if name in FFILibrary.__dict__: return addressof_var(name) raise AttributeError("cffi library has no function or " "global variable named '%s'" % (name,)) def __cffi_close__(self): backendlib.close_lib() self.__dict__.clear() # if isinstance(libname, basestring): try: if not isinstance(libname, str): # unicode, on Python 2 libname = libname.encode('utf-8') FFILibrary.__name__ = 'FFILibrary_%s' % libname except UnicodeError: pass library = FFILibrary() return library, library.__dict__ def 
_builtin_function_type(func): # a hack to make at least ffi.typeof(builtin_function) work, # if the builtin function was obtained by 'vengine_cpy'. import sys try: module = sys.modules[func.__module__] ffi = module._cffi_original_ffi types_of_builtin_funcs = module._cffi_types_of_builtin_funcs tp = types_of_builtin_funcs[func] except (KeyError, AttributeError, TypeError): return None else: with ffi._lock: return ffi._get_cached_btype(tp)
42,064
Python
42.545549
271
0.574315
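The list_types() method in the file above documents its return value as a tuple of three sorted name lists (typedefs, structs, unions). A minimal sketch of how that reads in practice; the cdef declarations below are made up for illustration.

import cffi

ffi = cffi.FFI()
ffi.cdef("""
    typedef int my_int_t;              /* hypothetical typedef */
    struct point { int x, y; };        /* hypothetical struct  */
    union number { int i; float f; };  /* hypothetical union   */
""")

# list_types() walks the parser declarations and returns each bucket sorted.
typedefs, structs, unions = ffi.list_types()
print(typedefs)   # expected: ['my_int_t']
print(structs)    # expected: ['point']
print(unions)     # expected: ['number']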
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/setuptools_ext.py
import os import sys try: basestring except NameError: # Python 3.x basestring = str def error(msg): from distutils.errors import DistutilsSetupError raise DistutilsSetupError(msg) def execfile(filename, glob): # We use execfile() (here rewritten for Python 3) instead of # __import__() to load the build script. The problem with # a normal import is that in some packages, the intermediate # __init__.py files may already try to import the file that # we are generating. with open(filename) as f: src = f.read() src += '\n' # Python 2.6 compatibility code = compile(src, filename, 'exec') exec(code, glob, glob) def add_cffi_module(dist, mod_spec): from cffi.api import FFI if not isinstance(mod_spec, basestring): error("argument to 'cffi_modules=...' must be a str or a list of str," " not %r" % (type(mod_spec).__name__,)) mod_spec = str(mod_spec) try: build_file_name, ffi_var_name = mod_spec.split(':') except ValueError: error("%r must be of the form 'path/build.py:ffi_variable'" % (mod_spec,)) if not os.path.exists(build_file_name): ext = '' rewritten = build_file_name.replace('.', '/') + '.py' if os.path.exists(rewritten): ext = ' (rewrite cffi_modules to [%r])' % ( rewritten + ':' + ffi_var_name,) error("%r does not name an existing file%s" % (build_file_name, ext)) mod_vars = {'__name__': '__cffi__', '__file__': build_file_name} execfile(build_file_name, mod_vars) try: ffi = mod_vars[ffi_var_name] except KeyError: error("%r: object %r not found in module" % (mod_spec, ffi_var_name)) if not isinstance(ffi, FFI): ffi = ffi() # maybe it's a function instead of directly an ffi if not isinstance(ffi, FFI): error("%r is not an FFI instance (got %r)" % (mod_spec, type(ffi).__name__)) if not hasattr(ffi, '_assigned_source'): error("%r: the set_source() method was not called" % (mod_spec,)) module_name, source, source_extension, kwds = ffi._assigned_source if ffi._windows_unicode: kwds = kwds.copy() ffi._apply_windows_unicode(kwds) if source is None: _add_py_module(dist, ffi, module_name) else: _add_c_module(dist, ffi, module_name, source, source_extension, kwds) def _set_py_limited_api(Extension, kwds): """ Add py_limited_api to kwds if setuptools >= 26 is in use. Do not alter the setting if it already exists. Setuptools takes care of ignoring the flag on Python 2 and PyPy. CPython itself should ignore the flag in a debugging version (by not listing .abi3.so in the extensions it supports), but it doesn't so far, creating troubles. That's why we check for "not hasattr(sys, 'gettotalrefcount')" (the 2.7 compatible equivalent of 'd' not in sys.abiflags). (http://bugs.python.org/issue28401) On Windows, with CPython <= 3.4, it's better not to use py_limited_api because virtualenv *still* doesn't copy PYTHON3.DLL on these versions. Recently (2020) we started shipping only >= 3.5 wheels, though. So we'll give it another try and set py_limited_api on Windows >= 3.5. """ from cffi import recompiler if ('py_limited_api' not in kwds and not hasattr(sys, 'gettotalrefcount') and recompiler.USE_LIMITED_API): import setuptools try: setuptools_major_version = int(setuptools.__version__.partition('.')[0]) if setuptools_major_version >= 26: kwds['py_limited_api'] = True except ValueError: # certain development versions of setuptools # If we don't know the version number of setuptools, we # try to set 'py_limited_api' anyway. At worst, we get a # warning. 
kwds['py_limited_api'] = True return kwds def _add_c_module(dist, ffi, module_name, source, source_extension, kwds): from distutils.core import Extension # We are a setuptools extension. Need this build_ext for py_limited_api. from setuptools.command.build_ext import build_ext from distutils.dir_util import mkpath from distutils import log from cffi import recompiler allsources = ['$PLACEHOLDER'] allsources.extend(kwds.pop('sources', [])) kwds = _set_py_limited_api(Extension, kwds) ext = Extension(name=module_name, sources=allsources, **kwds) def make_mod(tmpdir, pre_run=None): c_file = os.path.join(tmpdir, module_name + source_extension) log.info("generating cffi module %r" % c_file) mkpath(tmpdir) # a setuptools-only, API-only hook: called with the "ext" and "ffi" # arguments just before we turn the ffi into C code. To use it, # subclass the 'distutils.command.build_ext.build_ext' class and # add a method 'def pre_run(self, ext, ffi)'. if pre_run is not None: pre_run(ext, ffi) updated = recompiler.make_c_source(ffi, module_name, source, c_file) if not updated: log.info("already up-to-date") return c_file if dist.ext_modules is None: dist.ext_modules = [] dist.ext_modules.append(ext) base_class = dist.cmdclass.get('build_ext', build_ext) class build_ext_make_mod(base_class): def run(self): if ext.sources[0] == '$PLACEHOLDER': pre_run = getattr(self, 'pre_run', None) ext.sources[0] = make_mod(self.build_temp, pre_run) base_class.run(self) dist.cmdclass['build_ext'] = build_ext_make_mod # NB. multiple runs here will create multiple 'build_ext_make_mod' # classes. Even in this case the 'build_ext' command should be # run once; but just in case, the logic above does nothing if # called again. def _add_py_module(dist, ffi, module_name): from distutils.dir_util import mkpath from setuptools.command.build_py import build_py from setuptools.command.build_ext import build_ext from distutils import log from cffi import recompiler def generate_mod(py_file): log.info("generating cffi module %r" % py_file) mkpath(os.path.dirname(py_file)) updated = recompiler.make_py_source(ffi, module_name, py_file) if not updated: log.info("already up-to-date") base_class = dist.cmdclass.get('build_py', build_py) class build_py_make_mod(base_class): def run(self): base_class.run(self) module_path = module_name.split('.') module_path[-1] += '.py' generate_mod(os.path.join(self.build_lib, *module_path)) def get_source_files(self): # This is called from 'setup.py sdist' only. Exclude # the generate .py module in this case. saved_py_modules = self.py_modules try: if saved_py_modules: self.py_modules = [m for m in saved_py_modules if m != module_name] return base_class.get_source_files(self) finally: self.py_modules = saved_py_modules dist.cmdclass['build_py'] = build_py_make_mod # distutils and setuptools have no notion I could find of a # generated python module. If we don't add module_name to # dist.py_modules, then things mostly work but there are some # combination of options (--root and --record) that will miss # the module. So we add it here, which gives a few apparently # harmless warnings about not finding the file outside the # build directory. # Then we need to hack more in get_source_files(); see above. 
if dist.py_modules is None: dist.py_modules = [] dist.py_modules.append(module_name) # the following is only for "build_ext -i" base_class_2 = dist.cmdclass.get('build_ext', build_ext) class build_ext_make_mod(base_class_2): def run(self): base_class_2.run(self) if self.inplace: # from get_ext_fullpath() in distutils/command/build_ext.py module_path = module_name.split('.') package = '.'.join(module_path[:-1]) build_py = self.get_finalized_command('build_py') package_dir = build_py.get_package_dir(package) file_name = module_path[-1] + '.py' generate_mod(os.path.join(package_dir, file_name)) dist.cmdclass['build_ext'] = build_ext_make_mod def cffi_modules(dist, attr, value): assert attr == 'cffi_modules' if isinstance(value, basestring): value = [value] for cffi_module in value: add_cffi_module(dist, cffi_module)
8,931
Python
39.6
84
0.610122
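add_cffi_module() above expects every cffi_modules entry to have the form 'path/build.py:ffi_variable', where the named variable is (or returns) an FFI instance on which set_source() was already called. A hedged sketch of a matching pair of files; the module and file names are illustrative, not taken from the source.

# example_build.py  (illustrative build script)
import cffi

ffi = cffi.FFI()
ffi.cdef("int add(int, int);")
ffi.set_source("_example", "static int add(int a, int b) { return a + b; }")

# setup.py  (refers to the variable above as 'example_build.py:ffi')
from setuptools import setup

setup(
    name="example",
    setup_requires=["cffi"],
    cffi_modules=["example_build.py:ffi"],
    install_requires=["cffi"],
)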
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/lock.py
import sys

if sys.version_info < (3,):
    try:
        from thread import allocate_lock
    except ImportError:
        from dummy_thread import allocate_lock
else:
    try:
        from _thread import allocate_lock
    except ImportError:
        from _dummy_thread import allocate_lock


##import sys
##l1 = allocate_lock

##class allocate_lock(object):
##    def __init__(self):
##        self._real = l1()
##    def __enter__(self):
##        for i in range(4, 0, -1):
##            print sys._getframe(i).f_code
##            print
##        return self._real.__enter__()
##    def __exit__(self, *args):
##        return self._real.__exit__(*args)
##    def acquire(self, f):
##        assert f is False
##        return self._real.acquire(f)
747
Python
23.129032
47
0.56091
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/cffi/ffiplatform.py
import sys, os from .error import VerificationError LIST_OF_FILE_NAMES = ['sources', 'include_dirs', 'library_dirs', 'extra_objects', 'depends'] def get_extension(srcfilename, modname, sources=(), **kwds): _hack_at_distutils() from distutils.core import Extension allsources = [srcfilename] for src in sources: allsources.append(os.path.normpath(src)) return Extension(name=modname, sources=allsources, **kwds) def compile(tmpdir, ext, compiler_verbose=0, debug=None): """Compile a C extension module using distutils.""" _hack_at_distutils() saved_environ = os.environ.copy() try: outputfilename = _build(tmpdir, ext, compiler_verbose, debug) outputfilename = os.path.abspath(outputfilename) finally: # workaround for a distutils bugs where some env vars can # become longer and longer every time it is used for key, value in saved_environ.items(): if os.environ.get(key) != value: os.environ[key] = value return outputfilename def _build(tmpdir, ext, compiler_verbose=0, debug=None): # XXX compact but horrible :-( from distutils.core import Distribution import distutils.errors, distutils.log # dist = Distribution({'ext_modules': [ext]}) dist.parse_config_files() options = dist.get_option_dict('build_ext') if debug is None: debug = sys.flags.debug options['debug'] = ('ffiplatform', debug) options['force'] = ('ffiplatform', True) options['build_lib'] = ('ffiplatform', tmpdir) options['build_temp'] = ('ffiplatform', tmpdir) # try: old_level = distutils.log.set_threshold(0) or 0 try: distutils.log.set_verbosity(compiler_verbose) dist.run_command('build_ext') cmd_obj = dist.get_command_obj('build_ext') [soname] = cmd_obj.get_outputs() finally: distutils.log.set_threshold(old_level) except (distutils.errors.CompileError, distutils.errors.LinkError) as e: raise VerificationError('%s: %s' % (e.__class__.__name__, e)) # return soname try: from os.path import samefile except ImportError: def samefile(f1, f2): return os.path.abspath(f1) == os.path.abspath(f2) def maybe_relative_path(path): if not os.path.isabs(path): return path # already relative dir = path names = [] while True: prevdir = dir dir, name = os.path.split(prevdir) if dir == prevdir or not dir: return path # failed to make it relative names.append(name) try: if samefile(dir, os.curdir): names.reverse() return os.path.join(*names) except OSError: pass # ____________________________________________________________ try: int_or_long = (int, long) import cStringIO except NameError: int_or_long = int # Python 3 import io as cStringIO def _flatten(x, f): if isinstance(x, str): f.write('%ds%s' % (len(x), x)) elif isinstance(x, dict): keys = sorted(x.keys()) f.write('%dd' % len(keys)) for key in keys: _flatten(key, f) _flatten(x[key], f) elif isinstance(x, (list, tuple)): f.write('%dl' % len(x)) for value in x: _flatten(value, f) elif isinstance(x, int_or_long): f.write('%di' % (x,)) else: raise TypeError( "the keywords to verify() contains unsupported object %r" % (x,)) def flatten(x): f = cStringIO.StringIO() _flatten(x, f) return f.getvalue() def _hack_at_distutils(): # Windows-only workaround for some configurations: see # https://bugs.python.org/issue23246 (Python 2.7 with # a specific MS compiler suite download) if sys.platform == "win32": try: import setuptools # for side-effects, patches distutils except ImportError: pass
4,046
Python
30.617187
77
0.587988
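The _flatten()/flatten() pair above serializes the keyword arguments passed to verify() into a deterministic string: length-prefixed strings, and 'd'/'l'/'i' markers for dicts, sequences and integers, so the result can serve as a cache key. A small illustration; the keyword dict is invented for the example and the string shown is what the encoding rules above should produce.

from cffi.ffiplatform import flatten

kwds = {"libraries": ["m"], "define_macros": [("X", 1)]}
# dict of 2 sorted keys -> '2d'; each string -> '<len>s<text>';
# lists/tuples -> '<len>l'; integers -> '<value>i'
print(flatten(kwds))
# expected: '2d13sdefine_macros1l2l1sX1i9slibraries1l1sm'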
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/_version.py
__version__ = '3.1.1'
23
Python
6.999998
21
0.434783
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/__init__.py
from ._cares import ffi as _ffi, lib as _lib import _cffi_backend # hint for bundler tools if _lib.ARES_SUCCESS != _lib.ares_library_init(_lib.ARES_LIB_INIT_ALL): raise RuntimeError('Could not initialize c-ares') from . import errno from .utils import ascii_bytes, maybe_str, parse_name from ._version import __version__ import collections.abc import socket import math import functools import sys exported_pycares_symbols = [ # Flag values 'ARES_FLAG_USEVC', 'ARES_FLAG_PRIMARY', 'ARES_FLAG_IGNTC', 'ARES_FLAG_NORECURSE', 'ARES_FLAG_STAYOPEN', 'ARES_FLAG_NOSEARCH', 'ARES_FLAG_NOALIASES', 'ARES_FLAG_NOCHECKRESP', # Nameinfo flag values 'ARES_NI_NOFQDN', 'ARES_NI_NUMERICHOST', 'ARES_NI_NAMEREQD', 'ARES_NI_NUMERICSERV', 'ARES_NI_DGRAM', 'ARES_NI_TCP', 'ARES_NI_UDP', 'ARES_NI_SCTP', 'ARES_NI_DCCP', 'ARES_NI_NUMERICSCOPE', 'ARES_NI_LOOKUPHOST', 'ARES_NI_LOOKUPSERVICE', 'ARES_NI_IDN', 'ARES_NI_IDN_ALLOW_UNASSIGNED', 'ARES_NI_IDN_USE_STD3_ASCII_RULES', # Bad socket 'ARES_SOCKET_BAD', ] for symbol in exported_pycares_symbols: globals()[symbol] = getattr(_lib, symbol) exported_pycares_symbols_map = { # Query types "QUERY_TYPE_A" : "T_A", "QUERY_TYPE_AAAA" : "T_AAAA", "QUERY_TYPE_ANY" : "T_ANY", "QUERY_TYPE_CNAME" : "T_CNAME", "QUERY_TYPE_MX" : "T_MX", "QUERY_TYPE_NAPTR" : "T_NAPTR", "QUERY_TYPE_NS" : "T_NS", "QUERY_TYPE_PTR" : "T_PTR", "QUERY_TYPE_SOA" : "T_SOA", "QUERY_TYPE_SRV" : "T_SRV", "QUERY_TYPE_TXT" : "T_TXT", } for k, v in exported_pycares_symbols_map.items(): globals()[k] = getattr(_lib, v) globals()['ARES_VERSION'] = maybe_str(_ffi.string(_lib.ares_version(_ffi.NULL))) PYCARES_ADDRTTL_SIZE = 256 class AresError(Exception): pass # callback helpers _global_set = set() @_ffi.def_extern() def _sock_state_cb(data, socket_fd, readable, writable): sock_state_cb = _ffi.from_handle(data) sock_state_cb(socket_fd, readable, writable) @_ffi.def_extern() def _host_cb(arg, status, timeouts, hostent): callback = _ffi.from_handle(arg) _global_set.discard(arg) if status != _lib.ARES_SUCCESS: result = None else: result = ares_host_result(hostent) status = None callback(result, status) @_ffi.def_extern() def _nameinfo_cb(arg, status, timeouts, node, service): callback = _ffi.from_handle(arg) _global_set.discard(arg) if status != _lib.ARES_SUCCESS: result = None else: result = ares_nameinfo_result(node, service) status = None callback(result, status) @_ffi.def_extern() def _query_cb(arg, status, timeouts, abuf, alen): callback, query_type = _ffi.from_handle(arg) _global_set.discard(arg) if status == _lib.ARES_SUCCESS: if query_type == _lib.T_ANY: result = [] for qtype in (_lib.T_A, _lib.T_AAAA, _lib.T_CNAME, _lib.T_MX, _lib.T_NAPTR, _lib.T_NS, _lib.T_PTR, _lib.T_SOA, _lib.T_SRV, _lib.T_TXT): r, status = parse_result(qtype, abuf, alen) if status not in (None, _lib.ARES_ENODATA, _lib.ARES_EBADRESP): result = None break if r is not None: if isinstance(r, collections.abc.Iterable): result.extend(r) else: result.append(r) else: status = None else: result, status = parse_result(query_type, abuf, alen) else: result = None callback(result, status) def parse_result(query_type, abuf, alen): if query_type == _lib.T_A: addrttls = _ffi.new("struct ares_addrttl[]", PYCARES_ADDRTTL_SIZE) naddrttls = _ffi.new("int*", PYCARES_ADDRTTL_SIZE) parse_status = _lib.ares_parse_a_reply(abuf, alen, _ffi.NULL, addrttls, naddrttls) if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [ares_query_a_result(addrttls[i]) for i in range(naddrttls[0])] status = None elif query_type == _lib.T_AAAA: addrttls = 
_ffi.new("struct ares_addr6ttl[]", PYCARES_ADDRTTL_SIZE) naddrttls = _ffi.new("int*", PYCARES_ADDRTTL_SIZE) parse_status = _lib.ares_parse_aaaa_reply(abuf, alen, _ffi.NULL, addrttls, naddrttls) if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [ares_query_aaaa_result(addrttls[i]) for i in range(naddrttls[0])] status = None elif query_type == _lib.T_CNAME: host = _ffi.new("struct hostent **") parse_status = _lib.ares_parse_a_reply(abuf, alen, host, _ffi.NULL, _ffi.NULL) if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = ares_query_cname_result(host[0]) _lib.ares_free_hostent(host[0]) status = None elif query_type == _lib.T_MX: mx_reply = _ffi.new("struct ares_mx_reply **") parse_status = _lib.ares_parse_mx_reply(abuf, alen, mx_reply); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [] mx_reply_ptr = mx_reply[0] while mx_reply_ptr != _ffi.NULL: result.append(ares_query_mx_result(mx_reply_ptr)) mx_reply_ptr = mx_reply_ptr.next _lib.ares_free_data(mx_reply[0]) status = None elif query_type == _lib.T_NAPTR: naptr_reply = _ffi.new("struct ares_naptr_reply **") parse_status = _lib.ares_parse_naptr_reply(abuf, alen, naptr_reply); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [] naptr_reply_ptr = naptr_reply[0] while naptr_reply_ptr != _ffi.NULL: result.append(ares_query_naptr_result(naptr_reply_ptr)) naptr_reply_ptr = naptr_reply_ptr.next _lib.ares_free_data(naptr_reply[0]) status = None elif query_type == _lib.T_NS: hostent = _ffi.new("struct hostent **") parse_status = _lib.ares_parse_ns_reply(abuf, alen, hostent); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [] host = hostent[0] i = 0 while host.h_aliases[i] != _ffi.NULL: result.append(ares_query_ns_result(host.h_aliases[i])) i += 1 _lib.ares_free_hostent(host) status = None elif query_type == _lib.T_PTR: hostent = _ffi.new("struct hostent **") hostttl = _ffi.new("int*", PYCARES_ADDRTTL_SIZE) parse_status = _lib.ares_parse_ptr_reply(abuf, alen, _ffi.NULL, 0, socket.AF_UNSPEC, hostent, hostttl); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: aliases = [] host = hostent[0] i = 0 while host.h_aliases[i] != _ffi.NULL: aliases.append(maybe_str(_ffi.string(host.h_aliases[i]))) i += 1 result = ares_query_ptr_result(host, hostttl[0], aliases) _lib.ares_free_hostent(host) status = None elif query_type == _lib.T_SOA: soa_reply = _ffi.new("struct ares_soa_reply **") parse_status = _lib.ares_parse_soa_reply(abuf, alen, soa_reply); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = ares_query_soa_result(soa_reply[0]) _lib.ares_free_data(soa_reply[0]) status = None elif query_type == _lib.T_SRV: srv_reply = _ffi.new("struct ares_srv_reply **") parse_status = _lib.ares_parse_srv_reply(abuf, alen, srv_reply); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [] srv_reply_ptr = srv_reply[0] while srv_reply_ptr != _ffi.NULL: result.append(ares_query_srv_result(srv_reply_ptr)) srv_reply_ptr = srv_reply_ptr.next _lib.ares_free_data(srv_reply[0]) status = None elif query_type == _lib.T_TXT: txt_reply = _ffi.new("struct ares_txt_ext **") parse_status = _lib.ares_parse_txt_reply_ext(abuf, alen, txt_reply); if parse_status != _lib.ARES_SUCCESS: result = None status = parse_status else: result = [] txt_reply_ptr = txt_reply[0] tmp_obj = None while True: if 
txt_reply_ptr == _ffi.NULL: if tmp_obj is not None: result.append(ares_query_txt_result(tmp_obj)) break if txt_reply_ptr.record_start == 1: if tmp_obj is not None: result.append(ares_query_txt_result(tmp_obj)) tmp_obj = ares_query_txt_result_chunk(txt_reply_ptr) else: new_chunk = ares_query_txt_result_chunk(txt_reply_ptr) tmp_obj.text += new_chunk.text txt_reply_ptr = txt_reply_ptr.next _lib.ares_free_data(txt_reply[0]) status = None else: raise ValueError("invalid query type specified") return result, status class Channel: __qtypes__ = (_lib.T_A, _lib.T_AAAA, _lib.T_ANY, _lib.T_CNAME, _lib.T_MX, _lib.T_NAPTR, _lib.T_NS, _lib.T_PTR, _lib.T_SOA, _lib.T_SRV, _lib.T_TXT) def __init__(self, flags = None, timeout = None, tries = None, ndots = None, tcp_port = None, udp_port = None, servers = None, domains = None, lookups = None, sock_state_cb = None, socket_send_buffer_size = None, socket_receive_buffer_size = None, rotate = False, local_ip = None, local_dev = None, resolvconf_path = None): channel = _ffi.new("ares_channel *") options = _ffi.new("struct ares_options *") optmask = 0 if flags is not None: options.flags = flags optmask = optmask | _lib.ARES_OPT_FLAGS if timeout is not None: options.timeout = int(timeout * 1000) optmask = optmask | _lib.ARES_OPT_TIMEOUTMS if tries is not None: options.tries = tries optmask = optmask | _lib.ARES_OPT_TRIES if ndots is not None: options.ndots = ndots optmask = optmask | _lib.ARES_OPT_NDOTS if tcp_port is not None: options.tcp_port = tcp_port optmask = optmask | _lib.ARES_OPT_TCP_PORT if udp_port is not None: options.udp_port = udp_port optmask = optmask | _lib.ARES_OPT_UDP_PORT if socket_send_buffer_size is not None: options.socket_send_buffer_size = socket_send_buffer_size optmask = optmask | _lib.ARES_OPT_SOCK_SNDBUF if socket_receive_buffer_size is not None: options.socket_receive_buffer_size = socket_receive_buffer_size optmask = optmask | _lib.ARES_OPT_SOCK_RCVBUF if sock_state_cb: if not callable(sock_state_cb): raise TypeError("sock_state_cb is not callable") userdata = _ffi.new_handle(sock_state_cb) # This must be kept alive while the channel is alive. 
self._sock_state_cb_handle = userdata options.sock_state_cb = _lib._sock_state_cb options.sock_state_cb_data = userdata optmask = optmask | _lib.ARES_OPT_SOCK_STATE_CB if lookups: options.lookups = _ffi.new('char[]', ascii_bytes(lookups)) optmask = optmask | _lib.ARES_OPT_LOOKUPS if domains: strs = [_ffi.new("char[]", ascii_bytes(i)) for i in domains] c = _ffi.new("char *[%d]" % (len(domains) + 1)) for i in range(len(domains)): c[i] = strs[i] options.domains = c options.ndomains = len(domains) optmask = optmask | _lib.ARES_OPT_DOMAINS if rotate: optmask = optmask | _lib.ARES_OPT_ROTATE if resolvconf_path is not None: optmask = optmask | _lib.ARES_OPT_RESOLVCONF options.resolvconf_path = _ffi.new('char[]', ascii_bytes(resolvconf_path)) r = _lib.ares_init_options(channel, options, optmask) if r != _lib.ARES_SUCCESS: raise AresError('Failed to initialize c-ares channel') self._channel = _ffi.gc(channel, lambda x: _lib.ares_destroy(x[0])) if servers: self.servers = servers if local_ip: self.set_local_ip(local_ip) if local_dev: self.set_local_dev(local_dev) def cancel(self): _lib.ares_cancel(self._channel[0]) @property def servers(self): servers = _ffi.new("struct ares_addr_node **") r = _lib.ares_get_servers(self._channel[0], servers) if r != _lib.ARES_SUCCESS: raise AresError(r, errno.strerror(r)) server_list = [] server = _ffi.new("struct ares_addr_node **", servers[0]) while True: if server == _ffi.NULL: break ip = _ffi.new("char []", _lib.INET6_ADDRSTRLEN) s = server[0] if _ffi.NULL != _lib.ares_inet_ntop(s.family, _ffi.addressof(s.addr), ip, _lib.INET6_ADDRSTRLEN): server_list.append(maybe_str(_ffi.string(ip, _lib.INET6_ADDRSTRLEN))) server = s.next return server_list @servers.setter def servers(self, servers): c = _ffi.new("struct ares_addr_node[%d]" % len(servers)) for i, server in enumerate(servers): if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(server), _ffi.addressof(c[i].addr.addr4)) == 1: c[i].family = socket.AF_INET elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(server), _ffi.addressof(c[i].addr.addr6)) == 1: c[i].family = socket.AF_INET6 else: raise ValueError("invalid IP address") if i > 0: c[i - 1].next = _ffi.addressof(c[i]) r = _lib.ares_set_servers(self._channel[0], c) if r != _lib.ARES_SUCCESS: raise AresError(r, errno.strerror(r)) def getsock(self): rfds = [] wfds = [] socks = _ffi.new("ares_socket_t [%d]" % _lib.ARES_GETSOCK_MAXNUM) bitmask = _lib.ares_getsock(self._channel[0], socks, _lib.ARES_GETSOCK_MAXNUM) for i in range(_lib.ARES_GETSOCK_MAXNUM): if _lib.ARES_GETSOCK_READABLE(bitmask, i): rfds.append(socks[i]) if _lib.ARES_GETSOCK_WRITABLE(bitmask, i): wfds.append(socks[i]) return rfds, wfds def process_fd(self, read_fd, write_fd): _lib.ares_process_fd(self._channel[0], _ffi.cast("ares_socket_t", read_fd), _ffi.cast("ares_socket_t", write_fd)) def timeout(self, t = None): maxtv = _ffi.NULL tv = _ffi.new("struct timeval*") if t is not None: if t >= 0.0: maxtv = _ffi.new("struct timeval*") maxtv.tv_sec = int(math.floor(t)) maxtv.tv_usec = int(math.fmod(t, 1.0) * 1000000) else: raise ValueError("timeout needs to be a positive number or None") _lib.ares_timeout(self._channel[0], maxtv, tv) if tv == _ffi.NULL: return 0.0 return (tv.tv_sec + tv.tv_usec / 1000000.0) def gethostbyaddr(self, addr, callback): if not callable(callback): raise TypeError("a callable is required") addr4 = _ffi.new("struct in_addr*") addr6 = _ffi.new("struct ares_in6_addr*") if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(addr), (addr4)) == 1: address = addr4 family = socket.AF_INET 
elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(addr), (addr6)) == 1: address = addr6 family = socket.AF_INET6 else: raise ValueError("invalid IP address") userdata = _ffi.new_handle(callback) _global_set.add(userdata) _lib.ares_gethostbyaddr(self._channel[0], address, _ffi.sizeof(address[0]), family, _lib._host_cb, userdata) def gethostbyname(self, name, family, callback): if not callable(callback): raise TypeError("a callable is required") userdata = _ffi.new_handle(callback) _global_set.add(userdata) _lib.ares_gethostbyname(self._channel[0], parse_name(name), family, _lib._host_cb, userdata) def query(self, name, query_type, callback): self._do_query(_lib.ares_query, name, query_type, callback) def search(self, name, query_type, callback): self._do_query(_lib.ares_search, name, query_type, callback) def _do_query(self, func, name, query_type, callback): if not callable(callback): raise TypeError('a callable is required') if query_type not in self.__qtypes__: raise ValueError('invalid query type specified') userdata = _ffi.new_handle((callback, query_type)) _global_set.add(userdata) func(self._channel[0], parse_name(name), _lib.C_IN, query_type, _lib._query_cb, userdata) def set_local_ip(self, ip): addr4 = _ffi.new("struct in_addr*") addr6 = _ffi.new("struct ares_in6_addr*") if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(ip), addr4) == 1: _lib.ares_set_local_ip4(self._channel[0], socket.ntohl(addr4.s_addr)) elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(ip), addr6) == 1: _lib.ares_set_local_ip6(self._channel[0], addr6) else: raise ValueError("invalid IP address") def getnameinfo(self, ip_port, flags, callback): if not callable(callback): raise TypeError("a callable is required") ip, port = ip_port if port < 0 or port > 65535: raise ValueError("port must be between 0 and 65535") sa4 = _ffi.new("struct sockaddr_in*") sa6 = _ffi.new("struct sockaddr_in6*") if _lib.ares_inet_pton(socket.AF_INET, ascii_bytes(ip), _ffi.addressof(sa4.sin_addr)) == 1: sa4.sin_family = socket.AF_INET sa4.sin_port = socket.htons(port) sa = sa4 elif _lib.ares_inet_pton(socket.AF_INET6, ascii_bytes(ip), _ffi.addressof(sa6.sin6_addr)) == 1: sa6.sin6_family = socket.AF_INET6 sa6.sin6_port = socket.htons(port) sa = sa6 else: raise ValueError("invalid IP address") userdata = _ffi.new_handle(callback) _global_set.add(userdata) _lib.ares_getnameinfo(self._channel[0], _ffi.cast("struct sockaddr*", sa), _ffi.sizeof(sa[0]), flags, _lib._nameinfo_cb, userdata) def set_local_dev(self, dev): _lib.ares_set_local_dev(self._channel[0], dev) class AresResult: __slots__ = () def __repr__(self): attrs = ['%s=%s' % (a, getattr(self, a)) for a in self.__slots__] return '<%s> %s' % (self.__class__.__name__, ', '.join(attrs)) # DNS query result types # class ares_query_a_result(AresResult): __slots__ = ('host', 'ttl') type = 'A' def __init__(self, ares_addrttl): buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN) _lib.ares_inet_ntop(socket.AF_INET, _ffi.addressof(ares_addrttl.ipaddr), buf, _lib.INET6_ADDRSTRLEN) self.host = maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN)) self.ttl = ares_addrttl.ttl class ares_query_aaaa_result(AresResult): __slots__ = ('host', 'ttl') type = 'AAAA' def __init__(self, ares_addrttl): buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN) _lib.ares_inet_ntop(socket.AF_INET6, _ffi.addressof(ares_addrttl.ip6addr), buf, _lib.INET6_ADDRSTRLEN) self.host = maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN)) self.ttl = ares_addrttl.ttl class ares_query_cname_result(AresResult): __slots__ = ('cname', 'ttl') 
type = 'CNAME' def __init__(self, host): self.cname = maybe_str(_ffi.string(host.h_name)) self.ttl = -1 class ares_query_mx_result(AresResult): __slots__ = ('host', 'priority', 'ttl') type = 'MX' def __init__(self, mx): self.host = maybe_str(_ffi.string(mx.host)) self.priority = mx.priority self.ttl = mx.ttl class ares_query_naptr_result(AresResult): __slots__ = ('order', 'preference', 'flags', 'service', 'regex', 'replacement', 'ttl') type = 'NAPTR' def __init__(self, naptr): self.order = naptr.order self.preference = naptr.preference self.flags = maybe_str(_ffi.string(naptr.flags)) self.service = maybe_str(_ffi.string(naptr.service)) self.regex = maybe_str(_ffi.string(naptr.regexp)) self.replacement = maybe_str(_ffi.string(naptr.replacement)) self.ttl = naptr.ttl class ares_query_ns_result(AresResult): __slots__ = ('host', 'ttl') type = 'NS' def __init__(self, ns): self.host = maybe_str(_ffi.string(ns)) self.ttl = -1 class ares_query_ptr_result(AresResult): __slots__ = ('name', 'ttl', 'aliases') type = 'PTR' def __init__(self, hostent, ttl, aliases): self.name = maybe_str(_ffi.string(hostent.h_name)) self.ttl = ttl self.aliases = aliases class ares_query_soa_result(AresResult): __slots__ = ('nsname', 'hostmaster', 'serial', 'refresh', 'retry', 'expires', 'minttl', 'ttl') type = 'SOA' def __init__(self, soa): self.nsname = maybe_str(_ffi.string(soa.nsname)) self.hostmaster = maybe_str(_ffi.string(soa.hostmaster)) self.serial = soa.serial self.refresh = soa.refresh self.retry = soa.retry self.expires = soa.expire self.minttl = soa.minttl self.ttl = soa.ttl class ares_query_srv_result(AresResult): __slots__ = ('host', 'port', 'priority', 'weight', 'ttl') type = 'SRV' def __init__(self, srv): self.host = maybe_str(_ffi.string(srv.host)) self.port = srv.port self.priority = srv.priority self.weight = srv.weight self.ttl = srv.ttl class ares_query_txt_result(AresResult): __slots__ = ('text', 'ttl') type = 'TXT' def __init__(self, txt_chunk): self.text = maybe_str(txt_chunk.text) self.ttl = txt_chunk.ttl class ares_query_txt_result_chunk(AresResult): __slots__ = ('text', 'ttl') type = 'TXT' def __init__(self, txt): self.text = _ffi.string(txt.txt) self.ttl = txt.ttl # Other result types # class ares_host_result(AresResult): __slots__ = ('name', 'aliases', 'addresses') def __init__(self, hostent): self.name = maybe_str(_ffi.string(hostent.h_name)) self.aliases = [] self.addresses = [] i = 0 while hostent.h_aliases[i] != _ffi.NULL: self.aliases.append(maybe_str(_ffi.string(hostent.h_aliases[i]))) i += 1 i = 0 while hostent.h_addr_list[i] != _ffi.NULL: buf = _ffi.new("char[]", _lib.INET6_ADDRSTRLEN) if _ffi.NULL != _lib.ares_inet_ntop(hostent.h_addrtype, hostent.h_addr_list[i], buf, _lib.INET6_ADDRSTRLEN): self.addresses.append(maybe_str(_ffi.string(buf, _lib.INET6_ADDRSTRLEN))) i += 1 class ares_nameinfo_result(AresResult): __slots__ = ('node', 'service') def __init__(self, node, service): self.node = maybe_str(_ffi.string(node)) self.service = maybe_str(_ffi.string(service)) if service != _ffi.NULL else None __all__ = exported_pycares_symbols + list(exported_pycares_symbols_map.keys()) + ['AresError', 'Channel', 'errno', '__version__'] del exported_pycares_symbols, exported_pycares_symbols_map
24,660
Python
32.921596
150
0.559124
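Channel above can be driven from an external event loop either by polling getsock() (as pycares/__main__.py further down does) or by registering a sock_state_cb, which c-ares invokes whenever it starts or stops being interested in a socket; the in-code comment notes that the callback handle must stay alive as long as the channel. A hedged select-based sketch of the sock_state_cb route; the hostname and query type are illustrative.

import select
import pycares

sockets = {}  # fd -> (readable, writable), maintained by the callback below

def sock_state_cb(fd, readable, writable):
    # c-ares reports (0, 0) when it is done with a socket.
    if readable or writable:
        sockets[fd] = (readable, writable)
    else:
        sockets.pop(fd, None)

def on_result(result, error):
    print(result, error)

channel = pycares.Channel(sock_state_cb=sock_state_cb)
channel.query("example.com", pycares.QUERY_TYPE_A, on_result)

while sockets:
    rlist = [fd for fd, (r, _) in sockets.items() if r]
    wlist = [fd for fd, (_, w) in sockets.items() if w]
    rl, wl, _ = select.select(rlist, wlist, [], channel.timeout(1.0))
    if not rl and not wl:
        # nothing became ready: let c-ares handle retries/timeouts
        channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD)
    for fd in rl:
        channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
    for fd in wl:
        channel.process_fd(pycares.ARES_SOCKET_BAD, fd)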
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/utils.py
try: import idna as idna2008 except ImportError: idna2008 = None def ascii_bytes(data): if isinstance(data, str): return data.encode('ascii') if isinstance(data, bytes): return data raise TypeError('only str (ascii encoding) and bytes are supported') def maybe_str(data): if isinstance(data, str): return data if isinstance(data, bytes): try: return data.decode('ascii') except UnicodeDecodeError: return data raise TypeError('only str (ascii encoding) and bytes are supported') def is_all_ascii(text): for c in text: if ord(c) > 0x7f: return False return True def parse_name_idna2008(name): parts = name.split('.') r = [] for part in parts: if is_all_ascii(part): r.append(part.encode('ascii')) else: r.append(idna2008.encode(part)) return b'.'.join(r) def parse_name(name): if isinstance(name, str): if is_all_ascii(name): return name.encode('ascii') if idna2008 is not None: return parse_name_idna2008(name) return name.encode('idna') if isinstance(name, bytes): return name raise TypeError('only str and bytes are supported') __all__ = ['ascii_bytes', 'maybe_str', 'parse_name']
1,343
Python
22.578947
72
0.596426
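parse_name() above only resorts to IDNA encoding when a name contains non-ASCII labels; plain ASCII input is simply encoded. A quick illustration of the three helpers:

from pycares.utils import ascii_bytes, maybe_str, parse_name

print(ascii_bytes("example.com"))   # b'example.com'
print(maybe_str(b"example.com"))    # 'example.com'
print(parse_name("example.com"))    # b'example.com' (all-ASCII, so no IDNA step)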
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/errno.py
from ._cares import ffi as _ffi, lib as _lib from .utils import maybe_str exported_pycares_symbols = [ 'ARES_SUCCESS', # error codes 'ARES_ENODATA', 'ARES_EFORMERR', 'ARES_ESERVFAIL', 'ARES_ENOTFOUND', 'ARES_ENOTIMP', 'ARES_EREFUSED', 'ARES_EBADQUERY', 'ARES_EBADNAME', 'ARES_EBADFAMILY', 'ARES_EBADRESP', 'ARES_ECONNREFUSED', 'ARES_ETIMEOUT', 'ARES_EOF', 'ARES_EFILE', 'ARES_ENOMEM', 'ARES_EDESTRUCTION', 'ARES_EBADSTR', 'ARES_EBADFLAGS', 'ARES_ENONAME', 'ARES_EBADHINTS', 'ARES_ENOTINITIALIZED', 'ARES_ELOADIPHLPAPI', 'ARES_EADDRGETNETWORKPARAMS', 'ARES_ECANCELLED', ] errorcode = {} for symbol in exported_pycares_symbols: value = getattr(_lib, symbol) globals()[symbol] = value globals()["errorcode"][value] = symbol def strerror(code): return maybe_str(_ffi.string(_lib.ares_strerror(code))) __all__ = exported_pycares_symbols + ['errorcode', 'strerror'] del exported_pycares_symbols
1,019
Python
19.4
62
0.632974
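The module above exposes each ARES_* constant at module level, an errorcode mapping from numeric value back to symbol name, and strerror() for the c-ares message. For example:

from pycares import errno

code = errno.ARES_ENOTFOUND
print(errno.errorcode[code])   # 'ARES_ENOTFOUND'
print(errno.strerror(code))    # human-readable message from ares_strerror()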
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/pycares/__main__.py
import collections.abc import pycares import select import socket import sys def wait_channel(channel): while True: read_fds, write_fds = channel.getsock() if not read_fds and not write_fds: break timeout = channel.timeout() if not timeout: channel.process_fd(pycares.ARES_SOCKET_BAD, pycares.ARES_SOCKET_BAD) continue rlist, wlist, xlist = select.select(read_fds, write_fds, [], timeout) for fd in rlist: channel.process_fd(fd, pycares.ARES_SOCKET_BAD) for fd in wlist: channel.process_fd(pycares.ARES_SOCKET_BAD, fd) def cb(result, error): if error is not None: print('Error: (%d) %s' % (error, pycares.errno.strerror(error))) else: parts = [ ';; QUESTION SECTION:', ';%s\t\t\tIN\t%s' % (hostname, qtype.upper()), '', ';; ANSWER SECTION:' ] if not isinstance(result, collections.abc.Iterable): result = [result] for r in result: txt = '%s\t\t%d\tIN\t%s' % (hostname, r.ttl, r.type) if r.type in ('A', 'AAAA'): parts.append('%s\t%s' % (txt, r.host)) elif r.type == 'CNAME': parts.append('%s\t%s' % (txt, r.cname)) elif r.type == 'MX': parts.append('%s\t%d %s' % (txt, r.priority, r.host)) elif r.type == 'NAPTR': parts.append('%s\t%d %d "%s" "%s" "%s" %s' % (txt, r.order, r.preference, r.flags, r.service, r.regex, r.replacement)) elif r.type == 'NS': parts.append('%s\t%s' % (txt, r.host)) elif r.type == 'PTR': parts.append('%s\t%s' % (txt, r.name)) elif r.type == 'SOA': parts.append('%s\t%s %s %d %d %d %d %d' % (txt, r.nsname, r.hostmaster, r.serial, r.refresh, r.retry, r.expires, r.minttl)) elif r.type == 'SRV': parts.append('%s\t%d %d %d %s' % (txt, r.priority, r.weight, r.port, r.host)) elif r.type == 'TXT': parts.append('%s\t"%s"' % (txt, r.text)) print('\n'.join(parts)) channel = pycares.Channel() if len(sys.argv) not in (2, 3): print('Invalid arguments! Usage: python -m pycares [query_type] hostname') sys.exit(1) if len(sys.argv) == 2: _, hostname = sys.argv qtype = 'A' else: _, qtype, hostname = sys.argv try: query_type = getattr(pycares, 'QUERY_TYPE_%s' % qtype.upper()) except Exception: print('Invalid query type: %s' % qtype) sys.exit(1) channel.query(hostname, query_type, cb) wait_channel(channel)
2,674
Python
31.228915
139
0.526178
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/bin/watchmedo-script.py
#!C:\buildAgent\work\kit\kit\_build\target-deps\python\python.exe # EASY-INSTALL-ENTRY-SCRIPT: 'watchdog==0.10.4','console_scripts','watchmedo' import re import sys # for compatibility with easy_install; see #2198 __requires__ = 'watchdog==0.10.4' try: from importlib.metadata import distribution except ImportError: try: from importlib_metadata import distribution except ImportError: from pkg_resources import load_entry_point def importlib_load_entry_point(spec, group, name): dist_name, _, _ = spec.partition('==') matches = ( entry_point for entry_point in distribution(dist_name).entry_points if entry_point.group == group and entry_point.name == name ) return next(matches).load() globals().setdefault('load_entry_point', importlib_load_entry_point) if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(load_entry_point('watchdog==0.10.4', 'console_scripts', 'watchmedo')())
1,015
Python
28.882352
84
0.66798
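The wrapper above falls back to importlib.metadata (or importlib_metadata / pkg_resources) to resolve the 'watchmedo' console_scripts entry point. A hedged sketch of the same lookup performed directly; the distribution and entry-point names are taken from the script above.

from importlib.metadata import distribution

def load_console_script(dist_name, name):
    # Same filtering as importlib_load_entry_point() in the script above.
    eps = distribution(dist_name).entry_points
    matches = (ep for ep in eps
               if ep.group == "console_scripts" and ep.name == name)
    return next(matches).load()

main = load_console_script("watchdog", "watchmedo")
# calling main() would run the watchmedo CLI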
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/bin/jp.py
#!C:\buildAgent\work\kit\kit\_build\target-deps\python\python.exe import sys import json import argparse from pprint import pformat import jmespath from jmespath import exceptions def main(): parser = argparse.ArgumentParser() parser.add_argument('expression') parser.add_argument('-f', '--filename', help=('The filename containing the input data. ' 'If a filename is not given then data is ' 'read from stdin.')) parser.add_argument('--ast', action='store_true', help=('Pretty print the AST, do not search the data.')) args = parser.parse_args() expression = args.expression if args.ast: # Only print the AST expression = jmespath.compile(args.expression) sys.stdout.write(pformat(expression.parsed)) sys.stdout.write('\n') return 0 if args.filename: with open(args.filename, 'r') as f: data = json.load(f) else: data = sys.stdin.read() data = json.loads(data) try: sys.stdout.write(json.dumps( jmespath.search(expression, data), indent=4, ensure_ascii=False)) sys.stdout.write('\n') except exceptions.ArityError as e: sys.stderr.write("invalid-arity: %s\n" % e) return 1 except exceptions.JMESPathTypeError as e: sys.stderr.write("invalid-type: %s\n" % e) return 1 except exceptions.UnknownFunctionError as e: sys.stderr.write("unknown-function: %s\n" % e) return 1 except exceptions.ParseError as e: sys.stderr.write("syntax-error: %s\n" % e) return 1 if __name__ == '__main__': sys.exit(main())
1,742
Python
30.690909
79
0.594719
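jp.py above is a thin wrapper over jmespath.search() and jmespath.compile(); the same calls can be used directly from Python. The sample data below is made up.

import jmespath

data = {"locations": [{"name": "Seattle"}, {"name": "Portland"}]}
print(jmespath.search("locations[*].name", data))   # ['Seattle', 'Portland']

compiled = jmespath.compile("locations[0].name")
print(compiled.search(data))                         # 'Seattle'
print(compiled.parsed)                               # AST, as jp.py prints with --ast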
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/__init__.py
# -*- coding: utf-8 -*- """ Charset-Normalizer ~~~~~~~~~~~~~~ The Real First Universal Charset Detector. A library that helps you read text from an unknown charset encoding. Motivated by chardet, This package is trying to resolve the issue by taking a new approach. All IANA character set names for which the Python core library provides codecs are supported. Basic usage: >>> from charset_normalizer import from_bytes >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8')) >>> best_guess = results.best() >>> str(best_guess) 'Bсеки човек има право на образование. Oбразованието!' Others methods and usages are available - see the full documentation at <https://github.com/Ousret/charset_normalizer>. :copyright: (c) 2021 by Ahmed TAHRI :license: MIT, see LICENSE for more details. """ import logging from .api import from_bytes, from_fp, from_path, normalize from .legacy import ( CharsetDetector, CharsetDoctor, CharsetNormalizerMatch, CharsetNormalizerMatches, detect, ) from .models import CharsetMatch, CharsetMatches from .utils import set_logging_handler from .version import VERSION, __version__ __all__ = ( "from_fp", "from_path", "from_bytes", "normalize", "detect", "CharsetMatch", "CharsetMatches", "CharsetNormalizerMatch", "CharsetNormalizerMatches", "CharsetDetector", "CharsetDoctor", "__version__", "VERSION", "set_logging_handler", ) # Attach a NullHandler to the top level logger by default # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
1,706
Python
28.947368
99
0.716295
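Besides from_bytes() shown in the docstring above, the package re-exports a chardet-style detect() helper from .legacy. A hedged sketch; the result is expected to follow chardet's {'encoding', 'language', 'confidence'} shape, and the exact values will vary.

from charset_normalizer import detect

payload = "Bсеки човек има право на образование.".encode("utf_8")
print(detect(payload))
# e.g. {'encoding': 'utf_8', 'language': ..., 'confidence': ...}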
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/version.py
""" Expose version """ __version__ = "2.1.1" VERSION = __version__.split(".")
79
Python
10.42857
32
0.531646
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/utils.py
try: # WARNING: unicodedata2 support is going to be removed in 3.0 # Python is quickly catching up. import unicodedata2 as unicodedata except ImportError: import unicodedata # type: ignore[no-redef] import importlib import logging from codecs import IncrementalDecoder from encodings.aliases import aliases from functools import lru_cache from re import findall from typing import Generator, List, Optional, Set, Tuple, Union from _multibytecodec import MultibyteIncrementalDecoder from .constant import ( ENCODING_MARKS, IANA_SUPPORTED_SIMILAR, RE_POSSIBLE_ENCODING_INDICATION, UNICODE_RANGES_COMBINED, UNICODE_SECONDARY_RANGE_KEYWORD, UTF8_MAXIMAL_ALLOCATION, ) @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_accentuated(character: str) -> bool: try: description: str = unicodedata.name(character) except ValueError: return False return ( "WITH GRAVE" in description or "WITH ACUTE" in description or "WITH CEDILLA" in description or "WITH DIAERESIS" in description or "WITH CIRCUMFLEX" in description or "WITH TILDE" in description ) @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def remove_accent(character: str) -> str: decomposed: str = unicodedata.decomposition(character) if not decomposed: return character codes: List[str] = decomposed.split(" ") return chr(int(codes[0], 16)) @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def unicode_range(character: str) -> Optional[str]: """ Retrieve the Unicode range official name from a single character. """ character_ord: int = ord(character) for range_name, ord_range in UNICODE_RANGES_COMBINED.items(): if character_ord in ord_range: return range_name return None @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_latin(character: str) -> bool: try: description: str = unicodedata.name(character) except ValueError: return False return "LATIN" in description @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_ascii(character: str) -> bool: try: character.encode("ascii") except UnicodeEncodeError: return False return True @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_punctuation(character: str) -> bool: character_category: str = unicodedata.category(character) if "P" in character_category: return True character_range: Optional[str] = unicode_range(character) if character_range is None: return False return "Punctuation" in character_range @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_symbol(character: str) -> bool: character_category: str = unicodedata.category(character) if "S" in character_category or "N" in character_category: return True character_range: Optional[str] = unicode_range(character) if character_range is None: return False return "Forms" in character_range @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_emoticon(character: str) -> bool: character_range: Optional[str] = unicode_range(character) if character_range is None: return False return "Emoticons" in character_range @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_separator(character: str) -> bool: if character.isspace() or character in {"|", "+", ",", ";", "<", ">"}: return True character_category: str = unicodedata.category(character) return "Z" in character_category @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_case_variable(character: str) -> bool: return character.islower() != character.isupper() def is_private_use_only(character: str) -> bool: character_category: str = unicodedata.category(character) return character_category == "Co" @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_cjk(character: str) -> bool: try: character_name = unicodedata.name(character) except 
ValueError: return False return "CJK" in character_name @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_hiragana(character: str) -> bool: try: character_name = unicodedata.name(character) except ValueError: return False return "HIRAGANA" in character_name @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_katakana(character: str) -> bool: try: character_name = unicodedata.name(character) except ValueError: return False return "KATAKANA" in character_name @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_hangul(character: str) -> bool: try: character_name = unicodedata.name(character) except ValueError: return False return "HANGUL" in character_name @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_thai(character: str) -> bool: try: character_name = unicodedata.name(character) except ValueError: return False return "THAI" in character_name @lru_cache(maxsize=len(UNICODE_RANGES_COMBINED)) def is_unicode_range_secondary(range_name: str) -> bool: return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD) @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION) def is_unprintable(character: str) -> bool: return ( character.isspace() is False # includes \n \t \r \v and character.isprintable() is False and character != "\x1A" # Why? Its the ASCII substitute character. and character != "\ufeff" # bug discovered in Python, # Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space. ) def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]: """ Extract using ASCII-only decoder any specified encoding in the first n-bytes. """ if not isinstance(sequence, bytes): raise TypeError seq_len: int = len(sequence) results: List[str] = findall( RE_POSSIBLE_ENCODING_INDICATION, sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"), ) if len(results) == 0: return None for specified_encoding in results: specified_encoding = specified_encoding.lower().replace("-", "_") encoding_alias: str encoding_iana: str for encoding_alias, encoding_iana in aliases.items(): if encoding_alias == specified_encoding: return encoding_iana if encoding_iana == specified_encoding: return encoding_iana return None @lru_cache(maxsize=128) def is_multi_byte_encoding(name: str) -> bool: """ Verify is a specific encoding is a multi byte one based on it IANA name """ return name in { "utf_8", "utf_8_sig", "utf_16", "utf_16_be", "utf_16_le", "utf_32", "utf_32_le", "utf_32_be", "utf_7", } or issubclass( importlib.import_module("encodings.{}".format(name)).IncrementalDecoder, MultibyteIncrementalDecoder, ) def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]: """ Identify and extract SIG/BOM in given sequence. 
""" for iana_encoding in ENCODING_MARKS: marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding] if isinstance(marks, bytes): marks = [marks] for mark in marks: if sequence.startswith(mark): return iana_encoding, mark return None, b"" def should_strip_sig_or_bom(iana_encoding: str) -> bool: return iana_encoding not in {"utf_16", "utf_32"} def iana_name(cp_name: str, strict: bool = True) -> str: cp_name = cp_name.lower().replace("-", "_") encoding_alias: str encoding_iana: str for encoding_alias, encoding_iana in aliases.items(): if cp_name in [encoding_alias, encoding_iana]: return encoding_iana if strict: raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name)) return cp_name def range_scan(decoded_sequence: str) -> List[str]: ranges: Set[str] = set() for character in decoded_sequence: character_range: Optional[str] = unicode_range(character) if character_range is None: continue ranges.add(character_range) return list(ranges) def cp_similarity(iana_name_a: str, iana_name_b: str) -> float: if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b): return 0.0 decoder_a = importlib.import_module( "encodings.{}".format(iana_name_a) ).IncrementalDecoder decoder_b = importlib.import_module( "encodings.{}".format(iana_name_b) ).IncrementalDecoder id_a: IncrementalDecoder = decoder_a(errors="ignore") id_b: IncrementalDecoder = decoder_b(errors="ignore") character_match_count: int = 0 for i in range(255): to_be_decoded: bytes = bytes([i]) if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded): character_match_count += 1 return character_match_count / 254 def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool: """ Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using the function cp_similarity. """ return ( iana_name_a in IANA_SUPPORTED_SIMILAR and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a] ) def set_logging_handler( name: str = "charset_normalizer", level: int = logging.INFO, format_string: str = "%(asctime)s | %(levelname)s | %(message)s", ) -> None: logger = logging.getLogger(name) logger.setLevel(level) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter(format_string)) logger.addHandler(handler) def cut_sequence_chunks( sequences: bytes, encoding_iana: str, offsets: range, chunk_size: int, bom_or_sig_available: bool, strip_sig_or_bom: bool, sig_payload: bytes, is_multi_byte_decoder: bool, decoded_payload: Optional[str] = None, ) -> Generator[str, None, None]: if decoded_payload and is_multi_byte_decoder is False: for i in offsets: chunk = decoded_payload[i : i + chunk_size] if not chunk: break yield chunk else: for i in offsets: chunk_end = i + chunk_size if chunk_end > len(sequences) + 8: continue cut_sequence = sequences[i : i + chunk_size] if bom_or_sig_available and strip_sig_or_bom is False: cut_sequence = sig_payload + cut_sequence chunk = cut_sequence.decode( encoding_iana, errors="ignore" if is_multi_byte_decoder else "strict", ) # multi-byte bad cutting detector and adjustment # not the cleanest way to perform that fix but clever enough for now. 
if is_multi_byte_decoder and i > 0 and sequences[i] >= 0x80: chunk_partial_size_chk: int = min(chunk_size, 16) if ( decoded_payload and chunk[:chunk_partial_size_chk] not in decoded_payload ): for j in range(i, i - 4, -1): cut_sequence = sequences[j:chunk_end] if bom_or_sig_available and strip_sig_or_bom is False: cut_sequence = sig_payload + cut_sequence chunk = cut_sequence.decode(encoding_iana, errors="ignore") if chunk[:chunk_partial_size_chk] in decoded_payload: break yield chunk
11,769
Python
26.694118
115
0.639477
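iana_name() above normalizes any spelling found in encodings.aliases to the canonical codec name, and is_multi_byte_encoding() combines a fixed allow-list with an IncrementalDecoder subclass check. A small illustration:

from charset_normalizer.utils import iana_name, is_multi_byte_encoding

print(iana_name("UTF-8"))                # 'utf_8'
print(iana_name("latin-1"))              # 'latin_1'
print(is_multi_byte_encoding("utf_16"))  # True
print(is_multi_byte_encoding("ascii"))   # False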
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/md.py
from functools import lru_cache from typing import List, Optional from .constant import COMMON_SAFE_ASCII_CHARACTERS, UNICODE_SECONDARY_RANGE_KEYWORD from .utils import ( is_accentuated, is_ascii, is_case_variable, is_cjk, is_emoticon, is_hangul, is_hiragana, is_katakana, is_latin, is_punctuation, is_separator, is_symbol, is_thai, is_unprintable, remove_accent, unicode_range, ) class MessDetectorPlugin: """ Base abstract class used for mess detection plugins. All detectors MUST extend and implement given methods. """ def eligible(self, character: str) -> bool: """ Determine if given character should be fed in. """ raise NotImplementedError # pragma: nocover def feed(self, character: str) -> None: """ The main routine to be executed upon character. Insert the logic in witch the text would be considered chaotic. """ raise NotImplementedError # pragma: nocover def reset(self) -> None: # pragma: no cover """ Permit to reset the plugin to the initial state. """ raise NotImplementedError @property def ratio(self) -> float: """ Compute the chaos ratio based on what your feed() has seen. Must NOT be lower than 0.; No restriction gt 0. """ raise NotImplementedError # pragma: nocover class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin): def __init__(self) -> None: self._punctuation_count: int = 0 self._symbol_count: int = 0 self._character_count: int = 0 self._last_printable_char: Optional[str] = None self._frenzy_symbol_in_word: bool = False def eligible(self, character: str) -> bool: return character.isprintable() def feed(self, character: str) -> None: self._character_count += 1 if ( character != self._last_printable_char and character not in COMMON_SAFE_ASCII_CHARACTERS ): if is_punctuation(character): self._punctuation_count += 1 elif ( character.isdigit() is False and is_symbol(character) and is_emoticon(character) is False ): self._symbol_count += 2 self._last_printable_char = character def reset(self) -> None: # pragma: no cover self._punctuation_count = 0 self._character_count = 0 self._symbol_count = 0 @property def ratio(self) -> float: if self._character_count == 0: return 0.0 ratio_of_punctuation: float = ( self._punctuation_count + self._symbol_count ) / self._character_count return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0 class TooManyAccentuatedPlugin(MessDetectorPlugin): def __init__(self) -> None: self._character_count: int = 0 self._accentuated_count: int = 0 def eligible(self, character: str) -> bool: return character.isalpha() def feed(self, character: str) -> None: self._character_count += 1 if is_accentuated(character): self._accentuated_count += 1 def reset(self) -> None: # pragma: no cover self._character_count = 0 self._accentuated_count = 0 @property def ratio(self) -> float: if self._character_count == 0: return 0.0 ratio_of_accentuation: float = self._accentuated_count / self._character_count return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0 class UnprintablePlugin(MessDetectorPlugin): def __init__(self) -> None: self._unprintable_count: int = 0 self._character_count: int = 0 def eligible(self, character: str) -> bool: return True def feed(self, character: str) -> None: if is_unprintable(character): self._unprintable_count += 1 self._character_count += 1 def reset(self) -> None: # pragma: no cover self._unprintable_count = 0 @property def ratio(self) -> float: if self._character_count == 0: return 0.0 return (self._unprintable_count * 8) / self._character_count class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin): def 
__init__(self) -> None: self._successive_count: int = 0 self._character_count: int = 0 self._last_latin_character: Optional[str] = None def eligible(self, character: str) -> bool: return character.isalpha() and is_latin(character) def feed(self, character: str) -> None: self._character_count += 1 if ( self._last_latin_character is not None and is_accentuated(character) and is_accentuated(self._last_latin_character) ): if character.isupper() and self._last_latin_character.isupper(): self._successive_count += 1 # Worse if its the same char duplicated with different accent. if remove_accent(character) == remove_accent(self._last_latin_character): self._successive_count += 1 self._last_latin_character = character def reset(self) -> None: # pragma: no cover self._successive_count = 0 self._character_count = 0 self._last_latin_character = None @property def ratio(self) -> float: if self._character_count == 0: return 0.0 return (self._successive_count * 2) / self._character_count class SuspiciousRange(MessDetectorPlugin): def __init__(self) -> None: self._suspicious_successive_range_count: int = 0 self._character_count: int = 0 self._last_printable_seen: Optional[str] = None def eligible(self, character: str) -> bool: return character.isprintable() def feed(self, character: str) -> None: self._character_count += 1 if ( character.isspace() or is_punctuation(character) or character in COMMON_SAFE_ASCII_CHARACTERS ): self._last_printable_seen = None return if self._last_printable_seen is None: self._last_printable_seen = character return unicode_range_a: Optional[str] = unicode_range(self._last_printable_seen) unicode_range_b: Optional[str] = unicode_range(character) if is_suspiciously_successive_range(unicode_range_a, unicode_range_b): self._suspicious_successive_range_count += 1 self._last_printable_seen = character def reset(self) -> None: # pragma: no cover self._character_count = 0 self._suspicious_successive_range_count = 0 self._last_printable_seen = None @property def ratio(self) -> float: if self._character_count == 0: return 0.0 ratio_of_suspicious_range_usage: float = ( self._suspicious_successive_range_count * 2 ) / self._character_count if ratio_of_suspicious_range_usage < 0.1: return 0.0 return ratio_of_suspicious_range_usage class SuperWeirdWordPlugin(MessDetectorPlugin): def __init__(self) -> None: self._word_count: int = 0 self._bad_word_count: int = 0 self._foreign_long_count: int = 0 self._is_current_word_bad: bool = False self._foreign_long_watch: bool = False self._character_count: int = 0 self._bad_character_count: int = 0 self._buffer: str = "" self._buffer_accent_count: int = 0 def eligible(self, character: str) -> bool: return True def feed(self, character: str) -> None: if character.isalpha(): self._buffer += character if is_accentuated(character): self._buffer_accent_count += 1 if ( self._foreign_long_watch is False and (is_latin(character) is False or is_accentuated(character)) and is_cjk(character) is False and is_hangul(character) is False and is_katakana(character) is False and is_hiragana(character) is False and is_thai(character) is False ): self._foreign_long_watch = True return if not self._buffer: return if ( character.isspace() or is_punctuation(character) or is_separator(character) ) and self._buffer: self._word_count += 1 buffer_length: int = len(self._buffer) self._character_count += buffer_length if buffer_length >= 4: if self._buffer_accent_count / buffer_length > 0.34: self._is_current_word_bad = True # Word/Buffer ending with a upper case accentuated 
letter are so rare, # that we will consider them all as suspicious. Same weight as foreign_long suspicious. if is_accentuated(self._buffer[-1]) and self._buffer[-1].isupper(): self._foreign_long_count += 1 self._is_current_word_bad = True if buffer_length >= 24 and self._foreign_long_watch: self._foreign_long_count += 1 self._is_current_word_bad = True if self._is_current_word_bad: self._bad_word_count += 1 self._bad_character_count += len(self._buffer) self._is_current_word_bad = False self._foreign_long_watch = False self._buffer = "" self._buffer_accent_count = 0 elif ( character not in {"<", ">", "-", "=", "~", "|", "_"} and character.isdigit() is False and is_symbol(character) ): self._is_current_word_bad = True self._buffer += character def reset(self) -> None: # pragma: no cover self._buffer = "" self._is_current_word_bad = False self._foreign_long_watch = False self._bad_word_count = 0 self._word_count = 0 self._character_count = 0 self._bad_character_count = 0 self._foreign_long_count = 0 @property def ratio(self) -> float: if self._word_count <= 10 and self._foreign_long_count == 0: return 0.0 return self._bad_character_count / self._character_count class CjkInvalidStopPlugin(MessDetectorPlugin): """ GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and can be easily detected. Searching for the overuse of '丅' and '丄'. """ def __init__(self) -> None: self._wrong_stop_count: int = 0 self._cjk_character_count: int = 0 def eligible(self, character: str) -> bool: return True def feed(self, character: str) -> None: if character in {"丅", "丄"}: self._wrong_stop_count += 1 return if is_cjk(character): self._cjk_character_count += 1 def reset(self) -> None: # pragma: no cover self._wrong_stop_count = 0 self._cjk_character_count = 0 @property def ratio(self) -> float: if self._cjk_character_count < 16: return 0.0 return self._wrong_stop_count / self._cjk_character_count class ArchaicUpperLowerPlugin(MessDetectorPlugin): def __init__(self) -> None: self._buf: bool = False self._character_count_since_last_sep: int = 0 self._successive_upper_lower_count: int = 0 self._successive_upper_lower_count_final: int = 0 self._character_count: int = 0 self._last_alpha_seen: Optional[str] = None self._current_ascii_only: bool = True def eligible(self, character: str) -> bool: return True def feed(self, character: str) -> None: is_concerned = character.isalpha() and is_case_variable(character) chunk_sep = is_concerned is False if chunk_sep and self._character_count_since_last_sep > 0: if ( self._character_count_since_last_sep <= 64 and character.isdigit() is False and self._current_ascii_only is False ): self._successive_upper_lower_count_final += ( self._successive_upper_lower_count ) self._successive_upper_lower_count = 0 self._character_count_since_last_sep = 0 self._last_alpha_seen = None self._buf = False self._character_count += 1 self._current_ascii_only = True return if self._current_ascii_only is True and is_ascii(character) is False: self._current_ascii_only = False if self._last_alpha_seen is not None: if (character.isupper() and self._last_alpha_seen.islower()) or ( character.islower() and self._last_alpha_seen.isupper() ): if self._buf is True: self._successive_upper_lower_count += 2 self._buf = False else: self._buf = True else: self._buf = False self._character_count += 1 self._character_count_since_last_sep += 1 self._last_alpha_seen = character def reset(self) -> None: # pragma: no cover self._character_count = 0 
self._character_count_since_last_sep = 0 self._successive_upper_lower_count = 0 self._successive_upper_lower_count_final = 0 self._last_alpha_seen = None self._buf = False self._current_ascii_only = True @property def ratio(self) -> float: if self._character_count == 0: return 0.0 return self._successive_upper_lower_count_final / self._character_count @lru_cache(maxsize=1024) def is_suspiciously_successive_range( unicode_range_a: Optional[str], unicode_range_b: Optional[str] ) -> bool: """ Determine if two Unicode range seen next to each other can be considered as suspicious. """ if unicode_range_a is None or unicode_range_b is None: return True if unicode_range_a == unicode_range_b: return False if "Latin" in unicode_range_a and "Latin" in unicode_range_b: return False if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b: return False # Latin characters can be accompanied with a combining diacritical mark # eg. Vietnamese. if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and ( "Combining" in unicode_range_a or "Combining" in unicode_range_b ): return False keywords_range_a, keywords_range_b = unicode_range_a.split( " " ), unicode_range_b.split(" ") for el in keywords_range_a: if el in UNICODE_SECONDARY_RANGE_KEYWORD: continue if el in keywords_range_b: return False # Japanese Exception range_a_jp_chars, range_b_jp_chars = ( unicode_range_a in ( "Hiragana", "Katakana", ), unicode_range_b in ("Hiragana", "Katakana"), ) if (range_a_jp_chars or range_b_jp_chars) and ( "CJK" in unicode_range_a or "CJK" in unicode_range_b ): return False if range_a_jp_chars and range_b_jp_chars: return False if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b: if "CJK" in unicode_range_a or "CJK" in unicode_range_b: return False if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin": return False # Chinese/Japanese use dedicated range for punctuation and/or separators. if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or ( unicode_range_a in ["Katakana", "Hiragana"] and unicode_range_b in ["Katakana", "Hiragana"] ): if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b: return False if "Forms" in unicode_range_a or "Forms" in unicode_range_b: return False return True @lru_cache(maxsize=2048) def mess_ratio( decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False ) -> float: """ Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier. """ detectors: List[MessDetectorPlugin] = [ md_class() for md_class in MessDetectorPlugin.__subclasses__() ] length: int = len(decoded_sequence) + 1 mean_mess_ratio: float = 0.0 if length < 512: intermediary_mean_mess_ratio_calc: int = 32 elif length <= 1024: intermediary_mean_mess_ratio_calc = 64 else: intermediary_mean_mess_ratio_calc = 128 for character, index in zip(decoded_sequence + "\n", range(length)): for detector in detectors: if detector.eligible(character): detector.feed(character) if ( index > 0 and index % intermediary_mean_mess_ratio_calc == 0 ) or index == length - 1: mean_mess_ratio = sum(dt.ratio for dt in detectors) if mean_mess_ratio >= maximum_threshold: break if debug: for dt in detectors: # pragma: nocover print(dt.__class__, dt.ratio) return round(mean_mess_ratio, 3)
17,634
Python
30.83213
113
0.582568
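Usage sketch (illustrative addition, not part of the md.py record above): the detector plugins defined in that module are discovered automatically by mess_ratio() through MessDetectorPlugin.__subclasses__(), so callers normally just pass a decoded string. The sample strings below are assumptions chosen only for contrast; the exact ratios depend on the plugins' thresholds.

from charset_normalizer.md import mess_ratio

clean_text = "A plain English sentence with ordinary punctuation."
# Typical mojibake: mixed Unicode ranges, stray symbols and duplicated accents.
noisy_text = "ÃƒÂ©Ã¢â‚¬â„¢ â–’â–’â–’ ï¿½ï¿½ Ã¼Ã©Ã¼"

print(mess_ratio(clean_text))              # expected to stay close to 0.0
print(mess_ratio(noisy_text))              # expected to be noticeably higher
print(mess_ratio(noisy_text, debug=True))  # also prints each plugin's individual ratio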
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/legacy.py
import warnings
from typing import Dict, Optional, Union

from .api import from_bytes, from_fp, from_path, normalize
from .constant import CHARDET_CORRESPONDENCE
from .models import CharsetMatch, CharsetMatches


def detect(byte_str: bytes) -> Dict[str, Optional[Union[str, float]]]:
    """
    chardet legacy method
    Detect the encoding of the given byte string. It should be mostly backward-compatible.
    Encoding name will match Chardet's own writing whenever possible. (Not on encoding names unsupported by it)
    This function is deprecated and should be used to migrate your project easily, consult the documentation for
    further information. Not planned for removal.

    :param byte_str: The byte sequence to examine.
    """
    if not isinstance(byte_str, (bytearray, bytes)):
        raise TypeError(  # pragma: nocover
            "Expected object of type bytes or bytearray, got: "
            "{0}".format(type(byte_str))
        )

    if isinstance(byte_str, bytearray):
        byte_str = bytes(byte_str)

    r = from_bytes(byte_str).best()

    encoding = r.encoding if r is not None else None
    language = r.language if r is not None and r.language != "Unknown" else ""
    confidence = 1.0 - r.chaos if r is not None else None

    # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig gets stripped in the detection/normalization process
    # but chardet does return 'utf-8-sig' and it is a valid codec name.
    if r is not None and encoding == "utf_8" and r.bom:
        encoding += "_sig"

    return {
        "encoding": encoding
        if encoding not in CHARDET_CORRESPONDENCE
        else CHARDET_CORRESPONDENCE[encoding],
        "language": language,
        "confidence": confidence,
    }


class CharsetNormalizerMatch(CharsetMatch):
    pass


class CharsetNormalizerMatches(CharsetMatches):
    @staticmethod
    def from_fp(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return from_fp(*args, **kwargs)  # pragma: nocover

    @staticmethod
    def from_bytes(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return from_bytes(*args, **kwargs)  # pragma: nocover

    @staticmethod
    def from_path(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return from_path(*args, **kwargs)  # pragma: nocover

    @staticmethod
    def normalize(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return normalize(*args, **kwargs)  # pragma: nocover


class CharsetDetector(CharsetNormalizerMatches):
    pass


class CharsetDoctor(CharsetNormalizerMatches):
    pass
3,384
Python
34.260416
120
0.65396
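Usage sketch (illustrative addition, not part of the legacy.py record above): detect() reproduces the dictionary shape chardet users expect, which is what makes it a drop-in migration helper. The sample text and code page are assumptions.

from charset_normalizer.legacy import detect

payload = "Comment ça va ? Très bien, merci.".encode("cp1252")
result = detect(payload)

# Expected keys: "encoding" (spelled the chardet way, see CHARDET_CORRESPONDENCE),
# "language", and "confidence" (1.0 minus the measured chaos).
print(result)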
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/api.py
import logging import warnings from os import PathLike from os.path import basename, splitext from typing import Any, BinaryIO, List, Optional, Set from .cd import ( coherence_ratio, encoding_languages, mb_encoding_languages, merge_coherence_ratios, ) from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE from .md import mess_ratio from .models import CharsetMatch, CharsetMatches from .utils import ( any_specified_encoding, cut_sequence_chunks, iana_name, identify_sig_or_bom, is_cp_similar, is_multi_byte_encoding, should_strip_sig_or_bom, ) # Will most likely be controversial # logging.addLevelName(TRACE, "TRACE") logger = logging.getLogger("charset_normalizer") explain_handler = logging.StreamHandler() explain_handler.setFormatter( logging.Formatter("%(asctime)s | %(levelname)s | %(message)s") ) def from_bytes( sequences: bytes, steps: int = 5, chunk_size: int = 512, threshold: float = 0.2, cp_isolation: Optional[List[str]] = None, cp_exclusion: Optional[List[str]] = None, preemptive_behaviour: bool = True, explain: bool = False, ) -> CharsetMatches: """ Given a raw bytes sequence, return the best possibles charset usable to render str objects. If there is no results, it is a strong indicator that the source is binary/not text. By default, the process will extract 5 blocs of 512o each to assess the mess and coherence of a given sequence. And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will. The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page but never take it for granted. Can improve the performance. You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that purpose. This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32. By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain' toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging. Custom logging format and handler can be set manually. """ if not isinstance(sequences, (bytearray, bytes)): raise TypeError( "Expected object of type bytes or bytearray, got: {0}".format( type(sequences) ) ) if explain: previous_logger_level: int = logger.level logger.addHandler(explain_handler) logger.setLevel(TRACE) length: int = len(sequences) if length == 0: logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.") if explain: logger.removeHandler(explain_handler) logger.setLevel(previous_logger_level or logging.WARNING) return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")]) if cp_isolation is not None: logger.log( TRACE, "cp_isolation is set. use this flag for debugging purpose. " "limited list of encoding allowed : %s.", ", ".join(cp_isolation), ) cp_isolation = [iana_name(cp, False) for cp in cp_isolation] else: cp_isolation = [] if cp_exclusion is not None: logger.log( TRACE, "cp_exclusion is set. use this flag for debugging purpose. 
" "limited list of encoding excluded : %s.", ", ".join(cp_exclusion), ) cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion] else: cp_exclusion = [] if length <= (chunk_size * steps): logger.log( TRACE, "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.", steps, chunk_size, length, ) steps = 1 chunk_size = length if steps > 1 and length / steps < chunk_size: chunk_size = int(length / steps) is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE if is_too_small_sequence: logger.log( TRACE, "Trying to detect encoding from a tiny portion of ({}) byte(s).".format( length ), ) elif is_too_large_sequence: logger.log( TRACE, "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format( length ), ) prioritized_encodings: List[str] = [] specified_encoding: Optional[str] = ( any_specified_encoding(sequences) if preemptive_behaviour else None ) if specified_encoding is not None: prioritized_encodings.append(specified_encoding) logger.log( TRACE, "Detected declarative mark in sequence. Priority +1 given for %s.", specified_encoding, ) tested: Set[str] = set() tested_but_hard_failure: List[str] = [] tested_but_soft_failure: List[str] = [] fallback_ascii: Optional[CharsetMatch] = None fallback_u8: Optional[CharsetMatch] = None fallback_specified: Optional[CharsetMatch] = None results: CharsetMatches = CharsetMatches() sig_encoding, sig_payload = identify_sig_or_bom(sequences) if sig_encoding is not None: prioritized_encodings.append(sig_encoding) logger.log( TRACE, "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.", len(sig_payload), sig_encoding, ) prioritized_encodings.append("ascii") if "utf_8" not in prioritized_encodings: prioritized_encodings.append("utf_8") for encoding_iana in prioritized_encodings + IANA_SUPPORTED: if cp_isolation and encoding_iana not in cp_isolation: continue if cp_exclusion and encoding_iana in cp_exclusion: continue if encoding_iana in tested: continue tested.add(encoding_iana) decoded_payload: Optional[str] = None bom_or_sig_available: bool = sig_encoding == encoding_iana strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom( encoding_iana ) if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available: logger.log( TRACE, "Encoding %s wont be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.", encoding_iana, ) continue try: is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana) except (ModuleNotFoundError, ImportError): logger.log( TRACE, "Encoding %s does not provide an IncrementalDecoder", encoding_iana, ) continue try: if is_too_large_sequence and is_multi_byte_decoder is False: str( sequences[: int(50e4)] if strip_sig_or_bom is False else sequences[len(sig_payload) : int(50e4)], encoding=encoding_iana, ) else: decoded_payload = str( sequences if strip_sig_or_bom is False else sequences[len(sig_payload) :], encoding=encoding_iana, ) except (UnicodeDecodeError, LookupError) as e: if not isinstance(e, LookupError): logger.log( TRACE, "Code page %s does not fit given bytes sequence at ALL. 
%s", encoding_iana, str(e), ) tested_but_hard_failure.append(encoding_iana) continue similar_soft_failure_test: bool = False for encoding_soft_failed in tested_but_soft_failure: if is_cp_similar(encoding_iana, encoding_soft_failed): similar_soft_failure_test = True break if similar_soft_failure_test: logger.log( TRACE, "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!", encoding_iana, encoding_soft_failed, ) continue r_ = range( 0 if not bom_or_sig_available else len(sig_payload), length, int(length / steps), ) multi_byte_bonus: bool = ( is_multi_byte_decoder and decoded_payload is not None and len(decoded_payload) < length ) if multi_byte_bonus: logger.log( TRACE, "Code page %s is a multi byte encoding table and it appear that at least one character " "was encoded using n-bytes.", encoding_iana, ) max_chunk_gave_up: int = int(len(r_) / 4) max_chunk_gave_up = max(max_chunk_gave_up, 2) early_stop_count: int = 0 lazy_str_hard_failure = False md_chunks: List[str] = [] md_ratios = [] try: for chunk in cut_sequence_chunks( sequences, encoding_iana, r_, chunk_size, bom_or_sig_available, strip_sig_or_bom, sig_payload, is_multi_byte_decoder, decoded_payload, ): md_chunks.append(chunk) md_ratios.append(mess_ratio(chunk, threshold)) if md_ratios[-1] >= threshold: early_stop_count += 1 if (early_stop_count >= max_chunk_gave_up) or ( bom_or_sig_available and strip_sig_or_bom is False ): break except UnicodeDecodeError as e: # Lazy str loading may have missed something there logger.log( TRACE, "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s", encoding_iana, str(e), ) early_stop_count = max_chunk_gave_up lazy_str_hard_failure = True # We might want to check the sequence again with the whole content # Only if initial MD tests passes if ( not lazy_str_hard_failure and is_too_large_sequence and not is_multi_byte_decoder ): try: sequences[int(50e3) :].decode(encoding_iana, errors="strict") except UnicodeDecodeError as e: logger.log( TRACE, "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s", encoding_iana, str(e), ) tested_but_hard_failure.append(encoding_iana) continue mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0 if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up: tested_but_soft_failure.append(encoding_iana) logger.log( TRACE, "%s was excluded because of initial chaos probing. Gave up %i time(s). " "Computed mean chaos is %f %%.", encoding_iana, early_stop_count, round(mean_mess_ratio * 100, ndigits=3), ) # Preparing those fallbacks in case we got nothing. if ( encoding_iana in ["ascii", "utf_8", specified_encoding] and not lazy_str_hard_failure ): fallback_entry = CharsetMatch( sequences, encoding_iana, threshold, False, [], decoded_payload ) if encoding_iana == specified_encoding: fallback_specified = fallback_entry elif encoding_iana == "ascii": fallback_ascii = fallback_entry else: fallback_u8 = fallback_entry continue logger.log( TRACE, "%s passed initial chaos probing. 
Mean measured chaos is %f %%", encoding_iana, round(mean_mess_ratio * 100, ndigits=3), ) if not is_multi_byte_decoder: target_languages: List[str] = encoding_languages(encoding_iana) else: target_languages = mb_encoding_languages(encoding_iana) if target_languages: logger.log( TRACE, "{} should target any language(s) of {}".format( encoding_iana, str(target_languages) ), ) cd_ratios = [] # We shall skip the CD when its about ASCII # Most of the time its not relevant to run "language-detection" on it. if encoding_iana != "ascii": for chunk in md_chunks: chunk_languages = coherence_ratio( chunk, 0.1, ",".join(target_languages) if target_languages else None ) cd_ratios.append(chunk_languages) cd_ratios_merged = merge_coherence_ratios(cd_ratios) if cd_ratios_merged: logger.log( TRACE, "We detected language {} using {}".format( cd_ratios_merged, encoding_iana ), ) results.append( CharsetMatch( sequences, encoding_iana, mean_mess_ratio, bom_or_sig_available, cd_ratios_merged, decoded_payload, ) ) if ( encoding_iana in [specified_encoding, "ascii", "utf_8"] and mean_mess_ratio < 0.1 ): logger.debug( "Encoding detection: %s is most likely the one.", encoding_iana ) if explain: logger.removeHandler(explain_handler) logger.setLevel(previous_logger_level) return CharsetMatches([results[encoding_iana]]) if encoding_iana == sig_encoding: logger.debug( "Encoding detection: %s is most likely the one as we detected a BOM or SIG within " "the beginning of the sequence.", encoding_iana, ) if explain: logger.removeHandler(explain_handler) logger.setLevel(previous_logger_level) return CharsetMatches([results[encoding_iana]]) if len(results) == 0: if fallback_u8 or fallback_ascii or fallback_specified: logger.log( TRACE, "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.", ) if fallback_specified: logger.debug( "Encoding detection: %s will be used as a fallback match", fallback_specified.encoding, ) results.append(fallback_specified) elif ( (fallback_u8 and fallback_ascii is None) or ( fallback_u8 and fallback_ascii and fallback_u8.fingerprint != fallback_ascii.fingerprint ) or (fallback_u8 is not None) ): logger.debug("Encoding detection: utf_8 will be used as a fallback match") results.append(fallback_u8) elif fallback_ascii: logger.debug("Encoding detection: ascii will be used as a fallback match") results.append(fallback_ascii) if results: logger.debug( "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.", results.best().encoding, # type: ignore len(results) - 1, ) else: logger.debug("Encoding detection: Unable to determine any suitable charset.") if explain: logger.removeHandler(explain_handler) logger.setLevel(previous_logger_level) return results def from_fp( fp: BinaryIO, steps: int = 5, chunk_size: int = 512, threshold: float = 0.20, cp_isolation: Optional[List[str]] = None, cp_exclusion: Optional[List[str]] = None, preemptive_behaviour: bool = True, explain: bool = False, ) -> CharsetMatches: """ Same thing than the function from_bytes but using a file pointer that is already ready. Will not close the file pointer. 
""" return from_bytes( fp.read(), steps, chunk_size, threshold, cp_isolation, cp_exclusion, preemptive_behaviour, explain, ) def from_path( path: "PathLike[Any]", steps: int = 5, chunk_size: int = 512, threshold: float = 0.20, cp_isolation: Optional[List[str]] = None, cp_exclusion: Optional[List[str]] = None, preemptive_behaviour: bool = True, explain: bool = False, ) -> CharsetMatches: """ Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode. Can raise IOError. """ with open(path, "rb") as fp: return from_fp( fp, steps, chunk_size, threshold, cp_isolation, cp_exclusion, preemptive_behaviour, explain, ) def normalize( path: "PathLike[Any]", steps: int = 5, chunk_size: int = 512, threshold: float = 0.20, cp_isolation: Optional[List[str]] = None, cp_exclusion: Optional[List[str]] = None, preemptive_behaviour: bool = True, ) -> CharsetMatch: """ Take a (text-based) file path and try to create another file next to it, this time using UTF-8. """ warnings.warn( "normalize is deprecated and will be removed in 3.0", DeprecationWarning, ) results = from_path( path, steps, chunk_size, threshold, cp_isolation, cp_exclusion, preemptive_behaviour, ) filename = basename(path) target_extensions = list(splitext(filename)) if len(results) == 0: raise IOError( 'Unable to normalize "{}", no encoding charset seems to fit.'.format( filename ) ) result = results.best() target_extensions[0] += "-" + result.encoding # type: ignore with open( "{}".format(str(path).replace(filename, "".join(target_extensions))), "wb" ) as fp: fp.write(result.output()) # type: ignore return result # type: ignore
19,191
Python
31.806838
120
0.556094
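Usage sketch (illustrative addition, not part of the api.py record above): from_bytes() is the primary entry point, while from_fp() and from_path() are thin wrappers around it. The payload and file path below are assumptions.

from charset_normalizer.api import from_bytes, from_path

payload = "Дождь идёт с самого утра, но это не страшно.".encode("cp1251")

best_guess = from_bytes(payload).best()
if best_guess is not None:
    print(best_guess.encoding, best_guess.language, best_guess.percent_chaos)

# Same workflow, reading a file in binary mode (hypothetical path):
# matches = from_path("./samples/sample-russian.txt", threshold=0.2)
# print(matches.best())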
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/models.py
import warnings from collections import Counter from encodings.aliases import aliases from hashlib import sha256 from json import dumps from re import sub from typing import ( Any, Counter as TypeCounter, Dict, Iterator, List, Optional, Tuple, Union, ) from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE from .md import mess_ratio from .utils import iana_name, is_multi_byte_encoding, unicode_range class CharsetMatch: def __init__( self, payload: bytes, guessed_encoding: str, mean_mess_ratio: float, has_sig_or_bom: bool, languages: "CoherenceMatches", decoded_payload: Optional[str] = None, ): self._payload: bytes = payload self._encoding: str = guessed_encoding self._mean_mess_ratio: float = mean_mess_ratio self._languages: CoherenceMatches = languages self._has_sig_or_bom: bool = has_sig_or_bom self._unicode_ranges: Optional[List[str]] = None self._leaves: List[CharsetMatch] = [] self._mean_coherence_ratio: float = 0.0 self._output_payload: Optional[bytes] = None self._output_encoding: Optional[str] = None self._string: Optional[str] = decoded_payload def __eq__(self, other: object) -> bool: if not isinstance(other, CharsetMatch): raise TypeError( "__eq__ cannot be invoked on {} and {}.".format( str(other.__class__), str(self.__class__) ) ) return self.encoding == other.encoding and self.fingerprint == other.fingerprint def __lt__(self, other: object) -> bool: """ Implemented to make sorted available upon CharsetMatches items. """ if not isinstance(other, CharsetMatch): raise ValueError chaos_difference: float = abs(self.chaos - other.chaos) coherence_difference: float = abs(self.coherence - other.coherence) # Bellow 1% difference --> Use Coherence if chaos_difference < 0.01 and coherence_difference > 0.02: # When having a tough decision, use the result that decoded as many multi-byte as possible. if chaos_difference == 0.0 and self.coherence == other.coherence: return self.multi_byte_usage > other.multi_byte_usage return self.coherence > other.coherence return self.chaos < other.chaos @property def multi_byte_usage(self) -> float: return 1.0 - len(str(self)) / len(self.raw) @property def chaos_secondary_pass(self) -> float: """ Check once again chaos in decoded text, except this time, with full content. Use with caution, this can be very slow. Notice: Will be removed in 3.0 """ warnings.warn( "chaos_secondary_pass is deprecated and will be removed in 3.0", DeprecationWarning, ) return mess_ratio(str(self), 1.0) @property def coherence_non_latin(self) -> float: """ Coherence ratio on the first non-latin language detected if ANY. Notice: Will be removed in 3.0 """ warnings.warn( "coherence_non_latin is deprecated and will be removed in 3.0", DeprecationWarning, ) return 0.0 @property def w_counter(self) -> TypeCounter[str]: """ Word counter instance on decoded text. 
Notice: Will be removed in 3.0 """ warnings.warn( "w_counter is deprecated and will be removed in 3.0", DeprecationWarning ) string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower()) return Counter(string_printable_only.split()) def __str__(self) -> str: # Lazy Str Loading if self._string is None: self._string = str(self._payload, self._encoding, "strict") return self._string def __repr__(self) -> str: return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint) def add_submatch(self, other: "CharsetMatch") -> None: if not isinstance(other, CharsetMatch) or other == self: raise ValueError( "Unable to add instance <{}> as a submatch of a CharsetMatch".format( other.__class__ ) ) other._string = None # Unload RAM usage; dirty trick. self._leaves.append(other) @property def encoding(self) -> str: return self._encoding @property def encoding_aliases(self) -> List[str]: """ Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855. """ also_known_as: List[str] = [] for u, p in aliases.items(): if self.encoding == u: also_known_as.append(p) elif self.encoding == p: also_known_as.append(u) return also_known_as @property def bom(self) -> bool: return self._has_sig_or_bom @property def byte_order_mark(self) -> bool: return self._has_sig_or_bom @property def languages(self) -> List[str]: """ Return the complete list of possible languages found in decoded sequence. Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'. """ return [e[0] for e in self._languages] @property def language(self) -> str: """ Most probable language found in decoded sequence. If none were detected or inferred, the property will return "Unknown". """ if not self._languages: # Trying to infer the language based on the given encoding # Its either English or we should not pronounce ourselves in certain cases. if "ascii" in self.could_be_from_charset: return "English" # doing it there to avoid circular import from charset_normalizer.cd import encoding_languages, mb_encoding_languages languages = ( mb_encoding_languages(self.encoding) if is_multi_byte_encoding(self.encoding) else encoding_languages(self.encoding) ) if len(languages) == 0 or "Latin Based" in languages: return "Unknown" return languages[0] return self._languages[0][0] @property def chaos(self) -> float: return self._mean_mess_ratio @property def coherence(self) -> float: if not self._languages: return 0.0 return self._languages[0][1] @property def percent_chaos(self) -> float: return round(self.chaos * 100, ndigits=3) @property def percent_coherence(self) -> float: return round(self.coherence * 100, ndigits=3) @property def raw(self) -> bytes: """ Original untouched bytes. """ return self._payload @property def submatch(self) -> List["CharsetMatch"]: return self._leaves @property def has_submatch(self) -> bool: return len(self._leaves) > 0 @property def alphabets(self) -> List[str]: if self._unicode_ranges is not None: return self._unicode_ranges # list detected ranges detected_ranges: List[Optional[str]] = [ unicode_range(char) for char in str(self) ] # filter and sort self._unicode_ranges = sorted(list({r for r in detected_ranges if r})) return self._unicode_ranges @property def could_be_from_charset(self) -> List[str]: """ The complete list of encoding that output the exact SAME str result and therefore could be the originating encoding. This list does include the encoding available in property 'encoding'. 
""" return [self._encoding] + [m.encoding for m in self._leaves] def first(self) -> "CharsetMatch": """ Kept for BC reasons. Will be removed in 3.0. """ return self def best(self) -> "CharsetMatch": """ Kept for BC reasons. Will be removed in 3.0. """ return self def output(self, encoding: str = "utf_8") -> bytes: """ Method to get re-encoded bytes payload using given target encoding. Default to UTF-8. Any errors will be simply ignored by the encoder NOT replaced. """ if self._output_encoding is None or self._output_encoding != encoding: self._output_encoding = encoding self._output_payload = str(self).encode(encoding, "replace") return self._output_payload # type: ignore @property def fingerprint(self) -> str: """ Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one. """ return sha256(self.output()).hexdigest() class CharsetMatches: """ Container with every CharsetMatch items ordered by default from most probable to the less one. Act like a list(iterable) but does not implements all related methods. """ def __init__(self, results: Optional[List[CharsetMatch]] = None): self._results: List[CharsetMatch] = sorted(results) if results else [] def __iter__(self) -> Iterator[CharsetMatch]: yield from self._results def __getitem__(self, item: Union[int, str]) -> CharsetMatch: """ Retrieve a single item either by its position or encoding name (alias may be used here). Raise KeyError upon invalid index or encoding not present in results. """ if isinstance(item, int): return self._results[item] if isinstance(item, str): item = iana_name(item, False) for result in self._results: if item in result.could_be_from_charset: return result raise KeyError def __len__(self) -> int: return len(self._results) def __bool__(self) -> bool: return len(self._results) > 0 def append(self, item: CharsetMatch) -> None: """ Insert a single match. Will be inserted accordingly to preserve sort. Can be inserted as a submatch. """ if not isinstance(item, CharsetMatch): raise ValueError( "Cannot append instance '{}' to CharsetMatches".format( str(item.__class__) ) ) # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage) if len(item.raw) <= TOO_BIG_SEQUENCE: for match in self._results: if match.fingerprint == item.fingerprint and match.chaos == item.chaos: match.add_submatch(item) return self._results.append(item) self._results = sorted(self._results) def best(self) -> Optional["CharsetMatch"]: """ Simply return the first match. Strict equivalent to matches[0]. """ if not self._results: return None return self._results[0] def first(self) -> Optional["CharsetMatch"]: """ Redundant method, call the method best(). Kept for BC reasons. 
""" return self.best() CoherenceMatch = Tuple[str, float] CoherenceMatches = List[CoherenceMatch] class CliDetectionResult: def __init__( self, path: str, encoding: Optional[str], encoding_aliases: List[str], alternative_encodings: List[str], language: str, alphabets: List[str], has_sig_or_bom: bool, chaos: float, coherence: float, unicode_path: Optional[str], is_preferred: bool, ): self.path: str = path self.unicode_path: Optional[str] = unicode_path self.encoding: Optional[str] = encoding self.encoding_aliases: List[str] = encoding_aliases self.alternative_encodings: List[str] = alternative_encodings self.language: str = language self.alphabets: List[str] = alphabets self.has_sig_or_bom: bool = has_sig_or_bom self.chaos: float = chaos self.coherence: float = coherence self.is_preferred: bool = is_preferred @property def __dict__(self) -> Dict[str, Any]: # type: ignore return { "path": self.path, "encoding": self.encoding, "encoding_aliases": self.encoding_aliases, "alternative_encodings": self.alternative_encodings, "language": self.language, "alphabets": self.alphabets, "has_sig_or_bom": self.has_sig_or_bom, "chaos": self.chaos, "coherence": self.coherence, "unicode_path": self.unicode_path, "is_preferred": self.is_preferred, } def to_json(self) -> str: return dumps(self.__dict__, ensure_ascii=True, indent=4)
13,167
Python
31.756219
120
0.587605
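Usage sketch (illustrative addition, not part of the models.py record above): CharsetMatches acts as a best-first ordered container, and each CharsetMatch decodes its payload lazily. The input bytes are an assumption.

from charset_normalizer.api import from_bytes

matches = from_bytes("Flambé, jalapeño, smörgåsbord".encode("latin_1"))

for match in matches:  # iterated from most to least probable
    print(match.encoding, match.percent_chaos, match.could_be_from_charset)

best = matches.best()
if best is not None:
    print(str(best))            # lazily decoded text
    print(best.fingerprint)     # SHA256 of the re-encoded payload
    utf8_bytes = best.output()  # re-encode; UTF-8 by default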
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cd.py
import importlib from codecs import IncrementalDecoder from collections import Counter from functools import lru_cache from typing import Counter as TypeCounter, Dict, List, Optional, Tuple from .assets import FREQUENCIES from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES from .md import is_suspiciously_successive_range from .models import CoherenceMatches from .utils import ( is_accentuated, is_latin, is_multi_byte_encoding, is_unicode_range_secondary, unicode_range, ) def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese", "Classical Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) for character in ordered_characters: if character not in FREQUENCIES_language_set: continue characters_before_source: List[str] = FREQUENCIES[language][ 0 : FREQUENCIES[language].index(character) ] characters_after_source: List[str] = FREQUENCIES[language][ FREQUENCIES[language].index(character) : ] characters_before: List[str] = ordered_characters[ 0 : ordered_characters.index(character) ] characters_after: List[str] = ordered_characters[ ordered_characters.index(character) : ] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if ( is_suspiciously_successive_range(discovered_range, character_range) is False ): layer_target_range = discovered_range break if layer_target_range is None: layer_target_range = character_range if layer_target_range not in layers: layers[layer_target_range] = character.lower() continue layers[layer_target_range] += character.lower() return list(layers.values()) def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches: """ This function merge results previously given by the function coherence_ratio. The return type is the same as coherence_ratio. """ per_language_ratios: Dict[str, List[float]] = {} for result in results: for sub_result in result: language, ratio = sub_result if language not in per_language_ratios: per_language_ratios[language] = [ratio] continue per_language_ratios[language].append(ratio) merge = [ ( language, round( sum(per_language_ratios[language]) / len(per_language_ratios[language]), 4, ), ) for language in per_language_ratios ] return sorted(merge, key=lambda x: x[1], reverse=True) @lru_cache(maxsize=2048) def coherence_ratio( decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None ) -> CoherenceMatches: """ Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers. A layer = Character extraction by alphabets/ranges. """ results: List[Tuple[str, float]] = [] ignore_non_latin: bool = False sufficient_match_count: int = 0 lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else [] if "Latin Based" in lg_inclusion_list: ignore_non_latin = True lg_inclusion_list.remove("Latin Based") for layer in alpha_unicode_split(decoded_sequence): sequence_frequencies: TypeCounter[str] = Counter(layer) most_common = sequence_frequencies.most_common() character_count: int = sum(o for c, o in most_common) if character_count <= TOO_SMALL_SEQUENCE: continue popular_character_ordered: List[str] = [c for c, o in most_common] for language in lg_inclusion_list or alphabet_languages( popular_character_ordered, ignore_non_latin ): ratio: float = characters_popularity_compare( language, popular_character_ordered ) if ratio < threshold: continue elif ratio >= 0.8: sufficient_match_count += 1 results.append((language, round(ratio, 4))) if sufficient_match_count >= 3: break return sorted(results, key=lambda x: x[1], reverse=True)
10,811
Python
30.8
118
0.630746
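Usage sketch (illustrative addition, not part of the cd.py record above): these coherence helpers are normally driven by from_bytes(), but they can also be exercised directly. The sample inputs are assumptions; the exact ratios depend on the bundled frequency tables.

from charset_normalizer.cd import coherence_ratio, encoding_languages, mb_encoding_languages

print(encoding_languages("cp1251"))    # languages inferred from the code page's Cyrillic range
print(mb_encoding_languages("cp932"))  # ["Japanese"], per the multi-byte association above

text = "Это пример достаточно длинного русского текста для анализа когерентности"
print(coherence_ratio(text))           # e.g. [("Russian", 0.xxxx), ...], ordered by ratio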
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/constant.py
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE from encodings.aliases import aliases from re import IGNORECASE, compile as re_compile from typing import Dict, List, Set, Union from .assets import FREQUENCIES # Contain for each eligible encoding a list of/item bytes SIG/BOM ENCODING_MARKS: Dict[str, Union[bytes, List[bytes]]] = { "utf_8": BOM_UTF8, "utf_7": [ b"\x2b\x2f\x76\x38", b"\x2b\x2f\x76\x39", b"\x2b\x2f\x76\x2b", b"\x2b\x2f\x76\x2f", b"\x2b\x2f\x76\x38\x2d", ], "gb18030": b"\x84\x31\x95\x33", "utf_32": [BOM_UTF32_BE, BOM_UTF32_LE], "utf_16": [BOM_UTF16_BE, BOM_UTF16_LE], } TOO_SMALL_SEQUENCE: int = 32 TOO_BIG_SEQUENCE: int = int(10e6) UTF8_MAXIMAL_ALLOCATION: int = 1112064 UNICODE_RANGES_COMBINED: Dict[str, range] = { "Control character": range(31 + 1), "Basic Latin": range(32, 127 + 1), "Latin-1 Supplement": range(128, 255 + 1), "Latin Extended-A": range(256, 383 + 1), "Latin Extended-B": range(384, 591 + 1), "IPA Extensions": range(592, 687 + 1), "Spacing Modifier Letters": range(688, 767 + 1), "Combining Diacritical Marks": range(768, 879 + 1), "Greek and Coptic": range(880, 1023 + 1), "Cyrillic": range(1024, 1279 + 1), "Cyrillic Supplement": range(1280, 1327 + 1), "Armenian": range(1328, 1423 + 1), "Hebrew": range(1424, 1535 + 1), "Arabic": range(1536, 1791 + 1), "Syriac": range(1792, 1871 + 1), "Arabic Supplement": range(1872, 1919 + 1), "Thaana": range(1920, 1983 + 1), "NKo": range(1984, 2047 + 1), "Samaritan": range(2048, 2111 + 1), "Mandaic": range(2112, 2143 + 1), "Syriac Supplement": range(2144, 2159 + 1), "Arabic Extended-A": range(2208, 2303 + 1), "Devanagari": range(2304, 2431 + 1), "Bengali": range(2432, 2559 + 1), "Gurmukhi": range(2560, 2687 + 1), "Gujarati": range(2688, 2815 + 1), "Oriya": range(2816, 2943 + 1), "Tamil": range(2944, 3071 + 1), "Telugu": range(3072, 3199 + 1), "Kannada": range(3200, 3327 + 1), "Malayalam": range(3328, 3455 + 1), "Sinhala": range(3456, 3583 + 1), "Thai": range(3584, 3711 + 1), "Lao": range(3712, 3839 + 1), "Tibetan": range(3840, 4095 + 1), "Myanmar": range(4096, 4255 + 1), "Georgian": range(4256, 4351 + 1), "Hangul Jamo": range(4352, 4607 + 1), "Ethiopic": range(4608, 4991 + 1), "Ethiopic Supplement": range(4992, 5023 + 1), "Cherokee": range(5024, 5119 + 1), "Unified Canadian Aboriginal Syllabics": range(5120, 5759 + 1), "Ogham": range(5760, 5791 + 1), "Runic": range(5792, 5887 + 1), "Tagalog": range(5888, 5919 + 1), "Hanunoo": range(5920, 5951 + 1), "Buhid": range(5952, 5983 + 1), "Tagbanwa": range(5984, 6015 + 1), "Khmer": range(6016, 6143 + 1), "Mongolian": range(6144, 6319 + 1), "Unified Canadian Aboriginal Syllabics Extended": range(6320, 6399 + 1), "Limbu": range(6400, 6479 + 1), "Tai Le": range(6480, 6527 + 1), "New Tai Lue": range(6528, 6623 + 1), "Khmer Symbols": range(6624, 6655 + 1), "Buginese": range(6656, 6687 + 1), "Tai Tham": range(6688, 6831 + 1), "Combining Diacritical Marks Extended": range(6832, 6911 + 1), "Balinese": range(6912, 7039 + 1), "Sundanese": range(7040, 7103 + 1), "Batak": range(7104, 7167 + 1), "Lepcha": range(7168, 7247 + 1), "Ol Chiki": range(7248, 7295 + 1), "Cyrillic Extended C": range(7296, 7311 + 1), "Sundanese Supplement": range(7360, 7375 + 1), "Vedic Extensions": range(7376, 7423 + 1), "Phonetic Extensions": range(7424, 7551 + 1), "Phonetic Extensions Supplement": range(7552, 7615 + 1), "Combining Diacritical Marks Supplement": range(7616, 7679 + 1), "Latin Extended Additional": range(7680, 7935 + 1), "Greek Extended": range(7936, 8191 + 1), "General 
Punctuation": range(8192, 8303 + 1), "Superscripts and Subscripts": range(8304, 8351 + 1), "Currency Symbols": range(8352, 8399 + 1), "Combining Diacritical Marks for Symbols": range(8400, 8447 + 1), "Letterlike Symbols": range(8448, 8527 + 1), "Number Forms": range(8528, 8591 + 1), "Arrows": range(8592, 8703 + 1), "Mathematical Operators": range(8704, 8959 + 1), "Miscellaneous Technical": range(8960, 9215 + 1), "Control Pictures": range(9216, 9279 + 1), "Optical Character Recognition": range(9280, 9311 + 1), "Enclosed Alphanumerics": range(9312, 9471 + 1), "Box Drawing": range(9472, 9599 + 1), "Block Elements": range(9600, 9631 + 1), "Geometric Shapes": range(9632, 9727 + 1), "Miscellaneous Symbols": range(9728, 9983 + 1), "Dingbats": range(9984, 10175 + 1), "Miscellaneous Mathematical Symbols-A": range(10176, 10223 + 1), "Supplemental Arrows-A": range(10224, 10239 + 1), "Braille Patterns": range(10240, 10495 + 1), "Supplemental Arrows-B": range(10496, 10623 + 1), "Miscellaneous Mathematical Symbols-B": range(10624, 10751 + 1), "Supplemental Mathematical Operators": range(10752, 11007 + 1), "Miscellaneous Symbols and Arrows": range(11008, 11263 + 1), "Glagolitic": range(11264, 11359 + 1), "Latin Extended-C": range(11360, 11391 + 1), "Coptic": range(11392, 11519 + 1), "Georgian Supplement": range(11520, 11567 + 1), "Tifinagh": range(11568, 11647 + 1), "Ethiopic Extended": range(11648, 11743 + 1), "Cyrillic Extended-A": range(11744, 11775 + 1), "Supplemental Punctuation": range(11776, 11903 + 1), "CJK Radicals Supplement": range(11904, 12031 + 1), "Kangxi Radicals": range(12032, 12255 + 1), "Ideographic Description Characters": range(12272, 12287 + 1), "CJK Symbols and Punctuation": range(12288, 12351 + 1), "Hiragana": range(12352, 12447 + 1), "Katakana": range(12448, 12543 + 1), "Bopomofo": range(12544, 12591 + 1), "Hangul Compatibility Jamo": range(12592, 12687 + 1), "Kanbun": range(12688, 12703 + 1), "Bopomofo Extended": range(12704, 12735 + 1), "CJK Strokes": range(12736, 12783 + 1), "Katakana Phonetic Extensions": range(12784, 12799 + 1), "Enclosed CJK Letters and Months": range(12800, 13055 + 1), "CJK Compatibility": range(13056, 13311 + 1), "CJK Unified Ideographs Extension A": range(13312, 19903 + 1), "Yijing Hexagram Symbols": range(19904, 19967 + 1), "CJK Unified Ideographs": range(19968, 40959 + 1), "Yi Syllables": range(40960, 42127 + 1), "Yi Radicals": range(42128, 42191 + 1), "Lisu": range(42192, 42239 + 1), "Vai": range(42240, 42559 + 1), "Cyrillic Extended-B": range(42560, 42655 + 1), "Bamum": range(42656, 42751 + 1), "Modifier Tone Letters": range(42752, 42783 + 1), "Latin Extended-D": range(42784, 43007 + 1), "Syloti Nagri": range(43008, 43055 + 1), "Common Indic Number Forms": range(43056, 43071 + 1), "Phags-pa": range(43072, 43135 + 1), "Saurashtra": range(43136, 43231 + 1), "Devanagari Extended": range(43232, 43263 + 1), "Kayah Li": range(43264, 43311 + 1), "Rejang": range(43312, 43359 + 1), "Hangul Jamo Extended-A": range(43360, 43391 + 1), "Javanese": range(43392, 43487 + 1), "Myanmar Extended-B": range(43488, 43519 + 1), "Cham": range(43520, 43615 + 1), "Myanmar Extended-A": range(43616, 43647 + 1), "Tai Viet": range(43648, 43743 + 1), "Meetei Mayek Extensions": range(43744, 43775 + 1), "Ethiopic Extended-A": range(43776, 43823 + 1), "Latin Extended-E": range(43824, 43887 + 1), "Cherokee Supplement": range(43888, 43967 + 1), "Meetei Mayek": range(43968, 44031 + 1), "Hangul Syllables": range(44032, 55215 + 1), "Hangul Jamo Extended-B": range(55216, 55295 + 1), "High 
Surrogates": range(55296, 56191 + 1), "High Private Use Surrogates": range(56192, 56319 + 1), "Low Surrogates": range(56320, 57343 + 1), "Private Use Area": range(57344, 63743 + 1), "CJK Compatibility Ideographs": range(63744, 64255 + 1), "Alphabetic Presentation Forms": range(64256, 64335 + 1), "Arabic Presentation Forms-A": range(64336, 65023 + 1), "Variation Selectors": range(65024, 65039 + 1), "Vertical Forms": range(65040, 65055 + 1), "Combining Half Marks": range(65056, 65071 + 1), "CJK Compatibility Forms": range(65072, 65103 + 1), "Small Form Variants": range(65104, 65135 + 1), "Arabic Presentation Forms-B": range(65136, 65279 + 1), "Halfwidth and Fullwidth Forms": range(65280, 65519 + 1), "Specials": range(65520, 65535 + 1), "Linear B Syllabary": range(65536, 65663 + 1), "Linear B Ideograms": range(65664, 65791 + 1), "Aegean Numbers": range(65792, 65855 + 1), "Ancient Greek Numbers": range(65856, 65935 + 1), "Ancient Symbols": range(65936, 65999 + 1), "Phaistos Disc": range(66000, 66047 + 1), "Lycian": range(66176, 66207 + 1), "Carian": range(66208, 66271 + 1), "Coptic Epact Numbers": range(66272, 66303 + 1), "Old Italic": range(66304, 66351 + 1), "Gothic": range(66352, 66383 + 1), "Old Permic": range(66384, 66431 + 1), "Ugaritic": range(66432, 66463 + 1), "Old Persian": range(66464, 66527 + 1), "Deseret": range(66560, 66639 + 1), "Shavian": range(66640, 66687 + 1), "Osmanya": range(66688, 66735 + 1), "Osage": range(66736, 66815 + 1), "Elbasan": range(66816, 66863 + 1), "Caucasian Albanian": range(66864, 66927 + 1), "Linear A": range(67072, 67455 + 1), "Cypriot Syllabary": range(67584, 67647 + 1), "Imperial Aramaic": range(67648, 67679 + 1), "Palmyrene": range(67680, 67711 + 1), "Nabataean": range(67712, 67759 + 1), "Hatran": range(67808, 67839 + 1), "Phoenician": range(67840, 67871 + 1), "Lydian": range(67872, 67903 + 1), "Meroitic Hieroglyphs": range(67968, 67999 + 1), "Meroitic Cursive": range(68000, 68095 + 1), "Kharoshthi": range(68096, 68191 + 1), "Old South Arabian": range(68192, 68223 + 1), "Old North Arabian": range(68224, 68255 + 1), "Manichaean": range(68288, 68351 + 1), "Avestan": range(68352, 68415 + 1), "Inscriptional Parthian": range(68416, 68447 + 1), "Inscriptional Pahlavi": range(68448, 68479 + 1), "Psalter Pahlavi": range(68480, 68527 + 1), "Old Turkic": range(68608, 68687 + 1), "Old Hungarian": range(68736, 68863 + 1), "Rumi Numeral Symbols": range(69216, 69247 + 1), "Brahmi": range(69632, 69759 + 1), "Kaithi": range(69760, 69839 + 1), "Sora Sompeng": range(69840, 69887 + 1), "Chakma": range(69888, 69967 + 1), "Mahajani": range(69968, 70015 + 1), "Sharada": range(70016, 70111 + 1), "Sinhala Archaic Numbers": range(70112, 70143 + 1), "Khojki": range(70144, 70223 + 1), "Multani": range(70272, 70319 + 1), "Khudawadi": range(70320, 70399 + 1), "Grantha": range(70400, 70527 + 1), "Newa": range(70656, 70783 + 1), "Tirhuta": range(70784, 70879 + 1), "Siddham": range(71040, 71167 + 1), "Modi": range(71168, 71263 + 1), "Mongolian Supplement": range(71264, 71295 + 1), "Takri": range(71296, 71375 + 1), "Ahom": range(71424, 71487 + 1), "Warang Citi": range(71840, 71935 + 1), "Zanabazar Square": range(72192, 72271 + 1), "Soyombo": range(72272, 72367 + 1), "Pau Cin Hau": range(72384, 72447 + 1), "Bhaiksuki": range(72704, 72815 + 1), "Marchen": range(72816, 72895 + 1), "Masaram Gondi": range(72960, 73055 + 1), "Cuneiform": range(73728, 74751 + 1), "Cuneiform Numbers and Punctuation": range(74752, 74879 + 1), "Early Dynastic Cuneiform": range(74880, 75087 + 1), "Egyptian 
Hieroglyphs": range(77824, 78895 + 1), "Anatolian Hieroglyphs": range(82944, 83583 + 1), "Bamum Supplement": range(92160, 92735 + 1), "Mro": range(92736, 92783 + 1), "Bassa Vah": range(92880, 92927 + 1), "Pahawh Hmong": range(92928, 93071 + 1), "Miao": range(93952, 94111 + 1), "Ideographic Symbols and Punctuation": range(94176, 94207 + 1), "Tangut": range(94208, 100351 + 1), "Tangut Components": range(100352, 101119 + 1), "Kana Supplement": range(110592, 110847 + 1), "Kana Extended-A": range(110848, 110895 + 1), "Nushu": range(110960, 111359 + 1), "Duployan": range(113664, 113823 + 1), "Shorthand Format Controls": range(113824, 113839 + 1), "Byzantine Musical Symbols": range(118784, 119039 + 1), "Musical Symbols": range(119040, 119295 + 1), "Ancient Greek Musical Notation": range(119296, 119375 + 1), "Tai Xuan Jing Symbols": range(119552, 119647 + 1), "Counting Rod Numerals": range(119648, 119679 + 1), "Mathematical Alphanumeric Symbols": range(119808, 120831 + 1), "Sutton SignWriting": range(120832, 121519 + 1), "Glagolitic Supplement": range(122880, 122927 + 1), "Mende Kikakui": range(124928, 125151 + 1), "Adlam": range(125184, 125279 + 1), "Arabic Mathematical Alphabetic Symbols": range(126464, 126719 + 1), "Mahjong Tiles": range(126976, 127023 + 1), "Domino Tiles": range(127024, 127135 + 1), "Playing Cards": range(127136, 127231 + 1), "Enclosed Alphanumeric Supplement": range(127232, 127487 + 1), "Enclosed Ideographic Supplement": range(127488, 127743 + 1), "Miscellaneous Symbols and Pictographs": range(127744, 128511 + 1), "Emoticons range(Emoji)": range(128512, 128591 + 1), "Ornamental Dingbats": range(128592, 128639 + 1), "Transport and Map Symbols": range(128640, 128767 + 1), "Alchemical Symbols": range(128768, 128895 + 1), "Geometric Shapes Extended": range(128896, 129023 + 1), "Supplemental Arrows-C": range(129024, 129279 + 1), "Supplemental Symbols and Pictographs": range(129280, 129535 + 1), "CJK Unified Ideographs Extension B": range(131072, 173791 + 1), "CJK Unified Ideographs Extension C": range(173824, 177983 + 1), "CJK Unified Ideographs Extension D": range(177984, 178207 + 1), "CJK Unified Ideographs Extension E": range(178208, 183983 + 1), "CJK Unified Ideographs Extension F": range(183984, 191471 + 1), "CJK Compatibility Ideographs Supplement": range(194560, 195103 + 1), "Tags": range(917504, 917631 + 1), "Variation Selectors Supplement": range(917760, 917999 + 1), } UNICODE_SECONDARY_RANGE_KEYWORD: List[str] = [ "Supplement", "Extended", "Extensions", "Modifier", "Marks", "Punctuation", "Symbols", "Forms", "Operators", "Miscellaneous", "Drawing", "Block", "Shapes", "Supplemental", "Tags", ] RE_POSSIBLE_ENCODING_INDICATION = re_compile( r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)", IGNORECASE, ) IANA_SUPPORTED: List[str] = sorted( filter( lambda x: x.endswith("_codec") is False and x not in {"rot_13", "tactis", "mbcs"}, list(set(aliases.values())), ) ) IANA_SUPPORTED_COUNT: int = len(IANA_SUPPORTED) # pre-computed code page that are similar using the function cp_similarity. 
IANA_SUPPORTED_SIMILAR: Dict[str, List[str]] = { "cp037": ["cp1026", "cp1140", "cp273", "cp500"], "cp1026": ["cp037", "cp1140", "cp273", "cp500"], "cp1125": ["cp866"], "cp1140": ["cp037", "cp1026", "cp273", "cp500"], "cp1250": ["iso8859_2"], "cp1251": ["kz1048", "ptcp154"], "cp1252": ["iso8859_15", "iso8859_9", "latin_1"], "cp1253": ["iso8859_7"], "cp1254": ["iso8859_15", "iso8859_9", "latin_1"], "cp1257": ["iso8859_13"], "cp273": ["cp037", "cp1026", "cp1140", "cp500"], "cp437": ["cp850", "cp858", "cp860", "cp861", "cp862", "cp863", "cp865"], "cp500": ["cp037", "cp1026", "cp1140", "cp273"], "cp850": ["cp437", "cp857", "cp858", "cp865"], "cp857": ["cp850", "cp858", "cp865"], "cp858": ["cp437", "cp850", "cp857", "cp865"], "cp860": ["cp437", "cp861", "cp862", "cp863", "cp865"], "cp861": ["cp437", "cp860", "cp862", "cp863", "cp865"], "cp862": ["cp437", "cp860", "cp861", "cp863", "cp865"], "cp863": ["cp437", "cp860", "cp861", "cp862", "cp865"], "cp865": ["cp437", "cp850", "cp857", "cp858", "cp860", "cp861", "cp862", "cp863"], "cp866": ["cp1125"], "iso8859_10": ["iso8859_14", "iso8859_15", "iso8859_4", "iso8859_9", "latin_1"], "iso8859_11": ["tis_620"], "iso8859_13": ["cp1257"], "iso8859_14": [ "iso8859_10", "iso8859_15", "iso8859_16", "iso8859_3", "iso8859_9", "latin_1", ], "iso8859_15": [ "cp1252", "cp1254", "iso8859_10", "iso8859_14", "iso8859_16", "iso8859_3", "iso8859_9", "latin_1", ], "iso8859_16": [ "iso8859_14", "iso8859_15", "iso8859_2", "iso8859_3", "iso8859_9", "latin_1", ], "iso8859_2": ["cp1250", "iso8859_16", "iso8859_4"], "iso8859_3": ["iso8859_14", "iso8859_15", "iso8859_16", "iso8859_9", "latin_1"], "iso8859_4": ["iso8859_10", "iso8859_2", "iso8859_9", "latin_1"], "iso8859_7": ["cp1253"], "iso8859_9": [ "cp1252", "cp1254", "cp1258", "iso8859_10", "iso8859_14", "iso8859_15", "iso8859_16", "iso8859_3", "iso8859_4", "latin_1", ], "kz1048": ["cp1251", "ptcp154"], "latin_1": [ "cp1252", "cp1254", "cp1258", "iso8859_10", "iso8859_14", "iso8859_15", "iso8859_16", "iso8859_3", "iso8859_4", "iso8859_9", ], "mac_iceland": ["mac_roman", "mac_turkish"], "mac_roman": ["mac_iceland", "mac_turkish"], "mac_turkish": ["mac_iceland", "mac_roman"], "ptcp154": ["cp1251", "kz1048"], "tis_620": ["iso8859_11"], } CHARDET_CORRESPONDENCE: Dict[str, str] = { "iso2022_kr": "ISO-2022-KR", "iso2022_jp": "ISO-2022-JP", "euc_kr": "EUC-KR", "tis_620": "TIS-620", "utf_32": "UTF-32", "euc_jp": "EUC-JP", "koi8_r": "KOI8-R", "iso8859_1": "ISO-8859-1", "iso8859_2": "ISO-8859-2", "iso8859_5": "ISO-8859-5", "iso8859_6": "ISO-8859-6", "iso8859_7": "ISO-8859-7", "iso8859_8": "ISO-8859-8", "utf_16": "UTF-16", "cp855": "IBM855", "mac_cyrillic": "MacCyrillic", "gb2312": "GB2312", "gb18030": "GB18030", "cp932": "CP932", "cp866": "IBM866", "utf_8": "utf-8", "utf_8_sig": "UTF-8-SIG", "shift_jis": "SHIFT_JIS", "big5": "Big5", "cp1250": "windows-1250", "cp1251": "windows-1251", "cp1252": "Windows-1252", "cp1253": "windows-1253", "cp1255": "windows-1255", "cp1256": "windows-1256", "cp1254": "Windows-1254", "cp949": "CP949", } COMMON_SAFE_ASCII_CHARACTERS: Set[str] = { "<", ">", "=", ":", "/", "&", ";", "{", "}", "[", "]", ",", "|", '"', "-", } KO_NAMES: Set[str] = {"johab", "cp949", "euc_kr"} ZH_NAMES: Set[str] = {"big5", "cp950", "big5hkscs", "hz"} NOT_PRINTABLE_PATTERN = re_compile(r"[0-9\W\n\r\t]+") LANGUAGE_SUPPORTED_COUNT: int = len(FREQUENCIES) # Logging LEVEL bellow DEBUG TRACE: int = 5
19,157
Python
37.469879
102
0.59075
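The Unicode block table above is simply a mapping from block name to a range of code points; a minimal, self-contained sketch of the kind of lookup it enables (using only a few ranges copied from the table, and a hypothetical helper name) might look like this:

from typing import Dict, Optional

# A small excerpt of the block table above; the full table maps every block
# name to its code-point range.
SAMPLE_RANGES: Dict[str, range] = {
    "Linear B Syllabary": range(65536, 65663 + 1),
    "Gothic": range(66352, 66383 + 1),
    "Mahjong Tiles": range(126976, 127023 + 1),
}

def unicode_range_of(character: str) -> Optional[str]:
    # Hypothetical helper: return the name of the block containing the character.
    code_point = ord(character)
    for name, code_points in SAMPLE_RANGES.items():
        if code_point in code_points:
            return name
    return None

assert unicode_range_of("\U00010000") == "Linear B Syllabary"
assert unicode_range_of("A") is None   # not covered by this small excerpt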
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/charset_normalizer/cli/normalizer.py
import argparse import sys from json import dumps from os.path import abspath from platform import python_version from typing import List, Optional try: from unicodedata2 import unidata_version except ImportError: from unicodedata import unidata_version from charset_normalizer import from_fp from charset_normalizer.models import CliDetectionResult from charset_normalizer.version import __version__ def query_yes_no(question: str, default: str = "yes") -> bool: """Ask a yes/no question via input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no" or None (meaning an answer is required of the user). The "answer" return value is True for "yes" or False for "no". Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input """ valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} if default is None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("invalid default answer: '%s'" % default) while True: sys.stdout.write(question + prompt) choice = input().lower() if default is not None and choice == "": return valid[default] elif choice in valid: return valid[choice] else: sys.stdout.write("Please respond with 'yes' or 'no' " "(or 'y' or 'n').\n") def cli_detect(argv: Optional[List[str]] = None) -> int: """ CLI assistant using ARGV and ArgumentParser :param argv: :return: 0 if everything is fine, anything else equal trouble """ parser = argparse.ArgumentParser( description="The Real First Universal Charset Detector. " "Discover originating encoding used on text file. " "Normalize text to unicode." ) parser.add_argument( "files", type=argparse.FileType("rb"), nargs="+", help="File(s) to be analysed" ) parser.add_argument( "-v", "--verbose", action="store_true", default=False, dest="verbose", help="Display complementary information about file if any. " "Stdout will contain logs about the detection process.", ) parser.add_argument( "-a", "--with-alternative", action="store_true", default=False, dest="alternatives", help="Output complementary possibilities if any. Top-level JSON WILL be a list.", ) parser.add_argument( "-n", "--normalize", action="store_true", default=False, dest="normalize", help="Permit to normalize input file. If not set, program does not write anything.", ) parser.add_argument( "-m", "--minimal", action="store_true", default=False, dest="minimal", help="Only output the charset detected to STDOUT. Disabling JSON output.", ) parser.add_argument( "-r", "--replace", action="store_true", default=False, dest="replace", help="Replace file when trying to normalize it instead of creating a new one.", ) parser.add_argument( "-f", "--force", action="store_true", default=False, dest="force", help="Replace file without asking if you are sure, use this flag with caution.", ) parser.add_argument( "-t", "--threshold", action="store", default=0.2, type=float, dest="threshold", help="Define a custom maximum amount of chaos allowed in decoded content. 0. 
<= chaos <= 1.", ) parser.add_argument( "--version", action="version", version="Charset-Normalizer {} - Python {} - Unicode {}".format( __version__, python_version(), unidata_version ), help="Show version information and exit.", ) args = parser.parse_args(argv) if args.replace is True and args.normalize is False: print("Use --replace in addition of --normalize only.", file=sys.stderr) return 1 if args.force is True and args.replace is False: print("Use --force in addition of --replace only.", file=sys.stderr) return 1 if args.threshold < 0.0 or args.threshold > 1.0: print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr) return 1 x_ = [] for my_file in args.files: matches = from_fp(my_file, threshold=args.threshold, explain=args.verbose) best_guess = matches.best() if best_guess is None: print( 'Unable to identify originating encoding for "{}". {}'.format( my_file.name, "Maybe try increasing maximum amount of chaos." if args.threshold < 1.0 else "", ), file=sys.stderr, ) x_.append( CliDetectionResult( abspath(my_file.name), None, [], [], "Unknown", [], False, 1.0, 0.0, None, True, ) ) else: x_.append( CliDetectionResult( abspath(my_file.name), best_guess.encoding, best_guess.encoding_aliases, [ cp for cp in best_guess.could_be_from_charset if cp != best_guess.encoding ], best_guess.language, best_guess.alphabets, best_guess.bom, best_guess.percent_chaos, best_guess.percent_coherence, None, True, ) ) if len(matches) > 1 and args.alternatives: for el in matches: if el != best_guess: x_.append( CliDetectionResult( abspath(my_file.name), el.encoding, el.encoding_aliases, [ cp for cp in el.could_be_from_charset if cp != el.encoding ], el.language, el.alphabets, el.bom, el.percent_chaos, el.percent_coherence, None, False, ) ) if args.normalize is True: if best_guess.encoding.startswith("utf") is True: print( '"{}" file does not need to be normalized, as it already came from unicode.'.format( my_file.name ), file=sys.stderr, ) if my_file.closed is False: my_file.close() continue o_: List[str] = my_file.name.split(".") if args.replace is False: o_.insert(-1, best_guess.encoding) if my_file.closed is False: my_file.close() elif ( args.force is False and query_yes_no( 'Are you sure to normalize "{}" by replacing it ?'.format( my_file.name ), "no", ) is False ): if my_file.closed is False: my_file.close() continue try: x_[0].unicode_path = abspath("./{}".format(".".join(o_))) with open(x_[0].unicode_path, "w", encoding="utf-8") as fp: fp.write(str(best_guess)) except IOError as e: print(str(e), file=sys.stderr) if my_file.closed is False: my_file.close() return 2 if my_file.closed is False: my_file.close() if args.minimal is False: print( dumps( [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__, ensure_ascii=True, indent=4, ) ) else: for my_file in args.files: print( ", ".join( [ el.encoding or "undefined" for el in x_ if el.path == abspath(my_file.name) ] ) ) return 0 if __name__ == "__main__": cli_detect()
9,521
Python
31.168919
111
0.461086
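Because cli_detect accepts an explicit argv list and returns an integer exit code, it can be driven programmatically as well as from the command line; a minimal sketch, with the file name as a placeholder and the import path taken from the listing above:

from charset_normalizer.cli.normalizer import cli_detect

# Equivalent to running: normalizer --minimal some_file.txt
exit_code = cli_detect(["some_file.txt", "--minimal"])
if exit_code != 0:
    print("detection reported a problem, exit code", exit_code)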
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/_endpoint_helpers.py
import aiohttp.http_exceptions from aiohttp.client_reqrep import ClientResponse import asyncio import botocore.retryhandler import wrapt # Monkey patching: We need to insert the aiohttp exception equivalents # The only other way to do this would be to have another config file :( _aiohttp_retryable_exceptions = [ aiohttp.ClientConnectionError, aiohttp.ClientPayloadError, aiohttp.ServerDisconnectedError, aiohttp.http_exceptions.HttpProcessingError, asyncio.TimeoutError, ] botocore.retryhandler.EXCEPTION_MAP['GENERAL_CONNECTION_ERROR'].extend( _aiohttp_retryable_exceptions ) def _text(s, encoding='utf-8', errors='strict'): if isinstance(s, bytes): return s.decode(encoding, errors) return s # pragma: no cover # Unfortunately aiohttp changed the behavior of streams: # github.com/aio-libs/aiohttp/issues/1907 # We need this wrapper until we have a final resolution class _IOBaseWrapper(wrapt.ObjectProxy): def close(self): # this stream should not be closed by aiohttp, like 1.x pass # This is similar to botocore.response.StreamingBody class ClientResponseContentProxy(wrapt.ObjectProxy): """Proxy object for content stream of http response. This is here in case you want to pass around the "Body" of the response without closing the response itself.""" def __init__(self, response): super().__init__(response.__wrapped__.content) self._self_response = response # Note: we don't have a __del__ method as the ClientResponse has a __del__ # which will warn the user if they didn't close/release the response # explicitly. A release here would mean reading all the unread data # (which could be very large), and a close would mean being unable to re- # use the connection, so the user MUST chose. Default is to warn + close async def __aenter__(self): await self._self_response.__aenter__() return self async def __aexit__(self, exc_type, exc_val, exc_tb): return await self._self_response.__aexit__(exc_type, exc_val, exc_tb) @property def url(self): return self._self_response.url def close(self): self._self_response.close() class ClientResponseProxy(wrapt.ObjectProxy): """Proxy object for http response useful for porting from botocore underlying http library.""" def __init__(self, *args, **kwargs): super().__init__(ClientResponse(*args, **kwargs)) # this matches ClientResponse._body self._self_body = None @property def status_code(self): return self.status @status_code.setter def status_code(self, value): # botocore tries to set this, see: # https://github.com/aio-libs/aiobotocore/issues/190 # Luckily status is an attribute we can set self.status = value @property def content(self): return self._self_body @property def raw(self): return ClientResponseContentProxy(self) async def read(self): self._self_body = await self.__wrapped__.read() return self._self_body
3,127
Python
30.28
78
0.684362
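A quick way to see what _IOBaseWrapper buys: it proxies a file-like request body but turns close() into a no-op, so aiohttp cannot close the caller's stream. A minimal sketch, assuming the import path shown in the listing above:

import io

from aiobotocore._endpoint_helpers import _IOBaseWrapper

body = io.BytesIO(b"payload")
wrapped = _IOBaseWrapper(body)

wrapped.read()    # proxies through to the underlying BytesIO
wrapped.close()   # intentionally a no-op, see the class above
assert body.closed is False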
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/config.py
import copy

import botocore.client
from botocore.exceptions import ParamValidationError


class AioConfig(botocore.client.Config):

    def __init__(self, connector_args=None, **kwargs):
        super().__init__(**kwargs)

        self._validate_connector_args(connector_args)
        self.connector_args = copy.copy(connector_args)
        if not self.connector_args:
            self.connector_args = dict()

        if 'keepalive_timeout' not in self.connector_args:
            # AWS has a 20 second idle timeout:
            #   https://forums.aws.amazon.com/message.jspa?messageID=215367
            # and aiohttp default timeout is 30s so we set it to something
            # reasonable here
            self.connector_args['keepalive_timeout'] = 12

    def merge(self, other_config):
        # Adapted from parent class
        config_options = copy.copy(self._user_provided_options)
        config_options.update(other_config._user_provided_options)
        return AioConfig(self.connector_args, **config_options)

    @staticmethod
    def _validate_connector_args(connector_args):
        if connector_args is None:
            return

        for k, v in connector_args.items():
            # verify_ssl is handled by verify parameter to create_client
            if k == 'use_dns_cache':
                if not isinstance(v, bool):
                    raise ParamValidationError(
                        report='{} value must be a boolean'.format(k))
            elif k in ['keepalive_timeout']:
                if not isinstance(v, (float, int)):
                    raise ParamValidationError(
                        report='{} value must be a float/int'.format(k))
            elif k == 'force_close':
                if not isinstance(v, bool):
                    raise ParamValidationError(
                        report='{} value must be a boolean'.format(k))
            # limit is handled by max_pool_connections
            elif k == 'ssl_context':
                import ssl
                if not isinstance(v, ssl.SSLContext):
                    raise ParamValidationError(
                        report='{} must be an SSLContext instance'.format(k))
            else:
                raise ParamValidationError(
                    report='invalid connector_arg:{}'.format(k))
2,308
Python
38.810344
77
0.574523
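AioConfig accepts the usual botocore Config keywords plus the aiohttp-specific connector_args validated above; unknown connector keys raise ParamValidationError. A small sketch:

from botocore.exceptions import ParamValidationError

from aiobotocore.config import AioConfig

config = AioConfig(
    connector_args={'keepalive_timeout': 20, 'use_dns_cache': True},
    max_pool_connections=20,       # regular botocore Config keyword
)

try:
    AioConfig(connector_args={'limit': 5})   # rejected: use max_pool_connections
except ParamValidationError as exc:
    print(exc)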
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/response.py
import asyncio import wrapt from botocore.exceptions import IncompleteReadError, ReadTimeoutError class AioReadTimeoutError(ReadTimeoutError, asyncio.TimeoutError): pass class StreamingBody(wrapt.ObjectProxy): """Wrapper class for an http response body. This provides a few additional conveniences that do not exist in the urllib3 model: * Set the timeout on the socket (i.e read() timeouts) * Auto validation of content length, if the amount of bytes we read does not match the content length, an exception is raised. """ _DEFAULT_CHUNK_SIZE = 1024 def __init__(self, raw_stream, content_length): super().__init__(raw_stream) self._self_content_length = content_length self._self_amount_read = 0 # https://github.com/GrahamDumpleton/wrapt/issues/73 async def __aenter__(self): return await self.__wrapped__.__aenter__() async def __aexit__(self, exc_type, exc_val, exc_tb): return await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb) # NOTE: set_socket_timeout was only for when requests didn't support # read timeouts, so not needed def tell(self): return self._self_amount_read async def read(self, amt=None): """Read at most amt bytes from the stream. If the amt argument is omitted, read all data. """ # botocore to aiohttp mapping try: chunk = await self.__wrapped__.read(amt if amt is not None else -1) except asyncio.TimeoutError as e: raise AioReadTimeoutError(endpoint_url=self.__wrapped__.url, error=e) self._self_amount_read += len(chunk) if amt is None or (not chunk and amt > 0): # If the server sends empty contents or # we ask to read all of the contents, then we know # we need to verify the content length. self._verify_content_length() return chunk def __aiter__(self): """Return an iterator to yield 1k chunks from the raw stream. """ return self.iter_chunks(self._DEFAULT_CHUNK_SIZE) async def __anext__(self): """Return the next 1k chunk from the raw stream. """ current_chunk = await self.read(self._DEFAULT_CHUNK_SIZE) if current_chunk: return current_chunk raise StopAsyncIteration anext = __anext__ async def iter_lines(self, chunk_size=1024, keepends=False): """Return an iterator to yield lines from the raw stream. This is achieved by reading chunk of bytes (of size chunk_size) at a time from the raw stream, and then yielding lines from there. """ pending = b'' async for chunk in self.iter_chunks(chunk_size): lines = (pending + chunk).splitlines(True) for line in lines[:-1]: yield line.splitlines(keepends)[0] pending = lines[-1] if pending: yield pending.splitlines(keepends)[0] async def iter_chunks(self, chunk_size=_DEFAULT_CHUNK_SIZE): """Return an iterator to yield chunks of chunk_size bytes from the raw stream. """ while True: current_chunk = await self.read(chunk_size) if current_chunk == b"": break yield current_chunk def _verify_content_length(self): # See: https://github.com/kennethreitz/requests/issues/1855 # Basically, our http library doesn't do this for us, so we have # to do this ourself. if self._self_content_length is not None and \ self._self_amount_read != int(self._self_content_length): raise IncompleteReadError( actual_bytes=self._self_amount_read, expected_bytes=int(self._self_content_length))
3,916
Python
33.973214
79
0.607763
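In practice this StreamingBody shows up as the Body of streaming responses (for example S3 GetObject); a hedged sketch of consuming it in chunks, with the bucket and key names as placeholders:

async def download(s3_client, bucket="my-bucket", key="my-key"):
    # s3_client is assumed to be an aiobotocore S3 client.
    response = await s3_client.get_object(Bucket=bucket, Key=key)
    data = bytearray()
    # iter_chunks is the async generator defined on StreamingBody above.
    async for chunk in response["Body"].iter_chunks(chunk_size=4096):
        data.extend(chunk)
    return bytes(data)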
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/signers.py
import datetime import botocore import botocore.auth from botocore.signers import RequestSigner, UnknownSignatureVersionError, \ UnsupportedSignatureVersionError, create_request_object, prepare_request_dict, \ _should_use_global_endpoint, S3PostPresigner from botocore.exceptions import UnknownClientMethodError class AioRequestSigner(RequestSigner): async def handler(self, operation_name=None, request=None, **kwargs): # This is typically hooked up to the "request-created" event # from a client's event emitter. When a new request is created # this method is invoked to sign the request. # Don't call this method directly. return await self.sign(operation_name, request) async def sign(self, operation_name, request, region_name=None, signing_type='standard', expires_in=None, signing_name=None): explicit_region_name = region_name if region_name is None: region_name = self._region_name if signing_name is None: signing_name = self._signing_name signature_version = await self._choose_signer( operation_name, signing_type, request.context) # Allow mutating request before signing await self._event_emitter.emit( 'before-sign.{0}.{1}'.format( self._service_id.hyphenize(), operation_name), request=request, signing_name=signing_name, region_name=self._region_name, signature_version=signature_version, request_signer=self, operation_name=operation_name ) if signature_version != botocore.UNSIGNED: kwargs = { 'signing_name': signing_name, 'region_name': region_name, 'signature_version': signature_version } if expires_in is not None: kwargs['expires'] = expires_in signing_context = request.context.get('signing', {}) if not explicit_region_name and signing_context.get('region'): kwargs['region_name'] = signing_context['region'] if signing_context.get('signing_name'): kwargs['signing_name'] = signing_context['signing_name'] try: auth = await self.get_auth_instance(**kwargs) except UnknownSignatureVersionError as e: if signing_type != 'standard': raise UnsupportedSignatureVersionError( signature_version=signature_version) else: raise e auth.add_auth(request) async def get_auth_instance(self, signing_name, region_name, signature_version=None, **kwargs): if signature_version is None: signature_version = self._signature_version cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version) if cls is None: raise UnknownSignatureVersionError( signature_version=signature_version) frozen_credentials = None if self._credentials is not None: frozen_credentials = await self._credentials.get_frozen_credentials() kwargs['credentials'] = frozen_credentials if cls.REQUIRES_REGION: if self._region_name is None: raise botocore.exceptions.NoRegionError() kwargs['region_name'] = region_name kwargs['service_name'] = signing_name auth = cls(**kwargs) return auth # Alias get_auth for backwards compatibility. 
get_auth = get_auth_instance async def _choose_signer(self, operation_name, signing_type, context): signing_type_suffix_map = { 'presign-post': '-presign-post', 'presign-url': '-query' } suffix = signing_type_suffix_map.get(signing_type, '') signature_version = self._signature_version if signature_version is not botocore.UNSIGNED and not \ signature_version.endswith(suffix): signature_version += suffix handler, response = await self._event_emitter.emit_until_response( 'choose-signer.{0}.{1}'.format( self._service_id.hyphenize(), operation_name), signing_name=self._signing_name, region_name=self._region_name, signature_version=signature_version, context=context) if response is not None: signature_version = response # The suffix needs to be checked again in case we get an improper # signature version from choose-signer. if signature_version is not botocore.UNSIGNED and not \ signature_version.endswith(suffix): signature_version += suffix return signature_version async def generate_presigned_url(self, request_dict, operation_name, expires_in=3600, region_name=None, signing_name=None): request = create_request_object(request_dict) await self.sign(operation_name, request, region_name, 'presign-url', expires_in, signing_name) request.prepare() return request.url def add_generate_db_auth_token(class_attributes, **kwargs): class_attributes['generate_db_auth_token'] = generate_db_auth_token async def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None): """Generates an auth token used to connect to a db with IAM credentials. :type DBHostname: str :param DBHostname: The hostname of the database to connect to. :type Port: int :param Port: The port number the database is listening on. :type DBUsername: str :param DBUsername: The username to log in as. :type Region: str :param Region: The region the database is in. If None, the client region will be used. :return: A presigned url which can be used as an auth token. """ region = Region if region is None: region = self.meta.region_name params = { 'Action': 'connect', 'DBUser': DBUsername, } request_dict = { 'url_path': '/', 'query_string': '', 'headers': {}, 'body': params, 'method': 'GET' } # RDS requires that the scheme not be set when sent over. This can cause # issues when signing because the Python url parsing libraries follow # RFC 1808 closely, which states that a netloc must be introduced by `//`. # Otherwise the url is presumed to be relative, and thus the whole # netloc would be treated as a path component. To work around this we # introduce https here and remove it once we're done processing it. scheme = 'https://' endpoint_url = '%s%s:%s' % (scheme, DBHostname, Port) prepare_request_dict(request_dict, endpoint_url) presigned_url = await self._request_signer.generate_presigned_url( operation_name='connect', request_dict=request_dict, region_name=region, expires_in=900, signing_name='rds-db' ) return presigned_url[len(scheme):] def add_generate_presigned_url(class_attributes, **kwargs): class_attributes['generate_presigned_url'] = generate_presigned_url async def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, HttpMethod=None): """Generate a presigned url given a client, its method, and arguments :type ClientMethod: string :param ClientMethod: The client method to presign for :type Params: dict :param Params: The parameters normally passed to ``ClientMethod``. :type ExpiresIn: int :param ExpiresIn: The number of seconds the presigned url is valid for. 
By default it expires in an hour (3600 seconds) :type HttpMethod: string :param HttpMethod: The http method to use on the generated url. By default, the http method is whatever is used in the method's model. :returns: The presigned url """ client_method = ClientMethod params = Params if params is None: params = {} expires_in = ExpiresIn http_method = HttpMethod context = { 'is_presign_request': True, 'use_global_endpoint': _should_use_global_endpoint(self), } request_signer = self._request_signer serializer = self._serializer try: operation_name = self._PY_TO_OP_NAME[client_method] except KeyError: raise UnknownClientMethodError(method_name=client_method) operation_model = self.meta.service_model.operation_model( operation_name) params = await self._emit_api_params(params, operation_model, context) # Create a request dict based on the params to serialize. request_dict = serializer.serialize_to_request( params, operation_model) # Switch out the http method if user specified it. if http_method is not None: request_dict['method'] = http_method # Prepare the request dict by including the client's endpoint url. prepare_request_dict( request_dict, endpoint_url=self.meta.endpoint_url, context=context) # Generate the presigned url. return await request_signer.generate_presigned_url( request_dict=request_dict, expires_in=expires_in, operation_name=operation_name) class AioS3PostPresigner(S3PostPresigner): async def generate_presigned_post(self, request_dict, fields=None, conditions=None, expires_in=3600, region_name=None): if fields is None: fields = {} if conditions is None: conditions = [] # Create the policy for the post. policy = {} # Create an expiration date for the policy datetime_now = datetime.datetime.utcnow() expire_date = datetime_now + datetime.timedelta(seconds=expires_in) policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601) # Append all of the conditions that the user supplied. policy['conditions'] = [] for condition in conditions: policy['conditions'].append(condition) # Store the policy and the fields in the request for signing request = create_request_object(request_dict) request.context['s3-presign-post-fields'] = fields request.context['s3-presign-post-policy'] = policy await self._request_signer.sign( 'PutObject', request, region_name, 'presign-post') # Return the url and the fields for th form to post. return {'url': request.url, 'fields': fields} def add_generate_presigned_post(class_attributes, **kwargs): class_attributes['generate_presigned_post'] = generate_presigned_post async def generate_presigned_post(self, Bucket, Key, Fields=None, Conditions=None, ExpiresIn=3600): bucket = Bucket key = Key fields = Fields conditions = Conditions expires_in = ExpiresIn if fields is None: fields = {} else: fields = fields.copy() if conditions is None: conditions = [] post_presigner = AioS3PostPresigner(self._request_signer) serializer = self._serializer # We choose the CreateBucket operation model because its url gets # serialized to what a presign post requires. operation_model = self.meta.service_model.operation_model( 'CreateBucket') # Create a request dict based on the params to serialize. request_dict = serializer.serialize_to_request( {'Bucket': bucket}, operation_model) # Prepare the request dict by including the client's endpoint url. prepare_request_dict( request_dict, endpoint_url=self.meta.endpoint_url, context={ 'is_presign_request': True, 'use_global_endpoint': _should_use_global_endpoint(self), }, ) # Append that the bucket name to the list of conditions. 
conditions.append({'bucket': bucket}) # If the key ends with filename, the only constraint that can be # imposed is if it starts with the specified prefix. if key.endswith('${filename}'): conditions.append(["starts-with", '$key', key[:-len('${filename}')]]) else: conditions.append({'key': key}) # Add the key to the fields. fields['key'] = key return await post_presigner.generate_presigned_post( request_dict=request_dict, fields=fields, conditions=conditions, expires_in=expires_in)
12,640
Python
35.961988
84
0.632832
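Because these patched-in client methods are coroutines, generating a presigned URL with aiobotocore has to be awaited; a minimal sketch, with the bucket and key as placeholders:

async def presign_get(s3_client):
    # s3_client is assumed to be an aiobotocore S3 client.
    return await s3_client.generate_presigned_url(
        ClientMethod="get_object",
        Params={"Bucket": "my-bucket", "Key": "my-key"},
        ExpiresIn=300,
    )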
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/hooks.py
import asyncio

from botocore.hooks import HierarchicalEmitter, logger


class AioHierarchicalEmitter(HierarchicalEmitter):
    async def _emit(self, event_name, kwargs, stop_on_response=False):
        responses = []
        # Invoke the event handlers from most specific
        # to least specific, each time stripping off a dot.
        handlers_to_call = self._lookup_cache.get(event_name)
        if handlers_to_call is None:
            handlers_to_call = self._handlers.prefix_search(event_name)
            self._lookup_cache[event_name] = handlers_to_call
        elif not handlers_to_call:
            # Short circuit and return an empty response if we have
            # no handlers to call.  This is the common case where
            # for the majority of signals, nothing is listening.
            return []

        kwargs['event_name'] = event_name
        responses = []
        for handler in handlers_to_call:
            logger.debug('Event %s: calling handler %s', event_name, handler)

            # Await the handler if it's a coroutine.
            if asyncio.iscoroutinefunction(handler):
                response = await handler(**kwargs)
            else:
                response = handler(**kwargs)

            responses.append((handler, response))
            if stop_on_response and response is not None:
                return responses

        return responses

    async def emit_until_response(self, event_name, **kwargs):
        responses = await self._emit(event_name, kwargs, stop_on_response=True)
        if responses:
            return responses[-1]
        else:
            return None, None
1,636
Python
37.97619
79
0.614914
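The only behavioural difference from the botocore emitter is that coroutine handlers are awaited; a small sketch registering one handler of each kind (the event name here is just an example), relying on the register/emit methods inherited from HierarchicalEmitter:

import asyncio

from aiobotocore.hooks import AioHierarchicalEmitter

emitter = AioHierarchicalEmitter()

def sync_handler(**kwargs):
    return None

async def async_handler(**kwargs):
    await asyncio.sleep(0)
    return "handled"

emitter.register("request-created.s3.PutObject", sync_handler)
emitter.register("request-created.s3.PutObject", async_handler)

async def fire():
    responses = await emitter.emit("request-created.s3.PutObject")
    print(responses)   # list of (handler, response) pairs

asyncio.run(fire())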
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/args.py
import copy from botocore.args import ClientArgsCreator import botocore.serialize import botocore.parsers from .config import AioConfig from .endpoint import AioEndpointCreator from .signers import AioRequestSigner class AioClientArgsCreator(ClientArgsCreator): # NOTE: we override this so we can pull out the custom AioConfig params and # use an AioEndpointCreator def get_client_args(self, service_model, region_name, is_secure, endpoint_url, verify, credentials, scoped_config, client_config, endpoint_bridge): final_args = self.compute_client_args( service_model, client_config, endpoint_bridge, region_name, endpoint_url, is_secure, scoped_config) # service_name = final_args['service_name'] parameter_validation = final_args['parameter_validation'] endpoint_config = final_args['endpoint_config'] protocol = final_args['protocol'] config_kwargs = final_args['config_kwargs'] s3_config = final_args['s3_config'] partition = endpoint_config['metadata'].get('partition', None) socket_options = final_args['socket_options'] signing_region = endpoint_config['signing_region'] endpoint_region_name = endpoint_config['region_name'] event_emitter = copy.copy(self._event_emitter) signer = AioRequestSigner( service_model.service_id, signing_region, endpoint_config['signing_name'], endpoint_config['signature_version'], credentials, event_emitter ) config_kwargs['s3'] = s3_config # aiobotocore addition if isinstance(client_config, AioConfig): connector_args = client_config.connector_args else: connector_args = None new_config = AioConfig(connector_args, **config_kwargs) endpoint_creator = AioEndpointCreator(event_emitter) endpoint = endpoint_creator.create_endpoint( service_model, region_name=endpoint_region_name, endpoint_url=endpoint_config['endpoint_url'], verify=verify, response_parser_factory=self._response_parser_factory, max_pool_connections=new_config.max_pool_connections, proxies=new_config.proxies, timeout=(new_config.connect_timeout, new_config.read_timeout), socket_options=socket_options, client_cert=new_config.client_cert, connector_args=new_config.connector_args) serializer = botocore.serialize.create_serializer( protocol, parameter_validation) response_parser = botocore.parsers.create_parser(protocol) return { 'serializer': serializer, 'endpoint': endpoint, 'response_parser': response_parser, 'event_emitter': event_emitter, 'request_signer': signer, 'service_model': service_model, 'loader': self._loader, 'client_config': new_config, 'partition': partition, 'exceptions_factory': self._exceptions_factory }
3,155
Python
38.949367
79
0.639303
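The isinstance check above is the reason connector_args only take effect when the client is built with an AioConfig; a hedged sketch of the call that exercises this path (service name and region are examples only):

from aiobotocore import get_session
from aiobotocore.config import AioConfig

async def build_client():
    session = get_session()
    config = AioConfig(connector_args={"keepalive_timeout": 30})
    # A plain botocore Config here would silently drop connector_args.
    return session.create_client("s3", region_name="us-east-1", config=config)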
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/__init__.py
from .session import get_session, AioSession

__all__ = ['get_session', 'AioSession']
__version__ = '1.2.0'
108
Python
20.799996
44
0.648148
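Putting the pieces together, the package's public entry point is get_session; a minimal, hedged sketch of creating a client and making one call (service and region are examples, and create_client is used as an async context manager in this aiobotocore version):

import asyncio

from aiobotocore import get_session

async def main():
    session = get_session()
    async with session.create_client("s3", region_name="us-east-1") as client:
        response = await client.list_buckets()
        print([bucket["Name"] for bucket in response.get("Buckets", [])])

asyncio.run(main())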
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/waiter.py
import asyncio # WaiterModel is required for client.py import from botocore.exceptions import ClientError from botocore.waiter import WaiterModel # noqa: F401, lgtm[py/unused-import] from botocore.waiter import Waiter, xform_name, logger, WaiterError, \ NormalizedOperationMethod as _NormalizedOperationMethod from botocore.docs.docstring import WaiterDocstring from botocore.utils import get_service_module_name class NormalizedOperationMethod(_NormalizedOperationMethod): async def __call__(self, **kwargs): try: return await self._client_method(**kwargs) except ClientError as e: return e.response class AIOWaiter(Waiter): async def wait(self, **kwargs): acceptors = list(self.config.acceptors) current_state = 'waiting' # pop the invocation specific config config = kwargs.pop('WaiterConfig', {}) sleep_amount = config.get('Delay', self.config.delay) max_attempts = config.get('MaxAttempts', self.config.max_attempts) last_matched_acceptor = None num_attempts = 0 while True: response = await self._operation_method(**kwargs) num_attempts += 1 for acceptor in acceptors: if acceptor.matcher_func(response): last_matched_acceptor = acceptor current_state = acceptor.state break else: # If none of the acceptors matched, we should # transition to the failure state if an error # response was received. if 'Error' in response: # Transition to a failure state, which we # can just handle here by raising an exception. raise WaiterError( name=self.name, reason='An error occurred (%s): %s' % ( response['Error'].get('Code', 'Unknown'), response['Error'].get('Message', 'Unknown'), ), last_response=response, ) if current_state == 'success': logger.debug("Waiting complete, waiter matched the " "success state.") return if current_state == 'failure': reason = 'Waiter encountered a terminal failure state: %s' % ( acceptor.explanation ) raise WaiterError( name=self.name, reason=reason, last_response=response, ) if num_attempts >= max_attempts: if last_matched_acceptor is None: reason = 'Max attempts exceeded' else: reason = 'Max attempts exceeded. Previously accepted state: %s' % ( acceptor.explanation ) raise WaiterError( name=self.name, reason=reason, last_response=response, ) await asyncio.sleep(sleep_amount) def create_waiter_with_client(waiter_name, waiter_model, client): """ :type waiter_name: str :param waiter_name: The name of the waiter. The name should match the name (including the casing) of the key name in the waiter model file (typically this is CamelCasing). :type waiter_model: botocore.waiter.WaiterModel :param waiter_model: The model for the waiter configuration. :type client: botocore.client.BaseClient :param client: The botocore client associated with the service. :rtype: botocore.waiter.Waiter :return: The waiter object. """ single_waiter_config = waiter_model.get_waiter(waiter_name) operation_name = xform_name(single_waiter_config.operation) operation_method = NormalizedOperationMethod( getattr(client, operation_name)) # Create a new wait method that will serve as a proxy to the underlying # Waiter.wait method. This is needed to attach a docstring to the # method. async def wait(self, **kwargs): await AIOWaiter.wait(self, **kwargs) wait.__doc__ = WaiterDocstring( waiter_name=waiter_name, event_emitter=client.meta.events, service_model=client.meta.service_model, service_waiter_model=waiter_model, include_signature=False ) # Rename the waiter class based on the type of waiter. 
waiter_class_name = str('%s.AIOWaiter.%s' % ( get_service_module_name(client.meta.service_model), waiter_name)) # Create the new waiter class documented_waiter_cls = type( waiter_class_name, (AIOWaiter,), {'wait': wait}) # Return an instance of the new waiter class. return documented_waiter_cls( waiter_name, single_waiter_config, operation_method )
5,020
Python
37.037879
87
0.581275
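Client code does not usually call create_waiter_with_client directly; it fetches the async waiter from the client and awaits wait(), passing the optional per-call WaiterConfig handled above. A sketch, with the bucket name as a placeholder:

async def wait_until_bucket_exists(s3_client, bucket="my-bucket"):
    # s3_client is assumed to be an aiobotocore S3 client.
    waiter = s3_client.get_waiter("bucket_exists")
    await waiter.wait(
        Bucket=bucket,
        WaiterConfig={"Delay": 5, "MaxAttempts": 12},
    )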
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/endpoint.py
import aiohttp import asyncio import io import ssl import aiohttp.http_exceptions from aiohttp.client import URL from botocore.endpoint import EndpointCreator, Endpoint, DEFAULT_TIMEOUT, \ MAX_POOL_CONNECTIONS, logger, history_recorder, create_request_object from botocore.exceptions import ConnectionClosedError from botocore.hooks import first_non_none_response from botocore.utils import is_valid_endpoint_url from multidict import MultiDict from urllib.parse import urlparse from urllib3.response import HTTPHeaderDict from aiobotocore.response import StreamingBody from aiobotocore._endpoint_helpers import _text, _IOBaseWrapper, \ ClientResponseProxy async def convert_to_response_dict(http_response, operation_model): """Convert an HTTP response object to a request dict. This converts the requests library's HTTP response object to a dictionary. :type http_response: botocore.vendored.requests.model.Response :param http_response: The HTTP response from an AWS service request. :rtype: dict :return: A response dictionary which will contain the following keys: * headers (dict) * status_code (int) * body (string or file-like object) """ response_dict = { # botocore converts keys to str, so make sure that they are in # the expected case. See detailed discussion here: # https://github.com/aio-libs/aiobotocore/pull/116 # aiohttp's CIMultiDict camel cases the headers :( 'headers': HTTPHeaderDict( {k.decode('utf-8').lower(): v.decode('utf-8') for k, v in http_response.raw_headers}), 'status_code': http_response.status_code, 'context': { 'operation_name': operation_model.name, } } if response_dict['status_code'] >= 300: response_dict['body'] = await http_response.read() elif operation_model.has_event_stream_output: response_dict['body'] = http_response.raw elif operation_model.has_streaming_output: length = response_dict['headers'].get('content-length') response_dict['body'] = StreamingBody(http_response.raw, length) else: response_dict['body'] = await http_response.read() return response_dict class AioEndpoint(Endpoint): def __init__(self, *args, proxies=None, **kwargs): super().__init__(*args, **kwargs) self.proxies = proxies or {} async def create_request(self, params, operation_model=None): request = create_request_object(params) if operation_model: request.stream_output = any([ operation_model.has_streaming_output, operation_model.has_event_stream_output ]) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'request-created.{service_id}.{op_name}'.format( service_id=service_id, op_name=operation_model.name) await self._event_emitter.emit(event_name, request=request, operation_name=operation_model.name) prepared_request = self.prepare_request(request) return prepared_request async def _send_request(self, request_dict, operation_model): attempts = 1 request = await self.create_request(request_dict, operation_model) context = request_dict['context'] success_response, exception = await self._get_response( request, operation_model, context) while await self._needs_retry(attempts, operation_model, request_dict, success_response, exception): attempts += 1 # If there is a stream associated with the request, we need # to reset it before attempting to send the request again. # This will ensure that we resend the entire contents of the # body. request.reset_stream() # Create a new request when retried (including a new signature). 
request = await self.create_request( request_dict, operation_model) success_response, exception = await self._get_response( request, operation_model, context) if success_response is not None and \ 'ResponseMetadata' in success_response[1]: # We want to share num retries, not num attempts. total_retries = attempts - 1 success_response[1]['ResponseMetadata']['RetryAttempts'] = \ total_retries if exception is not None: raise exception else: return success_response async def _get_response(self, request, operation_model, context): # This will return a tuple of (success_response, exception) # and success_response is itself a tuple of # (http_response, parsed_dict). # If an exception occurs then the success_response is None. # If no exception occurs then exception is None. success_response, exception = await self._do_get_response( request, operation_model) kwargs_to_emit = { 'response_dict': None, 'parsed_response': None, 'context': context, 'exception': exception, } if success_response is not None: http_response, parsed_response = success_response kwargs_to_emit['parsed_response'] = parsed_response kwargs_to_emit['response_dict'] = await convert_to_response_dict( http_response, operation_model) service_id = operation_model.service_model.service_id.hyphenize() await self._event_emitter.emit( 'response-received.%s.%s' % ( service_id, operation_model.name), **kwargs_to_emit) return success_response, exception async def _do_get_response(self, request, operation_model): try: logger.debug("Sending http request: %s", request) history_recorder.record('HTTP_REQUEST', { 'method': request.method, 'headers': request.headers, 'streaming': operation_model.has_streaming_input, 'url': request.url, 'body': request.body }) service_id = operation_model.service_model.service_id.hyphenize() event_name = 'before-send.%s.%s' % ( service_id, operation_model.name) responses = await self._event_emitter.emit(event_name, request=request) http_response = first_non_none_response(responses) if http_response is None: http_response = await self._send(request) except aiohttp.ClientConnectionError as e: e.request = request # botocore expects the request property return None, e except aiohttp.http_exceptions.BadStatusLine: better_exception = ConnectionClosedError( endpoint_url=request.url, request=request) return None, better_exception except Exception as e: logger.debug("Exception received when sending HTTP request.", exc_info=True) return None, e # This returns the http_response and the parsed_data. 
response_dict = await convert_to_response_dict(http_response, operation_model) http_response_record_dict = response_dict.copy() http_response_record_dict['streaming'] = \ operation_model.has_streaming_output history_recorder.record('HTTP_RESPONSE', http_response_record_dict) protocol = operation_model.metadata['protocol'] parser = self._response_parser_factory.create_parser(protocol) parsed_response = parser.parse( response_dict, operation_model.output_shape) if http_response.status_code >= 300: self._add_modeled_error_fields( response_dict, parsed_response, operation_model, parser, ) history_recorder.record('PARSED_RESPONSE', parsed_response) return (http_response, parsed_response), None # NOTE: The only line changed here changing time.sleep to asyncio.sleep async def _needs_retry(self, attempts, operation_model, request_dict, response=None, caught_exception=None): service_id = operation_model.service_model.service_id.hyphenize() event_name = 'needs-retry.%s.%s' % ( service_id, operation_model.name) responses = await self._event_emitter.emit( event_name, response=response, endpoint=self, operation=operation_model, attempts=attempts, caught_exception=caught_exception, request_dict=request_dict) handler_response = first_non_none_response(responses) if handler_response is None: return False else: # Request needs to be retried, and we need to sleep # for the specified number of times. logger.debug("Response received to retry, sleeping for " "%s seconds", handler_response) await asyncio.sleep(handler_response) return True async def _send(self, request): # Note: When using aiobotocore with dynamodb, requests fail on crc32 # checksum computation as soon as the response data reaches ~5KB. # When AWS response is gzip compressed: # 1. aiohttp is automatically decompressing the data # (http://aiohttp.readthedocs.io/en/stable/client.html#binary-response-content) # 2. botocore computes crc32 on the uncompressed data bytes and fails # cause crc32 has been computed on the compressed data # The following line forces aws not to use gzip compression, # if there is a way to configure aiohttp not to perform decompression, # we can remove the following line and take advantage of # aws gzip compression. 
# https://github.com/boto/botocore/issues/1255 url = request.url headers = request.headers data = request.body headers['Accept-Encoding'] = 'identity' headers_ = MultiDict( (z[0], _text(z[1], encoding='utf-8')) for z in headers.items()) # botocore does this during the request so we do this here as well # TODO: this should be part of the ClientSession, perhaps make wrapper proxy = self.proxies.get(urlparse(url.lower()).scheme) if isinstance(data, io.IOBase): data = _IOBaseWrapper(data) url = URL(url, encoded=True) resp = await self.http_session.request( request.method, url=url, headers=headers_, data=data, proxy=proxy) # If we're not streaming, read the content so we can retry any timeout # errors, see: # https://github.com/boto/botocore/blob/develop/botocore/vendored/requests/sessions.py#L604 if not request.stream_output: await resp.read() return resp class AioEndpointCreator(EndpointCreator): # TODO: handle socket_options def create_endpoint(self, service_model, region_name, endpoint_url, verify=None, response_parser_factory=None, timeout=DEFAULT_TIMEOUT, max_pool_connections=MAX_POOL_CONNECTIONS, http_session_cls=aiohttp.ClientSession, proxies=None, socket_options=None, client_cert=None, connector_args=None): if not is_valid_endpoint_url(endpoint_url): raise ValueError("Invalid endpoint: %s" % endpoint_url) if proxies is None: proxies = self._get_proxies(endpoint_url) endpoint_prefix = service_model.endpoint_prefix logger.debug('Setting %s timeout as %s', endpoint_prefix, timeout) if isinstance(timeout, (list, tuple)): conn_timeout, read_timeout = timeout else: conn_timeout = read_timeout = timeout if connector_args is None: # AWS has a 20 second idle timeout: # https://forums.aws.amazon.com/message.jspa?messageID=215367 # aiohttp default timeout is 30s so set something reasonable here connector_args = dict(keepalive_timeout=12) timeout = aiohttp.ClientTimeout( sock_connect=conn_timeout, sock_read=read_timeout ) ssl_context = None if client_cert: if isinstance(client_cert, str): key_file = None cert_file = client_cert elif isinstance(client_cert, tuple): cert_file, key_file = client_cert else: assert False ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH) ssl_context.load_cert_chain(cert_file, key_file) connector = aiohttp.TCPConnector( limit=max_pool_connections, verify_ssl=self._get_verify_value(verify), ssl_context=ssl_context, **connector_args) aio_session = http_session_cls( connector=connector, timeout=timeout, skip_auto_headers={'CONTENT-TYPE'}, response_class=ClientResponseProxy, auto_decompress=False) return AioEndpoint( endpoint_url, endpoint_prefix=endpoint_prefix, event_emitter=self._event_emitter, response_parser_factory=response_parser_factory, http_session=aio_session, proxies=proxies)
13,904
Python
42.317757
99
0.611263
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/utils.py
import asyncio import logging import json import aiohttp import aiohttp.client_exceptions from botocore.utils import ContainerMetadataFetcher, InstanceMetadataFetcher, \ IMDSFetcher, get_environ_proxies, BadIMDSRequestError, S3RegionRedirector, \ ClientError from botocore.exceptions import ( InvalidIMDSEndpointError, MetadataRetrievalError, ) import botocore.awsrequest logger = logging.getLogger(__name__) RETRYABLE_HTTP_ERRORS = (aiohttp.client_exceptions.ClientError, asyncio.TimeoutError) class AioIMDSFetcher(IMDSFetcher): class Response(object): def __init__(self, status_code, text, url): self.status_code = status_code self.url = url self.text = text self.content = text def __init__(self, *args, session=None, **kwargs): super(AioIMDSFetcher, self).__init__(*args, **kwargs) self._trust_env = bool(get_environ_proxies(self._base_url)) self._session = session or aiohttp.ClientSession async def _fetch_metadata_token(self): self._assert_enabled() url = self._base_url + self._TOKEN_PATH headers = { 'x-aws-ec2-metadata-token-ttl-seconds': self._TOKEN_TTL, } self._add_user_agent(headers) request = botocore.awsrequest.AWSRequest( method='PUT', url=url, headers=headers) timeout = aiohttp.ClientTimeout(total=self._timeout) async with self._session(timeout=timeout, trust_env=self._trust_env) as session: for i in range(self._num_attempts): try: async with session.put(url, headers=headers) as resp: text = await resp.text() if resp.status == 200: return text elif resp.status in (404, 403, 405): return None elif resp.status in (400,): raise BadIMDSRequestError(request) except asyncio.TimeoutError: return None except RETRYABLE_HTTP_ERRORS as e: logger.debug( "Caught retryable HTTP exception while making metadata " "service request to %s: %s", url, e, exc_info=True) except aiohttp.client_exceptions.ClientConnectorError as e: if getattr(e, 'errno', None) == 8 or \ str(getattr(e, 'os_error', None)) == \ 'Domain name not found': # threaded vs async resolver raise InvalidIMDSEndpointError(endpoint=url, error=e) else: raise return None async def _get_request(self, url_path, retry_func, token=None): self._assert_enabled() if retry_func is None: retry_func = self._default_retry url = self._base_url + url_path headers = {} if token is not None: headers['x-aws-ec2-metadata-token'] = token self._add_user_agent(headers) timeout = aiohttp.ClientTimeout(total=self._timeout) async with self._session(timeout=timeout, trust_env=self._trust_env) as session: for i in range(self._num_attempts): try: async with session.get(url, headers=headers) as resp: text = await resp.text() response = self.Response(resp.status, text, resp.url) if not retry_func(response): return response except RETRYABLE_HTTP_ERRORS as e: logger.debug( "Caught retryable HTTP exception while making metadata " "service request to %s: %s", url, e, exc_info=True) raise self._RETRIES_EXCEEDED_ERROR_CLS() class AioInstanceMetadataFetcher(AioIMDSFetcher, InstanceMetadataFetcher): async def retrieve_iam_role_credentials(self): try: token = await self._fetch_metadata_token() role_name = await self._get_iam_role(token) credentials = await self._get_credentials(role_name, token) if self._contains_all_credential_fields(credentials): return { 'role_name': role_name, 'access_key': credentials['AccessKeyId'], 'secret_key': credentials['SecretAccessKey'], 'token': credentials['Token'], 'expiry_time': credentials['Expiration'], } else: if 'Code' in credentials and 'Message' in credentials: logger.debug('Error response received when retrieving' 'credentials: %s.', 
credentials) return {} except self._RETRIES_EXCEEDED_ERROR_CLS: logger.debug("Max number of attempts exceeded (%s) when " "attempting to retrieve data from metadata service.", self._num_attempts) except BadIMDSRequestError as e: logger.debug("Bad IMDS request: %s", e.request) return {} async def _get_iam_role(self, token=None): r = await self._get_request( url_path=self._URL_PATH, retry_func=self._needs_retry_for_role_name, token=token ) return r.text async def _get_credentials(self, role_name, token=None): r = await self._get_request( url_path=self._URL_PATH + role_name, retry_func=self._needs_retry_for_credentials, token=token ) return json.loads(r.text) class AioS3RegionRedirector(S3RegionRedirector): async def redirect_from_error(self, request_dict, response, operation, **kwargs): if response is None: # This could be none if there was a ConnectionError or other # transport error. return if self._is_s3_accesspoint(request_dict.get('context', {})): logger.debug( 'S3 request was previously to an accesspoint, not redirecting.' ) return if request_dict.get('context', {}).get('s3_redirected'): logger.debug( 'S3 request was previously redirected, not redirecting.') return error = response[1].get('Error', {}) error_code = error.get('Code') response_metadata = response[1].get('ResponseMetadata', {}) # We have to account for 400 responses because # if we sign a Head* request with the wrong region, # we'll get a 400 Bad Request but we won't get a # body saying it's an "AuthorizationHeaderMalformed". is_special_head_object = ( error_code in ['301', '400'] and operation.name == 'HeadObject' ) is_special_head_bucket = ( error_code in ['301', '400'] and operation.name == 'HeadBucket' and 'x-amz-bucket-region' in response_metadata.get('HTTPHeaders', {}) ) is_wrong_signing_region = ( error_code == 'AuthorizationHeaderMalformed' and 'Region' in error ) is_redirect_status = response[0] is not None and \ response[0].status_code in [301, 302, 307] is_permanent_redirect = error_code == 'PermanentRedirect' if not any([is_special_head_object, is_wrong_signing_region, is_permanent_redirect, is_special_head_bucket, is_redirect_status]): return bucket = request_dict['context']['signing']['bucket'] client_region = request_dict['context'].get('client_region') new_region = await self.get_bucket_region(bucket, response) if new_region is None: logger.debug( "S3 client configured for region %s but the bucket %s is not " "in that region and the proper region could not be " "automatically determined." % (client_region, bucket)) return logger.debug( "S3 client configured for region %s but the bucket %s is in region" " %s; Please configure the proper region to avoid multiple " "unnecessary redirects and signing attempts." % ( client_region, bucket, new_region)) endpoint = self._endpoint_resolver.resolve('s3', new_region) endpoint = endpoint['endpoint_url'] signing_context = { 'region': new_region, 'bucket': bucket, 'endpoint': endpoint } request_dict['context']['signing'] = signing_context self._cache[bucket] = signing_context self.set_request_url(request_dict, request_dict['context']) request_dict['context']['s3_redirected'] = True # Return 0 so it doesn't wait to retry return 0 async def get_bucket_region(self, bucket, response): # First try to source the region from the headers. 
service_response = response[1] response_headers = service_response['ResponseMetadata']['HTTPHeaders'] if 'x-amz-bucket-region' in response_headers: return response_headers['x-amz-bucket-region'] # Next, check the error body region = service_response.get('Error', {}).get('Region', None) if region is not None: return region # Finally, HEAD the bucket. No other choice sadly. try: response = await self._client.head_bucket(Bucket=bucket) headers = response['ResponseMetadata']['HTTPHeaders'] except ClientError as e: headers = e.response['ResponseMetadata']['HTTPHeaders'] region = headers.get('x-amz-bucket-region', None) return region class AioContainerMetadataFetcher(ContainerMetadataFetcher): def __init__(self, session=None, sleep=asyncio.sleep): if session is None: session = aiohttp.ClientSession super(AioContainerMetadataFetcher, self).__init__(session, sleep) async def retrieve_full_uri(self, full_url, headers=None): self._validate_allowed_url(full_url) return await self._retrieve_credentials(full_url, headers) async def retrieve_uri(self, relative_uri): """Retrieve JSON metadata from ECS metadata. :type relative_uri: str :param relative_uri: A relative URI, e.g "/foo/bar?id=123" :return: The parsed JSON response. """ full_url = self.full_url(relative_uri) return await self._retrieve_credentials(full_url) async def _retrieve_credentials(self, full_url, extra_headers=None): headers = {'Accept': 'application/json'} if extra_headers is not None: headers.update(extra_headers) attempts = 0 while True: try: return await self._get_response( full_url, headers, self.TIMEOUT_SECONDS) except MetadataRetrievalError as e: logger.debug("Received error when attempting to retrieve " "container metadata: %s", e, exc_info=True) await self._sleep(self.SLEEP_TIME) attempts += 1 if attempts >= self.RETRY_ATTEMPTS: raise async def _get_response(self, full_url, headers, timeout): try: timeout = aiohttp.ClientTimeout(total=self.TIMEOUT_SECONDS) async with self._session(timeout=timeout) as session: async with session.get(full_url, headers=headers) as resp: if resp.status != 200: text = await resp.text() raise MetadataRetrievalError( error_msg=( "Received non 200 response (%d) " "from ECS metadata: %s" ) % (resp.status, text)) try: return await resp.json() except ValueError: text = await resp.text() error_msg = ( "Unable to parse JSON returned from ECS metadata services" ) logger.debug('%s:%s', error_msg, text) raise MetadataRetrievalError(error_msg=error_msg) except RETRYABLE_HTTP_ERRORS as e: error_msg = ("Received error when attempting to retrieve " "ECS metadata: %s" % e) raise MetadataRetrievalError(error_msg=error_msg)
12,902
Python
40.223642
86
0.557976
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/credentials.py
import asyncio import datetime import logging import subprocess import json from copy import deepcopy from typing import Optional from hashlib import sha1 from dateutil.tz import tzutc from botocore import UNSIGNED from botocore.config import Config import botocore.compat from botocore.credentials import EnvProvider, Credentials, RefreshableCredentials, \ ReadOnlyCredentials, ContainerProvider, ContainerMetadataFetcher, \ _parse_if_needed, InstanceMetadataProvider, _get_client_creator, \ ProfileProviderBuilder, ConfigProvider, SharedCredentialProvider, \ ProcessProvider, AssumeRoleWithWebIdentityProvider, _local_now, \ CachedCredentialFetcher, _serialize_if_needed, BaseAssumeRoleCredentialFetcher, \ AssumeRoleProvider, AssumeRoleCredentialFetcher, CredentialResolver, \ CanonicalNameCredentialSourcer, BotoProvider, OriginalEC2Provider, \ SSOProvider from botocore.exceptions import UnauthorizedSSOTokenError from botocore.exceptions import MetadataRetrievalError, CredentialRetrievalError, \ InvalidConfigError, PartialCredentialsError, RefreshWithMFAUnsupportedError, \ UnknownCredentialError from botocore.compat import compat_shell_split from botocore.utils import SSOTokenLoader from aiobotocore.utils import AioContainerMetadataFetcher, AioInstanceMetadataFetcher from aiobotocore.config import AioConfig logger = logging.getLogger(__name__) def create_credential_resolver(session, cache=None, region_name=None): """Create a default credential resolver. This creates a pre-configured credential resolver that includes the default lookup chain for credentials. """ profile_name = session.get_config_variable('profile') or 'default' metadata_timeout = session.get_config_variable('metadata_service_timeout') num_attempts = session.get_config_variable('metadata_service_num_attempts') disable_env_vars = session.instance_variables().get('profile') is not None imds_config = { 'ec2_metadata_service_endpoint': session.get_config_variable( 'ec2_metadata_service_endpoint'), 'imds_use_ipv6': session.get_config_variable('imds_use_ipv6') } if cache is None: cache = {} env_provider = AioEnvProvider() container_provider = AioContainerProvider() instance_metadata_provider = AioInstanceMetadataProvider( iam_role_fetcher=AioInstanceMetadataFetcher( timeout=metadata_timeout, num_attempts=num_attempts, user_agent=session.user_agent(), config=imds_config) ) profile_provider_builder = AioProfileProviderBuilder( session, cache=cache, region_name=region_name) assume_role_provider = AioAssumeRoleProvider( load_config=lambda: session.full_config, client_creator=_get_client_creator(session, region_name), cache=cache, profile_name=profile_name, credential_sourcer=AioCanonicalNameCredentialSourcer([ env_provider, container_provider, instance_metadata_provider ]), profile_provider_builder=profile_provider_builder, ) pre_profile = [ env_provider, assume_role_provider, ] profile_providers = profile_provider_builder.providers( profile_name=profile_name, disable_env_vars=disable_env_vars, ) post_profile = [ AioOriginalEC2Provider(), AioBotoProvider(), container_provider, instance_metadata_provider, ] providers = pre_profile + profile_providers + post_profile if disable_env_vars: # An explicitly provided profile will negate an EnvProvider. # We will defer to providers that understand the "profile" # concept to retrieve credentials. 
# The one edge case if is all three values are provided via # env vars: # export AWS_ACCESS_KEY_ID=foo # export AWS_SECRET_ACCESS_KEY=bar # export AWS_PROFILE=baz # Then, just like our client() calls, the explicit credentials # will take precedence. # # This precedence is enforced by leaving the EnvProvider in the chain. # This means that the only way a "profile" would win is if the # EnvProvider does not return credentials, which is what we want # in this scenario. providers.remove(env_provider) logger.debug('Skipping environment variable credential check' ' because profile name was explicitly set.') resolver = AioCredentialResolver(providers=providers) return resolver class AioProfileProviderBuilder(ProfileProviderBuilder): def _create_process_provider(self, profile_name): return AioProcessProvider( profile_name=profile_name, load_config=lambda: self._session.full_config, ) def _create_shared_credential_provider(self, profile_name): credential_file = self._session.get_config_variable('credentials_file') return AioSharedCredentialProvider( profile_name=profile_name, creds_filename=credential_file, ) def _create_config_provider(self, profile_name): config_file = self._session.get_config_variable('config_file') return AioConfigProvider( profile_name=profile_name, config_filename=config_file, ) def _create_web_identity_provider(self, profile_name, disable_env_vars): return AioAssumeRoleWithWebIdentityProvider( load_config=lambda: self._session.full_config, client_creator=_get_client_creator( self._session, self._region_name), cache=self._cache, profile_name=profile_name, disable_env_vars=disable_env_vars, ) def _create_sso_provider(self, profile_name): return AioSSOProvider( load_config=lambda: self._session.full_config, client_creator=self._session.create_client, profile_name=profile_name, cache=self._cache, token_cache=self._sso_token_cache, ) async def get_credentials(session): resolver = create_credential_resolver(session) return await resolver.load_credentials() def create_assume_role_refresher(client, params): async def refresh(): async with client as sts: response = await sts.assume_role(**params) credentials = response['Credentials'] # We need to normalize the credential names to # the values expected by the refresh creds. return { 'access_key': credentials['AccessKeyId'], 'secret_key': credentials['SecretAccessKey'], 'token': credentials['SessionToken'], 'expiry_time': _serialize_if_needed(credentials['Expiration']), } return refresh def create_aio_mfa_serial_refresher(actual_refresh): class _Refresher(object): def __init__(self, refresh): self._refresh = refresh self._has_been_called = False async def call(self): if self._has_been_called: # We can explore an option in the future to support # reprompting for MFA, but for now we just error out # when the temp creds expire. 
raise RefreshWithMFAUnsupportedError() self._has_been_called = True return await self._refresh() return _Refresher(actual_refresh).call class AioCredentials(Credentials): async def get_frozen_credentials(self): return ReadOnlyCredentials(self.access_key, self.secret_key, self.token) @classmethod def from_credentials(cls, obj: Optional[Credentials]): if obj is None: return None return cls( obj.access_key, obj.secret_key, obj.token, obj.method) class AioRefreshableCredentials(RefreshableCredentials): def __init__(self, *args, **kwargs): super(AioRefreshableCredentials, self).__init__(*args, **kwargs) self._refresh_lock = asyncio.Lock() @classmethod def from_refreshable_credentials(cls, obj: Optional[RefreshableCredentials]): if obj is None: return None return cls( # Using interval values here to skip property calling .refresh() obj._access_key, obj._secret_key, obj._token, obj._expiry_time, obj._refresh_using, obj.method, obj._time_fetcher ) # Redeclaring the properties so it doesnt call refresh # Have to redeclare setter as we're overriding the getter @property def access_key(self): # TODO: this needs to be resolved raise NotImplementedError("missing call to self._refresh. " "Use get_frozen_credentials instead") return self._access_key @access_key.setter def access_key(self, value): self._access_key = value @property def secret_key(self): # TODO: this needs to be resolved raise NotImplementedError("missing call to self._refresh. " "Use get_frozen_credentials instead") return self._secret_key @secret_key.setter def secret_key(self, value): self._secret_key = value @property def token(self): # TODO: this needs to be resolved raise NotImplementedError("missing call to self._refresh. " "Use get_frozen_credentials instead") return self._token @token.setter def token(self, value): self._token = value async def _refresh(self): if not self.refresh_needed(self._advisory_refresh_timeout): return # By this point we need a refresh but its not critical if not self._refresh_lock.locked(): async with self._refresh_lock: if not self.refresh_needed(self._advisory_refresh_timeout): return is_mandatory_refresh = self.refresh_needed( self._mandatory_refresh_timeout) await self._protected_refresh(is_mandatory=is_mandatory_refresh) return elif self.refresh_needed(self._mandatory_refresh_timeout): # If we're here, we absolutely need a refresh and the # lock is held so wait for it async with self._refresh_lock: # Might have refreshed by now if not self.refresh_needed(self._mandatory_refresh_timeout): return await self._protected_refresh(is_mandatory=True) async def _protected_refresh(self, is_mandatory): try: metadata = await self._refresh_using() except Exception: period_name = 'mandatory' if is_mandatory else 'advisory' logger.warning("Refreshing temporary credentials failed " "during %s refresh period.", period_name, exc_info=True) if is_mandatory: # If this is a mandatory refresh, then # all errors that occur when we attempt to refresh # credentials are propagated back to the user. raise # Otherwise we'll just return. # The end result will be that we'll use the current # set of temporary credentials we have. 
return self._set_from_data(metadata) self._frozen_credentials = ReadOnlyCredentials( self._access_key, self._secret_key, self._token) if self._is_expired(): msg = ("Credentials were refreshed, but the " "refreshed credentials are still expired.") logger.warning(msg) raise RuntimeError(msg) async def get_frozen_credentials(self): await self._refresh() return self._frozen_credentials class AioDeferredRefreshableCredentials(AioRefreshableCredentials): def __init__(self, refresh_using, method, time_fetcher=_local_now): self._refresh_using = refresh_using self._access_key = None self._secret_key = None self._token = None self._expiry_time = None self._time_fetcher = time_fetcher self._refresh_lock = asyncio.Lock() self.method = method self._frozen_credentials = None def refresh_needed(self, refresh_in=None): if self._frozen_credentials is None: return True return super(AioDeferredRefreshableCredentials, self).refresh_needed( refresh_in ) class AioCachedCredentialFetcher(CachedCredentialFetcher): async def _get_credentials(self): raise NotImplementedError('_get_credentials()') async def fetch_credentials(self): return await self._get_cached_credentials() async def _get_cached_credentials(self): """Get up-to-date credentials. This will check the cache for up-to-date credentials, calling assume role if none are available. """ response = self._load_from_cache() if response is None: response = await self._get_credentials() self._write_to_cache(response) else: logger.debug("Credentials for role retrieved from cache.") creds = response['Credentials'] expiration = _serialize_if_needed(creds['Expiration'], iso=True) return { 'access_key': creds['AccessKeyId'], 'secret_key': creds['SecretAccessKey'], 'token': creds['SessionToken'], 'expiry_time': expiration, } class AioBaseAssumeRoleCredentialFetcher(BaseAssumeRoleCredentialFetcher, AioCachedCredentialFetcher): pass class AioAssumeRoleCredentialFetcher(AssumeRoleCredentialFetcher, AioBaseAssumeRoleCredentialFetcher): async def _get_credentials(self): """Get credentials by calling assume role.""" kwargs = self._assume_role_kwargs() client = await self._create_client() async with client as sts: return await sts.assume_role(**kwargs) async def _create_client(self): """Create an STS client using the source credentials.""" frozen_credentials = await self._source_credentials.get_frozen_credentials() return self._client_creator( 'sts', aws_access_key_id=frozen_credentials.access_key, aws_secret_access_key=frozen_credentials.secret_key, aws_session_token=frozen_credentials.token, ) class AioAssumeRoleWithWebIdentityCredentialFetcher( AioBaseAssumeRoleCredentialFetcher ): def __init__(self, client_creator, web_identity_token_loader, role_arn, extra_args=None, cache=None, expiry_window_seconds=None): self._web_identity_token_loader = web_identity_token_loader super(AioAssumeRoleWithWebIdentityCredentialFetcher, self).__init__( client_creator, role_arn, extra_args=extra_args, cache=cache, expiry_window_seconds=expiry_window_seconds ) async def _get_credentials(self): """Get credentials by calling assume role.""" kwargs = self._assume_role_kwargs() # Assume role with web identity does not require credentials other than # the token, explicitly configure the client to not sign requests. 
config = AioConfig(signature_version=UNSIGNED) async with self._client_creator('sts', config=config) as client: return await client.assume_role_with_web_identity(**kwargs) def _assume_role_kwargs(self): """Get the arguments for assume role based on current configuration.""" assume_role_kwargs = deepcopy(self._assume_kwargs) identity_token = self._web_identity_token_loader() assume_role_kwargs['WebIdentityToken'] = identity_token return assume_role_kwargs class AioProcessProvider(ProcessProvider): def __init__(self, *args, popen=asyncio.create_subprocess_exec, **kwargs): super(AioProcessProvider, self).__init__(*args, **kwargs, popen=popen) async def load(self): credential_process = self._credential_process if credential_process is None: return creds_dict = await self._retrieve_credentials_using(credential_process) if creds_dict.get('expiry_time') is not None: return AioRefreshableCredentials.create_from_metadata( creds_dict, lambda: self._retrieve_credentials_using(credential_process), self.METHOD ) return AioCredentials( access_key=creds_dict['access_key'], secret_key=creds_dict['secret_key'], token=creds_dict.get('token'), method=self.METHOD ) async def _retrieve_credentials_using(self, credential_process): # We're not using shell=True, so we need to pass the # command and all arguments as a list. process_list = compat_shell_split(credential_process) p = await self._popen(*process_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = await p.communicate() if p.returncode != 0: raise CredentialRetrievalError( provider=self.METHOD, error_msg=stderr.decode('utf-8')) parsed = botocore.compat.json.loads(stdout.decode('utf-8')) version = parsed.get('Version', '<Version key not provided>') if version != 1: raise CredentialRetrievalError( provider=self.METHOD, error_msg=("Unsupported version '%s' for credential process " "provider, supported versions: 1" % version)) try: return { 'access_key': parsed['AccessKeyId'], 'secret_key': parsed['SecretAccessKey'], 'token': parsed.get('SessionToken'), 'expiry_time': parsed.get('Expiration'), } except KeyError as e: raise CredentialRetrievalError( provider=self.METHOD, error_msg="Missing required key in response: %s" % e ) class AioInstanceMetadataProvider(InstanceMetadataProvider): async def load(self): fetcher = self._role_fetcher metadata = await fetcher.retrieve_iam_role_credentials() if not metadata: return None logger.debug('Found credentials from IAM Role: %s', metadata['role_name']) creds = AioRefreshableCredentials.create_from_metadata( metadata, method=self.METHOD, refresh_using=fetcher.retrieve_iam_role_credentials, ) return creds class AioEnvProvider(EnvProvider): async def load(self): # It gets credentials from an env var, # so just convert the response to Aio variants result = super().load() if isinstance(result, RefreshableCredentials): return AioRefreshableCredentials.\ from_refreshable_credentials(result) elif isinstance(result, Credentials): return AioCredentials.from_credentials(result) return None class AioOriginalEC2Provider(OriginalEC2Provider): async def load(self): result = super(AioOriginalEC2Provider, self).load() if isinstance(result, Credentials): result = AioCredentials.from_credentials(result) return result class AioSharedCredentialProvider(SharedCredentialProvider): async def load(self): result = super(AioSharedCredentialProvider, self).load() if isinstance(result, Credentials): result = AioCredentials.from_credentials(result) return result class AioConfigProvider(ConfigProvider): async def load(self): 
result = super(AioConfigProvider, self).load() if isinstance(result, Credentials): result = AioCredentials.from_credentials(result) return result class AioBotoProvider(BotoProvider): async def load(self): result = super(AioBotoProvider, self).load() if isinstance(result, Credentials): result = AioCredentials.from_credentials(result) return result class AioAssumeRoleProvider(AssumeRoleProvider): async def load(self): self._loaded_config = self._load_config() profiles = self._loaded_config.get('profiles', {}) profile = profiles.get(self._profile_name, {}) if self._has_assume_role_config_vars(profile): return await self._load_creds_via_assume_role(self._profile_name) async def _load_creds_via_assume_role(self, profile_name): role_config = self._get_role_config(profile_name) source_credentials = await self._resolve_source_credentials( role_config, profile_name ) extra_args = {} role_session_name = role_config.get('role_session_name') if role_session_name is not None: extra_args['RoleSessionName'] = role_session_name external_id = role_config.get('external_id') if external_id is not None: extra_args['ExternalId'] = external_id mfa_serial = role_config.get('mfa_serial') if mfa_serial is not None: extra_args['SerialNumber'] = mfa_serial duration_seconds = role_config.get('duration_seconds') if duration_seconds is not None: extra_args['DurationSeconds'] = duration_seconds fetcher = AioAssumeRoleCredentialFetcher( client_creator=self._client_creator, source_credentials=source_credentials, role_arn=role_config['role_arn'], extra_args=extra_args, mfa_prompter=self._prompter, cache=self.cache, ) refresher = fetcher.fetch_credentials if mfa_serial is not None: refresher = create_aio_mfa_serial_refresher(refresher) # The initial credentials are empty and the expiration time is set # to now so that we can delay the call to assume role until it is # strictly needed. return AioDeferredRefreshableCredentials( method=self.METHOD, refresh_using=refresher, time_fetcher=_local_now ) async def _resolve_source_credentials(self, role_config, profile_name): credential_source = role_config.get('credential_source') if credential_source is not None: return await self._resolve_credentials_from_source( credential_source, profile_name ) source_profile = role_config['source_profile'] self._visited_profiles.append(source_profile) return await self._resolve_credentials_from_profile(source_profile) async def _resolve_credentials_from_profile(self, profile_name): profiles = self._loaded_config.get('profiles', {}) profile = profiles[profile_name] if self._has_static_credentials(profile) and \ not self._profile_provider_builder: return self._resolve_static_credentials_from_profile(profile) elif self._has_static_credentials(profile) or \ not self._has_assume_role_config_vars(profile): profile_providers = self._profile_provider_builder.providers( profile_name=profile_name, disable_env_vars=True, ) profile_chain = AioCredentialResolver(profile_providers) credentials = await profile_chain.load_credentials() if credentials is None: error_message = ( 'The source profile "%s" must have credentials.' 
) raise InvalidConfigError( error_msg=error_message % profile_name, ) return credentials return self._load_creds_via_assume_role(profile_name) def _resolve_static_credentials_from_profile(self, profile): try: return AioCredentials( access_key=profile['aws_access_key_id'], secret_key=profile['aws_secret_access_key'], token=profile.get('aws_session_token') ) except KeyError as e: raise PartialCredentialsError( provider=self.METHOD, cred_var=str(e)) async def _resolve_credentials_from_source(self, credential_source, profile_name): credentials = await self._credential_sourcer.source_credentials( credential_source) if credentials is None: raise CredentialRetrievalError( provider=credential_source, error_msg=( 'No credentials found in credential_source referenced ' 'in profile %s' % profile_name ) ) return credentials class AioAssumeRoleWithWebIdentityProvider(AssumeRoleWithWebIdentityProvider): async def load(self): return await self._assume_role_with_web_identity() async def _assume_role_with_web_identity(self): token_path = self._get_config('web_identity_token_file') if not token_path: return None token_loader = self._token_loader_cls(token_path) role_arn = self._get_config('role_arn') if not role_arn: error_msg = ( 'The provided profile or the current environment is ' 'configured to assume role with web identity but has no ' 'role ARN configured. Ensure that the profile has the role_arn' 'configuration set or the AWS_ROLE_ARN env var is set.' ) raise InvalidConfigError(error_msg=error_msg) extra_args = {} role_session_name = self._get_config('role_session_name') if role_session_name is not None: extra_args['RoleSessionName'] = role_session_name fetcher = AioAssumeRoleWithWebIdentityCredentialFetcher( client_creator=self._client_creator, web_identity_token_loader=token_loader, role_arn=role_arn, extra_args=extra_args, cache=self.cache, ) # The initial credentials are empty and the expiration time is set # to now so that we can delay the call to assume role until it is # strictly needed. return AioDeferredRefreshableCredentials( method=self.METHOD, refresh_using=fetcher.fetch_credentials, ) class AioCanonicalNameCredentialSourcer(CanonicalNameCredentialSourcer): async def source_credentials(self, source_name): """Loads source credentials based on the provided configuration. :type source_name: str :param source_name: The value of credential_source in the config file. This is the canonical name of the credential provider. :rtype: Credentials """ source = self._get_provider(source_name) if isinstance(source, AioCredentialResolver): return await source.load_credentials() return await source.load() def _get_provider(self, canonical_name): """Return a credential provider by its canonical name. :type canonical_name: str :param canonical_name: The canonical name of the provider. :raises UnknownCredentialError: Raised if no credential provider by the provided name is found. """ provider = self._get_provider_by_canonical_name(canonical_name) # The AssumeRole provider should really be part of the SharedConfig # provider rather than being its own thing, but it is not. It is # effectively part of both the SharedConfig provider and the # SharedCredentials provider now due to the way it behaves. # Therefore if we want either of those providers we should return # the AssumeRole provider with it. 
if canonical_name.lower() in ['sharedconfig', 'sharedcredentials']: assume_role_provider = self._get_provider_by_method('assume-role') if assume_role_provider is not None: # The SharedConfig or SharedCredentials provider may not be # present if it was removed for some reason, but the # AssumeRole provider could still be present. In that case, # return the assume role provider by itself. if provider is None: return assume_role_provider # If both are present, return them both as a # CredentialResolver so that calling code can treat them as # a single entity. return AioCredentialResolver([assume_role_provider, provider]) if provider is None: raise UnknownCredentialError(name=canonical_name) return provider class AioContainerProvider(ContainerProvider): def __init__(self, *args, **kwargs): super(AioContainerProvider, self).__init__(*args, **kwargs) # This will always run if no fetcher arg is provided if isinstance(self._fetcher, ContainerMetadataFetcher): self._fetcher = AioContainerMetadataFetcher() async def load(self): if self.ENV_VAR in self._environ or self.ENV_VAR_FULL in self._environ: return await self._retrieve_or_fail() async def _retrieve_or_fail(self): if self._provided_relative_uri(): full_uri = self._fetcher.full_url(self._environ[self.ENV_VAR]) else: full_uri = self._environ[self.ENV_VAR_FULL] headers = self._build_headers() fetcher = self._create_fetcher(full_uri, headers) creds = await fetcher() return AioRefreshableCredentials( access_key=creds['access_key'], secret_key=creds['secret_key'], token=creds['token'], method=self.METHOD, expiry_time=_parse_if_needed(creds['expiry_time']), refresh_using=fetcher, ) def _create_fetcher(self, full_uri, headers): async def fetch_creds(): try: response = await self._fetcher.retrieve_full_uri( full_uri, headers=headers) except MetadataRetrievalError as e: logger.debug("Error retrieving container metadata: %s", e, exc_info=True) raise CredentialRetrievalError(provider=self.METHOD, error_msg=str(e)) return { 'access_key': response['AccessKeyId'], 'secret_key': response['SecretAccessKey'], 'token': response['Token'], 'expiry_time': response['Expiration'], } return fetch_creds class AioCredentialResolver(CredentialResolver): async def load_credentials(self): """ Goes through the credentials chain, returning the first ``Credentials`` that could be loaded. """ # First provider to return a non-None response wins. for provider in self.providers: logger.debug("Looking for credentials via: %s", provider.METHOD) creds = await provider.load() if creds is not None: return creds # If we got here, no credentials could be found. # This feels like it should be an exception, but historically, ``None`` # is returned. 
# # +1 # -js return None class AioSSOCredentialFetcher(AioCachedCredentialFetcher): _UTC_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' def __init__(self, start_url, sso_region, role_name, account_id, client_creator, token_loader=None, cache=None, expiry_window_seconds=None): self._client_creator = client_creator self._sso_region = sso_region self._role_name = role_name self._account_id = account_id self._start_url = start_url self._token_loader = token_loader super(AioSSOCredentialFetcher, self).__init__( cache, expiry_window_seconds ) def _create_cache_key(self): args = { 'startUrl': self._start_url, 'roleName': self._role_name, 'accountId': self._account_id, } args = json.dumps(args, sort_keys=True, separators=(',', ':')) argument_hash = sha1(args.encode('utf-8')).hexdigest() return self._make_file_safe(argument_hash) def _parse_timestamp(self, timestamp_ms): # fromtimestamp expects seconds so: milliseconds / 1000 = seconds timestamp_seconds = timestamp_ms / 1000.0 timestamp = datetime.datetime.fromtimestamp(timestamp_seconds, tzutc()) return timestamp.strftime(self._UTC_DATE_FORMAT) async def _get_credentials(self): """Get credentials by calling SSO get role credentials.""" config = Config( signature_version=UNSIGNED, region_name=self._sso_region, ) async with self._client_creator('sso', config=config) as client: kwargs = { 'roleName': self._role_name, 'accountId': self._account_id, 'accessToken': self._token_loader(self._start_url), } try: response = await client.get_role_credentials(**kwargs) except client.exceptions.UnauthorizedException: raise UnauthorizedSSOTokenError() credentials = response['roleCredentials'] credentials = { 'ProviderType': 'sso', 'Credentials': { 'AccessKeyId': credentials['accessKeyId'], 'SecretAccessKey': credentials['secretAccessKey'], 'SessionToken': credentials['sessionToken'], 'Expiration': self._parse_timestamp(credentials['expiration']), } } return credentials class AioSSOProvider(SSOProvider): async def load(self): sso_config = self._load_sso_config() if not sso_config: return None sso_fetcher = AioSSOCredentialFetcher( sso_config['sso_start_url'], sso_config['sso_region'], sso_config['sso_role_name'], sso_config['sso_account_id'], self._client_creator, token_loader=SSOTokenLoader(cache=self._token_cache), cache=self.cache, ) return AioDeferredRefreshableCredentials( method=self.METHOD, refresh_using=sso_fetcher.fetch_credentials, )
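# --- Illustrative usage (not part of the original module) ---
# A rough sketch of resolving credentials through the async provider chain
# defined above. The profile name is a placeholder, and AioSession comes from
# aiobotocore.session; this is an assumption-laden example, not library docs.
import asyncio


async def _demo_resolve_credentials():
    # Imported inside the function to avoid a circular import with
    # aiobotocore.session, which itself imports this module.
    from aiobotocore.session import AioSession

    session = AioSession(profile='default')  # placeholder profile name
    credentials = await get_credentials(session)
    if credentials is not None:
        frozen = await credentials.get_frozen_credentials()
        print('resolved via:', credentials.method)
        print('access key id:', frozen.access_key)


if __name__ == '__main__':
    asyncio.run(_demo_resolve_credentials())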
35,111
Python
37.627063
85
0.620404
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/paginate.py
from botocore.exceptions import PaginationError from botocore.paginate import Paginator, PageIterator from botocore.utils import set_value_from_jmespath, merge_dicts from botocore.compat import six import jmespath import aioitertools class AioPageIterator(PageIterator): def __aiter__(self): return self.__anext__() async def __anext__(self): current_kwargs = self._op_kwargs previous_next_token = None next_token = dict((key, None) for key in self._input_token) if self._starting_token is not None: # If the starting token exists, populate the next_token with the # values inside it. This ensures that we have the service's # pagination token on hand if we need to truncate after the # first response. next_token = self._parse_starting_token()[0] # The number of items from result_key we've seen so far. total_items = 0 first_request = True primary_result_key = self.result_keys[0] starting_truncation = 0 self._inject_starting_params(current_kwargs) while True: response = await self._make_request(current_kwargs) parsed = self._extract_parsed_response(response) if first_request: # The first request is handled differently. We could # possibly have a resume/starting token that tells us where # to index into the retrieved page. if self._starting_token is not None: starting_truncation = self._handle_first_request( parsed, primary_result_key, starting_truncation) first_request = False self._record_non_aggregate_key_values(parsed) else: # If this isn't the first request, we have already sliced into # the first request and had to make additional requests after. # We no longer need to add this to truncation. starting_truncation = 0 current_response = primary_result_key.search(parsed) if current_response is None: current_response = [] num_current_response = len(current_response) truncate_amount = 0 if self._max_items is not None: truncate_amount = (total_items + num_current_response) \ - self._max_items if truncate_amount > 0: self._truncate_response(parsed, primary_result_key, truncate_amount, starting_truncation, next_token) yield response break else: yield response total_items += num_current_response next_token = self._get_next_token(parsed) if all(t is None for t in next_token.values()): break if self._max_items is not None and \ total_items == self._max_items: # We're on a page boundary so we can set the current # next token to be the resume token. self.resume_token = next_token break if previous_next_token is not None and \ previous_next_token == next_token: message = ("The same next token was received " "twice: %s" % next_token) raise PaginationError(message=message) self._inject_token_into_kwargs(current_kwargs, next_token) previous_next_token = next_token def result_key_iters(self): teed_results = aioitertools.tee(self, len(self.result_keys)) return [ResultKeyIterator(i, result_key) for i, result_key in zip(teed_results, self.result_keys)] async def build_full_result(self): complete_result = {} async for response in self: page = response # We want to try to catch operation object pagination # and format correctly for those. They come in the form # of a tuple of two elements: (http_response, parsed_responsed). # We want the parsed_response as that is what the page iterator # uses. We can remove it though once operation objects are removed. if isinstance(response, tuple) and len(response) == 2: page = response[1] # We're incrementally building the full response page # by page. For each page in the response we need to # inject the necessary components from the page # into the complete_result. 
for result_expression in self.result_keys: # In order to incrementally update a result key # we need to search the existing value from complete_result, # then we need to search the _current_ page for the # current result key value. Then we append the current # value onto the existing value, and re-set that value # as the new value. result_value = result_expression.search(page) if result_value is None: continue existing_value = result_expression.search(complete_result) if existing_value is None: # Set the initial result set_value_from_jmespath( complete_result, result_expression.expression, result_value) continue # Now both result_value and existing_value contain something if isinstance(result_value, list): existing_value.extend(result_value) elif isinstance(result_value, (int, float, six.string_types)): # Modify the existing result with the sum or concatenation set_value_from_jmespath( complete_result, result_expression.expression, existing_value + result_value) merge_dicts(complete_result, self.non_aggregate_part) if self.resume_token is not None: complete_result['NextToken'] = self.resume_token return complete_result async def search(self, expression): compiled = jmespath.compile(expression) async for page in self: results = compiled.search(page) if isinstance(results, list): for element in results: yield element else: yield results class AioPaginator(Paginator): PAGE_ITERATOR_CLS = AioPageIterator class ResultKeyIterator: """Iterates over the results of paginated responses. Each iterator is associated with a single result key. Iterating over this object will give you each element in the result key list. :param pages_iterator: An iterator that will give you pages of results (a ``PageIterator`` class). :param result_key: The JMESPath expression representing the result key. """ def __init__(self, pages_iterator, result_key): self._pages_iterator = pages_iterator self.result_key = result_key def __aiter__(self): return self.__anext__() async def __anext__(self): async for page in self._pages_iterator: results = self.result_key.search(page) if results is None: results = [] for result in results: yield result
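# --- Illustrative usage (not part of the original module) ---
# A sketch of driving AioPaginator via a client, mirroring the synchronous
# botocore pattern; the bucket name and region are placeholders.
import asyncio


async def _demo_paginate():
    from aiobotocore.session import get_session

    session = get_session()
    async with session.create_client('s3', region_name='us-east-1') as client:
        paginator = client.get_paginator('list_objects_v2')
        # Stream pages lazily with async-for ...
        async for page in paginator.paginate(Bucket='example-bucket'):
            for obj in page.get('Contents', []):
                print(obj['Key'])
        # ... or merge every page into one result dictionary.
        full = await paginator.paginate(
            Bucket='example-bucket').build_full_result()
        print(len(full.get('Contents', [])))


if __name__ == '__main__':
    asyncio.run(_demo_paginate())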
7,699
Python
42.75
79
0.572022
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/eventstream.py
from botocore.eventstream import EventStream, EventStreamBuffer


class AioEventStream(EventStream):
    async def _create_raw_event_generator(self):
        # Feed raw HTTP chunks into botocore's event-stream buffer and yield
        # each fully decoded event as soon as it becomes available.
        event_stream_buffer = EventStreamBuffer()
        async for chunk, _ in self._raw_stream.iter_chunks():
            event_stream_buffer.add_data(chunk)
            for event in event_stream_buffer:
                yield event

    def __iter__(self):
        # Synchronous iteration is intentionally unsupported; the underlying
        # stream can only be read asynchronously.
        raise NotImplementedError('Use async-for instead')

    def __aiter__(self):
        # __anext__ is an async generator function, so calling it returns an
        # async iterator that async-for can drive directly.
        return self.__anext__()

    async def __anext__(self):
        async for event in self._event_generator:
            parsed_event = self._parse_event(event)
            if parsed_event:
                yield parsed_event
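# --- Illustrative usage (not part of the original module) ---
# A sketch of consuming an AioEventStream as returned by a streaming API such
# as S3 SelectObjectContent; bucket, key and query are placeholders.
import asyncio


async def _demo_event_stream():
    from aiobotocore.session import get_session

    session = get_session()
    async with session.create_client('s3', region_name='us-east-1') as client:
        response = await client.select_object_content(
            Bucket='example-bucket',
            Key='example.csv',
            Expression="SELECT * FROM s3object s",
            ExpressionType='SQL',
            InputSerialization={'CSV': {}},
            OutputSerialization={'CSV': {}},
        )
        # response['Payload'] is an AioEventStream; it must be consumed with
        # async-for, since plain iteration raises NotImplementedError above.
        async for event in response['Payload']:
            if 'Records' in event:
                print(event['Records']['Payload'].decode('utf-8'))


if __name__ == '__main__':
    asyncio.run(_demo_event_stream())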
724
Python
30.521738
63
0.620166
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/client.py
from botocore.awsrequest import prepare_request_dict
from botocore.client import logger, PaginatorDocstring, ClientCreator, \
    BaseClient, ClientEndpointBridge, S3ArnParamHandler, S3EndpointSetter
from botocore.exceptions import OperationNotPageableError
from botocore.history import get_global_history_recorder
from botocore.utils import get_service_module_name
from botocore.waiter import xform_name
from botocore.hooks import first_non_none_response

from .paginate import AioPaginator
from .args import AioClientArgsCreator
from .utils import AioS3RegionRedirector
from . import waiter

history_recorder = get_global_history_recorder()


class AioClientCreator(ClientCreator):
    async def create_client(self, service_name, region_name, is_secure=True,
                            endpoint_url=None, verify=None,
                            credentials=None, scoped_config=None,
                            api_version=None,
                            client_config=None):
        responses = await self._event_emitter.emit(
            'choose-service-name', service_name=service_name)
        service_name = first_non_none_response(responses,
                                               default=service_name)
        service_model = self._load_service_model(service_name, api_version)
        cls = await self._create_client_class(service_name, service_model)
        endpoint_bridge = ClientEndpointBridge(
            self._endpoint_resolver, scoped_config, client_config,
            service_signing_name=service_model.metadata.get('signingName'))
        client_args = self._get_client_args(
            service_model, region_name, is_secure, endpoint_url,
            verify, credentials, scoped_config, client_config,
            endpoint_bridge)
        service_client = cls(**client_args)
        self._register_retries(service_client)
        self._register_s3_events(
            service_client, endpoint_bridge, endpoint_url, client_config,
            scoped_config)
        self._register_endpoint_discovery(
            service_client, endpoint_url, client_config
        )
        return service_client

    async def _create_client_class(self, service_name, service_model):
        class_attributes = self._create_methods(service_model)
        py_name_to_operation_name = self._create_name_mapping(service_model)
        class_attributes['_PY_TO_OP_NAME'] = py_name_to_operation_name
        bases = [AioBaseClient]
        service_id = service_model.service_id.hyphenize()
        await self._event_emitter.emit(
            'creating-client-class.%s' % service_id,
            class_attributes=class_attributes,
            base_classes=bases)
        class_name = get_service_module_name(service_model)
        cls = type(str(class_name), tuple(bases), class_attributes)
        return cls

    def _register_s3_events(self, client, endpoint_bridge, endpoint_url,
                            client_config, scoped_config):
        if client.meta.service_model.service_name != 's3':
            return
        AioS3RegionRedirector(endpoint_bridge, client).register()
        S3ArnParamHandler().register(client.meta.events)
        S3EndpointSetter(
            endpoint_resolver=self._endpoint_resolver,
            region=client.meta.region_name,
            s3_config=client.meta.config.s3,
            endpoint_url=endpoint_url,
            partition=client.meta.partition
        ).register(client.meta.events)
        self._set_s3_presign_signature_version(
            client.meta, client_config, scoped_config)

    def _get_client_args(self, service_model, region_name, is_secure,
                         endpoint_url, verify, credentials,
                         scoped_config, client_config, endpoint_bridge):
        # This is a near copy of ClientCreator.
What's replaced # is ClientArgsCreator->AioClientArgsCreator args_creator = AioClientArgsCreator( self._event_emitter, self._user_agent, self._response_parser_factory, self._loader, self._exceptions_factory, config_store=self._config_store) return args_creator.get_client_args( service_model, region_name, is_secure, endpoint_url, verify, credentials, scoped_config, client_config, endpoint_bridge) class AioBaseClient(BaseClient): async def _async_getattr(self, item): event_name = 'getattr.%s.%s' % ( self._service_model.service_id.hyphenize(), item ) handler, event_response = await self.meta.events.emit_until_response( event_name, client=self) return event_response def __getattr__(self, item): # NOTE: we can not reliably support this because if we were to make this a # deferred attrgetter (See #803), it would resolve in hasattr always returning # true. This ends up breaking ddtrace for example when it tries to set a pin. raise AttributeError( "'%s' object has no attribute '%s'" % (self.__class__.__name__, item)) async def _make_api_call(self, operation_name, api_params): operation_model = self._service_model.operation_model(operation_name) service_name = self._service_model.service_name history_recorder.record('API_CALL', { 'service': service_name, 'operation': operation_name, 'params': api_params, }) if operation_model.deprecated: logger.debug('Warning: %s.%s() is deprecated', service_name, operation_name) request_context = { 'client_region': self.meta.region_name, 'client_config': self.meta.config, 'has_streaming_input': operation_model.has_streaming_input, 'auth_type': operation_model.auth_type, } request_dict = await self._convert_to_request_dict( api_params, operation_model, context=request_context) service_id = self._service_model.service_id.hyphenize() handler, event_response = await self.meta.events.emit_until_response( 'before-call.{service_id}.{operation_name}'.format( service_id=service_id, operation_name=operation_name), model=operation_model, params=request_dict, request_signer=self._request_signer, context=request_context) if event_response is not None: http, parsed_response = event_response else: http, parsed_response = await self._make_request( operation_model, request_dict, request_context) await self.meta.events.emit( 'after-call.{service_id}.{operation_name}'.format( service_id=service_id, operation_name=operation_name), http_response=http, parsed=parsed_response, model=operation_model, context=request_context ) if http.status_code >= 300: error_code = parsed_response.get("Error", {}).get("Code") error_class = self.exceptions.from_code(error_code) raise error_class(parsed_response, operation_name) else: return parsed_response async def _make_request(self, operation_model, request_dict, request_context): try: return await self._endpoint.make_request(operation_model, request_dict) except Exception as e: await self.meta.events.emit( 'after-call-error.{service_id}.{operation_name}'.format( service_id=self._service_model.service_id.hyphenize(), operation_name=operation_model.name), exception=e, context=request_context ) raise async def _convert_to_request_dict(self, api_params, operation_model, context=None): api_params = await self._emit_api_params( api_params, operation_model, context) request_dict = self._serializer.serialize_to_request( api_params, operation_model) if not self._client_config.inject_host_prefix: request_dict.pop('host_prefix', None) prepare_request_dict(request_dict, endpoint_url=self._endpoint.host, user_agent=self._client_config.user_agent, context=context) 
return request_dict async def _emit_api_params(self, api_params, operation_model, context): # Given the API params provided by the user and the operation_model # we can serialize the request to a request_dict. operation_name = operation_model.name # Emit an event that allows users to modify the parameters at the # beginning of the method. It allows handlers to modify existing # parameters or return a new set of parameters to use. service_id = self._service_model.service_id.hyphenize() responses = await self.meta.events.emit( 'provide-client-params.{service_id}.{operation_name}'.format( service_id=service_id, operation_name=operation_name), params=api_params, model=operation_model, context=context) api_params = first_non_none_response(responses, default=api_params) event_name = ( 'before-parameter-build.{service_id}.{operation_name}') await self.meta.events.emit( event_name.format( service_id=service_id, operation_name=operation_name), params=api_params, model=operation_model, context=context) return api_params def get_paginator(self, operation_name): """Create a paginator for an operation. :type operation_name: string :param operation_name: The operation name. This is the same name as the method name on the client. For example, if the method name is ``create_foo``, and you'd normally invoke the operation as ``client.create_foo(**kwargs)``, if the ``create_foo`` operation can be paginated, you can use the call ``client.get_paginator("create_foo")``. :raise OperationNotPageableError: Raised if the operation is not pageable. You can use the ``client.can_paginate`` method to check if an operation is pageable. :rtype: L{botocore.paginate.Paginator} :return: A paginator object. """ if not self.can_paginate(operation_name): raise OperationNotPageableError(operation_name=operation_name) else: actual_operation_name = self._PY_TO_OP_NAME[operation_name] # Create a new paginate method that will serve as a proxy to # the underlying Paginator.paginate method. This is needed to # attach a docstring to the method. def paginate(self, **kwargs): return AioPaginator.paginate(self, **kwargs) paginator_config = self._cache['page_config'][ actual_operation_name] # Add the docstring for the paginate method. paginate.__doc__ = PaginatorDocstring( paginator_name=actual_operation_name, event_emitter=self.meta.events, service_model=self.meta.service_model, paginator_config=paginator_config, include_signature=False ) # Rename the paginator class based on the type of paginator. paginator_class_name = str('%s.Paginator.%s' % ( get_service_module_name(self.meta.service_model), actual_operation_name)) # Create the new paginator class documented_paginator_cls = type( paginator_class_name, (AioPaginator,), {'paginate': paginate}) operation_model = self._service_model.operation_model(actual_operation_name) paginator = documented_paginator_cls( getattr(self, operation_name), paginator_config, operation_model) return paginator # NOTE: this method does not differ from botocore, however it's important to keep # as the "waiter" value points to our own asyncio waiter module def get_waiter(self, waiter_name): """Returns an object that can wait for some condition. :type waiter_name: str :param waiter_name: The name of the waiter to get. See the waiters section of the service docs for a list of available waiters. :returns: The specified waiter object. 
:rtype: botocore.waiter.Waiter """ config = self._get_waiter_config() if not config: raise ValueError("Waiter does not exist: %s" % waiter_name) model = waiter.WaiterModel(config) mapping = {} for name in model.waiter_names: mapping[xform_name(name)] = name if waiter_name not in mapping: raise ValueError("Waiter does not exist: %s" % waiter_name) return waiter.create_waiter_with_client( mapping[waiter_name], model, self) async def __aenter__(self): await self._endpoint.http_session.__aenter__() return self async def __aexit__(self, exc_type, exc_val, exc_tb): await self._endpoint.http_session.__aexit__(exc_type, exc_val, exc_tb) async def close(self): """Close all http connections.""" return await self._endpoint.http_session.close()
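# --- Illustrative usage (not part of the original module) ---
# A sketch of the async client lifecycle plus the waiter plumbing exposed by
# get_waiter() above; the bucket name, key and region are placeholders, and
# 'object_exists' is assumed to be the xform_name form of the standard S3
# ObjectExists waiter.
import asyncio


async def _demo_client_lifecycle():
    from aiobotocore.session import get_session

    session = get_session()
    async with session.create_client('s3', region_name='us-east-1') as client:
        await client.put_object(Bucket='example-bucket', Key='hello.txt',
                                Body=b'hello world')
        object_waiter = client.get_waiter('object_exists')
        await object_waiter.wait(Bucket='example-bucket', Key='hello.txt')


if __name__ == '__main__':
    asyncio.run(_demo_client_lifecycle())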
13,609
Python
44.366667
88
0.623117
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/session.py
from botocore.session import Session, EVENT_ALIASES, ServiceModel, UnknownServiceError from botocore import UNSIGNED from botocore import retryhandler, translate from botocore.exceptions import PartialCredentialsError from .client import AioClientCreator, AioBaseClient from .hooks import AioHierarchicalEmitter from .parsers import AioResponseParserFactory from .signers import add_generate_presigned_url, add_generate_presigned_post, \ add_generate_db_auth_token from .credentials import create_credential_resolver, AioCredentials class ClientCreatorContext: def __init__(self, coro): self._coro = coro self._client = None async def __aenter__(self) -> AioBaseClient: self._client = await self._coro return await self._client.__aenter__() async def __aexit__(self, exc_type, exc_val, exc_tb): await self._client.__aexit__(exc_type, exc_val, exc_tb) class AioSession(Session): # noinspection PyMissingConstructor def __init__(self, session_vars=None, event_hooks=None, include_builtin_handlers=True, profile=None): if event_hooks is None: event_hooks = AioHierarchicalEmitter() super().__init__(session_vars, event_hooks, include_builtin_handlers, profile) # Register our own handlers. These normally happen via # `botocore.handlers.BUILTIN_HANDLERS` self.register('creating-client-class', add_generate_presigned_url) self.register('creating-client-class.s3', add_generate_presigned_post) self.register('creating-client-class.rds', add_generate_db_auth_token), def _register_response_parser_factory(self): self._components.register_component('response_parser_factory', AioResponseParserFactory()) def create_client(self, *args, **kwargs): return ClientCreatorContext(self._create_client(*args, **kwargs)) async def _create_client(self, service_name, region_name=None, api_version=None, use_ssl=True, verify=None, endpoint_url=None, aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, config=None): default_client_config = self.get_default_client_config() # If a config is provided and a default config is set, then # use the config resulting from merging the two. if config is not None and default_client_config is not None: config = default_client_config.merge(config) # If a config was not provided then use the default # client config from the session elif default_client_config is not None: config = default_client_config region_name = self._resolve_region_name(region_name, config) # Figure out the verify value base on the various # configuration options. 
if verify is None: verify = self.get_config_variable('ca_bundle') if api_version is None: api_version = self.get_config_variable('api_versions').get( service_name, None) loader = self.get_component('data_loader') event_emitter = self.get_component('event_emitter') response_parser_factory = self.get_component( 'response_parser_factory') if config is not None and config.signature_version is UNSIGNED: credentials = None elif aws_access_key_id is not None and \ aws_secret_access_key is not None: credentials = AioCredentials( access_key=aws_access_key_id, secret_key=aws_secret_access_key, token=aws_session_token) elif self._missing_cred_vars(aws_access_key_id, aws_secret_access_key): raise PartialCredentialsError( provider='explicit', cred_var=self._missing_cred_vars(aws_access_key_id, aws_secret_access_key)) else: credentials = await self.get_credentials() endpoint_resolver = self._get_internal_component('endpoint_resolver') exceptions_factory = self._get_internal_component('exceptions_factory') config_store = self.get_component('config_store') client_creator = AioClientCreator( loader, endpoint_resolver, self.user_agent(), event_emitter, retryhandler, translate, response_parser_factory, exceptions_factory, config_store) client = await client_creator.create_client( service_name=service_name, region_name=region_name, is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify, credentials=credentials, scoped_config=self.get_scoped_config(), client_config=config, api_version=api_version) monitor = self._get_internal_component('monitor') if monitor is not None: monitor.register(client.meta.events) return client def _create_credential_resolver(self): return create_credential_resolver( self, region_name=self._last_client_region_used) async def get_credentials(self): if self._credentials is None: self._credentials = await (self._components.get_component( 'credential_provider').load_credentials()) return self._credentials def set_credentials(self, access_key, secret_key, token=None): self._credentials = AioCredentials(access_key, secret_key, token) async def get_service_model(self, service_name, api_version=None): service_description = await self.get_service_data(service_name, api_version) return ServiceModel(service_description, service_name=service_name) async def get_service_data(self, service_name, api_version=None): """ Retrieve the fully merged data associated with a service. """ data_path = service_name service_data = self.get_component('data_loader').load_service_model( data_path, type_name='service-2', api_version=api_version ) service_id = EVENT_ALIASES.get(service_name, service_name) await self._events.emit('service-data-loaded.%s' % service_id, service_data=service_data, service_name=service_name, session=self) return service_data async def get_available_regions(self, service_name, partition_name='aws', allow_non_regional=False): resolver = self._get_internal_component('endpoint_resolver') results = [] try: service_data = await self.get_service_data(service_name) endpoint_prefix = service_data['metadata'].get( 'endpointPrefix', service_name) results = resolver.get_available_endpoints( endpoint_prefix, partition_name, allow_non_regional) except UnknownServiceError: pass return results def get_session(env_vars=None): """ Return a new session object. """ return AioSession(env_vars)
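# --- Illustrative usage (not part of the original module) ---
# A short sketch of the coroutine helpers AioSession adds on top of botocore's
# Session; 'sqs' is just an example service name.
import asyncio


async def _demo_session_helpers():
    session = get_session()
    # Both helpers below are coroutines on AioSession, unlike their
    # synchronous botocore counterparts.
    model = await session.get_service_model('sqs')
    print(model.service_name)
    regions = await session.get_available_regions('sqs')
    print(sorted(regions)[:3])


if __name__ == '__main__':
    asyncio.run(_demo_session_helpers())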
7,266
Python
42.51497
86
0.630333
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiobotocore/parsers.py
from botocore.parsers import ResponseParserFactory, RestXMLParser, \ RestJSONParser, JSONParser, QueryParser, EC2QueryParser from .eventstream import AioEventStream class AioRestXMLParser(RestXMLParser): def _create_event_stream(self, response, shape): parser = self._event_stream_parser name = response['context'].get('operation_name') return AioEventStream(response['body'], shape, parser, name) class AioEC2QueryParser(EC2QueryParser): def _create_event_stream(self, response, shape): parser = self._event_stream_parser name = response['context'].get('operation_name') return AioEventStream(response['body'], shape, parser, name) class AioQueryParser(QueryParser): def _create_event_stream(self, response, shape): parser = self._event_stream_parser name = response['context'].get('operation_name') return AioEventStream(response['body'], shape, parser, name) class AioJSONParser(JSONParser): def _create_event_stream(self, response, shape): parser = self._event_stream_parser name = response['context'].get('operation_name') return AioEventStream(response['body'], shape, parser, name) class AioRestJSONParser(RestJSONParser): def _create_event_stream(self, response, shape): parser = self._event_stream_parser name = response['context'].get('operation_name') return AioEventStream(response['body'], shape, parser, name) PROTOCOL_PARSERS = { 'ec2': AioEC2QueryParser, 'query': AioQueryParser, 'json': AioJSONParser, 'rest-json': AioRestJSONParser, 'rest-xml': AioRestXMLParser, } class AioResponseParserFactory(ResponseParserFactory): def create_parser(self, protocol_name): parser_cls = PROTOCOL_PARSERS[protocol_name] return parser_cls(**self._defaults)
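# --- Illustrative usage (not part of the original module) ---
# A tiny sketch showing how the factory above selects a protocol parser;
# 'rest-json' is one of the keys in PROTOCOL_PARSERS.
def _demo_parser_factory():
    factory = AioResponseParserFactory()
    parser = factory.create_parser('rest-json')
    return isinstance(parser, AioRestJSONParser)  # expected to be True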
1,857
Python
33.407407
68
0.700054
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/__init__.py
from .core import contents, where __all__ = ["contents", "where"] __version__ = "2023.05.07"
94
Python
17.999996
33
0.617021
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/core.py
""" certifi.py ~~~~~~~~~~ This module returns the installation location of cacert.pem or its contents. """ import sys if sys.version_info >= (3, 11): from importlib.resources import as_file, files _CACERT_CTX = None _CACERT_PATH = None def where() -> str: # This is slightly terrible, but we want to delay extracting the file # in cases where we're inside of a zipimport situation until someone # actually calls where(), but we don't want to re-extract the file # on every call of where(), so we'll do it once then store it in a # global variable. global _CACERT_CTX global _CACERT_PATH if _CACERT_PATH is None: # This is slightly janky, the importlib.resources API wants you to # manage the cleanup of this file, so it doesn't actually return a # path, it returns a context manager that will give you the path # when you enter it and will do any cleanup when you leave it. In # the common case of not needing a temporary file, it will just # return the file system location and the __exit__() is a no-op. # # We also have to hold onto the actual context manager, because # it will do the cleanup whenever it gets garbage collected, so # we will also store that at the global level as well. _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem")) _CACERT_PATH = str(_CACERT_CTX.__enter__()) return _CACERT_PATH def contents() -> str: return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii") elif sys.version_info >= (3, 7): from importlib.resources import path as get_path, read_text _CACERT_CTX = None _CACERT_PATH = None def where() -> str: # This is slightly terrible, but we want to delay extracting the # file in cases where we're inside of a zipimport situation until # someone actually calls where(), but we don't want to re-extract # the file on every call of where(), so we'll do it once then store # it in a global variable. global _CACERT_CTX global _CACERT_PATH if _CACERT_PATH is None: # This is slightly janky, the importlib.resources API wants you # to manage the cleanup of this file, so it doesn't actually # return a path, it returns a context manager that will give # you the path when you enter it and will do any cleanup when # you leave it. In the common case of not needing a temporary # file, it will just return the file system location and the # __exit__() is a no-op. # # We also have to hold onto the actual context manager, because # it will do the cleanup whenever it gets garbage collected, so # we will also store that at the global level as well. _CACERT_CTX = get_path("certifi", "cacert.pem") _CACERT_PATH = str(_CACERT_CTX.__enter__()) return _CACERT_PATH def contents() -> str: return read_text("certifi", "cacert.pem", encoding="ascii") else: import os import types from typing import Union Package = Union[types.ModuleType, str] Resource = Union[str, "os.PathLike"] # This fallback will work for Python versions prior to 3.7 that lack the # importlib.resources module but relies on the existing `where` function # so won't address issues with environments like PyOxidizer that don't set # __file__ on modules. def read_text( package: Package, resource: Resource, encoding: str = 'utf-8', errors: str = 'strict' ) -> str: with open(where(), encoding=encoding) as data: return data.read() # If we don't have importlib.resources, then we will just do the old logic # of assuming we're on the filesystem and munge the path directly. 
def where() -> str: f = os.path.dirname(__file__) return os.path.join(f, "cacert.pem") def contents() -> str: return read_text("certifi", "cacert.pem", encoding="ascii")
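# --- Illustrative usage (not part of the original module) ---
# A small sketch of feeding the bundled CA file into the standard library's
# ssl module; it works the same regardless of which branch above defined
# where().
def _demo_ssl_context():
    import ssl

    context = ssl.create_default_context(cafile=where())
    return context.cert_store_stats()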
4,219
Python
37.715596
82
0.620289
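The certifi module above exposes only two public helpers, where() and contents(). A minimal, illustrative sketch (not part of the bundled sources) of how they are typically consumed, assuming the helpers are re-exported at package level, as the package's __main__ module below relies on:

# Illustrative usage sketch, not part of the bundled file.
import ssl

import certifi

# where() returns a filesystem path to cacert.pem (extracting it first when the
# package is imported from a zip archive), so it fits anywhere a CA file path is
# expected.
context = ssl.create_default_context(cafile=certifi.where())

# contents() returns the PEM text itself, for APIs that want data rather than a path.
pem_text = certifi.contents()
print(pem_text[:60])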
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/certifi/__main__.py
import argparse

from certifi import contents, where

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--contents", action="store_true")
args = parser.parse_args()

if args.contents:
    print(contents())
else:
    print(where())
243
Python
17.769229
60
0.711934
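Given the argparse setup above, the package doubles as a small command-line tool. An illustrative way to drive it in-process (the -c/--contents flag name comes directly from the add_argument call above):

# Illustrative usage sketch, not part of the bundled file.
import subprocess
import sys

# Prints the path to the bundled cacert.pem.
subprocess.run([sys.executable, "-m", "certifi"], check=True)

# With -c/--contents, prints the PEM contents instead of the path.
subprocess.run([sys.executable, "-m", "certifi", "--contents"], check=True)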
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/validators.py
""" Creation and extension of validators, with implementations for existing drafts. """ from __future__ import division from warnings import warn import contextlib import json import numbers from six import add_metaclass from jsonschema import ( _legacy_validators, _types, _utils, _validators, exceptions, ) from jsonschema.compat import ( Sequence, int_types, iteritems, lru_cache, str_types, unquote, urldefrag, urljoin, urlopen, urlsplit, ) # Sigh. https://gitlab.com/pycqa/flake8/issues/280 # https://github.com/pyga/ebb-lint/issues/7 # Imported for backwards compatibility. from jsonschema.exceptions import ErrorTree ErrorTree class _DontDoThat(Exception): """ Raised when a Validators with non-default type checker is misused. Asking one for DEFAULT_TYPES doesn't make sense, since type checkers exist for the unrepresentable cases where DEFAULT_TYPES can't represent the type relationship. """ def __str__(self): return "DEFAULT_TYPES cannot be used on Validators using TypeCheckers" validators = {} meta_schemas = _utils.URIDict() def _generate_legacy_type_checks(types=()): """ Generate newer-style type checks out of JSON-type-name-to-type mappings. Arguments: types (dict): A mapping of type names to their Python types Returns: A dictionary of definitions to pass to `TypeChecker` """ types = dict(types) def gen_type_check(pytypes): pytypes = _utils.flatten(pytypes) def type_check(checker, instance): if isinstance(instance, bool): if bool not in pytypes: return False return isinstance(instance, pytypes) return type_check definitions = {} for typename, pytypes in iteritems(types): definitions[typename] = gen_type_check(pytypes) return definitions _DEPRECATED_DEFAULT_TYPES = { u"array": list, u"boolean": bool, u"integer": int_types, u"null": type(None), u"number": numbers.Number, u"object": dict, u"string": str_types, } _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES = _types.TypeChecker( type_checkers=_generate_legacy_type_checks(_DEPRECATED_DEFAULT_TYPES), ) def validates(version): """ Register the decorated validator for a ``version`` of the specification. Registered validators and their meta schemas will be considered when parsing ``$schema`` properties' URIs. Arguments: version (str): An identifier to use as the version's name Returns: collections.Callable: a class decorator to decorate the validator with the version """ def _validates(cls): validators[version] = cls meta_schema_id = cls.ID_OF(cls.META_SCHEMA) if meta_schema_id: meta_schemas[meta_schema_id] = cls return cls return _validates def _DEFAULT_TYPES(self): if self._CREATED_WITH_DEFAULT_TYPES is None: raise _DontDoThat() warn( ( "The DEFAULT_TYPES attribute is deprecated. " "See the type checker attached to this validator instead." ), DeprecationWarning, stacklevel=2, ) return self._DEFAULT_TYPES class _DefaultTypesDeprecatingMetaClass(type): DEFAULT_TYPES = property(_DEFAULT_TYPES) def _id_of(schema): if schema is True or schema is False: return u"" return schema.get(u"$id", u"") def create( meta_schema, validators=(), version=None, default_types=None, type_checker=None, id_of=_id_of, ): """ Create a new validator class. Arguments: meta_schema (collections.Mapping): the meta schema for the new validator class validators (collections.Mapping): a mapping from names to callables, where each callable will validate the schema property with the given name. Each callable should take 4 arguments: 1. a validator instance, 2. the value of the property being validated within the instance 3. the instance 4. 
the schema version (str): an identifier for the version that this validator class will validate. If provided, the returned validator class will have its ``__name__`` set to include the version, and also will have `jsonschema.validators.validates` automatically called for the given version. type_checker (jsonschema.TypeChecker): a type checker, used when applying the :validator:`type` validator. If unprovided, a `jsonschema.TypeChecker` will be created with a set of default types typical of JSON Schema drafts. default_types (collections.Mapping): .. deprecated:: 3.0.0 Please use the type_checker argument instead. If set, it provides mappings of JSON types to Python types that will be converted to functions and redefined in this object's `jsonschema.TypeChecker`. id_of (collections.Callable): A function that given a schema, returns its ID. Returns: a new `jsonschema.IValidator` class """ if default_types is not None: if type_checker is not None: raise TypeError( "Do not specify default_types when providing a type checker.", ) _created_with_default_types = True warn( ( "The default_types argument is deprecated. " "Use the type_checker argument instead." ), DeprecationWarning, stacklevel=2, ) type_checker = _types.TypeChecker( type_checkers=_generate_legacy_type_checks(default_types), ) else: default_types = _DEPRECATED_DEFAULT_TYPES if type_checker is None: _created_with_default_types = False type_checker = _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES elif type_checker is _TYPE_CHECKER_FOR_DEPRECATED_DEFAULT_TYPES: _created_with_default_types = False else: _created_with_default_types = None @add_metaclass(_DefaultTypesDeprecatingMetaClass) class Validator(object): VALIDATORS = dict(validators) META_SCHEMA = dict(meta_schema) TYPE_CHECKER = type_checker ID_OF = staticmethod(id_of) DEFAULT_TYPES = property(_DEFAULT_TYPES) _DEFAULT_TYPES = dict(default_types) _CREATED_WITH_DEFAULT_TYPES = _created_with_default_types def __init__( self, schema, types=(), resolver=None, format_checker=None, ): if types: warn( ( "The types argument is deprecated. Provide " "a type_checker to jsonschema.validators.extend " "instead." 
), DeprecationWarning, stacklevel=2, ) self.TYPE_CHECKER = self.TYPE_CHECKER.redefine_many( _generate_legacy_type_checks(types), ) if resolver is None: resolver = RefResolver.from_schema(schema, id_of=id_of) self.resolver = resolver self.format_checker = format_checker self.schema = schema @classmethod def check_schema(cls, schema): for error in cls(cls.META_SCHEMA).iter_errors(schema): raise exceptions.SchemaError.create_from(error) def iter_errors(self, instance, _schema=None): if _schema is None: _schema = self.schema if _schema is True: return elif _schema is False: yield exceptions.ValidationError( "False schema does not allow %r" % (instance,), validator=None, validator_value=None, instance=instance, schema=_schema, ) return scope = id_of(_schema) if scope: self.resolver.push_scope(scope) try: ref = _schema.get(u"$ref") if ref is not None: validators = [(u"$ref", ref)] else: validators = iteritems(_schema) for k, v in validators: validator = self.VALIDATORS.get(k) if validator is None: continue errors = validator(self, v, instance, _schema) or () for error in errors: # set details if not already set by the called fn error._set( validator=k, validator_value=v, instance=instance, schema=_schema, ) if k != u"$ref": error.schema_path.appendleft(k) yield error finally: if scope: self.resolver.pop_scope() def descend(self, instance, schema, path=None, schema_path=None): for error in self.iter_errors(instance, schema): if path is not None: error.path.appendleft(path) if schema_path is not None: error.schema_path.appendleft(schema_path) yield error def validate(self, *args, **kwargs): for error in self.iter_errors(*args, **kwargs): raise error def is_type(self, instance, type): try: return self.TYPE_CHECKER.is_type(instance, type) except exceptions.UndefinedTypeCheck: raise exceptions.UnknownType(type, instance, self.schema) def is_valid(self, instance, _schema=None): error = next(self.iter_errors(instance, _schema), None) return error is None if version is not None: Validator = validates(version)(Validator) Validator.__name__ = version.title().replace(" ", "") + "Validator" return Validator def extend(validator, validators=(), version=None, type_checker=None): """ Create a new validator class by extending an existing one. Arguments: validator (jsonschema.IValidator): an existing validator class validators (collections.Mapping): a mapping of new validator callables to extend with, whose structure is as in `create`. .. note:: Any validator callables with the same name as an existing one will (silently) replace the old validator callable entirely, effectively overriding any validation done in the "parent" validator class. If you wish to instead extend the behavior of a parent's validator callable, delegate and call it directly in the new validator function by retrieving it using ``OldValidator.VALIDATORS["validator_name"]``. version (str): a version for the new validator class type_checker (jsonschema.TypeChecker): a type checker, used when applying the :validator:`type` validator. If unprovided, the type checker of the extended `jsonschema.IValidator` will be carried along.` Returns: a new `jsonschema.IValidator` class extending the one provided .. note:: Meta Schemas The new validator class will have its parent's meta schema. If you wish to change or extend the meta schema in the new validator class, modify ``META_SCHEMA`` directly on the returned class. Note that no implicit copying is done, so a copy should likely be made before modifying it, in order to not affect the old validator. 
""" all_validators = dict(validator.VALIDATORS) all_validators.update(validators) if type_checker is None: type_checker = validator.TYPE_CHECKER elif validator._CREATED_WITH_DEFAULT_TYPES: raise TypeError( "Cannot extend a validator created with default_types " "with a type_checker. Update the validator to use a " "type_checker when created." ) return create( meta_schema=validator.META_SCHEMA, validators=all_validators, version=version, type_checker=type_checker, id_of=validator.ID_OF, ) Draft3Validator = create( meta_schema=_utils.load_schema("draft3"), validators={ u"$ref": _validators.ref, u"additionalItems": _validators.additionalItems, u"additionalProperties": _validators.additionalProperties, u"dependencies": _legacy_validators.dependencies_draft3, u"disallow": _legacy_validators.disallow_draft3, u"divisibleBy": _validators.multipleOf, u"enum": _validators.enum, u"extends": _legacy_validators.extends_draft3, u"format": _validators.format, u"items": _legacy_validators.items_draft3_draft4, u"maxItems": _validators.maxItems, u"maxLength": _validators.maxLength, u"maximum": _legacy_validators.maximum_draft3_draft4, u"minItems": _validators.minItems, u"minLength": _validators.minLength, u"minimum": _legacy_validators.minimum_draft3_draft4, u"pattern": _validators.pattern, u"patternProperties": _validators.patternProperties, u"properties": _legacy_validators.properties_draft3, u"type": _legacy_validators.type_draft3, u"uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft3_type_checker, version="draft3", id_of=lambda schema: schema.get(u"id", ""), ) Draft4Validator = create( meta_schema=_utils.load_schema("draft4"), validators={ u"$ref": _validators.ref, u"additionalItems": _validators.additionalItems, u"additionalProperties": _validators.additionalProperties, u"allOf": _validators.allOf, u"anyOf": _validators.anyOf, u"dependencies": _validators.dependencies, u"enum": _validators.enum, u"format": _validators.format, u"items": _legacy_validators.items_draft3_draft4, u"maxItems": _validators.maxItems, u"maxLength": _validators.maxLength, u"maxProperties": _validators.maxProperties, u"maximum": _legacy_validators.maximum_draft3_draft4, u"minItems": _validators.minItems, u"minLength": _validators.minLength, u"minProperties": _validators.minProperties, u"minimum": _legacy_validators.minimum_draft3_draft4, u"multipleOf": _validators.multipleOf, u"not": _validators.not_, u"oneOf": _validators.oneOf, u"pattern": _validators.pattern, u"patternProperties": _validators.patternProperties, u"properties": _validators.properties, u"required": _validators.required, u"type": _validators.type, u"uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft4_type_checker, version="draft4", id_of=lambda schema: schema.get(u"id", ""), ) Draft6Validator = create( meta_schema=_utils.load_schema("draft6"), validators={ u"$ref": _validators.ref, u"additionalItems": _validators.additionalItems, u"additionalProperties": _validators.additionalProperties, u"allOf": _validators.allOf, u"anyOf": _validators.anyOf, u"const": _validators.const, u"contains": _validators.contains, u"dependencies": _validators.dependencies, u"enum": _validators.enum, u"exclusiveMaximum": _validators.exclusiveMaximum, u"exclusiveMinimum": _validators.exclusiveMinimum, u"format": _validators.format, u"items": _validators.items, u"maxItems": _validators.maxItems, u"maxLength": _validators.maxLength, u"maxProperties": _validators.maxProperties, u"maximum": _validators.maximum, u"minItems": _validators.minItems, u"minLength": 
_validators.minLength, u"minProperties": _validators.minProperties, u"minimum": _validators.minimum, u"multipleOf": _validators.multipleOf, u"not": _validators.not_, u"oneOf": _validators.oneOf, u"pattern": _validators.pattern, u"patternProperties": _validators.patternProperties, u"properties": _validators.properties, u"propertyNames": _validators.propertyNames, u"required": _validators.required, u"type": _validators.type, u"uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft6_type_checker, version="draft6", ) Draft7Validator = create( meta_schema=_utils.load_schema("draft7"), validators={ u"$ref": _validators.ref, u"additionalItems": _validators.additionalItems, u"additionalProperties": _validators.additionalProperties, u"allOf": _validators.allOf, u"anyOf": _validators.anyOf, u"const": _validators.const, u"contains": _validators.contains, u"dependencies": _validators.dependencies, u"enum": _validators.enum, u"exclusiveMaximum": _validators.exclusiveMaximum, u"exclusiveMinimum": _validators.exclusiveMinimum, u"format": _validators.format, u"if": _validators.if_, u"items": _validators.items, u"maxItems": _validators.maxItems, u"maxLength": _validators.maxLength, u"maxProperties": _validators.maxProperties, u"maximum": _validators.maximum, u"minItems": _validators.minItems, u"minLength": _validators.minLength, u"minProperties": _validators.minProperties, u"minimum": _validators.minimum, u"multipleOf": _validators.multipleOf, u"oneOf": _validators.oneOf, u"not": _validators.not_, u"pattern": _validators.pattern, u"patternProperties": _validators.patternProperties, u"properties": _validators.properties, u"propertyNames": _validators.propertyNames, u"required": _validators.required, u"type": _validators.type, u"uniqueItems": _validators.uniqueItems, }, type_checker=_types.draft7_type_checker, version="draft7", ) _LATEST_VERSION = Draft7Validator class RefResolver(object): """ Resolve JSON References. Arguments: base_uri (str): The URI of the referring document referrer: The actual referring document store (dict): A mapping from URIs to documents to cache cache_remote (bool): Whether remote refs should be cached after first resolution handlers (dict): A mapping from URI schemes to functions that should be used to retrieve them urljoin_cache (:func:`functools.lru_cache`): A cache that will be used for caching the results of joining the resolution scope to subscopes. remote_cache (:func:`functools.lru_cache`): A cache that will be used for caching the results of resolved remote URLs. Attributes: cache_remote (bool): Whether remote refs should be cached after first resolution """ def __init__( self, base_uri, referrer, store=(), cache_remote=True, handlers=(), urljoin_cache=None, remote_cache=None, ): if urljoin_cache is None: urljoin_cache = lru_cache(1024)(urljoin) if remote_cache is None: remote_cache = lru_cache(1024)(self.resolve_from_url) self.referrer = referrer self.cache_remote = cache_remote self.handlers = dict(handlers) self._scopes_stack = [base_uri] self.store = _utils.URIDict( (id, validator.META_SCHEMA) for id, validator in iteritems(meta_schemas) ) self.store.update(store) self.store[base_uri] = referrer self._urljoin_cache = urljoin_cache self._remote_cache = remote_cache @classmethod def from_schema(cls, schema, id_of=_id_of, *args, **kwargs): """ Construct a resolver from a JSON schema object. 
Arguments: schema: the referring schema Returns: `RefResolver` """ return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs) def push_scope(self, scope): """ Enter a given sub-scope. Treats further dereferences as being performed underneath the given scope. """ self._scopes_stack.append( self._urljoin_cache(self.resolution_scope, scope), ) def pop_scope(self): """ Exit the most recent entered scope. Treats further dereferences as being performed underneath the original scope. Don't call this method more times than `push_scope` has been called. """ try: self._scopes_stack.pop() except IndexError: raise exceptions.RefResolutionError( "Failed to pop the scope from an empty stack. " "`pop_scope()` should only be called once for every " "`push_scope()`" ) @property def resolution_scope(self): """ Retrieve the current resolution scope. """ return self._scopes_stack[-1] @property def base_uri(self): """ Retrieve the current base URI, not including any fragment. """ uri, _ = urldefrag(self.resolution_scope) return uri @contextlib.contextmanager def in_scope(self, scope): """ Temporarily enter the given scope for the duration of the context. """ self.push_scope(scope) try: yield finally: self.pop_scope() @contextlib.contextmanager def resolving(self, ref): """ Resolve the given ``ref`` and enter its resolution scope. Exits the scope on exit of this context manager. Arguments: ref (str): The reference to resolve """ url, resolved = self.resolve(ref) self.push_scope(url) try: yield resolved finally: self.pop_scope() def resolve(self, ref): """ Resolve the given reference. """ url = self._urljoin_cache(self.resolution_scope, ref) return url, self._remote_cache(url) def resolve_from_url(self, url): """ Resolve the given remote URL. """ url, fragment = urldefrag(url) try: document = self.store[url] except KeyError: try: document = self.resolve_remote(url) except Exception as exc: raise exceptions.RefResolutionError(exc) return self.resolve_fragment(document, fragment) def resolve_fragment(self, document, fragment): """ Resolve a ``fragment`` within the referenced ``document``. Arguments: document: The referent document fragment (str): a URI fragment to resolve within it """ fragment = fragment.lstrip(u"/") parts = unquote(fragment).split(u"/") if fragment else [] for part in parts: part = part.replace(u"~1", u"/").replace(u"~0", u"~") if isinstance(document, Sequence): # Array indexes should be turned into integers try: part = int(part) except ValueError: pass try: document = document[part] except (TypeError, LookupError): raise exceptions.RefResolutionError( "Unresolvable JSON pointer: %r" % fragment ) return document def resolve_remote(self, uri): """ Resolve a remote ``uri``. If called directly, does not check the store first, but after retrieving the document at the specified URI it will be saved in the store if :attr:`cache_remote` is True. .. note:: If the requests_ library is present, ``jsonschema`` will use it to request the remote ``uri``, so that the correct encoding is detected and used. If it isn't, or if the scheme of the ``uri`` is not ``http`` or ``https``, UTF-8 is assumed. Arguments: uri (str): The URI to resolve Returns: The retrieved document .. 
_requests: https://pypi.org/project/requests/ """ try: import requests except ImportError: requests = None scheme = urlsplit(uri).scheme if scheme in self.handlers: result = self.handlers[scheme](uri) elif scheme in [u"http", u"https"] and requests: # Requests has support for detecting the correct encoding of # json over http result = requests.get(uri).json() else: # Otherwise, pass off to urllib and assume utf-8 with urlopen(uri) as url: result = json.loads(url.read().decode("utf-8")) if self.cache_remote: self.store[uri] = result return result def validate(instance, schema, cls=None, *args, **kwargs): """ Validate an instance under the given schema. >>> validate([2, 3, 4], {"maxItems": 2}) Traceback (most recent call last): ... ValidationError: [2, 3, 4] is too long :func:`validate` will first verify that the provided schema is itself valid, since not doing so can lead to less obvious error messages and fail in less obvious or consistent ways. If you know you have a valid schema already, especially if you intend to validate multiple instances with the same schema, you likely would prefer using the `IValidator.validate` method directly on a specific validator (e.g. ``Draft7Validator.validate``). Arguments: instance: The instance to validate schema: The schema to validate with cls (IValidator): The class that will be used to validate the instance. If the ``cls`` argument is not provided, two things will happen in accordance with the specification. First, if the schema has a :validator:`$schema` property containing a known meta-schema [#]_ then the proper validator will be used. The specification recommends that all schemas contain :validator:`$schema` properties for this reason. If no :validator:`$schema` property is found, the default validator class is the latest released draft. Any other provided positional and keyword arguments will be passed on when instantiating the ``cls``. Raises: `jsonschema.exceptions.ValidationError` if the instance is invalid `jsonschema.exceptions.SchemaError` if the schema itself is invalid .. rubric:: Footnotes .. [#] known by a validator registered with `jsonschema.validators.validates` """ if cls is None: cls = validator_for(schema) cls.check_schema(schema) validator = cls(schema, *args, **kwargs) error = exceptions.best_match(validator.iter_errors(instance)) if error is not None: raise error def validator_for(schema, default=_LATEST_VERSION): """ Retrieve the validator class appropriate for validating the given schema. Uses the :validator:`$schema` property that should be present in the given schema to look up the appropriate validator class. Arguments: schema (collections.Mapping or bool): the schema to look at default: the default to return if the appropriate validator class cannot be determined. If unprovided, the default is to return the latest supported draft. """ if schema is True or schema is False or u"$schema" not in schema: return default if schema[u"$schema"] not in meta_schemas: warn( ( "The metaschema specified by $schema was not found. " "Using the latest draft to validate, but this will raise " "an error in the future." ), DeprecationWarning, stacklevel=2, ) return meta_schemas.get(schema[u"$schema"], _LATEST_VERSION)
29,400
Python
29.279094
79
0.59085
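validators.py above is the core of the package; a short illustrative sketch (not from the bundled sources) of its two most common entry points, the module-level validate() and a validator class produced by create():

# Illustrative usage sketch, not part of the bundled file.
from jsonschema import Draft7Validator, ValidationError, validate

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}},
    "required": ["name"],
}

# validate() first checks the schema itself, then raises the best-matching error.
try:
    validate(instance={"name": 42}, schema=schema)
except ValidationError as error:
    print(error.message)

# Reusing a validator instance avoids re-checking the schema and allows iterating
# over every error rather than stopping at the first one.
validator = Draft7Validator(schema)
for error in validator.iter_errors({}):
    print(list(error.path), error.message)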
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_format.py
import datetime import re import socket import struct from jsonschema.compat import str_types from jsonschema.exceptions import FormatError class FormatChecker(object): """ A ``format`` property checker. JSON Schema does not mandate that the ``format`` property actually do any validation. If validation is desired however, instances of this class can be hooked into validators to enable format validation. `FormatChecker` objects always return ``True`` when asked about formats that they do not know how to validate. To check a custom format using a function that takes an instance and returns a ``bool``, use the `FormatChecker.checks` or `FormatChecker.cls_checks` decorators. Arguments: formats (~collections.Iterable): The known formats to validate. This argument can be used to limit which formats will be used during validation. """ checkers = {} def __init__(self, formats=None): if formats is None: self.checkers = self.checkers.copy() else: self.checkers = dict((k, self.checkers[k]) for k in formats) def __repr__(self): return "<FormatChecker checkers={}>".format(sorted(self.checkers)) def checks(self, format, raises=()): """ Register a decorated function as validating a new format. Arguments: format (str): The format that the decorated function will check. raises (Exception): The exception(s) raised by the decorated function when an invalid instance is found. The exception object will be accessible as the `jsonschema.exceptions.ValidationError.cause` attribute of the resulting validation error. """ def _checks(func): self.checkers[format] = (func, raises) return func return _checks cls_checks = classmethod(checks) def check(self, instance, format): """ Check whether the instance conforms to the given format. Arguments: instance (*any primitive type*, i.e. str, number, bool): The instance to check format (str): The format that instance should conform to Raises: FormatError: if the instance does not conform to ``format`` """ if format not in self.checkers: return func, raises = self.checkers[format] result, cause = None, None try: result = func(instance) except raises as e: cause = e if not result: raise FormatError( "%r is not a %r" % (instance, format), cause=cause, ) def conforms(self, instance, format): """ Check whether the instance conforms to the given format. Arguments: instance (*any primitive type*, i.e. str, number, bool): The instance to check format (str): The format that instance should conform to Returns: bool: whether it conformed """ try: self.check(instance, format) except FormatError: return False else: return True draft3_format_checker = FormatChecker() draft4_format_checker = FormatChecker() draft6_format_checker = FormatChecker() draft7_format_checker = FormatChecker() _draft_checkers = dict( draft3=draft3_format_checker, draft4=draft4_format_checker, draft6=draft6_format_checker, draft7=draft7_format_checker, ) def _checks_drafts( name=None, draft3=None, draft4=None, draft6=None, draft7=None, raises=(), ): draft3 = draft3 or name draft4 = draft4 or name draft6 = draft6 or name draft7 = draft7 or name def wrap(func): if draft3: func = _draft_checkers["draft3"].checks(draft3, raises)(func) if draft4: func = _draft_checkers["draft4"].checks(draft4, raises)(func) if draft6: func = _draft_checkers["draft6"].checks(draft6, raises)(func) if draft7: func = _draft_checkers["draft7"].checks(draft7, raises)(func) # Oy. This is bad global state, but relied upon for now, until # deprecation. 
See https://github.com/Julian/jsonschema/issues/519 # and test_format_checkers_come_with_defaults FormatChecker.cls_checks(draft7 or draft6 or draft4 or draft3, raises)( func, ) return func return wrap @_checks_drafts(name="idn-email") @_checks_drafts(name="email") def is_email(instance): if not isinstance(instance, str_types): return True return "@" in instance _ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$") @_checks_drafts( draft3="ip-address", draft4="ipv4", draft6="ipv4", draft7="ipv4", ) def is_ipv4(instance): if not isinstance(instance, str_types): return True if not _ipv4_re.match(instance): return False return all(0 <= int(component) <= 255 for component in instance.split(".")) if hasattr(socket, "inet_pton"): # FIXME: Really this only should raise struct.error, but see the sadness # that is https://twistedmatrix.com/trac/ticket/9409 @_checks_drafts( name="ipv6", raises=(socket.error, struct.error, ValueError), ) def is_ipv6(instance): if not isinstance(instance, str_types): return True return socket.inet_pton(socket.AF_INET6, instance) _host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$") @_checks_drafts( draft3="host-name", draft4="hostname", draft6="hostname", draft7="hostname", ) def is_host_name(instance): if not isinstance(instance, str_types): return True if not _host_name_re.match(instance): return False components = instance.split(".") for component in components: if len(component) > 63: return False return True try: # The built-in `idna` codec only implements RFC 3890, so we go elsewhere. import idna except ImportError: pass else: @_checks_drafts(draft7="idn-hostname", raises=idna.IDNAError) def is_idn_host_name(instance): if not isinstance(instance, str_types): return True idna.encode(instance) return True try: import rfc3987 except ImportError: try: from rfc3986_validator import validate_rfc3986 except ImportError: pass else: @_checks_drafts(name="uri") def is_uri(instance): if not isinstance(instance, str_types): return True return validate_rfc3986(instance, rule="URI") @_checks_drafts( draft6="uri-reference", draft7="uri-reference", raises=ValueError, ) def is_uri_reference(instance): if not isinstance(instance, str_types): return True return validate_rfc3986(instance, rule="URI_reference") else: @_checks_drafts(draft7="iri", raises=ValueError) def is_iri(instance): if not isinstance(instance, str_types): return True return rfc3987.parse(instance, rule="IRI") @_checks_drafts(draft7="iri-reference", raises=ValueError) def is_iri_reference(instance): if not isinstance(instance, str_types): return True return rfc3987.parse(instance, rule="IRI_reference") @_checks_drafts(name="uri", raises=ValueError) def is_uri(instance): if not isinstance(instance, str_types): return True return rfc3987.parse(instance, rule="URI") @_checks_drafts( draft6="uri-reference", draft7="uri-reference", raises=ValueError, ) def is_uri_reference(instance): if not isinstance(instance, str_types): return True return rfc3987.parse(instance, rule="URI_reference") try: from strict_rfc3339 import validate_rfc3339 except ImportError: try: from rfc3339_validator import validate_rfc3339 except ImportError: validate_rfc3339 = None if validate_rfc3339: @_checks_drafts(name="date-time") def is_datetime(instance): if not isinstance(instance, str_types): return True return validate_rfc3339(instance) @_checks_drafts(draft7="time") def is_time(instance): if not isinstance(instance, str_types): return True return is_datetime("1970-01-01T" + instance) @_checks_drafts(name="regex", 
raises=re.error) def is_regex(instance): if not isinstance(instance, str_types): return True return re.compile(instance) @_checks_drafts(draft3="date", draft7="date", raises=ValueError) def is_date(instance): if not isinstance(instance, str_types): return True return datetime.datetime.strptime(instance, "%Y-%m-%d") @_checks_drafts(draft3="time", raises=ValueError) def is_draft3_time(instance): if not isinstance(instance, str_types): return True return datetime.datetime.strptime(instance, "%H:%M:%S") try: import webcolors except ImportError: pass else: def is_css_color_code(instance): return webcolors.normalize_hex(instance) @_checks_drafts(draft3="color", raises=(ValueError, TypeError)) def is_css21_color(instance): if ( not isinstance(instance, str_types) or instance.lower() in webcolors.css21_names_to_hex ): return True return is_css_color_code(instance) def is_css3_color(instance): if instance.lower() in webcolors.css3_names_to_hex: return True return is_css_color_code(instance) try: import jsonpointer except ImportError: pass else: @_checks_drafts( draft6="json-pointer", draft7="json-pointer", raises=jsonpointer.JsonPointerException, ) def is_json_pointer(instance): if not isinstance(instance, str_types): return True return jsonpointer.JsonPointer(instance) # TODO: I don't want to maintain this, so it # needs to go either into jsonpointer (pending # https://github.com/stefankoegl/python-json-pointer/issues/34) or # into a new external library. @_checks_drafts( draft7="relative-json-pointer", raises=jsonpointer.JsonPointerException, ) def is_relative_json_pointer(instance): # Definition taken from: # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 if not isinstance(instance, str_types): return True non_negative_integer, rest = [], "" for i, character in enumerate(instance): if character.isdigit(): non_negative_integer.append(character) continue if not non_negative_integer: return False rest = instance[i:] break return (rest == "#") or jsonpointer.JsonPointer(rest) try: import uritemplate.exceptions except ImportError: pass else: @_checks_drafts( draft6="uri-template", draft7="uri-template", raises=uritemplate.exceptions.InvalidTemplate, ) def is_uri_template( instance, template_validator=uritemplate.Validator().force_balanced_braces(), ): template = uritemplate.URITemplate(instance) return template_validator.validate(template)
11,691
Python
26.446009
87
0.616029
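As the FormatChecker docstring above notes, format validation is opt-in. An illustrative sketch of enabling it and of registering a custom format via the checks() decorator (the "even" format name is invented for the example):

# Illustrative usage sketch, not part of the bundled file.
from jsonschema import FormatChecker, ValidationError, validate

schema = {"properties": {"email": {"type": "string", "format": "email"}}}

# Passing a FormatChecker turns "format" from an annotation into a real check.
try:
    validate({"email": "not-an-email"}, schema, format_checker=FormatChecker())
except ValidationError as error:
    print(error.message)

# Custom formats hook in through the checks() decorator defined above.
checker = FormatChecker()

@checker.checks("even", raises=ValueError)
def is_even(value):
    return int(value) % 2 == 0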
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/exceptions.py
""" Validation errors, and some surrounding helpers. """ from collections import defaultdict, deque import itertools import pprint import textwrap import attr from jsonschema import _utils from jsonschema.compat import PY3, iteritems WEAK_MATCHES = frozenset(["anyOf", "oneOf"]) STRONG_MATCHES = frozenset() _unset = _utils.Unset() class _Error(Exception): def __init__( self, message, validator=_unset, path=(), cause=None, context=(), validator_value=_unset, instance=_unset, schema=_unset, schema_path=(), parent=None, ): super(_Error, self).__init__( message, validator, path, cause, context, validator_value, instance, schema, schema_path, parent, ) self.message = message self.path = self.relative_path = deque(path) self.schema_path = self.relative_schema_path = deque(schema_path) self.context = list(context) self.cause = self.__cause__ = cause self.validator = validator self.validator_value = validator_value self.instance = instance self.schema = schema self.parent = parent for error in context: error.parent = self def __repr__(self): return "<%s: %r>" % (self.__class__.__name__, self.message) def __unicode__(self): essential_for_verbose = ( self.validator, self.validator_value, self.instance, self.schema, ) if any(m is _unset for m in essential_for_verbose): return self.message pschema = pprint.pformat(self.schema, width=72) pinstance = pprint.pformat(self.instance, width=72) return self.message + textwrap.dedent(""" Failed validating %r in %s%s: %s On %s%s: %s """.rstrip() ) % ( self.validator, self._word_for_schema_in_error_message, _utils.format_as_index(list(self.relative_schema_path)[:-1]), _utils.indent(pschema), self._word_for_instance_in_error_message, _utils.format_as_index(self.relative_path), _utils.indent(pinstance), ) if PY3: __str__ = __unicode__ else: def __str__(self): return unicode(self).encode("utf-8") @classmethod def create_from(cls, other): return cls(**other._contents()) @property def absolute_path(self): parent = self.parent if parent is None: return self.relative_path path = deque(self.relative_path) path.extendleft(reversed(parent.absolute_path)) return path @property def absolute_schema_path(self): parent = self.parent if parent is None: return self.relative_schema_path path = deque(self.relative_schema_path) path.extendleft(reversed(parent.absolute_schema_path)) return path def _set(self, **kwargs): for k, v in iteritems(kwargs): if getattr(self, k) is _unset: setattr(self, k, v) def _contents(self): attrs = ( "message", "cause", "context", "validator", "validator_value", "path", "schema_path", "instance", "schema", "parent", ) return dict((attr, getattr(self, attr)) for attr in attrs) class ValidationError(_Error): """ An instance was invalid under a provided schema. """ _word_for_schema_in_error_message = "schema" _word_for_instance_in_error_message = "instance" class SchemaError(_Error): """ A schema was invalid under its corresponding metaschema. """ _word_for_schema_in_error_message = "metaschema" _word_for_instance_in_error_message = "schema" @attr.s(hash=True) class RefResolutionError(Exception): """ A ref could not be resolved. """ _cause = attr.ib() def __str__(self): return str(self._cause) class UndefinedTypeCheck(Exception): """ A type checker was asked to check a type it did not have registered. 
""" def __init__(self, type): self.type = type def __unicode__(self): return "Type %r is unknown to this type checker" % self.type if PY3: __str__ = __unicode__ else: def __str__(self): return unicode(self).encode("utf-8") class UnknownType(Exception): """ A validator was asked to validate an instance against an unknown type. """ def __init__(self, type, instance, schema): self.type = type self.instance = instance self.schema = schema def __unicode__(self): pschema = pprint.pformat(self.schema, width=72) pinstance = pprint.pformat(self.instance, width=72) return textwrap.dedent(""" Unknown type %r for validator with schema: %s While checking instance: %s """.rstrip() ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance)) if PY3: __str__ = __unicode__ else: def __str__(self): return unicode(self).encode("utf-8") class FormatError(Exception): """ Validating a format failed. """ def __init__(self, message, cause=None): super(FormatError, self).__init__(message, cause) self.message = message self.cause = self.__cause__ = cause def __unicode__(self): return self.message if PY3: __str__ = __unicode__ else: def __str__(self): return self.message.encode("utf-8") class ErrorTree(object): """ ErrorTrees make it easier to check which validations failed. """ _instance = _unset def __init__(self, errors=()): self.errors = {} self._contents = defaultdict(self.__class__) for error in errors: container = self for element in error.path: container = container[element] container.errors[error.validator] = error container._instance = error.instance def __contains__(self, index): """ Check whether ``instance[index]`` has any errors. """ return index in self._contents def __getitem__(self, index): """ Retrieve the child tree one level down at the given ``index``. If the index is not in the instance that this tree corresponds to and is not known by this tree, whatever error would be raised by ``instance.__getitem__`` will be propagated (usually this is some subclass of `exceptions.LookupError`. """ if self._instance is not _unset and index not in self: self._instance[index] return self._contents[index] def __setitem__(self, index, value): """ Add an error to the tree at the given ``index``. """ self._contents[index] = value def __iter__(self): """ Iterate (non-recursively) over the indices in the instance with errors. """ return iter(self._contents) def __len__(self): """ Return the `total_errors`. """ return self.total_errors def __repr__(self): return "<%s (%s total errors)>" % (self.__class__.__name__, len(self)) @property def total_errors(self): """ The total number of errors in the entire tree, including children. """ child_errors = sum(len(tree) for _, tree in iteritems(self._contents)) return len(self.errors) + child_errors def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES): """ Create a key function that can be used to sort errors by relevance. Arguments: weak (set): a collection of validator names to consider to be "weak". If there are two errors at the same level of the instance and one is in the set of weak validator names, the other error will take priority. By default, :validator:`anyOf` and :validator:`oneOf` are considered weak validators and will be superseded by other same-level validation errors. 
strong (set): a collection of validator names to consider to be "strong" """ def relevance(error): validator = error.validator return -len(error.path), validator not in weak, validator in strong return relevance relevance = by_relevance() def best_match(errors, key=relevance): """ Try to find an error that appears to be the best match among given errors. In general, errors that are higher up in the instance (i.e. for which `ValidationError.path` is shorter) are considered better matches, since they indicate "more" is wrong with the instance. If the resulting match is either :validator:`oneOf` or :validator:`anyOf`, the *opposite* assumption is made -- i.e. the deepest error is picked, since these validators only need to match once, and any other errors may not be relevant. Arguments: errors (collections.Iterable): the errors to select from. Do not provide a mixture of errors from different validation attempts (i.e. from different instances or schemas), since it won't produce sensical output. key (collections.Callable): the key to use when sorting errors. See `relevance` and transitively `by_relevance` for more details (the default is to sort with the defaults of that function). Changing the default is only useful if you want to change the function that rates errors but still want the error context descent done by this function. Returns: the best matching error, or ``None`` if the iterable was empty .. note:: This function is a heuristic. Its return value may change for a given set of inputs from version to version if better heuristics are added. """ errors = iter(errors) best = next(errors, None) if best is None: return best = max(itertools.chain([best], errors), key=key) while best.context: best = min(best.context, key=key) return best
10,450
Python
26.869333
79
0.585359
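An illustrative sketch (not from the bundled sources) of the two helpers above that are easiest to misread in isolation, ErrorTree and best_match, applied to the errors a validator yields:

# Illustrative usage sketch, not part of the bundled file.
from jsonschema import Draft7Validator
from jsonschema.exceptions import ErrorTree, best_match

schema = {"items": {"type": "integer"}, "maxItems": 2}
instance = [1, "two", 3]

errors = list(Draft7Validator(schema).iter_errors(instance))

# ErrorTree indexes errors by their position inside the instance.
tree = ErrorTree(errors)
print(1 in tree)                       # True: instance[1] failed a check
print(tree[1].errors["type"].message)  # per-index errors are keyed by validator name

# best_match() applies the relevance heuristic defined above to pick one error.
print(best_match(errors).message)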
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_types.py
import numbers from pyrsistent import pmap import attr from jsonschema.compat import int_types, str_types from jsonschema.exceptions import UndefinedTypeCheck def is_array(checker, instance): return isinstance(instance, list) def is_bool(checker, instance): return isinstance(instance, bool) def is_integer(checker, instance): # bool inherits from int, so ensure bools aren't reported as ints if isinstance(instance, bool): return False return isinstance(instance, int_types) def is_null(checker, instance): return instance is None def is_number(checker, instance): # bool inherits from int, so ensure bools aren't reported as ints if isinstance(instance, bool): return False return isinstance(instance, numbers.Number) def is_object(checker, instance): return isinstance(instance, dict) def is_string(checker, instance): return isinstance(instance, str_types) def is_any(checker, instance): return True @attr.s(frozen=True) class TypeChecker(object): """ A ``type`` property checker. A `TypeChecker` performs type checking for an `IValidator`. Type checks to perform are updated using `TypeChecker.redefine` or `TypeChecker.redefine_many` and removed via `TypeChecker.remove`. Each of these return a new `TypeChecker` object. Arguments: type_checkers (dict): The initial mapping of types to their checking functions. """ _type_checkers = attr.ib(default=pmap(), converter=pmap) def is_type(self, instance, type): """ Check if the instance is of the appropriate type. Arguments: instance (object): The instance to check type (str): The name of the type that is expected. Returns: bool: Whether it conformed. Raises: `jsonschema.exceptions.UndefinedTypeCheck`: if type is unknown to this object. """ try: fn = self._type_checkers[type] except KeyError: raise UndefinedTypeCheck(type) return fn(self, instance) def redefine(self, type, fn): """ Produce a new checker with the given type redefined. Arguments: type (str): The name of the type to check. fn (collections.Callable): A function taking exactly two parameters - the type checker calling the function and the instance to check. The function should return true if instance is of this type and false otherwise. Returns: A new `TypeChecker` instance. """ return self.redefine_many({type: fn}) def redefine_many(self, definitions=()): """ Produce a new checker with the given types redefined. Arguments: definitions (dict): A dictionary mapping types to their checking functions. Returns: A new `TypeChecker` instance. """ return attr.evolve( self, type_checkers=self._type_checkers.update(definitions), ) def remove(self, *types): """ Produce a new checker with the given types forgotten. Arguments: types (~collections.Iterable): the names of the types to remove. Returns: A new `TypeChecker` instance Raises: `jsonschema.exceptions.UndefinedTypeCheck`: if any given type is unknown to this object """ checkers = self._type_checkers for each in types: try: checkers = checkers.remove(each) except KeyError: raise UndefinedTypeCheck(each) return attr.evolve(self, type_checkers=checkers) draft3_type_checker = TypeChecker( { u"any": is_any, u"array": is_array, u"boolean": is_bool, u"integer": is_integer, u"object": is_object, u"null": is_null, u"number": is_number, u"string": is_string, }, ) draft4_type_checker = draft3_type_checker.remove(u"any") draft6_type_checker = draft4_type_checker.redefine( u"integer", lambda checker, instance: ( is_integer(checker, instance) or isinstance(instance, float) and instance.is_integer() ), ) draft7_type_checker = draft6_type_checker
4,490
Python
22.761905
72
0.607572
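A brief illustrative sketch of redefining a type check and wiring the result into a validator class with jsonschema.validators.extend, following the TypeChecker docstrings above (the widened "string" check is an invented example):

# Illustrative usage sketch, not part of the bundled file.
from jsonschema import Draft7Validator
from jsonschema import validators

def is_str_or_bytes(checker, instance):
    # Same two-argument signature the TypeChecker above calls with.
    return isinstance(instance, (str, bytes))

custom_checker = Draft7Validator.TYPE_CHECKER.redefine("string", is_str_or_bytes)
CustomValidator = validators.extend(Draft7Validator, type_checker=custom_checker)

print(CustomValidator({"type": "string"}).is_valid(b"raw bytes"))  # True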
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_reflect.py
# -*- test-case-name: twisted.test.test_reflect -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Standardized versions of various cool and/or strange things that you can do with Python's reflection capabilities. """ import sys from jsonschema.compat import PY3 class _NoModuleFound(Exception): """ No module was found because none exists. """ class InvalidName(ValueError): """ The given name is not a dot-separated list of Python objects. """ class ModuleNotFound(InvalidName): """ The module associated with the given name doesn't exist and it can't be imported. """ class ObjectNotFound(InvalidName): """ The object associated with the given name doesn't exist and it can't be imported. """ if PY3: def reraise(exception, traceback): raise exception.with_traceback(traceback) else: exec("""def reraise(exception, traceback): raise exception.__class__, exception, traceback""") reraise.__doc__ = """ Re-raise an exception, with an optional traceback, in a way that is compatible with both Python 2 and Python 3. Note that on Python 3, re-raised exceptions will be mutated, with their C{__traceback__} attribute being set. @param exception: The exception instance. @param traceback: The traceback to use, or C{None} indicating a new traceback. """ def _importAndCheckStack(importName): """ Import the given name as a module, then walk the stack to determine whether the failure was the module not existing, or some code in the module (for example a dependent import) failing. This can be helpful to determine whether any actual application code was run. For example, to distiguish administrative error (entering the wrong module name), from programmer error (writing buggy code in a module that fails to import). @param importName: The name of the module to import. @type importName: C{str} @raise Exception: if something bad happens. This can be any type of exception, since nobody knows what loading some arbitrary code might do. @raise _NoModuleFound: if no module was found. """ try: return __import__(importName) except ImportError: excType, excValue, excTraceback = sys.exc_info() while excTraceback: execName = excTraceback.tb_frame.f_globals["__name__"] # in Python 2 execName is None when an ImportError is encountered, # where in Python 3 execName is equal to the importName. if execName is None or execName == importName: reraise(excValue, excTraceback) excTraceback = excTraceback.tb_next raise _NoModuleFound() def namedAny(name): """ Retrieve a Python object by its fully qualified name from the global Python module namespace. The first part of the name, that describes a module, will be discovered and imported. Each subsequent part of the name is treated as the name of an attribute of the object specified by all of the name which came before it. For example, the fully-qualified name of this object is 'twisted.python.reflect.namedAny'. @type name: L{str} @param name: The name of the object to return. @raise InvalidName: If the name is an empty string, starts or ends with a '.', or is otherwise syntactically incorrect. @raise ModuleNotFound: If the name is syntactically correct but the module it specifies cannot be imported because it does not appear to exist. @raise ObjectNotFound: If the name is syntactically correct, includes at least one '.', but the module it specifies cannot be imported because it does not appear to exist. @raise AttributeError: If an attribute of an object along the way cannot be accessed, or a module along the way is not found. 
@return: the Python object identified by 'name'. """ if not name: raise InvalidName('Empty module name') names = name.split('.') # if the name starts or ends with a '.' or contains '..', the __import__ # will raise an 'Empty module name' error. This will provide a better error # message. if '' in names: raise InvalidName( "name must be a string giving a '.'-separated list of Python " "identifiers, not %r" % (name,)) topLevelPackage = None moduleNames = names[:] while not topLevelPackage: if moduleNames: trialname = '.'.join(moduleNames) try: topLevelPackage = _importAndCheckStack(trialname) except _NoModuleFound: moduleNames.pop() else: if len(names) == 1: raise ModuleNotFound("No module named %r" % (name,)) else: raise ObjectNotFound('%r does not name an object' % (name,)) obj = topLevelPackage for n in names[1:]: obj = getattr(obj, n) return obj
5,023
Python
31.205128
79
0.66514
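A quick illustrative sketch of the module's main entry point, namedAny, which resolves dotted names into Python objects:

# Illustrative usage sketch, not part of the bundled file.
from jsonschema._reflect import namedAny

# Imports "os.path" as needed, then walks the remaining attributes.
dirname = namedAny("os.path.dirname")
print(dirname("/tmp/example.txt"))  # /tmp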
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_validators.py
import re from jsonschema._utils import ( ensure_list, equal, extras_msg, find_additional_properties, types_msg, unbool, uniq, ) from jsonschema.exceptions import FormatError, ValidationError from jsonschema.compat import iteritems def patternProperties(validator, patternProperties, instance, schema): if not validator.is_type(instance, "object"): return for pattern, subschema in iteritems(patternProperties): for k, v in iteritems(instance): if re.search(pattern, k): for error in validator.descend( v, subschema, path=k, schema_path=pattern, ): yield error def propertyNames(validator, propertyNames, instance, schema): if not validator.is_type(instance, "object"): return for property in instance: for error in validator.descend( instance=property, schema=propertyNames, ): yield error def additionalProperties(validator, aP, instance, schema): if not validator.is_type(instance, "object"): return extras = set(find_additional_properties(instance, schema)) if validator.is_type(aP, "object"): for extra in extras: for error in validator.descend(instance[extra], aP, path=extra): yield error elif not aP and extras: if "patternProperties" in schema: patterns = sorted(schema["patternProperties"]) if len(extras) == 1: verb = "does" else: verb = "do" error = "%s %s not match any of the regexes: %s" % ( ", ".join(map(repr, sorted(extras))), verb, ", ".join(map(repr, patterns)), ) yield ValidationError(error) else: error = "Additional properties are not allowed (%s %s unexpected)" yield ValidationError(error % extras_msg(extras)) def items(validator, items, instance, schema): if not validator.is_type(instance, "array"): return if validator.is_type(items, "array"): for (index, item), subschema in zip(enumerate(instance), items): for error in validator.descend( item, subschema, path=index, schema_path=index, ): yield error else: for index, item in enumerate(instance): for error in validator.descend(item, items, path=index): yield error def additionalItems(validator, aI, instance, schema): if ( not validator.is_type(instance, "array") or validator.is_type(schema.get("items", {}), "object") ): return len_items = len(schema.get("items", [])) if validator.is_type(aI, "object"): for index, item in enumerate(instance[len_items:], start=len_items): for error in validator.descend(item, aI, path=index): yield error elif not aI and len(instance) > len(schema.get("items", [])): error = "Additional items are not allowed (%s %s unexpected)" yield ValidationError( error % extras_msg(instance[len(schema.get("items", [])):]) ) def const(validator, const, instance, schema): if not equal(instance, const): yield ValidationError("%r was expected" % (const,)) def contains(validator, contains, instance, schema): if not validator.is_type(instance, "array"): return if not any(validator.is_valid(element, contains) for element in instance): yield ValidationError( "None of %r are valid under the given schema" % (instance,) ) def exclusiveMinimum(validator, minimum, instance, schema): if not validator.is_type(instance, "number"): return if instance <= minimum: yield ValidationError( "%r is less than or equal to the minimum of %r" % ( instance, minimum, ), ) def exclusiveMaximum(validator, maximum, instance, schema): if not validator.is_type(instance, "number"): return if instance >= maximum: yield ValidationError( "%r is greater than or equal to the maximum of %r" % ( instance, maximum, ), ) def minimum(validator, minimum, instance, schema): if not validator.is_type(instance, "number"): return if instance < minimum: yield 
ValidationError( "%r is less than the minimum of %r" % (instance, minimum) ) def maximum(validator, maximum, instance, schema): if not validator.is_type(instance, "number"): return if instance > maximum: yield ValidationError( "%r is greater than the maximum of %r" % (instance, maximum) ) def multipleOf(validator, dB, instance, schema): if not validator.is_type(instance, "number"): return if isinstance(dB, float): quotient = instance / dB failed = int(quotient) != quotient else: failed = instance % dB if failed: yield ValidationError("%r is not a multiple of %r" % (instance, dB)) def minItems(validator, mI, instance, schema): if validator.is_type(instance, "array") and len(instance) < mI: yield ValidationError("%r is too short" % (instance,)) def maxItems(validator, mI, instance, schema): if validator.is_type(instance, "array") and len(instance) > mI: yield ValidationError("%r is too long" % (instance,)) def uniqueItems(validator, uI, instance, schema): if ( uI and validator.is_type(instance, "array") and not uniq(instance) ): yield ValidationError("%r has non-unique elements" % (instance,)) def pattern(validator, patrn, instance, schema): if ( validator.is_type(instance, "string") and not re.search(patrn, instance) ): yield ValidationError("%r does not match %r" % (instance, patrn)) def format(validator, format, instance, schema): if validator.format_checker is not None: try: validator.format_checker.check(instance, format) except FormatError as error: yield ValidationError(error.message, cause=error.cause) def minLength(validator, mL, instance, schema): if validator.is_type(instance, "string") and len(instance) < mL: yield ValidationError("%r is too short" % (instance,)) def maxLength(validator, mL, instance, schema): if validator.is_type(instance, "string") and len(instance) > mL: yield ValidationError("%r is too long" % (instance,)) def dependencies(validator, dependencies, instance, schema): if not validator.is_type(instance, "object"): return for property, dependency in iteritems(dependencies): if property not in instance: continue if validator.is_type(dependency, "array"): for each in dependency: if each not in instance: message = "%r is a dependency of %r" yield ValidationError(message % (each, property)) else: for error in validator.descend( instance, dependency, schema_path=property, ): yield error def enum(validator, enums, instance, schema): if instance == 0 or instance == 1: unbooled = unbool(instance) if all(unbooled != unbool(each) for each in enums): yield ValidationError("%r is not one of %r" % (instance, enums)) elif instance not in enums: yield ValidationError("%r is not one of %r" % (instance, enums)) def ref(validator, ref, instance, schema): resolve = getattr(validator.resolver, "resolve", None) if resolve is None: with validator.resolver.resolving(ref) as resolved: for error in validator.descend(instance, resolved): yield error else: scope, resolved = validator.resolver.resolve(ref) validator.resolver.push_scope(scope) try: for error in validator.descend(instance, resolved): yield error finally: validator.resolver.pop_scope() def type(validator, types, instance, schema): types = ensure_list(types) if not any(validator.is_type(instance, type) for type in types): yield ValidationError(types_msg(instance, types)) def properties(validator, properties, instance, schema): if not validator.is_type(instance, "object"): return for property, subschema in iteritems(properties): if property in instance: for error in validator.descend( instance[property], subschema, 
path=property, schema_path=property, ): yield error def required(validator, required, instance, schema): if not validator.is_type(instance, "object"): return for property in required: if property not in instance: yield ValidationError("%r is a required property" % property) def minProperties(validator, mP, instance, schema): if validator.is_type(instance, "object") and len(instance) < mP: yield ValidationError( "%r does not have enough properties" % (instance,) ) def maxProperties(validator, mP, instance, schema): if not validator.is_type(instance, "object"): return if validator.is_type(instance, "object") and len(instance) > mP: yield ValidationError("%r has too many properties" % (instance,)) def allOf(validator, allOf, instance, schema): for index, subschema in enumerate(allOf): for error in validator.descend(instance, subschema, schema_path=index): yield error def anyOf(validator, anyOf, instance, schema): all_errors = [] for index, subschema in enumerate(anyOf): errs = list(validator.descend(instance, subschema, schema_path=index)) if not errs: break all_errors.extend(errs) else: yield ValidationError( "%r is not valid under any of the given schemas" % (instance,), context=all_errors, ) def oneOf(validator, oneOf, instance, schema): subschemas = enumerate(oneOf) all_errors = [] for index, subschema in subschemas: errs = list(validator.descend(instance, subschema, schema_path=index)) if not errs: first_valid = subschema break all_errors.extend(errs) else: yield ValidationError( "%r is not valid under any of the given schemas" % (instance,), context=all_errors, ) more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)] if more_valid: more_valid.append(first_valid) reprs = ", ".join(repr(schema) for schema in more_valid) yield ValidationError( "%r is valid under each of %s" % (instance, reprs) ) def not_(validator, not_schema, instance, schema): if validator.is_valid(instance, not_schema): yield ValidationError( "%r is not allowed for %r" % (not_schema, instance) ) def if_(validator, if_schema, instance, schema): if validator.is_valid(instance, if_schema): if u"then" in schema: then = schema[u"then"] for error in validator.descend(instance, then, schema_path="then"): yield error elif u"else" in schema: else_ = schema[u"else"] for error in validator.descend(instance, else_, schema_path="else"): yield error
11,703
Python
30.294118
79
0.607366
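The keyword functions in the file above (minimum, type, oneOf, and so on) are not called directly; a validator class looks them up and drives them through iter_errors. A minimal sketch of how their errors surface, with a schema and instance that are illustrative assumptions, not taken from the file:

from jsonschema import Draft7Validator

# Illustrative schema/instance chosen to trigger the "minimum" keyword function above.
schema = {
    "type": "object",
    "properties": {"count": {"type": "integer", "minimum": 3}},
}
validator = Draft7Validator(schema)

for error in validator.iter_errors({"count": 1}):
    # Each keyword function yields ValidationError objects like this one,
    # e.g. minimum() yields "1 is less than the minimum of 3".
    print(list(error.schema_path), error.message)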
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/__init__.py
""" An implementation of JSON Schema for Python The main functionality is provided by the validator classes for each of the supported JSON Schema versions. Most commonly, `validate` is the quickest way to simply validate a given instance under a schema, and will create a validator for you. """ from jsonschema.exceptions import ( ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError ) from jsonschema._format import ( FormatChecker, draft3_format_checker, draft4_format_checker, draft6_format_checker, draft7_format_checker, ) from jsonschema._types import TypeChecker from jsonschema.validators import ( Draft3Validator, Draft4Validator, Draft6Validator, Draft7Validator, RefResolver, validate, ) try: from importlib import metadata except ImportError: # for Python<3.8 import importlib_metadata as metadata __version__ = metadata.version("jsonschema")
934
Python
25.714285
76
0.761242
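As the package docstring above says, validate is the quickest entry point. A minimal sketch; the schema and instances are made-up values for illustration:

import jsonschema

schema = {"type": "array", "items": {"type": "number"}, "maxItems": 3}

jsonschema.validate(instance=[1, 2.5, 3], schema=schema)  # silently succeeds

try:
    jsonschema.validate(instance=[1, "two"], schema=schema)
except jsonschema.ValidationError as err:
    print(err.message)  # 'two' is not of type 'number'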
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_legacy_validators.py
from jsonschema import _utils from jsonschema.compat import iteritems from jsonschema.exceptions import ValidationError def dependencies_draft3(validator, dependencies, instance, schema): if not validator.is_type(instance, "object"): return for property, dependency in iteritems(dependencies): if property not in instance: continue if validator.is_type(dependency, "object"): for error in validator.descend( instance, dependency, schema_path=property, ): yield error elif validator.is_type(dependency, "string"): if dependency not in instance: yield ValidationError( "%r is a dependency of %r" % (dependency, property) ) else: for each in dependency: if each not in instance: message = "%r is a dependency of %r" yield ValidationError(message % (each, property)) def disallow_draft3(validator, disallow, instance, schema): for disallowed in _utils.ensure_list(disallow): if validator.is_valid(instance, {"type": [disallowed]}): yield ValidationError( "%r is disallowed for %r" % (disallowed, instance) ) def extends_draft3(validator, extends, instance, schema): if validator.is_type(extends, "object"): for error in validator.descend(instance, extends): yield error return for index, subschema in enumerate(extends): for error in validator.descend(instance, subschema, schema_path=index): yield error def items_draft3_draft4(validator, items, instance, schema): if not validator.is_type(instance, "array"): return if validator.is_type(items, "object"): for index, item in enumerate(instance): for error in validator.descend(item, items, path=index): yield error else: for (index, item), subschema in zip(enumerate(instance), items): for error in validator.descend( item, subschema, path=index, schema_path=index, ): yield error def minimum_draft3_draft4(validator, minimum, instance, schema): if not validator.is_type(instance, "number"): return if schema.get("exclusiveMinimum", False): failed = instance <= minimum cmp = "less than or equal to" else: failed = instance < minimum cmp = "less than" if failed: yield ValidationError( "%r is %s the minimum of %r" % (instance, cmp, minimum) ) def maximum_draft3_draft4(validator, maximum, instance, schema): if not validator.is_type(instance, "number"): return if schema.get("exclusiveMaximum", False): failed = instance >= maximum cmp = "greater than or equal to" else: failed = instance > maximum cmp = "greater than" if failed: yield ValidationError( "%r is %s the maximum of %r" % (instance, cmp, maximum) ) def properties_draft3(validator, properties, instance, schema): if not validator.is_type(instance, "object"): return for property, subschema in iteritems(properties): if property in instance: for error in validator.descend( instance[property], subschema, path=property, schema_path=property, ): yield error elif subschema.get("required", False): error = ValidationError("%r is a required property" % property) error._set( validator="required", validator_value=subschema["required"], instance=instance, schema=schema, ) error.path.appendleft(property) error.schema_path.extend([property, "required"]) yield error def type_draft3(validator, types, instance, schema): types = _utils.ensure_list(types) all_errors = [] for index, type in enumerate(types): if validator.is_type(type, "object"): errors = list(validator.descend(instance, type, schema_path=index)) if not errors: return all_errors.extend(errors) else: if validator.is_type(instance, type): return else: yield ValidationError( _utils.types_msg(instance, types), context=all_errors, )
4,584
Python
31.288732
79
0.585079
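A short sketch of the draft-3 behaviour handled by properties_draft3 above, where "required" is a boolean inside the property subschema rather than a top-level list; the schema here is an assumption for illustration only:

from jsonschema import Draft3Validator

schema = {"properties": {"name": {"type": "string", "required": True}}}

validator = Draft3Validator(schema)
print(validator.is_valid({"name": "Alice"}))  # True
print(validator.is_valid({}))                 # False: 'name' is a required property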
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/_utils.py
import itertools import json import pkgutil import re from jsonschema.compat import MutableMapping, str_types, urlsplit class URIDict(MutableMapping): """ Dictionary which uses normalized URIs as keys. """ def normalize(self, uri): return urlsplit(uri).geturl() def __init__(self, *args, **kwargs): self.store = dict() self.store.update(*args, **kwargs) def __getitem__(self, uri): return self.store[self.normalize(uri)] def __setitem__(self, uri, value): self.store[self.normalize(uri)] = value def __delitem__(self, uri): del self.store[self.normalize(uri)] def __iter__(self): return iter(self.store) def __len__(self): return len(self.store) def __repr__(self): return repr(self.store) class Unset(object): """ An as-of-yet unset attribute or unprovided default parameter. """ def __repr__(self): return "<unset>" def load_schema(name): """ Load a schema from ./schemas/``name``.json and return it. """ data = pkgutil.get_data("jsonschema", "schemas/{0}.json".format(name)) return json.loads(data.decode("utf-8")) def indent(string, times=1): """ A dumb version of `textwrap.indent` from Python 3.3. """ return "\n".join(" " * (4 * times) + line for line in string.splitlines()) def format_as_index(indices): """ Construct a single string containing indexing operations for the indices. For example, [1, 2, "foo"] -> [1][2]["foo"] Arguments: indices (sequence): The indices to format. """ if not indices: return "" return "[%s]" % "][".join(repr(index) for index in indices) def find_additional_properties(instance, schema): """ Return the set of additional properties for the given ``instance``. Weeds out properties that should have been validated by ``properties`` and / or ``patternProperties``. Assumes ``instance`` is dict-like already. """ properties = schema.get("properties", {}) patterns = "|".join(schema.get("patternProperties", {})) for property in instance: if property not in properties: if patterns and re.search(patterns, property): continue yield property def extras_msg(extras): """ Create an error message for extra items or properties. """ if len(extras) == 1: verb = "was" else: verb = "were" return ", ".join(repr(extra) for extra in extras), verb def types_msg(instance, types): """ Create an error message for a failure to match the given types. If the ``instance`` is an object and contains a ``name`` property, it will be considered to be a description of that object and used as its type. Otherwise the message is simply the reprs of the given ``types``. """ reprs = [] for type in types: try: reprs.append(repr(type["name"])) except Exception: reprs.append(repr(type)) return "%r is not of type %s" % (instance, ", ".join(reprs)) def flatten(suitable_for_isinstance): """ isinstance() can accept a bunch of really annoying different types: * a single type * a tuple of types * an arbitrary nested tree of tuples Return a flattened tuple of the given argument. """ types = set() if not isinstance(suitable_for_isinstance, tuple): suitable_for_isinstance = (suitable_for_isinstance,) for thing in suitable_for_isinstance: if isinstance(thing, tuple): types.update(flatten(thing)) else: types.add(thing) return tuple(types) def ensure_list(thing): """ Wrap ``thing`` in a list if it's a single str. Otherwise, return it unchanged. """ if isinstance(thing, str_types): return [thing] return thing def equal(one, two): """ Check if two things are equal, but evade booleans and ints being equal. 
""" return unbool(one) == unbool(two) def unbool(element, true=object(), false=object()): """ A hack to make True and 1 and False and 0 unique for ``uniq``. """ if element is True: return true elif element is False: return false return element def uniq(container): """ Check if all of a container's elements are unique. Successively tries first to rely that the elements are hashable, then falls back on them being sortable, and finally falls back on brute force. """ try: return len(set(unbool(i) for i in container)) == len(container) except TypeError: try: sort = sorted(unbool(i) for i in container) sliced = itertools.islice(sort, 1, None) for i, j in zip(sort, sliced): if i == j: return False except (NotImplementedError, TypeError): seen = [] for e in container: e = unbool(e) if e in seen: return False seen.append(e) return True
5,168
Python
23.267606
78
0.595395
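The helpers above are internal, so importing them directly is done here purely to illustrate their behaviour; a small sketch:

from jsonschema import _utils

print(_utils.uniq([0, False]))                # True: unbool() keeps 0 and False distinct
print(_utils.uniq([1, 1.0]))                  # False: genuinely equal numbers
print(_utils.ensure_list("string"))           # ['string']
print(_utils.format_as_index([1, 2, "foo"]))  # [1][2]['foo']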
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/compat.py
""" Python 2/3 compatibility helpers. Note: This module is *not* public API. """ import contextlib import operator import sys try: from collections.abc import MutableMapping, Sequence # noqa except ImportError: from collections import MutableMapping, Sequence # noqa PY3 = sys.version_info[0] >= 3 if PY3: zip = zip from functools import lru_cache from io import StringIO as NativeIO from urllib.parse import ( unquote, urljoin, urlunsplit, SplitResult, urlsplit ) from urllib.request import pathname2url, urlopen str_types = str, int_types = int, iteritems = operator.methodcaller("items") else: from itertools import izip as zip # noqa from io import BytesIO as NativeIO from urlparse import urljoin, urlunsplit, SplitResult, urlsplit from urllib import pathname2url, unquote # noqa import urllib2 # noqa def urlopen(*args, **kwargs): return contextlib.closing(urllib2.urlopen(*args, **kwargs)) str_types = basestring int_types = int, long iteritems = operator.methodcaller("iteritems") from functools32 import lru_cache def urldefrag(url): if "#" in url: s, n, p, q, frag = urlsplit(url) defrag = urlunsplit((s, n, p, q, "")) else: defrag = url frag = "" return defrag, frag # flake8: noqa
1,353
Python
23.178571
67
0.670362
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/__main__.py
from jsonschema.cli import main main()
39
Python
12.333329
31
0.794872
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/cli.py
""" The ``jsonschema`` command line. """ from __future__ import absolute_import import argparse import json import sys from jsonschema import __version__ from jsonschema._reflect import namedAny from jsonschema.validators import validator_for def _namedAnyWithDefault(name): if "." not in name: name = "jsonschema." + name return namedAny(name) def _json_file(path): with open(path) as file: return json.load(file) parser = argparse.ArgumentParser( description="JSON Schema Validation CLI", ) parser.add_argument( "-i", "--instance", action="append", dest="instances", type=_json_file, help=( "a path to a JSON instance (i.e. filename.json) " "to validate (may be specified multiple times)" ), ) parser.add_argument( "-F", "--error-format", default="{error.instance}: {error.message}\n", help=( "the format to use for each error output message, specified in " "a form suitable for passing to str.format, which will be called " "with 'error' for each error" ), ) parser.add_argument( "-V", "--validator", type=_namedAnyWithDefault, help=( "the fully qualified object name of a validator to use, or, for " "validators that are registered with jsonschema, simply the name " "of the class." ), ) parser.add_argument( "--version", action="version", version=__version__, ) parser.add_argument( "schema", help="the JSON Schema to validate with (i.e. schema.json)", type=_json_file, ) def parse_args(args): arguments = vars(parser.parse_args(args=args or ["--help"])) if arguments["validator"] is None: arguments["validator"] = validator_for(arguments["schema"]) return arguments def main(args=sys.argv[1:]): sys.exit(run(arguments=parse_args(args=args))) def run(arguments, stdout=sys.stdout, stderr=sys.stderr): error_format = arguments["error_format"] validator = arguments["validator"](schema=arguments["schema"]) validator.check_schema(arguments["schema"]) errored = False for instance in arguments["instances"] or (): for error in validator.iter_errors(instance): stderr.write(error_format.format(error=error)) errored = True return errored
2,310
Python
24.395604
74
0.649784
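The command line above can also be driven programmatically through run(). A sketch roughly equivalent to `jsonschema -i instance.json schema.json`, except that the schema and instance are inlined here instead of being read from files:

from jsonschema import Draft4Validator, cli

exit_code = cli.run(
    {
        "validator": Draft4Validator,
        "schema": {"type": "integer"},
        "instances": ["not an integer"],
        "error_format": "{error.instance}: {error.message}\n",
    }
)
print(exit_code)  # truthy, because the instance failed validation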
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_jsonschema_test_suite.py
""" Test runner for the JSON Schema official test suite Tests comprehensive correctness of each draft's validator. See https://github.com/json-schema-org/JSON-Schema-Test-Suite for details. """ import sys import warnings from jsonschema import ( Draft3Validator, Draft4Validator, Draft6Validator, Draft7Validator, draft3_format_checker, draft4_format_checker, draft6_format_checker, draft7_format_checker, ) from jsonschema.tests._helpers import bug from jsonschema.tests._suite import Suite from jsonschema.validators import _DEPRECATED_DEFAULT_TYPES, create SUITE = Suite() DRAFT3 = SUITE.version(name="draft3") DRAFT4 = SUITE.version(name="draft4") DRAFT6 = SUITE.version(name="draft6") DRAFT7 = SUITE.version(name="draft7") def skip(message, **kwargs): def skipper(test): if all(value == getattr(test, attr) for attr, value in kwargs.items()): return message return skipper def missing_format(checker): def missing_format(test): schema = test.schema if schema is True or schema is False or "format" not in schema: return if schema["format"] not in checker.checkers: return "Format checker {0!r} not found.".format(schema["format"]) return missing_format is_narrow_build = sys.maxunicode == 2 ** 16 - 1 if is_narrow_build: # pragma: no cover message = "Not running surrogate Unicode case, this Python is narrow." def narrow_unicode_build(test): # pragma: no cover return skip( message=message, description="one supplementary Unicode code point is not long enough", )(test) or skip( message=message, description="two supplementary Unicode code points is long enough", )(test) else: def narrow_unicode_build(test): # pragma: no cover return TestDraft3 = DRAFT3.to_unittest_testcase( DRAFT3.tests(), DRAFT3.optional_tests_of(name="bignum"), DRAFT3.optional_tests_of(name="format"), DRAFT3.optional_tests_of(name="zeroTerminatedFloats"), Validator=Draft3Validator, format_checker=draft3_format_checker, skip=lambda test: ( narrow_unicode_build(test) or missing_format(draft3_format_checker)(test) or skip( message="Upstream bug in strict_rfc3339", subject="format", description="case-insensitive T and Z", )(test) ), ) TestDraft4 = DRAFT4.to_unittest_testcase( DRAFT4.tests(), DRAFT4.optional_tests_of(name="bignum"), DRAFT4.optional_tests_of(name="format"), DRAFT4.optional_tests_of(name="zeroTerminatedFloats"), Validator=Draft4Validator, format_checker=draft4_format_checker, skip=lambda test: ( narrow_unicode_build(test) or missing_format(draft4_format_checker)(test) or skip( message=bug(), subject="ref", case_description="Recursive references between schemas", )(test) or skip( message=bug(371), subject="ref", case_description="Location-independent identifier", )(test) or skip( message=bug(371), subject="ref", case_description=( "Location-independent identifier with absolute URI" ), )(test) or skip( message=bug(371), subject="ref", case_description=( "Location-independent identifier with base URI change in subschema" ), )(test) or skip( message=bug(), subject="refRemote", case_description="base URI change - change folder in subschema", )(test) or skip( message="Upstream bug in strict_rfc3339", subject="format", description="case-insensitive T and Z", )(test) ), ) TestDraft6 = DRAFT6.to_unittest_testcase( DRAFT6.tests(), DRAFT6.optional_tests_of(name="bignum"), DRAFT6.optional_tests_of(name="format"), DRAFT6.optional_tests_of(name="zeroTerminatedFloats"), Validator=Draft6Validator, format_checker=draft6_format_checker, skip=lambda test: ( narrow_unicode_build(test) or missing_format(draft6_format_checker)(test) or skip( 
message=bug(), subject="ref", case_description="Recursive references between schemas", )(test) or skip( message=bug(371), subject="ref", case_description="Location-independent identifier", )(test) or skip( message=bug(371), subject="ref", case_description=( "Location-independent identifier with absolute URI" ), )(test) or skip( message=bug(371), subject="ref", case_description=( "Location-independent identifier with base URI change in subschema" ), )(test) or skip( message=bug(), subject="refRemote", case_description="base URI change - change folder in subschema", )(test) or skip( message="Upstream bug in strict_rfc3339", subject="format", description="case-insensitive T and Z", )(test) ), ) TestDraft7 = DRAFT7.to_unittest_testcase( DRAFT7.tests(), DRAFT7.format_tests(), DRAFT7.optional_tests_of(name="bignum"), DRAFT7.optional_tests_of(name="content"), DRAFT7.optional_tests_of(name="zeroTerminatedFloats"), Validator=Draft7Validator, format_checker=draft7_format_checker, skip=lambda test: ( narrow_unicode_build(test) or missing_format(draft7_format_checker)(test) or skip( message=bug(), subject="ref", case_description="Recursive references between schemas", )(test) or skip( message=bug(371), subject="ref", case_description="Location-independent identifier", )(test) or skip( message=bug(371), subject="ref", case_description=( "Location-independent identifier with absolute URI" ), )(test) or skip( message=bug(371), subject="ref", case_description=( "Location-independent identifier with base URI change in subschema" ), )(test) or skip( message=bug(), subject="refRemote", case_description="base URI change - change folder in subschema", )(test) or skip( message="Upstream bug in strict_rfc3339", subject="date-time", description="case-insensitive T and Z", )(test) or skip( message=bug(593), subject="content", case_description=( "validation of string-encoded content based on media type" ), )(test) or skip( message=bug(593), subject="content", case_description="validation of binary string-encoding", )(test) or skip( message=bug(593), subject="content", case_description=( "validation of binary-encoded media type documents" ), )(test) ), ) with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) TestDraft3LegacyTypeCheck = DRAFT3.to_unittest_testcase( # Interestingly the any part couldn't really be done w/the old API. ( (test for test in each if test.schema != {"type": "any"}) for each in DRAFT3.tests_of(name="type") ), name="TestDraft3LegacyTypeCheck", Validator=create( meta_schema=Draft3Validator.META_SCHEMA, validators=Draft3Validator.VALIDATORS, default_types=_DEPRECATED_DEFAULT_TYPES, ), ) TestDraft4LegacyTypeCheck = DRAFT4.to_unittest_testcase( DRAFT4.tests_of(name="type"), name="TestDraft4LegacyTypeCheck", Validator=create( meta_schema=Draft4Validator.META_SCHEMA, validators=Draft4Validator.VALIDATORS, default_types=_DEPRECATED_DEFAULT_TYPES, ), )
8,464
Python
29.44964
83
0.586366
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_cli.py
from unittest import TestCase import json import subprocess import sys from jsonschema import Draft4Validator, ValidationError, cli, __version__ from jsonschema.compat import NativeIO from jsonschema.exceptions import SchemaError def fake_validator(*errors): errors = list(reversed(errors)) class FakeValidator(object): def __init__(self, *args, **kwargs): pass def iter_errors(self, instance): if errors: return errors.pop() return [] def check_schema(self, schema): pass return FakeValidator class TestParser(TestCase): FakeValidator = fake_validator() instance_file = "foo.json" schema_file = "schema.json" def setUp(self): cli.open = self.fake_open self.addCleanup(delattr, cli, "open") def fake_open(self, path): if path == self.instance_file: contents = "" elif path == self.schema_file: contents = {} else: # pragma: no cover self.fail("What is {!r}".format(path)) return NativeIO(json.dumps(contents)) def test_find_validator_by_fully_qualified_object_name(self): arguments = cli.parse_args( [ "--validator", "jsonschema.tests.test_cli.TestParser.FakeValidator", "--instance", self.instance_file, self.schema_file, ] ) self.assertIs(arguments["validator"], self.FakeValidator) def test_find_validator_in_jsonschema(self): arguments = cli.parse_args( [ "--validator", "Draft4Validator", "--instance", self.instance_file, self.schema_file, ] ) self.assertIs(arguments["validator"], Draft4Validator) class TestCLI(TestCase): def test_draft3_schema_draft4_validator(self): stdout, stderr = NativeIO(), NativeIO() with self.assertRaises(SchemaError): cli.run( { "validator": Draft4Validator, "schema": { "anyOf": [ {"minimum": 20}, {"type": "string"}, {"required": True}, ], }, "instances": [1], "error_format": "{error.message}", }, stdout=stdout, stderr=stderr, ) def test_successful_validation(self): stdout, stderr = NativeIO(), NativeIO() exit_code = cli.run( { "validator": fake_validator(), "schema": {}, "instances": [1], "error_format": "{error.message}", }, stdout=stdout, stderr=stderr, ) self.assertFalse(stdout.getvalue()) self.assertFalse(stderr.getvalue()) self.assertEqual(exit_code, 0) def test_unsuccessful_validation(self): error = ValidationError("I am an error!", instance=1) stdout, stderr = NativeIO(), NativeIO() exit_code = cli.run( { "validator": fake_validator([error]), "schema": {}, "instances": [1], "error_format": "{error.instance} - {error.message}", }, stdout=stdout, stderr=stderr, ) self.assertFalse(stdout.getvalue()) self.assertEqual(stderr.getvalue(), "1 - I am an error!") self.assertEqual(exit_code, 1) def test_unsuccessful_validation_multiple_instances(self): first_errors = [ ValidationError("9", instance=1), ValidationError("8", instance=1), ] second_errors = [ValidationError("7", instance=2)] stdout, stderr = NativeIO(), NativeIO() exit_code = cli.run( { "validator": fake_validator(first_errors, second_errors), "schema": {}, "instances": [1, 2], "error_format": "{error.instance} - {error.message}\t", }, stdout=stdout, stderr=stderr, ) self.assertFalse(stdout.getvalue()) self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t") self.assertEqual(exit_code, 1) def test_version(self): version = subprocess.check_output( [sys.executable, "-m", "jsonschema", "--version"], stderr=subprocess.STDOUT, ) version = version.decode("utf-8").strip() self.assertEqual(version, __version__)
4,727
Python
30.105263
73
0.512376
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/_suite.py
""" Python representations of the JSON Schema Test Suite tests. """ from functools import partial import json import os import re import subprocess import sys import unittest from twisted.python.filepath import FilePath import attr from jsonschema.compat import PY3 from jsonschema.validators import validators import jsonschema def _find_suite(): root = os.environ.get("JSON_SCHEMA_TEST_SUITE") if root is not None: return FilePath(root) root = FilePath(jsonschema.__file__).parent().sibling("json") if not root.isdir(): # pragma: no cover raise ValueError( ( "Can't find the JSON-Schema-Test-Suite directory. " "Set the 'JSON_SCHEMA_TEST_SUITE' environment " "variable or run the tests from alongside a checkout " "of the suite." ), ) return root @attr.s(hash=True) class Suite(object): _root = attr.ib(default=attr.Factory(_find_suite)) def _remotes(self): jsonschema_suite = self._root.descendant(["bin", "jsonschema_suite"]) remotes = subprocess.check_output( [sys.executable, jsonschema_suite.path, "remotes"], ) return { "http://localhost:1234/" + name: schema for name, schema in json.loads(remotes.decode("utf-8")).items() } def benchmark(self, runner): # pragma: no cover for name in validators: self.version(name=name).benchmark(runner=runner) def version(self, name): return Version( name=name, path=self._root.descendant(["tests", name]), remotes=self._remotes(), ) @attr.s(hash=True) class Version(object): _path = attr.ib() _remotes = attr.ib() name = attr.ib() def benchmark(self, runner, **kwargs): # pragma: no cover for suite in self.tests(): for test in suite: runner.bench_func( test.fully_qualified_name, partial(test.validate_ignoring_errors, **kwargs), ) def tests(self): return ( test for child in self._path.globChildren("*.json") for test in self._tests_in( subject=child.basename()[:-5], path=child, ) ) def format_tests(self): path = self._path.descendant(["optional", "format"]) return ( test for child in path.globChildren("*.json") for test in self._tests_in( subject=child.basename()[:-5], path=child, ) ) def tests_of(self, name): return self._tests_in( subject=name, path=self._path.child(name + ".json"), ) def optional_tests_of(self, name): return self._tests_in( subject=name, path=self._path.descendant(["optional", name + ".json"]), ) def to_unittest_testcase(self, *suites, **kwargs): name = kwargs.pop("name", "Test" + self.name.title()) methods = { test.method_name: test.to_unittest_method(**kwargs) for suite in suites for tests in suite for test in tests } cls = type(name, (unittest.TestCase,), methods) try: cls.__module__ = _someone_save_us_the_module_of_the_caller() except Exception: # pragma: no cover # We're doing crazy things, so if they go wrong, like a function # behaving differently on some other interpreter, just make them # not happen. 
pass return cls def _tests_in(self, subject, path): for each in json.loads(path.getContent().decode("utf-8")): yield ( _Test( version=self, subject=subject, case_description=each["description"], schema=each["schema"], remotes=self._remotes, **test ) for test in each["tests"] ) @attr.s(hash=True, repr=False) class _Test(object): version = attr.ib() subject = attr.ib() case_description = attr.ib() description = attr.ib() data = attr.ib() schema = attr.ib(repr=False) valid = attr.ib() _remotes = attr.ib() def __repr__(self): # pragma: no cover return "<Test {}>".format(self.fully_qualified_name) @property def fully_qualified_name(self): # pragma: no cover return " > ".join( [ self.version.name, self.subject, self.case_description, self.description, ] ) @property def method_name(self): delimiters = r"[\W\- ]+" name = "test_%s_%s_%s" % ( re.sub(delimiters, "_", self.subject), re.sub(delimiters, "_", self.case_description), re.sub(delimiters, "_", self.description), ) if not PY3: # pragma: no cover name = name.encode("utf-8") return name def to_unittest_method(self, skip=lambda test: None, **kwargs): if self.valid: def fn(this): self.validate(**kwargs) else: def fn(this): with this.assertRaises(jsonschema.ValidationError): self.validate(**kwargs) fn.__name__ = self.method_name reason = skip(self) return unittest.skipIf(reason is not None, reason)(fn) def validate(self, Validator, **kwargs): resolver = jsonschema.RefResolver.from_schema( schema=self.schema, store=self._remotes, id_of=Validator.ID_OF, ) jsonschema.validate( instance=self.data, schema=self.schema, cls=Validator, resolver=resolver, **kwargs ) def validate_ignoring_errors(self, Validator): # pragma: no cover try: self.validate(Validator=Validator) except jsonschema.ValidationError: pass def _someone_save_us_the_module_of_the_caller(): """ The FQON of the module 2nd stack frames up from here. This is intended to allow us to dynamicallly return test case classes that are indistinguishable from being defined in the module that wants them. Otherwise, trial will mis-print the FQON, and copy pasting it won't re-run the class that really is running. Save us all, this is all so so so so so terrible. """ return sys._getframe(2).f_globals["__name__"]
6,728
Python
27.0375
78
0.546225
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_exceptions.py
from unittest import TestCase import textwrap from jsonschema import Draft4Validator, exceptions from jsonschema.compat import PY3 class TestBestMatch(TestCase): def best_match(self, errors): errors = list(errors) best = exceptions.best_match(errors) reversed_best = exceptions.best_match(reversed(errors)) msg = "Didn't return a consistent best match!\nGot: {0}\n\nThen: {1}" self.assertEqual( best._contents(), reversed_best._contents(), msg=msg.format(best, reversed_best), ) return best def test_shallower_errors_are_better_matches(self): validator = Draft4Validator( { "properties": { "foo": { "minProperties": 2, "properties": {"bar": {"type": "object"}}, }, }, }, ) best = self.best_match(validator.iter_errors({"foo": {"bar": []}})) self.assertEqual(best.validator, "minProperties") def test_oneOf_and_anyOf_are_weak_matches(self): """ A property you *must* match is probably better than one you have to match a part of. """ validator = Draft4Validator( { "minProperties": 2, "anyOf": [{"type": "string"}, {"type": "number"}], "oneOf": [{"type": "string"}, {"type": "number"}], } ) best = self.best_match(validator.iter_errors({})) self.assertEqual(best.validator, "minProperties") def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self): """ If the most relevant error is an anyOf, then we traverse its context and select the otherwise *least* relevant error, since in this case that means the most specific, deep, error inside the instance. I.e. since only one of the schemas must match, we look for the most relevant one. """ validator = Draft4Validator( { "properties": { "foo": { "anyOf": [ {"type": "string"}, {"properties": {"bar": {"type": "array"}}}, ], }, }, }, ) best = self.best_match(validator.iter_errors({"foo": {"bar": 12}})) self.assertEqual(best.validator_value, "array") def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self): """ If the most relevant error is an oneOf, then we traverse its context and select the otherwise *least* relevant error, since in this case that means the most specific, deep, error inside the instance. I.e. since only one of the schemas must match, we look for the most relevant one. """ validator = Draft4Validator( { "properties": { "foo": { "oneOf": [ {"type": "string"}, {"properties": {"bar": {"type": "array"}}}, ], }, }, }, ) best = self.best_match(validator.iter_errors({"foo": {"bar": 12}})) self.assertEqual(best.validator_value, "array") def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self): """ Now, if the error is allOf, we traverse but select the *most* relevant error from the context, because all schemas here must match anyways. 
""" validator = Draft4Validator( { "properties": { "foo": { "allOf": [ {"type": "string"}, {"properties": {"bar": {"type": "array"}}}, ], }, }, }, ) best = self.best_match(validator.iter_errors({"foo": {"bar": 12}})) self.assertEqual(best.validator_value, "string") def test_nested_context_for_oneOf(self): validator = Draft4Validator( { "properties": { "foo": { "oneOf": [ {"type": "string"}, { "oneOf": [ {"type": "string"}, { "properties": { "bar": {"type": "array"}, }, }, ], }, ], }, }, }, ) best = self.best_match(validator.iter_errors({"foo": {"bar": 12}})) self.assertEqual(best.validator_value, "array") def test_one_error(self): validator = Draft4Validator({"minProperties": 2}) error, = validator.iter_errors({}) self.assertEqual( exceptions.best_match(validator.iter_errors({})).validator, "minProperties", ) def test_no_errors(self): validator = Draft4Validator({}) self.assertIsNone(exceptions.best_match(validator.iter_errors({}))) class TestByRelevance(TestCase): def test_short_paths_are_better_matches(self): shallow = exceptions.ValidationError("Oh no!", path=["baz"]) deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"]) match = max([shallow, deep], key=exceptions.relevance) self.assertIs(match, shallow) match = max([deep, shallow], key=exceptions.relevance) self.assertIs(match, shallow) def test_global_errors_are_even_better_matches(self): shallow = exceptions.ValidationError("Oh no!", path=[]) deep = exceptions.ValidationError("Oh yes!", path=["foo"]) errors = sorted([shallow, deep], key=exceptions.relevance) self.assertEqual( [list(error.path) for error in errors], [["foo"], []], ) errors = sorted([deep, shallow], key=exceptions.relevance) self.assertEqual( [list(error.path) for error in errors], [["foo"], []], ) def test_weak_validators_are_lower_priority(self): weak = exceptions.ValidationError("Oh no!", path=[], validator="a") normal = exceptions.ValidationError("Oh yes!", path=[], validator="b") best_match = exceptions.by_relevance(weak="a") match = max([weak, normal], key=best_match) self.assertIs(match, normal) match = max([normal, weak], key=best_match) self.assertIs(match, normal) def test_strong_validators_are_higher_priority(self): weak = exceptions.ValidationError("Oh no!", path=[], validator="a") normal = exceptions.ValidationError("Oh yes!", path=[], validator="b") strong = exceptions.ValidationError("Oh fine!", path=[], validator="c") best_match = exceptions.by_relevance(weak="a", strong="c") match = max([weak, normal, strong], key=best_match) self.assertIs(match, strong) match = max([strong, normal, weak], key=best_match) self.assertIs(match, strong) class TestErrorTree(TestCase): def test_it_knows_how_many_total_errors_it_contains(self): # FIXME: https://github.com/Julian/jsonschema/issues/442 errors = [ exceptions.ValidationError("Something", validator=i) for i in range(8) ] tree = exceptions.ErrorTree(errors) self.assertEqual(tree.total_errors, 8) def test_it_contains_an_item_if_the_item_had_an_error(self): errors = [exceptions.ValidationError("a message", path=["bar"])] tree = exceptions.ErrorTree(errors) self.assertIn("bar", tree) def test_it_does_not_contain_an_item_if_the_item_had_no_error(self): errors = [exceptions.ValidationError("a message", path=["bar"])] tree = exceptions.ErrorTree(errors) self.assertNotIn("foo", tree) def test_validators_that_failed_appear_in_errors_dict(self): error = exceptions.ValidationError("a message", validator="foo") tree = exceptions.ErrorTree([error]) self.assertEqual(tree.errors, {"foo": error}) def 
test_it_creates_a_child_tree_for_each_nested_path(self): errors = [ exceptions.ValidationError("a bar message", path=["bar"]), exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]), ] tree = exceptions.ErrorTree(errors) self.assertIn(0, tree["bar"]) self.assertNotIn(1, tree["bar"]) def test_children_have_their_errors_dicts_built(self): e1, e2 = ( exceptions.ValidationError("1", validator="foo", path=["bar", 0]), exceptions.ValidationError("2", validator="quux", path=["bar", 0]), ) tree = exceptions.ErrorTree([e1, e2]) self.assertEqual(tree["bar"][0].errors, {"foo": e1, "quux": e2}) def test_multiple_errors_with_instance(self): e1, e2 = ( exceptions.ValidationError( "1", validator="foo", path=["bar", "bar2"], instance="i1"), exceptions.ValidationError( "2", validator="quux", path=["foobar", 2], instance="i2"), ) exceptions.ErrorTree([e1, e2]) def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self): error = exceptions.ValidationError("123", validator="foo", instance=[]) tree = exceptions.ErrorTree([error]) with self.assertRaises(IndexError): tree[0] def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self): """ If a validator is dumb (like :validator:`required` in draft 3) and refers to a path that isn't in the instance, the tree still properly returns a subtree for that path. """ error = exceptions.ValidationError( "a message", validator="foo", instance={}, path=["foo"], ) tree = exceptions.ErrorTree([error]) self.assertIsInstance(tree["foo"], exceptions.ErrorTree) class TestErrorInitReprStr(TestCase): def make_error(self, **kwargs): defaults = dict( message=u"hello", validator=u"type", validator_value=u"string", instance=5, schema={u"type": u"string"}, ) defaults.update(kwargs) return exceptions.ValidationError(**defaults) def assertShows(self, expected, **kwargs): if PY3: # pragma: no cover expected = expected.replace("u'", "'") expected = textwrap.dedent(expected).rstrip("\n") error = self.make_error(**kwargs) message_line, _, rest = str(error).partition("\n") self.assertEqual(message_line, error.message) self.assertEqual(rest, expected) def test_it_calls_super_and_sets_args(self): error = self.make_error() self.assertGreater(len(error.args), 1) def test_repr(self): self.assertEqual( repr(exceptions.ValidationError(message="Hello!")), "<ValidationError: %r>" % "Hello!", ) def test_unset_error(self): error = exceptions.ValidationError("message") self.assertEqual(str(error), "message") kwargs = { "validator": "type", "validator_value": "string", "instance": 5, "schema": {"type": "string"}, } # Just the message should show if any of the attributes are unset for attr in kwargs: k = dict(kwargs) del k[attr] error = exceptions.ValidationError("message", **k) self.assertEqual(str(error), "message") def test_empty_paths(self): self.assertShows( """ Failed validating u'type' in schema: {u'type': u'string'} On instance: 5 """, path=[], schema_path=[], ) def test_one_item_paths(self): self.assertShows( """ Failed validating u'type' in schema: {u'type': u'string'} On instance[0]: 5 """, path=[0], schema_path=["items"], ) def test_multiple_item_paths(self): self.assertShows( """ Failed validating u'type' in schema[u'items'][0]: {u'type': u'string'} On instance[0][u'a']: 5 """, path=[0, u"a"], schema_path=[u"items", 0, 1], ) def test_uses_pprint(self): self.assertShows( """ Failed validating u'maxLength' in schema: {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, 12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19} On 
instance: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] """, instance=list(range(25)), schema=dict(zip(range(20), range(20))), validator=u"maxLength", ) def test_str_works_with_instances_having_overriden_eq_operator(self): """ Check for https://github.com/Julian/jsonschema/issues/164 which rendered exceptions unusable when a `ValidationError` involved instances with an `__eq__` method that returned truthy values. """ class DontEQMeBro(object): def __eq__(this, other): # pragma: no cover self.fail("Don't!") def __ne__(this, other): # pragma: no cover self.fail("Don't!") instance = DontEQMeBro() error = exceptions.ValidationError( "a message", validator="foo", instance=instance, validator_value="some", schema="schema", ) self.assertIn(repr(instance), str(error)) class TestHashable(TestCase): def test_hashable(self): set([exceptions.ValidationError("")]) set([exceptions.SchemaError("")])
15,348
Python
32.151188
79
0.49179
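A compact sketch of the two helpers the tests above exercise, best_match and ErrorTree; the schema and instance are illustrative assumptions:

from jsonschema import Draft4Validator, exceptions

schema = {"properties": {"foo": {"type": "array"}}}
validator = Draft4Validator(schema)
errors = list(validator.iter_errors({"foo": "not an array"}))

# best_match picks the most relevant error, as the tests above verify.
print(exceptions.best_match(errors).validator)  # type

# ErrorTree indexes errors by the path into the instance.
tree = exceptions.ErrorTree(errors)
print("foo" in tree)               # True: the "foo" member had an error
print(sorted(tree["foo"].errors))  # ['type']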
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/_helpers.py
def bug(issue=None): message = "A known bug." if issue is not None: message += " See issue #{issue}.".format(issue=issue) return message
157
Python
25.333329
61
0.611465
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_format.py
""" Tests for the parts of jsonschema related to the :validator:`format` property. """ from unittest import TestCase from jsonschema import FormatError, ValidationError, FormatChecker from jsonschema.validators import Draft4Validator BOOM = ValueError("Boom!") BANG = ZeroDivisionError("Bang!") def boom(thing): if thing == "bang": raise BANG raise BOOM class TestFormatChecker(TestCase): def test_it_can_validate_no_formats(self): checker = FormatChecker(formats=()) self.assertFalse(checker.checkers) def test_it_raises_a_key_error_for_unknown_formats(self): with self.assertRaises(KeyError): FormatChecker(formats=["o noes"]) def test_it_can_register_cls_checkers(self): original = dict(FormatChecker.checkers) self.addCleanup(FormatChecker.checkers.pop, "boom") FormatChecker.cls_checks("boom")(boom) self.assertEqual( FormatChecker.checkers, dict(original, boom=(boom, ())), ) def test_it_can_register_checkers(self): checker = FormatChecker() checker.checks("boom")(boom) self.assertEqual( checker.checkers, dict(FormatChecker.checkers, boom=(boom, ())) ) def test_it_catches_registered_errors(self): checker = FormatChecker() checker.checks("boom", raises=type(BOOM))(boom) with self.assertRaises(FormatError) as cm: checker.check(instance=12, format="boom") self.assertIs(cm.exception.cause, BOOM) self.assertIs(cm.exception.__cause__, BOOM) # Unregistered errors should not be caught with self.assertRaises(type(BANG)): checker.check(instance="bang", format="boom") def test_format_error_causes_become_validation_error_causes(self): checker = FormatChecker() checker.checks("boom", raises=ValueError)(boom) validator = Draft4Validator({"format": "boom"}, format_checker=checker) with self.assertRaises(ValidationError) as cm: validator.validate("BOOM") self.assertIs(cm.exception.cause, BOOM) self.assertIs(cm.exception.__cause__, BOOM) def test_format_checkers_come_with_defaults(self): # This is bad :/ but relied upon. # The docs for quite awhile recommended people do things like # validate(..., format_checker=FormatChecker()) # We should change that, but we can't without deprecation... checker = FormatChecker() with self.assertRaises(FormatError): checker.check(instance="not-an-ipv4", format="ipv4") def test_repr(self): checker = FormatChecker(formats=()) checker.checks("foo")(lambda thing: True) checker.checks("bar")(lambda thing: True) checker.checks("baz")(lambda thing: True) self.assertEqual( repr(checker), "<FormatChecker checkers=['bar', 'baz', 'foo']>", )
2,982
Python
32.144444
79
0.640174
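A minimal sketch of registering and using a custom format checker, in the same style as the tests above; the "even" format name and its checker are illustrative inventions:

from jsonschema import FormatChecker, ValidationError, validate

checker = FormatChecker()

@checker.checks("even", raises=ValueError)
def is_even(value):
    # Raising the declared exception (or returning False) marks the value invalid.
    if value % 2:
        raise ValueError("odd")
    return True

validate(2, {"type": "integer", "format": "even"}, format_checker=checker)  # passes
try:
    validate(3, {"type": "integer", "format": "even"}, format_checker=checker)
except ValidationError as err:
    print(err.message)  # 3 is not a 'even'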
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_types.py
""" Tests on the new type interface. The actual correctness of the type checking is handled in test_jsonschema_test_suite; these tests check that TypeChecker functions correctly and can facilitate extensions to type checking """ from collections import namedtuple from unittest import TestCase from jsonschema import ValidationError, _validators from jsonschema._types import TypeChecker from jsonschema.exceptions import UndefinedTypeCheck from jsonschema.validators import Draft4Validator, extend def equals_2(checker, instance): return instance == 2 def is_namedtuple(instance): return isinstance(instance, tuple) and getattr(instance, "_fields", None) def is_object_or_named_tuple(checker, instance): if Draft4Validator.TYPE_CHECKER.is_type(instance, "object"): return True return is_namedtuple(instance) def coerce_named_tuple(fn): def coerced(validator, value, instance, schema): if is_namedtuple(instance): instance = instance._asdict() return fn(validator, value, instance, schema) return coerced required = coerce_named_tuple(_validators.required) properties = coerce_named_tuple(_validators.properties) class TestTypeChecker(TestCase): def test_is_type(self): checker = TypeChecker({"two": equals_2}) self.assertEqual( ( checker.is_type(instance=2, type="two"), checker.is_type(instance="bar", type="two"), ), (True, False), ) def test_is_unknown_type(self): with self.assertRaises(UndefinedTypeCheck) as context: TypeChecker().is_type(4, "foobar") self.assertIn("foobar", str(context.exception)) def test_checks_can_be_added_at_init(self): checker = TypeChecker({"two": equals_2}) self.assertEqual(checker, TypeChecker().redefine("two", equals_2)) def test_redefine_existing_type(self): self.assertEqual( TypeChecker().redefine("two", object()).redefine("two", equals_2), TypeChecker().redefine("two", equals_2), ) def test_remove(self): self.assertEqual( TypeChecker({"two": equals_2}).remove("two"), TypeChecker(), ) def test_remove_unknown_type(self): with self.assertRaises(UndefinedTypeCheck) as context: TypeChecker().remove("foobar") self.assertIn("foobar", str(context.exception)) def test_redefine_many(self): self.assertEqual( TypeChecker().redefine_many({"foo": int, "bar": str}), TypeChecker().redefine("foo", int).redefine("bar", str), ) def test_remove_multiple(self): self.assertEqual( TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"), TypeChecker(), ) def test_type_check_can_raise_key_error(self): """ Make sure no one writes: try: self._type_checkers[type](...) except KeyError: ignoring the fact that the function itself can raise that. 
""" error = KeyError("Stuff") def raises_keyerror(checker, instance): raise error with self.assertRaises(KeyError) as context: TypeChecker({"foo": raises_keyerror}).is_type(4, "foo") self.assertIs(context.exception, error) class TestCustomTypes(TestCase): def test_simple_type_can_be_extended(self): def int_or_str_int(checker, instance): if not isinstance(instance, (int, str)): return False try: int(instance) except ValueError: return False return True CustomValidator = extend( Draft4Validator, type_checker=Draft4Validator.TYPE_CHECKER.redefine( "integer", int_or_str_int, ), ) validator = CustomValidator({"type": "integer"}) validator.validate(4) validator.validate("4") with self.assertRaises(ValidationError): validator.validate(4.4) def test_object_can_be_extended(self): schema = {"type": "object"} Point = namedtuple("Point", ["x", "y"]) type_checker = Draft4Validator.TYPE_CHECKER.redefine( u"object", is_object_or_named_tuple, ) CustomValidator = extend(Draft4Validator, type_checker=type_checker) validator = CustomValidator(schema) validator.validate(Point(x=4, y=5)) def test_object_extensions_require_custom_validators(self): schema = {"type": "object", "required": ["x"]} type_checker = Draft4Validator.TYPE_CHECKER.redefine( u"object", is_object_or_named_tuple, ) CustomValidator = extend(Draft4Validator, type_checker=type_checker) validator = CustomValidator(schema) Point = namedtuple("Point", ["x", "y"]) # Cannot handle required with self.assertRaises(ValidationError): validator.validate(Point(x=4, y=5)) def test_object_extensions_can_handle_custom_validators(self): schema = { "type": "object", "required": ["x"], "properties": {"x": {"type": "integer"}}, } type_checker = Draft4Validator.TYPE_CHECKER.redefine( u"object", is_object_or_named_tuple, ) CustomValidator = extend( Draft4Validator, type_checker=type_checker, validators={"required": required, "properties": properties}, ) validator = CustomValidator(schema) Point = namedtuple("Point", ["x", "y"]) # Can now process required and properties validator.validate(Point(x=4, y=5)) with self.assertRaises(ValidationError): validator.validate(Point(x="not an integer", y=5))
5,902
Python
29.905759
78
0.609454
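A sketch of redefining a type check via extend, in the spirit of the tests above; the helper name and the "lenient" behaviour are illustrative choices, not part of the library:

from jsonschema import Draft7Validator, validators

def is_int_like(checker, instance):
    # Accept anything int() can parse, but keep booleans out, as the drafts do.
    if isinstance(instance, bool):
        return False
    try:
        int(instance)
    except (TypeError, ValueError):
        return False
    return True

LenientValidator = validators.extend(
    Draft7Validator,
    type_checker=Draft7Validator.TYPE_CHECKER.redefine("integer", is_int_like),
)
print(LenientValidator({"type": "integer"}).is_valid("12"))    # True
print(LenientValidator({"type": "integer"}).is_valid("12.5"))  # False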
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/tests/test_validators.py
from collections import deque from contextlib import contextmanager from decimal import Decimal from io import BytesIO from unittest import TestCase import json import os import sys import tempfile import unittest from twisted.trial.unittest import SynchronousTestCase import attr from jsonschema import FormatChecker, TypeChecker, exceptions, validators from jsonschema.compat import PY3, pathname2url from jsonschema.tests._helpers import bug def startswith(validator, startswith, instance, schema): if not instance.startswith(startswith): yield exceptions.ValidationError(u"Whoops!") class TestCreateAndExtend(SynchronousTestCase): def setUp(self): self.addCleanup( self.assertEqual, validators.meta_schemas, dict(validators.meta_schemas), ) self.meta_schema = {u"$id": "some://meta/schema"} self.validators = {u"startswith": startswith} self.type_checker = TypeChecker() self.Validator = validators.create( meta_schema=self.meta_schema, validators=self.validators, type_checker=self.type_checker, ) def test_attrs(self): self.assertEqual( ( self.Validator.VALIDATORS, self.Validator.META_SCHEMA, self.Validator.TYPE_CHECKER, ), ( self.validators, self.meta_schema, self.type_checker, ), ) def test_init(self): schema = {u"startswith": u"foo"} self.assertEqual(self.Validator(schema).schema, schema) def test_iter_errors(self): schema = {u"startswith": u"hel"} iter_errors = self.Validator(schema).iter_errors errors = list(iter_errors(u"hello")) self.assertEqual(errors, []) expected_error = exceptions.ValidationError( u"Whoops!", instance=u"goodbye", schema=schema, validator=u"startswith", validator_value=u"hel", schema_path=deque([u"startswith"]), ) errors = list(iter_errors(u"goodbye")) self.assertEqual(len(errors), 1) self.assertEqual(errors[0]._contents(), expected_error._contents()) def test_if_a_version_is_provided_it_is_registered(self): Validator = validators.create( meta_schema={u"$id": "something"}, version="my version", ) self.addCleanup(validators.meta_schemas.pop, "something") self.assertEqual(Validator.__name__, "MyVersionValidator") def test_if_a_version_is_not_provided_it_is_not_registered(self): original = dict(validators.meta_schemas) validators.create(meta_schema={u"id": "id"}) self.assertEqual(validators.meta_schemas, original) def test_validates_registers_meta_schema_id(self): meta_schema_key = "meta schema id" my_meta_schema = {u"id": meta_schema_key} validators.create( meta_schema=my_meta_schema, version="my version", id_of=lambda s: s.get("id", ""), ) self.addCleanup(validators.meta_schemas.pop, meta_schema_key) self.assertIn(meta_schema_key, validators.meta_schemas) def test_validates_registers_meta_schema_draft6_id(self): meta_schema_key = "meta schema $id" my_meta_schema = {u"$id": meta_schema_key} validators.create( meta_schema=my_meta_schema, version="my version", ) self.addCleanup(validators.meta_schemas.pop, meta_schema_key) self.assertIn(meta_schema_key, validators.meta_schemas) def test_create_default_types(self): Validator = validators.create(meta_schema={}, validators=()) self.assertTrue( all( Validator({}).is_type(instance=instance, type=type) for type, instance in [ (u"array", []), (u"boolean", True), (u"integer", 12), (u"null", None), (u"number", 12.0), (u"object", {}), (u"string", u"foo"), ] ), ) def test_extend(self): original = dict(self.Validator.VALIDATORS) new = object() Extended = validators.extend( self.Validator, validators={u"new": new}, ) self.assertEqual( ( Extended.VALIDATORS, Extended.META_SCHEMA, Extended.TYPE_CHECKER, self.Validator.VALIDATORS, ), ( 
dict(original, new=new), self.Validator.META_SCHEMA, self.Validator.TYPE_CHECKER, original, ), ) def test_extend_idof(self): """ Extending a validator preserves its notion of schema IDs. """ def id_of(schema): return schema.get(u"__test__", self.Validator.ID_OF(schema)) correct_id = "the://correct/id/" meta_schema = { u"$id": "the://wrong/id/", u"__test__": correct_id, } Original = validators.create( meta_schema=meta_schema, validators=self.validators, type_checker=self.type_checker, id_of=id_of, ) self.assertEqual(Original.ID_OF(Original.META_SCHEMA), correct_id) Derived = validators.extend(Original) self.assertEqual(Derived.ID_OF(Derived.META_SCHEMA), correct_id) class TestLegacyTypeChecking(SynchronousTestCase): def test_create_default_types(self): Validator = validators.create(meta_schema={}, validators=()) self.assertEqual( set(Validator.DEFAULT_TYPES), { u"array", u"boolean", u"integer", u"null", u"number", u"object", u"string", }, ) self.flushWarnings() def test_extend(self): Validator = validators.create(meta_schema={}, validators=()) original = dict(Validator.VALIDATORS) new = object() Extended = validators.extend( Validator, validators={u"new": new}, ) self.assertEqual( ( Extended.VALIDATORS, Extended.META_SCHEMA, Extended.TYPE_CHECKER, Validator.VALIDATORS, Extended.DEFAULT_TYPES, Extended({}).DEFAULT_TYPES, self.flushWarnings()[0]["message"], ), ( dict(original, new=new), Validator.META_SCHEMA, Validator.TYPE_CHECKER, original, Validator.DEFAULT_TYPES, Validator.DEFAULT_TYPES, self.flushWarnings()[0]["message"], ), ) def test_types_redefines_the_validators_type_checker(self): schema = {"type": "string"} self.assertFalse(validators.Draft7Validator(schema).is_valid(12)) validator = validators.Draft7Validator( schema, types={"string": (str, int)}, ) self.assertTrue(validator.is_valid(12)) self.flushWarnings() def test_providing_default_types_warns(self): self.assertWarns( category=DeprecationWarning, message=( "The default_types argument is deprecated. " "Use the type_checker argument instead." ), # https://tm.tl/9363 :'( filename=sys.modules[self.assertWarns.__module__].__file__, f=validators.create, meta_schema={}, validators={}, default_types={"foo": object}, ) def test_cannot_ask_for_default_types_with_non_default_type_checker(self): """ We raise an error when you ask a validator with non-default type checker for its DEFAULT_TYPES. The type checker argument is new, so no one but this library itself should be trying to use it, and doing so while then asking for DEFAULT_TYPES makes no sense (not to mention is deprecated), since type checkers are not strictly about Python type. 
""" Validator = validators.create( meta_schema={}, validators={}, type_checker=TypeChecker(), ) with self.assertRaises(validators._DontDoThat) as e: Validator.DEFAULT_TYPES self.assertIn( "DEFAULT_TYPES cannot be used on Validators using TypeCheckers", str(e.exception), ) with self.assertRaises(validators._DontDoThat): Validator({}).DEFAULT_TYPES self.assertFalse(self.flushWarnings()) def test_providing_explicit_type_checker_does_not_warn(self): Validator = validators.create( meta_schema={}, validators={}, type_checker=TypeChecker(), ) self.assertFalse(self.flushWarnings()) Validator({}) self.assertFalse(self.flushWarnings()) def test_providing_neither_does_not_warn(self): Validator = validators.create(meta_schema={}, validators={}) self.assertFalse(self.flushWarnings()) Validator({}) self.assertFalse(self.flushWarnings()) def test_providing_default_types_with_type_checker_errors(self): with self.assertRaises(TypeError) as e: validators.create( meta_schema={}, validators={}, default_types={"foo": object}, type_checker=TypeChecker(), ) self.assertIn( "Do not specify default_types when providing a type checker", str(e.exception), ) self.assertFalse(self.flushWarnings()) def test_extending_a_legacy_validator_with_a_type_checker_errors(self): Validator = validators.create( meta_schema={}, validators={}, default_types={u"array": list} ) with self.assertRaises(TypeError) as e: validators.extend( Validator, validators={}, type_checker=TypeChecker(), ) self.assertIn( ( "Cannot extend a validator created with default_types " "with a type_checker. Update the validator to use a " "type_checker when created." ), str(e.exception), ) self.flushWarnings() def test_extending_a_legacy_validator_does_not_rewarn(self): Validator = validators.create(meta_schema={}, default_types={}) self.assertTrue(self.flushWarnings()) validators.extend(Validator) self.assertFalse(self.flushWarnings()) def test_accessing_default_types_warns(self): Validator = validators.create(meta_schema={}, validators={}) self.assertFalse(self.flushWarnings()) self.assertWarns( DeprecationWarning, ( "The DEFAULT_TYPES attribute is deprecated. " "See the type checker attached to this validator instead." ), # https://tm.tl/9363 :'( sys.modules[self.assertWarns.__module__].__file__, getattr, Validator, "DEFAULT_TYPES", ) def test_accessing_default_types_on_the_instance_warns(self): Validator = validators.create(meta_schema={}, validators={}) self.assertFalse(self.flushWarnings()) self.assertWarns( DeprecationWarning, ( "The DEFAULT_TYPES attribute is deprecated. " "See the type checker attached to this validator instead." ), # https://tm.tl/9363 :'( sys.modules[self.assertWarns.__module__].__file__, getattr, Validator({}), "DEFAULT_TYPES", ) def test_providing_types_to_init_warns(self): Validator = validators.create(meta_schema={}, validators={}) self.assertFalse(self.flushWarnings()) self.assertWarns( category=DeprecationWarning, message=( "The types argument is deprecated. " "Provide a type_checker to jsonschema.validators.extend " "instead." 
), # https://tm.tl/9363 :'( filename=sys.modules[self.assertWarns.__module__].__file__, f=Validator, schema={}, types={"bar": object}, ) class TestIterErrors(TestCase): def setUp(self): self.validator = validators.Draft3Validator({}) def test_iter_errors(self): instance = [1, 2] schema = { u"disallow": u"array", u"enum": [["a", "b", "c"], ["d", "e", "f"]], u"minItems": 3, } got = (e.message for e in self.validator.iter_errors(instance, schema)) expected = [ "%r is disallowed for [1, 2]" % (schema["disallow"],), "[1, 2] is too short", "[1, 2] is not one of %r" % (schema["enum"],), ] self.assertEqual(sorted(got), sorted(expected)) def test_iter_errors_multiple_failures_one_validator(self): instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"} schema = { u"properties": { "foo": {u"type": "string"}, "bar": {u"minItems": 2}, "baz": {u"maximum": 10, u"enum": [2, 4, 6, 8]}, }, } errors = list(self.validator.iter_errors(instance, schema)) self.assertEqual(len(errors), 4) class TestValidationErrorMessages(TestCase): def message_for(self, instance, schema, *args, **kwargs): kwargs.setdefault("cls", validators.Draft3Validator) with self.assertRaises(exceptions.ValidationError) as e: validators.validate(instance, schema, *args, **kwargs) return e.exception.message def test_single_type_failure(self): message = self.message_for(instance=1, schema={u"type": u"string"}) self.assertEqual(message, "1 is not of type %r" % u"string") def test_single_type_list_failure(self): message = self.message_for(instance=1, schema={u"type": [u"string"]}) self.assertEqual(message, "1 is not of type %r" % u"string") def test_multiple_type_failure(self): types = u"string", u"object" message = self.message_for(instance=1, schema={u"type": list(types)}) self.assertEqual(message, "1 is not of type %r, %r" % types) def test_object_without_title_type_failure(self): type = {u"type": [{u"minimum": 3}]} message = self.message_for(instance=1, schema={u"type": [type]}) self.assertEqual(message, "1 is less than the minimum of 3") def test_object_with_named_type_failure(self): schema = {u"type": [{u"name": "Foo", u"minimum": 3}]} message = self.message_for(instance=1, schema=schema) self.assertEqual(message, "1 is less than the minimum of 3") def test_minimum(self): message = self.message_for(instance=1, schema={"minimum": 2}) self.assertEqual(message, "1 is less than the minimum of 2") def test_maximum(self): message = self.message_for(instance=1, schema={"maximum": 0}) self.assertEqual(message, "1 is greater than the maximum of 0") def test_dependencies_single_element(self): depend, on = "bar", "foo" schema = {u"dependencies": {depend: on}} message = self.message_for( instance={"bar": 2}, schema=schema, cls=validators.Draft3Validator, ) self.assertEqual(message, "%r is a dependency of %r" % (on, depend)) def test_dependencies_list_draft3(self): depend, on = "bar", "foo" schema = {u"dependencies": {depend: [on]}} message = self.message_for( instance={"bar": 2}, schema=schema, cls=validators.Draft3Validator, ) self.assertEqual(message, "%r is a dependency of %r" % (on, depend)) def test_dependencies_list_draft7(self): depend, on = "bar", "foo" schema = {u"dependencies": {depend: [on]}} message = self.message_for( instance={"bar": 2}, schema=schema, cls=validators.Draft7Validator, ) self.assertEqual(message, "%r is a dependency of %r" % (on, depend)) def test_additionalItems_single_failure(self): message = self.message_for( instance=[2], schema={u"items": [], u"additionalItems": False}, ) self.assertIn("(2 was unexpected)", 
message) def test_additionalItems_multiple_failures(self): message = self.message_for( instance=[1, 2, 3], schema={u"items": [], u"additionalItems": False} ) self.assertIn("(1, 2, 3 were unexpected)", message) def test_additionalProperties_single_failure(self): additional = "foo" schema = {u"additionalProperties": False} message = self.message_for(instance={additional: 2}, schema=schema) self.assertIn("(%r was unexpected)" % (additional,), message) def test_additionalProperties_multiple_failures(self): schema = {u"additionalProperties": False} message = self.message_for( instance=dict.fromkeys(["foo", "bar"]), schema=schema, ) self.assertIn(repr("foo"), message) self.assertIn(repr("bar"), message) self.assertIn("were unexpected)", message) def test_const(self): schema = {u"const": 12} message = self.message_for( instance={"foo": "bar"}, schema=schema, cls=validators.Draft6Validator, ) self.assertIn("12 was expected", message) def test_contains(self): schema = {u"contains": {u"const": 12}} message = self.message_for( instance=[2, {}, []], schema=schema, cls=validators.Draft6Validator, ) self.assertIn( "None of [2, {}, []] are valid under the given schema", message, ) def test_invalid_format_default_message(self): checker = FormatChecker(formats=()) checker.checks(u"thing")(lambda value: False) schema = {u"format": u"thing"} message = self.message_for( instance="bla", schema=schema, format_checker=checker, ) self.assertIn(repr("bla"), message) self.assertIn(repr("thing"), message) self.assertIn("is not a", message) def test_additionalProperties_false_patternProperties(self): schema = {u"type": u"object", u"additionalProperties": False, u"patternProperties": { u"^abc$": {u"type": u"string"}, u"^def$": {u"type": u"string"}, }} message = self.message_for( instance={u"zebra": 123}, schema=schema, cls=validators.Draft4Validator, ) self.assertEqual( message, "{} does not match any of the regexes: {}, {}".format( repr(u"zebra"), repr(u"^abc$"), repr(u"^def$"), ), ) message = self.message_for( instance={u"zebra": 123, u"fish": 456}, schema=schema, cls=validators.Draft4Validator, ) self.assertEqual( message, "{}, {} do not match any of the regexes: {}, {}".format( repr(u"fish"), repr(u"zebra"), repr(u"^abc$"), repr(u"^def$") ), ) def test_False_schema(self): message = self.message_for( instance="something", schema=False, cls=validators.Draft7Validator, ) self.assertIn("False schema does not allow 'something'", message) class TestValidationErrorDetails(TestCase): # TODO: These really need unit tests for each individual validator, rather # than just these higher level tests. 
def test_anyOf(self): instance = 5 schema = { "anyOf": [ {"minimum": 20}, {"type": "string"}, ], } validator = validators.Draft4Validator(schema) errors = list(validator.iter_errors(instance)) self.assertEqual(len(errors), 1) e = errors[0] self.assertEqual(e.validator, "anyOf") self.assertEqual(e.validator_value, schema["anyOf"]) self.assertEqual(e.instance, instance) self.assertEqual(e.schema, schema) self.assertIsNone(e.parent) self.assertEqual(e.path, deque([])) self.assertEqual(e.relative_path, deque([])) self.assertEqual(e.absolute_path, deque([])) self.assertEqual(e.schema_path, deque(["anyOf"])) self.assertEqual(e.relative_schema_path, deque(["anyOf"])) self.assertEqual(e.absolute_schema_path, deque(["anyOf"])) self.assertEqual(len(e.context), 2) e1, e2 = sorted_errors(e.context) self.assertEqual(e1.validator, "minimum") self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"]) self.assertEqual(e1.instance, instance) self.assertEqual(e1.schema, schema["anyOf"][0]) self.assertIs(e1.parent, e) self.assertEqual(e1.path, deque([])) self.assertEqual(e1.absolute_path, deque([])) self.assertEqual(e1.relative_path, deque([])) self.assertEqual(e1.schema_path, deque([0, "minimum"])) self.assertEqual(e1.relative_schema_path, deque([0, "minimum"])) self.assertEqual( e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]), ) self.assertFalse(e1.context) self.assertEqual(e2.validator, "type") self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"]) self.assertEqual(e2.instance, instance) self.assertEqual(e2.schema, schema["anyOf"][1]) self.assertIs(e2.parent, e) self.assertEqual(e2.path, deque([])) self.assertEqual(e2.relative_path, deque([])) self.assertEqual(e2.absolute_path, deque([])) self.assertEqual(e2.schema_path, deque([1, "type"])) self.assertEqual(e2.relative_schema_path, deque([1, "type"])) self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"])) self.assertEqual(len(e2.context), 0) def test_type(self): instance = {"foo": 1} schema = { "type": [ {"type": "integer"}, { "type": "object", "properties": {"foo": {"enum": [2]}}, }, ], } validator = validators.Draft3Validator(schema) errors = list(validator.iter_errors(instance)) self.assertEqual(len(errors), 1) e = errors[0] self.assertEqual(e.validator, "type") self.assertEqual(e.validator_value, schema["type"]) self.assertEqual(e.instance, instance) self.assertEqual(e.schema, schema) self.assertIsNone(e.parent) self.assertEqual(e.path, deque([])) self.assertEqual(e.relative_path, deque([])) self.assertEqual(e.absolute_path, deque([])) self.assertEqual(e.schema_path, deque(["type"])) self.assertEqual(e.relative_schema_path, deque(["type"])) self.assertEqual(e.absolute_schema_path, deque(["type"])) self.assertEqual(len(e.context), 2) e1, e2 = sorted_errors(e.context) self.assertEqual(e1.validator, "type") self.assertEqual(e1.validator_value, schema["type"][0]["type"]) self.assertEqual(e1.instance, instance) self.assertEqual(e1.schema, schema["type"][0]) self.assertIs(e1.parent, e) self.assertEqual(e1.path, deque([])) self.assertEqual(e1.relative_path, deque([])) self.assertEqual(e1.absolute_path, deque([])) self.assertEqual(e1.schema_path, deque([0, "type"])) self.assertEqual(e1.relative_schema_path, deque([0, "type"])) self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"])) self.assertFalse(e1.context) self.assertEqual(e2.validator, "enum") self.assertEqual(e2.validator_value, [2]) self.assertEqual(e2.instance, 1) self.assertEqual(e2.schema, {u"enum": [2]}) self.assertIs(e2.parent, e) 
self.assertEqual(e2.path, deque(["foo"])) self.assertEqual(e2.relative_path, deque(["foo"])) self.assertEqual(e2.absolute_path, deque(["foo"])) self.assertEqual( e2.schema_path, deque([1, "properties", "foo", "enum"]), ) self.assertEqual( e2.relative_schema_path, deque([1, "properties", "foo", "enum"]), ) self.assertEqual( e2.absolute_schema_path, deque(["type", 1, "properties", "foo", "enum"]), ) self.assertFalse(e2.context) def test_single_nesting(self): instance = {"foo": 2, "bar": [1], "baz": 15, "quux": "spam"} schema = { "properties": { "foo": {"type": "string"}, "bar": {"minItems": 2}, "baz": {"maximum": 10, "enum": [2, 4, 6, 8]}, }, } validator = validators.Draft3Validator(schema) errors = validator.iter_errors(instance) e1, e2, e3, e4 = sorted_errors(errors) self.assertEqual(e1.path, deque(["bar"])) self.assertEqual(e2.path, deque(["baz"])) self.assertEqual(e3.path, deque(["baz"])) self.assertEqual(e4.path, deque(["foo"])) self.assertEqual(e1.relative_path, deque(["bar"])) self.assertEqual(e2.relative_path, deque(["baz"])) self.assertEqual(e3.relative_path, deque(["baz"])) self.assertEqual(e4.relative_path, deque(["foo"])) self.assertEqual(e1.absolute_path, deque(["bar"])) self.assertEqual(e2.absolute_path, deque(["baz"])) self.assertEqual(e3.absolute_path, deque(["baz"])) self.assertEqual(e4.absolute_path, deque(["foo"])) self.assertEqual(e1.validator, "minItems") self.assertEqual(e2.validator, "enum") self.assertEqual(e3.validator, "maximum") self.assertEqual(e4.validator, "type") def test_multiple_nesting(self): instance = [1, {"foo": 2, "bar": {"baz": [1]}}, "quux"] schema = { "type": "string", "items": { "type": ["string", "object"], "properties": { "foo": {"enum": [1, 3]}, "bar": { "type": "array", "properties": { "bar": {"required": True}, "baz": {"minItems": 2}, }, }, }, }, } validator = validators.Draft3Validator(schema) errors = validator.iter_errors(instance) e1, e2, e3, e4, e5, e6 = sorted_errors(errors) self.assertEqual(e1.path, deque([])) self.assertEqual(e2.path, deque([0])) self.assertEqual(e3.path, deque([1, "bar"])) self.assertEqual(e4.path, deque([1, "bar", "bar"])) self.assertEqual(e5.path, deque([1, "bar", "baz"])) self.assertEqual(e6.path, deque([1, "foo"])) self.assertEqual(e1.schema_path, deque(["type"])) self.assertEqual(e2.schema_path, deque(["items", "type"])) self.assertEqual( list(e3.schema_path), ["items", "properties", "bar", "type"], ) self.assertEqual( list(e4.schema_path), ["items", "properties", "bar", "properties", "bar", "required"], ) self.assertEqual( list(e5.schema_path), ["items", "properties", "bar", "properties", "baz", "minItems"] ) self.assertEqual( list(e6.schema_path), ["items", "properties", "foo", "enum"], ) self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "type") self.assertEqual(e3.validator, "type") self.assertEqual(e4.validator, "required") self.assertEqual(e5.validator, "minItems") self.assertEqual(e6.validator, "enum") def test_recursive(self): schema = { "definitions": { "node": { "anyOf": [{ "type": "object", "required": ["name", "children"], "properties": { "name": { "type": "string", }, "children": { "type": "object", "patternProperties": { "^.*$": { "$ref": "#/definitions/node", }, }, }, }, }], }, }, "type": "object", "required": ["root"], "properties": {"root": {"$ref": "#/definitions/node"}}, } instance = { "root": { "name": "root", "children": { "a": { "name": "a", "children": { "ab": { "name": "ab", # missing "children" }, }, }, }, }, } validator = validators.Draft4Validator(schema) e, = 
validator.iter_errors(instance) self.assertEqual(e.absolute_path, deque(["root"])) self.assertEqual( e.absolute_schema_path, deque(["properties", "root", "anyOf"]), ) e1, = e.context self.assertEqual(e1.absolute_path, deque(["root", "children", "a"])) self.assertEqual( e1.absolute_schema_path, deque( [ "properties", "root", "anyOf", 0, "properties", "children", "patternProperties", "^.*$", "anyOf", ], ), ) e2, = e1.context self.assertEqual( e2.absolute_path, deque( ["root", "children", "a", "children", "ab"], ), ) self.assertEqual( e2.absolute_schema_path, deque( [ "properties", "root", "anyOf", 0, "properties", "children", "patternProperties", "^.*$", "anyOf", 0, "properties", "children", "patternProperties", "^.*$", "anyOf", ], ), ) def test_additionalProperties(self): instance = {"bar": "bar", "foo": 2} schema = {"additionalProperties": {"type": "integer", "minimum": 5}} validator = validators.Draft3Validator(schema) errors = validator.iter_errors(instance) e1, e2 = sorted_errors(errors) self.assertEqual(e1.path, deque(["bar"])) self.assertEqual(e2.path, deque(["foo"])) self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") def test_patternProperties(self): instance = {"bar": 1, "foo": 2} schema = { "patternProperties": { "bar": {"type": "string"}, "foo": {"minimum": 5}, }, } validator = validators.Draft3Validator(schema) errors = validator.iter_errors(instance) e1, e2 = sorted_errors(errors) self.assertEqual(e1.path, deque(["bar"])) self.assertEqual(e2.path, deque(["foo"])) self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") def test_additionalItems(self): instance = ["foo", 1] schema = { "items": [], "additionalItems": {"type": "integer", "minimum": 5}, } validator = validators.Draft3Validator(schema) errors = validator.iter_errors(instance) e1, e2 = sorted_errors(errors) self.assertEqual(e1.path, deque([0])) self.assertEqual(e2.path, deque([1])) self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") def test_additionalItems_with_items(self): instance = ["foo", "bar", 1] schema = { "items": [{}], "additionalItems": {"type": "integer", "minimum": 5}, } validator = validators.Draft3Validator(schema) errors = validator.iter_errors(instance) e1, e2 = sorted_errors(errors) self.assertEqual(e1.path, deque([1])) self.assertEqual(e2.path, deque([2])) self.assertEqual(e1.validator, "type") self.assertEqual(e2.validator, "minimum") def test_propertyNames(self): instance = {"foo": 12} schema = {"propertyNames": {"not": {"const": "foo"}}} validator = validators.Draft7Validator(schema) error, = validator.iter_errors(instance) self.assertEqual(error.validator, "not") self.assertEqual( error.message, "%r is not allowed for %r" % ({"const": "foo"}, "foo"), ) self.assertEqual(error.path, deque([])) self.assertEqual(error.schema_path, deque(["propertyNames", "not"])) def test_if_then(self): schema = { "if": {"const": 12}, "then": {"const": 13}, } validator = validators.Draft7Validator(schema) error, = validator.iter_errors(12) self.assertEqual(error.validator, "const") self.assertEqual(error.message, "13 was expected") self.assertEqual(error.path, deque([])) self.assertEqual(error.schema_path, deque(["if", "then", "const"])) def test_if_else(self): schema = { "if": {"const": 12}, "else": {"const": 13}, } validator = validators.Draft7Validator(schema) error, = validator.iter_errors(15) self.assertEqual(error.validator, "const") self.assertEqual(error.message, "13 was expected") self.assertEqual(error.path, deque([])) 
self.assertEqual(error.schema_path, deque(["if", "else", "const"])) def test_boolean_schema_False(self): validator = validators.Draft7Validator(False) error, = validator.iter_errors(12) self.assertEqual( ( error.message, error.validator, error.validator_value, error.instance, error.schema, error.schema_path, ), ( "False schema does not allow 12", None, None, 12, False, deque([]), ), ) def test_ref(self): ref, schema = "someRef", {"additionalProperties": {"type": "integer"}} validator = validators.Draft7Validator( {"$ref": ref}, resolver=validators.RefResolver("", {}, store={ref: schema}), ) error, = validator.iter_errors({"foo": "notAnInteger"}) self.assertEqual( ( error.message, error.validator, error.validator_value, error.instance, error.absolute_path, error.schema, error.schema_path, ), ( "'notAnInteger' is not of type 'integer'", "type", "integer", "notAnInteger", deque(["foo"]), {"type": "integer"}, deque(["additionalProperties", "type"]), ), ) class MetaSchemaTestsMixin(object): # TODO: These all belong upstream def test_invalid_properties(self): with self.assertRaises(exceptions.SchemaError): self.Validator.check_schema({"properties": {"test": object()}}) def test_minItems_invalid_string(self): with self.assertRaises(exceptions.SchemaError): # needs to be an integer self.Validator.check_schema({"minItems": "1"}) def test_enum_allows_empty_arrays(self): """ Technically, all the spec says is they SHOULD have elements, not MUST. See https://github.com/Julian/jsonschema/issues/529. """ self.Validator.check_schema({"enum": []}) def test_enum_allows_non_unique_items(self): """ Technically, all the spec says is they SHOULD be unique, not MUST. See https://github.com/Julian/jsonschema/issues/529. """ self.Validator.check_schema({"enum": [12, 12]}) class ValidatorTestMixin(MetaSchemaTestsMixin, object): def test_valid_instances_are_valid(self): schema, instance = self.valid self.assertTrue(self.Validator(schema).is_valid(instance)) def test_invalid_instances_are_not_valid(self): schema, instance = self.invalid self.assertFalse(self.Validator(schema).is_valid(instance)) def test_non_existent_properties_are_ignored(self): self.Validator({object(): object()}).validate(instance=object()) def test_it_creates_a_ref_resolver_if_not_provided(self): self.assertIsInstance( self.Validator({}).resolver, validators.RefResolver, ) def test_it_delegates_to_a_ref_resolver(self): ref, schema = "someCoolRef", {"type": "integer"} resolver = validators.RefResolver("", {}, store={ref: schema}) validator = self.Validator({"$ref": ref}, resolver=resolver) with self.assertRaises(exceptions.ValidationError): validator.validate(None) def test_it_delegates_to_a_legacy_ref_resolver(self): """ Legacy RefResolvers support only the context manager form of resolution. 
""" class LegacyRefResolver(object): @contextmanager def resolving(this, ref): self.assertEqual(ref, "the ref") yield {"type": "integer"} resolver = LegacyRefResolver() schema = {"$ref": "the ref"} with self.assertRaises(exceptions.ValidationError): self.Validator(schema, resolver=resolver).validate(None) def test_is_type_is_true_for_valid_type(self): self.assertTrue(self.Validator({}).is_type("foo", "string")) def test_is_type_is_false_for_invalid_type(self): self.assertFalse(self.Validator({}).is_type("foo", "array")) def test_is_type_evades_bool_inheriting_from_int(self): self.assertFalse(self.Validator({}).is_type(True, "integer")) self.assertFalse(self.Validator({}).is_type(True, "number")) @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode") def test_string_a_bytestring_is_a_string(self): self.Validator({"type": "string"}).validate(b"foo") def test_patterns_can_be_native_strings(self): """ See https://github.com/Julian/jsonschema/issues/611. """ self.Validator({"pattern": "foo"}).validate("foo") def test_it_can_validate_with_decimals(self): schema = {"items": {"type": "number"}} Validator = validators.extend( self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine( "number", lambda checker, thing: isinstance( thing, (int, float, Decimal), ) and not isinstance(thing, bool), ) ) validator = Validator(schema) validator.validate([1, 1.1, Decimal(1) / Decimal(8)]) invalid = ["foo", {}, [], True, None] self.assertEqual( [error.instance for error in validator.iter_errors(invalid)], invalid, ) def test_it_returns_true_for_formats_it_does_not_know_about(self): validator = self.Validator( {"format": "carrot"}, format_checker=FormatChecker(), ) validator.validate("bugs") def test_it_does_not_validate_formats_by_default(self): validator = self.Validator({}) self.assertIsNone(validator.format_checker) def test_it_validates_formats_if_a_checker_is_provided(self): checker = FormatChecker() bad = ValueError("Bad!") @checker.checks("foo", raises=ValueError) def check(value): if value == "good": return True elif value == "bad": raise bad else: # pragma: no cover self.fail("What is {}? [Baby Don't Hurt Me]".format(value)) validator = self.Validator( {"format": "foo"}, format_checker=checker, ) validator.validate("good") with self.assertRaises(exceptions.ValidationError) as cm: validator.validate("bad") # Make sure original cause is attached self.assertIs(cm.exception.cause, bad) def test_non_string_custom_type(self): non_string_type = object() schema = {"type": [non_string_type]} Crazy = validators.extend( self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine( non_string_type, lambda checker, thing: isinstance(thing, int), ) ) Crazy(schema).validate(15) def test_it_properly_formats_tuples_in_errors(self): """ A tuple instance properly formats validation errors for uniqueItems. See https://github.com/Julian/jsonschema/pull/224 """ TupleValidator = validators.extend( self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine( "array", lambda checker, thing: isinstance(thing, tuple), ) ) with self.assertRaises(exceptions.ValidationError) as e: TupleValidator({"uniqueItems": True}).validate((1, 1)) self.assertIn("(1, 1) has non-unique elements", str(e.exception)) class AntiDraft6LeakMixin(object): """ Make sure functionality from draft 6 doesn't leak backwards in time. 
""" def test_True_is_not_a_schema(self): with self.assertRaises(exceptions.SchemaError) as e: self.Validator.check_schema(True) self.assertIn("True is not of type", str(e.exception)) def test_False_is_not_a_schema(self): with self.assertRaises(exceptions.SchemaError) as e: self.Validator.check_schema(False) self.assertIn("False is not of type", str(e.exception)) @unittest.skip(bug(523)) def test_True_is_not_a_schema_even_if_you_forget_to_check(self): resolver = validators.RefResolver("", {}) with self.assertRaises(Exception) as e: self.Validator(True, resolver=resolver).validate(12) self.assertNotIsInstance(e.exception, exceptions.ValidationError) @unittest.skip(bug(523)) def test_False_is_not_a_schema_even_if_you_forget_to_check(self): resolver = validators.RefResolver("", {}) with self.assertRaises(Exception) as e: self.Validator(False, resolver=resolver).validate(12) self.assertNotIsInstance(e.exception, exceptions.ValidationError) class TestDraft3Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): Validator = validators.Draft3Validator valid = {}, {} invalid = {"type": "integer"}, "foo" def test_any_type_is_valid_for_type_any(self): validator = self.Validator({"type": "any"}) validator.validate(object()) def test_any_type_is_redefinable(self): """ Sigh, because why not. """ Crazy = validators.extend( self.Validator, type_checker=self.Validator.TYPE_CHECKER.redefine( "any", lambda checker, thing: isinstance(thing, int), ) ) validator = Crazy({"type": "any"}) validator.validate(12) with self.assertRaises(exceptions.ValidationError): validator.validate("foo") def test_is_type_is_true_for_any_type(self): self.assertTrue(self.Validator({}).is_valid(object(), {"type": "any"})) def test_is_type_does_not_evade_bool_if_it_is_being_tested(self): self.assertTrue(self.Validator({}).is_type(True, "boolean")) self.assertTrue(self.Validator({}).is_valid(True, {"type": "any"})) class TestDraft4Validator(AntiDraft6LeakMixin, ValidatorTestMixin, TestCase): Validator = validators.Draft4Validator valid = {}, {} invalid = {"type": "integer"}, "foo" class TestDraft6Validator(ValidatorTestMixin, TestCase): Validator = validators.Draft6Validator valid = {}, {} invalid = {"type": "integer"}, "foo" class TestDraft7Validator(ValidatorTestMixin, TestCase): Validator = validators.Draft7Validator valid = {}, {} invalid = {"type": "integer"}, "foo" class TestValidatorFor(SynchronousTestCase): def test_draft_3(self): schema = {"$schema": "http://json-schema.org/draft-03/schema"} self.assertIs( validators.validator_for(schema), validators.Draft3Validator, ) schema = {"$schema": "http://json-schema.org/draft-03/schema#"} self.assertIs( validators.validator_for(schema), validators.Draft3Validator, ) def test_draft_4(self): schema = {"$schema": "http://json-schema.org/draft-04/schema"} self.assertIs( validators.validator_for(schema), validators.Draft4Validator, ) schema = {"$schema": "http://json-schema.org/draft-04/schema#"} self.assertIs( validators.validator_for(schema), validators.Draft4Validator, ) def test_draft_6(self): schema = {"$schema": "http://json-schema.org/draft-06/schema"} self.assertIs( validators.validator_for(schema), validators.Draft6Validator, ) schema = {"$schema": "http://json-schema.org/draft-06/schema#"} self.assertIs( validators.validator_for(schema), validators.Draft6Validator, ) def test_draft_7(self): schema = {"$schema": "http://json-schema.org/draft-07/schema"} self.assertIs( validators.validator_for(schema), validators.Draft7Validator, ) schema = {"$schema": 
"http://json-schema.org/draft-07/schema#"} self.assertIs( validators.validator_for(schema), validators.Draft7Validator, ) def test_True(self): self.assertIs( validators.validator_for(True), validators._LATEST_VERSION, ) def test_False(self): self.assertIs( validators.validator_for(False), validators._LATEST_VERSION, ) def test_custom_validator(self): Validator = validators.create( meta_schema={"id": "meta schema id"}, version="12", id_of=lambda s: s.get("id", ""), ) schema = {"$schema": "meta schema id"} self.assertIs( validators.validator_for(schema), Validator, ) def test_custom_validator_draft6(self): Validator = validators.create( meta_schema={"$id": "meta schema $id"}, version="13", ) schema = {"$schema": "meta schema $id"} self.assertIs( validators.validator_for(schema), Validator, ) def test_validator_for_jsonschema_default(self): self.assertIs(validators.validator_for({}), validators._LATEST_VERSION) def test_validator_for_custom_default(self): self.assertIs(validators.validator_for({}, default=None), None) def test_warns_if_meta_schema_specified_was_not_found(self): self.assertWarns( category=DeprecationWarning, message=( "The metaschema specified by $schema was not found. " "Using the latest draft to validate, but this will raise " "an error in the future." ), # https://tm.tl/9363 :'( filename=sys.modules[self.assertWarns.__module__].__file__, f=validators.validator_for, schema={u"$schema": "unknownSchema"}, default={}, ) def test_does_not_warn_if_meta_schema_is_unspecified(self): validators.validator_for(schema={}, default={}), self.assertFalse(self.flushWarnings()) class TestValidate(SynchronousTestCase): def assertUses(self, schema, Validator): result = [] self.patch(Validator, "check_schema", result.append) validators.validate({}, schema) self.assertEqual(result, [schema]) def test_draft3_validator_is_chosen(self): self.assertUses( schema={"$schema": "http://json-schema.org/draft-03/schema#"}, Validator=validators.Draft3Validator, ) # Make sure it works without the empty fragment self.assertUses( schema={"$schema": "http://json-schema.org/draft-03/schema"}, Validator=validators.Draft3Validator, ) def test_draft4_validator_is_chosen(self): self.assertUses( schema={"$schema": "http://json-schema.org/draft-04/schema#"}, Validator=validators.Draft4Validator, ) # Make sure it works without the empty fragment self.assertUses( schema={"$schema": "http://json-schema.org/draft-04/schema"}, Validator=validators.Draft4Validator, ) def test_draft6_validator_is_chosen(self): self.assertUses( schema={"$schema": "http://json-schema.org/draft-06/schema#"}, Validator=validators.Draft6Validator, ) # Make sure it works without the empty fragment self.assertUses( schema={"$schema": "http://json-schema.org/draft-06/schema"}, Validator=validators.Draft6Validator, ) def test_draft7_validator_is_chosen(self): self.assertUses( schema={"$schema": "http://json-schema.org/draft-07/schema#"}, Validator=validators.Draft7Validator, ) # Make sure it works without the empty fragment self.assertUses( schema={"$schema": "http://json-schema.org/draft-07/schema"}, Validator=validators.Draft7Validator, ) def test_draft7_validator_is_the_default(self): self.assertUses(schema={}, Validator=validators.Draft7Validator) def test_validation_error_message(self): with self.assertRaises(exceptions.ValidationError) as e: validators.validate(12, {"type": "string"}) self.assertRegexpMatches( str(e.exception), "(?s)Failed validating u?'.*' in schema.*On instance", ) def test_schema_error_message(self): with 
self.assertRaises(exceptions.SchemaError) as e: validators.validate(12, {"type": 12}) self.assertRegexpMatches( str(e.exception), "(?s)Failed validating u?'.*' in metaschema.*On schema", ) def test_it_uses_best_match(self): # This is a schema that best_match will recurse into schema = {"oneOf": [{"type": "string"}, {"type": "array"}]} with self.assertRaises(exceptions.ValidationError) as e: validators.validate(12, schema) self.assertIn("12 is not of type", str(e.exception)) class TestRefResolver(SynchronousTestCase): base_uri = "" stored_uri = "foo://stored" stored_schema = {"stored": "schema"} def setUp(self): self.referrer = {} self.store = {self.stored_uri: self.stored_schema} self.resolver = validators.RefResolver( self.base_uri, self.referrer, self.store, ) def test_it_does_not_retrieve_schema_urls_from_the_network(self): ref = validators.Draft3Validator.META_SCHEMA["id"] self.patch( self.resolver, "resolve_remote", lambda *args, **kwargs: self.fail("Should not have been called!"), ) with self.resolver.resolving(ref) as resolved: pass self.assertEqual(resolved, validators.Draft3Validator.META_SCHEMA) def test_it_resolves_local_refs(self): ref = "#/properties/foo" self.referrer["properties"] = {"foo": object()} with self.resolver.resolving(ref) as resolved: self.assertEqual(resolved, self.referrer["properties"]["foo"]) def test_it_resolves_local_refs_with_id(self): schema = {"id": "http://bar/schema#", "a": {"foo": "bar"}} resolver = validators.RefResolver.from_schema( schema, id_of=lambda schema: schema.get(u"id", u""), ) with resolver.resolving("#/a") as resolved: self.assertEqual(resolved, schema["a"]) with resolver.resolving("http://bar/schema#/a") as resolved: self.assertEqual(resolved, schema["a"]) def test_it_retrieves_stored_refs(self): with self.resolver.resolving(self.stored_uri) as resolved: self.assertIs(resolved, self.stored_schema) self.resolver.store["cached_ref"] = {"foo": 12} with self.resolver.resolving("cached_ref#/foo") as resolved: self.assertEqual(resolved, 12) def test_it_retrieves_unstored_refs_via_requests(self): ref = "http://bar#baz" schema = {"baz": 12} if "requests" in sys.modules: self.addCleanup( sys.modules.__setitem__, "requests", sys.modules["requests"], ) sys.modules["requests"] = ReallyFakeRequests({"http://bar": schema}) with self.resolver.resolving(ref) as resolved: self.assertEqual(resolved, 12) def test_it_retrieves_unstored_refs_via_urlopen(self): ref = "http://bar#baz" schema = {"baz": 12} if "requests" in sys.modules: self.addCleanup( sys.modules.__setitem__, "requests", sys.modules["requests"], ) sys.modules["requests"] = None @contextmanager def fake_urlopen(url): self.assertEqual(url, "http://bar") yield BytesIO(json.dumps(schema).encode("utf8")) self.addCleanup(setattr, validators, "urlopen", validators.urlopen) validators.urlopen = fake_urlopen with self.resolver.resolving(ref) as resolved: pass self.assertEqual(resolved, 12) def test_it_retrieves_local_refs_via_urlopen(self): with tempfile.NamedTemporaryFile(delete=False, mode="wt") as tempf: self.addCleanup(os.remove, tempf.name) json.dump({"foo": "bar"}, tempf) ref = "file://{}#foo".format(pathname2url(tempf.name)) with self.resolver.resolving(ref) as resolved: self.assertEqual(resolved, "bar") def test_it_can_construct_a_base_uri_from_a_schema(self): schema = {"id": "foo"} resolver = validators.RefResolver.from_schema( schema, id_of=lambda schema: schema.get(u"id", u""), ) self.assertEqual(resolver.base_uri, "foo") self.assertEqual(resolver.resolution_scope, "foo") with 
resolver.resolving("") as resolved: self.assertEqual(resolved, schema) with resolver.resolving("#") as resolved: self.assertEqual(resolved, schema) with resolver.resolving("foo") as resolved: self.assertEqual(resolved, schema) with resolver.resolving("foo#") as resolved: self.assertEqual(resolved, schema) def test_it_can_construct_a_base_uri_from_a_schema_without_id(self): schema = {} resolver = validators.RefResolver.from_schema(schema) self.assertEqual(resolver.base_uri, "") self.assertEqual(resolver.resolution_scope, "") with resolver.resolving("") as resolved: self.assertEqual(resolved, schema) with resolver.resolving("#") as resolved: self.assertEqual(resolved, schema) def test_custom_uri_scheme_handlers(self): def handler(url): self.assertEqual(url, ref) return schema schema = {"foo": "bar"} ref = "foo://bar" resolver = validators.RefResolver("", {}, handlers={"foo": handler}) with resolver.resolving(ref) as resolved: self.assertEqual(resolved, schema) def test_cache_remote_on(self): response = [object()] def handler(url): try: return response.pop() except IndexError: # pragma: no cover self.fail("Response must not have been cached!") ref = "foo://bar" resolver = validators.RefResolver( "", {}, cache_remote=True, handlers={"foo": handler}, ) with resolver.resolving(ref): pass with resolver.resolving(ref): pass def test_cache_remote_off(self): response = [object()] def handler(url): try: return response.pop() except IndexError: # pragma: no cover self.fail("Handler called twice!") ref = "foo://bar" resolver = validators.RefResolver( "", {}, cache_remote=False, handlers={"foo": handler}, ) with resolver.resolving(ref): pass def test_if_you_give_it_junk_you_get_a_resolution_error(self): error = ValueError("Oh no! What's this?") def handler(url): raise error ref = "foo://bar" resolver = validators.RefResolver("", {}, handlers={"foo": handler}) with self.assertRaises(exceptions.RefResolutionError) as err: with resolver.resolving(ref): self.fail("Shouldn't get this far!") # pragma: no cover self.assertEqual(err.exception, exceptions.RefResolutionError(error)) def test_helpful_error_message_on_failed_pop_scope(self): resolver = validators.RefResolver("", {}) resolver.pop_scope() with self.assertRaises(exceptions.RefResolutionError) as exc: resolver.pop_scope() self.assertIn("Failed to pop the scope", str(exc.exception)) def sorted_errors(errors): def key(error): return ( [str(e) for e in error.path], [str(e) for e in error.schema_path], ) return sorted(errors, key=key) @attr.s class ReallyFakeRequests(object): _responses = attr.ib() def get(self, url): response = self._responses.get(url) if url is None: # pragma: no cover raise ValueError("Unknown URL: " + repr(url)) return _ReallyFakeJSONResponse(json.dumps(response)) @attr.s class _ReallyFakeJSONResponse(object): _response = attr.ib() def json(self): return json.loads(self._response)
60,394
Python
33.256948
79
0.554078
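The test module above exercises jsonschema's public validator surface: iter_errors with error context, validators.extend with a custom TypeChecker, and RefResolver lookups against a preloaded store. A minimal sketch of the $ref-resolution pattern those tests rely on, assuming a jsonschema release from this era (one that still ships RefResolver):

from jsonschema import validators

# Resolve a "$ref" against a schema preloaded into the resolver's store,
# mirroring TestValidationErrorDetails.test_ref above.
ref, stored = "someRef", {"additionalProperties": {"type": "integer"}}
resolver = validators.RefResolver("", {}, store={ref: stored})
validator = validators.Draft7Validator({"$ref": ref}, resolver=resolver)

for error in validator.iter_errors({"foo": "notAnInteger"}):
    # prints: ['foo'] 'notAnInteger' is not of type 'integer'
    print(list(error.absolute_path), error.message)

Each yielded ValidationError also carries schema_path and context, which is what the assertions above inspect.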
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/benchmarks/issue232.py
#!/usr/bin/env python
"""
A performance benchmark using the example from issue #232.

See https://github.com/Julian/jsonschema/pull/232.
"""
from twisted.python.filepath import FilePath
from pyperf import Runner
from pyrsistent import m

from jsonschema.tests._suite import Version
import jsonschema


issue232 = Version(
    path=FilePath(__file__).sibling("issue232"),
    remotes=m(),
    name="issue232",
)


if __name__ == "__main__":
    issue232.benchmark(
        runner=Runner(),
        Validator=jsonschema.Draft4Validator,
    )
541
Python
19.074073
58
0.696858
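issue232.py drives its measurement through a pyperf Runner. As background, here is a tiny self-contained sketch of how a Runner is normally handed a function to time; the benchmark name and the toy schema below are purely illustrative and not part of the jsonschema benchmarks:

#!/usr/bin/env python
"""A toy pyperf benchmark in the style of the scripts above."""
from pyperf import Runner

import jsonschema

validator = jsonschema.Draft4Validator({"type": "integer"})


def validate_once():
    # The workload being timed: a single trivial validation.
    validator.is_valid(12)


if __name__ == "__main__":
    # pyperf supplies its own CLI (worker processes, JSON output, etc.).
    Runner().bench_func("trivial-validation", validate_once)

Results from such scripts can be written to JSON with pyperf's -o option and compared across runs with python -m pyperf compare_to.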
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/benchmarks/__init__.py
""" Benchmarks for validation. This package is *not* public API. """
70
Python
10.833332
33
0.685714
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/jsonschema/benchmarks/json_schema_test_suite.py
#!/usr/bin/env python
"""
A performance benchmark using the official test suite.

This benchmarks jsonschema using every valid example in the
JSON-Schema-Test-Suite. It will take some time to complete.
"""
from pyperf import Runner

from jsonschema.tests._suite import Suite


if __name__ == "__main__":
    Suite().benchmark(runner=Runner())
343
Python
21.933332
59
0.734694
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/connector.py
import asyncio import functools import random import sys import traceback import warnings from collections import defaultdict, deque from contextlib import suppress from http.cookies import SimpleCookie from itertools import cycle, islice from time import monotonic from types import TracebackType from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, DefaultDict, Dict, Iterator, List, Optional, Set, Tuple, Type, Union, cast, ) import attr from . import hdrs, helpers from .abc import AbstractResolver from .client_exceptions import ( ClientConnectionError, ClientConnectorCertificateError, ClientConnectorError, ClientConnectorSSLError, ClientHttpProxyError, ClientProxyConnectionError, ServerFingerprintMismatch, UnixClientConnectorError, cert_errors, ssl_errors, ) from .client_proto import ResponseHandler from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params from .helpers import ( PY_36, ceil_timeout, get_running_loop, is_ip_address, noop, sentinel, ) from .http import RESPONSES from .locks import EventResultOrError from .resolver import DefaultResolver try: import ssl SSLContext = ssl.SSLContext except ImportError: # pragma: no cover ssl = None # type: ignore[assignment] SSLContext = object # type: ignore[misc,assignment] __all__ = ("BaseConnector", "TCPConnector", "UnixConnector", "NamedPipeConnector") if TYPE_CHECKING: # pragma: no cover from .client import ClientTimeout from .client_reqrep import ConnectionKey from .tracing import Trace class _DeprecationWaiter: __slots__ = ("_awaitable", "_awaited") def __init__(self, awaitable: Awaitable[Any]) -> None: self._awaitable = awaitable self._awaited = False def __await__(self) -> Any: self._awaited = True return self._awaitable.__await__() def __del__(self) -> None: if not self._awaited: warnings.warn( "Connector.close() is a coroutine, " "please use await connector.close()", DeprecationWarning, ) class Connection: _source_traceback = None _transport = None def __init__( self, connector: "BaseConnector", key: "ConnectionKey", protocol: ResponseHandler, loop: asyncio.AbstractEventLoop, ) -> None: self._key = key self._connector = connector self._loop = loop self._protocol: Optional[ResponseHandler] = protocol self._callbacks: List[Callable[[], None]] = [] if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) def __repr__(self) -> str: return f"Connection<{self._key}>" def __del__(self, _warnings: Any = warnings) -> None: if self._protocol is not None: if PY_36: kwargs = {"source": self} else: kwargs = {} _warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs) if self._loop.is_closed(): return self._connector._release(self._key, self._protocol, should_close=True) context = {"client_connection": self, "message": "Unclosed connection"} if self._source_traceback is not None: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) @property def loop(self) -> asyncio.AbstractEventLoop: warnings.warn( "connector.loop property is deprecated", DeprecationWarning, stacklevel=2 ) return self._loop @property def transport(self) -> Optional[asyncio.Transport]: if self._protocol is None: return None return self._protocol.transport @property def protocol(self) -> Optional[ResponseHandler]: return self._protocol def add_callback(self, callback: Callable[[], None]) -> None: if callback is not None: self._callbacks.append(callback) def _notify_release(self) -> None: callbacks, self._callbacks = self._callbacks[:], [] for cb in callbacks: with 
suppress(Exception): cb() def close(self) -> None: self._notify_release() if self._protocol is not None: self._connector._release(self._key, self._protocol, should_close=True) self._protocol = None def release(self) -> None: self._notify_release() if self._protocol is not None: self._connector._release( self._key, self._protocol, should_close=self._protocol.should_close ) self._protocol = None @property def closed(self) -> bool: return self._protocol is None or not self._protocol.is_connected() class _TransportPlaceholder: """placeholder for BaseConnector.connect function""" def close(self) -> None: pass class BaseConnector: """Base connector class. keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The total number of simultaneous connections. limit_per_host - Number of simultaneous connections to one host. enable_cleanup_closed - Enables clean-up closed ssl transports. Disabled by default. loop - Optional event loop. """ _closed = True # prevent AttributeError in __del__ if ctor was failed _source_traceback = None # abort transport after 2 seconds (cleanup broken connections) _cleanup_closed_period = 2.0 def __init__( self, *, keepalive_timeout: Union[object, None, float] = sentinel, force_close: bool = False, limit: int = 100, limit_per_host: int = 0, enable_cleanup_closed: bool = False, loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: if force_close: if keepalive_timeout is not None and keepalive_timeout is not sentinel: raise ValueError( "keepalive_timeout cannot " "be set if force_close is True" ) else: if keepalive_timeout is sentinel: keepalive_timeout = 15.0 loop = get_running_loop(loop) self._closed = False if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) self._conns: Dict[ConnectionKey, List[Tuple[ResponseHandler, float]]] = {} self._limit = limit self._limit_per_host = limit_per_host self._acquired: Set[ResponseHandler] = set() self._acquired_per_host: DefaultDict[ ConnectionKey, Set[ResponseHandler] ] = defaultdict(set) self._keepalive_timeout = cast(float, keepalive_timeout) self._force_close = force_close # {host_key: FIFO list of waiters} self._waiters = defaultdict(deque) # type: ignore[var-annotated] self._loop = loop self._factory = functools.partial(ResponseHandler, loop=loop) self.cookies: SimpleCookie[str] = SimpleCookie() # start keep-alive connection cleanup task self._cleanup_handle: Optional[asyncio.TimerHandle] = None # start cleanup closed transports task self._cleanup_closed_handle: Optional[asyncio.TimerHandle] = None self._cleanup_closed_disabled = not enable_cleanup_closed self._cleanup_closed_transports: List[Optional[asyncio.Transport]] = [] self._cleanup_closed() def __del__(self, _warnings: Any = warnings) -> None: if self._closed: return if not self._conns: return conns = [repr(c) for c in self._conns.values()] self._close() if PY_36: kwargs = {"source": self} else: kwargs = {} _warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs) context = { "connector": self, "connections": conns, "message": "Unclosed connector", } if self._source_traceback is not None: context["source_traceback"] = self._source_traceback self._loop.call_exception_handler(context) def __enter__(self) -> "BaseConnector": warnings.warn( '"with Connector():" is deprecated, ' 'use "async with Connector():" instead', DeprecationWarning, ) return self def __exit__(self, *exc: Any) -> None: self._close() async def 
__aenter__(self) -> "BaseConnector": return self async def __aexit__( self, exc_type: Optional[Type[BaseException]] = None, exc_value: Optional[BaseException] = None, exc_traceback: Optional[TracebackType] = None, ) -> None: await self.close() @property def force_close(self) -> bool: """Ultimately close connection on releasing if True.""" return self._force_close @property def limit(self) -> int: """The total number for simultaneous connections. If limit is 0 the connector has no limit. The default limit size is 100. """ return self._limit @property def limit_per_host(self) -> int: """The limit for simultaneous connections to the same endpoint. Endpoints are the same if they are have equal (host, port, is_ssl) triple. """ return self._limit_per_host def _cleanup(self) -> None: """Cleanup unused transports.""" if self._cleanup_handle: self._cleanup_handle.cancel() # _cleanup_handle should be unset, otherwise _release() will not # recreate it ever! self._cleanup_handle = None now = self._loop.time() timeout = self._keepalive_timeout if self._conns: connections = {} deadline = now - timeout for key, conns in self._conns.items(): alive = [] for proto, use_time in conns: if proto.is_connected(): if use_time - deadline < 0: transport = proto.transport proto.close() if key.is_ssl and not self._cleanup_closed_disabled: self._cleanup_closed_transports.append(transport) else: alive.append((proto, use_time)) else: transport = proto.transport proto.close() if key.is_ssl and not self._cleanup_closed_disabled: self._cleanup_closed_transports.append(transport) if alive: connections[key] = alive self._conns = connections if self._conns: self._cleanup_handle = helpers.weakref_handle( self, "_cleanup", timeout, self._loop ) def _drop_acquired_per_host( self, key: "ConnectionKey", val: ResponseHandler ) -> None: acquired_per_host = self._acquired_per_host if key not in acquired_per_host: return conns = acquired_per_host[key] conns.remove(val) if not conns: del self._acquired_per_host[key] def _cleanup_closed(self) -> None: """Double confirmation for transport close. Some broken ssl servers may leave socket open without proper close. """ if self._cleanup_closed_handle: self._cleanup_closed_handle.cancel() for transport in self._cleanup_closed_transports: if transport is not None: transport.abort() self._cleanup_closed_transports = [] if not self._cleanup_closed_disabled: self._cleanup_closed_handle = helpers.weakref_handle( self, "_cleanup_closed", self._cleanup_closed_period, self._loop ) def close(self) -> Awaitable[None]: """Close all opened transports.""" self._close() return _DeprecationWaiter(noop()) def _close(self) -> None: if self._closed: return self._closed = True try: if self._loop.is_closed(): return # cancel cleanup task if self._cleanup_handle: self._cleanup_handle.cancel() # cancel cleanup close task if self._cleanup_closed_handle: self._cleanup_closed_handle.cancel() for data in self._conns.values(): for proto, t0 in data: proto.close() for proto in self._acquired: proto.close() for transport in self._cleanup_closed_transports: if transport is not None: transport.abort() finally: self._conns.clear() self._acquired.clear() self._waiters.clear() self._cleanup_handle = None self._cleanup_closed_transports.clear() self._cleanup_closed_handle = None @property def closed(self) -> bool: """Is connector closed. A readonly property. """ return self._closed def _available_connections(self, key: "ConnectionKey") -> int: """ Return number of available connections. 
The limit, limit_per_host and the connection key are taken into account. If it returns less than 1 means that there are no connections available. """ if self._limit: # total calc available connections available = self._limit - len(self._acquired) # check limit per host if ( self._limit_per_host and available > 0 and key in self._acquired_per_host ): acquired = self._acquired_per_host.get(key) assert acquired is not None available = self._limit_per_host - len(acquired) elif self._limit_per_host and key in self._acquired_per_host: # check limit per host acquired = self._acquired_per_host.get(key) assert acquired is not None available = self._limit_per_host - len(acquired) else: available = 1 return available async def connect( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" ) -> Connection: """Get from pool or create new connection.""" key = req.connection_key available = self._available_connections(key) # Wait if there are no available connections or if there are/were # waiters (i.e. don't steal connection from a waiter about to wake up) if available <= 0 or key in self._waiters: fut = self._loop.create_future() # This connection will now count towards the limit. self._waiters[key].append(fut) if traces: for trace in traces: await trace.send_connection_queued_start() try: await fut except BaseException as e: if key in self._waiters: # remove a waiter even if it was cancelled, normally it's # removed when it's notified try: self._waiters[key].remove(fut) except ValueError: # fut may no longer be in list pass raise e finally: if key in self._waiters and not self._waiters[key]: del self._waiters[key] if traces: for trace in traces: await trace.send_connection_queued_end() proto = self._get(key) if proto is None: placeholder = cast(ResponseHandler, _TransportPlaceholder()) self._acquired.add(placeholder) self._acquired_per_host[key].add(placeholder) if traces: for trace in traces: await trace.send_connection_create_start() try: proto = await self._create_connection(req, traces, timeout) if self._closed: proto.close() raise ClientConnectionError("Connector is closed.") except BaseException: if not self._closed: self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) self._release_waiter() raise else: if not self._closed: self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) if traces: for trace in traces: await trace.send_connection_create_end() else: if traces: # Acquire the connection to prevent race conditions with limits placeholder = cast(ResponseHandler, _TransportPlaceholder()) self._acquired.add(placeholder) self._acquired_per_host[key].add(placeholder) for trace in traces: await trace.send_connection_reuseconn() self._acquired.remove(placeholder) self._drop_acquired_per_host(key, placeholder) self._acquired.add(proto) self._acquired_per_host[key].add(proto) return Connection(self, key, proto, self._loop) def _get(self, key: "ConnectionKey") -> Optional[ResponseHandler]: try: conns = self._conns[key] except KeyError: return None t1 = self._loop.time() while conns: proto, t0 = conns.pop() if proto.is_connected(): if t1 - t0 > self._keepalive_timeout: transport = proto.transport proto.close() # only for SSL transports if key.is_ssl and not self._cleanup_closed_disabled: self._cleanup_closed_transports.append(transport) else: if not conns: # The very last connection was reclaimed: drop the key del self._conns[key] return proto else: transport = proto.transport proto.close() if key.is_ssl and not 
self._cleanup_closed_disabled: self._cleanup_closed_transports.append(transport) # No more connections: drop the key del self._conns[key] return None def _release_waiter(self) -> None: """ Iterates over all waiters until one to be released is found. The one to be released is not finsihed and belongs to a host that has available connections. """ if not self._waiters: return # Having the dict keys ordered this avoids to iterate # at the same order at each call. queues = list(self._waiters.keys()) random.shuffle(queues) for key in queues: if self._available_connections(key) < 1: continue waiters = self._waiters[key] while waiters: waiter = waiters.popleft() if not waiter.done(): waiter.set_result(None) return def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None: if self._closed: # acquired connection is already released on connector closing return try: self._acquired.remove(proto) self._drop_acquired_per_host(key, proto) except KeyError: # pragma: no cover # this may be result of undetermenistic order of objects # finalization due garbage collection. pass else: self._release_waiter() def _release( self, key: "ConnectionKey", protocol: ResponseHandler, *, should_close: bool = False, ) -> None: if self._closed: # acquired connection is already released on connector closing return self._release_acquired(key, protocol) if self._force_close: should_close = True if should_close or protocol.should_close: transport = protocol.transport protocol.close() if key.is_ssl and not self._cleanup_closed_disabled: self._cleanup_closed_transports.append(transport) else: conns = self._conns.get(key) if conns is None: conns = self._conns[key] = [] conns.append((protocol, self._loop.time())) if self._cleanup_handle is None: self._cleanup_handle = helpers.weakref_handle( self, "_cleanup", self._keepalive_timeout, self._loop ) async def _create_connection( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" ) -> ResponseHandler: raise NotImplementedError() class _DNSCacheTable: def __init__(self, ttl: Optional[float] = None) -> None: self._addrs_rr: Dict[Tuple[str, int], Tuple[Iterator[Dict[str, Any]], int]] = {} self._timestamps: Dict[Tuple[str, int], float] = {} self._ttl = ttl def __contains__(self, host: object) -> bool: return host in self._addrs_rr def add(self, key: Tuple[str, int], addrs: List[Dict[str, Any]]) -> None: self._addrs_rr[key] = (cycle(addrs), len(addrs)) if self._ttl: self._timestamps[key] = monotonic() def remove(self, key: Tuple[str, int]) -> None: self._addrs_rr.pop(key, None) if self._ttl: self._timestamps.pop(key, None) def clear(self) -> None: self._addrs_rr.clear() self._timestamps.clear() def next_addrs(self, key: Tuple[str, int]) -> List[Dict[str, Any]]: loop, length = self._addrs_rr[key] addrs = list(islice(loop, length)) # Consume one more element to shift internal state of `cycle` next(loop) return addrs def expired(self, key: Tuple[str, int]) -> bool: if self._ttl is None: return False return self._timestamps[key] + self._ttl < monotonic() class TCPConnector(BaseConnector): """TCP connector. verify_ssl - Set to True to check ssl certifications. fingerprint - Pass the binary sha256 digest of the expected certificate in DER format to verify that the certificate the server presents matches. See also https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning resolver - Enable DNS lookups and use this resolver use_dns_cache - Use memory cache for DNS lookups. 
ttl_dns_cache - Max seconds having cached a DNS entry, None forever. family - socket address family local_addr - local tuple of (host, port) to bind socket to keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The total number of simultaneous connections. limit_per_host - Number of simultaneous connections to one host. enable_cleanup_closed - Enables clean-up closed ssl transports. Disabled by default. loop - Optional event loop. """ def __init__( self, *, verify_ssl: bool = True, fingerprint: Optional[bytes] = None, use_dns_cache: bool = True, ttl_dns_cache: Optional[int] = 10, family: int = 0, ssl_context: Optional[SSLContext] = None, ssl: Union[None, bool, Fingerprint, SSLContext] = None, local_addr: Optional[Tuple[str, int]] = None, resolver: Optional[AbstractResolver] = None, keepalive_timeout: Union[None, float, object] = sentinel, force_close: bool = False, limit: int = 100, limit_per_host: int = 0, enable_cleanup_closed: bool = False, loop: Optional[asyncio.AbstractEventLoop] = None, ): super().__init__( keepalive_timeout=keepalive_timeout, force_close=force_close, limit=limit, limit_per_host=limit_per_host, enable_cleanup_closed=enable_cleanup_closed, loop=loop, ) self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint) if resolver is None: resolver = DefaultResolver(loop=self._loop) self._resolver = resolver self._use_dns_cache = use_dns_cache self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache) self._throttle_dns_events: Dict[Tuple[str, int], EventResultOrError] = {} self._family = family self._local_addr = local_addr def close(self) -> Awaitable[None]: """Close all ongoing DNS calls.""" for ev in self._throttle_dns_events.values(): ev.cancel() return super().close() @property def family(self) -> int: """Socket family like AF_INET.""" return self._family @property def use_dns_cache(self) -> bool: """True if local DNS caching is enabled.""" return self._use_dns_cache def clear_dns_cache( self, host: Optional[str] = None, port: Optional[int] = None ) -> None: """Remove specified host/port or clear all dns local cache.""" if host is not None and port is not None: self._cached_hosts.remove((host, port)) elif host is not None or port is not None: raise ValueError("either both host and port " "or none of them are allowed") else: self._cached_hosts.clear() async def _resolve_host( self, host: str, port: int, traces: Optional[List["Trace"]] = None ) -> List[Dict[str, Any]]: if is_ip_address(host): return [ { "hostname": host, "host": host, "port": port, "family": self._family, "proto": 0, "flags": 0, } ] if not self._use_dns_cache: if traces: for trace in traces: await trace.send_dns_resolvehost_start(host) res = await self._resolver.resolve(host, port, family=self._family) if traces: for trace in traces: await trace.send_dns_resolvehost_end(host) return res key = (host, port) if (key in self._cached_hosts) and (not self._cached_hosts.expired(key)): # get result early, before any await (#4014) result = self._cached_hosts.next_addrs(key) if traces: for trace in traces: await trace.send_dns_cache_hit(host) return result if key in self._throttle_dns_events: # get event early, before any await (#4014) event = self._throttle_dns_events[key] if traces: for trace in traces: await trace.send_dns_cache_hit(host) await event.wait() else: # update dict early, before any await (#4014) self._throttle_dns_events[key] = EventResultOrError(self._loop) if traces: for trace in 
traces: await trace.send_dns_cache_miss(host) try: if traces: for trace in traces: await trace.send_dns_resolvehost_start(host) addrs = await self._resolver.resolve(host, port, family=self._family) if traces: for trace in traces: await trace.send_dns_resolvehost_end(host) self._cached_hosts.add(key, addrs) self._throttle_dns_events[key].set() except BaseException as e: # any DNS exception, independently of the implementation # is set for the waiters to raise the same exception. self._throttle_dns_events[key].set(exc=e) raise finally: self._throttle_dns_events.pop(key) return self._cached_hosts.next_addrs(key) async def _create_connection( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" ) -> ResponseHandler: """Create connection. Has same keyword arguments as BaseEventLoop.create_connection. """ if req.proxy: _, proto = await self._create_proxy_connection(req, traces, timeout) else: _, proto = await self._create_direct_connection(req, traces, timeout) return proto @staticmethod @functools.lru_cache(None) def _make_ssl_context(verified: bool) -> SSLContext: if verified: return ssl.create_default_context() else: sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) sslcontext.options |= ssl.OP_NO_SSLv2 sslcontext.options |= ssl.OP_NO_SSLv3 sslcontext.check_hostname = False sslcontext.verify_mode = ssl.CERT_NONE try: sslcontext.options |= ssl.OP_NO_COMPRESSION except AttributeError as attr_err: warnings.warn( "{!s}: The Python interpreter is compiled " "against OpenSSL < 1.0.0. Ref: " "https://docs.python.org/3/library/ssl.html" "#ssl.OP_NO_COMPRESSION".format(attr_err), ) sslcontext.set_default_verify_paths() return sslcontext def _get_ssl_context(self, req: "ClientRequest") -> Optional[SSLContext]: """Logic to get the correct SSL context 0. if req.ssl is false, return None 1. if ssl_context is specified in req, use it 2. if _ssl_context is specified in self, use it 3. otherwise: 1. if verify_ssl is not specified in req, use self.ssl_context (will generate a default context according to self.verify_ssl) 2. if verify_ssl is True in req, generate a default SSL context 3. 
if verify_ssl is False in req, generate a SSL context that won't verify """ if req.is_ssl(): if ssl is None: # pragma: no cover raise RuntimeError("SSL is not supported.") sslcontext = req.ssl if isinstance(sslcontext, ssl.SSLContext): return sslcontext if sslcontext is not None: # not verified or fingerprinted return self._make_ssl_context(False) sslcontext = self._ssl if isinstance(sslcontext, ssl.SSLContext): return sslcontext if sslcontext is not None: # not verified or fingerprinted return self._make_ssl_context(False) return self._make_ssl_context(True) else: return None def _get_fingerprint(self, req: "ClientRequest") -> Optional["Fingerprint"]: ret = req.ssl if isinstance(ret, Fingerprint): return ret ret = self._ssl if isinstance(ret, Fingerprint): return ret return None async def _wrap_create_connection( self, *args: Any, req: "ClientRequest", timeout: "ClientTimeout", client_error: Type[Exception] = ClientConnectorError, **kwargs: Any, ) -> Tuple[asyncio.Transport, ResponseHandler]: try: async with ceil_timeout(timeout.sock_connect): return await self._loop.create_connection(*args, **kwargs) # type: ignore[return-value] # noqa except cert_errors as exc: raise ClientConnectorCertificateError(req.connection_key, exc) from exc except ssl_errors as exc: raise ClientConnectorSSLError(req.connection_key, exc) from exc except OSError as exc: if exc.errno is None and isinstance(exc, asyncio.TimeoutError): raise raise client_error(req.connection_key, exc) from exc def _fail_on_no_start_tls(self, req: "ClientRequest") -> None: """Raise a :py:exc:`RuntimeError` on missing ``start_tls()``. One case is that :py:meth:`asyncio.loop.start_tls` is not yet implemented under Python 3.6. It is necessary for TLS-in-TLS so that it is possible to send HTTPS queries through HTTPS proxies. This doesn't affect regular HTTP requests, though. """ if not req.is_ssl(): return proxy_url = req.proxy assert proxy_url is not None if proxy_url.scheme != "https": return self._check_loop_for_start_tls() def _check_loop_for_start_tls(self) -> None: try: self._loop.start_tls except AttributeError as attr_exc: raise RuntimeError( "An HTTPS request is being sent through an HTTPS proxy. " "This needs support for TLS in TLS but it is not implemented " "in your runtime for the stdlib asyncio.\n\n" "Please upgrade to Python 3.7 or higher. For more details, " "please see:\n" "* https://bugs.python.org/issue37179\n" "* https://github.com/python/cpython/pull/28073\n" "* https://docs.aiohttp.org/en/stable/" "client_advanced.html#proxy-support\n" "* https://github.com/aio-libs/aiohttp/discussions/6044\n", ) from attr_exc def _loop_supports_start_tls(self) -> bool: try: self._check_loop_for_start_tls() except RuntimeError: return False else: return True def _warn_about_tls_in_tls( self, underlying_transport: asyncio.Transport, req: "ClientRequest", ) -> None: """Issue a warning if the requested URL has HTTPS scheme.""" if req.request_info.url.scheme != "https": return asyncio_supports_tls_in_tls = getattr( underlying_transport, "_start_tls_compatible", False, ) if asyncio_supports_tls_in_tls: return warnings.warn( "An HTTPS request is being sent through an HTTPS proxy. " "This support for TLS in TLS is known to be disabled " "in the stdlib asyncio. This is why you'll probably see " "an error in the log below.\n\n" "It is possible to enable it via monkeypatching under " "Python 3.7 or higher. 
For more details, see:\n" "* https://bugs.python.org/issue37179\n" "* https://github.com/python/cpython/pull/28073\n\n" "You can temporarily patch this as follows:\n" "* https://docs.aiohttp.org/en/stable/client_advanced.html#proxy-support\n" "* https://github.com/aio-libs/aiohttp/discussions/6044\n", RuntimeWarning, source=self, # Why `4`? At least 3 of the calls in the stack originate # from the methods in this class. stacklevel=3, ) async def _start_tls_connection( self, underlying_transport: asyncio.Transport, req: "ClientRequest", timeout: "ClientTimeout", client_error: Type[Exception] = ClientConnectorError, ) -> Tuple[asyncio.BaseTransport, ResponseHandler]: """Wrap the raw TCP transport with TLS.""" tls_proto = self._factory() # Create a brand new proto for TLS # Safety of the `cast()` call here is based on the fact that # internally `_get_ssl_context()` only returns `None` when # `req.is_ssl()` evaluates to `False` which is never gonna happen # in this code path. Of course, it's rather fragile # maintainability-wise but this is to be solved separately. sslcontext = cast(ssl.SSLContext, self._get_ssl_context(req)) try: async with ceil_timeout(timeout.sock_connect): try: tls_transport = await self._loop.start_tls( underlying_transport, tls_proto, sslcontext, server_hostname=req.host, ssl_handshake_timeout=timeout.total, ) except BaseException: # We need to close the underlying transport since # `start_tls()` probably failed before it had a # chance to do this: underlying_transport.close() raise except cert_errors as exc: raise ClientConnectorCertificateError(req.connection_key, exc) from exc except ssl_errors as exc: raise ClientConnectorSSLError(req.connection_key, exc) from exc except OSError as exc: if exc.errno is None and isinstance(exc, asyncio.TimeoutError): raise raise client_error(req.connection_key, exc) from exc except TypeError as type_err: # Example cause looks like this: # TypeError: transport <asyncio.sslproto._SSLProtocolTransport # object at 0x7f760615e460> is not supported by start_tls() raise ClientConnectionError( "Cannot initialize a TLS-in-TLS connection to host " f"{req.host!s}:{req.port:d} through an underlying connection " f"to an HTTPS proxy {req.proxy!s} ssl:{req.ssl or 'default'} " f"[{type_err!s}]" ) from type_err else: tls_proto.connection_made( tls_transport ) # Kick the state machine of the new TLS protocol return tls_transport, tls_proto async def _create_direct_connection( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout", *, client_error: Type[Exception] = ClientConnectorError, ) -> Tuple[asyncio.Transport, ResponseHandler]: sslcontext = self._get_ssl_context(req) fingerprint = self._get_fingerprint(req) host = req.url.raw_host assert host is not None port = req.port assert port is not None host_resolved = asyncio.ensure_future( self._resolve_host(host, port, traces=traces), loop=self._loop ) try: # Cancelling this lookup should not cancel the underlying lookup # or else the cancel event will get broadcast to all the waiters # across all connections. 
hosts = await asyncio.shield(host_resolved) except asyncio.CancelledError: def drop_exception(fut: "asyncio.Future[List[Dict[str, Any]]]") -> None: with suppress(Exception, asyncio.CancelledError): fut.result() host_resolved.add_done_callback(drop_exception) raise except OSError as exc: if exc.errno is None and isinstance(exc, asyncio.TimeoutError): raise # in case of proxy it is not ClientProxyConnectionError # it is problem of resolving proxy ip itself raise ClientConnectorError(req.connection_key, exc) from exc last_exc: Optional[Exception] = None for hinfo in hosts: host = hinfo["host"] port = hinfo["port"] try: transp, proto = await self._wrap_create_connection( self._factory, host, port, timeout=timeout, ssl=sslcontext, family=hinfo["family"], proto=hinfo["proto"], flags=hinfo["flags"], server_hostname=hinfo["hostname"] if sslcontext else None, local_addr=self._local_addr, req=req, client_error=client_error, ) except ClientConnectorError as exc: last_exc = exc continue if req.is_ssl() and fingerprint: try: fingerprint.check(transp) except ServerFingerprintMismatch as exc: transp.close() if not self._cleanup_closed_disabled: self._cleanup_closed_transports.append(transp) last_exc = exc continue return transp, proto else: assert last_exc is not None raise last_exc async def _create_proxy_connection( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" ) -> Tuple[asyncio.BaseTransport, ResponseHandler]: self._fail_on_no_start_tls(req) runtime_has_start_tls = self._loop_supports_start_tls() headers: Dict[str, str] = {} if req.proxy_headers is not None: headers = req.proxy_headers # type: ignore[assignment] headers[hdrs.HOST] = req.headers[hdrs.HOST] url = req.proxy assert url is not None proxy_req = ClientRequest( hdrs.METH_GET, url, headers=headers, auth=req.proxy_auth, loop=self._loop, ssl=req.ssl, ) # create connection to proxy server transport, proto = await self._create_direct_connection( proxy_req, [], timeout, client_error=ClientProxyConnectionError ) # Many HTTP proxies has buggy keepalive support. Let's not # reuse connection but close it after processing every # response. proto.force_close() auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None) if auth is not None: if not req.is_ssl(): req.headers[hdrs.PROXY_AUTHORIZATION] = auth else: proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth if req.is_ssl(): if runtime_has_start_tls: self._warn_about_tls_in_tls(transport, req) # For HTTPS requests over HTTP proxy # we must notify proxy to tunnel connection # so we send CONNECT command: # CONNECT www.python.org:443 HTTP/1.1 # Host: www.python.org # # next we must do TLS handshake and so on # to do this we must wrap raw socket into secure one # asyncio handles this perfectly proxy_req.method = hdrs.METH_CONNECT proxy_req.url = req.url key = attr.evolve( req.connection_key, proxy=None, proxy_auth=None, proxy_headers_hash=None ) conn = Connection(self, key, proto, self._loop) proxy_resp = await proxy_req.send(conn) try: protocol = conn._protocol assert protocol is not None # read_until_eof=True will ensure the connection isn't closed # once the response is received and processed allowing # START_TLS to work on the connection below. 
protocol.set_response_params(read_until_eof=runtime_has_start_tls) resp = await proxy_resp.start(conn) except BaseException: proxy_resp.close() conn.close() raise else: conn._protocol = None conn._transport = None try: if resp.status != 200: message = resp.reason if message is None: message = RESPONSES[resp.status][0] raise ClientHttpProxyError( proxy_resp.request_info, resp.history, status=resp.status, message=message, headers=resp.headers, ) if not runtime_has_start_tls: rawsock = transport.get_extra_info("socket", default=None) if rawsock is None: raise RuntimeError( "Transport does not expose socket instance" ) # Duplicate the socket, so now we can close proxy transport rawsock = rawsock.dup() except BaseException: # It shouldn't be closed in `finally` because it's fed to # `loop.start_tls()` and the docs say not to touch it after # passing there. transport.close() raise finally: if not runtime_has_start_tls: transport.close() if not runtime_has_start_tls: # HTTP proxy with support for upgrade to HTTPS sslcontext = self._get_ssl_context(req) return await self._wrap_create_connection( self._factory, timeout=timeout, ssl=sslcontext, sock=rawsock, server_hostname=req.host, req=req, ) return await self._start_tls_connection( # Access the old transport for the last time before it's # closed and forgotten forever: transport, req=req, timeout=timeout, ) finally: proxy_resp.close() return transport, proto class UnixConnector(BaseConnector): """Unix socket connector. path - Unix socket path. keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The total number of simultaneous connections. limit_per_host - Number of simultaneous connections to one host. loop - Optional event loop. """ def __init__( self, path: str, force_close: bool = False, keepalive_timeout: Union[object, float, None] = sentinel, limit: int = 100, limit_per_host: int = 0, loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: super().__init__( force_close=force_close, keepalive_timeout=keepalive_timeout, limit=limit, limit_per_host=limit_per_host, loop=loop, ) self._path = path @property def path(self) -> str: """Path to unix socket.""" return self._path async def _create_connection( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" ) -> ResponseHandler: try: async with ceil_timeout(timeout.sock_connect): _, proto = await self._loop.create_unix_connection( self._factory, self._path ) except OSError as exc: if exc.errno is None and isinstance(exc, asyncio.TimeoutError): raise raise UnixClientConnectorError(self.path, req.connection_key, exc) from exc return cast(ResponseHandler, proto) class NamedPipeConnector(BaseConnector): """Named pipe connector. Only supported by the proactor event loop. See also: https://docs.python.org/3.7/library/asyncio-eventloop.html path - Windows named pipe path. keepalive_timeout - (optional) Keep-alive timeout. force_close - Set to True to force close and do reconnect after each request (and between redirects). limit - The total number of simultaneous connections. limit_per_host - Number of simultaneous connections to one host. loop - Optional event loop. 
""" def __init__( self, path: str, force_close: bool = False, keepalive_timeout: Union[object, float, None] = sentinel, limit: int = 100, limit_per_host: int = 0, loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: super().__init__( force_close=force_close, keepalive_timeout=keepalive_timeout, limit=limit, limit_per_host=limit_per_host, loop=loop, ) if not isinstance( self._loop, asyncio.ProactorEventLoop # type: ignore[attr-defined] ): raise RuntimeError( "Named Pipes only available in proactor " "loop under windows" ) self._path = path @property def path(self) -> str: """Path to the named pipe.""" return self._path async def _create_connection( self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout" ) -> ResponseHandler: try: async with ceil_timeout(timeout.sock_connect): _, proto = await self._loop.create_pipe_connection( # type: ignore[attr-defined] # noqa: E501 self._factory, self._path ) # the drain is required so that the connection_made is called # and transport is set otherwise it is not set before the # `assert conn.transport is not None` # in client.py's _request method await asyncio.sleep(0) # other option is to manually set transport like # `proto.transport = trans` except OSError as exc: if exc.errno is None and isinstance(exc, asyncio.TimeoutError): raise raise ClientConnectorError(req.connection_key, exc) from exc return cast(ResponseHandler, proto)
51,177
Python
34.198074
112
0.55566
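A minimal usage sketch for the TCPConnector options documented in the record above (ttl_dns_cache, limit, limit_per_host, clear_dns_cache). It assumes the standard aiohttp.ClientSession entry point, which is not part of this file; the URL, host, and numbers are hypothetical.

import asyncio

import aiohttp


async def main() -> None:
    # Cache DNS answers for 60 s, cap total connections at 100 and per-host at 10.
    connector = aiohttp.TCPConnector(ttl_dns_cache=60, limit=100, limit_per_host=10)
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://example.org/") as resp:  # hypothetical URL
            print(resp.status)
        # Drop any cached DNS entries for a specific (host, port) pair.
        connector.clear_dns_cache("example.org", 443)


asyncio.run(main())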
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_middlewares.py
import re from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar from .typedefs import Handler from .web_exceptions import HTTPPermanentRedirect, _HTTPMove from .web_request import Request from .web_response import StreamResponse from .web_urldispatcher import SystemRoute __all__ = ( "middleware", "normalize_path_middleware", ) if TYPE_CHECKING: # pragma: no cover from .web_app import Application _Func = TypeVar("_Func") async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]: alt_request = request.clone(rel_url=path) match_info = await request.app.router.resolve(alt_request) alt_request._match_info = match_info if match_info.http_exception is None: return True, alt_request return False, request def middleware(f: _Func) -> _Func: f.__middleware_version__ = 1 # type: ignore[attr-defined] return f _Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]] def normalize_path_middleware( *, append_slash: bool = True, remove_slash: bool = False, merge_slashes: bool = True, redirect_class: Type[_HTTPMove] = HTTPPermanentRedirect, ) -> _Middleware: """Factory for producing a middleware that normalizes the path of a request. Normalizing means: - Add or remove a trailing slash to the path. - Double slashes are replaced by one. The middleware returns as soon as it finds a path that resolves correctly. The order if both merge and append/remove are enabled is 1) merge slashes 2) append/remove slash 3) both merge slashes and append/remove slash. If the path resolves with at least one of those conditions, it will redirect to the new path. Only one of `append_slash` and `remove_slash` can be enabled. If both are `True` the factory will raise an assertion error If `append_slash` is `True` the middleware will append a slash when needed. If a resource is defined with trailing slash and the request comes without it, it will append it automatically. If `remove_slash` is `True`, `append_slash` must be `False`. When enabled the middleware will remove trailing slashes and redirect if the resource is defined If merge_slashes is True, merge multiple consecutive slashes in the path into one. """ correct_configuration = not (append_slash and remove_slash) assert correct_configuration, "Cannot both remove and append slash" @middleware async def impl(request: Request, handler: Handler) -> StreamResponse: if isinstance(request.match_info.route, SystemRoute): paths_to_check = [] if "?" in request.raw_path: path, query = request.raw_path.split("?", 1) query = "?" + query else: query = "" path = request.raw_path if merge_slashes: paths_to_check.append(re.sub("//+", "/", path)) if append_slash and not request.path.endswith("/"): paths_to_check.append(path + "/") if remove_slash and request.path.endswith("/"): paths_to_check.append(path[:-1]) if merge_slashes and append_slash: paths_to_check.append(re.sub("//+", "/", path + "/")) if merge_slashes and remove_slash: merged_slashes = re.sub("//+", "/", path) paths_to_check.append(merged_slashes[:-1]) for path in paths_to_check: path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg resolves, request = await _check_request_resolves(request, path) if resolves: raise redirect_class(request.raw_path + query) return await handler(request) return impl def _fix_request_current_app(app: "Application") -> _Middleware: @middleware async def impl(request: Request, handler: Handler) -> StreamResponse: with request.match_info.set_current_app(app): return await handler(request) return impl
4,137
Python
33.483333
87
0.641528
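A minimal sketch of wiring the normalize_path_middleware factory above into an application. It assumes the aiohttp.web entry points (Application, run_app), which are outside this file; the route and port are hypothetical.

from aiohttp import web


async def handler(request: web.Request) -> web.Response:
    return web.Response(text="ok")


app = web.Application(
    middlewares=[
        # Merge duplicate slashes and append a trailing slash when that resolves.
        web.normalize_path_middleware(append_slash=True, merge_slashes=True),
    ]
)
app.router.add_get("/items/", handler)

if __name__ == "__main__":
    web.run_app(app, port=8080)  # hypothetical port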
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/tcp_helpers.py
"""Helper methods to tune a TCP connection""" import asyncio import socket from contextlib import suppress from typing import Optional # noqa __all__ = ("tcp_keepalive", "tcp_nodelay") if hasattr(socket, "SO_KEEPALIVE"): def tcp_keepalive(transport: asyncio.Transport) -> None: sock = transport.get_extra_info("socket") if sock is not None: sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) else: def tcp_keepalive(transport: asyncio.Transport) -> None: # pragma: no cover pass def tcp_nodelay(transport: asyncio.Transport, value: bool) -> None: sock = transport.get_extra_info("socket") if sock is None: return if sock.family not in (socket.AF_INET, socket.AF_INET6): return value = bool(value) # socket may be closed already, on windows OSError get raised with suppress(OSError): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, value)
961
Python
24.315789
80
0.674298
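A minimal sketch of the two helpers above applied inside a custom asyncio protocol. Note that aiohttp.tcp_helpers is an internal module, so importing it directly is shown here only for illustration.

import asyncio

from aiohttp.tcp_helpers import tcp_keepalive, tcp_nodelay  # internal helpers


class EchoProtocol(asyncio.Protocol):
    def connection_made(self, transport: asyncio.Transport) -> None:
        # Disable Nagle's algorithm and enable SO_KEEPALIVE on the underlying socket.
        tcp_nodelay(transport, True)
        tcp_keepalive(transport)
        self.transport = transport

    def data_received(self, data: bytes) -> None:
        self.transport.write(data)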
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/formdata.py
import io from typing import Any, Iterable, List, Optional from urllib.parse import urlencode from multidict import MultiDict, MultiDictProxy from . import hdrs, multipart, payload from .helpers import guess_filename from .payload import Payload __all__ = ("FormData",) class FormData: """Helper class for form body generation. Supports multipart/form-data and application/x-www-form-urlencoded. """ def __init__( self, fields: Iterable[Any] = (), quote_fields: bool = True, charset: Optional[str] = None, ) -> None: self._writer = multipart.MultipartWriter("form-data") self._fields: List[Any] = [] self._is_multipart = False self._is_processed = False self._quote_fields = quote_fields self._charset = charset if isinstance(fields, dict): fields = list(fields.items()) elif not isinstance(fields, (list, tuple)): fields = (fields,) self.add_fields(*fields) @property def is_multipart(self) -> bool: return self._is_multipart def add_field( self, name: str, value: Any, *, content_type: Optional[str] = None, filename: Optional[str] = None, content_transfer_encoding: Optional[str] = None, ) -> None: if isinstance(value, io.IOBase): self._is_multipart = True elif isinstance(value, (bytes, bytearray, memoryview)): if filename is None and content_transfer_encoding is None: filename = name type_options: MultiDict[str] = MultiDict({"name": name}) if filename is not None and not isinstance(filename, str): raise TypeError( "filename must be an instance of str. " "Got: %s" % filename ) if filename is None and isinstance(value, io.IOBase): filename = guess_filename(value, name) if filename is not None: type_options["filename"] = filename self._is_multipart = True headers = {} if content_type is not None: if not isinstance(content_type, str): raise TypeError( "content_type must be an instance of str. " "Got: %s" % content_type ) headers[hdrs.CONTENT_TYPE] = content_type self._is_multipart = True if content_transfer_encoding is not None: if not isinstance(content_transfer_encoding, str): raise TypeError( "content_transfer_encoding must be an instance" " of str. 
Got: %s" % content_transfer_encoding ) headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding self._is_multipart = True self._fields.append((type_options, headers, value)) def add_fields(self, *fields: Any) -> None: to_add = list(fields) while to_add: rec = to_add.pop(0) if isinstance(rec, io.IOBase): k = guess_filename(rec, "unknown") self.add_field(k, rec) # type: ignore[arg-type] elif isinstance(rec, (MultiDictProxy, MultiDict)): to_add.extend(rec.items()) elif isinstance(rec, (list, tuple)) and len(rec) == 2: k, fp = rec self.add_field(k, fp) # type: ignore[arg-type] else: raise TypeError( "Only io.IOBase, multidict and (name, file) " "pairs allowed, use .add_field() for passing " "more complex parameters, got {!r}".format(rec) ) def _gen_form_urlencoded(self) -> payload.BytesPayload: # form data (x-www-form-urlencoded) data = [] for type_options, _, value in self._fields: data.append((type_options["name"], value)) charset = self._charset if self._charset is not None else "utf-8" if charset == "utf-8": content_type = "application/x-www-form-urlencoded" else: content_type = "application/x-www-form-urlencoded; " "charset=%s" % charset return payload.BytesPayload( urlencode(data, doseq=True, encoding=charset).encode(), content_type=content_type, ) def _gen_form_data(self) -> multipart.MultipartWriter: """Encode a list of fields using the multipart/form-data MIME format""" if self._is_processed: raise RuntimeError("Form data has been processed already") for dispparams, headers, value in self._fields: try: if hdrs.CONTENT_TYPE in headers: part = payload.get_payload( value, content_type=headers[hdrs.CONTENT_TYPE], headers=headers, encoding=self._charset, ) else: part = payload.get_payload( value, headers=headers, encoding=self._charset ) except Exception as exc: raise TypeError( "Can not serialize value type: %r\n " "headers: %r\n value: %r" % (type(value), headers, value) ) from exc if dispparams: part.set_content_disposition( "form-data", quote_fields=self._quote_fields, **dispparams ) # FIXME cgi.FieldStorage doesn't likes body parts with # Content-Length which were sent via chunked transfer encoding assert part.headers is not None part.headers.popall(hdrs.CONTENT_LENGTH, None) self._writer.append_payload(part) self._is_processed = True return self._writer def __call__(self) -> Payload: if self._is_multipart: return self._gen_form_data() else: return self._gen_form_urlencoded()
6,106
Python
34.300578
88
0.546348
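A minimal sketch of building a request body with the FormData helper above; the field names and file are hypothetical, and the resulting payload would normally be passed as the data argument of a client request.

from aiohttp import FormData

form = FormData()
form.add_field("user", "alice")  # plain text field; stays urlencoded unless a file is added
form.add_field(
    "avatar",
    open("avatar.png", "rb"),  # hypothetical file; an io.IOBase value switches to multipart
    filename="avatar.png",
    content_type="image/png",
)

payload = form()          # multipart writer here, because a file field was added
print(form.is_multipart)  # True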
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_routedef.py
import abc import os # noqa from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Optional, Sequence, Type, Union, overload, ) import attr from . import hdrs from .abc import AbstractView from .typedefs import Handler, PathLike if TYPE_CHECKING: # pragma: no cover from .web_request import Request from .web_response import StreamResponse from .web_urldispatcher import AbstractRoute, UrlDispatcher else: Request = StreamResponse = UrlDispatcher = AbstractRoute = None __all__ = ( "AbstractRouteDef", "RouteDef", "StaticDef", "RouteTableDef", "head", "options", "get", "post", "patch", "put", "delete", "route", "view", "static", ) class AbstractRouteDef(abc.ABC): @abc.abstractmethod def register(self, router: UrlDispatcher) -> List[AbstractRoute]: pass # pragma: no cover _HandlerType = Union[Type[AbstractView], Handler] @attr.s(auto_attribs=True, frozen=True, repr=False, slots=True) class RouteDef(AbstractRouteDef): method: str path: str handler: _HandlerType kwargs: Dict[str, Any] def __repr__(self) -> str: info = [] for name, value in sorted(self.kwargs.items()): info.append(f", {name}={value!r}") return "<RouteDef {method} {path} -> {handler.__name__!r}" "{info}>".format( method=self.method, path=self.path, handler=self.handler, info="".join(info) ) def register(self, router: UrlDispatcher) -> List[AbstractRoute]: if self.method in hdrs.METH_ALL: reg = getattr(router, "add_" + self.method.lower()) return [reg(self.path, self.handler, **self.kwargs)] else: return [ router.add_route(self.method, self.path, self.handler, **self.kwargs) ] @attr.s(auto_attribs=True, frozen=True, repr=False, slots=True) class StaticDef(AbstractRouteDef): prefix: str path: PathLike kwargs: Dict[str, Any] def __repr__(self) -> str: info = [] for name, value in sorted(self.kwargs.items()): info.append(f", {name}={value!r}") return "<StaticDef {prefix} -> {path}" "{info}>".format( prefix=self.prefix, path=self.path, info="".join(info) ) def register(self, router: UrlDispatcher) -> List[AbstractRoute]: resource = router.add_static(self.prefix, self.path, **self.kwargs) routes = resource.get_info().get("routes", {}) return list(routes.values()) def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return RouteDef(method, path, handler, kwargs) def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return route(hdrs.METH_HEAD, path, handler, **kwargs) def options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return route(hdrs.METH_OPTIONS, path, handler, **kwargs) def get( path: str, handler: _HandlerType, *, name: Optional[str] = None, allow_head: bool = True, **kwargs: Any, ) -> RouteDef: return route( hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs ) def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return route(hdrs.METH_POST, path, handler, **kwargs) def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return route(hdrs.METH_PUT, path, handler, **kwargs) def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return route(hdrs.METH_PATCH, path, handler, **kwargs) def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef: return route(hdrs.METH_DELETE, path, handler, **kwargs) def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef: return route(hdrs.METH_ANY, path, handler, **kwargs) def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef: return StaticDef(prefix, path, kwargs) _Deco = Callable[[_HandlerType], 
_HandlerType] class RouteTableDef(Sequence[AbstractRouteDef]): """Route definition table""" def __init__(self) -> None: self._items: List[AbstractRouteDef] = [] def __repr__(self) -> str: return f"<RouteTableDef count={len(self._items)}>" @overload def __getitem__(self, index: int) -> AbstractRouteDef: ... @overload def __getitem__(self, index: slice) -> List[AbstractRouteDef]: ... def __getitem__(self, index): # type: ignore[no-untyped-def] return self._items[index] def __iter__(self) -> Iterator[AbstractRouteDef]: return iter(self._items) def __len__(self) -> int: return len(self._items) def __contains__(self, item: object) -> bool: return item in self._items def route(self, method: str, path: str, **kwargs: Any) -> _Deco: def inner(handler: _HandlerType) -> _HandlerType: self._items.append(RouteDef(method, path, handler, kwargs)) return handler return inner def head(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_HEAD, path, **kwargs) def get(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_GET, path, **kwargs) def post(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_POST, path, **kwargs) def put(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_PUT, path, **kwargs) def patch(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_PATCH, path, **kwargs) def delete(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_DELETE, path, **kwargs) def options(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_OPTIONS, path, **kwargs) def view(self, path: str, **kwargs: Any) -> _Deco: return self.route(hdrs.METH_ANY, path, **kwargs) def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None: self._items.append(StaticDef(prefix, path, kwargs))
6,152
Python
27.354839
88
0.617685
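A minimal sketch of the RouteTableDef decorator style defined above. It assumes the aiohttp.web Application and add_routes entry points from outside this file; paths and handlers are hypothetical.

from aiohttp import web

routes = web.RouteTableDef()


@routes.get("/")
async def index(request: web.Request) -> web.Response:
    return web.Response(text="hello")


@routes.post("/items")
async def create_item(request: web.Request) -> web.Response:
    data = await request.json()
    return web.json_response(data, status=201)


app = web.Application()
app.add_routes(routes)  # registers every RouteDef collected by the decorators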
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http.py
import http.server
import sys
from typing import Mapping, Tuple

from . import __version__
from .http_exceptions import HttpProcessingError as HttpProcessingError
from .http_parser import (
    HeadersParser as HeadersParser,
    HttpParser as HttpParser,
    HttpRequestParser as HttpRequestParser,
    HttpResponseParser as HttpResponseParser,
    RawRequestMessage as RawRequestMessage,
    RawResponseMessage as RawResponseMessage,
)
from .http_websocket import (
    WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE,
    WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE,
    WS_KEY as WS_KEY,
    WebSocketError as WebSocketError,
    WebSocketReader as WebSocketReader,
    WebSocketWriter as WebSocketWriter,
    WSCloseCode as WSCloseCode,
    WSMessage as WSMessage,
    WSMsgType as WSMsgType,
    ws_ext_gen as ws_ext_gen,
    ws_ext_parse as ws_ext_parse,
)
from .http_writer import (
    HttpVersion as HttpVersion,
    HttpVersion10 as HttpVersion10,
    HttpVersion11 as HttpVersion11,
    StreamWriter as StreamWriter,
)

__all__ = (
    "HttpProcessingError",
    "RESPONSES",
    "SERVER_SOFTWARE",
    # .http_writer
    "StreamWriter",
    "HttpVersion",
    "HttpVersion10",
    "HttpVersion11",
    # .http_parser
    "HeadersParser",
    "HttpParser",
    "HttpRequestParser",
    "HttpResponseParser",
    "RawRequestMessage",
    "RawResponseMessage",
    # .http_websocket
    "WS_CLOSED_MESSAGE",
    "WS_CLOSING_MESSAGE",
    "WS_KEY",
    "WebSocketReader",
    "WebSocketWriter",
    "ws_ext_gen",
    "ws_ext_parse",
    "WSMessage",
    "WebSocketError",
    "WSMsgType",
    "WSCloseCode",
)


SERVER_SOFTWARE: str = "Python/{0[0]}.{0[1]} aiohttp/{1}".format(
    sys.version_info, __version__
)

RESPONSES: Mapping[int, Tuple[str, str]] = http.server.BaseHTTPRequestHandler.responses
1,800
Python
24.366197
87
0.698333
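A minimal sketch of the module-level constants above; the printed values depend on the interpreter and aiohttp version, so they are indicative only.

from aiohttp.http import RESPONSES, SERVER_SOFTWARE, HttpVersion11

print(SERVER_SOFTWARE)            # e.g. "Python/3.7 aiohttp/3.8.1" (values vary)
reason, explanation = RESPONSES[404]
print(reason)                     # "Not Found", taken from http.server's response table
print(HttpVersion11)              # HttpVersion(major=1, minor=1)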
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/helpers.py
"""Various helper functions""" import asyncio import base64 import binascii import datetime import functools import inspect import netrc import os import platform import re import sys import time import warnings import weakref from collections import namedtuple from contextlib import suppress from email.parser import HeaderParser from email.utils import parsedate from math import ceil from pathlib import Path from types import TracebackType from typing import ( Any, Callable, ContextManager, Dict, Generator, Generic, Iterable, Iterator, List, Mapping, Optional, Pattern, Set, Tuple, Type, TypeVar, Union, cast, ) from urllib.parse import quote from urllib.request import getproxies, proxy_bypass import async_timeout import attr from multidict import MultiDict, MultiDictProxy from yarl import URL from . import hdrs from .log import client_logger, internal_logger from .typedefs import PathLike, Protocol # noqa __all__ = ("BasicAuth", "ChainMapProxy", "ETag") IS_MACOS = platform.system() == "Darwin" IS_WINDOWS = platform.system() == "Windows" PY_36 = sys.version_info >= (3, 6) PY_37 = sys.version_info >= (3, 7) PY_38 = sys.version_info >= (3, 8) PY_310 = sys.version_info >= (3, 10) if sys.version_info < (3, 7): import idna_ssl idna_ssl.patch_match_hostname() def all_tasks( loop: Optional[asyncio.AbstractEventLoop] = None, ) -> Set["asyncio.Task[Any]"]: tasks = list(asyncio.Task.all_tasks(loop)) return {t for t in tasks if not t.done()} else: all_tasks = asyncio.all_tasks _T = TypeVar("_T") _S = TypeVar("_S") sentinel: Any = object() NO_EXTENSIONS: bool = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS")) # N.B. sys.flags.dev_mode is available on Python 3.7+, use getattr # for compatibility with older versions DEBUG: bool = getattr(sys.flags, "dev_mode", False) or ( not sys.flags.ignore_environment and bool(os.environ.get("PYTHONASYNCIODEBUG")) ) CHAR = {chr(i) for i in range(0, 128)} CTL = {chr(i) for i in range(0, 32)} | { chr(127), } SEPARATORS = { "(", ")", "<", ">", "@", ",", ";", ":", "\\", '"', "/", "[", "]", "?", "=", "{", "}", " ", chr(9), } TOKEN = CHAR ^ CTL ^ SEPARATORS class noop: def __await__(self) -> Generator[None, None, None]: yield class BasicAuth(namedtuple("BasicAuth", ["login", "password", "encoding"])): """Http basic authentication helper.""" def __new__( cls, login: str, password: str = "", encoding: str = "latin1" ) -> "BasicAuth": if login is None: raise ValueError("None is not allowed as login value") if password is None: raise ValueError("None is not allowed as password value") if ":" in login: raise ValueError('A ":" is not allowed in login (RFC 1945#section-11.1)') return super().__new__(cls, login, password, encoding) @classmethod def decode(cls, auth_header: str, encoding: str = "latin1") -> "BasicAuth": """Create a BasicAuth object from an Authorization HTTP header.""" try: auth_type, encoded_credentials = auth_header.split(" ", 1) except ValueError: raise ValueError("Could not parse authorization header.") if auth_type.lower() != "basic": raise ValueError("Unknown authorization method %s" % auth_type) try: decoded = base64.b64decode( encoded_credentials.encode("ascii"), validate=True ).decode(encoding) except binascii.Error: raise ValueError("Invalid base64 encoding.") try: # RFC 2617 HTTP Authentication # https://www.ietf.org/rfc/rfc2617.txt # the colon must be present, but the username and password may be # otherwise blank. 
username, password = decoded.split(":", 1) except ValueError: raise ValueError("Invalid credentials.") return cls(username, password, encoding=encoding) @classmethod def from_url(cls, url: URL, *, encoding: str = "latin1") -> Optional["BasicAuth"]: """Create BasicAuth from url.""" if not isinstance(url, URL): raise TypeError("url should be yarl.URL instance") if url.user is None: return None return cls(url.user, url.password or "", encoding=encoding) def encode(self) -> str: """Encode credentials.""" creds = (f"{self.login}:{self.password}").encode(self.encoding) return "Basic %s" % base64.b64encode(creds).decode(self.encoding) def strip_auth_from_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]: auth = BasicAuth.from_url(url) if auth is None: return url, None else: return url.with_user(None), auth def netrc_from_env() -> Optional[netrc.netrc]: """Load netrc from file. Attempt to load it from the path specified by the env-var NETRC or in the default location in the user's home directory. Returns None if it couldn't be found or fails to parse. """ netrc_env = os.environ.get("NETRC") if netrc_env is not None: netrc_path = Path(netrc_env) else: try: home_dir = Path.home() except RuntimeError as e: # pragma: no cover # if pathlib can't resolve home, it may raise a RuntimeError client_logger.debug( "Could not resolve home directory when " "trying to look for .netrc file: %s", e, ) return None netrc_path = home_dir / ("_netrc" if IS_WINDOWS else ".netrc") try: return netrc.netrc(str(netrc_path)) except netrc.NetrcParseError as e: client_logger.warning("Could not parse .netrc file: %s", e) except OSError as e: # we couldn't read the file (doesn't exist, permissions, etc.) if netrc_env or netrc_path.is_file(): # only warn if the environment wanted us to load it, # or it appears like the default file does actually exist client_logger.warning("Could not read .netrc file: %s", e) return None @attr.s(auto_attribs=True, frozen=True, slots=True) class ProxyInfo: proxy: URL proxy_auth: Optional[BasicAuth] def proxies_from_env() -> Dict[str, ProxyInfo]: proxy_urls = { k: URL(v) for k, v in getproxies().items() if k in ("http", "https", "ws", "wss") } netrc_obj = netrc_from_env() stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()} ret = {} for proto, val in stripped.items(): proxy, auth = val if proxy.scheme in ("https", "wss"): client_logger.warning( "%s proxies %s are not supported, ignoring", proxy.scheme.upper(), proxy ) continue if netrc_obj and auth is None: auth_from_netrc = None if proxy.host is not None: auth_from_netrc = netrc_obj.authenticators(proxy.host) if auth_from_netrc is not None: # auth_from_netrc is a (`user`, `account`, `password`) tuple, # `user` and `account` both can be username, # if `user` is None, use `account` *logins, password = auth_from_netrc login = logins[0] if logins[0] else logins[-1] auth = BasicAuth(cast(str, login), cast(str, password)) ret[proto] = ProxyInfo(proxy, auth) return ret def current_task( loop: Optional[asyncio.AbstractEventLoop] = None, ) -> "Optional[asyncio.Task[Any]]": if sys.version_info >= (3, 7): return asyncio.current_task(loop=loop) else: return asyncio.Task.current_task(loop=loop) def get_running_loop( loop: Optional[asyncio.AbstractEventLoop] = None, ) -> asyncio.AbstractEventLoop: if loop is None: loop = asyncio.get_event_loop() if not loop.is_running(): warnings.warn( "The object should be created within an async function", DeprecationWarning, stacklevel=3, ) if loop.get_debug(): internal_logger.warning( "The object should be 
created within an async function", stack_info=True ) return loop def isasyncgenfunction(obj: Any) -> bool: func = getattr(inspect, "isasyncgenfunction", None) if func is not None: return func(obj) # type: ignore[no-any-return] else: return False def get_env_proxy_for_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]: """Get a permitted proxy for the given URL from the env.""" if url.host is not None and proxy_bypass(url.host): raise LookupError(f"Proxying is disallowed for `{url.host!r}`") proxies_in_env = proxies_from_env() try: proxy_info = proxies_in_env[url.scheme] except KeyError: raise LookupError(f"No proxies found for `{url!s}` in the env") else: return proxy_info.proxy, proxy_info.proxy_auth @attr.s(auto_attribs=True, frozen=True, slots=True) class MimeType: type: str subtype: str suffix: str parameters: "MultiDictProxy[str]" @functools.lru_cache(maxsize=56) def parse_mimetype(mimetype: str) -> MimeType: """Parses a MIME type into its components. mimetype is a MIME type string. Returns a MimeType object. Example: >>> parse_mimetype('text/html; charset=utf-8') MimeType(type='text', subtype='html', suffix='', parameters={'charset': 'utf-8'}) """ if not mimetype: return MimeType( type="", subtype="", suffix="", parameters=MultiDictProxy(MultiDict()) ) parts = mimetype.split(";") params: MultiDict[str] = MultiDict() for item in parts[1:]: if not item: continue key, value = cast( Tuple[str, str], item.split("=", 1) if "=" in item else (item, "") ) params.add(key.lower().strip(), value.strip(' "')) fulltype = parts[0].strip().lower() if fulltype == "*": fulltype = "*/*" mtype, stype = ( cast(Tuple[str, str], fulltype.split("/", 1)) if "/" in fulltype else (fulltype, "") ) stype, suffix = ( cast(Tuple[str, str], stype.split("+", 1)) if "+" in stype else (stype, "") ) return MimeType( type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params) ) def guess_filename(obj: Any, default: Optional[str] = None) -> Optional[str]: name = getattr(obj, "name", None) if name and isinstance(name, str) and name[0] != "<" and name[-1] != ">": return Path(name).name return default not_qtext_re = re.compile(r"[^\041\043-\133\135-\176]") QCONTENT = {chr(i) for i in range(0x20, 0x7F)} | {"\t"} def quoted_string(content: str) -> str: """Return 7-bit content as quoted-string. Format content into a quoted-string as defined in RFC5322 for Internet Message Format. Notice that this is not the 8-bit HTTP format, but the 7-bit email format. Content must be in usascii or a ValueError is raised. """ if not (QCONTENT > set(content)): raise ValueError(f"bad content for quoted-string {content!r}") return not_qtext_re.sub(lambda x: "\\" + x.group(0), content) def content_disposition_header( disptype: str, quote_fields: bool = True, _charset: str = "utf-8", **params: str ) -> str: """Sets ``Content-Disposition`` header for MIME. This is the MIME payload Content-Disposition header from RFC 2183 and RFC 7579 section 4.2, not the HTTP Content-Disposition from RFC 6266. disptype is a disposition type: inline, attachment, form-data. Should be valid extension token (see RFC 2183) quote_fields performs value quoting to 7-bit MIME headers according to RFC 7578. Set to quote_fields to False if recipient can take 8-bit file names and field values. _charset specifies the charset to use when quote_fields is True. params is a dict with disposition params. 
""" if not disptype or not (TOKEN > set(disptype)): raise ValueError("bad content disposition type {!r}" "".format(disptype)) value = disptype if params: lparams = [] for key, val in params.items(): if not key or not (TOKEN > set(key)): raise ValueError( "bad content disposition parameter" " {!r}={!r}".format(key, val) ) if quote_fields: if key.lower() == "filename": qval = quote(val, "", encoding=_charset) lparams.append((key, '"%s"' % qval)) else: try: qval = quoted_string(val) except ValueError: qval = "".join( (_charset, "''", quote(val, "", encoding=_charset)) ) lparams.append((key + "*", qval)) else: lparams.append((key, '"%s"' % qval)) else: qval = val.replace("\\", "\\\\").replace('"', '\\"') lparams.append((key, '"%s"' % qval)) sparams = "; ".join("=".join(pair) for pair in lparams) value = "; ".join((value, sparams)) return value class _TSelf(Protocol, Generic[_T]): _cache: Dict[str, _T] class reify(Generic[_T]): """Use as a class method decorator. It operates almost exactly like the Python `@property` decorator, but it puts the result of the method it decorates into the instance dict after the first call, effectively replacing the function it decorates with an instance variable. It is, in Python parlance, a data descriptor. """ def __init__(self, wrapped: Callable[..., _T]) -> None: self.wrapped = wrapped self.__doc__ = wrapped.__doc__ self.name = wrapped.__name__ def __get__(self, inst: _TSelf[_T], owner: Optional[Type[Any]] = None) -> _T: try: try: return inst._cache[self.name] except KeyError: val = self.wrapped(inst) inst._cache[self.name] = val return val except AttributeError: if inst is None: return self raise def __set__(self, inst: _TSelf[_T], value: _T) -> None: raise AttributeError("reified property is read-only") reify_py = reify try: from ._helpers import reify as reify_c if not NO_EXTENSIONS: reify = reify_c # type: ignore[misc,assignment] except ImportError: pass _ipv4_pattern = ( r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}" r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" ) _ipv6_pattern = ( r"^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}" r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)" r"((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})" r"(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}" r"(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}" r"[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)" r"(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}" r":|:(:[A-F0-9]{1,4}){7})$" ) _ipv4_regex = re.compile(_ipv4_pattern) _ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE) _ipv4_regexb = re.compile(_ipv4_pattern.encode("ascii")) _ipv6_regexb = re.compile(_ipv6_pattern.encode("ascii"), flags=re.IGNORECASE) def _is_ip_address( regex: Pattern[str], regexb: Pattern[bytes], host: Optional[Union[str, bytes]] ) -> bool: if host is None: return False if isinstance(host, str): return bool(regex.match(host)) elif isinstance(host, (bytes, bytearray, memoryview)): return bool(regexb.match(host)) else: raise TypeError(f"{host} [{type(host)}] is not a str or bytes") is_ipv4_address = functools.partial(_is_ip_address, _ipv4_regex, _ipv4_regexb) is_ipv6_address = functools.partial(_is_ip_address, _ipv6_regex, _ipv6_regexb) def is_ip_address(host: Optional[Union[str, bytes, bytearray, memoryview]]) -> bool: return is_ipv4_address(host) or is_ipv6_address(host) def next_whole_second() -> datetime.datetime: """Return current time rounded up to the next whole second.""" return 
datetime.datetime.now(datetime.timezone.utc).replace( microsecond=0 ) + datetime.timedelta(seconds=0) _cached_current_datetime: Optional[int] = None _cached_formatted_datetime = "" def rfc822_formatted_time() -> str: global _cached_current_datetime global _cached_formatted_datetime now = int(time.time()) if now != _cached_current_datetime: # Weekday and month names for HTTP date/time formatting; # always English! # Tuples are constants stored in codeobject! _weekdayname = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun") _monthname = ( "", # Dummy so we can use 1-based month numbers "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", ) year, month, day, hh, mm, ss, wd, *tail = time.gmtime(now) _cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( _weekdayname[wd], day, _monthname[month], year, hh, mm, ss, ) _cached_current_datetime = now return _cached_formatted_datetime def _weakref_handle(info: "Tuple[weakref.ref[object], str]") -> None: ref, name = info ob = ref() if ob is not None: with suppress(Exception): getattr(ob, name)() def weakref_handle( ob: object, name: str, timeout: float, loop: asyncio.AbstractEventLoop ) -> Optional[asyncio.TimerHandle]: if timeout is not None and timeout > 0: when = loop.time() + timeout if timeout >= 5: when = ceil(when) return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name)) return None def call_later( cb: Callable[[], Any], timeout: float, loop: asyncio.AbstractEventLoop ) -> Optional[asyncio.TimerHandle]: if timeout is not None and timeout > 0: when = loop.time() + timeout if timeout > 5: when = ceil(when) return loop.call_at(when, cb) return None class TimeoutHandle: """Timeout handle""" def __init__( self, loop: asyncio.AbstractEventLoop, timeout: Optional[float] ) -> None: self._timeout = timeout self._loop = loop self._callbacks: List[ Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]] ] = [] def register( self, callback: Callable[..., None], *args: Any, **kwargs: Any ) -> None: self._callbacks.append((callback, args, kwargs)) def close(self) -> None: self._callbacks.clear() def start(self) -> Optional[asyncio.Handle]: timeout = self._timeout if timeout is not None and timeout > 0: when = self._loop.time() + timeout if timeout >= 5: when = ceil(when) return self._loop.call_at(when, self.__call__) else: return None def timer(self) -> "BaseTimerContext": if self._timeout is not None and self._timeout > 0: timer = TimerContext(self._loop) self.register(timer.timeout) return timer else: return TimerNoop() def __call__(self) -> None: for cb, args, kwargs in self._callbacks: with suppress(Exception): cb(*args, **kwargs) self._callbacks.clear() class BaseTimerContext(ContextManager["BaseTimerContext"]): pass class TimerNoop(BaseTimerContext): def __enter__(self) -> BaseTimerContext: return self def __exit__( self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> None: return class TimerContext(BaseTimerContext): """Low resolution timeout context manager""" def __init__(self, loop: asyncio.AbstractEventLoop) -> None: self._loop = loop self._tasks: List[asyncio.Task[Any]] = [] self._cancelled = False def __enter__(self) -> BaseTimerContext: task = current_task(loop=self._loop) if task is None: raise RuntimeError( "Timeout context manager should be used " "inside a task" ) if self._cancelled: raise asyncio.TimeoutError from None self._tasks.append(task) return self def __exit__( self, exc_type: Optional[Type[BaseException]], 
exc_val: Optional[BaseException], exc_tb: Optional[TracebackType], ) -> Optional[bool]: if self._tasks: self._tasks.pop() if exc_type is asyncio.CancelledError and self._cancelled: raise asyncio.TimeoutError from None return None def timeout(self) -> None: if not self._cancelled: for task in set(self._tasks): task.cancel() self._cancelled = True def ceil_timeout(delay: Optional[float]) -> async_timeout.Timeout: if delay is None or delay <= 0: return async_timeout.timeout(None) loop = get_running_loop() now = loop.time() when = now + delay if delay > 5: when = ceil(when) return async_timeout.timeout_at(when) class HeadersMixin: ATTRS = frozenset(["_content_type", "_content_dict", "_stored_content_type"]) _content_type: Optional[str] = None _content_dict: Optional[Dict[str, str]] = None _stored_content_type = sentinel def _parse_content_type(self, raw: str) -> None: self._stored_content_type = raw if raw is None: # default value according to RFC 2616 self._content_type = "application/octet-stream" self._content_dict = {} else: msg = HeaderParser().parsestr("Content-Type: " + raw) self._content_type = msg.get_content_type() params = msg.get_params() self._content_dict = dict(params[1:]) # First element is content type again @property def content_type(self) -> str: """The value of content part for Content-Type HTTP header.""" raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined] if self._stored_content_type != raw: self._parse_content_type(raw) return self._content_type # type: ignore[return-value] @property def charset(self) -> Optional[str]: """The value of charset part for Content-Type HTTP header.""" raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore[attr-defined] if self._stored_content_type != raw: self._parse_content_type(raw) return self._content_dict.get("charset") # type: ignore[union-attr] @property def content_length(self) -> Optional[int]: """The value of Content-Length HTTP header.""" content_length = self._headers.get( # type: ignore[attr-defined] hdrs.CONTENT_LENGTH ) if content_length is not None: return int(content_length) else: return None def set_result(fut: "asyncio.Future[_T]", result: _T) -> None: if not fut.done(): fut.set_result(result) def set_exception(fut: "asyncio.Future[_T]", exc: BaseException) -> None: if not fut.done(): fut.set_exception(exc) class ChainMapProxy(Mapping[str, Any]): __slots__ = ("_maps",) def __init__(self, maps: Iterable[Mapping[str, Any]]) -> None: self._maps = tuple(maps) def __init_subclass__(cls) -> None: raise TypeError( "Inheritance class {} from ChainMapProxy " "is forbidden".format(cls.__name__) ) def __getitem__(self, key: str) -> Any: for mapping in self._maps: try: return mapping[key] except KeyError: pass raise KeyError(key) def get(self, key: str, default: Any = None) -> Any: return self[key] if key in self else default def __len__(self) -> int: # reuses stored hash values if possible return len(set().union(*self._maps)) # type: ignore[arg-type] def __iter__(self) -> Iterator[str]: d: Dict[str, Any] = {} for mapping in reversed(self._maps): # reuses stored hash values if possible d.update(mapping) return iter(d) def __contains__(self, key: object) -> bool: return any(key in m for m in self._maps) def __bool__(self) -> bool: return any(self._maps) def __repr__(self) -> str: content = ", ".join(map(repr, self._maps)) return f"ChainMapProxy({content})" # https://tools.ietf.org/html/rfc7232#section-2.3 _ETAGC = r"[!#-}\x80-\xff]+" _ETAGC_RE = re.compile(_ETAGC) _QUOTED_ETAG = rf'(W/)?"({_ETAGC})"' 
QUOTED_ETAG_RE = re.compile(_QUOTED_ETAG) LIST_QUOTED_ETAG_RE = re.compile(rf"({_QUOTED_ETAG})(?:\s*,\s*|$)|(.)") ETAG_ANY = "*" @attr.s(auto_attribs=True, frozen=True, slots=True) class ETag: value: str is_weak: bool = False def validate_etag_value(value: str) -> None: if value != ETAG_ANY and not _ETAGC_RE.fullmatch(value): raise ValueError( f"Value {value!r} is not a valid etag. Maybe it contains '\"'?" ) def parse_http_date(date_str: Optional[str]) -> Optional[datetime.datetime]: """Process a date string, return a datetime object""" if date_str is not None: timetuple = parsedate(date_str) if timetuple is not None: with suppress(ValueError): return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc) return None
26,361
Python
29.025057
88
0.579113
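A minimal sketch of two helpers from the record above: BasicAuth (re-exported at the aiohttp top level) and parse_mimetype (an internal helper, imported here only for illustration); the credentials are hypothetical.

from aiohttp import BasicAuth
from aiohttp.helpers import parse_mimetype  # internal module

auth = BasicAuth("user", "secret")
header_value = auth.encode()                 # "Basic dXNlcjpzZWNyZXQ="
assert BasicAuth.decode(header_value).login == "user"

mt = parse_mimetype("text/html; charset=utf-8")
print(mt.type, mt.subtype, mt.parameters["charset"])  # text html utf-8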
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_response.py
import asyncio import collections.abc import datetime import enum import json import math import time import warnings import zlib from concurrent.futures import Executor from http.cookies import Morsel, SimpleCookie from typing import ( TYPE_CHECKING, Any, Dict, Iterator, Mapping, MutableMapping, Optional, Tuple, Union, cast, ) from multidict import CIMultiDict, istr from . import hdrs, payload from .abc import AbstractStreamWriter from .helpers import ( ETAG_ANY, PY_38, QUOTED_ETAG_RE, ETag, HeadersMixin, parse_http_date, rfc822_formatted_time, sentinel, validate_etag_value, ) from .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11 from .payload import Payload from .typedefs import JSONEncoder, LooseHeaders __all__ = ("ContentCoding", "StreamResponse", "Response", "json_response") if TYPE_CHECKING: # pragma: no cover from .web_request import BaseRequest BaseClass = MutableMapping[str, Any] else: BaseClass = collections.abc.MutableMapping if not PY_38: # allow samesite to be used in python < 3.8 # already permitted in python 3.8, see https://bugs.python.org/issue29613 Morsel._reserved["samesite"] = "SameSite" # type: ignore[attr-defined] class ContentCoding(enum.Enum): # The content codings that we have support for. # # Additional registered codings are listed at: # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding deflate = "deflate" gzip = "gzip" identity = "identity" ############################################################ # HTTP Response classes ############################################################ class StreamResponse(BaseClass, HeadersMixin): _length_check = True def __init__( self, *, status: int = 200, reason: Optional[str] = None, headers: Optional[LooseHeaders] = None, ) -> None: self._body = None self._keep_alive: Optional[bool] = None self._chunked = False self._compression = False self._compression_force: Optional[ContentCoding] = None self._cookies: SimpleCookie[str] = SimpleCookie() self._req: Optional[BaseRequest] = None self._payload_writer: Optional[AbstractStreamWriter] = None self._eof_sent = False self._body_length = 0 self._state: Dict[str, Any] = {} if headers is not None: self._headers: CIMultiDict[str] = CIMultiDict(headers) else: self._headers = CIMultiDict() self.set_status(status, reason) @property def prepared(self) -> bool: return self._payload_writer is not None @property def task(self) -> "Optional[asyncio.Task[None]]": if self._req: return self._req.task else: return None @property def status(self) -> int: return self._status @property def chunked(self) -> bool: return self._chunked @property def compression(self) -> bool: return self._compression @property def reason(self) -> str: return self._reason def set_status( self, status: int, reason: Optional[str] = None, _RESPONSES: Mapping[int, Tuple[str, str]] = RESPONSES, ) -> None: assert not self.prepared, ( "Cannot change the response status code after " "the headers have been sent" ) self._status = int(status) if reason is None: try: reason = _RESPONSES[self._status][0] except Exception: reason = "" self._reason = reason @property def keep_alive(self) -> Optional[bool]: return self._keep_alive def force_close(self) -> None: self._keep_alive = False @property def body_length(self) -> int: return self._body_length @property def output_length(self) -> int: warnings.warn("output_length is deprecated", DeprecationWarning) assert self._payload_writer return self._payload_writer.buffer_size def enable_chunked_encoding(self, chunk_size: 
Optional[int] = None) -> None: """Enables automatic chunked transfer encoding.""" self._chunked = True if hdrs.CONTENT_LENGTH in self._headers: raise RuntimeError( "You can't enable chunked encoding when " "a content length is set" ) if chunk_size is not None: warnings.warn("Chunk size is deprecated #1615", DeprecationWarning) def enable_compression( self, force: Optional[Union[bool, ContentCoding]] = None ) -> None: """Enables response compression encoding.""" # Backwards compatibility for when force was a bool <0.17. if type(force) == bool: force = ContentCoding.deflate if force else ContentCoding.identity warnings.warn( "Using boolean for force is deprecated #3318", DeprecationWarning ) elif force is not None: assert isinstance(force, ContentCoding), ( "force should one of " "None, bool or " "ContentEncoding" ) self._compression = True self._compression_force = force @property def headers(self) -> "CIMultiDict[str]": return self._headers @property def cookies(self) -> "SimpleCookie[str]": return self._cookies def set_cookie( self, name: str, value: str, *, expires: Optional[str] = None, domain: Optional[str] = None, max_age: Optional[Union[int, str]] = None, path: str = "/", secure: Optional[bool] = None, httponly: Optional[bool] = None, version: Optional[str] = None, samesite: Optional[str] = None, ) -> None: """Set or update response cookie. Sets new cookie or updates existent with new value. Also updates only those params which are not None. """ old = self._cookies.get(name) if old is not None and old.coded_value == "": # deleted cookie self._cookies.pop(name, None) self._cookies[name] = value c = self._cookies[name] if expires is not None: c["expires"] = expires elif c.get("expires") == "Thu, 01 Jan 1970 00:00:00 GMT": del c["expires"] if domain is not None: c["domain"] = domain if max_age is not None: c["max-age"] = str(max_age) elif "max-age" in c: del c["max-age"] c["path"] = path if secure is not None: c["secure"] = secure if httponly is not None: c["httponly"] = httponly if version is not None: c["version"] = version if samesite is not None: c["samesite"] = samesite def del_cookie( self, name: str, *, domain: Optional[str] = None, path: str = "/" ) -> None: """Delete cookie. Creates new empty expired cookie. """ # TODO: do we need domain/path here? 
self._cookies.pop(name, None) self.set_cookie( name, "", max_age=0, expires="Thu, 01 Jan 1970 00:00:00 GMT", domain=domain, path=path, ) @property def content_length(self) -> Optional[int]: # Just a placeholder for adding setter return super().content_length @content_length.setter def content_length(self, value: Optional[int]) -> None: if value is not None: value = int(value) if self._chunked: raise RuntimeError( "You can't set content length when " "chunked encoding is enable" ) self._headers[hdrs.CONTENT_LENGTH] = str(value) else: self._headers.pop(hdrs.CONTENT_LENGTH, None) @property def content_type(self) -> str: # Just a placeholder for adding setter return super().content_type @content_type.setter def content_type(self, value: str) -> None: self.content_type # read header values if needed self._content_type = str(value) self._generate_content_type_header() @property def charset(self) -> Optional[str]: # Just a placeholder for adding setter return super().charset @charset.setter def charset(self, value: Optional[str]) -> None: ctype = self.content_type # read header values if needed if ctype == "application/octet-stream": raise RuntimeError( "Setting charset for application/octet-stream " "doesn't make sense, setup content_type first" ) assert self._content_dict is not None if value is None: self._content_dict.pop("charset", None) else: self._content_dict["charset"] = str(value).lower() self._generate_content_type_header() @property def last_modified(self) -> Optional[datetime.datetime]: """The value of Last-Modified HTTP header, or None. This header is represented as a `datetime` object. """ return parse_http_date(self._headers.get(hdrs.LAST_MODIFIED)) @last_modified.setter def last_modified( self, value: Optional[Union[int, float, datetime.datetime, str]] ) -> None: if value is None: self._headers.pop(hdrs.LAST_MODIFIED, None) elif isinstance(value, (int, float)): self._headers[hdrs.LAST_MODIFIED] = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)) ) elif isinstance(value, datetime.datetime): self._headers[hdrs.LAST_MODIFIED] = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple() ) elif isinstance(value, str): self._headers[hdrs.LAST_MODIFIED] = value @property def etag(self) -> Optional[ETag]: quoted_value = self._headers.get(hdrs.ETAG) if not quoted_value: return None elif quoted_value == ETAG_ANY: return ETag(value=ETAG_ANY) match = QUOTED_ETAG_RE.fullmatch(quoted_value) if not match: return None is_weak, value = match.group(1, 2) return ETag( is_weak=bool(is_weak), value=value, ) @etag.setter def etag(self, value: Optional[Union[ETag, str]]) -> None: if value is None: self._headers.pop(hdrs.ETAG, None) elif (isinstance(value, str) and value == ETAG_ANY) or ( isinstance(value, ETag) and value.value == ETAG_ANY ): self._headers[hdrs.ETAG] = ETAG_ANY elif isinstance(value, str): validate_etag_value(value) self._headers[hdrs.ETAG] = f'"{value}"' elif isinstance(value, ETag) and isinstance(value.value, str): validate_etag_value(value.value) hdr_value = f'W/"{value.value}"' if value.is_weak else f'"{value.value}"' self._headers[hdrs.ETAG] = hdr_value else: raise ValueError( f"Unsupported etag type: {type(value)}. 
" f"etag must be str, ETag or None" ) def _generate_content_type_header( self, CONTENT_TYPE: istr = hdrs.CONTENT_TYPE ) -> None: assert self._content_dict is not None assert self._content_type is not None params = "; ".join(f"{k}={v}" for k, v in self._content_dict.items()) if params: ctype = self._content_type + "; " + params else: ctype = self._content_type self._headers[CONTENT_TYPE] = ctype async def _do_start_compression(self, coding: ContentCoding) -> None: if coding != ContentCoding.identity: assert self._payload_writer is not None self._headers[hdrs.CONTENT_ENCODING] = coding.value self._payload_writer.enable_compression(coding.value) # Compressed payload may have different content length, # remove the header self._headers.popall(hdrs.CONTENT_LENGTH, None) async def _start_compression(self, request: "BaseRequest") -> None: if self._compression_force: await self._do_start_compression(self._compression_force) else: accept_encoding = request.headers.get(hdrs.ACCEPT_ENCODING, "").lower() for coding in ContentCoding: if coding.value in accept_encoding: await self._do_start_compression(coding) return async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]: if self._eof_sent: return None if self._payload_writer is not None: return self._payload_writer return await self._start(request) async def _start(self, request: "BaseRequest") -> AbstractStreamWriter: self._req = request writer = self._payload_writer = request._payload_writer await self._prepare_headers() await request._prepare_hook(self) await self._write_headers() return writer async def _prepare_headers(self) -> None: request = self._req assert request is not None writer = self._payload_writer assert writer is not None keep_alive = self._keep_alive if keep_alive is None: keep_alive = request.keep_alive self._keep_alive = keep_alive version = request.version headers = self._headers for cookie in self._cookies.values(): value = cookie.output(header="")[1:] headers.add(hdrs.SET_COOKIE, value) if self._compression: await self._start_compression(request) if self._chunked: if version != HttpVersion11: raise RuntimeError( "Using chunked encoding is forbidden " "for HTTP/{0.major}.{0.minor}".format(request.version) ) writer.enable_chunking() headers[hdrs.TRANSFER_ENCODING] = "chunked" if hdrs.CONTENT_LENGTH in headers: del headers[hdrs.CONTENT_LENGTH] elif self._length_check: writer.length = self.content_length if writer.length is None: if version >= HttpVersion11 and self.status != 204: writer.enable_chunking() headers[hdrs.TRANSFER_ENCODING] = "chunked" if hdrs.CONTENT_LENGTH in headers: del headers[hdrs.CONTENT_LENGTH] else: keep_alive = False # HTTP 1.1: https://tools.ietf.org/html/rfc7230#section-3.3.2 # HTTP 1.0: https://tools.ietf.org/html/rfc1945#section-10.4 elif version >= HttpVersion11 and self.status in (100, 101, 102, 103, 204): del headers[hdrs.CONTENT_LENGTH] if self.status not in (204, 304): headers.setdefault(hdrs.CONTENT_TYPE, "application/octet-stream") headers.setdefault(hdrs.DATE, rfc822_formatted_time()) headers.setdefault(hdrs.SERVER, SERVER_SOFTWARE) # connection header if hdrs.CONNECTION not in headers: if keep_alive: if version == HttpVersion10: headers[hdrs.CONNECTION] = "keep-alive" else: if version == HttpVersion11: headers[hdrs.CONNECTION] = "close" async def _write_headers(self) -> None: request = self._req assert request is not None writer = self._payload_writer assert writer is not None # status line version = request.version status_line = "HTTP/{}.{} {} {}".format( version[0], 
version[1], self._status, self._reason ) await writer.write_headers(status_line, self._headers) async def write(self, data: bytes) -> None: assert isinstance( data, (bytes, bytearray, memoryview) ), "data argument must be byte-ish (%r)" % type(data) if self._eof_sent: raise RuntimeError("Cannot call write() after write_eof()") if self._payload_writer is None: raise RuntimeError("Cannot call write() before prepare()") await self._payload_writer.write(data) async def drain(self) -> None: assert not self._eof_sent, "EOF has already been sent" assert self._payload_writer is not None, "Response has not been started" warnings.warn( "drain method is deprecated, use await resp.write()", DeprecationWarning, stacklevel=2, ) await self._payload_writer.drain() async def write_eof(self, data: bytes = b"") -> None: assert isinstance( data, (bytes, bytearray, memoryview) ), "data argument must be byte-ish (%r)" % type(data) if self._eof_sent: return assert self._payload_writer is not None, "Response has not been started" await self._payload_writer.write_eof(data) self._eof_sent = True self._req = None self._body_length = self._payload_writer.output_size self._payload_writer = None def __repr__(self) -> str: if self._eof_sent: info = "eof" elif self.prepared: assert self._req is not None info = f"{self._req.method} {self._req.path} " else: info = "not prepared" return f"<{self.__class__.__name__} {self.reason} {info}>" def __getitem__(self, key: str) -> Any: return self._state[key] def __setitem__(self, key: str, value: Any) -> None: self._state[key] = value def __delitem__(self, key: str) -> None: del self._state[key] def __len__(self) -> int: return len(self._state) def __iter__(self) -> Iterator[str]: return iter(self._state) def __hash__(self) -> int: return hash(id(self)) def __eq__(self, other: object) -> bool: return self is other class Response(StreamResponse): def __init__( self, *, body: Any = None, status: int = 200, reason: Optional[str] = None, text: Optional[str] = None, headers: Optional[LooseHeaders] = None, content_type: Optional[str] = None, charset: Optional[str] = None, zlib_executor_size: Optional[int] = None, zlib_executor: Optional[Executor] = None, ) -> None: if body is not None and text is not None: raise ValueError("body and text are not allowed together") if headers is None: real_headers: CIMultiDict[str] = CIMultiDict() elif not isinstance(headers, CIMultiDict): real_headers = CIMultiDict(headers) else: real_headers = headers # = cast('CIMultiDict[str]', headers) if content_type is not None and "charset" in content_type: raise ValueError("charset must not be in content_type " "argument") if text is not None: if hdrs.CONTENT_TYPE in real_headers: if content_type or charset: raise ValueError( "passing both Content-Type header and " "content_type or charset params " "is forbidden" ) else: # fast path for filling headers if not isinstance(text, str): raise TypeError("text argument must be str (%r)" % type(text)) if content_type is None: content_type = "text/plain" if charset is None: charset = "utf-8" real_headers[hdrs.CONTENT_TYPE] = content_type + "; charset=" + charset body = text.encode(charset) text = None else: if hdrs.CONTENT_TYPE in real_headers: if content_type is not None or charset is not None: raise ValueError( "passing both Content-Type header and " "content_type or charset params " "is forbidden" ) else: if content_type is not None: if charset is not None: content_type += "; charset=" + charset real_headers[hdrs.CONTENT_TYPE] = content_type 
super().__init__(status=status, reason=reason, headers=real_headers) if text is not None: self.text = text else: self.body = body self._compressed_body: Optional[bytes] = None self._zlib_executor_size = zlib_executor_size self._zlib_executor = zlib_executor @property def body(self) -> Optional[Union[bytes, Payload]]: return self._body @body.setter def body( self, body: bytes, CONTENT_TYPE: istr = hdrs.CONTENT_TYPE, CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH, ) -> None: if body is None: self._body: Optional[bytes] = None self._body_payload: bool = False elif isinstance(body, (bytes, bytearray)): self._body = body self._body_payload = False else: try: self._body = body = payload.PAYLOAD_REGISTRY.get(body) except payload.LookupError: raise ValueError("Unsupported body type %r" % type(body)) self._body_payload = True headers = self._headers # set content-length header if needed if not self._chunked and CONTENT_LENGTH not in headers: size = body.size if size is not None: headers[CONTENT_LENGTH] = str(size) # set content-type if CONTENT_TYPE not in headers: headers[CONTENT_TYPE] = body.content_type # copy payload headers if body.headers: for (key, value) in body.headers.items(): if key not in headers: headers[key] = value self._compressed_body = None @property def text(self) -> Optional[str]: if self._body is None: return None return self._body.decode(self.charset or "utf-8") @text.setter def text(self, text: str) -> None: assert text is None or isinstance( text, str ), "text argument must be str (%r)" % type(text) if self.content_type == "application/octet-stream": self.content_type = "text/plain" if self.charset is None: self.charset = "utf-8" self._body = text.encode(self.charset) self._body_payload = False self._compressed_body = None @property def content_length(self) -> Optional[int]: if self._chunked: return None if hdrs.CONTENT_LENGTH in self._headers: return super().content_length if self._compressed_body is not None: # Return length of the compressed body return len(self._compressed_body) elif self._body_payload: # A payload without content length, or a compressed payload return None elif self._body is not None: return len(self._body) else: return 0 @content_length.setter def content_length(self, value: Optional[int]) -> None: raise RuntimeError("Content length is set automatically") async def write_eof(self, data: bytes = b"") -> None: if self._eof_sent: return if self._compressed_body is None: body: Optional[Union[bytes, Payload]] = self._body else: body = self._compressed_body assert not data, f"data arg is not supported, got {data!r}" assert self._req is not None assert self._payload_writer is not None if body is not None: if self._req._method == hdrs.METH_HEAD or self._status in [204, 304]: await super().write_eof() elif self._body_payload: payload = cast(Payload, body) await payload.write(self._payload_writer) await super().write_eof() else: await super().write_eof(cast(bytes, body)) else: await super().write_eof() async def _start(self, request: "BaseRequest") -> AbstractStreamWriter: if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers: if not self._body_payload: if self._body is not None: self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body)) else: self._headers[hdrs.CONTENT_LENGTH] = "0" return await super()._start(request) def _compress_body(self, zlib_mode: int) -> None: assert zlib_mode > 0 compressobj = zlib.compressobj(wbits=zlib_mode) body_in = self._body assert body_in is not None self._compressed_body = compressobj.compress(body_in) + 
compressobj.flush() async def _do_start_compression(self, coding: ContentCoding) -> None: if self._body_payload or self._chunked: return await super()._do_start_compression(coding) if coding != ContentCoding.identity: # Instead of using _payload_writer.enable_compression, # compress the whole body zlib_mode = ( 16 + zlib.MAX_WBITS if coding == ContentCoding.gzip else zlib.MAX_WBITS ) body_in = self._body assert body_in is not None if ( self._zlib_executor_size is not None and len(body_in) > self._zlib_executor_size ): await asyncio.get_event_loop().run_in_executor( self._zlib_executor, self._compress_body, zlib_mode ) else: self._compress_body(zlib_mode) body_out = self._compressed_body assert body_out is not None self._headers[hdrs.CONTENT_ENCODING] = coding.value self._headers[hdrs.CONTENT_LENGTH] = str(len(body_out)) def json_response( data: Any = sentinel, *, text: Optional[str] = None, body: Optional[bytes] = None, status: int = 200, reason: Optional[str] = None, headers: Optional[LooseHeaders] = None, content_type: str = "application/json", dumps: JSONEncoder = json.dumps, ) -> Response: if data is not sentinel: if text or body: raise ValueError("only one of data, text, or body should be specified") else: text = dumps(data) return Response( text=text, body=body, status=status, reason=reason, headers=headers, content_type=content_type, )
27,471
Python
32.25908
91
0.562812
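A minimal usage sketch for the response classes in web_response.py above, assuming a plain aiohttp server application; the route paths and payloads are illustrative and not part of the module itself.

from aiohttp import web


async def health(request: web.Request) -> web.Response:
    # json_response wraps Response, serializes the dict with json.dumps and
    # sets Content-Type to application/json.
    return web.json_response({"status": "ok"})


async def stream(request: web.Request) -> web.StreamResponse:
    # StreamResponse is prepared explicitly, then written to in chunks.
    resp = web.StreamResponse(status=200, headers={"Content-Type": "text/plain"})
    await resp.prepare(request)
    await resp.write(b"hello ")
    await resp.write(b"world")
    await resp.write_eof()
    return resp


app = web.Application()
app.add_routes([web.get("/health", health), web.get("/stream", stream)])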
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/typedefs.py
import json
import os
import sys
from typing import (
    TYPE_CHECKING,
    Any,
    Awaitable,
    Callable,
    Iterable,
    Mapping,
    Tuple,
    Union,
)

from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy, istr
from yarl import URL

# These are for other modules to use (to avoid repeating the conditional import).
if sys.version_info >= (3, 8):
    from typing import Final as Final, Protocol as Protocol, TypedDict as TypedDict
else:
    from typing_extensions import (  # noqa: F401
        Final,
        Protocol as Protocol,
        TypedDict as TypedDict,
    )

DEFAULT_JSON_ENCODER = json.dumps
DEFAULT_JSON_DECODER = json.loads

if TYPE_CHECKING:  # pragma: no cover
    _CIMultiDict = CIMultiDict[str]
    _CIMultiDictProxy = CIMultiDictProxy[str]
    _MultiDict = MultiDict[str]
    _MultiDictProxy = MultiDictProxy[str]
    from http.cookies import BaseCookie, Morsel

    from .web import Request, StreamResponse
else:
    _CIMultiDict = CIMultiDict
    _CIMultiDictProxy = CIMultiDictProxy
    _MultiDict = MultiDict
    _MultiDictProxy = MultiDictProxy

Byteish = Union[bytes, bytearray, memoryview]
JSONEncoder = Callable[[Any], str]
JSONDecoder = Callable[[str], Any]
LooseHeaders = Union[Mapping[Union[str, istr], str], _CIMultiDict, _CIMultiDictProxy]
RawHeaders = Tuple[Tuple[bytes, bytes], ...]
StrOrURL = Union[str, URL]

LooseCookiesMappings = Mapping[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]]
LooseCookiesIterables = Iterable[
    Tuple[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]]
]
LooseCookies = Union[
    LooseCookiesMappings,
    LooseCookiesIterables,
    "BaseCookie[str]",
]

Handler = Callable[["Request"], Awaitable["StreamResponse"]]

PathLike = Union[str, "os.PathLike[str]"]
1,766
Python
26.184615
85
0.713477
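A short sketch of how the Handler alias above is typically consumed, assuming aiohttp's middleware decorator; the added header is illustrative.

from aiohttp import web
from aiohttp.typedefs import Handler


@web.middleware
async def tagging_middleware(request: web.Request, handler: Handler) -> web.StreamResponse:
    # Handler is Callable[[Request], Awaitable[StreamResponse]], so the wrapped
    # handler can simply be awaited.
    response = await handler(request)
    response.headers["X-Handled-By"] = "tagging_middleware"  # illustrative header
    return response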
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/tracing.py
from types import SimpleNamespace from typing import TYPE_CHECKING, Awaitable, Optional, Type, TypeVar import attr from aiosignal import Signal from multidict import CIMultiDict from yarl import URL from .client_reqrep import ClientResponse if TYPE_CHECKING: # pragma: no cover from .client import ClientSession from .typedefs import Protocol _ParamT_contra = TypeVar("_ParamT_contra", contravariant=True) class _SignalCallback(Protocol[_ParamT_contra]): def __call__( self, __client_session: ClientSession, __trace_config_ctx: SimpleNamespace, __params: _ParamT_contra, ) -> Awaitable[None]: ... __all__ = ( "TraceConfig", "TraceRequestStartParams", "TraceRequestEndParams", "TraceRequestExceptionParams", "TraceConnectionQueuedStartParams", "TraceConnectionQueuedEndParams", "TraceConnectionCreateStartParams", "TraceConnectionCreateEndParams", "TraceConnectionReuseconnParams", "TraceDnsResolveHostStartParams", "TraceDnsResolveHostEndParams", "TraceDnsCacheHitParams", "TraceDnsCacheMissParams", "TraceRequestRedirectParams", "TraceRequestChunkSentParams", "TraceResponseChunkReceivedParams", "TraceRequestHeadersSentParams", ) class TraceConfig: """First-class used to trace requests launched via ClientSession objects.""" def __init__( self, trace_config_ctx_factory: Type[SimpleNamespace] = SimpleNamespace ) -> None: self._on_request_start: Signal[ _SignalCallback[TraceRequestStartParams] ] = Signal(self) self._on_request_chunk_sent: Signal[ _SignalCallback[TraceRequestChunkSentParams] ] = Signal(self) self._on_response_chunk_received: Signal[ _SignalCallback[TraceResponseChunkReceivedParams] ] = Signal(self) self._on_request_end: Signal[_SignalCallback[TraceRequestEndParams]] = Signal( self ) self._on_request_exception: Signal[ _SignalCallback[TraceRequestExceptionParams] ] = Signal(self) self._on_request_redirect: Signal[ _SignalCallback[TraceRequestRedirectParams] ] = Signal(self) self._on_connection_queued_start: Signal[ _SignalCallback[TraceConnectionQueuedStartParams] ] = Signal(self) self._on_connection_queued_end: Signal[ _SignalCallback[TraceConnectionQueuedEndParams] ] = Signal(self) self._on_connection_create_start: Signal[ _SignalCallback[TraceConnectionCreateStartParams] ] = Signal(self) self._on_connection_create_end: Signal[ _SignalCallback[TraceConnectionCreateEndParams] ] = Signal(self) self._on_connection_reuseconn: Signal[ _SignalCallback[TraceConnectionReuseconnParams] ] = Signal(self) self._on_dns_resolvehost_start: Signal[ _SignalCallback[TraceDnsResolveHostStartParams] ] = Signal(self) self._on_dns_resolvehost_end: Signal[ _SignalCallback[TraceDnsResolveHostEndParams] ] = Signal(self) self._on_dns_cache_hit: Signal[ _SignalCallback[TraceDnsCacheHitParams] ] = Signal(self) self._on_dns_cache_miss: Signal[ _SignalCallback[TraceDnsCacheMissParams] ] = Signal(self) self._on_request_headers_sent: Signal[ _SignalCallback[TraceRequestHeadersSentParams] ] = Signal(self) self._trace_config_ctx_factory = trace_config_ctx_factory def trace_config_ctx( self, trace_request_ctx: Optional[SimpleNamespace] = None ) -> SimpleNamespace: """Return a new trace_config_ctx instance""" return self._trace_config_ctx_factory(trace_request_ctx=trace_request_ctx) def freeze(self) -> None: self._on_request_start.freeze() self._on_request_chunk_sent.freeze() self._on_response_chunk_received.freeze() self._on_request_end.freeze() self._on_request_exception.freeze() self._on_request_redirect.freeze() self._on_connection_queued_start.freeze() self._on_connection_queued_end.freeze() 
self._on_connection_create_start.freeze() self._on_connection_create_end.freeze() self._on_connection_reuseconn.freeze() self._on_dns_resolvehost_start.freeze() self._on_dns_resolvehost_end.freeze() self._on_dns_cache_hit.freeze() self._on_dns_cache_miss.freeze() self._on_request_headers_sent.freeze() @property def on_request_start(self) -> "Signal[_SignalCallback[TraceRequestStartParams]]": return self._on_request_start @property def on_request_chunk_sent( self, ) -> "Signal[_SignalCallback[TraceRequestChunkSentParams]]": return self._on_request_chunk_sent @property def on_response_chunk_received( self, ) -> "Signal[_SignalCallback[TraceResponseChunkReceivedParams]]": return self._on_response_chunk_received @property def on_request_end(self) -> "Signal[_SignalCallback[TraceRequestEndParams]]": return self._on_request_end @property def on_request_exception( self, ) -> "Signal[_SignalCallback[TraceRequestExceptionParams]]": return self._on_request_exception @property def on_request_redirect( self, ) -> "Signal[_SignalCallback[TraceRequestRedirectParams]]": return self._on_request_redirect @property def on_connection_queued_start( self, ) -> "Signal[_SignalCallback[TraceConnectionQueuedStartParams]]": return self._on_connection_queued_start @property def on_connection_queued_end( self, ) -> "Signal[_SignalCallback[TraceConnectionQueuedEndParams]]": return self._on_connection_queued_end @property def on_connection_create_start( self, ) -> "Signal[_SignalCallback[TraceConnectionCreateStartParams]]": return self._on_connection_create_start @property def on_connection_create_end( self, ) -> "Signal[_SignalCallback[TraceConnectionCreateEndParams]]": return self._on_connection_create_end @property def on_connection_reuseconn( self, ) -> "Signal[_SignalCallback[TraceConnectionReuseconnParams]]": return self._on_connection_reuseconn @property def on_dns_resolvehost_start( self, ) -> "Signal[_SignalCallback[TraceDnsResolveHostStartParams]]": return self._on_dns_resolvehost_start @property def on_dns_resolvehost_end( self, ) -> "Signal[_SignalCallback[TraceDnsResolveHostEndParams]]": return self._on_dns_resolvehost_end @property def on_dns_cache_hit(self) -> "Signal[_SignalCallback[TraceDnsCacheHitParams]]": return self._on_dns_cache_hit @property def on_dns_cache_miss(self) -> "Signal[_SignalCallback[TraceDnsCacheMissParams]]": return self._on_dns_cache_miss @property def on_request_headers_sent( self, ) -> "Signal[_SignalCallback[TraceRequestHeadersSentParams]]": return self._on_request_headers_sent @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceRequestStartParams: """Parameters sent by the `on_request_start` signal""" method: str url: URL headers: "CIMultiDict[str]" @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceRequestChunkSentParams: """Parameters sent by the `on_request_chunk_sent` signal""" method: str url: URL chunk: bytes @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceResponseChunkReceivedParams: """Parameters sent by the `on_response_chunk_received` signal""" method: str url: URL chunk: bytes @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceRequestEndParams: """Parameters sent by the `on_request_end` signal""" method: str url: URL headers: "CIMultiDict[str]" response: ClientResponse @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceRequestExceptionParams: """Parameters sent by the `on_request_exception` signal""" method: str url: URL headers: "CIMultiDict[str]" exception: BaseException @attr.s(auto_attribs=True, 
frozen=True, slots=True) class TraceRequestRedirectParams: """Parameters sent by the `on_request_redirect` signal""" method: str url: URL headers: "CIMultiDict[str]" response: ClientResponse @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceConnectionQueuedStartParams: """Parameters sent by the `on_connection_queued_start` signal""" @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceConnectionQueuedEndParams: """Parameters sent by the `on_connection_queued_end` signal""" @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceConnectionCreateStartParams: """Parameters sent by the `on_connection_create_start` signal""" @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceConnectionCreateEndParams: """Parameters sent by the `on_connection_create_end` signal""" @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceConnectionReuseconnParams: """Parameters sent by the `on_connection_reuseconn` signal""" @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceDnsResolveHostStartParams: """Parameters sent by the `on_dns_resolvehost_start` signal""" host: str @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceDnsResolveHostEndParams: """Parameters sent by the `on_dns_resolvehost_end` signal""" host: str @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceDnsCacheHitParams: """Parameters sent by the `on_dns_cache_hit` signal""" host: str @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceDnsCacheMissParams: """Parameters sent by the `on_dns_cache_miss` signal""" host: str @attr.s(auto_attribs=True, frozen=True, slots=True) class TraceRequestHeadersSentParams: """Parameters sent by the `on_request_headers_sent` signal""" method: str url: URL headers: "CIMultiDict[str]" class Trace: """Internal dependency holder class. Used to keep together the main dependencies used at the moment of send a signal. 
""" def __init__( self, session: "ClientSession", trace_config: TraceConfig, trace_config_ctx: SimpleNamespace, ) -> None: self._trace_config = trace_config self._trace_config_ctx = trace_config_ctx self._session = session async def send_request_start( self, method: str, url: URL, headers: "CIMultiDict[str]" ) -> None: return await self._trace_config.on_request_start.send( self._session, self._trace_config_ctx, TraceRequestStartParams(method, url, headers), ) async def send_request_chunk_sent( self, method: str, url: URL, chunk: bytes ) -> None: return await self._trace_config.on_request_chunk_sent.send( self._session, self._trace_config_ctx, TraceRequestChunkSentParams(method, url, chunk), ) async def send_response_chunk_received( self, method: str, url: URL, chunk: bytes ) -> None: return await self._trace_config.on_response_chunk_received.send( self._session, self._trace_config_ctx, TraceResponseChunkReceivedParams(method, url, chunk), ) async def send_request_end( self, method: str, url: URL, headers: "CIMultiDict[str]", response: ClientResponse, ) -> None: return await self._trace_config.on_request_end.send( self._session, self._trace_config_ctx, TraceRequestEndParams(method, url, headers, response), ) async def send_request_exception( self, method: str, url: URL, headers: "CIMultiDict[str]", exception: BaseException, ) -> None: return await self._trace_config.on_request_exception.send( self._session, self._trace_config_ctx, TraceRequestExceptionParams(method, url, headers, exception), ) async def send_request_redirect( self, method: str, url: URL, headers: "CIMultiDict[str]", response: ClientResponse, ) -> None: return await self._trace_config._on_request_redirect.send( self._session, self._trace_config_ctx, TraceRequestRedirectParams(method, url, headers, response), ) async def send_connection_queued_start(self) -> None: return await self._trace_config.on_connection_queued_start.send( self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams() ) async def send_connection_queued_end(self) -> None: return await self._trace_config.on_connection_queued_end.send( self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams() ) async def send_connection_create_start(self) -> None: return await self._trace_config.on_connection_create_start.send( self._session, self._trace_config_ctx, TraceConnectionCreateStartParams() ) async def send_connection_create_end(self) -> None: return await self._trace_config.on_connection_create_end.send( self._session, self._trace_config_ctx, TraceConnectionCreateEndParams() ) async def send_connection_reuseconn(self) -> None: return await self._trace_config.on_connection_reuseconn.send( self._session, self._trace_config_ctx, TraceConnectionReuseconnParams() ) async def send_dns_resolvehost_start(self, host: str) -> None: return await self._trace_config.on_dns_resolvehost_start.send( self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host) ) async def send_dns_resolvehost_end(self, host: str) -> None: return await self._trace_config.on_dns_resolvehost_end.send( self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host) ) async def send_dns_cache_hit(self, host: str) -> None: return await self._trace_config.on_dns_cache_hit.send( self._session, self._trace_config_ctx, TraceDnsCacheHitParams(host) ) async def send_dns_cache_miss(self, host: str) -> None: return await self._trace_config.on_dns_cache_miss.send( self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host) ) async def 
send_request_headers( self, method: str, url: URL, headers: "CIMultiDict[str]" ) -> None: return await self._trace_config._on_request_headers_sent.send( self._session, self._trace_config_ctx, TraceRequestHeadersSentParams(method, url, headers), )
15,177
Python
31.088795
87
0.657508
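A usage sketch wiring two of the signals defined in tracing.py above into a ClientSession; the printed text is arbitrary, and any coroutine with the (session, context, params) signature can be appended to a signal.

import asyncio

import aiohttp


async def on_request_start(session, ctx, params) -> None:
    # params is a TraceRequestStartParams instance.
    print("starting request to", params.url)


async def on_request_end(session, ctx, params) -> None:
    # params is a TraceRequestEndParams instance.
    print("finished with status", params.response.status)


async def main() -> None:
    trace_config = aiohttp.TraceConfig()
    trace_config.on_request_start.append(on_request_start)
    trace_config.on_request_end.append(on_request_end)
    async with aiohttp.ClientSession(trace_configs=[trace_config]) as session:
        async with session.get("https://example.com") as resp:
            await resp.read()


asyncio.run(main())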
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/abc.py
import asyncio import logging from abc import ABC, abstractmethod from collections.abc import Sized from http.cookies import BaseCookie, Morsel from typing import ( TYPE_CHECKING, Any, Awaitable, Callable, Dict, Generator, Iterable, List, Optional, Tuple, ) from multidict import CIMultiDict from yarl import URL from .helpers import get_running_loop from .typedefs import LooseCookies if TYPE_CHECKING: # pragma: no cover from .web_app import Application from .web_exceptions import HTTPException from .web_request import BaseRequest, Request from .web_response import StreamResponse else: BaseRequest = Request = Application = StreamResponse = None HTTPException = None class AbstractRouter(ABC): def __init__(self) -> None: self._frozen = False def post_init(self, app: Application) -> None: """Post init stage. Not an abstract method for sake of backward compatibility, but if the router wants to be aware of the application it can override this. """ @property def frozen(self) -> bool: return self._frozen def freeze(self) -> None: """Freeze router.""" self._frozen = True @abstractmethod async def resolve(self, request: Request) -> "AbstractMatchInfo": """Return MATCH_INFO for given request""" class AbstractMatchInfo(ABC): @property # pragma: no branch @abstractmethod def handler(self) -> Callable[[Request], Awaitable[StreamResponse]]: """Execute matched request handler""" @property @abstractmethod def expect_handler(self) -> Callable[[Request], Awaitable[None]]: """Expect handler for 100-continue processing""" @property # pragma: no branch @abstractmethod def http_exception(self) -> Optional[HTTPException]: """HTTPException instance raised on router's resolving, or None""" @abstractmethod # pragma: no branch def get_info(self) -> Dict[str, Any]: """Return a dict with additional info useful for introspection""" @property # pragma: no branch @abstractmethod def apps(self) -> Tuple[Application, ...]: """Stack of nested applications. Top level application is left-most element. """ @abstractmethod def add_app(self, app: Application) -> None: """Add application to the nested apps stack.""" @abstractmethod def freeze(self) -> None: """Freeze the match info. The method is called after route resolution. After the call .add_app() is forbidden. 
""" class AbstractView(ABC): """Abstract class based view.""" def __init__(self, request: Request) -> None: self._request = request @property def request(self) -> Request: """Request instance.""" return self._request @abstractmethod def __await__(self) -> Generator[Any, None, StreamResponse]: """Execute the view handler.""" class AbstractResolver(ABC): """Abstract DNS resolver.""" @abstractmethod async def resolve(self, host: str, port: int, family: int) -> List[Dict[str, Any]]: """Return IP address for given hostname""" @abstractmethod async def close(self) -> None: """Release resolver""" if TYPE_CHECKING: # pragma: no cover IterableBase = Iterable[Morsel[str]] else: IterableBase = Iterable ClearCookiePredicate = Callable[["Morsel[str]"], bool] class AbstractCookieJar(Sized, IterableBase): """Abstract Cookie Jar.""" def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None: self._loop = get_running_loop(loop) @abstractmethod def clear(self, predicate: Optional[ClearCookiePredicate] = None) -> None: """Clear all cookies if no predicate is passed.""" @abstractmethod def clear_domain(self, domain: str) -> None: """Clear all cookies for domain and all subdomains.""" @abstractmethod def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None: """Update cookies.""" @abstractmethod def filter_cookies(self, request_url: URL) -> "BaseCookie[str]": """Return the jar's cookies filtered by their attributes.""" class AbstractStreamWriter(ABC): """Abstract stream writer.""" buffer_size = 0 output_size = 0 length: Optional[int] = 0 @abstractmethod async def write(self, chunk: bytes) -> None: """Write chunk into stream.""" @abstractmethod async def write_eof(self, chunk: bytes = b"") -> None: """Write last chunk.""" @abstractmethod async def drain(self) -> None: """Flush the write buffer.""" @abstractmethod def enable_compression(self, encoding: str = "deflate") -> None: """Enable HTTP body compression""" @abstractmethod def enable_chunking(self) -> None: """Enable HTTP chunked mode""" @abstractmethod async def write_headers( self, status_line: str, headers: "CIMultiDict[str]" ) -> None: """Write HTTP headers""" class AbstractAccessLogger(ABC): """Abstract writer to access log.""" def __init__(self, logger: logging.Logger, log_format: str) -> None: self.logger = logger self.log_format = log_format @abstractmethod def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None: """Emit log to logger."""
5,505
Python
25.471154
87
0.641054
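A small sketch of implementing one of the abstract contracts above, AbstractAccessLogger; the format string passed by the server is deliberately ignored in this example.

import logging

from aiohttp.abc import AbstractAccessLogger


class PlainAccessLogger(AbstractAccessLogger):
    def log(self, request, response, time: float) -> None:
        # self.logger and self.log_format are set by the base class constructor.
        self.logger.info(
            "%s %s -> %s in %.3fs", request.method, request.path, response.status, time
        )


# Passed when starting the app, e.g.:
# web.run_app(app, access_log_class=PlainAccessLogger)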
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/http_exceptions.py
"""Low-level http related exceptions.""" from typing import Optional, Union from .typedefs import _CIMultiDict __all__ = ("HttpProcessingError",) class HttpProcessingError(Exception): """HTTP error. Shortcut for raising HTTP errors with custom code, message and headers. code: HTTP Error code. message: (optional) Error message. headers: (optional) Headers to be sent in response, a list of pairs """ code = 0 message = "" headers = None def __init__( self, *, code: Optional[int] = None, message: str = "", headers: Optional[_CIMultiDict] = None, ) -> None: if code is not None: self.code = code self.headers = headers self.message = message def __str__(self) -> str: return f"{self.code}, message={self.message!r}" def __repr__(self) -> str: return f"<{self.__class__.__name__}: {self}>" class BadHttpMessage(HttpProcessingError): code = 400 message = "Bad Request" def __init__(self, message: str, *, headers: Optional[_CIMultiDict] = None) -> None: super().__init__(message=message, headers=headers) self.args = (message,) class HttpBadRequest(BadHttpMessage): code = 400 message = "Bad Request" class PayloadEncodingError(BadHttpMessage): """Base class for payload errors""" class ContentEncodingError(PayloadEncodingError): """Content encoding error.""" class TransferEncodingError(PayloadEncodingError): """transfer encoding error.""" class ContentLengthError(PayloadEncodingError): """Not enough data for satisfy content length header.""" class LineTooLong(BadHttpMessage): def __init__( self, line: str, limit: str = "Unknown", actual_size: str = "Unknown" ) -> None: super().__init__( f"Got more than {limit} bytes ({actual_size}) when reading {line}." ) self.args = (line, limit, actual_size) class InvalidHeader(BadHttpMessage): def __init__(self, hdr: Union[bytes, str]) -> None: if isinstance(hdr, bytes): hdr = hdr.decode("utf-8", "surrogateescape") super().__init__(f"Invalid HTTP Header: {hdr}") self.hdr = hdr self.args = (hdr,) class BadStatusLine(BadHttpMessage): def __init__(self, line: str = "") -> None: if not isinstance(line, str): line = repr(line) super().__init__(f"Bad status line {line!r}") self.args = (line,) self.line = line class InvalidURLError(BadHttpMessage): pass
2,586
Python
23.40566
88
0.607502
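These exceptions are normally raised by aiohttp's low-level HTTP parser rather than by application code; a contrived sketch of catching one, with a made-up message:

from aiohttp.http_exceptions import BadHttpMessage

try:
    raise BadHttpMessage("malformed header block")  # contrived trigger
except BadHttpMessage as exc:
    print(exc.code, exc.message)  # 400 malformed header block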
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/web_fileresponse.py
import asyncio import mimetypes import os import pathlib import sys from typing import ( # noqa IO, TYPE_CHECKING, Any, Awaitable, Callable, Iterator, List, Optional, Tuple, Union, cast, ) from . import hdrs from .abc import AbstractStreamWriter from .helpers import ETAG_ANY, ETag from .typedefs import Final, LooseHeaders from .web_exceptions import ( HTTPNotModified, HTTPPartialContent, HTTPPreconditionFailed, HTTPRequestRangeNotSatisfiable, ) from .web_response import StreamResponse __all__ = ("FileResponse",) if TYPE_CHECKING: # pragma: no cover from .web_request import BaseRequest _T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]] NOSENDFILE: Final[bool] = bool(os.environ.get("AIOHTTP_NOSENDFILE")) class FileResponse(StreamResponse): """A response object can be used to send files.""" def __init__( self, path: Union[str, pathlib.Path], chunk_size: int = 256 * 1024, status: int = 200, reason: Optional[str] = None, headers: Optional[LooseHeaders] = None, ) -> None: super().__init__(status=status, reason=reason, headers=headers) if isinstance(path, str): path = pathlib.Path(path) self._path = path self._chunk_size = chunk_size async def _sendfile_fallback( self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int ) -> AbstractStreamWriter: # To keep memory usage low,fobj is transferred in chunks # controlled by the constructor's chunk_size argument. chunk_size = self._chunk_size loop = asyncio.get_event_loop() await loop.run_in_executor(None, fobj.seek, offset) chunk = await loop.run_in_executor(None, fobj.read, chunk_size) while chunk: await writer.write(chunk) count = count - chunk_size if count <= 0: break chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count)) await writer.drain() return writer async def _sendfile( self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int ) -> AbstractStreamWriter: writer = await super().prepare(request) assert writer is not None if NOSENDFILE or sys.version_info < (3, 7) or self.compression: return await self._sendfile_fallback(writer, fobj, offset, count) loop = request._loop transport = request.transport assert transport is not None try: await loop.sendfile(transport, fobj, offset, count) except NotImplementedError: return await self._sendfile_fallback(writer, fobj, offset, count) await super().write_eof() return writer @staticmethod def _strong_etag_match(etag_value: str, etags: Tuple[ETag, ...]) -> bool: if len(etags) == 1 and etags[0].value == ETAG_ANY: return True return any(etag.value == etag_value for etag in etags if not etag.is_weak) async def _not_modified( self, request: "BaseRequest", etag_value: str, last_modified: float ) -> Optional[AbstractStreamWriter]: self.set_status(HTTPNotModified.status_code) self._length_check = False self.etag = etag_value # type: ignore[assignment] self.last_modified = last_modified # type: ignore[assignment] # Delete any Content-Length headers provided by user. 
HTTP 304 # should always have empty response body return await super().prepare(request) async def _precondition_failed( self, request: "BaseRequest" ) -> Optional[AbstractStreamWriter]: self.set_status(HTTPPreconditionFailed.status_code) self.content_length = 0 return await super().prepare(request) async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]: filepath = self._path gzip = False if "gzip" in request.headers.get(hdrs.ACCEPT_ENCODING, ""): gzip_path = filepath.with_name(filepath.name + ".gz") if gzip_path.is_file(): filepath = gzip_path gzip = True loop = asyncio.get_event_loop() st: os.stat_result = await loop.run_in_executor(None, filepath.stat) etag_value = f"{st.st_mtime_ns:x}-{st.st_size:x}" last_modified = st.st_mtime # https://tools.ietf.org/html/rfc7232#section-6 ifmatch = request.if_match if ifmatch is not None and not self._strong_etag_match(etag_value, ifmatch): return await self._precondition_failed(request) unmodsince = request.if_unmodified_since if ( unmodsince is not None and ifmatch is None and st.st_mtime > unmodsince.timestamp() ): return await self._precondition_failed(request) ifnonematch = request.if_none_match if ifnonematch is not None and self._strong_etag_match(etag_value, ifnonematch): return await self._not_modified(request, etag_value, last_modified) modsince = request.if_modified_since if ( modsince is not None and ifnonematch is None and st.st_mtime <= modsince.timestamp() ): return await self._not_modified(request, etag_value, last_modified) if hdrs.CONTENT_TYPE not in self.headers: ct, encoding = mimetypes.guess_type(str(filepath)) if not ct: ct = "application/octet-stream" should_set_ct = True else: encoding = "gzip" if gzip else None should_set_ct = False status = self._status file_size = st.st_size count = file_size start = None ifrange = request.if_range if ifrange is None or st.st_mtime <= ifrange.timestamp(): # If-Range header check: # condition = cached date >= last modification date # return 206 if True else 200. # if False: # Range header would not be processed, return 200 # if True but Range header missing # return 200 try: rng = request.http_range start = rng.start end = rng.stop except ValueError: # https://tools.ietf.org/html/rfc7233: # A server generating a 416 (Range Not Satisfiable) response to # a byte-range request SHOULD send a Content-Range header field # with an unsatisfied-range value. # The complete-length in a 416 response indicates the current # length of the selected representation. # # Will do the same below. Many servers ignore this and do not # send a Content-Range header with HTTP 416 self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}" self.set_status(HTTPRequestRangeNotSatisfiable.status_code) return await super().prepare(request) # If a range request has been made, convert start, end slice # notation into file pointer offset and count if start is not None or end is not None: if start < 0 and end is None: # return tail of file start += file_size if start < 0: # if Range:bytes=-1000 in request header but file size # is only 200, there would be trouble without this start = 0 count = file_size - start else: # rfc7233:If the last-byte-pos value is # absent, or if the value is greater than or equal to # the current length of the representation data, # the byte range is interpreted as the remainder # of the representation (i.e., the server replaces the # value of last-byte-pos with a value that is one less than # the current length of the selected representation). 
count = ( min(end if end is not None else file_size, file_size) - start ) if start >= file_size: # HTTP 416 should be returned in this case. # # According to https://tools.ietf.org/html/rfc7233: # If a valid byte-range-set includes at least one # byte-range-spec with a first-byte-pos that is less than # the current length of the representation, or at least one # suffix-byte-range-spec with a non-zero suffix-length, # then the byte-range-set is satisfiable. Otherwise, the # byte-range-set is unsatisfiable. self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}" self.set_status(HTTPRequestRangeNotSatisfiable.status_code) return await super().prepare(request) status = HTTPPartialContent.status_code # Even though you are sending the whole file, you should still # return a HTTP 206 for a Range request. self.set_status(status) if should_set_ct: self.content_type = ct # type: ignore[assignment] if encoding: self.headers[hdrs.CONTENT_ENCODING] = encoding if gzip: self.headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING self.etag = etag_value # type: ignore[assignment] self.last_modified = st.st_mtime # type: ignore[assignment] self.content_length = count self.headers[hdrs.ACCEPT_RANGES] = "bytes" real_start = cast(int, start) if status == HTTPPartialContent.status_code: self.headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format( real_start, real_start + count - 1, file_size ) # If we are sending 0 bytes calling sendfile() will throw a ValueError if count == 0 or request.method == hdrs.METH_HEAD or self.status in [204, 304]: return await super().prepare(request) fobj = await loop.run_in_executor(None, filepath.open, "rb") if start: # be aware that start could be None or int=0 here. offset = start else: offset = 0 try: return await self._sendfile(request, fobj, offset, count) finally: await loop.run_in_executor(None, fobj.close)
10,784
Python
36.318339
88
0.58355
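A usage sketch for the FileResponse class above; the file path is a placeholder. The conditional-request (ETag, Last-Modified) and Range handling shown in prepare() is applied automatically by the class.

from aiohttp import web


async def report(request: web.Request) -> web.FileResponse:
    # chunk_size only affects the fallback copy loop when sendfile() is unavailable.
    return web.FileResponse("./static/report.pdf", chunk_size=256 * 1024)


app = web.Application()
app.add_routes([web.get("/report", report)])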
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/hdrs.py
"""HTTP Headers constants.""" # After changing the file content call ./tools/gen.py # to regenerate the headers parser import sys from typing import Set from multidict import istr if sys.version_info >= (3, 8): from typing import Final else: from typing_extensions import Final METH_ANY: Final[str] = "*" METH_CONNECT: Final[str] = "CONNECT" METH_HEAD: Final[str] = "HEAD" METH_GET: Final[str] = "GET" METH_DELETE: Final[str] = "DELETE" METH_OPTIONS: Final[str] = "OPTIONS" METH_PATCH: Final[str] = "PATCH" METH_POST: Final[str] = "POST" METH_PUT: Final[str] = "PUT" METH_TRACE: Final[str] = "TRACE" METH_ALL: Final[Set[str]] = { METH_CONNECT, METH_HEAD, METH_GET, METH_DELETE, METH_OPTIONS, METH_PATCH, METH_POST, METH_PUT, METH_TRACE, } ACCEPT: Final[istr] = istr("Accept") ACCEPT_CHARSET: Final[istr] = istr("Accept-Charset") ACCEPT_ENCODING: Final[istr] = istr("Accept-Encoding") ACCEPT_LANGUAGE: Final[istr] = istr("Accept-Language") ACCEPT_RANGES: Final[istr] = istr("Accept-Ranges") ACCESS_CONTROL_MAX_AGE: Final[istr] = istr("Access-Control-Max-Age") ACCESS_CONTROL_ALLOW_CREDENTIALS: Final[istr] = istr("Access-Control-Allow-Credentials") ACCESS_CONTROL_ALLOW_HEADERS: Final[istr] = istr("Access-Control-Allow-Headers") ACCESS_CONTROL_ALLOW_METHODS: Final[istr] = istr("Access-Control-Allow-Methods") ACCESS_CONTROL_ALLOW_ORIGIN: Final[istr] = istr("Access-Control-Allow-Origin") ACCESS_CONTROL_EXPOSE_HEADERS: Final[istr] = istr("Access-Control-Expose-Headers") ACCESS_CONTROL_REQUEST_HEADERS: Final[istr] = istr("Access-Control-Request-Headers") ACCESS_CONTROL_REQUEST_METHOD: Final[istr] = istr("Access-Control-Request-Method") AGE: Final[istr] = istr("Age") ALLOW: Final[istr] = istr("Allow") AUTHORIZATION: Final[istr] = istr("Authorization") CACHE_CONTROL: Final[istr] = istr("Cache-Control") CONNECTION: Final[istr] = istr("Connection") CONTENT_DISPOSITION: Final[istr] = istr("Content-Disposition") CONTENT_ENCODING: Final[istr] = istr("Content-Encoding") CONTENT_LANGUAGE: Final[istr] = istr("Content-Language") CONTENT_LENGTH: Final[istr] = istr("Content-Length") CONTENT_LOCATION: Final[istr] = istr("Content-Location") CONTENT_MD5: Final[istr] = istr("Content-MD5") CONTENT_RANGE: Final[istr] = istr("Content-Range") CONTENT_TRANSFER_ENCODING: Final[istr] = istr("Content-Transfer-Encoding") CONTENT_TYPE: Final[istr] = istr("Content-Type") COOKIE: Final[istr] = istr("Cookie") DATE: Final[istr] = istr("Date") DESTINATION: Final[istr] = istr("Destination") DIGEST: Final[istr] = istr("Digest") ETAG: Final[istr] = istr("Etag") EXPECT: Final[istr] = istr("Expect") EXPIRES: Final[istr] = istr("Expires") FORWARDED: Final[istr] = istr("Forwarded") FROM: Final[istr] = istr("From") HOST: Final[istr] = istr("Host") IF_MATCH: Final[istr] = istr("If-Match") IF_MODIFIED_SINCE: Final[istr] = istr("If-Modified-Since") IF_NONE_MATCH: Final[istr] = istr("If-None-Match") IF_RANGE: Final[istr] = istr("If-Range") IF_UNMODIFIED_SINCE: Final[istr] = istr("If-Unmodified-Since") KEEP_ALIVE: Final[istr] = istr("Keep-Alive") LAST_EVENT_ID: Final[istr] = istr("Last-Event-ID") LAST_MODIFIED: Final[istr] = istr("Last-Modified") LINK: Final[istr] = istr("Link") LOCATION: Final[istr] = istr("Location") MAX_FORWARDS: Final[istr] = istr("Max-Forwards") ORIGIN: Final[istr] = istr("Origin") PRAGMA: Final[istr] = istr("Pragma") PROXY_AUTHENTICATE: Final[istr] = istr("Proxy-Authenticate") PROXY_AUTHORIZATION: Final[istr] = istr("Proxy-Authorization") RANGE: Final[istr] = istr("Range") REFERER: Final[istr] = istr("Referer") RETRY_AFTER: 
Final[istr] = istr("Retry-After") SEC_WEBSOCKET_ACCEPT: Final[istr] = istr("Sec-WebSocket-Accept") SEC_WEBSOCKET_VERSION: Final[istr] = istr("Sec-WebSocket-Version") SEC_WEBSOCKET_PROTOCOL: Final[istr] = istr("Sec-WebSocket-Protocol") SEC_WEBSOCKET_EXTENSIONS: Final[istr] = istr("Sec-WebSocket-Extensions") SEC_WEBSOCKET_KEY: Final[istr] = istr("Sec-WebSocket-Key") SEC_WEBSOCKET_KEY1: Final[istr] = istr("Sec-WebSocket-Key1") SERVER: Final[istr] = istr("Server") SET_COOKIE: Final[istr] = istr("Set-Cookie") TE: Final[istr] = istr("TE") TRAILER: Final[istr] = istr("Trailer") TRANSFER_ENCODING: Final[istr] = istr("Transfer-Encoding") UPGRADE: Final[istr] = istr("Upgrade") URI: Final[istr] = istr("URI") USER_AGENT: Final[istr] = istr("User-Agent") VARY: Final[istr] = istr("Vary") VIA: Final[istr] = istr("Via") WANT_DIGEST: Final[istr] = istr("Want-Digest") WARNING: Final[istr] = istr("Warning") WWW_AUTHENTICATE: Final[istr] = istr("WWW-Authenticate") X_FORWARDED_FOR: Final[istr] = istr("X-Forwarded-For") X_FORWARDED_HOST: Final[istr] = istr("X-Forwarded-Host") X_FORWARDED_PROTO: Final[istr] = istr("X-Forwarded-Proto")
4,724
Python
40.086956
88
0.715072
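A sketch showing the istr constants above being used with aiohttp's case-insensitive header multidicts; the header values are illustrative.

from aiohttp import hdrs, web


async def handler(request: web.Request) -> web.Response:
    # Lookups and assignments through hdrs.* avoid typos and repeated istr allocation.
    agent = request.headers.get(hdrs.USER_AGENT, "unknown")
    resp = web.Response(text=f"hello, {agent}")
    resp.headers[hdrs.CACHE_CONTROL] = "no-store"
    return resp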
omniverse-code/kit/exts/omni.kit.pip_archive/pip_prebundle/aiohttp/client_ws.py
"""WebSocket client for asyncio.""" import asyncio from typing import Any, Optional, cast import async_timeout from .client_exceptions import ClientError from .client_reqrep import ClientResponse from .helpers import call_later, set_result from .http import ( WS_CLOSED_MESSAGE, WS_CLOSING_MESSAGE, WebSocketError, WSCloseCode, WSMessage, WSMsgType, ) from .http_websocket import WebSocketWriter # WSMessage from .streams import EofStream, FlowControlDataQueue from .typedefs import ( DEFAULT_JSON_DECODER, DEFAULT_JSON_ENCODER, JSONDecoder, JSONEncoder, ) class ClientWebSocketResponse: def __init__( self, reader: "FlowControlDataQueue[WSMessage]", writer: WebSocketWriter, protocol: Optional[str], response: ClientResponse, timeout: float, autoclose: bool, autoping: bool, loop: asyncio.AbstractEventLoop, *, receive_timeout: Optional[float] = None, heartbeat: Optional[float] = None, compress: int = 0, client_notakeover: bool = False, ) -> None: self._response = response self._conn = response.connection self._writer = writer self._reader = reader self._protocol = protocol self._closed = False self._closing = False self._close_code: Optional[int] = None self._timeout = timeout self._receive_timeout = receive_timeout self._autoclose = autoclose self._autoping = autoping self._heartbeat = heartbeat self._heartbeat_cb: Optional[asyncio.TimerHandle] = None if heartbeat is not None: self._pong_heartbeat = heartbeat / 2.0 self._pong_response_cb: Optional[asyncio.TimerHandle] = None self._loop = loop self._waiting: Optional[asyncio.Future[bool]] = None self._exception: Optional[BaseException] = None self._compress = compress self._client_notakeover = client_notakeover self._reset_heartbeat() def _cancel_heartbeat(self) -> None: if self._pong_response_cb is not None: self._pong_response_cb.cancel() self._pong_response_cb = None if self._heartbeat_cb is not None: self._heartbeat_cb.cancel() self._heartbeat_cb = None def _reset_heartbeat(self) -> None: self._cancel_heartbeat() if self._heartbeat is not None: self._heartbeat_cb = call_later( self._send_heartbeat, self._heartbeat, self._loop ) def _send_heartbeat(self) -> None: if self._heartbeat is not None and not self._closed: # fire-and-forget a task is not perfect but maybe ok for # sending ping. Otherwise we need a long-living heartbeat # task in the class. 
self._loop.create_task(self._writer.ping()) if self._pong_response_cb is not None: self._pong_response_cb.cancel() self._pong_response_cb = call_later( self._pong_not_received, self._pong_heartbeat, self._loop ) def _pong_not_received(self) -> None: if not self._closed: self._closed = True self._close_code = WSCloseCode.ABNORMAL_CLOSURE self._exception = asyncio.TimeoutError() self._response.close() @property def closed(self) -> bool: return self._closed @property def close_code(self) -> Optional[int]: return self._close_code @property def protocol(self) -> Optional[str]: return self._protocol @property def compress(self) -> int: return self._compress @property def client_notakeover(self) -> bool: return self._client_notakeover def get_extra_info(self, name: str, default: Any = None) -> Any: """extra info from connection transport""" conn = self._response.connection if conn is None: return default transport = conn.transport if transport is None: return default return transport.get_extra_info(name, default) def exception(self) -> Optional[BaseException]: return self._exception async def ping(self, message: bytes = b"") -> None: await self._writer.ping(message) async def pong(self, message: bytes = b"") -> None: await self._writer.pong(message) async def send_str(self, data: str, compress: Optional[int] = None) -> None: if not isinstance(data, str): raise TypeError("data argument must be str (%r)" % type(data)) await self._writer.send(data, binary=False, compress=compress) async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None: if not isinstance(data, (bytes, bytearray, memoryview)): raise TypeError("data argument must be byte-ish (%r)" % type(data)) await self._writer.send(data, binary=True, compress=compress) async def send_json( self, data: Any, compress: Optional[int] = None, *, dumps: JSONEncoder = DEFAULT_JSON_ENCODER, ) -> None: await self.send_str(dumps(data), compress=compress) async def close(self, *, code: int = WSCloseCode.OK, message: bytes = b"") -> bool: # we need to break `receive()` cycle first, # `close()` may be called from different task if self._waiting is not None and not self._closed: self._reader.feed_data(WS_CLOSING_MESSAGE, 0) await self._waiting if not self._closed: self._cancel_heartbeat() self._closed = True try: await self._writer.close(code, message) except asyncio.CancelledError: self._close_code = WSCloseCode.ABNORMAL_CLOSURE self._response.close() raise except Exception as exc: self._close_code = WSCloseCode.ABNORMAL_CLOSURE self._exception = exc self._response.close() return True if self._closing: self._response.close() return True while True: try: async with async_timeout.timeout(self._timeout): msg = await self._reader.read() except asyncio.CancelledError: self._close_code = WSCloseCode.ABNORMAL_CLOSURE self._response.close() raise except Exception as exc: self._close_code = WSCloseCode.ABNORMAL_CLOSURE self._exception = exc self._response.close() return True if msg.type == WSMsgType.CLOSE: self._close_code = msg.data self._response.close() return True else: return False async def receive(self, timeout: Optional[float] = None) -> WSMessage: while True: if self._waiting is not None: raise RuntimeError("Concurrent call to receive() is not allowed") if self._closed: return WS_CLOSED_MESSAGE elif self._closing: await self.close() return WS_CLOSED_MESSAGE try: self._waiting = self._loop.create_future() try: async with async_timeout.timeout(timeout or self._receive_timeout): msg = await self._reader.read() self._reset_heartbeat() 
finally: waiter = self._waiting self._waiting = None set_result(waiter, True) except (asyncio.CancelledError, asyncio.TimeoutError): self._close_code = WSCloseCode.ABNORMAL_CLOSURE raise except EofStream: self._close_code = WSCloseCode.OK await self.close() return WSMessage(WSMsgType.CLOSED, None, None) except ClientError: self._closed = True self._close_code = WSCloseCode.ABNORMAL_CLOSURE return WS_CLOSED_MESSAGE except WebSocketError as exc: self._close_code = exc.code await self.close(code=exc.code) return WSMessage(WSMsgType.ERROR, exc, None) except Exception as exc: self._exception = exc self._closing = True self._close_code = WSCloseCode.ABNORMAL_CLOSURE await self.close() return WSMessage(WSMsgType.ERROR, exc, None) if msg.type == WSMsgType.CLOSE: self._closing = True self._close_code = msg.data if not self._closed and self._autoclose: await self.close() elif msg.type == WSMsgType.CLOSING: self._closing = True elif msg.type == WSMsgType.PING and self._autoping: await self.pong(msg.data) continue elif msg.type == WSMsgType.PONG and self._autoping: continue return msg async def receive_str(self, *, timeout: Optional[float] = None) -> str: msg = await self.receive(timeout) if msg.type != WSMsgType.TEXT: raise TypeError(f"Received message {msg.type}:{msg.data!r} is not str") return cast(str, msg.data) async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes: msg = await self.receive(timeout) if msg.type != WSMsgType.BINARY: raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes") return cast(bytes, msg.data) async def receive_json( self, *, loads: JSONDecoder = DEFAULT_JSON_DECODER, timeout: Optional[float] = None, ) -> Any: data = await self.receive_str(timeout=timeout) return loads(data) def __aiter__(self) -> "ClientWebSocketResponse": return self async def __anext__(self) -> WSMessage: msg = await self.receive() if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED): raise StopAsyncIteration return msg
10,516
Python
33.940199
87
0.56609
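The ClientWebSocketResponse above is normally obtained from ClientSession.ws_connect rather than constructed directly; a minimal client sketch with a placeholder echo URL:

import asyncio

import aiohttp


async def main() -> None:
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect("wss://echo.example.invalid/ws") as ws:
            await ws.send_str("ping")
            async for msg in ws:
                if msg.type == aiohttp.WSMsgType.TEXT:
                    print("received:", msg.data)
                    break
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    break


asyncio.run(main())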